Dataset schema:
repo_name: string (length 6 to 130)
hexsha: list
file_path: list
code: list
apis: list
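Each record pairs a repository (repo_name plus its hexsha commit) with parallel lists of source files (file_path, code) and the library calls detected in each file (apis). Below is a minimal sketch of iterating records with this shape; it assumes the rows have been exported as JSON Lines, and the file name rows.jsonl is a placeholder, not part of the dataset.

import json

DATA_PATH = "rows.jsonl"  # placeholder path; assumes a JSON Lines export of the rows

def iter_records(path):
    """Yield one dict per row with the five fields listed in the schema above."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            # repo_name is a plain string; hexsha, file_path, code and apis are
            # parallel lists with one entry per file taken from that repository.
            yield row

if __name__ == "__main__":
    for record in iter_records(DATA_PATH):
        print(record["repo_name"], record["file_path"][0], len(record["apis"][0]))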
repo_name: yangzilongdmgy/merge_monster_3d
hexsha: [ "0595e36749d32c3d5537a3f707727a137c82076e" ]
file_path: [ "mmdet3d/models/dense_heads/assigner/assign_result.py" ]
code:
[ "# Modification 2020 RangiLyu\n# Copyright 2018-2019 Open-MMLab.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom ...utils import util_mixins\n\n\nclass AssignResult(util_mixins.NiceRepr):\n \"\"\"\n Stores assignments between predicted and truth boxes.\n\n Attributes:\n num_gts (int): the number of truth boxes considered when computing this\n assignment\n\n gt_inds (LongTensor): for each predicted box indicates the 1-based\n index of the assigned truth box. 0 means unassigned and -1 means\n ignore.\n\n max_overlaps (FloatTensor): the iou between the predicted box and its\n assigned truth box.\n\n labels (None | LongTensor): If specified, for each predicted box\n indicates the category label of the assigned truth box.\n\n Example:\n >>> # An assign result between 4 predicted boxes and 9 true boxes\n >>> # where only two boxes were assigned.\n >>> num_gts = 9\n >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])\n >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])\n >>> labels = torch.LongTensor([0, 3, 4, 0])\n >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)\n >>> print(str(self)) # xdoctest: +IGNORE_WANT\n <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),\n labels.shape=(4,))>\n >>> # Force addition of gt labels (when adding gt as proposals)\n >>> new_labels = torch.LongTensor([3, 4, 5])\n >>> self.add_gt_(new_labels)\n >>> print(str(self)) # xdoctest: +IGNORE_WANT\n <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),\n labels.shape=(7,))>\n \"\"\"\n\n def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):\n self.num_gts = num_gts\n self.gt_inds = gt_inds\n self.max_overlaps = max_overlaps\n self.labels = labels\n # Interface for possible user-defined properties\n self._extra_properties = {}\n\n @property\n def num_preds(self):\n \"\"\"int: the number of predictions in this assignment\"\"\"\n return len(self.gt_inds)\n\n def set_extra_property(self, key, value):\n \"\"\"Set user-defined new property.\"\"\"\n assert key not in self.info\n self._extra_properties[key] = value\n\n def get_extra_property(self, key):\n \"\"\"Get user-defined property.\"\"\"\n return self._extra_properties.get(key, None)\n\n @property\n def info(self):\n \"\"\"dict: a dictionary of info about the object\"\"\"\n basic_info = {\n \"num_gts\": self.num_gts,\n \"num_preds\": self.num_preds,\n \"gt_inds\": self.gt_inds,\n \"max_overlaps\": self.max_overlaps,\n \"labels\": self.labels,\n }\n basic_info.update(self._extra_properties)\n return basic_info\n\n def __nice__(self):\n \"\"\"str: a \"nice\" summary string describing this assign result\"\"\"\n parts = []\n parts.append(f\"num_gts={self.num_gts!r}\")\n if self.gt_inds is None:\n parts.append(f\"gt_inds={self.gt_inds!r}\")\n else:\n parts.append(f\"gt_inds.shape={tuple(self.gt_inds.shape)!r}\")\n if self.max_overlaps is None:\n parts.append(f\"max_overlaps={self.max_overlaps!r}\")\n else:\n parts.append(\"max_overlaps.shape=\" f\"{tuple(self.max_overlaps.shape)!r}\")\n if self.labels 
is None:\n parts.append(f\"labels={self.labels!r}\")\n else:\n parts.append(f\"labels.shape={tuple(self.labels.shape)!r}\")\n return \", \".join(parts)\n\n @classmethod\n def random(cls, **kwargs):\n \"\"\"Create random AssignResult for tests or debugging.\n\n Args:\n num_preds: number of predicted boxes\n num_gts: number of true boxes\n p_ignore (float): probability of a predicted box assinged to an\n ignored truth\n p_assigned (float): probability of a predicted box not being\n assigned\n p_use_label (float | bool): with labels or not\n rng (None | int | numpy.random.RandomState): seed or state\n\n Returns:\n :obj:`AssignResult`: Randomly generated assign results.\n\n Example:\n >>> from nanodet.model.head.assigner.assign_result import AssignResult\n >>> self = AssignResult.random()\n >>> print(self.info)\n \"\"\"\n rng = kwargs.get(\"rng\", None)\n num_gts = kwargs.get(\"num_gts\", None)\n num_preds = kwargs.get(\"num_preds\", None)\n p_ignore = kwargs.get(\"p_ignore\", 0.3)\n p_assigned = kwargs.get(\"p_assigned\", 0.7)\n p_use_label = kwargs.get(\"p_use_label\", 0.5)\n num_classes = kwargs.get(\"p_use_label\", 3)\n\n import numpy as np\n\n if rng is None:\n rng = np.random.mtrand._rand\n elif isinstance(rng, int):\n rng = np.random.RandomState(rng)\n else:\n rng = rng\n if num_gts is None:\n num_gts = rng.randint(0, 8)\n if num_preds is None:\n num_preds = rng.randint(0, 16)\n\n if num_gts == 0:\n max_overlaps = torch.zeros(num_preds, dtype=torch.float32)\n gt_inds = torch.zeros(num_preds, dtype=torch.int64)\n if p_use_label is True or p_use_label < rng.rand():\n labels = torch.zeros(num_preds, dtype=torch.int64)\n else:\n labels = None\n else:\n import numpy as np\n\n # Create an overlap for each predicted box\n max_overlaps = torch.from_numpy(rng.rand(num_preds))\n\n # Construct gt_inds for each predicted box\n is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)\n # maximum number of assignments constraints\n n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))\n\n assigned_idxs = np.where(is_assigned)[0]\n rng.shuffle(assigned_idxs)\n assigned_idxs = assigned_idxs[0:n_assigned]\n assigned_idxs.sort()\n\n is_assigned[:] = 0\n is_assigned[assigned_idxs] = True\n\n is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned\n\n gt_inds = torch.zeros(num_preds, dtype=torch.int64)\n\n true_idxs = np.arange(num_gts)\n rng.shuffle(true_idxs)\n true_idxs = torch.from_numpy(true_idxs)\n gt_inds[is_assigned] = true_idxs[:n_assigned]\n\n gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))\n gt_inds[is_ignore] = -1\n gt_inds[~is_assigned] = 0\n max_overlaps[~is_assigned] = 0\n\n if p_use_label is True or p_use_label < rng.rand():\n if num_classes == 0:\n labels = torch.zeros(num_preds, dtype=torch.int64)\n else:\n labels = torch.from_numpy(\n # remind that we set FG labels to [0, num_class-1]\n # since mmdet v2.0\n # BG cat_id: num_class\n rng.randint(0, num_classes, size=num_preds)\n )\n labels[~is_assigned] = 0\n else:\n labels = None\n\n self = cls(num_gts, gt_inds, max_overlaps, labels)\n return self\n\n def add_gt_(self, gt_labels):\n \"\"\"Add ground truth as assigned results.\n\n Args:\n gt_labels (torch.Tensor): Labels of gt boxes\n \"\"\"\n self_inds = torch.arange(\n 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device\n )\n self.gt_inds = torch.cat([self_inds, self.gt_inds])\n\n self.max_overlaps = torch.cat(\n [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]\n )\n\n if self.labels is not None:\n 
self.labels = torch.cat([gt_labels, self.labels])\n" ]
[ [ "torch.zeros", "torch.cat", "numpy.random.RandomState", "torch.from_numpy", "numpy.where", "numpy.arange" ] ]
repo_name: athbaltzis/deepblast
hexsha: [ "63d29fd162e537de1630d4f98f2b559b61a611e3" ]
file_path: [ "deepblast/utils.py" ]
code:
[ "import os\nimport numpy as np\nfrom scipy.stats import multivariate_normal\nimport inspect\nfrom sklearn.metrics.pairwise import pairwise_distances\n\n\ndef sample(transition_matrix, means, covs, start_state, n_samples,\n random_state):\n n_states, n_features, _ = covs.shape\n states = np.zeros(n_samples, dtype='int')\n emissions = np.zeros((n_samples, n_features))\n for i in range(n_samples):\n if i == 0:\n prev_state = start_state\n else:\n prev_state = states[i - 1]\n state = random_state.choice(n_states,\n p=transition_matrix[:, prev_state])\n emissions[i] = random_state.multivariate_normal(\n means[state], covs[state])\n states[i] = state\n return emissions, states\n\n\ndef make_data(T=20):\n \"\"\"\n Sample data from a HMM model and compute associated CRF potentials.\n \"\"\"\n\n random_state = np.random.RandomState(0)\n d = 0.2\n e = 0.1\n transition_matrix = np.array([[1 - 2 * d, d, d], [1 - e, e, 0],\n [1 - e, 0, e]])\n means = np.array([[0, 0], [10, 0], [5, -5]])\n covs = np.array([[[1, 0], [0, 1]], [[.2, 0], [0, .3]], [[2, 0], [0, 1]]])\n start_state = 0\n\n emissions, states = sample(transition_matrix,\n means,\n covs,\n start_state,\n n_samples=T,\n random_state=random_state)\n emission_log_likelihood = []\n for mean, cov in zip(means, covs):\n rv = multivariate_normal(mean, cov)\n emission_log_likelihood.append(rv.logpdf(emissions)[:, np.newaxis])\n emission_log_likelihood = np.concatenate(emission_log_likelihood, axis=1)\n log_transition_matrix = np.log(transition_matrix)\n\n # CRF potential from HMM model\n theta = emission_log_likelihood[:, :, np.newaxis] \\\n + log_transition_matrix[np.newaxis, :, :]\n\n return states, emissions, theta\n\n\ndef make_alignment_data():\n rng = np.random.RandomState(0)\n m, n = 2, 2\n X = rng.randn(m, 3)\n Y = rng.randn(n, 3)\n return pairwise_distances(X, Y) / 10\n\n\ndef get_data_path(fn, subfolder='data'):\n \"\"\"Return path to filename ``fn`` in the data folder.\n During testing it is often necessary to load data files. This\n function returns the full path to files in the ``data`` subfolder\n by default.\n Parameters\n ----------\n fn : str\n File name.\n subfolder : str, defaults to ``data``\n Name of the subfolder that contains the data.\n Returns\n -------\n str\n Inferred absolute path to the test data for the module where\n ``get_data_path(fn)`` is called.\n Notes\n -----\n The requested path may not point to an existing file, as its\n existence is not checked.\n This is from skbio's code base\n https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50\n \"\"\"\n # getouterframes returns a list of tuples: the second tuple\n # contains info about the caller, and the second element is its\n # filename\n callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]\n path = os.path.dirname(os.path.abspath(callers_filename))\n data_path = os.path.join(path, subfolder, fn)\n return data_path\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.log", "numpy.zeros", "numpy.random.RandomState", "scipy.stats.multivariate_normal", "sklearn.metrics.pairwise.pairwise_distances" ] ]
repo_name: magicicada/slim
hexsha: [ "ce05d40f56f5263cb039973af3e187cffc1d00b4" ]
file_path: [ "slim/types/TreatmentTypes.py" ]
code:
[ "from __future__ import annotations\n\nfrom abc import abstractmethod, ABC\nfrom decimal import Decimal\nfrom enum import Enum\nfrom typing import Dict, cast\n\nimport numpy as np\n\n# A few extra general types\nfrom slim.simulation.lice_population import LicePopulation, GenoDistrib, GenoTreatmentValue,\\\n Alleles, GenoTreatmentDistrib\n\nMoney = Decimal\n\n\nclass Treatment(Enum):\n \"\"\"\n A stub for treatment types\n TODO: add other treatments here\n \"\"\"\n EMB = 0\n THERMOLICER = 1\n\n\nclass GeneticMechanism(Enum):\n \"\"\"\n Genetic mechanism to be used when generating egg genotypes\n \"\"\"\n DISCRETE = 1\n MATERNAL = 2\n\n\nclass HeterozygousResistance(Enum):\n \"\"\"\n Resistance in a monogenic, heterozygous setting.\n \"\"\"\n DOMINANT = 1\n INCOMPLETELY_DOMINANT = 2\n RECESSIVE = 3\n\n\nTreatmentResistance = Dict[HeterozygousResistance, float]\n\n\nclass TreatmentParams(ABC):\n \"\"\"\n Abstract class for all the treatments\n \"\"\"\n name = \"\"\n\n def __init__(self, payload):\n self.quadratic_fish_mortality_coeffs = np.array(payload[\"quadratic_fish_mortality_coeffs\"])\n self.effect_delay: int = payload[\"effect_delay\"]\n self.application_period: int = payload[\"application_period\"]\n\n @staticmethod\n def parse_pheno_resistance(pheno_resistance_dict: dict) -> TreatmentResistance:\n return {HeterozygousResistance[key.upper()]: val for key, val in pheno_resistance_dict.items()}\n\n def __get_mortality_pp_increase(self, temperature: float, fish_mass: float) -> float:\n \"\"\"Get the mortality percentage point difference increase.\n\n :param temperature: the temperature in Celsius\n :param fish_mass: the fish mass (in grams)\n :returns: Mortality percentage point difference increase\n \"\"\"\n # TODO: is this the right way to solve this?\n fish_mass_indicator = 1 if fish_mass > 2000 else 0\n\n input = np.array([1, temperature, fish_mass_indicator, temperature ** 2, temperature * fish_mass_indicator,\n fish_mass_indicator ** 2])\n return max(float(self.quadratic_fish_mortality_coeffs.dot(input)), 0)\n\n @abstractmethod\n def delay(self, average_temperature: float): # pragma: no cover\n \"\"\"\n Delay before treatment should have a noticeable effect\n \"\"\"\n\n @staticmethod\n def get_allele_heterozygous_trait(alleles: Alleles):\n \"\"\"\n Get the allele heterozygous type\n \"\"\"\n # should we move this?\n if 'A' in alleles:\n if 'a' in alleles:\n trait = HeterozygousResistance.INCOMPLETELY_DOMINANT\n else:\n trait = HeterozygousResistance.DOMINANT\n else:\n trait = HeterozygousResistance.RECESSIVE\n return trait\n\n @abstractmethod\n def get_lice_treatment_mortality_rate(\n self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:\n \"\"\"\n Calculate the mortality rates of this treatment\n \"\"\"\n\n def get_fish_mortality_occurrences(\n self,\n temperature: float,\n fish_mass: float,\n num_fish: float,\n efficacy_window: float,\n mortality_events: int\n ):\n \"\"\"Get the number of fish that die due to treatment\n\n :param temperature: the temperature of the cage\n :param num_fish: the number of fish\n :param fish_mass: the average fish mass (in grams)\n :param efficacy_window: the length of the efficacy window\n :param mortality_events: the number of fish mortality events to subtract from\n \"\"\"\n predicted_pp_increase = self.__get_mortality_pp_increase(temperature, fish_mass)\n\n mortality_events_pp = 100 * mortality_events / num_fish\n predicted_deaths = ((predicted_pp_increase + mortality_events_pp) * num_fish / 100) \\\n - 
mortality_events\n predicted_deaths /= efficacy_window\n\n return predicted_deaths\n\n\nclass ChemicalTreatment(TreatmentParams):\n \"\"\"Trait for all chemical treatments\"\"\"\n def __init__(self, payload):\n super().__init__(payload)\n self.pheno_resistance = self.parse_pheno_resistance(payload[\"pheno_resistance\"])\n self.price_per_kg = Money(payload[\"price_per_kg\"])\n\n self.durability_temp_ratio: float = payload[\"durability_temp_ratio\"]\n\n\nclass ThermalTreatment(TreatmentParams):\n \"\"\"Trait for all thermal-based treatments\"\"\"\n def __init__(self, payload):\n super().__init__(payload)\n self.price_per_application = Money(payload[\"price_per_application\"])\n # NOTE: these are currently unused\n # self.exposure_temperature: float = payload[\"exposure_temperature\"]\n # self.exposure_length: float = payload[\"efficacy\"]\n\n\nclass EMB(ChemicalTreatment):\n \"\"\"Emamectin Benzoate\"\"\"\n name = \"EMB\"\n\n def delay(self, average_temperature: float):\n return self.durability_temp_ratio / average_temperature\n\n def get_lice_treatment_mortality_rate(self, lice_population: LicePopulation, _temperature=None):\n susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in\n LicePopulation.susceptible_stages]\n num_susc_per_geno = GenoDistrib.batch_sum(susceptible_populations)\n\n geno_treatment_distrib = {geno: GenoTreatmentValue(0.0, 0) for geno in num_susc_per_geno}\n\n for geno, num_susc in num_susc_per_geno.items():\n trait = self.get_allele_heterozygous_trait(geno)\n susceptibility_factor = 1.0 - self.pheno_resistance[trait]\n geno_treatment_distrib[geno] = GenoTreatmentValue(susceptibility_factor, cast(int, num_susc))\n\n return geno_treatment_distrib\n\n\nclass Thermolicer(ThermalTreatment):\n name = \"Thermolicer\"\n\n def delay(self, _):\n return 1 # effects noticeable the next day\n\n def get_lice_treatment_mortality_rate(\n self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:\n if temperature >= 12:\n efficacy = 0.8\n else:\n efficacy = 0.99\n\n susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in\n LicePopulation.susceptible_stages]\n num_susc_per_geno = cast(GenoDistrib, GenoDistrib.batch_sum(susceptible_populations))\n\n geno_treatment_distrib = {geno: GenoTreatmentValue(efficacy, cast(int, num_susc))\n for geno, num_susc in num_susc_per_geno.items()}\n return geno_treatment_distrib\n" ]
[ [ "numpy.array" ] ]
repo_name: valgur/GIMP-ML-Hub
hexsha: [ "11bdfcc894ac53543b2178e67eaf29bf9815049f" ]
file_path: [ "models/MiDaS.py" ]
code:
[ "import sys\n\nimport numpy as np\nimport torch\nimport torch.hub\nfrom PIL import Image\nfrom torchvision.transforms import Compose\n\nfrom _model_base import ModelBase, handle_alpha\nfrom _util import apply_colormap, to_rgb\n\n\n# Simplified transforms from\n# https://github.com/intel-isl/MiDaS/blob/master/models/transforms.py\nclass Resize:\n def __init__(self, width, height, image_interpolation_method=Image.BICUBIC):\n self.__width = width\n self.__height = height\n self.__multiple_of = 32\n self.__image_interpolation_method = image_interpolation_method\n\n def constrain_to_multiple_of(self, x):\n return (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)\n\n def get_size(self, width, height):\n scale_height = self.__height / height\n scale_width = self.__width / width\n\n # scale such that output size is upper bound\n if scale_width < scale_height:\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n\n new_height = self.constrain_to_multiple_of(scale_height * height)\n new_width = self.constrain_to_multiple_of(scale_width * width)\n return new_width, new_height\n\n def __call__(self, image):\n width, height = self.get_size(image.shape[1], image.shape[0])\n resized = Image.fromarray(image).resize((width, height), self.__image_interpolation_method)\n return np.array(resized)\n\n\nclass NormalizeImage:\n def __init__(self, mean, std):\n self.__mean = mean\n self.__std = std\n\n def __call__(self, image):\n return (image - self.__mean) / self.__std\n\n\nclass PrepareForNet:\n def __call__(self, image):\n image = np.transpose(image, (2, 0, 1))\n image = np.ascontiguousarray(image, dtype=np.float32)\n tensor = torch.from_numpy(image)\n return tensor.unsqueeze(0)\n\n\nclass MiDaS(ModelBase):\n def __init__(self):\n super().__init__()\n self.hub_repo = \"intel-isl/MiDaS\"\n\n def load_model(self):\n model = torch.hub.load(self.hub_repo, \"MiDaS\", pretrained=True)\n model.to(self.device)\n model.eval()\n return model\n\n @staticmethod\n def get_transform():\n return Compose([\n Resize(384, 384),\n lambda x: x / 255.,\n NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n PrepareForNet()\n ])\n\n @handle_alpha\n @torch.no_grad()\n def predict(self, input_image, colormap=None):\n h, w, d = input_image.shape\n assert d == 3, \"Input image must be RGB\"\n\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n\n transform = self.get_transform()\n image_tensor = transform(input_image).to(self.device)\n prediction = self.model.forward(image_tensor)\n prediction = torch.nn.functional.interpolate(\n prediction.unsqueeze(1),\n size=(h, w),\n mode=\"bicubic\",\n align_corners=False,\n )\n disp = prediction.squeeze().cpu().numpy()\n disp /= disp.max()\n\n if colormap:\n out = apply_colormap(disp, colormap)\n else:\n out = to_rgb(disp)\n return (out * 255).astype(np.uint8)\n\n\nmodel = MiDaS()\n\nif __name__ == '__main__':\n rpc_url = sys.argv[1]\n model.process_rpc(rpc_url)\n" ]
[ [ "numpy.array", "numpy.ascontiguousarray", "numpy.round", "torch.no_grad", "torch.from_numpy", "numpy.transpose", "torch.hub.load" ] ]
repo_name: maxinye/AIC_Weather_Forecasting
hexsha: [ "e04b9b3570d7377847b35f8ba67943988e24744d" ]
file_path: [ "eval/obs_and_M_split.py" ]
code:
[ "import pandas as pd\nfrom datetime import datetime\nimport os\n\ndef datelist(beginDate, endDate):\n date_l=[datetime.strftime(x,'%Y-%m-%d') for x in list(pd.date_range(start=beginDate, end=endDate))]\n return date_l\nbegin_date='2018-10-28'\nend_date='2018-11-03'\ndates=datelist(begin_date,end_date)\nif not os.path.exists('obs'):\n os.mkdir('obs')\nif not os.path.exists('fore'):\n os.mkdir('fore')\n\nif __name__=='__main__':\n for date in dates:\n obs_and_M_filepath = 'obs_and_M/' + date + '.csv'\n obs_and_M = pd.read_csv(obs_and_M_filepath)\n print(obs_and_M.info())\n for col in obs_and_M.columns:\n obs_and_M[col] = obs_and_M[col].fillna(-9999)\n obs_and_M.round(3)\n obs_and_M['FORE_data'] = ' ' + obs_and_M['FORE_data']\n obs = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_obs', 'rh2m_obs', 'w10m_obs'])\n obs.columns = [' OBS_data', ' t2m', ' rh2m', ' w10m']\n\n obs.to_csv('obs/' + date + '_1_obs.csv', index=False, float_format='%.03f')\n\n M = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_M', 'rh2m_M', 'w10m_M'])\n M.columns = ['FORE_data', ' t2m', ' rh2m', ' w10m']\n M.to_csv('fore/' + date + '_1_M.csv', index=False, float_format='%.03f')\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.date_range" ] ]
repo_name: tahashmi/deepvariant
hexsha: [ "84516dfacd1ed856a34507becb21848aa12e77a8" ]
file_path: [ "deeptrio/variant_caller_test.py" ]
code:
[ "# Copyright 2017 Google LLC.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Tests for deepvariant .variant_caller.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport sys\nif 'google' in sys.modules and 'google.protobuf' not in sys.modules:\n del sys.modules['google']\n\n\n\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport mock\nimport numpy as np\nimport numpy.testing as npt\n\nfrom deeptrio import testdata\nfrom deeptrio import variant_caller\nfrom deepvariant.protos import deepvariant_pb2\nfrom third_party.nucleus.util import variant_utils\nfrom third_party.nucleus.util import variantcall_utils\n\n\ndef setUpModule():\n testdata.init()\n\n\ndef _reference_model_options(p_error, max_gq, gq_resolution=1):\n return deepvariant_pb2.VariantCallerOptions(\n sample_name='UNKNOWN',\n p_error=p_error,\n max_gq=max_gq,\n gq_resolution=gq_resolution,\n ploidy=2)\n\n\nclass PlaceholderVariantCaller(variant_caller.VariantCaller):\n \"\"\"A placeholder VariantCaller.\n\n This class provides a get_candidates implementation and so allows\n the base class to be instantiated and its methods tested.\n \"\"\"\n\n def __init__(self,\n p_error,\n max_gq,\n gq_resolution=1,\n use_cache_table=False,\n max_cache_coverage=100):\n super(PlaceholderVariantCaller, self).__init__(\n options=_reference_model_options(p_error, max_gq, gq_resolution),\n use_cache_table=use_cache_table,\n max_cache_coverage=max_cache_coverage)\n\n def get_candidates(self, allele_counter, target_sample):\n return None\n\n\nclass VariantCallerTests(parameterized.TestCase):\n\n def fake_allele_counter(self, start_pos, counts):\n allele_counter = mock.Mock()\n # pylint: disable=g-complex-comprehension\n allele_counter.summary_counts.return_value = [\n deepvariant_pb2.AlleleCountSummary(\n ref_supporting_read_count=n_ref,\n total_read_count=n_ref + n_alt,\n ref_base=ref,\n reference_name='chr1',\n position=start_pos + i)\n for i, (n_alt, n_ref, ref) in enumerate(counts)\n ]\n 
allele_counter.counts.return_value = counts\n # pylint: enable=g-complex-comprehension\n return allele_counter\n\n # R code to produce the testdata expectation table.\n # expected <- function(n_ref, n_alt, perr, max_gq = 100) {\n # p_ref <- dbinom(n_alt, n_ref, perr)\n # p_het <- dbinom(n_alt, n_ref, 0.5)\n # p_alt <- dbinom(n_ref - n_alt, n_ref, perr)\n # raw <- c(p_ref, p_het, p_alt)\n # norm <- raw / sum(raw)\n # gq = min(floor(-10 * log10(1 - norm[1])), max_gq)\n # likelihoods = paste(sprintf(\"%.6f\", log10(norm)), collapse=\", \")\n # likelihoods = paste(\"[\", likelihoods, \"]\", sep=\"\")\n # result = paste(n_ref, n_alt, perr, 100, 1, likelihoods, gq, sep=\", \")\n # cat(paste(\"[\", result, \"],\\n\", sep=\"\"))\n # }\n #\n # for (n in c(10, 20)) {\n # for (k in seq(0, n)) {\n # expected(n, k, 0.01)\n # }\n # }\n #\n # for (perr in c(0.1, 0.01, 0.001, 0.0001)) {\n # expected(10, 0, perr)\n # expected(10, 1, perr)\n # }\n #\n # for (n_ref in c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000)) {\n # expected(n_ref, 0, 0.01)\n # }\n @parameterized.parameters(\n # No coverage case.\n [0, 0, 0.01, 100, [-0.477121, -0.477121, -0.477121], 1],\n # Test systematically values of n and k.\n [10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],\n [10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],\n [10, 2, 0.01, 100, [-1.063830, -0.039211, -13.037641], 0],\n [10, 3, 0.01, 100, [-3.020668, -0.000414, -11.003209], 0],\n [10, 4, 0.01, 100, [-5.015893, -0.000004, -9.007163], 0],\n [10, 5, 0.01, 100, [-7.011524, -0.000000, -7.011524], 0],\n [10, 6, 0.01, 100, [-9.007163, -0.000004, -5.015893], 0],\n [10, 7, 0.01, 100, [-11.003209, -0.000414, -3.020668], 0],\n [10, 8, 0.01, 100, [-13.037641, -0.039211, -1.063830], 0],\n [10, 9, 0.01, 100, [-16.009190, -1.015126, -0.044109], 0],\n [10, 10, 0.01, 100, [-19.956821, -2.967121, -0.000469], 0],\n [20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],\n [20, 1, 0.01, 100, [-0.000050, -3.937719, -35.921484], 39],\n [20, 2, 0.01, 100, [-0.004935, -1.946968, -31.935098], 19],\n [20, 3, 0.01, 100, [-0.328657, -0.275056, -28.267550], 2],\n [20, 4, 0.01, 100, [-2.053097, -0.003860, -26.000720], 0],\n [20, 5, 0.01, 100, [-4.044911, -0.000039, -24.001263], 0],\n [20, 6, 0.01, 100, [-6.040508, -0.000000, -22.005589], 0],\n [20, 7, 0.01, 100, [-8.036143, -0.000000, -20.009954], 0],\n [20, 8, 0.01, 100, [-10.031778, -0.000000, -18.014319], 0],\n [20, 9, 0.01, 100, [-12.027413, -0.000000, -16.018683], 0],\n [20, 10, 0.01, 100, [-14.023048, -0.000000, -14.023048], 0],\n [20, 11, 0.01, 100, [-16.018683, -0.000000, -12.027413], 0],\n [20, 12, 0.01, 100, [-18.014319, -0.000000, -10.031778], 0],\n [20, 13, 0.01, 100, [-20.009954, -0.000000, -8.036143], 0],\n [20, 14, 0.01, 100, [-22.005589, -0.000000, -6.040508], 0],\n [20, 15, 0.01, 100, [-24.001263, -0.000039, -4.044911], 0],\n [20, 16, 0.01, 100, [-26.000720, -0.003860, -2.053097], 0],\n [20, 17, 0.01, 100, [-28.267550, -0.275056, -0.328657], 0],\n [20, 18, 0.01, 100, [-31.935098, -1.946968, -0.004935], 0],\n [20, 19, 0.01, 100, [-35.921484, -3.937719, -0.000050], 0],\n [20, 20, 0.01, 100, [-39.912704, -5.933304, -0.000001], 0],\n # Testing different values of p_error.\n [10, 0, 0.1, 100, [-0.001215, -2.553940, -9.543640], 25],\n [10, 1, 0.1, 100, [-0.010811, -1.609294, -7.644752], 16],\n [10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],\n [10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],\n [10, 0, 0.001, 100, [-0.000428, -3.006383, -29.996083], 30],\n [10, 1, 0.001, 100, 
[-0.297847, -0.304236, -24.294371], 3],\n [10, 0, 1e-04, 100, [-0.000424, -3.010290, -39.999990], 30],\n [10, 1, 1e-04, 100, [-1.032394, -0.042303, -33.032046], 0],\n # Test scaling of calculation with more coverage, hitting max_gq.\n [10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],\n [20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],\n [30, 0, 0.01, 100, [-0.000000, -8.899956, -59.869056], 88],\n [40, 0, 0.01, 100, [-0.000000, -11.866608, -79.825408], 100],\n [50, 0, 0.01, 100, [-0.000000, -14.833260, -99.781760], 100],\n [60, 0, 0.01, 100, [0.000000, -17.799911, -119.738112], 100],\n [70, 0, 0.01, 100, [0.000000, -20.766563, -139.694464], 100],\n [80, 0, 0.01, 100, [0.000000, -23.733215, -159.650816], 100],\n [90, 0, 0.01, 100, [0.000000, -26.699867, -179.607168], 100],\n [100, 0, 0.01, 100, [0.000000, -29.666519, -199.563519], 100],\n )\n def test_ref_calc(self, total_n, alt_n, p_error, max_gq, expected_likelihoods,\n expected_gq):\n caller = PlaceholderVariantCaller(p_error, max_gq)\n gq, likelihoods = caller.reference_confidence(total_n - alt_n, total_n)\n npt.assert_allclose(expected_likelihoods, likelihoods, atol=1e-6)\n self.assertEqual(expected_gq, gq)\n\n @parameterized.parameters(\n # Values below max_allowed_reads are returned without modification.\n [0, 10, 100, (0, 10)],\n [5, 10, 100, (5, 10)],\n [10, 10, 100, (10, 10)],\n [10, 100, 100, (10, 100)],\n [100, 100, 100, (100, 100)],\n\n # Checks that the rescaling works when n_total_reads > max_allowed.\n [0, 200, 100, (0, 100)],\n [0, 200, 100, (0, 100)],\n [0, 1000, 100, (0, 100)],\n [0, 10000, 100, (0, 100)],\n [1, 200, 100, (1, 100)],\n [1, 1000, 100, (1, 100)],\n [1, 10000, 100, (1, 100)],\n [1, 100000, 100, (1, 100)],\n [2, 200, 100, (1, 100)],\n [3, 200, 100, (2, 100)],\n [4, 200, 100, (2, 100)],\n [10, 200, 100, (5, 100)],\n [50, 200, 100, (25, 100)],\n [100, 200, 100, (50, 100)],\n [200, 200, 100, (100, 100)],\n # I saw a bug at runtime, and the testcase makes sure we scale values of\n # n_ref_reads close to n_total_reads appropriately.\n [99, 100, 100, (99, 100)],\n )\n def test_rescale_read_counts(self, n_ref, n_total, max_allowed_reads,\n expected):\n actual = variant_caller._rescale_read_counts_if_necessary(\n n_ref, n_total, max_allowed_reads)\n self.assertEqual(actual, expected)\n\n # pylint: disable=g-complex-comprehension\n @parameterized.parameters((n_ref, n_alt_fraction)\n for n_ref in [1000, 10000, 100000, 1000000]\n for n_alt_fraction in [0.0, 0.01, 0.02])\n # pylint: enable=g-complex-comprehension\n def test_handles_large_reference_counts(self, n_ref, n_alt_fraction):\n \"\"\"Tests that we don't blow up when the coverage gets really high.\"\"\"\n caller = PlaceholderVariantCaller(0.01, 100)\n n_alt = int(n_alt_fraction * n_ref)\n gq, likelihoods = caller._calc_reference_confidence(n_ref, n_ref + n_alt)\n self.assertTrue(\n np.isfinite(likelihoods).all(),\n 'Non-finite likelihoods {}'.format(likelihoods))\n self.assertEqual(100, gq)\n\n @parameterized.parameters(*variant_caller.CANONICAL_DNA_BASES)\n def test_gvcf_basic(self, ref):\n options = _reference_model_options(0.01, 100)\n caller = PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])\n gvcfs = list(caller.make_gvcfs(allele_counter.summary_counts()))\n self.assertLen(gvcfs, 1)\n self.assertGVCF(\n gvcfs[0],\n ref=ref,\n gq=1.0,\n start=100,\n end=101,\n min_dp=0,\n chrom='chr1',\n gls=[-0.47712125472] * 3,\n sample_name=options.sample_name)\n\n @parameterized.parameters('N', 'R', 'W', 
'B')\n def test_gvcf_basic_skips_iupac_ref_base(self, ref):\n caller = PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])\n self.assertEmpty(list(caller.make_gvcfs(allele_counter.summary_counts())))\n\n @parameterized.parameters('X', '>', '!')\n def test_gvcf_basic_raises_with_bad_ref_base(self, ref):\n caller = PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])\n with self.assertRaisesRegexp(ValueError,\n 'Invalid reference base={}'.format(ref)):\n list(caller.make_gvcfs(allele_counter.summary_counts()))\n\n def assertGVCF(self,\n gvcf,\n ref,\n gq,\n start,\n end,\n min_dp,\n chrom='chr1',\n gls=None,\n sample_name=None,\n gts=None):\n if chrom:\n self.assertEqual(gvcf.reference_name, chrom)\n call = variant_utils.only_call(gvcf)\n self.assertNotEmpty(gvcf.reference_name)\n self.assertEqual(gvcf.reference_bases, ref)\n self.assertEqual(gvcf.alternate_bases, ['<*>'])\n self.assertEqual(gvcf.start, start)\n self.assertEqual(gvcf.end, end if end else start + 1)\n self.assertEqual(variantcall_utils.get_gq(call), gq)\n self.assertNotEmpty(call.genotype_likelihood)\n self.assertIn('MIN_DP', call.info)\n self.assertLen(call.info['MIN_DP'].values, 1)\n self.assertEqual(variantcall_utils.get_min_dp(call), min_dp)\n if gls is not None:\n npt.assert_allclose(list(gvcf.calls[0].genotype_likelihood), gls)\n if sample_name:\n self.assertEqual(gvcf.calls[0].call_set_name, sample_name)\n if gts is not None:\n self.assertEqual(list(gvcf.calls[0].genotype), gts)\n\n @parameterized.parameters(\n # Check some basics.\n ([(0, 0, 'A')], [dict(start=1, end=2, ref='A', gq=1, min_dp=0)]),\n # Two equal records are merged, and the reference base is the first one.\n ([(0, 0, 'A'),\n (0, 0, 'C')], [dict(start=1, end=3, ref='A', gq=1, min_dp=0)]),\n ([(0, 0, 'C'),\n (0, 0, 'A')], [dict(start=1, end=3, ref='C', gq=1, min_dp=0)]),\n # Three equal records are merged into a single block.\n ([(0, 0, 'A'), (0, 0, 'C'),\n (0, 0, 'T')], [dict(start=1, end=4, ref='A', gq=1, min_dp=0)]),\n # We don't merge together different GQ value blocks:\n ([(0, 0, 'A'), (0, 100, 'C')], [\n dict(start=1, end=2, ref='A', gq=1, min_dp=0),\n dict(start=2, end=3, ref='C', gq=100, min_dp=100),\n ]),\n ([(0, 100, 'A'), (0, 0, 'C')], [\n dict(start=1, end=2, ref='A', gq=100, min_dp=100),\n dict(start=2, end=3, ref='C', gq=1, min_dp=0),\n ]),\n ([(0, 0, 'A'), (0, 20, 'C'), (0, 100, 'T')], [\n dict(start=1, end=2, ref='A', gq=1, min_dp=0),\n dict(start=2, end=3, ref='C', gq=59, min_dp=20),\n dict(start=3, end=4, ref='T', gq=100, min_dp=100),\n ]),\n )\n def test_make_gvcfs(self, counts, expecteds):\n allele_counts = self.fake_allele_counter(1, counts).summary_counts()\n caller = PlaceholderVariantCaller(0.01, 100)\n gvcfs = list(caller.make_gvcfs(allele_counts))\n\n self.assertLen(gvcfs, len(expecteds))\n for actual, expected in zip(gvcfs, expecteds):\n self.assertGVCF(actual, **expected)\n\n @parameterized.parameters(\n dict(\n gq_resolution=1,\n expecteds=[\n dict(start=1, end=2, ref='A', gq=53, min_dp=18),\n dict(start=2, end=3, ref='C', gq=56, min_dp=19),\n dict(start=3, end=4, ref='A', gq=0, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, min_dp=16),\n dict(start=6, end=7, ref='A', gq=72, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, min_dp=35),\n dict(start=8, end=9, ref='T', gq=59, min_dp=20),\n dict(start=9, end=10, ref='G', gq=56, min_dp=19),\n ]),\n # Binning by 3 does not cause any 
records to be merged.\n dict(\n gq_resolution=3,\n expecteds=[\n dict(start=1, end=2, ref='A', gq=53, min_dp=18),\n dict(start=2, end=3, ref='C', gq=56, min_dp=19),\n dict(start=3, end=4, ref='A', gq=0, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, min_dp=16),\n dict(start=6, end=7, ref='A', gq=72, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, min_dp=35),\n dict(start=8, end=9, ref='T', gq=59, min_dp=20),\n dict(start=9, end=10, ref='G', gq=56, min_dp=19),\n ]),\n # Binning by 4 causes the first merge, of the first two records.\n dict(\n gq_resolution=4,\n expecteds=[\n dict(start=1, end=3, ref='A', gq=53, min_dp=18),\n dict(start=3, end=4, ref='A', gq=0, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, min_dp=16),\n dict(start=6, end=7, ref='A', gq=72, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, min_dp=35),\n dict(start=8, end=9, ref='T', gq=59, min_dp=20),\n dict(start=9, end=10, ref='G', gq=56, min_dp=19),\n ]),\n dict(\n gq_resolution=10,\n expecteds=[\n dict(start=1, end=3, ref='A', gq=53, min_dp=18),\n dict(start=3, end=4, ref='A', gq=0, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, min_dp=16),\n dict(start=6, end=7, ref='A', gq=72, min_dp=31),\n dict(start=7, end=8, ref='C', gq=83, min_dp=35),\n dict(start=8, end=10, ref='T', gq=56, min_dp=19),\n ]),\n dict(\n gq_resolution=45,\n expecteds=[\n dict(start=1, end=3, ref='A', gq=53, min_dp=18),\n dict(start=3, end=4, ref='A', gq=0, min_dp=35),\n dict(start=4, end=5, ref='T', gq=0, min_dp=20),\n dict(start=5, end=6, ref='A', gq=0, min_dp=16),\n dict(start=6, end=10, ref='A', gq=56, min_dp=19),\n ]),\n )\n def test_quantize_gvcfs(self, gq_resolution, expecteds):\n # Each count tuple is n_alt, n_ref, ref_base.\n # The third, fourth, and the fifth ones should never be merged, since\n # either het or hom_alt has bigger GL than hom_ref.\n counts = [(0, 18, 'A'), (0, 19, 'C'), (35, 0, 'A'), (10, 10, 'T'),\n (4, 12, 'A'), (1, 30, 'A'), (1, 34, 'C'), (0, 20, 'T'),\n (0, 19, 'G')]\n allele_counts = self.fake_allele_counter(1, counts).summary_counts()\n caller = PlaceholderVariantCaller(0.01, 100, gq_resolution)\n gvcfs = list(caller.make_gvcfs(allele_counts))\n self.assertLen(gvcfs, len(expecteds))\n for actual, expected in zip(gvcfs, expecteds):\n self.assertGVCF(actual, **expected)\n\n @parameterized.parameters(True, False)\n def test_gvcfs_counts(self, include_gvcfs):\n # Only tests the 'gvcfs' creation part of calls_and_gvcfs. The `calls`\n # portion of this method needs to be tested in subclasses, which have\n # implemented the get_candidates method.\n counts = [(0, 0, 'A'), (10, 10, 'G'), (0, 0, 'G'), (0, 0, 'G'),\n (10, 10, 'T')]\n caller = PlaceholderVariantCaller(0.01, 100)\n allele_counter = self.fake_allele_counter(10, counts)\n allele_counters = {}\n allele_counters['sample_id'] = allele_counter\n _, gvcfs = caller.calls_and_gvcfs(allele_counters, include_gvcfs,\n 'sample_id')\n # We expect our gvcfs to occur at the 10 position and that 12 and 13 have\n # been merged into a 2 bp block, if enabled. Otherwise should be empty.\n if include_gvcfs:\n self.assertLen(gvcfs, 4)\n # Expected diploid genotype likelihoods when there's no coverage. 
The\n # chance of having each genotype is 1/3, in log10 space.\n flat_gls = np.log10([1.0 / 3] * 3)\n self.assertGVCF(\n gvcfs[0], ref='A', start=10, end=11, gq=1, min_dp=0, gls=flat_gls)\n self.assertGVCF(\n gvcfs[1],\n ref='G',\n start=11,\n end=12,\n gq=0,\n min_dp=20,\n gls=np.array([-14.0230482368, -7.993606e-15, -14.0230482368]),\n # The genotype should NOT be called here (\"./.\") as the likelihood\n # for het is greater than hom_ref.\n gts=[-1, -1])\n self.assertGVCF(\n gvcfs[2], ref='G', start=12, end=14, gq=1, min_dp=0, gls=flat_gls)\n else:\n self.assertEmpty(gvcfs)\n\n\n_CACHE_COVERAGE = 20 # Outside class so we can refer to it in @Parameters.\n\n\nclass VariantCallerCacheTests(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(VariantCallerCacheTests, cls).setUpClass()\n cls.raw_caller = PlaceholderVariantCaller(0.1, 50, use_cache_table=False)\n cls.cache_caller = PlaceholderVariantCaller(\n 0.1, 50, use_cache_table=True, max_cache_coverage=_CACHE_COVERAGE)\n\n # pylint: disable=g-complex-comprehension\n @parameterized.parameters((n_alt, n_total)\n for n_total in range(_CACHE_COVERAGE + 1)\n for n_alt in range(n_total + 1))\n # pylint: enable=g-complex-comprehension\n def test_caching(self, n_alt, n_total):\n # Note that we only expect the gq and gls to be close if we are not\n # rescaling the counts, so we are only looping over values that should be\n # cached. In practice the cache is set to values sufficiently large that\n # these differences don't matter, but for this test we are limiting the\n # cache size to a small value in _CACHE_COVERAGE so we can test that the\n # cache lookups are correct.\n raw_gq, raw_gls = self.raw_caller.reference_confidence(n_alt, n_total)\n cache_gq, cache_gls = self.cache_caller.reference_confidence(n_alt, n_total)\n self.assertEqual(raw_gq, cache_gq)\n npt.assert_allclose(raw_gls, cache_gls)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.log10", "numpy.array", "numpy.isfinite" ] ]
repo_name: surfaceanalytics/inelasticscattering
hexsha: [ "da549dde788a55084c565bbc5f89ebf9cbae4263" ]
file_path: [ "model/algorithms/legacy/angular_spread_lorentzian.py" ]
code:
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 12 16:39:59 2020\n\n@author: nicholls\n\"\"\"\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#%%\n\nclass AngularSpreadCalc():\n \"\"\" class for calculating how angular spread changes with iterations:\n Inputs:\n iterations: maxinum number of iterations to calculate for (e.g. 500)\n acceptance angle: acceptance angle of analyser\n energy: initial energy of scattered electrons (eV)\n \"\"\"\n\n def __init__(self, iterations, acceptance_angle, energy, width=None):\n self.iterations = iterations\n self.acceptance_angle = acceptance_angle\n self.energy = energy\n self.width = width\n\n def gen_lorentzian_cross_section(self):\n\n self.cross_section_x = np.arange(-90, 90, 1)\n y = [self._lorentzian_cross_section(x, self.width) for x in self.cross_section_x] \n\n self.cross_section_y = y\n return self.cross_section_y\n\n def _lorentzian_cross_section(self, x, width):\n position = 0\n intensity = 1\n \n l = intensity * 1 / (1 + ((position-x)/(width/2))**2)\n \n return l\n\n def plot_cross_section(self):\n \"\"\" Plot the raw imported nist data \"\"\"\n plt.plot(self.cross_section_x, self.cross_section_y)\n plt.title('Cross Section')\n plt.xlabel('Angle')\n plt.show()\n\n\n def load_nist_cross_section(self, filename):\n \"\"\" Load nist data file of differential elastic scattering profile.\n Input:\n filename: filename of csv data from nist database\n Returns:\n cross_section_y: given cross section in range -90 to 90 deg \"\"\"\n\n filepath = (os.path.dirname(os.path.abspath(__file__)).partition('controller')[0]\n + '\\\\data\\\\NIST cross sections\\\\' + filename)\n\n data = np.genfromtxt(filepath, skip_header=10, delimiter=',')\n self.cross_section_y = self._convert_nist_data(data)\n\n self.cross_section_x = np.arange(-90, 90, 1)\n return self.cross_section_y\n\n\n def plot_nist(self):\n \"\"\" Plot the raw imported nist data \"\"\"\n plt.plot(self.cross_section_x, self.cross_section_y)\n plt.title('NIST Data')\n plt.xlabel('Angle')\n plt.show()\n\n\n def run_convolution(self):\n \"\"\" Run convolution between the nist cross section and a sine curve\n representing initial scattering distribution.\n Returns:\n centered_data: angular distribution spread after each scattering\n event\n \"\"\"\n # normalise cross section by area under curve\n self.cross_section_y_norm = self.cross_section_y / np.sum(self.cross_section_y)\n # generate initial distribution of electron scatter:\n self.emitted_elctn_y = self._gen_electron_dist()\n self.emitted_elctn_x = np.arange(-90, 90, 1)\n # run convolution\n convolved_data = self._convolution(self.cross_section_y_norm,\n self.emitted_elctn_y,\n self.iterations)\n # center data and remove excess data (i.e. outside -90 to 90 range)\n self.centered_data = self._centre_data(convolved_data)\n\n return self.centered_data\n\n def plot_convolution_results(self):\n \"\"\" Plot convolution result to show angular distribution spread after\n each scattering event.\"\"\"\n # plotting selected iterations:\n for n in [0, 1, 2, 5, 10, 20, 50]:\n plt.plot(self.emitted_elctn_x, self.centered_data[n], label=str(n))\n plt.xticks([-90, -60, -30, 0, 30, 60, 90])\n plt.xlabel('theta (degrees)')\n plt.ylabel('Intensity (a.u.)')\n plt.title('Angular distribution per scattering event')\n plt.legend(title='No. 
of iterations', loc='center left',\n bbox_to_anchor=(1, 0.5))\n #plt.savefig('Convolution.png', dpi=600, bbox_inches='tight')\n plt.show()\n\n def limit_by_acceptance_angle(self):\n \"\"\" Limit the data to the acceptance angle of the analyser \"\"\"\n # to set acceptance angle\n self.angle_limited = self._limit_by_constant_angle(self.centered_data,\n self.acceptance_angle)\n #return self.angle_limited\n\n def plot_angle_limited(self):\n \"\"\" Plot the convolution results only in the accepted angle range\"\"\"\n # to plot angle limited data\n for n in [0, 1, 2, 5, 10, 20, 50]:\n plt.plot(self.emitted_elctn_x, self.angle_limited[n], label=str(n))\n plt.xticks([-90, -60, -30, 0, 30, 60, 90])\n plt.xlabel('theta (degrees)')\n plt.ylabel('Intensity (a.u.)')\n plt.title('Intensity distribution after scattering event')\n plt.legend(title='No. of iterations', loc='center left', bbox_to_anchor=(1, 0.5))\n #plt.savefig('angle convolution.png', dpi=600, bbox_inches='tight')\n plt.show()\n\n def calc_area_under_curve(self):\n \"\"\" Calculate area under each curve within acceptance angle,\n represents intensity that the detector sees\"\"\"\n sin = np.absolute(np.sin(np.arange(-90, 90, 1) * np.pi / 180))\n angle_integrated = self.angle_limited * sin * np.pi\n self.area_sum = np.sum(angle_integrated, axis=1)\n self.area_sum = self.area_sum / self.area_sum[0]\n return self.area_sum\n\n def plot_area_under_curve(self):\n \"\"\" Plot area under curve per scattering event / iteration \"\"\"\n plt.plot(self.area_sum)\n plt.title('area under curve \\n '\n '(Energy: ' + str(self.energy) + ', Acceptance Angle: ' +\n str(self.acceptance_angle) + ')')\n plt.xlabel('No. of iterations')\n plt.ylabel('Intensity a.u.')\n plt.show()\n\n def calc_area_ratio(self):\n \"\"\" Calculate the change in area ratio between iteration n and n-1\"\"\"\n # Change in ratio\n self.area_ratio_list = self._area_ratio_change(self.area_sum)\n return self.area_ratio_list\n\n def plot_area_ratio(self):\n \"\"\" Plot the change in area ratio per iteration \"\"\"\n # to plot\n plt.plot(self.area_ratio_list)\n plt.title('Intensity ratio change per iteration \\n '\n '(Energy: ' + str(self.energy) + ' eV, Acceptance Angle: '\n + str(self.acceptance_angle) + ')')\n plt.xlabel('Iterations')\n plt.ylabel('Area Ratio between iterations')\n #plt.savefig('Ratio change per iteration.png', dpi=600)\n plt.show()\n\n def _convert_nist_data(self, dataset):\n data = [n for n in dataset[:, 1]]\n data.reverse()\n data.extend([n for n in dataset[:, 1]][1:])\n data = data[90:270]\n return data\n\n def _gen_electron_dist(self):\n # x values\n self.emitted_elctn_x = np.arange(-90, 90, 1)\n # calculate y by cosine distribution\n self.emitted_elctn_y = np.array([(np.cos(np.pi * i / 180))\n for i in self.emitted_elctn_x])\n # normalise by area under the curve\n self.emitted_elctn_y = self.emitted_elctn_y / np.sum(self.emitted_elctn_y)\n\n return self.emitted_elctn_y\n\n def _convolution(self, cross_section, scatter, n):\n # empty list to contain arrays of the scattered electrons\n scattered_events = []\n # add the first entry for unscattered:\n scattered_events.append(scatter)\n # convolution n number of times:\n for i in range(n):\n # convolve cross section with last scattered\n z = np.convolve(cross_section, scattered_events[i])\n # add scattered to list\n scattered_events.append(z)\n\n return scattered_events\n\n\n def _centre_data(self, scattered_data_list):\n\n data_cropped = []\n for indx, scattering_event in enumerate(scattered_data_list):\n\n centre = 
(indx+1) * 90\n x_range_min = centre-90\n x_range_max = centre+90\n\n data = scattering_event[x_range_min : x_range_max]\n\n data_cropped.append(data)\n\n return data_cropped\n\n def _limit_by_constant_angle(self, scattered_data_list, acceptance_angle):\n\n angle = acceptance_angle/2\n\n min_acceptance = 0 - angle\n max_acceptance = 0 + angle\n\n x_range = np.arange(-90, 90, 1)\n min_index_list = np.where(x_range < min_acceptance)\n max_index_list = np.where(x_range > max_acceptance)\n\n for indx, scatter in enumerate(scattered_data_list):\n\n scatter[min_index_list] = 0\n scatter[max_index_list] = 0\n\n return scattered_data_list\n\n def _area_ratio_change(self, area_sum_list):\n ratio_list = []\n for n in range(len(area_sum_list)):\n\n if n != 0:\n ratio = area_sum_list[n]/area_sum_list[n-1]\n\n ratio_list.append(ratio)\n return ratio_list\n" ]
[ [ "matplotlib.pyplot.xlabel", "numpy.sum", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "numpy.genfromtxt", "matplotlib.pyplot.legend", "numpy.where", "numpy.arange", "matplotlib.pyplot.ylabel", "numpy.cos", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "numpy.convolve" ] ]
repo_name: omrinewman/maritime-whale
hexsha: [ "ff983bc42572676c806be0d5fac6767463b22df5" ]
file_path: [ "src/process_maritime_data.py" ]
code:
[ "# Copyright 2020 The Maritime Whale Authors. All rights reserved.\n# Use of this source code is governed by an MIT-style license that can be\n# found in the LICENSE.txt file.\n#\n# Processes wind and vessel data. Performs simple analysis.\n\nfrom match_wind_data import *\nfrom datetime import *\nfrom meet_and_pass import *\n\nimport pandas as pd\nimport math\nimport sys\n\n# TODO: need to generalize this to apply to any port desired; will need to\n# do the same for main, run, plot, etc\n\n# vessel (AIS) types that should be automatically purged from analysis\n# see details at https://api.vesselfinder.com/docs/ref-aistypes.html\nAUTO_BLACKLIST = [30, 31, 32, 33, 34, 35, 36, 37, 51, 52, 53, 55, 57, 58, 59]\nSUB_PANAMAX = 656 # threshold in feet\nM_TO_FT = 3.28 # meters to feet (conversion)\n\ndef _sanitize_vmr(df):\n \"\"\"Filters entries with '511' error, impossibly high speed, abnormally\n high vessel width, as well as singletons (only one entry) from vessel\n movement DataFrame.\n\n Args:\n df: Vessel movement DataFrame.\n\n Returns:\n Sanitized vessel movement report DataFrame.\n \"\"\"\n df = df.loc[~df.index.isin(df[df.loc[:, \"Beam ft\"] >= 500].index), :]\n df = df.loc[~df.index.isin(df[df.loc[:, \"Course\"] == 511].index), :]\n df = df.loc[~df.index.isin(df[df.loc[:, \"Heading\"] == 511].index), :]\n df = df.loc[~df.index.isin(df[df.loc[:, \"VSPD kn\"] >= 40].index), :]\n singleton = (df.loc[:, \"MMSI\"].value_counts() == 1)\n single_mmsi = df.loc[:, \"MMSI\"].value_counts()[singleton].index.values\n df = df.loc[~df.loc[:, \"MMSI\"].isin(single_mmsi), :]\n return df\n\ndef _wrangle_vmr(df, rename):\n \"\"\"Rounds, renames, and sanitizes vessel movment DataFrame. Creates new\n columns.\n\n Args:\n df: Vessel movement DataFrame.\n\n Returns:\n Cleaned vessel movement report DataFrame.\n \"\"\"\n df.rename(rename, axis=1, inplace=True)\n df.loc[:, \"LOA ft\"] = (df.loc[:, \"A\"] + df.loc[:, \"B\"]) * M_TO_FT\n df.loc[:, \"LOA ft\"] = df.loc[:, \"LOA ft\"].round(0)\n df.loc[:, \"Beam ft\"] = (df.loc[:, \"C\"] + df.loc[:, \"D\"]) * M_TO_FT\n df.loc[:, \"Beam ft\"] = df.loc[:, \"Beam ft\"].round(0)\n df.loc[:, \"Latitude\"] = df.loc[:, \"Latitude\"].round(5)\n df.loc[:, \"Longitude\"] = df.loc[:, \"Longitude\"].round(5)\n df = _sanitize_vmr(df)\n # filter out sub-panamax class vessels\n df = df.loc[df.loc[:, \"LOA ft\"] >= SUB_PANAMAX, :]\n df.loc[:, \"Date/Time UTC\"] = df.loc[:, \"Date/Time UTC\"].str.strip(\"UTC\")\n df.loc[:, \"Date/Time UTC\"] = pd.to_datetime(df.loc[:, \"Date/Time UTC\"])\n df = df.loc[:, ([\"Date/Time UTC\", \"Name\", \"MMSI\", \"LOA ft\", \"Latitude\",\n \"Longitude\", \"Course\", \"AIS Type\", \"Heading\", \"VSPD kn\",\n \"Beam ft\"])]\n return df\n\ndef _filter_blacklisters(df, blacklist):\n \"\"\"Checks vessel AIS types and ommits blacklisted vessel types from the\n filtered data. 
Appends ommitted vessels' MMSI's to blacklist.txt.\n\n Args:\n df: Vessel movement DataFrame.\n\n Returns:\n Filtered vessel movement DataFrame.\n \"\"\"\n df = df.loc[~df.loc[:, \"MMSI\"].isin(blacklist), :]\n new_blacklisters = []\n for j in range(df.shape[0]):\n if df.iloc[j][\"AIS Type\"] in AUTO_BLACKLIST:\n new_blacklisters.append(df.iloc[j][\"MMSI\"])\n with open(\"../cache/blacklist.txt\", \"a\") as f:\n contents = [str(mmsi) for mmsi in new_blacklisters]\n if contents:\n f.write(\"\\n\".join(contents) + \"\\n\")\n df = df.loc[~df.loc[:, \"MMSI\"].isin(new_blacklisters), :]\n return df\n\ndef _fold_vmr(ports, i):\n \"\"\"Reduces movement report to a DataFrame with a single entry for each\n vessel at the point of it's maximum speed in the channel. Includes a column\n with the vessel's mean speed.\n \"\"\"\n mean = pd.DataFrame(ports[i].groupby([\"Name\", \"MMSI\"])[\"VSPD kn\"]\n .mean()).rename({\"VSPD kn\": \"Mean Speed kn\"}, axis=1).round(1)\n maxes = pd.DataFrame(ports[i].groupby([\"Name\", \"MMSI\"])[\"VSPD kn\"]\n .max()).rename({\"VSPD kn\": \"Max Speed kn\"}, axis=1)\n merged_speeds = maxes.merge(mean, on=[\"Name\", \"MMSI\"])\n max_dict = merged_speeds.loc[:, \"Max Speed kn\"].to_dict()\n columns = {\"Longitude\":[], \"Latitude\":[], \"Date/Time UTC\":[],\n \"LOA ft\":[], \"Course\":[], \"AIS Type\":[], \"WSPD mph\":[],\n \"GST mph\":[], \"WDIR degT\":[], \"Buoy Source\":[], \"Beam ft\":[],\n \"Heading\":[], \"Course Behavior\":[], \"Effective Beam ft\":[],\n \"Class\":[], \"Location\":[], \"Yaw deg\":[], \"Transit\":[],\n \"% Channel Occupied\":[]}\n # grab remaining data based on max speed position\n for key, value in max_dict.items():\n for k in columns.keys():\n columns[k].append(ports[i][(ports[i].loc[:, \"Name\"] == key[0]) &\n (ports[i].loc[:, \"VSPD kn\"] == value)][k].iloc[0])\n for key in columns.keys():\n merged_speeds[key] = columns[key]\n merged_speeds = merged_speeds.reset_index()\n fold_res = merged_speeds\n fold_res.sort_values(\"Max Speed kn\", ascending=False, inplace=True)\n return fold_res\n\ndef _add_channel_occ(ports, i):\n \"\"\"Creates the channel occupancy column.\"\"\"\n # total channel width for CH and SV are 1000 and 600 ft respectively,\n # but vary based on Class and transit condition\n channel_width = [[800, 400, 1000, 500], [600, 300, 600, 300]]\n # create % channel occupancy column for each vessel position based on\n # effective beam, transit, and corresponding channel width\n for row in range(len(ports[i])):\n vessel_class = ports[i].loc[row, \"Class\"]\n transit_type = ports[i].loc[row, \"Transit\"]\n eff_beam = ports[i].loc[row, \"Effective Beam ft\"]\n if ((vessel_class == \"Post-Panamax\") &\n (transit_type == \"One-way Transit\")):\n occ = (eff_beam / channel_width[i][0]) * 100\n ports[i].loc[row, \"% Channel Occupied\"] = round(occ, 2)\n elif ((vessel_class == \"Post-Panamax\") &\n (transit_type == \"Two-way Transit\")):\n occ = (eff_beam / channel_width[i][1]) * 100\n ports[i].loc[row, \"% Channel Occupied\"] = round(occ, 2)\n elif ((vessel_class == \"Panamax\") &\n (transit_type == \"One-way Transit\")):\n occ = (eff_beam / channel_width[i][2]) * 100\n ports[i].loc[row, \"% Channel Occupied\"] = round(occ, 2)\n elif ((vessel_class == \"Panamax\") &\n (transit_type == \"Two-way Transit\")):\n occ = (eff_beam / channel_width[i][3]) * 100\n ports[i].loc[row, \"% Channel Occupied\"] = round(occ, 2)\n else:\n sys.stderr.write(\"Error: Undefined Class and \" +\n \"transit combination...\\n\")\n ports[i].loc[row, \"% Channel Occupied\"] = 
float(\"NaN\")\n return ports[i]\n\ndef _add_vessel_class(df):\n \"\"\"Creates 'Class' column based on vessel LOA ft.\"\"\"\n df.loc[:, \"Class\"] = \"Panamax\"\n post_row = (df.loc[:, \"LOA ft\"] > 965)\n post_loc = df.loc[post_row, :].index\n post_pan = df.index.isin(post_loc)\n df.loc[post_pan, \"Class\"] = \"Post-Panamax\"\n return df\n\ndef _course_behavior(df, ranges):\n \"\"\"Creates 'Course Behavior' column based on channel specific course ranges.\n \"\"\"\n course_behavior = (\"Outbound\", \"Inbound\")\n # filter on course ranges to isolate inbound and outbound ships only\n df = df[(df.loc[:, \"Course\"] >= ranges[0][0]) &\n (df.loc[:, \"Course\"] <= ranges[0][1]) |\n (df.loc[:, \"Course\"] >= ranges[1][0]) &\n (df.loc[:, \"Course\"] <= ranges[1][1])]\n df.loc[:, \"Course\"] = round(df.loc[:, \"Course\"]).astype(\"int\")\n df.loc[:, \"Course Behavior\"] = df.loc[:, \"Course\"].copy()\n # replace course values with general inbound and outbound behavior\n courses = {}\n for behavior, bounds in zip(course_behavior, ranges):\n lower_bound = bounds[0]\n upper_bound = bounds[1]\n for j in range(lower_bound, upper_bound + 1):\n courses[j] = behavior\n df.loc[:, \"Course Behavior\"] = (df.loc[:, \"Course Behavior\"]\n .replace(courses).astype(\"str\"))\n return df\n\ndef process_report(path):\n \"\"\"Processes data from vessel movement report. Adds data from wind buoys,\n performs meeting and passing analysis. Creates other relevant columns.\n\n Args:\n path: Relative path to raw vessel movement report (CSV).\n\n Returns:\n Two pairs of two DataFrames cooresponding to the movement report.\n The first pair of DataFrames contains all vessel movements belonging to\n Charleston and Savannah, respectively. The second pair of DataFrames\n stores the vessel movement entries at which each vessel achieved\n its maximum speed. Again, the first DataFrame in the pair belongs to\n Charleston and the second DataFrame belongs to Savannah.\n \"\"\"\n blacklist = [int(mmsi) for mmsi in open(\"../cache/blacklist.txt\",\n \"r\").readlines()]\n df = pd.read_csv(path)\n df = _wrangle_vmr(df, {\"DATETIME (UTC)\": \"Date/Time UTC\", \"NAME\": \"Name\",\n \"LATITUDE\": \"Latitude\", \"LONGITUDE\": \"Longitude\",\n \"SPEED\": \"VSPD kn\", \"COURSE\": \"Course\", \"HEADING\":\n \"Heading\", \"AIS TYPE\": \"AIS Type\"})\n ch_course_ranges = ((100, 140), (280, 320)) # (outbound, inbound)\n sv_course_ranges = ((100, 160), (280, 340)) # (outbound, inbound)\n # longitudinal channel midpoint for Charleston and Savannah respectively\n channel_midpoint = ((-79.74169), (-80.78522))\n course_ranges = (ch_course_ranges, sv_course_ranges)\n ports = [None, None] # ch, sv\n # Charleston NOAA wind buoy ID (41004)\n # Savannah NOAA wind buoy ID (41008)\n buoys = [{\"41004\":None}, {\"41008\":None}] # main wind buoys\n alt_buoys = [{\"41008\":None}, {\"41004\":None}] # alternate wind buoys\n # split data into Charleston and Savannah DataFrames based on latitude\n for i in range(len(ports)):\n ch_df = (df.loc[:, \"Latitude\"] >= 32.033)\n sv_df = (df.loc[:, \"Latitude\"] < 32.033)\n ports[i] = df[ch_df] if (i == 0) else df[sv_df]\n # if there is no vessel data on a given day (e.g. 
major holidays)\n # return empty DataFrames\n if not len(ports[i]):\n empty = pd.DataFrame({\"Date/Time UTC\":[], \"Name\":[], \"MMSI\":[],\n \"Max Speed kn\":[], \"Mean Speed kn\":[],\n \"LOA ft\":[], \"Beam ft\":[], \"Class\":[],\n \"AIS Type\":[], \"Course\":[], \"Heading\":[],\n \"Course Behavior\":[], \"Yaw deg\":[],\n \"Effective Beam ft\":[], \"WDIR degT\":[],\n \"WSPD mph\":[], \"GST mph\":[], \"Buoy Source\":[],\n \"Location\":[], \"Latitude\":[], \"Longitude\":[],\n \"Transit\":[], \"% Channel Occupied\":[]})\n ports[i] = [empty, empty]\n continue\n ports[i].loc[:, \"Location\"] = \"Nearshore\"\n off_row = (ports[i].loc[:, \"Longitude\"] > channel_midpoint[i])\n off_loc = ports[i].loc[off_row, :].index\n offshore_indices = ports[i].index.isin(off_loc)\n ports[i].loc[offshore_indices, \"Location\"] = \"Offshore\"\n ports[i] = add_wind(ports, i, buoys, alt_buoys)\n ports[i] = _course_behavior(ports[i], course_ranges[i])\n ports[i] = _add_vessel_class(ports[i])\n # create yaw column based on difference between course and heading\n ports[i].loc[:, \"Yaw deg\"] = abs(ports[i].loc[:, \"Course\"] -\n ports[i].loc[:, \"Heading\"])\n # compute effective beam based on vessel beam, loa, and yaw\n eff_beam = []\n loa = ports[i].loc[:, \"LOA ft\"].values\n beam = ports[i].loc[:, \"Beam ft\"].values\n yaw = ports[i].loc[:, \"Yaw deg\"].values\n for l in range(ports[i].shape[0]):\n # effective beam formula derived using trigonometry and geometry\n # of vessel positions\n eff_beam.append(round((math.cos(math.radians(90 - yaw[l])) *\n loa[l]) + (math.cos(math.radians(yaw[l])) *\n beam[l])))\n ports[i].loc[:, \"Effective Beam ft\"] = eff_beam\n ports[i].loc[:, \"Effective Beam ft\"] = ports[i].loc[:,\n \"Effective Beam ft\"].round(0)\n # remove unwanted blacklist vessels\n ports[i] = _filter_blacklisters(ports[i], blacklist)\n # create rounded DateTime column for meetpass analysis\n stamps = len(ports[i].loc[:, \"Date/Time UTC\"]) # number of timestamps\n round_times = [ports[i].loc[:, \"Date/Time UTC\"].iloc[ii].floor(\"Min\")\n for ii in range(stamps)]\n ports[i].loc[:, \"rounded date\"] = round_times\n # run meetpass analysis and create Transit column based on results\n mp = meetpass(ports[i])\n two_way = twoway(ports[i], mp)\n ports[i].loc[:, \"Transit\"] = \"One-way Transit\"\n if not isinstance(two_way, type(None)):\n two_way_indices = ports[i].index.isin(two_way.index)\n ports[i].loc[two_way_indices, \"Transit\"] = \"Two-way Transit\"\n # reset index to clear previous pandas manipulations\n ports[i] = ports[i].reset_index()\n ports[i] = _add_channel_occ(ports, i)\n # save current format of data as all_res to be used for all positions\n all_res = ports[i]\n # remove sections of channel where ships turn\n if i % 2:\n all_res = all_res[(all_res.loc[:, \"Latitude\"] <= 32.02838) &\n (all_res.loc[:, \"Latitude\"] >= 31.9985) |\n (all_res.loc[:, \"Latitude\"] <= 31.99183)]\n else:\n all_res = all_res[all_res.loc[:, \"Latitude\"] >= 32.667473]\n fold_res = _fold_vmr(ports, i)\n # return max and mean positional data in specified order\n fold_res = fold_res.loc[:, (\"Date/Time UTC\", \"Name\", \"MMSI\",\n \"Max Speed kn\", \"Mean Speed kn\", \"LOA ft\",\n \"Beam ft\", \"Class\", \"AIS Type\", \"Course\",\n \"Heading\", \"Course Behavior\", \"Yaw deg\",\n \"Effective Beam ft\", \"WDIR degT\",\n \"WSPD mph\", \"GST mph\", \"Buoy Source\",\n \"Location\", \"Latitude\", \"Longitude\",\n \"Transit\", \"% Channel Occupied\")]\n # return positional data in specified order\n all_res = all_res.loc[:, 
(\"Name\", \"MMSI\", \"VSPD kn\", \"WSPD mph\",\n \"Transit\", \"% Channel Occupied\", \"Yaw deg\",\n \"Effective Beam ft\", \"LOA ft\", \"Beam ft\",\n \"Class\", \"AIS Type\", \"Course\", \"Heading\",\n \"Course Behavior\", \"WDIR degT\", \"GST mph\",\n \"Buoy Source\", \"Location\", \"Latitude\",\n \"Longitude\", \"Date/Time UTC\")]\n # save two copies of daily vmr for each port, one for all vessel\n # positions and one for maximum vessel speed positions\n ports[i] = [fold_res, all_res]\n return ports[0], ports[1] # ch, sv\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.read_csv" ] ]
jannsta1/insectvision
[ "d98a7acbcde1d5faf00131485fa85c706f313814" ]
[ "sphere/distance.py" ]
[ "import numpy as np\nfrom .transform import sph2vec, vec2sph\n\n\ndef angle_between(ang1, ang2, sign=True):\n d = (ang1 - ang2 + np.pi) % (2 * np.pi) - np.pi\n if not sign:\n d = np.abs(d)\n return d\n\n\ndef angdist(v1, v2, zenith=True):\n if v1.shape[0] == 2:\n v1 = sph2vec(v1, zenith=zenith)\n if v2.shape[0] == 2:\n v2 = sph2vec(v2, zenith=zenith)\n v1 /= np.linalg.norm(v1, axis=0)\n v2 /= np.linalg.norm(v2, axis=0)\n\n if v1.ndim > 1 or v2.ndim > 1:\n d = np.einsum('ij,ij->j', v1, v2)\n else:\n d = np.dot(v1.T, v2)\n # if d.ndim > 1:\n # d = d.diagonal()\n return np.absolute(np.arccos(d))\n\n\ndef eledist(v1, v2, zenith=True):\n if v1.shape[0] == 3:\n v1 = vec2sph(v1, zenith=zenith)\n if v2.shape[0] == 3:\n v2 = vec2sph(v2, zenith=zenith)\n d = (v1[0] - v2[0] + np.pi) % (2 * np.pi) - np.pi\n return np.absolute(d)\n\n\ndef azidist(v1, v2, zenith=True):\n if v1.shape[0] == 3:\n v1 = vec2sph(v1, zenith=zenith)\n if v2.shape[0] == 3:\n v2 = vec2sph(v2, zenith=zenith)\n d = (v1[1] - v2[1] + np.pi) % (2 * np.pi) - np.pi\n return np.absolute(d)\n\n" ]
[ [ "numpy.dot", "numpy.linalg.norm", "numpy.arccos", "numpy.einsum", "numpy.abs", "numpy.absolute" ] ]
CodeLongAndProsper90/COVID
[ "b277c4b64a7c84cb2dfe1f7514b1e884a58f98a0" ]
[ "backend.py" ]
[ "################################################\n# backend.py is part of COVID.codelongandpros.repl.co\n# You should have recieved a copy of the three-clause BSD license. \n# If you did not, it is located at: \n# https://opensource.org/licenses/BSD-3-Clause\n# Made by Scott Little, with help from StackOverflow\n################################################\nimport csv\nimport matplotlib.pyplot as plt\nfrom imageio import imwrite\n\n\ndef get_file():\n url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'\n import requests\n r = requests.get(url)\n\n with open('cases.csv', 'wb') as f:\n f.write(r.content)\n\n # Retrieve HTTP meta-data\n print(r.status_code)\n print(r.headers['content-type'])\n print(r.encoding)\n\ndef get_cases(stat):\n x = []\n y = []\n d = [0]\n dx = [0]\n if len(stat) == 0:\n return 1\n\n dat = 0\n\n state = stat\n reader = csv.DictReader(open(\"cases.csv\"))\n for raw in reader:\n\n if raw['state'] == state:\n dat+=1\n x.append(dat)\n dx.append(dat)\n y.append(raw['cases'])\n d.append(raw['deaths'])\n else:\n continue\n fig, axs = plt.subplots(2,figsize=(12,10))\n fig.suptitle(f\"COVID-19 Cases/Deaths in {stat}\")\n axs[0].plot(x, y)\n axs[1].plot(dx, d)\n axs[0].set_ylabel('Cases')\n axs[1].set_ylabel(\"Deaths\")\n for axe in axs:\n\n axe.set_xlabel(\"Days since 2020-01-21\")\n plt.savefig('static/plots/plot.png', bbox_inches='tight', dpi=400)\n return 0\n\ndef overwrite():\n import numpy as np\n img = np.zeros([100,100,3],dtype=np.uint8)\n img.fill(255) # or img[:] = 255\n imwrite('static/plots/plot.png', img)" ]
[ [ "matplotlib.pyplot.savefig", "numpy.zeros", "matplotlib.pyplot.subplots" ] ]
kbsezginel/angstrom
[ "793fd05b9bf27cab50d5c292fe63f685ea767d6d" ]
[ "angstrom/trajectory/trajectory.py" ]
[ "\"\"\"\n--- Ångström ---\nRead, manipulate and analyze molecular trajectory files.\n\"\"\"\nfrom .read import read_xyz_traj\nfrom .write import write_xyz_traj\nfrom angstrom.geometry import get_molecule_center\nfrom angstrom import Molecule\nimport numpy as np\nimport os\n\n\nclass Trajectory:\n \"\"\"\n Reading and analyzing trajectories in xyz format.\n\n \"\"\"\n def __init__(self, atoms=None, coordinates=None, read=None, molecule=None):\n \"\"\"\n Create a trajectory object.\n\n Parameters\n ----------\n atoms : list or None\n List of elements of the molecule for each frame.\n coordinates : list or None\n List of atomic positions of the molecule for each frame.\n read : str or None\n File name to read molecule file (formats: xyz).\n molecule : Molecule\n Create a Trajectory with 1 frame from a Molecule object.\n\n \"\"\"\n self.name = 'Trajectory'\n if atoms is not None and coordinates is not None:\n self.atoms = atoms\n self.coordinates = coordinates\n elif read is not None:\n self.read(read)\n elif molecule is not None:\n self.atoms = np.array([molecule.atoms])\n self.coordinates = np.array([molecule.coordinates])\n self.name = molecule.name\n else:\n self.atoms = []\n self.coordinates = []\n self.current_frame = 0\n\n def __repr__(self):\n \"\"\"\n Returns basic trajectory info.\n\n \"\"\"\n return \"<Trajectory frames: %i | atoms: %i | dimensions: %i>\" % tuple(np.shape(self.coordinates))\n\n def __len__(self):\n \"\"\"\n Returns number of frames.\n\n \"\"\"\n return len(self.atoms)\n\n def __add__(self, traj):\n \"\"\"\n Trajectory addition for joining the coordinates and elements into a new Trajectory object.\n\n Parameters\n ----------\n traj : Trajectory\n Trajectory object to be added\n\n Returns\n -------\n Trajectory\n Joined Trajectory object.\n\n \"\"\"\n new_traj = Trajectory(atoms=np.append(self.atoms, traj.atoms, axis=0),\n coordinates=np.append(self.coordinates, traj.coordinates, axis=0))\n return new_traj\n\n def __getitem__(self, i):\n \"\"\"\n Indexing method.\n Returns a Molecule object for given index (frame).\n Returns a Trajectory object if used as slicing.\n\n \"\"\"\n if isinstance(i, slice):\n indices = range(len(self))[i.start:i.stop:i.step]\n if len(indices) == 0:\n return []\n else:\n new_traj = Trajectory(molecule=self[indices[0]])\n for j in indices[1:]:\n new_traj.append(self[j])\n return new_traj\n else:\n return Molecule(atoms=self.atoms[i], coordinates=self.coordinates[i])\n\n def __iter__(self):\n \"\"\"\n Initialize iterator, reset frame index.\n\n \"\"\"\n self.current_frame = 0\n return self\n\n def __next__(self):\n \"\"\"\n Returns the next frame in Trajectory as a Molecule object.\n\n \"\"\"\n if self.current_frame >= len(self):\n raise StopIteration\n\n next_mol = self[self.current_frame]\n self.current_frame += 1\n return next_mol\n\n def append(self, mol):\n \"\"\"\n Append molecule to trajectory.\n The number of atoms in the molecule must match that of the trajectory.\n\n Parameters\n ----------\n mol : Molecule\n Molecule object to be added\n\n Returns\n -------\n None\n Added to Trajectory object.\n\n \"\"\"\n if len(mol.atoms) != self.atoms.shape[1]:\n raise Exception('Trajectory cannot have different number of atoms per frame')\n self.atoms = np.append(self.atoms, [mol.atoms], axis=0)\n self.coordinates = np.append(self.coordinates, [mol.coordinates], axis=0)\n\n def read(self, filename):\n \"\"\"\n Read xyz formatted trajectory file.\n\n Parameters\n ----------\n filename : str\n Trajectory file name.\n\n Returns\n -------\n None\n 
Assigns 'coordinates', 'atoms', and 'headers' attributes.\n\n \"\"\"\n self.name = os.path.splitext(os.path.basename(filename))[0]\n traj = read_xyz_traj(filename)\n self.atoms, self.coordinates, self.headers = traj['atoms'], traj['coordinates'], traj['headers']\n\n def write(self, filename):\n \"\"\"\n Write xyz formatted trajectory file.\n\n Parameters\n ----------\n filename : str\n Trajectory file name (formats: xyz).\n\n Returns\n -------\n None\n Writes molecule information to given file name.\n\n \"\"\"\n with open(filename, 'w') as traj_file:\n if hasattr(self, 'headers'):\n write_xyz_traj(traj_file, self.atoms, self.coordinates, headers=self.headers)\n else:\n write_xyz_traj(traj_file, self.atoms, self.coordinates)\n\n def get_center(self, mass=True):\n \"\"\"\n Get coordinates of molecule center at each frame.\n\n Parameters\n ----------\n mass : bool\n Calculate center of mass (True) or geometric center (False).\n\n Returns\n -------\n ndarray\n Molecule center coordinates for each frame.\n\n \"\"\"\n centers = np.empty((len(self.atoms), 3))\n for f, (frame_atoms, frame_coors) in enumerate(zip(self.atoms, self.coordinates)):\n centers[f] = get_molecule_center(frame_atoms, frame_coors, mass=mass)\n return centers\n" ]
[ [ "numpy.array", "numpy.shape", "numpy.append" ] ]
algocompretto/template-deep-learning
[ "358befe9980f4dbddb9fd9c1df3f8d0d9467e3d1" ]
[ "agents/dqn.py" ]
[ "\"\"\"\nMain agent for DQN\n\"\"\"\nimport math\nimport random\nimport shutil\n\nimport gym\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom torch.backends import cudnn\nfrom tqdm import tqdm\n\nfrom agents.base import BaseAgent\nfrom graphs.losses.huber_loss import HuberLoss\nfrom graphs.models.dqn import DQN\nfrom utils.env_utils import CartPoleEnv\nfrom utils.misc import print_cuda_statistics\nfrom utils.replay_memory import ReplayMemory, Transition\n\ncudnn.benchmark = True\n\n\nclass DQNAgent(BaseAgent):\n\n def __init__(self, config):\n super().__init__(config)\n # define models (policy and target)\n self.policy_model = DQN(self.config)\n self.target_model = DQN(self.config)\n # define memory\n self.memory = ReplayMemory(self.config)\n # define loss\n self.loss = HuberLoss()\n # define optimizer\n self.optim = torch.optim.RMSprop(self.policy_model.parameters())\n\n # define environment\n self.env = gym.make('CartPole-v0').unwrapped\n self.cartpole = CartPoleEnv(self.config.screen_width)\n\n # initialize counter\n self.current_episode = 0\n self.current_iteration = 0\n self.episode_durations = []\n\n # set cuda flag\n self.is_cuda = torch.cuda.is_available()\n if self.is_cuda and not self.config.cuda:\n self.logger.info(\"WARNING: You have a CUDA device, so you should probably enable CUDA\")\n\n self.cuda = self.is_cuda & self.config.cuda\n\n if self.cuda:\n self.device = torch.device(\"cuda\")\n torch.cuda.set_device(self.config.gpu_device)\n self.logger.info(\"Program will run on *****GPU-CUDA***** \")\n print_cuda_statistics()\n else:\n self.device = torch.device(\"cpu\")\n self.logger.info(\"Program will run on *****CPU***** \")\n\n self.policy_model = self.policy_model.to(self.device)\n self.target_model = self.target_model.to(self.device)\n self.loss = self.loss.to(self.device)\n\n # Initialize Target model with policy model state dict\n self.target_model.load_state_dict(self.policy_model.state_dict())\n self.target_model.eval()\n # Summary Writer\n self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir, comment='DQN')\n\n def load_checkpoint(self, file_name):\n filename = self.config.checkpoint_dir + file_name\n try:\n self.logger.info(\"Loading checkpoint '{}'\".format(filename))\n checkpoint = torch.load(filename)\n\n self.current_episode = checkpoint['episode']\n self.current_iteration = checkpoint['iteration']\n self.policy_model.load_state_dict(checkpoint['state_dict'])\n self.optim.load_state_dict(checkpoint['optimizer'])\n\n self.logger.info(\"Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {})\\n\"\n .format(self.config.checkpoint_dir, checkpoint['episode'], checkpoint['iteration']))\n except OSError as e:\n self.logger.info(\"No checkpoint exists from '{}'. 
Skipping...\".format(self.config.checkpoint_dir))\n self.logger.info(\"**First time to train**\")\n\n def save_checkpoint(self, file_name=\"checkpoint.pth.tar\", is_best=0):\n state = {\n 'episode': self.current_episode,\n 'iteration': self.current_iteration,\n 'state_dict': self.policy_model.state_dict(),\n 'optimizer': self.optim.state_dict(),\n }\n # Save the state\n torch.save(state, self.config.checkpoint_dir + file_name)\n # If it is the best copy it to another file 'model_best.pth.tar'\n if is_best:\n shutil.copyfile(self.config.checkpoint_dir + file_name,\n self.config.checkpoint_dir + 'model_best.pth.tar')\n\n def run(self):\n \"\"\"\n This function will the operator\n :return:\n \"\"\"\n try:\n self.train()\n\n except KeyboardInterrupt:\n self.logger.info(\"You have entered CTRL+C.. Wait to finalize\")\n\n def select_action(self, state):\n \"\"\"\n The action selection function, it either uses the model to choose an action or samples one uniformly.\n :param state: current state of the model\n :return:\n \"\"\"\n if self.cuda:\n state = state.cuda()\n sample = random.random()\n eps_threshold = self.config.eps_start + (self.config.eps_start - self.config.eps_end) * math.exp(\n -1. * self.current_iteration / self.config.eps_decay)\n self.current_iteration += 1\n if sample > eps_threshold:\n with torch.no_grad():\n return self.policy_model(state).max(1)[1].view(1, 1)\n else:\n return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long)\n\n def optimize_policy_model(self):\n \"\"\"\n performs a single step of optimization for the policy model\n :return:\n \"\"\"\n if self.memory.length() < self.config.batch_size:\n return\n # sample a batch\n transitions = self.memory.sample_batch(self.config.batch_size)\n\n one_batch = Transition(*zip(*transitions))\n\n # create a mask of non-final states\n non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, one_batch.next_state)), device=self.device,dtype=torch.uint8)\n non_final_next_states = torch.cat([s for s in one_batch.next_state if s is not None])\n\n # concatenate all batch elements into one\n state_batch = torch.cat(one_batch.state)\n action_batch = torch.cat(one_batch.action)\n reward_batch = torch.cat(one_batch.reward)\n\n state_batch = state_batch.to(self.device)\n non_final_next_states = non_final_next_states.to(self.device)\n\n curr_state_values = self.policy_model(state_batch)\n curr_state_action_values = curr_state_values.gather(1, action_batch)\n\n next_state_values = torch.zeros(self.config.batch_size, device=self.device)\n next_state_values[non_final_mask] = self.target_model(non_final_next_states).max(1)[0].detach()\n\n # Get the expected Q values\n expected_state_action_values = (next_state_values * self.config.gamma) + reward_batch\n # compute loss: temporal difference error\n loss = self.loss(curr_state_action_values, expected_state_action_values.unsqueeze(1))\n\n # optimizer step\n self.optim.zero_grad()\n loss.backward()\n for param in self.policy_model.parameters():\n param.grad.data.clamp_(-1, 1)\n self.optim.step()\n\n return loss\n\n def train(self):\n \"\"\"\n Training loop based on the number of episodes\n :return:\n \"\"\"\n for episode in tqdm(range(self.current_episode, self.config.num_episodes)):\n self.current_episode = episode\n # reset environment\n self.env.reset()\n self.train_one_epoch()\n # The target network has its weights kept frozen most of the time\n if self.current_episode % self.config.target_update == 0:\n 
self.target_model.load_state_dict(self.policy_model.state_dict())\n\n self.env.render()\n self.env.close()\n\n def train_one_epoch(self):\n \"\"\"\n One episode of training; it samples an action, observe next screen and optimize the model once\n :return:\n \"\"\"\n episode_duration = 0\n prev_frame = self.cartpole.get_screen(self.env)\n curr_frame = self.cartpole.get_screen(self.env)\n # get state\n curr_state = curr_frame - prev_frame\n\n while(1):\n episode_duration += 1\n # select action\n action = self.select_action(curr_state)\n # perform action and get reward\n _, reward, done, _ = self.env.step(action.item())\n\n if self.cuda:\n reward = torch.Tensor([reward]).to(self.device)\n else:\n reward = torch.Tensor([reward]).to(self.device)\n\n prev_frame = curr_frame\n curr_frame = self.cartpole.get_screen(self.env)\n # assign next state\n if done:\n next_state = None\n else:\n next_state = curr_frame - prev_frame\n\n # add this transition into memory\n self.memory.push_transition(curr_state, action, next_state, reward)\n\n curr_state = next_state\n\n # Policy model optimization step\n curr_loss = self.optimize_policy_model()\n if curr_loss is not None:\n if self.cuda:\n curr_loss = curr_loss.cpu()\n self.summary_writer.add_scalar(\"Temporal_Difference_Loss\", curr_loss.detach().numpy(), self.current_iteration)\n # check if done\n if done:\n break\n\n self.summary_writer.add_scalar(\"Training_Episode_Duration\", episode_duration, self.current_episode)\n\n def validate(self):\n pass\n\n def finalize(self):\n \"\"\"\n Finalize all the operations of the 2 Main classes of the process the operator and the data loader\n :return:\n \"\"\"\n self.logger.info(\"Please wait while finalizing the operation.. Thank you\")\n self.save_checkpoint()\n self.summary_writer.export_scalars_to_json(\"{}all_scalars.json\".format(self.config.summary_dir))\n self.summary_writer.close()\n" ]
[ [ "torch.zeros", "torch.device", "torch.cat", "torch.save", "torch.no_grad", "torch.cuda.set_device", "torch.cuda.is_available", "torch.load", "torch.Tensor" ] ]
GuyLor/gumbel_max_causal_gadgets_part2
[ "ae3b495367e6708b3bf6c169364f030f63be5422" ]
[ "sepsisSimDiabetes/MDP.py" ]
[ "import numpy as np\nfrom .State import State\nfrom .Action import Action\n\n'''\nIncludes blood glucose level proxy for diabetes: 0-3\n (lo2, lo1, normal, hi1, hi2); Any other than normal is \"abnormal\"\nInitial distribution:\n [.05, .15, .6, .15, .05] for non-diabetics and [.01, .05, .15, .6, .19] for diabetics\n\nEffect of vasopressors on if diabetic:\n raise blood pressure: normal -> hi w.p. .9, lo -> normal w.p. .5, lo -> hi w.p. .4\n raise blood glucose by 1 w.p. .5\n\nEffect of vasopressors off if diabetic:\n blood pressure falls by 1 w.p. .05 instead of .1\n glucose does not fall - apply fluctuations below instead\n\nFluctuation in blood glucose levels (IV/insulin therapy are not possible actions):\n fluctuate w.p. .3 if diabetic\n fluctuate w.p. .1 if non-diabetic\nRef: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4530321/\n\nAdditional fluctuation regardless of other changes\nThis order is applied:\n antibiotics, ventilation, vasopressors, fluctuations\n'''\n\nclass MDP(object):\n\n def __init__(self, init_state_idx=None, init_state_idx_type='obs',\n policy_array=None, policy_idx_type='obs', p_diabetes=0.2):\n '''\n initialize the simulator\n '''\n assert p_diabetes >= 0 and p_diabetes <= 1, \\\n \"Invalid p_diabetes: {}\".format(p_diabetes)\n assert policy_idx_type in ['obs', 'full', 'proj_obs']\n\n # Check the policy dimensions (states x actions)\n if policy_array is not None:\n assert policy_array.shape[1] == Action.NUM_ACTIONS_TOTAL\n if policy_idx_type == 'obs':\n assert policy_array.shape[0] == State.NUM_OBS_STATES\n elif policy_idx_type == 'full':\n assert policy_array.shape[0] == \\\n State.NUM_HID_STATES * State.NUM_OBS_STATES\n elif policy_idx_type == 'proj_obs':\n assert policy_array.shape[0] == State.NUM_PROJ_OBS_STATES\n\n # p_diabetes is used to generate random state if init_state is None\n self.p_diabetes = p_diabetes\n self.state = None\n\n # Only need to use init_state_idx_type if you are providing a state_idx!\n self.state = self.get_new_state(init_state_idx, init_state_idx_type)\n\n self.policy_array = policy_array\n self.policy_idx_type = policy_idx_type # Used for mapping the policy to actions\n\n def get_new_state(self, state_idx = None, idx_type = 'obs', diabetic_idx = None):\n '''\n use to start MDP over. A few options:\n\n Full specification:\n 1. Provide state_idx with idx_type = 'obs' + diabetic_idx\n 2. Provide state_idx with idx_type = 'full', diabetic_idx is ignored\n 3. Provide state_idx with idx_type = 'proj_obs' + diabetic_idx*\n\n * This option will set glucose to a normal level\n\n Random specification\n 4. State_idx, no diabetic_idx: Latter will be generated\n 5. No state_idx, no diabetic_idx: Completely random\n 6. 
No state_idx, diabetic_idx given: Random conditional on diabetes\n '''\n assert idx_type in ['obs', 'full', 'proj_obs']\n option = None\n if state_idx is not None:\n if idx_type == 'obs' and diabetic_idx is not None:\n option = 'spec_obs'\n elif idx_type == 'obs' and diabetic_idx is None:\n option = 'spec_obs_no_diab'\n diabetic_idx = np.random.binomial(1, self.p_diabetes)\n elif idx_type == 'full':\n option = 'spec_full'\n elif idx_type == 'proj_obs' and diabetic_idx is not None:\n option = 'spec_proj_obs'\n elif state_idx is None and diabetic_idx is None:\n option = 'random'\n elif state_idx is None and diabetic_idx is not None:\n option = 'random_cond_diab'\n\n assert option is not None, \"Invalid specification of new state\"\n\n if option in ['random', 'random_cond_diab']:\n init_state = self.generate_random_state(diabetic_idx)\n # Do not start in death or discharge state\n while init_state.check_absorbing_state():\n init_state = self.generate_random_state(diabetic_idx)\n else:\n # Note that diabetic_idx will be ignored if idx_type = 'full'\n init_state = State(\n state_idx=state_idx, idx_type=idx_type,\n diabetic_idx=diabetic_idx)\n\n return init_state\n\n def generate_random_state(self, diabetic_idx=None):\n # Note that we will condition on diabetic idx if provided\n if diabetic_idx is None:\n diabetic_idx = np.random.binomial(1, self.p_diabetes)\n\n # hr and sys_bp w.p. [.25, .5, .25]\n hr_state = np.random.choice(np.arange(3), p=np.array([.25, .5, .25]))\n sysbp_state = np.random.choice(np.arange(3), p=np.array([.25, .5, .25]))\n # percoxyg w.p. [.2, .8]\n percoxyg_state = np.random.choice(np.arange(2), p=np.array([.2, .8]))\n\n if diabetic_idx == 0:\n glucose_state = np.random.choice(np.arange(5), \\\n p=np.array([.05, .15, .6, .15, .05]))\n else:\n glucose_state = np.random.choice(np.arange(5), \\\n p=np.array([.01, .05, .15, .6, .19]))\n antibiotic_state = 0\n vaso_state = 0\n vent_state = 0\n\n state_categs = [hr_state, sysbp_state, percoxyg_state,\n glucose_state, antibiotic_state, vaso_state, vent_state]\n\n return State(state_categs=state_categs, diabetic_idx=diabetic_idx)\n\n def transition_antibiotics_on(self):\n '''\n antibiotics state on\n heart rate, sys bp: hi -> normal w.p. .5\n '''\n self.state.antibiotic_state = 1\n if self.state.hr_state == 2 and np.random.uniform(0,1) < 0.5:\n self.state.hr_state = 1\n if self.state.sysbp_state == 2 and np.random.uniform(0,1) < 0.5:\n self.state.sysbp_state = 1\n\n def transition_antibiotics_off(self):\n '''\n antibiotics state off\n if antibiotics was on: heart rate, sys bp: normal -> hi w.p. .1\n '''\n if self.state.antibiotic_state == 1:\n if self.state.hr_state == 1 and np.random.uniform(0,1) < 0.1:\n self.state.hr_state = 2\n if self.state.sysbp_state == 1 and np.random.uniform(0,1) < 0.1:\n self.state.sysbp_state = 2\n self.state.antibiotic_state = 0\n\n def transition_vent_on(self):\n '''\n ventilation state on\n percent oxygen: low -> normal w.p. .7\n '''\n self.state.vent_state = 1\n if self.state.percoxyg_state == 0 and np.random.uniform(0,1) < 0.7:\n self.state.percoxyg_state = 1\n\n def transition_vent_off(self):\n '''\n ventilation state off\n if ventilation was on: percent oxygen: normal -> lo w.p. .1\n '''\n if self.state.vent_state == 1:\n if self.state.percoxyg_state == 1 and np.random.uniform(0,1) < 0.1:\n self.state.percoxyg_state = 0\n self.state.vent_state = 0\n\n def transition_vaso_on(self):\n '''\n vasopressor state on\n for non-diabetic:\n sys bp: low -> normal, normal -> hi w.p. 
.7\n for diabetic:\n raise blood pressure: normal -> hi w.p. .9,\n lo -> normal w.p. .5, lo -> hi w.p. .4\n raise blood glucose by 1 w.p. .5\n '''\n self.state.vaso_state = 1\n if self.state.diabetic_idx == 0:\n if np.random.uniform(0,1) < 0.7:\n if self.state.sysbp_state == 0:\n self.state.sysbp_state = 1\n elif self.state.sysbp_state == 1:\n self.state.sysbp_state = 2\n else:\n if self.state.sysbp_state == 1:\n if np.random.uniform(0,1) < 0.9:\n self.state.sysbp_state = 2\n elif self.state.sysbp_state == 0:\n up_prob = np.random.uniform(0,1)\n if up_prob < 0.5:\n self.state.sysbp_state = 1\n elif up_prob < 0.9:\n self.state.sysbp_state = 2\n if np.random.uniform(0,1) < 0.5:\n self.state.glucose_state = min(4, self.state.glucose_state + 1)\n\n def transition_vaso_off(self):\n '''\n vasopressor state off\n if vasopressor was on:\n for non-diabetics, sys bp: normal -> low, hi -> normal w.p. .1\n for diabetics, blood pressure falls by 1 w.p. .05 instead of .1\n '''\n if self.state.vaso_state == 1:\n if self.state.diabetic_idx == 0:\n if np.random.uniform(0,1) < 0.1:\n self.state.sysbp_state = max(0, self.state.sysbp_state - 1)\n else:\n if np.random.uniform(0,1) < 0.05:\n self.state.sysbp_state = max(0, self.state.sysbp_state - 1)\n self.state.vaso_state = 0\n\n def transition_fluctuate(self, hr_fluctuate, sysbp_fluctuate, percoxyg_fluctuate, \\\n glucose_fluctuate):\n '''\n all (non-treatment) states fluctuate +/- 1 w.p. .1\n exception: glucose flucuates +/- 1 w.p. .3 if diabetic\n '''\n if hr_fluctuate:\n hr_prob = np.random.uniform(0,1)\n if hr_prob < 0.1:\n self.state.hr_state = max(0, self.state.hr_state - 1)\n elif hr_prob < 0.2:\n self.state.hr_state = min(2, self.state.hr_state + 1)\n if sysbp_fluctuate:\n sysbp_prob = np.random.uniform(0,1)\n if sysbp_prob < 0.1:\n self.state.sysbp_state = max(0, self.state.sysbp_state - 1)\n elif sysbp_prob < 0.2:\n self.state.sysbp_state = min(2, self.state.sysbp_state + 1)\n if percoxyg_fluctuate:\n percoxyg_prob = np.random.uniform(0,1)\n if percoxyg_prob < 0.1:\n self.state.percoxyg_state = max(0, self.state.percoxyg_state - 1)\n elif percoxyg_prob < 0.2:\n self.state.percoxyg_state = min(1, self.state.percoxyg_state + 1)\n if glucose_fluctuate:\n glucose_prob = np.random.uniform(0,1)\n if self.state.diabetic_idx == 0:\n if glucose_prob < 0.1:\n self.state.glucose_state = max(0, self.state.glucose_state - 1)\n elif glucose_prob < 0.2:\n self.state.glucose_state = min(1, self.state.glucose_state + 1)\n else:\n if glucose_prob < 0.3:\n self.state.glucose_state = max(0, self.state.glucose_state - 1)\n elif glucose_prob < 0.6:\n self.state.glucose_state = min(4, self.state.glucose_state + 1)\n\n def calculateReward(self):\n num_abnormal = self.state.get_num_abnormal()\n if num_abnormal >= 3:\n return -1\n elif num_abnormal == 0 and not self.state.on_treatment():\n return 1\n return 0\n\n def transition(self, action):\n self.state = self.state.copy_state()\n\n if action.antibiotic == 1:\n self.transition_antibiotics_on()\n hr_fluctuate = False\n sysbp_fluctuate = False\n elif self.state.antibiotic_state == 1:\n self.transition_antibiotics_off()\n hr_fluctuate = False\n sysbp_fluctuate = False\n else:\n hr_fluctuate = True\n sysbp_fluctuate = True\n\n if action.ventilation == 1:\n self.transition_vent_on()\n percoxyg_fluctuate = False\n elif self.state.vent_state == 1:\n self.transition_vent_off()\n percoxyg_fluctuate = False\n else:\n percoxyg_fluctuate = True\n\n glucose_fluctuate = True\n\n if action.vasopressors == 1:\n 
self.transition_vaso_on()\n sysbp_fluctuate = False\n glucose_fluctuate = False\n elif self.state.vaso_state == 1:\n self.transition_vaso_off()\n sysbp_fluctuate = False\n\n self.transition_fluctuate(hr_fluctuate, sysbp_fluctuate, percoxyg_fluctuate, \\\n glucose_fluctuate)\n\n return self.calculateReward()\n\n def select_actions(self):\n assert self.policy_array is not None\n probs = self.policy_array[\n self.state.get_state_idx(self.policy_idx_type)\n ]\n aev_idx = np.random.choice(np.arange(Action.NUM_ACTIONS_TOTAL), p=probs)\n return Action(action_idx = aev_idx)\n" ]
[ [ "numpy.array", "numpy.random.binomial", "numpy.arange", "numpy.random.uniform" ] ]
caesarcc/python-code-tutorials
[ "aa48ebe695e86440b206b641501ad55d021309bf" ]
[ "machine-learning/nlp/bert-text-classification/train.py" ]
[ "# !pip install transformers\n\nimport torch\nfrom transformers.file_utils import is_tf_available, is_torch_available, is_torch_tpu_available\nfrom transformers import BertTokenizerFast, BertForSequenceClassification\nfrom transformers import Trainer, TrainingArguments\nimport numpy as np\nimport random\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\ndef set_seed(seed: int):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if\n installed).\n\n Args:\n seed (:obj:`int`): The seed to set.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n if is_torch_available():\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # safe to call this function even if cuda is not available\n if is_tf_available():\n import tensorflow as tf\n tf.random.set_seed(seed)\n\nset_seed(1)\n\n# the model we gonna train, base uncased BERT\n# check text classification models here: https://huggingface.co/models?filter=text-classification\nmodel_name = \"bert-base-uncased\"\n# max sequence length for each document/sentence sample\nmax_length = 512\n# load the tokenizer\ntokenizer = BertTokenizerFast.from_pretrained(model_name, do_lower_case=True)\n\ndef read_20newsgroups(test_size=0.2):\n # download & load 20newsgroups dataset from sklearn's repos\n dataset = fetch_20newsgroups(subset=\"all\", shuffle=True, remove=(\"headers\", \"footers\", \"quotes\"))\n documents = dataset.data\n labels = dataset.target\n # split into training & testing a return data as well as label names\n return train_test_split(documents, labels, test_size=test_size), dataset.target_names\n\n# call the function\n(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups()\n# tokenize the dataset, truncate when passed `max_length`, \n# and pad with 0's when less than `max_length`\ntrain_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=max_length)\nvalid_encodings = tokenizer(valid_texts, truncation=True, padding=True, max_length=max_length)\n\nclass NewsGroupsDataset(torch.utils.data.Dataset):\n def __init__(self, encodings, labels):\n self.encodings = encodings\n self.labels = labels\n\n def __getitem__(self, idx):\n item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}\n item[\"labels\"] = torch.tensor([self.labels[idx]])\n return item\n\n def __len__(self):\n return len(self.labels)\n\n# convert our tokenized data into a torch Dataset\ntrain_dataset = NewsGroupsDataset(train_encodings, train_labels)\nvalid_dataset = NewsGroupsDataset(valid_encodings, valid_labels)\n# load the model and pass to CUDA\nmodel = BertForSequenceClassification.from_pretrained(model_name, num_labels=len(target_names)).to(\"cuda\")\n\ndef compute_metrics(pred):\n labels = pred.label_ids\n preds = pred.predictions.argmax(-1)\n # calculate accuracy using sklearn's function\n acc = accuracy_score(labels, preds)\n return {\n 'accuracy': acc,\n }\n\ntraining_args = TrainingArguments(\n output_dir='./results', # output directory\n num_train_epochs=3, # total number of training epochs\n per_device_train_batch_size=8, # batch size per device during training\n per_device_eval_batch_size=20, # batch size for evaluation\n warmup_steps=500, # number of warmup steps for learning rate scheduler\n weight_decay=0.01, # strength of weight decay\n logging_dir='./logs', # directory for storing logs\n load_best_model_at_end=True, # 
load the best model when finished training (default metric is loss)\n # but you can specify `metric_for_best_model` argument to change to accuracy or other metric\n logging_steps=400, # log & save weights each logging_steps\n save_steps=400,\n evaluation_strategy=\"steps\", # evaluate each `logging_steps`\n)\n\ntrainer = Trainer(\n model=model, # the instantiated Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=train_dataset, # training dataset\n eval_dataset=valid_dataset, # evaluation dataset\n compute_metrics=compute_metrics, # the callback that computes metrics of interest\n)\n# train the model\ntrainer.train()\n# evaluate the current model after training\ntrainer.evaluate()\n# saving the fine tuned model & tokenizer\nmodel_path = \"20newsgroups-bert-base-uncased\"\nmodel.save_pretrained(model_path)\ntokenizer.save_pretrained(model_path)" ]
[ [ "torch.cuda.manual_seed_all", "sklearn.datasets.fetch_20newsgroups", "numpy.random.seed", "tensorflow.random.set_seed", "sklearn.metrics.accuracy_score", "torch.manual_seed", "torch.tensor", "sklearn.model_selection.train_test_split" ] ]
pwhatfield/RAIL
[ "4986c10c3dd51362bde8ea1ddb3c53fb6d96cf56" ]
[ "rail/estimation/algos/sklearn_nn.py" ]
[ "\"\"\"\nExample code that implements a simple Neural Net predictor\nfor z_mode, and Gaussian centered at z_mode with base_width\nread in fromfile and pdf width set to base_width*(1+zmode).\n\"\"\"\n\nimport numpy as np\n# from numpy import inf\nimport sklearn.neural_network as sknn\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.stats import norm\nfrom rail.estimation.estimator import Estimator as BaseEstimation\n\n\ndef make_color_data(data_dict):\n \"\"\"\n make a dataset consisting of the i-band mag and the five colors\n Returns:\n --------\n input_data: (nd-array)\n array of imag and 5 colors\n \"\"\"\n input_data = data_dict['mag_i_lsst']\n bands = ['u', 'g', 'r', 'i', 'z', 'y']\n # make colors and append to input data\n for i in range(5):\n # replace the non-detect 99s with 28.0 just arbitrarily for now\n band1 = data_dict[f'mag_{bands[i]}_lsst']\n # band1err = data_dict[f'mag_err_{bands[i]}_lsst']\n band2 = data_dict[f'mag_{bands[i+1]}_lsst']\n # band2err = data_dict[f'mag_err_{bands[i+1]}_lsst']\n # for j,xx in enumerate(band1):\n # if np.isclose(xx,99.,atol=.01):\n # band1[j] = band1err[j]\n # band1err[j] = 1.0\n # for j,xx in enumerate(band2):\n # if np.isclose(xx,99.,atol=0.01):\n # band2[j] = band2err[j]\n # band2err[j] = 1.0\n input_data = np.vstack((input_data, band1-band2))\n return input_data.T\n\n\ndef regularize_data(data):\n scaler = StandardScaler()\n scaler.fit(data)\n regularized_data = scaler.transform(data)\n return regularized_data\n\n\nclass simpleNN(BaseEstimation):\n \"\"\"\n Subclass to implement a simple point estimate Neural Net photoz\n rather than actually predict PDF, for now just predict point zb\n and then put an error of width*(1+zb). We'll do a \"real\" NN\n photo-z later.\n \"\"\"\n def __init__(self, base_config, config_dict):\n \"\"\"\n Parameters:\n -----------\n run_dict: dict\n dictionary of all variables read in from the run_params\n values in the yaml file\n \"\"\"\n\n super().__init__(base_config=base_config, config_dict=config_dict)\n inputs = self.config_dict['run_params']\n\n self.width = inputs['width']\n self.zmin = inputs['zmin']\n self.zmax = inputs['zmax']\n self.nzbins = inputs['nzbins']\n np.random.seed(71)\n\n def inform(self):\n \"\"\"\n train the NN model\n \"\"\"\n speczs = self.training_data['redshift']\n print(\"stacking some data...\")\n color_data = make_color_data(self.training_data)\n input_data = regularize_data(color_data)\n simplenn = sknn.MLPRegressor(hidden_layer_sizes=(12, 12),\n activation='tanh', solver='lbfgs')\n simplenn.fit(input_data, speczs)\n self.model = simplenn\n\n def estimate(self, test_data):\n color_data = make_color_data(test_data)\n input_data = regularize_data(color_data)\n zmode = np.round(self.model.predict(input_data), 3)\n pdfs = []\n widths = self.width * (1.0+zmode)\n self.zgrid = np.linspace(self.zmin, self.zmax, self.nzbins)\n for i, zb in enumerate(zmode):\n pdfs.append(norm.pdf(self.zgrid, zb, widths[i]))\n pz_dict = {'zmode': zmode, 'pz_pdf': pdfs}\n return pz_dict\n" ]
[ [ "scipy.stats.norm.pdf", "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "sklearn.neural_network.MLPRegressor", "numpy.linspace", "numpy.vstack" ] ]
wantysal/MoSQITooo
[ "a52f9ff120db9f9c46f4309d28d9e1568a5b4120" ]
[ "mosqito/sq_metrics/loudness/loudness_zwtv/_third_octave_levels.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@date Created on Fri May 22 2020\n@author martin_g for Eomys\n\"\"\"\n\n# Third party imports\nimport numpy as np\nfrom scipy import signal\n\n# Local application imports\nfrom mosqito.sq_metrics.loudness.loudness_zwtv._square_and_smooth import (\n _square_and_smooth,\n)\n\n\ndef _third_octave_levels(sig, fs):\n \"\"\"3rd octave filtering, squaring, smoothing, level calculation and\n downsampling to temporal resolution: 0,5 ms, i.e. sampling rate: 2 kHz\n\n See ISO 532-1 section 6.3\n\n Parameters\n ----------\n sig : numpy.ndarray\n time signal sampled at 48 kHz[pa]\n fs : int\n time signal sampling frequency\n\n Outputs\n -------\n third_octave_levels : numpy.ndarray\n Set of time signals filtered per third octave bands\n \"\"\"\n # Sampling frequency shall be equal to 48 kHz (as per ISO 532)\n if fs != 48000:\n raise ValueError(\"\"\"ERROR: Sampling frequency shall be equal to 48 kHz\"\"\")\n # Constants\n n_level_band = 28\n n_filter_coeff = 6\n dec_factor = int(fs / 2000)\n # Initialisation\n coeff = np.zeros(n_filter_coeff)\n # Filter coefficients of one-third-octave-band filters (reference\n # table)\n # ISO 532-1 Table A.1\n third_octave_filter_ref = np.array(\n [[1, 2, 1, 1, -2, 1], [1, 0, -1, 1, -2, 1], [1, -2, 1, 1, -2, 1]]\n )\n # Filter coefficients of one-third-octave-band filters (difference to\n # reference table for 28 one-third-octave-band filters)\n # ISO 532-1 Table A.2\n third_octave_filter = np.array(\n [\n [\n [0, 0, 0, 0, -6.70260e-004, 6.59453e-004],\n [0, 0, 0, 0, -3.75071e-004, 3.61926e-004],\n [0, 0, 0, 0, -3.06523e-004, 2.97634e-004],\n ],\n [\n [0, 0, 0, 0, -8.47258e-004, 8.30131e-004],\n [0, 0, 0, 0, -4.76448e-004, 4.55616e-004],\n [0, 0, 0, 0, -3.88773e-004, 3.74685e-004],\n ],\n [\n [0, 0, 0, 0, -1.07210e-003, 1.04496e-003],\n [0, 0, 0, 0, -6.06567e-004, 5.73553e-004],\n [0, 0, 0, 0, -4.94004e-004, 4.71677e-004],\n ],\n [\n [0, 0, 0, 0, -1.35836e-003, 1.31535e-003],\n [0, 0, 0, 0, -7.74327e-004, 7.22007e-004],\n [0, 0, 0, 0, -6.29154e-004, 5.93771e-004],\n ],\n [\n [0, 0, 0, 0, -1.72380e-003, 1.65564e-003],\n [0, 0, 0, 0, -9.91780e-004, 9.08866e-004],\n [0, 0, 0, 0, -8.03529e-004, 7.47455e-004],\n ],\n [\n [0, 0, 0, 0, -2.19188e-003, 2.08388e-003],\n [0, 0, 0, 0, -1.27545e-003, 1.14406e-003],\n [0, 0, 0, 0, -1.02976e-003, 9.40900e-004],\n ],\n [\n [0, 0, 0, 0, -2.79386e-003, 2.62274e-003],\n [0, 0, 0, 0, -1.64828e-003, 1.44006e-003],\n [0, 0, 0, 0, -1.32520e-003, 1.18438e-003],\n ],\n [\n [0, 0, 0, 0, -3.57182e-003, 3.30071e-003],\n [0, 0, 0, 0, -2.14252e-003, 1.81258e-003],\n [0, 0, 0, 0, -1.71397e-003, 1.49082e-003],\n ],\n [\n [0, 0, 0, 0, -4.58305e-003, 4.15355e-003],\n [0, 0, 0, 0, -2.80413e-003, 2.28135e-003],\n [0, 0, 0, 0, -2.23006e-003, 1.87646e-003],\n ],\n [\n [0, 0, 0, 0, -5.90655e-003, 5.22622e-003],\n [0, 0, 0, 0, -3.69947e-003, 2.87118e-003],\n [0, 0, 0, 0, -2.92205e-003, 2.36178e-003],\n ],\n [\n [0, 0, 0, 0, -7.65243e-003, 6.57493e-003],\n [0, 0, 0, 0, -4.92540e-003, 3.61318e-003],\n [0, 0, 0, 0, -3.86007e-003, 2.97240e-003],\n ],\n [\n [0, 0, 0, 0, -1.00023e-002, 8.29610e-003],\n [0, 0, 0, 0, -6.63788e-003, 4.55999e-003],\n [0, 0, 0, 0, -5.15982e-003, 3.75306e-003],\n ],\n [\n [0, 0, 0, 0, -1.31230e-002, 1.04220e-002],\n [0, 0, 0, 0, -9.02274e-003, 5.73132e-003],\n [0, 0, 0, 0, -6.94543e-003, 4.71734e-003],\n ],\n [\n [0, 0, 0, 0, -1.73693e-002, 1.30947e-002],\n [0, 0, 0, 0, -1.24176e-002, 7.20526e-003],\n [0, 0, 0, 0, -9.46002e-003, 5.93145e-003],\n ],\n [\n [0, 0, 0, 0, -2.31934e-002, 1.64308e-002],\n 
[0, 0, 0, 0, -1.73009e-002, 9.04761e-003],\n [0, 0, 0, 0, -1.30358e-002, 7.44926e-003],\n ],\n [\n [0, 0, 0, 0, -3.13292e-002, 2.06370e-002],\n [0, 0, 0, 0, -2.44342e-002, 1.13731e-002],\n [0, 0, 0, 0, -1.82108e-002, 9.36778e-003],\n ],\n [\n [0, 0, 0, 0, -4.28261e-002, 2.59325e-002],\n [0, 0, 0, 0, -3.49619e-002, 1.43046e-002],\n [0, 0, 0, 0, -2.57855e-002, 1.17912e-002],\n ],\n [\n [0, 0, 0, 0, -5.91733e-002, 3.25054e-002],\n [0, 0, 0, 0, -5.06072e-002, 1.79513e-002],\n [0, 0, 0, 0, -3.69401e-002, 1.48094e-002],\n ],\n [\n [0, 0, 0, 0, -8.26348e-002, 4.05894e-002],\n [0, 0, 0, 0, -7.40348e-002, 2.24476e-002],\n [0, 0, 0, 0, -5.34977e-002, 1.85371e-002],\n ],\n [\n [0, 0, 0, 0, -1.17018e-001, 5.08116e-002],\n [0, 0, 0, 0, -1.09516e-001, 2.81387e-002],\n [0, 0, 0, 0, -7.85097e-002, 2.32872e-002],\n ],\n [\n [0, 0, 0, 0, -1.67714e-001, 6.37872e-002],\n [0, 0, 0, 0, -1.63378e-001, 3.53729e-002],\n [0, 0, 0, 0, -1.16419e-001, 2.93723e-002],\n ],\n [\n [0, 0, 0, 0, -2.42528e-001, 7.98576e-002],\n [0, 0, 0, 0, -2.45161e-001, 4.43370e-002],\n [0, 0, 0, 0, -1.73972e-001, 3.70015e-002],\n ],\n [\n [0, 0, 0, 0, -3.53142e-001, 9.96330e-002],\n [0, 0, 0, 0, -3.69163e-001, 5.53535e-002],\n [0, 0, 0, 0, -2.61399e-001, 4.65428e-002],\n ],\n [\n [0, 0, 0, 0, -5.16316e-001, 1.24177e-001],\n [0, 0, 0, 0, -5.55473e-001, 6.89403e-002],\n [0, 0, 0, 0, -3.93998e-001, 5.86715e-002],\n ],\n [\n [0, 0, 0, 0, -7.56635e-001, 1.55023e-001],\n [0, 0, 0, 0, -8.34281e-001, 8.58123e-002],\n [0, 0, 0, 0, -5.94547e-001, 7.43960e-002],\n ],\n [\n [0, 0, 0, 0, -1.10165e000, 1.91713e-001],\n [0, 0, 0, 0, -1.23939e000, 1.05243e-001],\n [0, 0, 0, 0, -8.91666e-001, 9.40354e-002],\n ],\n [\n [0, 0, 0, 0, -1.58477e000, 2.39049e-001],\n [0, 0, 0, 0, -1.80505e000, 1.28794e-001],\n [0, 0, 0, 0, -1.32500e000, 1.21333e-001],\n ],\n [\n [0, 0, 0, 0, -2.50630e000, 1.42308e-001],\n [0, 0, 0, 0, -2.19464e000, 2.76470e-001],\n [0, 0, 0, 0, -1.90231e000, 1.47304e-001],\n ],\n ]\n )\n # Filter gain values\n # ISO 532-1 Table A.2\n filter_gain = np.array(\n [\n 4.30764e-011,\n 8.59340e-011,\n 1.71424e-010,\n 3.41944e-010,\n 6.82035e-010,\n 1.36026e-009,\n 2.71261e-009,\n 5.40870e-009,\n 1.07826e-008,\n 2.14910e-008,\n 4.28228e-008,\n 8.54316e-008,\n 1.70009e-007,\n 3.38215e-007,\n 6.71990e-007,\n 1.33531e-006,\n 2.65172e-006,\n 5.25477e-006,\n 1.03780e-005,\n 2.04870e-005,\n 4.05198e-005,\n 7.97914e-005,\n 1.56511e-004,\n 3.04954e-004,\n 5.99157e-004,\n 1.16544e-003,\n 2.27488e-003,\n 3.91006e-003,\n ]\n )\n\n # Definition of the range of preferred filter center frequency\n freq = [\n 25,\n 31.5,\n 40,\n 50,\n 63,\n 80,\n 100,\n 125,\n 160,\n 200,\n 250,\n 315,\n 400,\n 500,\n 630,\n 800,\n 1000,\n 1250,\n 1600,\n 2000,\n 2500,\n 3150,\n 4000,\n 5000,\n 6300,\n 8000,\n 10000,\n 12500,\n ]\n\n n_time = len(sig[::dec_factor])\n time_axis = np.linspace(0, len(sig) / fs, num=n_time)\n\n third_octave_level = np.zeros((n_level_band, n_time))\n for i_bands in range(n_level_band):\n \n # Initialisation\n tiny_value = 10 ** -12\n i_ref = 4 * 10 ** -10\n # 2nd order fltering (See ISO 532-1 section 6.3 and A.2)\n coeff = third_octave_filter_ref - third_octave_filter[i_bands, :, :]\n sig_filt = filter_gain[i_bands] * signal.sosfilt(coeff, sig)\n # Calculate center frequency of filter\n center_freq = 10 ** ((i_bands - 16) / 10) * 1000\n # Squaring and smoothing of filtered signal\n sig_filt = _square_and_smooth(sig_filt, center_freq, 48000)\n # SPL calculation and decimation\n third_octave_level[i_bands, :] = 10 * np.log10(\n (sig_filt[::dec_factor] + 
tiny_value) / i_ref\n )\n\n return third_octave_level, time_axis, freq\n" ]
[ [ "numpy.log10", "numpy.array", "scipy.signal.sosfilt", "numpy.zeros" ] ]
stroblme/hqsp-main
[ "add585604912f0dec6d02118d4643435525a8df1" ]
[ "train.py" ]
[ "import sys\nsys.path.append(\"./stqft\")\nsys.path.append(\"./qcnn\")\n\nimport os\n#Activate the cuda env\nos.environ[\"LD_LIBRARY_PATH\"] = \"$LD_LIBRARY_PATH:/usr/local/cuda/lib64/:/usr/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.2/lib64:/usr/local/cuda/targets/x86_64-linux/lib/\"\n\nimport time\n\nimport multiprocessing\nimport glob\nimport numpy as np\n\n\ndatasetPath = \"/storage/mstrobl/dataset\"\nfeaturePath = \"/storage/mstrobl/features\"\ncheckpointsPath = \"/storage/mstrobl/checkpoints\"\nmodelsPath = \"/storage/mstrobl/models\"\nquantumPath = \"/storage/mstrobl/dataQuantum\"\nwaveformPath = \"/storage/mstrobl/waveforms\"\ncheckpointsPath = \"/storage/mstrobl/checkpoints\"\n\nexportPath = \"/storage/mstrobl/versioning\"\n\nTOPIC = \"PrepGenTrain\"\n\nbatchSize = 28\nkernelSize = 2\nepochs = 40\nportion = 1\nPoolSize = int(multiprocessing.cpu_count()*0.6) #be gentle..\n# PoolSize = 1 #be gentle..\n\nif __name__ == '__main__':\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--waveform\", default = 1, help = \"Generate Waveforms\")\n parser.add_argument(\"--quantum\", default= 1, help = \"Generate Quantum Data\")\n parser.add_argument(\"--train\", default = 1, action='store_true', help = \"Fit the model\")\n parser.add_argument(\"--checkTree\", default = 1, help = \"Checks if the working tree is dirty\")\n args = parser.parse_args()\n\n\n from stqft.frontend import export\n\n if int(args.checkTree) == 1:\n export.checkWorkingTree(exportPath)\n \n\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n print(f\"Train Time @{time.time()}\")\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n\n multiprocessing.set_start_method('spawn')\n print(f\"Running {PoolSize} processes\")\n\n datasetFiles = glob.glob(datasetPath + \"/**/*.wav\", recursive=True)\n\n print(f\"Found {len(datasetFiles)} files in the dataset\")\n\n exp = export(topic=TOPIC, identifier=\"dataset\", dataDir=exportPath)\n exp.setData(export.DESCRIPTION, f\"Dataset {len(datasetFiles)} in {datasetPath}\")\n exp.setData(export.GENERICDATA, datasetFiles)\n exp.doExport()\n\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n print(f\"Generating Waveforms @{time.time()}\")\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n from generateFeatures import gen_features, gen_quantum, reportSettings, samplingRate\n from qcnn.small_qsr import labels\n \n if int(args.waveform)==1:\n x_train, x_valid, y_train, y_valid = gen_features(labels, datasetPath, featurePath, PoolSize, waveformPath=waveformPath, portion=portion)\n else:\n print(\"Loading from disk...\")\n x_train = np.load(f\"{featurePath}/x_train_speech.npy\")\n x_valid = np.load(f\"{featurePath}/x_valid_speech.npy\")\n y_train = np.load(f\"{featurePath}/y_train_speech.npy\")\n y_valid = np.load(f\"{featurePath}/y_valid_speech.npy\")\n\n exp = export(topic=TOPIC, identifier=\"waveformData\", dataDir=exportPath)\n exp.setData(export.DESCRIPTION, f\"Waveforms generated (T)/ loaded (F): {args.waveform}; Labels used: {labels}; FeaturePath: {featurePath}; PoolSize: {PoolSize}; WaveformPath: {waveformPath}; Portioning: {portion}, SamplingRate: {samplingRate}, {reportSettings()}\")\n exp.setData(export.GENERICDATA, {\"x_train\":x_train, \"x_valid\":x_valid, \"y_train\":y_train, \"y_valid\":y_valid})\n exp.doExport()\n\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n print(f\"Generating Quantum Data @{time.time()}\")\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n\n # disable quanv and pix chan 
mal\n if int(args.quantum)==-2:\n q_train = x_train\n q_valid = x_valid\n # enable quanv\n elif int(args.quantum)==1:\n q_train, q_valid = gen_quantum(x_train, x_valid, kernelSize, output=quantumPath, poolSize=PoolSize)\n # pix chan map\n elif int(args.quantum)==-1:\n q_train, q_valid = gen_quantum(x_train, x_valid, kernelSize, output=quantumPath, poolSize=PoolSize, quanv=False)\n # load from disk\n else:\n print(\"Loading from disk...\")\n q_train = np.load(f\"{quantumPath}/quanv_train.npy\")\n q_valid = np.load(f\"{quantumPath}/quanv_valid.npy\")\n\n exp = export(topic=TOPIC, identifier=\"quantumData\", dataDir=exportPath)\n exp.setData(export.DESCRIPTION, f\"Quantum data generated (T)/ loaded (F): {args.quantum}; FeaturePath: {quantumPath}; PoolSize: {PoolSize};\")\n exp.setData(export.GENERICDATA, {\"q_train\":q_train, \"q_valid\":q_valid})\n exp.doExport()\n\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n print(f\"Starting Training @{time.time()}\")\n print(f\"\\n\\n\\n-----------------------\\n\\n\\n\")\n from fitModel import fit_model\n\n if args.train:\n #if quanv completely disabled and no pix channel map\n if int(args.quantum)==-2 or q_train.shape[3]==1:\n print(\"using ablation\")\n # pass quanv data for training and validation\n model, history = fit_model(q_train, y_train, q_valid, y_valid, checkpointsPath, epochs=epochs, batchSize=batchSize, ablation=True)\n else:\n # pass quanv data for training and validation\n model, history = fit_model(q_train, y_train, q_valid, y_valid, checkpointsPath, epochs=epochs, batchSize=batchSize, ablation=False)\n\n data_ix = time.strftime(\"%Y%m%d_%H%M\")\n model.save(f\"{modelsPath}/model_{time.time()}\")\n else:\n print(\"Training disabled\")\n\n exp = export(topic=TOPIC, identifier=\"model\", dataDir=exportPath)\n exp.setData(export.DESCRIPTION, f\"Model trained (T)/ loaded (F): {args.train}; CheckpointsPath: {checkpointsPath}; ModelsPath: {modelsPath}\")\n exp.setData(export.GENERICDATA, {\"history_acc\":history.history['accuracy'], \"history_val_acc\":history.history['val_accuracy'], \"history_loss\":history.history['loss'], \"history_val_loss\":history.history['val_loss']})\n exp.doExport()" ]
[ [ "numpy.load" ] ]
rickywu0421/fibdrv
[ "68f7b771795598f3a7339025dcd61a081c7c8e5c" ]
[ "scripts/driver.py" ]
[ "#!/usr/bin/env python3\n\nimport sys\nimport psutil\nimport subprocess\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif (len(sys.argv) < 2):\n print(\"usage: python3 driver.py <runs>\")\n sys.exit(1)\n\ninput_file = 'fib_time'\noutput_file = \"time.png\"\nruns = int(sys.argv[1])\n\ndef outlier_filter(data, threshold=2):\n data = np.array(data)\n z = np.abs((data - data.mean()) / data.std())\n return data[z < threshold]\n\ndef data_processing(data, n):\n catgories = data[0].shape[0]\n samples = data[0].shape[1]\n final = np.zeros((catgories, samples))\n\n for c in range(catgories):\n for s in range(samples):\n final[c][s] = \\\n outlier_filter([data[i][c][s] for i in range(n)]).mean()\n return final\n\n\nif __name__ == '__main__':\n Ys = []\n\n for i in range(runs):\n # bind process on cpu0\n subprocess.run('sudo taskset 0x1 ./client 2>&1 > /dev/null', shell=True)\n output = np.loadtxt(input_file, dtype='float').T\n Ys.append(np.delete(output, 0, 0))\n\n X = output[0]\n Y = data_processing(Ys, runs)\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_title('perf', fontsize=16)\n ax.set_xlabel(r'$n_{th} fibonacci$', fontsize=16)\n ax.set_ylabel('time (ns)', fontsize=16)\n\n ax.plot(X, Y[0], marker='*', markersize=3, label='user') # user\n ax.plot(X, Y[1], marker='+', markersize=3, label='kernel') # kernel\n ax.plot(X, Y[2], marker='^', markersize=3, label='kernel to user') # kernel to user\n\n ax.legend(loc = 'upper left')\n\n plt.subplots_adjust(bottom=0.15)\n plt.savefig(output_file, bbox_inches=\"tight\")\n plt.show()\n" ]
[ [ "numpy.array", "numpy.delete", "numpy.zeros", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "numpy.loadtxt", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots_adjust" ] ]
YosafatM/100-days-of-Python
[ "e81ab663b7aacb7a904f27a4e6774837cf3594a1" ]
[ "Intermedio Avanzado/32 Felicitaciones/main.py" ]
[ "import pandas as pd\nimport datetime as dt\nimport smtplib as st\nimport random as rd\n\nFROM = \"pythonsender633@gmail.com\"\nPASSWORD = \"1234abc()\"\nSUBJECT = \"Happy birthday!\"\n\nLETTERS = [1, 2, 3]\nPLACEHOLDER = \"[NAME]\"\n\nPATH = \"birthdays.csv\"\nC_NAME = \"name\"\nC_EMAIL = \"email\"\nC_YEAR = \"year\"\nC_MONTH = \"month\"\nC_DAY = \"day\"\n\ndata = pd.read_csv(PATH)\ncurrent = dt.datetime.now()\n\nfor row in data.iterrows():\n row = row[1]\n birthday = dt.datetime(int(row[C_YEAR]), int(row[C_MONTH]), int(row[C_DAY]))\n\n if current.month == birthday.month and current.day == birthday.day:\n number = rd.choice(LETTERS)\n\n with open(f\"letter_templates/letter_{number}.txt\") as handle:\n letter = handle.read()\n letter = letter.replace(PLACEHOLDER, row[C_NAME])\n\n with st.SMTP(\"smtp.gmail.com\") as connection:\n message = f\"Subject:{SUBJECT}\\n\\n{letter}\"\n\n connection.starttls()\n connection.login(user=FROM, password=PASSWORD)\n connection.sendmail(\n from_addr=FROM,\n to_addrs=row[C_EMAIL],\n msg=message\n )\n" ]
[ [ "pandas.read_csv" ] ]
Hotaro0237/image-editor-bot
[ "f8292fa81701b897a359ef59f02f74910166f0bf" ]
[ "image/edit_2.py" ]
[ "# By @HYPER-MOD\nfrom PIL import Image, ImageEnhance, ImageDraw\nimport numpy as np\nimport os\nimport cv2\nimport shutil\n\n\nasync def circle_with_bg(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"circle.png\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n img = Image.open(a).convert(\"RGB\")\n npImage = np.array(img)\n h, w = img.size\n alpha = Image.new(\"L\", img.size, 0)\n draw = ImageDraw.Draw(alpha)\n draw.pieslice([0, 0, h, w], 0, 360, fill=255)\n npAlpha = np.array(alpha)\n npImage = np.dstack((npImage, npAlpha))\n Image.fromarray(npImage).save(edit_img_loc)\n await message.reply_chat_action(\"upload_photo\")\n await message.reply_to_message.reply_photo(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"circle_with_bg-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n\n\nasync def circle_without_bg(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"circle.png\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n img = Image.open(a).convert(\"RGB\")\n npImage = np.array(img)\n h, w = img.size\n alpha = Image.new(\"L\", img.size, 0)\n draw = ImageDraw.Draw(alpha)\n draw.pieslice([0, 0, h, w], 0, 360, fill=255)\n npAlpha = np.array(alpha)\n npImage = np.dstack((npImage, npAlpha))\n Image.fromarray(npImage).save(edit_img_loc)\n await message.reply_chat_action(\"upload_document\")\n await message.reply_to_message.reply_document(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"circle_without_bg-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n\n\nasync def sticker(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"sticker.webp\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await 
client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n os.rename(a, edit_img_loc)\n await message.reply_to_message.reply_sticker(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"sticker-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n\n\ndef add_corners(im, rad):\n circle = Image.new(\"L\", (rad * 2, rad * 2), 0)\n draw = ImageDraw.Draw(circle)\n draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)\n alpha = Image.new(\"L\", im.size, 255)\n w, h = im.size\n alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))\n alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))\n alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))\n alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))\n im.putalpha(alpha)\n return im\n\n\nasync def edge_curved(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"edge_curved.webp\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n im = Image.open(a)\n im = add_corners(im, 100)\n im.save(edit_img_loc)\n await message.reply_chat_action(\"upload_photo\")\n await message.reply_to_message.reply_sticker(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"edge_curved-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n\n\nasync def contrast(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"contrast.jpg\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n image = Image.open(a)\n contrast = ImageEnhance.Contrast(image)\n contrast.enhance(1.5).save(edit_img_loc)\n await message.reply_chat_action(\"upload_photo\")\n await message.reply_to_message.reply_photo(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"contrast-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await 
message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n\n\ndef sepia(img):\n width, height = img.size\n new_img = img.copy()\n for x in range(width):\n for y in range(height):\n red, green, blue = img.getpixel((x, y))\n new_val = 0.3 * red + 0.59 * green + 0.11 * blue\n new_red = int(new_val * 2)\n if new_red > 255:\n new_red = 255\n new_green = int(new_val * 1.5)\n if new_green > 255:\n new_green = 255\n new_blue = int(new_val)\n if new_blue > 255:\n new_blue = 255\n\n new_img.putpixel((x, y), (new_red, new_green, new_blue))\n\n return new_img\n\n\nasync def sepia_mode(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"sepia.jpg\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n image = Image.open(a)\n new_img = sepia(image)\n new_img.save(edit_img_loc)\n await message.reply_chat_action(\"upload_photo\")\n await message.reply_to_message.reply_photo(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"sepia_mode-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n\n\ndef dodgeV2(x, y):\n return cv2.divide(x, 255 - y, scale=256)\n\n\nasync def pencil(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"pencil.jpg\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n img = cv2.imread(a)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_invert = cv2.bitwise_not(img_gray)\n img_smoothing = cv2.GaussianBlur(img_invert, (21, 21), sigmaX=0, sigmaY=0)\n final_img = dodgeV2(img_gray, img_smoothing)\n cv2.imwrite(edit_img_loc, final_img)\n await message.reply_chat_action(\"upload_photo\")\n await message.reply_to_message.reply_photo(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"pencil-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n\n\ndef color_quantization(img, k):\n data = np.float32(img).reshape((-1, 3))\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)\n _, label, center = cv2.kmeans(\n data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS\n )\n 
center = np.uint8(center)\n result = center[label.flatten()]\n result = result.reshape(img.shape)\n return result\n\n\nasync def cartoon(client, message):\n try:\n userid = str(message.chat.id)\n if not os.path.isdir(f\"./DOWNLOADS/{userid}\"):\n os.makedirs(f\"./DOWNLOADS/{userid}\")\n download_location = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + userid + \".jpg\"\n edit_img_loc = \"./DOWNLOADS\" + \"/\" + userid + \"/\" + \"kang.jpg\"\n if not message.reply_to_message.empty:\n msg = await message.reply_to_message.reply_text(\n \"Downloading image\", quote=True\n )\n a = await client.download_media(\n message=message.reply_to_message, file_name=download_location\n )\n await msg.edit(\"Processing Image...\")\n img = cv2.imread(a)\n edges = cv2.Canny(img, 100, 200)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.adaptiveThreshold(\n gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 5\n )\n color = cv2.bilateralFilter(img, d=9, sigmaColor=200, sigmaSpace=200)\n\n cv2.bitwise_and(color, color, mask=edges)\n img_1 = color_quantization(img, 7)\n cv2.imwrite(edit_img_loc, img_1)\n await message.reply_chat_action(\"upload_photo\")\n await message.reply_to_message.reply_photo(edit_img_loc, quote=True)\n await msg.delete()\n else:\n await message.reply_text(\"Why did you delete that??\")\n try:\n shutil.rmtree(f\"./DOWNLOADS/{userid}\")\n except Exception:\n pass\n except Exception as e:\n print(\"cartoon-error - \" + str(e))\n if \"USER_IS_BLOCKED\" in str(e):\n return\n else:\n try:\n await message.reply_to_message.reply_text(\n \"Something went wrong!\", quote=True\n )\n except Exception:\n return\n" ]
[ [ "numpy.dstack", "numpy.array", "numpy.uint8", "numpy.float32" ] ]
wangcj05/allopy
[ "0d97127e5132df1449283198143994b45fb11214" ]
[ "tests/regret/portfolio/test_portfolio_regret_optimizer.py" ]
[ "import numpy as np\nimport pytest\n\nfrom allopy import PortfolioRegretOptimizer, RegretOptimizer, get_option\nfrom .data import Test1, Test2, assets, scenarios\nfrom .funcs import cvar_fun, obj_max_returns\n\n\n@pytest.mark.parametrize(\"config\", [Test1, Test2])\ndef test_regret_optimizer(config, main_cubes, cvar_cubes):\n opt = RegretOptimizer(len(assets), len(scenarios), config.prob.as_array(), sum_to_1=True)\n opt.set_bounds(config.lb.as_array(), config.ub.as_array())\n\n obj_funcs, constraint_funcs = [], []\n for i, scenario in enumerate(scenarios):\n obj_funcs.append(obj_max_returns(main_cubes[i]))\n constraint_funcs.append(cvar_fun(cvar_cubes[i], config.cvar[scenario]))\n\n opt.set_max_objective(obj_funcs)\n opt.add_inequality_constraint(constraint_funcs)\n opt.optimize()\n\n assert scenario_solution_equal_or_better(obj_funcs, opt.solution.scenario_optimal, config.solutions) or \\\n regret_is_lower(opt.solution.proportions,\n config.proportions,\n opt.solution.scenario_optimal,\n obj_funcs,\n config.prob.as_array())\n\n\n@pytest.mark.parametrize(\"config\", [Test1])\ndef test_portfolio_regret_optimizer(config, main_cubes, cvar_cubes):\n opt = PortfolioRegretOptimizer(main_cubes, cvar_cubes, config.prob.as_array(),\n rebalance=True, sum_to_1=True, time_unit='quarterly')\n\n opt.set_bounds(config.lb.as_array(), config.ub.as_array())\n opt.maximize_returns(max_cvar=config.cvar.as_array())\n\n obj_funcs = opt._objectives.max_returns\n assert scenario_solution_equal_or_better(obj_funcs, opt.solution.scenario_optimal, config.solutions) or \\\n regret_is_lower(opt.solution.proportions,\n config.proportions,\n opt.solution.scenario_optimal,\n obj_funcs,\n config.prob.as_array())\n\n\ndef scenario_solution_equal_or_better(obj_funcs, solutions, expected):\n results = []\n for f, w, t, in zip(obj_funcs, solutions, expected):\n diff = (f(w) - f(t)) / get_option(\"F.SCALE\")\n results.append(round(diff, 3) >= 0)\n\n return np.alltrue(results)\n\n\ndef regret_is_lower(p0, p1, solutions, obj_funcs, prob):\n def regret(p):\n f_values = np.array([obj_funcs[i](s) for i, s in enumerate(solutions)])\n cost = f_values - np.array([f(p @ solutions) for f in obj_funcs])\n cost = np.asarray(cost ** 2)\n return 100 * sum(prob * cost)\n\n return regret(p0) <= regret(p1)\n" ]
[ [ "numpy.alltrue", "numpy.asarray" ] ]
jali7001/LBH_to_E_flux
[ "ad51ea46f72855db805e28fa0ca0b227b43d2683", "ad51ea46f72855db805e28fa0ca0b227b43d2683" ]
[ "LBH_to_eflux/helper_funcs.py", "LBH_to_eflux/observations/ssusi.py" ]
[ "import sklearn.neighbors\nfrom numpy import linalg as LA\nfrom apexpy import Apex\nimport numpy as np\n\n#Create an Apex conversion instance at the usual reference altitude\n#no epoch is specified; we will set the epoch just-in-time when we are going to\n#do an coordinate transformation\napex_reference_height = 110000. # Apex reference height in meters\nmodule_Apex = Apex(refh=apex_reference_height/1000.)\n\ndef update_apex_epoch(dt):\n year = dt.year\n doy = dt.timetuple().tm_yday\n epoch = year+doy/(366. if np.mod(year,4)==0 else 365.)\n print('Setting Apex epoch for {} to {}'.format(dt.strftime('%Y%m%d'),epoch))\n module_Apex.set_epoch(epoch)\n\n\ndef dmsp_map_interpolate_NN_smooth_great_circle(lat_dmsp, lon_dmsp, lat_map, lon_map, Obs_map, k = 5, tol = 1.5):\n \"\"\"\n generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance\n \"\"\"\n tol = np.deg2rad(tol)\n #reshape to N by 2 array where each row is (lat, lon)\n dmsp_points = np.deg2rad(np.hstack((lat_dmsp.flatten().reshape(-1,1),lon_dmsp.flatten().reshape(-1,1))))\n map_points = np.deg2rad(np.hstack((lat_map.flatten().reshape(-1,1), lon_map.flatten().reshape(-1,1))))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n \n model = sklearn.neighbors.NearestNeighbors(n_neighbors = k, radius = tol, metric = 'haversine')\n\n model.fit(map_points)\n neighbors = model.kneighbors(dmsp_points, return_distance = True)\n \n #indices\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n distances = neighbors[0][i]\n inds = neighbors[1][i]\n \n weights = distances/np.nansum(distances)\n obs_interp[i] = np.nansum( obs_val[inds] * weights)\n \n return obs_interp\n\ndef latlt2polar(lat,lt,hemisphere):\n \"\"\"\n Converts an array of latitude and lt points to polar for a top-down dialplot (latitude in degrees, LT in hours)\n i.e. makes latitude the radial quantity and MLT the azimuthal \n\n get the radial displacement (referenced to down from northern pole if we want to do a top down on the north, \n or up from south pole if visa-versa)\n \"\"\"\n from numpy import pi\n if hemisphere=='N':\n r = 90.-lat\n elif hemisphere=='S':\n r = 90.-(-1*lat)\n else:\n raise ValueError('%s is not a valid hemisphere, N or S, please!' % (hemisphere))\n #convert lt to theta (azimuthal angle) in radians\n theta = lt/24. 
* 2*pi\n\n #the pi/2 rotates the coordinate system from\n #theta=0 at negative y-axis (local time) to\n #theta=0 at positive x axis (traditional polar coordinates)\n return r,theta\ndef polar2dial(ax):\n \"\"\"\n Turns a matplotlib axes polar plot into a dial plot\n \"\"\"\n #Rotate the plot so that noon is at the top and midnight\n #is at the bottom, and fix the labels so radial direction\n #is latitude and azimuthal direction is local time in hours\n ax.set_theta_zero_location('S')\n theta_label_values = np.array([0.,3.,6.,9.,12.,15.,18.,21.])*180./12\n theta_labels = ['%d:00' % (int(th/180.*12)) for th in theta_label_values.flatten().tolist()]\n ax.set_thetagrids(theta_label_values,labels=theta_labels)\n\n r_label_values = 90.-np.array([80.,70.,60.,50.,40.])\n r_labels = [r'$%d^{o}$' % (int(90.-rv)) for rv in r_label_values.flatten().tolist()]\n ax.set_rgrids(r_label_values,labels=r_labels)\n ax.set_rlim([0.,40.])\ndef map_polar2cart(LAT,LON, hemi = 'N'):\n #convert latitude and longitude (in degrees) to cartesian coordinates for interpolation purposes\n X_map, Y_map = satplottools.latlon2cart(LAT.flatten(), LON.flatten(),hemi)\n return X_map, Y_map\ndef dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance = 0.5):\n \"\"\"\n generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance\n \"\"\"\n\n #indices of the map that fit the dmsp map\n indices = scipy.interpolate.griddata((X_map,Y_map), np.arange(len(X_map.flatten())), (X_dmsp,Y_dmsp), method = 'nearest')\n\n #get mask for map elements that are within distance tolerance \n mask = (abs(X_map[indices] - X_dmsp) < tolerance) & (abs(Y_map[indices] - Y_dmsp) < tolerance)\n\n return indices,mask \ndef greatCircleDist(location1,location2,lonorlt='lt'):\n #Returns n angular distances in radians between n-by-2 numpy arrays\n #location1, location2 (calculated row-wise so diff between \n #location1[0,] and location2[0,]\n #assuming that these arrays have the columns lat[deg],localtime[hours] \n #and that they are points on a sphere of constant radius\n #(the points are at the same altitude)\n pi = np.pi\n azi2rad = pi/12. if lonorlt=='lt' else pi/180\n wrappt = 24. 
if lonorlt=='lt' else 360.\n #Bounds check\n over = location1[:,1] > wrappt\n under = location1[:,1] < 0.\n location1[over,1]=location1[over,1]-wrappt\n location1[under,1]=location1[under,1]+wrappt\n\n if location1.ndim == 1 or location2.ndim == 1: \n dphi = abs(location2[1]-location1[1])*azi2rad\n a = (90-location1[0])/360*2*pi #get the colatitude in radians\n b = (90-location2[0])/360*2*pi\n C = np.pi - np.abs(dphi - np.pi)#get the angular distance in longitude in radians\n else:\n dphi = abs(location2[:,1]-location1[:,1])*azi2rad\n a = (90-location1[:,0])/360*2*pi #get the colatitude in radians\n b = (90-location2[:,0])/360*2*pi\n C = np.pi - np.abs(dphi - np.pi)#get the angular distance in longitude in radians\n return arccos(cos(a)*cos(b)+sin(a)*sin(b)*cos(C))\n\ndef myGreatCircleDistance(location1,location2):\n #add a dimension\n location1 = location1.reshape(1, 2)\n location2 = location2.reshape(1, 2)\n\n# location2.shape = (1,)+location2.shape[:,1]\n angular_distance = greatCircleDist(location1,location2,lonorlt='lon')\n return angular_distance\n\ndef dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):\n \"\"\"\n generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance\n \"\"\"\n #reshape to N by 2 array where each row is (X, Y)\n dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))\n map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )\n dists, inds = model.query(dmsp_points, k=k) \n\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n norm = LA.norm(dists[i])\n if (norm > tol):\n obs_interp[i] = np.nan\n else:\n# weights = dists[i]/norm\n\n weights = dists[i]/np.nansum(dists[i])\n obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )\n\n return obs_interp\n\ndef dmsp_map_interpolate_NN_smooth_great_circle(lat_dmsp, lon_dmsp, lat_map, lon_map, Obs_map, k = 5, tol = 1.5):\n \"\"\"\n generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance\n \"\"\"\n \n tol = np.deg2rad(tol)\n #reshape to N by 2 array where each row is (lat, lon)\n dmsp_points = np.deg2rad(np.hstack((lat_dmsp.flatten().reshape(-1,1),lon_dmsp.flatten().reshape(-1,1))))\n map_points = np.deg2rad(np.hstack((lat_map.flatten().reshape(-1,1), lon_map.flatten().reshape(-1,1))))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.NearestNeighbors(n_neighbors = k, radius = tol, metric = 'haversine')\n\n model.fit(map_points)\n neighbors = model.kneighbors(dmsp_points, return_distance = True)\n \n #indices\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n distances = neighbors[0][i]\n inds = neighbors[1][i]\n \n weights = distances/np.nansum(distances)\n obs_interp[i] = np.nansum( obs_val[inds] * weights)\n \n \n return obs_interp\n\nfrom ssj_auroral_boundary import dmsp_spectrogram\ndef jd2dayhour(jds):\n #assume jd is an array\n temp = jds - 0.5\n hours = (temp - np.floor(temp))*24 \n return hours\n", "from collections import OrderedDict\nimport numpy as np\nfrom geospacepy.special_datetime import (datetimearr2jd,\n datetime2jd,\n jd2datetime)\nimport esabin\nimport datetime, os\nimport h5py\nfrom sklearn import linear_model\nfrom LBH_to_eflux.helper_funcs import latlt2polar, polar2dial, 
module_Apex, update_apex_epoch\n\n\nclass SDRPass(object):\n \"\"\"\n Description\n -----------\n This class reads in a NASA CDAweb SSUSI SDR disk file corresponding to one spacecraft orbit and stores the pass data as class attributes.\n After instantiating this class, you can call get_ingest_data() to get observations for one polar pass,\n\n Attributes\n ----------\n name : str\n Name of observations\n ['jds'] : np.ndarray (n_obs x 1)\n Array of observation times (in Julian Data) \n ['observer_ids'] : np.ndarray (n_obs x 1)\n Satellite number associated with observations\n ['Y'] : array_like (n_obs x 1)\n Observations of FUV radiance whose color is specified by radiance_type (in kilo rayleighs)\n ['Y_var'] : np.ndarray (n_obs x 1)\n Observation error \n ['lats'] : np.ndarray (n_obs x 1)\n Magnetic latitude (degrees) of observations in Apex coordinates of reference height 110 km \n ['lons'] : np.ndarray (n_obs x 1)\n Magnetic local time of observations (expressed in degrees ) of observations in Apex coordinates \n ['observer_ids'] : np.ndarrray (n_obs x 1)\n Satellite number associated with observations \n \"\"\"\n def __init__(self, ssusi_file, dmsp, hemisphere, radiance_type = 'LBHL', noise_removal = True, spatial_bin = False, minlat = 50):\n \"\"\"\n Parameters\n ----------\n ssusi_file : str\n Location of SSUSI file to read in\n dmsp : int \n DMSP spacecraft number which must take value in [16, 17, 18]\n hemisphere : str\n Hemisphere of observations (must be either 'N' or 'S')\n radiance_type : str\n \"Color\" of FUV radiance to read in. Must be one of ['Lyman Alpha','OI 130.4', 'OI 135.6', 'LBHS', 'LBHL']\n noise_removal : bool, optional\n If true, removes solar influence from FUV radiances \n Its default value is True\n spatial_bin : bool, optional\n If true, spatially bins observations\n Its default value is False\n minlat : int, optional \n Minimum latitude in magnetic degrees a polar pass \n \"\"\"\n self.ssusi_file = ssusi_file\n self.dmsp = dmsp\n self.hemisphere = hemisphere\n self.radiance_type = radiance_type\n self.noise_removal = noise_removal\n self.spatial_bin = spatial_bin \n self.minlat = minlat\n\n\n self.name = 'SSUSI ' + radiance_type\n self.ssusi_colors = {\n 'Lyman Alpha':0,\n 'OI 130.4':1,\n 'OI 135.6':2,\n 'LBHS':3,\n 'LBHL':4\n }\n self.observers = [16,17,18]\n\n self.arg_radiance = self.ssusi_colors[radiance_type]\n\n #prepare grid if spatially binning\n if spatial_bin:\n self.grid = esabin.esagrid.Esagrid(2, azi_coord = 'lt')\n \n pass_data = self.get_ssusi_pass()\n\n self['jds'] = pass_data['epochjd_match']\n self['observer_ids'] = pass_data['observer']\n\n self['Y'] = pass_data['Y']\n self['Y_var'] = pass_data['Y_var']\n self['lats'] = pass_data['mlat']\n self['lons'] = pass_data['mlon']\n\n def get_data_window(self, startdt, enddt, hemisphere, allowed_observers):\n \"\"\"\n Applies the hemisphere and datetime interval to the attributes of the class.\n \n Parameters\n ----------\n startdt : datetime object\n Defaults to first available observation time\n enddt : datetime object\n Defaults to last available observation time\n hemisphere : str\n Defaults to hemisphere specified in init \n \n Returns\n -------\n data_window : OrderedDict\n Contains the following elements\n ['jds'] : np.ndarray (n_obs x 1)\n Array of observation times (in Julian Data) \n ['observer_ids'] : np.ndarray (n_obs x 1)\n Satellite number associated with observations\n ['Y'] : array_like (n_obs x 1)\n Observations of FUV radiance whose color is specified by radiance_type (in kilo 
rayleighs)\n ['Y_var'] : np.ndarray (n_obs x 1)\n Observation error \n ['lats'] : np.ndarray (n_obs x 1)\n Magnetic latitude (degrees) of observations in Apex coordinates of reference height 110 km \n ['lons'] : np.ndarray (n_obs x 1)\n Magnetic local time of observations (expressed in degrees ) of observations in Apex coordinates \n ['observer_ids'] : np.ndarrray (n_obs x 1)\n Satellite number associated with observations \n\n \"\"\"\n mask = self.get_data_window_mask(startdt, enddt, hemisphere, allowed_observers)\n\n data_window = OrderedDict()\n for datavarname,datavararr in self.items():\n data_window[datavarname] = datavararr[mask]\n\n return data_window\n\n\n def _read_SDR_file(self, file_name):\n \"\"\"\n Reads in the Disk Radiances and their piercepoint day observation location. \n SSUSI data comes in sweeps of 42 cross track observations.\n Therefore the total number of observation is n_obs = 42 * n_sweeps\n\n Parameters\n ----------\n file_name : str\n location of SSUSI SDR file \n Returns\n -------\n disk : dict\n Dictionary of relevant values from the SDR value with elements\n ['glat'] : np.ndarray (42 x n_sweeps)\n Geographic Latitude of observations\n ['glon'] : np.ndarray (42 x n_sweeps)\n Geographic longitude of observations\n ['alt'] : np.ndarray (1 x 1)\n Altitude of observations\n ['time'] : np.ndarray (n_sweeps)\n Seconds during day\n ['year'] : np.ndarray (n_sweeps)\n Year of obs\n ['radiance_all_colors'] : np.ndarray (42 x n_sweeps x 5)\n All 5 FUV \"colors\" (kRa)\n ['radiance_all_colors_uncertainty'] : np.ndarray (42 x n_obs x 5)\n Uncertainty of FUV radiances (kRa)\n ['SZA'] : np.ndarray (42 x n_sweeps x 5)\n Solar Zenith angle of observations (deg)\n ['epooch'] : list (n_sweeps x 1)\n list of observation times in datetime objects\n\n \"\"\"\n disk = {} #initialize disk measurements\n\n with h5py.File(file_name,'r') as h5f:\n #location of obs in geographic coordinates\n disk['glat'] = h5f['PIERCEPOINT_DAY_LATITUDE_AURORAL'][:] \n disk['glon'] = h5f['PIERCEPOINT_DAY_LONGITUDE_AURORAL'][:]\n disk['alt'] = h5f['PIERCEPOINT_DAY_ALTITUDE_AURORAL'][:]\n #time of observations\n disk['time'] = h5f['TIME_DAY_AURORAL'][:] #seconds since start of day\n disk['year'] = h5f['YEAR_DAY_AURORAL'][:] \n disk['doy'] = h5f['DOY_DAY_AURORAL'][:]\n\n #read radiances as kR\n disk['radiance_all_colors_uncertainty'] = h5f['DISK_RADIANCE_UNCERTAINTY_DAY_AURORAL'][:] /1000.\n disk['radiance_all_colors'] = h5f['DISK_RECTIFIED_INTENSITY_DAY_AURORAL'][:] / 1000.\n\n #read in solar zenith angle in degrees\n disk['SZA'] = h5f['PIERCEPOINT_DAY_SZA_AURORAL'][:]\n\n h5f.close()\n #get epoch from seconds of day, day of year, and year in terms of datetimes \n dt = np.empty((len(disk['doy']),1),dtype='object')\n for k in range(len(dt)):\n dt[k,0] = datetime.datetime(disk['year'][k],1,1,0,0,0)+datetime.timedelta(days=disk['doy'][k]-1.) + datetime.timedelta(seconds= disk['time'][k])\n disk['epoch'] = dt.flatten() \n\n return disk\n\n def get_ssusi_pass(self):\n \"\"\"\n Main function to read in and preprocess SDR file. \n 1. Read SDR ncdf file\n 2. Convert observations to magnetic coordinates\n 3. Applies solar influence removal if desired\n 4. 
Applies Spatial binning if desired\n\n Returns\n -------\n disk_int : dict\n Dictionary preprocessed observations with elements\n ['epochjd_match'] : np.ndarray (n_obs x 1)\n Array of observation times (in Julian Data) \n ['observer_ids'] : np.ndarray (n_obs x 1)\n Satellite number associated with observations\n ['Y'] : array_like (n_obs x 1)\n Observations of FUV radiance whose color is specified by radiance_type (in kilo rayleighs)\n ['Y_var'] : np.ndarray (n_obs x 1)\n Observation error \n ['mlat'] : np.ndarray (n_obs x 1)\n Magnetic latitude (degrees) of observations in Apex coordinates of reference height 110 km \n ['mlon'] : np.ndarray (n_obs x 1)\n Magnetic local time of observations (expressed in degrees ) of observations in Apex coordinates \n ['observer'] : np.ndarrray (n_obs x 1)\n Satellite number associated with observations \n \"\"\"\n ssusi_file = self.ssusi_file\n\n #Step 1: read in each file \n disk_data = self._read_SDR_file(ssusi_file)\n \n #Step 2: integrate disk data into usable magnetic coordinates\n disk_int = self._ssusi_integrate(disk_data)\n \n #report mlon is magnetic local time in degrees \n disk_int['mlon'] = disk_int['mlt'] * 15\n\n #get observation times\n shape = np.shape(disk_int['mlat'])\n disk_int['epoch_match'] = np.tile(disk_int['epoch'].flatten(), (shape[0],1))\n\n #get the relevant observations\n disk_int['Y'] = disk_int['radiance_all_colors'][:,:,self.arg_radiance]\n disk_int['Y_var'] = disk_int['radiance_all_colors_uncertainty'][:,:,self.arg_radiance]\n\n #get rid of negative values \n disk_int['Y'][disk_int['Y']<0] = 0\n\n #Step 3: if solar influence removal\n if self.noise_removal:\n radiance_fit = self._radiance_zenith_correction(disk_data['SZA'],disk_int['Y']) \n disk_int['Y'] = disk_int['Y'] - radiance_fit\n disk_int['Y'][disk_int['Y']<0] = 0\n\n #flatten data\n for item in disk_int:\n disk_int[item] = disk_int[item].flatten()\n\n #get times in terms of jds\n disk_int['epochjd_match'] = datetimearr2jd(disk_int['epoch_match']).flatten()\n\n #Step 4: spatially bin observations if desired\n if self.spatial_bin:\n disk_int = self.ssusi_spatial_binning(disk_int)\n\n disk_int['observer'] = np.ones_like(disk_int['epochjd_match']) * self.dmsp\n\n return disk_int \n\n def ssusi_spatial_binning(self,disk_int):\n \"\"\"\n This function spatially bins the observations using equal solid angle binning.\n\n Parameters\n ----------\n disk_int : dict\n dictionary from ssusi_pass with elements\n ['epochjd_match'] : np.ndarray (n_obs x 1)\n Array of observation times (in Julian Data) \n ['observer_ids'] : np.ndarray (n_obs x 1)\n Satellite number associated with observations\n ['Y'] : array_like (n_obs x 1)\n Observations of FUV radiance whose color is specified by radiance_type (in kilo rayleighs)\n ['Y_var'] : np.ndarray (n_obs x 1)\n Observation error \n ['mlat'] : np.ndarray (n_obs x 1)\n Magnetic latitude (degrees) of observations in Apex coordinates of reference height 110 km \n ['mlon'] : np.ndarray (n_obs x 1)\n Magnetic local time of observations (expressed in degrees ) of observations in Apex coordinates \n ['observer'] : np.ndarrray (n_obs x 1)\n Satellite number associated with observations \n Returns\n -------\n disk_int : dict\n Same keys as input but binned lol\n \"\"\"\n\n disk_binned = {}\n #spatial bin \n lats_in_pass, lts_in_pass, Y_in_pass, Y_var_in_pass = disk_int['mlat'], disk_int['mlt'], disk_int['Y'], disk_int['Y_var']\n epochjds_in_pass = disk_int['epochjd_match']\n \n #convert from mlt [0, 24] to [-12, 12]\n lts_mask = lts_in_pass >= 
12\n lts_in_pass[lts_mask] -= 24 \n\n #bin observation values\n binlats, binlons, binstats = self.grid.bin_stats(lats_in_pass.flatten(), lts_in_pass.flatten(), Y_in_pass.flatten(), \\\n statfun = np.nanmean, center_or_edges = 'center')\n #get varaince of each bin \n binlats, binlons, binstats_var = self.grid.bin_stats(lats_in_pass.flatten(), lts_in_pass.flatten(), Y_var_in_pass.flatten(), \\\n statfun = np.nanvar, center_or_edges = 'center')\n #bin observation time\n binlats, binlons, binstats_time = self.grid.bin_stats(lats_in_pass.flatten(), lts_in_pass.flatten(), epochjds_in_pass.flatten(), \\\n statfun = np.nanmedian, center_or_edges = 'center')\n\n #convert from mlt -12 to 12 to degrees 0 to 360\n binlons[binlons>=0] = 15*binlons[binlons>=0]\n binlons[binlons<0] = (binlons[binlons<0]+24)*15\n\n disk_binned['mlat'], disk_binned['mlon'], disk_binned['Y'], disk_binned['Y_var'] = binlats, binlons, binstats, binstats_var\n disk_binned['epochjd_match'] = binstats_time\n return disk_binned\n\n def geo2apex(self,datadict):\n \"\"\"\n Perform coordinate transform for disk measurements from geographic to apex magnetic coordinates\n\n Parameters\n ----------\n datadict : dict\n dictionary object from _read_SDR_file()\n Returns\n -------\n alat : np.ndarray (same shape as datadict['glat'])\n Apex latitude\n mlt, : np.ndarray (same shape as datadict['glat'])\n Magnetic local time \n \"\"\"\n #take coordinates from data dictionary\n glat,glon,alt = datadict['glat'].flatten(),datadict['glon'].flatten(),datadict['alt'][0]\n alt = 110\n dt_arrs = datadict['epoch']\n\n #convert to apex coordinates\n alat = np.full_like(glat,np.nan)\n alon = np.full_like(glat,np.nan)\n qdlat = np.full_like(glat,np.nan)\n \n update_apex_epoch(dt_arrs[0]) #This does not need to be precise, time-wise\n\n alatout,alonout = module_Apex.geo2apex(glat,glon,alt)\n alat,alon = alatout.flatten(),alonout.flatten()\n \n #calculate time for observations because it isn't available in SDR product\n utsec = datadict['time'].flatten()\n utsec = (np.tile(utsec, (42,1))).flatten()\n\n dt_arrs_tiled = (np.tile(dt_arrs, (42,1))).flatten()\n mlt = np.full_like(alon, np.nan)\n\n for i in range(np.size(mlt)):\n mlt[i] = module_Apex.mlon2mlt(alon[i], dt_arrs_tiled[i],alt)\n\n #reshape to original shapes\n alat = alat.reshape(datadict['glat'].shape)\n mlt = mlt.reshape(datadict['glon'].shape)\n\n return alat,mlt \n\n def get_ingest_data(self,startdt = None, enddt = None, hemisphere = None):\n \"\"\"\n Call to return observations from a polar pass \n\n\n Parameters\n ---------- \n startdt : datetime object\n Defaults to first available observation time\n enddt : datetime object\n Defaults to last available observation time\n hemisphere : str\n Defaults to hemisphere specified in init \n \n Returns\n -------\n ylats : np.ndarray\n Absolute magnetic latitudes of observations\n ylons : np.ndarray\n magnetic 'longitudes' (MLT in degrees) of observations\n y : np.ndarray\n 1D array of kiloRayleighs\n y_var : np.ndarray\n 1D array of uncertainies (variances in kiloRayleighs)\n jds : np.ndarray \n\n \"\"\"\n startdt = jd2datetime(np.nanmin(self['jds'])) if startdt is None else startdt\n enddt = jd2datetime(np.nanmax(self['jds'])) if enddt is None else enddt\n hemisphere = self.hemisphere if hemisphere is None else hemisphere\n\n datadict = self.get_data_window(startdt,\n enddt,\n hemisphere,\n 'all')\n y = datadict['Y'].reshape(-1,1)\n\n\n #Format the error/variance vector similarly,\n y_var = datadict['Y_var'].reshape(-1,1);\n\n #Locations for each 
vector component\n ylats = datadict['lats'].reshape(-1,1)\n ylons = datadict['lons'].reshape(-1,1)\n\n #jds \n jds = datadict['jds'].reshape(-1,1)\n return np.abs(ylats),ylons,y,y_var,jds \n\n def plot_obs(self, ax, startdt = None,enddt = None,hemisphere = None, **kwargs):\n \"\"\"\n Plot observations from a particular polar pass \n\n Paramters \n ---------\n ax : matplotlib axis\n startdt : datetime object\n Defaults to first available observation time\n enddt : datetime object\n Defaults to last available observation time\n hemisphere : str\n Defaults to hemisphere specified in init \n \"\"\"\n lats,lons,obs,y_var,jds = self.get_ingest_data(startdt,enddt,hemisphere)\n r,theta = latlt2polar(lats.flatten(),lons.flatten()/180*12,'N')\n\n ax.scatter(theta,r,c = obs, **kwargs)\n\n polar2dial(ax)\n\n @staticmethod\n def _ssusi_integrate_position(position_data):\n return np.squeeze(position_data[:,:])\n\n @staticmethod\n def _ssusi_integrate_radiance(radiance_data):\n return np.squeeze(radiance_data[:,:])\n\n def _ssusi_integrate(self,datadict):\n \"\"\"\n General wrapper for coordinate convserion\n datadict - dict\n use the output dictionary from the read in function readSSUSISDR()\n \"\"\"\n datadict_out = {}\n \n for varname in datadict:\n if 'radiance' in varname:\n datadict_out[varname] = self._ssusi_integrate_radiance(datadict[varname])\n elif varname in ['glat','glon','SZA']:\n datadict_out[varname] = self._ssusi_integrate_position(datadict[varname])\n else:\n datadict_out[varname] = datadict[varname]\n\n alat,mlt = self.geo2apex(datadict_out)\n\n datadict_out['mlat'] = alat\n datadict_out['mlt'] = mlt\n\n return datadict_out\n\n @staticmethod\n def _radiance_zenith_correction(sza,radiance):\n \"\"\"\n A quick correction for the solar influence noise on the radiance data using a simple regression following the methods of \n \n Parameters\n ----------\n SZA - list, num_obsx1\n Solar zenith angle of the radiance observations(degrees)\n radiance - list, num_obsx1\n Radiance observations \n \"\"\"\n\n #mask out non finite values\n finite = np.logical_and(np.isfinite(sza.flatten()),\n np.isfinite(radiance.flatten()))\n\n #mask out values above 1 kR\n mask_high_radiance = radiance.flatten() < 1\n\n finite = np.logical_and(finite,mask_high_radiance)\n # clf = linear_model.LinearRegression(fit_intercept=True)\n clf = linear_model.Ridge(fit_intercept=True)\n\n X = sza.reshape((-1,1))\n X = np.cos(np.deg2rad(sza).reshape((-1,1)))\n y = radiance.reshape((-1,1))\n clf.fit(X[finite],y[finite])\n\n return clf.predict(X).reshape(radiance.shape)\n\n\n def __str__(self):\n return '{} {}:\\n hemisphere {},\\n date {}-{}-{}'.format(self.name,\n self.observation_type,\n self.hemisphere,\n self.year,\n self.month,\n self.day)\n\n def __setitem__(self,item,value):\n if not hasattr(self,'_observation_data'):\n self._observation_data = OrderedDict()\n self._observation_data[item]=value\n\n def __getitem__(self,item):\n return self._observation_data[item]\n\n def __contains__(self,item):\n return item in self._observation_data\n\n def __iter__(self):\n for item in self._observation_data:\n yield item\n\n def items(self):\n for key,value in self._observation_data.items():\n yield key,value\n\n def get_data_window_mask(self,startdt,enddt,hemisphere,allowed_observers):\n \"\"\"Return a 1D mask into the data arrays for all points in the\n specified time range, hemisphere and with radar IDs (RIDs) in\n list of RIDs allowed_observers\"\"\"\n mask = np.ones(self['jds'].shape,dtype=bool)\n mask = np.logical_and(mask, 
self._get_finite())\n mask = np.logical_and(mask,self._get_time_mask(startdt,enddt))\n mask = np.logical_and(mask,self._get_hemisphere_mask(hemisphere))\n mask = np.logical_and(mask,self._get_observers_mask(allowed_observers))\n return mask\n\n def _get_finite(self):\n return np.isfinite(self['Y'])\n\n def _get_time_mask(self,startdt,enddt):\n \"\"\"Return mask in data arrays for all\n data in interval [startdt,enddt)\"\"\"\n startjd = datetime2jd(startdt)\n endjd = datetime2jd(enddt)\n\n inrange = np.logical_and(self['jds'].flatten()>=startjd,\n self['jds'].flatten()<endjd)\n return inrange\n\n def _get_hemisphere_mask(self,hemisphere):\n if hemisphere not in ['N','S']:\n return ValueError(('{}'.format(hemisphere)\n +' is not a valid hemisphere (use N or S)'))\n if hemisphere == 'N':\n inhemi = self['lats'] > self.minlat\n elif hemisphere == 'S':\n inhemi = self['lats'] < -self.minlat\n return inhemi\n def _check_allowed_observers(self,allowed_observers):\n if not isinstance(allowed_observers,list):\n raise RuntimeError('allowed_observers must be a list of '\n +'DMSP satellite numbers')\n for observer_id in allowed_observers:\n if observer_id not in self.observers:\n raise ValueError('DMSP satellite number {} not'.format(observer_id)\n +'in \\n({})'.format(self.observers))\n\n def _get_observers_mask(self,allowed_observers):\n if allowed_observers != 'all':\n self._check_allowed_observers(allowed_observers)\n\n observers_mask = np.zeros(self['jds'].shape,dtype=bool)\n for observer_id in allowed_observers:\n observers_mask = np.logical_or(observers_mask,\n self['observer_ids']==observer_id)\n else:\n observers_mask = np.ones(self['jds'].shape,dtype=bool)\n \n return observers_mask\n " ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.empty", "numpy.nansum", "numpy.abs", "numpy.deg2rad", "numpy.mod", "numpy.floor" ], [ "numpy.logical_or", "numpy.deg2rad", "numpy.ones_like", "numpy.zeros", "numpy.ones", "numpy.tile", "sklearn.linear_model.Ridge", "numpy.logical_and", "numpy.shape", "numpy.nanmin", "numpy.isfinite", "numpy.size", "numpy.abs", "numpy.nanmax", "numpy.squeeze", "numpy.full_like" ] ]
coinflip112/stoch_models_release
[ "cc36587af8ff61f2dac38bbc5acd8f3ec2079ec8" ]
[ "brownian_stock_simulator.py" ]
[ "import numpy as np\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\n\n\nclass BrownianStockSimulator:\n plot_title = \"Simulated White noise, Brownian Motion and Stock Price\"\n plotly_template = \"plotly_white\"\n plot_width = 1500\n plot_height = 1000\n\n def __init__(self, time_horizon, steps_count, sigma):\n self.time_horizon = time_horizon\n self.steps_count = steps_count\n self.sigma = sigma\n self.sampling_points = self.time_horizon * self.steps_count\n self.dt = self.time_horizon / self.sampling_points\n self.time_grid = self._get_time_grid()\n\n def _get_time_grid(self):\n time_grid = np.arange(0, self.time_horizon + self.dt, self.dt)\n return time_grid\n\n def _get_white_noise(self):\n white_noise = np.sqrt(self.dt) * np.random.normal(\n loc=0, scale=1.0, size=self.sampling_points\n )\n return white_noise\n\n def _get_brownian_motion(self, white_noise):\n brownian_motion = np.cumsum(white_noise)\n brownian_motion = np.append(0, brownian_motion)\n return brownian_motion\n\n def _get_stock_price(self, init_stock_price):\n output = (\n self.sigma * self.brownian_motion - 0.5 * self.sigma ** 2 * self.time_grid\n )\n return init_stock_price * np.exp(output)\n\n def simulate(self, init_stock_price, random_seed=42):\n np.random.seed(random_seed)\n self.white_noise = self._get_white_noise()\n self.brownian_motion = self._get_brownian_motion(self.white_noise)\n self.price = self._get_stock_price(init_stock_price)\n\n def plot(self):\n fig = make_subplots(rows=3, cols=1)\n fig.append_trace(\n go.Scatter(x=self.time_grid, y=self.white_noise, name=\"White Noise\"),\n row=1,\n col=1,\n ),\n fig.append_trace(\n go.Scatter(\n x=self.time_grid, y=self.brownian_motion, name=\"Brownian Motion\"\n ),\n row=2,\n col=1,\n ),\n fig.append_trace(\n go.Scatter(x=self.time_grid, y=self.price, name=\"Stock Price\"), row=3, col=1\n )\n fig.update_layout(\n height=self.plot_height,\n width=self.plot_width,\n title_text=self.plot_title,\n template=self.plotly_template,\n )\n fig.show()\n" ]
[ [ "numpy.random.normal", "numpy.random.seed", "numpy.exp", "numpy.arange", "numpy.sqrt", "numpy.append", "numpy.cumsum" ] ]
JitenDhandha/CFit
[ "7e3dac0ae16091ba1294e620ab3e787f5ec34202" ]
[ "Fitting.py" ]
[ "####################################################################################\r\n# Jiten Dhandha, 2020 #\r\n# CFit is a curve fitting tool in python, based on the method of least squares. #\r\n# It comes equipped with some standard functions and a graphical user interface. #\r\n# #\r\n# Inspired by: LSFR.py, Abie Marshall, The University of Manchester, 2016 #\r\n####################################################################################\r\n\r\n\r\n\r\n####################################################################################\r\n# LIBRARIES #\r\n####################################################################################\r\n\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('Qt5Agg') #This requires PyQt5 to be installed.\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.gridspec as gridspec\r\nimport scipy.optimize as opt\r\nimport scipy.special as sp\r\nimport scipy.stats as stats\r\nimport scipy.linalg as linalg\r\nimport warnings\r\n\r\n####################################################################################\r\n# LIST OF FUNCTIONS #\r\n####################################################################################\r\n\r\n#Class to hold all relevant function information\r\nclass Function():\r\n def __init__(self,name,func,numberOfParameters,rawFuncStr,unicodeFuncStr,rawParametersStr,unicodeParametersStr):\r\n self.name = name\r\n self.func = func\r\n self.numberOfParameters = numberOfParameters\r\n self.rawFuncStr = rawFuncStr\r\n self.unicodeFuncStr = unicodeFuncStr\r\n self.rawParametersStr = rawParametersStr\r\n self.unicodeParametersStr = unicodeParametersStr\r\n\r\n'''\r\nCurrent supported functions are as follows:\r\nPolynomial: constant, linear, quadratic, cubic, quartic, quintic\r\nPeriodic functions: sine wave, square wave\r\nPeak shape functions: gaussian, poisson, laplace, lorentz\r\nPolynomial-based functions: power law\r\nExponentials and logarithms: exponential, logarithm\r\n'''\r\n\r\n#Dictonary to hold the functions\r\nfunctions = {\r\n 'Constant': \r\n Function(name='Constant',\r\n func=lambda x,a: np.polyval([a],x),\r\n numberOfParameters=1,\r\n rawFuncStr=r\"$y = a$\",\r\n unicodeFuncStr=\"y = a\",\r\n rawParametersStr=[r'$a$'],\r\n unicodeParametersStr=['a']),\r\n\r\n 'Linear': \r\n Function(name='Linear',\r\n func=lambda x,a,b: np.polyval([a,b],x),\r\n numberOfParameters=2,\r\n rawFuncStr=r\"$y = ax+b$\",\r\n unicodeFuncStr=\"y = ax+b\",\r\n rawParametersStr=[r'$a$',r'$b$'],\r\n unicodeParametersStr=['a','b']),\r\n\r\n 'Quadratic': \r\n Function(name='Quadratic',\r\n func=lambda x,a,b,c: np.polyval([a,b,c],x),\r\n numberOfParameters=3,\r\n rawFuncStr=r\"$y = ax^2+bx+c$\",\r\n unicodeFuncStr=\"y = ax\\u00B2+bx+c\",\r\n rawParametersStr=[r'$a$',r'$b$',r'$c$'],\r\n unicodeParametersStr=['a','b','c']),\r\n\r\n 'Cubic': \r\n Function(name='Cubic',\r\n func=lambda x,a,b,c,d: np.polyval([a,b,c,d],x),\r\n numberOfParameters=4,\r\n rawFuncStr=r\"$y = ax^3+bx^2+cx+d$\",\r\n unicodeFuncStr=\"y = ax\\u00B3+bx\\u00B2+cx+d\",\r\n rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$'],\r\n unicodeParametersStr=['a','b','c','d']),\r\n\r\n 'Quartic': \r\n Function(name='Quadratic',\r\n func=lambda x,a,b,c,d,e: np.polyval([a,b,c,d,e],x),\r\n numberOfParameters=5,\r\n rawFuncStr=r\"$y = ax^4+bx^3+cx^2+dx+e$\",\r\n unicodeFuncStr=\"y = ax\\u2074+bx\\u00B3+cx\\u00B2+dx+e\",\r\n rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$',r'$e$'],\r\n unicodeParametersStr=['a','b','c','d','e']),\r\n\r\n 'Quintic': \r\n 
Function(name='Quintic',\r\n func=lambda x,a,b,c,d,e,f: np.polyval([a,b,c,d,e,f],x),\r\n numberOfParameters=6,\r\n rawFuncStr=r\"$y = ax^5+bx^4+cx^3+dx^2+ex+f$\",\r\n unicodeFuncStr=\"y = ax\\u2075+bx\\u2074+cx\\u00B3+dx\\u00B2+ex+f\",\r\n rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$',r'$e$',r'$f$'],\r\n unicodeParametersStr=['a','b','c','d','e','f']),\r\n\r\n 'Sine wave': \r\n Function(name='Sine wave',\r\n func=lambda x,y0,A,omg,phi: y0 + A*np.sin(omg*x+phi),\r\n numberOfParameters=4,\r\n rawFuncStr=r\"$y = y_0 + A[\\sin(\\omega x+\\phi)]$\",\r\n unicodeFuncStr=\"y = y\\u2080 + A sin(\\u03C9x+\\u03D5)\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$\\omega$',r'$\\phi$'],\r\n unicodeParametersStr=['y\\u2080','A','\\u03C9','\\u03D5']),\r\n\r\n 'Square wave': \r\n Function(name='Square wave',\r\n func=lambda x,y0,A,omg,phi: y0 + A*np.sign(np.sin(omg*x+phi)),\r\n numberOfParameters=4,\r\n rawFuncStr=r\"$y = y_0 + A\\/signum[\\sin(\\omega x+\\phi)]$\",\r\n unicodeFuncStr=\"y = y\\u2080 + A signum[sin(\\u03C9x+\\u03D5)]\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$\\omega$',r'$\\phi$'],\r\n unicodeParametersStr=['y\\u2080','A','\\u03C9','\\u03D5']),\r\n\r\n 'Gaussian': \r\n Function(name='Gaussian',\r\n func=lambda x,y0,A,mu,sig: y0 + (A/(sig*np.sqrt(2*np.pi)))*np.exp((-1/2)*((x-mu)/sig)**2),\r\n numberOfParameters=4,\r\n rawFuncStr=r\"$y = y_0 + \\frac{A}{\\sigma \\sqrt{2\\pi}}e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}$\",\r\n unicodeFuncStr=\"y = y\\u2080 + A/[\\u03C3 \\u221A(2\\u03C0)] \\u00D7 e^[-(x-\\u03BC)\\u00B2/(2\\u03C3\\u00B2)]\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$\\mu$',r'$\\sigma$'],\r\n unicodeParametersStr=['y\\u2080','A','\\u03BC','\\u03C3']),\r\n\r\n 'Poisson': \r\n Function(name='Poisson',\r\n func=lambda x,y0,A,lmd: y0 + A*(np.exp(-lmd))*(lmd**x)/sp.gamma(x),\r\n numberOfParameters=3,\r\n rawFuncStr=r\"$y = y_0 + A\\/\\frac{e^{-\\lambda}\\lambda^x}{x!}$\",\r\n unicodeFuncStr=\"y = y\\u2080 + A [(e^\\u03BB)(\\u03BB^x)]/x!\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$\\lambda$'],\r\n unicodeParametersStr=['y\\u2080','A','\\u03BB']),\r\n\r\n 'Laplacian': \r\n Function(name='Laplacian',\r\n func=lambda x,y0,A,mu,b: y0 + (A/(2*b))*np.exp(-np.abs(x-mu)/b),\r\n numberOfParameters=4,\r\n rawFuncStr=r\"$y = y_0 + \\frac{A}{2b}e^{-\\frac{|x-\\mu|}{b}}$\",\r\n unicodeFuncStr=\"y = y\\u2080 + A/(2b) \\u00D7 e^(-|(x-\\u03BC)|/b)\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$\\mu$',r'$b$'],\r\n unicodeParametersStr=['y\\u2080','A','\\u03BC','b']),\r\n\r\n 'Lorentzian': \r\n Function(name='Lorentzian',\r\n func=lambda x,y0,A,x0,omg: y0 + (2*A/np.pi)*(omg/(4*(x-x0)**2+omg**2)),\r\n numberOfParameters=4,\r\n rawFuncStr=r\"$y = y_0 + \\frac{2A}{\\pi}\\frac{\\omega}{4(x-x_0)^2+\\omega^2}$\",\r\n unicodeFuncStr=\"y = y\\u2080 + (2A/\\u03C0) \\u00D7 (\\u03C9/[4(x-x\\u2080)\\u00B2+\\u03C9\\u00B2])\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$x_0$',r'$\\omega$'],\r\n unicodeParametersStr=['y\\u2080','A','x\\u2080','\\u03C9']),\r\n\r\n 'Power': \r\n Function(name='Power',\r\n func=lambda x,A,b: A*(x)**b,\r\n numberOfParameters=2,\r\n rawFuncStr=r\"$y = Ax^b$\",\r\n unicodeFuncStr=\"y = A x\\u1D47\",\r\n rawParametersStr=[r'$A$',r'$b$'],\r\n unicodeParametersStr=['A','b']),\r\n\r\n 'Exponential': \r\n Function(name='Exponential',\r\n func=lambda x,y0,A,b: y0 + A*np.exp(b*x),\r\n numberOfParameters=3,\r\n rawFuncStr=r\"$y = y_0 + A\\/e^{bx}$\",\r\n unicodeFuncStr=\"y = y\\u2080 + A e^(bx)\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$b$'],\r\n unicodeParametersStr=['y\\u2080','A','b']),\r\n\r\n 'Logarithm': \r\n 
Function(name='Logarithm',\r\n func=lambda x,y0,A,x0: y0 + A*np.log(x-x0),\r\n numberOfParameters=3,\r\n rawFuncStr=r\"$y = y_0 + A\\/log(x-x_0)$\",\r\n unicodeFuncStr=\"y = y\\u2080 + A log(x-x\\u2080)\",\r\n rawParametersStr=[r'$y_0$',r'$A$',r'$x_0$'],\r\n unicodeParametersStr=['y\\u2080','A','x\\u2080'])\r\n }\r\n\r\n####################################################################################\r\n# GLOBAL VARIABLES #\r\n####################################################################################\r\n\r\n#DATA RELATED VARIABLES\r\ndata = [] #holds the data from data file\r\nx = [] #holds the x values from the data file\r\ny = [] #holds the y values from the data file\r\ny_err = [] #holds the y errors, either from user file or generated\r\nERR = bool #boolean to check if data file contains errors\r\nnumberOfDataPoints = int #holds the number of points in the data file\r\n#FIT FUNCTION RELATED VARIABLES\r\nfunction = '' #string holding the function to fit to\r\nnumberOfParameters = int #holds the number of parameters of the fitting function\r\n#FITTING VARIABLES\r\nfitStructure = [] #holds the fitting information from curve_fit/polyfit\r\nfitParameters = [] #holds the fitting parameters\r\nfitErrors = [] #holds the errors on the fitting parameters\r\nchiSquared = float #holds the final chi-squared value of the fit\r\nredChiSquared = float #holds the final reduced chi-squared value of the fit\r\nredChiSquaredLimits = [] #holds the \"acceptable range\" of reduced chi-squared\r\n\r\n####################################################################################\r\n# READING USER FILE #\r\n####################################################################################\r\n'''\r\nThis function tries to read the file held at fileLocation and sets the global\r\nvariables that hold all the information about the data set. 
\r\n@Arguments:\r\nfileLocation - string containing the location of the file chosen by user.\r\n@Return value:\r\nReturns an integer that specifies success (0) or failure (non 0) of the function.\r\n'''\r\ndef readFile(fileLocation):\r\n \r\n #Access to global variables\r\n global data\r\n global x\r\n global y\r\n global y_err\r\n global ERR\r\n global numberOfDataPoints\r\n\r\n #Checking if the file string is empty\r\n if(fileLocation == ''):\r\n return 1\r\n \r\n #Checking if the file is a .txt or .csv file\r\n if(not fileLocation.endswith('.txt') and not fileLocation.endswith('.csv') ):\r\n return 2\r\n \r\n #Trying to populate the data array from the file (allows both spaces and commas)\r\n try:\r\n with open(fileLocation, 'r') as file:\r\n clean_lines = [' '.join(line.split()) for line in file]\r\n for delims in [(' ,',','),(', ',','),(' ',',')]:\r\n clean_lines = [line.replace(*delims) for line in clean_lines]\r\n data = np.genfromtxt(clean_lines, delimiter=',',dtype='float_')\r\n except (TypeError, ValueError, AttributeError):\r\n return 3\r\n\r\n #Checking if the data array has 2 or 3 columns\r\n try:\r\n if(not len(data[0])==2 and not len(data[0])==3):\r\n return 4\r\n except TypeError:\r\n return 4\r\n\r\n #Checking if there are any NaN's or Inf's in the data\r\n if(np.any(np.isnan(data)) or np.any(np.isinf(data))):\r\n return 5\r\n\r\n #Checking if the errors are all positive number\r\n if(len(data[0])==3 and np.any(data[:,2]<=0)):\r\n return 6\r\n\r\n #Setting global variables\r\n numberOfDataPoints = len(data)\r\n data = data[data[:,0].argsort()] #Sorting the array in ascending order along x column\r\n x = data[:,0]\r\n y = data[:,1]\r\n \r\n #Checking if error along y axis has been provided\r\n if(len(data[0])==2):\r\n y_err = np.array([1 for i in data]) #Constant error to aid in best chi-squared estimate\r\n ERR = False\r\n elif(len(data[0])==3):\r\n y_err = data[:,2]\r\n ERR = True\r\n \r\n #All ran correctly!\r\n return 0\r\n \r\n####################################################################################\r\n# FIT - RELATED FUNCTIONS #\r\n####################################################################################\r\n'''\r\nThis function calculates the chi-squared against the data set given specific \r\nvalues of fitting function parameters. \r\n@Arguments:\r\nparams - array containing parameters of the fitting function to calculate chi-squared\r\nagainst\r\n@Return value:\r\nReturns chi-squared as a float.\r\n'''\r\ndef calcChiSquared(params):\r\n\r\n #Access to global variables\r\n global function\r\n global x\r\n global y\r\n global y_err\r\n\r\n #Returning chi-squared value for the given fitting function parameters\r\n return np.sum( ((y-functions[function].func(x,*params))/y_err)**2 )\r\n\r\n'''\r\nThis function calculates the final chi-squared as well as reduced chi-squared\r\nof the fit. 
It also calculates the acceptable range of reduced chi-squared based on\r\nthe chi-squared statistic.\r\n@Arguments:\r\n--\r\n@Return value:\r\n--\r\n'''\r\ndef calcGoodnessOfFit():\r\n\r\n #Access to global variables\r\n global numberOfDataPoints\r\n global numberOfParameters\r\n global fitParameters\r\n global chiSquared\r\n global redChiSquared\r\n global redChiSquaredLimits\r\n\r\n #Calculating degrees of freedom\r\n degreesOfFreedom = numberOfDataPoints - numberOfParameters\r\n #Calculating chi-squared and reduced chi-squared\r\n chiSquared = calcChiSquared(fitParameters)\r\n redChiSquared = chiSquared/degreesOfFreedom\r\n \r\n #Calculating the \"acceptable\" range of reduced chi-squared\r\n pValues = [0.95,0.05]\r\n redChiSquaredLimits = stats.chi2.isf(pValues,degreesOfFreedom)/degreesOfFreedom\r\n\r\n'''\r\nThis function provides an initial guess for the final fitting to take place in \r\nfitFunction(). It comes into play when the user wants to fit the data automatically.\r\nThe initial guess is based on a two step procedure. It involves looking at the data:\r\n1) and figuring out a single-valued \"guess\"\r\n2) or figuring out bounds on the parameters and obtaining a guess from that by\r\nglobal minimization of chi-squared using the scipy differential evolution algorithm.\r\n@Arguments:\r\n--\r\n@Return value:\r\nReturns an integer denoting success (0) or failure (non 0) of the function.\r\n'''\r\ndef guessParameters():\r\n \r\n #Access to global variables\r\n global function\r\n global x\r\n global y\r\n global numberOfDataPoints\r\n \r\n #Useful quantities for parameter estimation\r\n xmin = min(x)\r\n xmax = max(x)\r\n ymin = min(y)\r\n ymax = max(y)\r\n \r\n #Empty array to store \"initial guess\"\r\n iniParameters = []\r\n\r\n #All the parameter estimation happens here\r\n\r\n if(function in ['Constant','Linear','Quadratic','Cubic','Quartic','Quintic']):\r\n\r\n order = numberOfParameters - 1\r\n iniParameters = np.polyfit(x,y,deg=order,w=1/y_err)\r\n\r\n elif(function=='Sine wave'):\r\n\r\n x_range = xmax - xmin\r\n y_range = ymax - ymin\r\n\r\n y0_bound = (ymin+2/5*abs(y_range),ymax-2/5*abs(y_range))\r\n A_bound = (abs(y_range)/3,2*abs(y_range)/3)\r\n phi_bound = (0,2*np.pi)\r\n\r\n y_avg = np.average(y)\r\n y_std = np.std(y)\r\n yscaled = []\r\n for i in y:\r\n if(i>y_avg+y_std):\r\n yscaled.append(1)\r\n elif(i<y_avg-y_std):\r\n yscaled.append(-1)\r\n else:\r\n yscaled.append(0)\r\n flag = yscaled[0]\r\n crossings = 0\r\n for i in yscaled:\r\n if(i==0):\r\n continue\r\n if(flag==0):\r\n flag=i\r\n elif(i==-flag):\r\n flag = -flag\r\n crossings+=1\r\n crossings = crossings/2\r\n guess_f = crossings/x_range\r\n\r\n omg_bound = (0.5*(2*np.pi)*guess_f,2*(2*np.pi)*guess_f)\r\n\r\n BOUNDS = [y0_bound, A_bound, omg_bound, phi_bound]\r\n BOUNDS = [np.sort(bound) for bound in BOUNDS]\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x\r\n\r\n elif(function=='Square wave'):\r\n\r\n x_range = xmax - xmin\r\n y_range = ymax - ymin\r\n\r\n y0_bound = (ymin+2/5*abs(y_range),ymax-2/5*abs(y_range))\r\n A_bound = (abs(y_range)/3,2*abs(y_range)/3)\r\n phi_bound = (0,2*np.pi)\r\n\r\n y_avg = np.average(y)\r\n y_std = np.std(y)\r\n yscaled = []\r\n for i in y:\r\n if(i>y_avg+y_std):\r\n yscaled.append(1)\r\n elif(i<y_avg-y_std):\r\n yscaled.append(-1)\r\n else:\r\n yscaled.append(0)\r\n flag = yscaled[0]\r\n crossings = 0\r\n for i in yscaled:\r\n if(i==0):\r\n continue\r\n 
if(flag==0):\r\n flag=i\r\n elif(i==-flag):\r\n flag = -flag\r\n crossings+=1\r\n crossings = crossings/2\r\n guess_f = crossings/x_range\r\n\r\n omg_bound = (0.5*(2*np.pi)*guess_f,2*(2*np.pi)*guess_f)\r\n\r\n BOUNDS = [y0_bound, A_bound, omg_bound, phi_bound]\r\n BOUNDS = [np.sort(bound) for bound in BOUNDS]\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x\r\n\r\n elif(function=='Gaussian'):\r\n\r\n x_range = xmax - xmin\r\n y_range = ymax - ymin\r\n\r\n mu_bound = (xmin-x_range,xmax+x_range)\r\n omg_bound = (0,x_range)\r\n\r\n A_bound1 = (abs(y_range)/3,2*abs(y_range)*2.5*x_range)\r\n y0_bound1 = (ymin-y_range,ymin+y_range/2)\r\n BOUNDS1 = [y0_bound1, A_bound1, mu_bound, omg_bound]\r\n BOUNDS1 = [np.sort(bound) for bound in BOUNDS1]\r\n\r\n A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*2.5*x_range)\r\n y0_bound2 = (ymax-y_range/2,ymax+y_range)\r\n BOUNDS2 = [y0_bound2, A_bound2, mu_bound, omg_bound]\r\n BOUNDS2 = [np.sort(bound) for bound in BOUNDS2]\r\n\r\n BOUNDS_LIST = [BOUNDS1,BOUNDS2]\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n bestChiSquared = np.inf\r\n for BOUNDS in BOUNDS_LIST:\r\n tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x\r\n tempChiSquared = calcChiSquared(tempParameters)\r\n if(tempChiSquared < bestChiSquared):\r\n bestChiSquared = tempChiSquared\r\n iniParameters = tempParameters\r\n \r\n elif(function=='Poisson'):\r\n\r\n x_range = xmax - xmin\r\n y_range = ymax - ymin\r\n\r\n lmd_bound = (max(0,xmin-x_range),xmax+x_range)\r\n\r\n A_bound1 = (0,2*abs(y_range))\r\n y0_bound1 = (ymin-y_range,ymin+y_range/2) \r\n BOUNDS1 = [y0_bound1, A_bound1, lmd_bound]\r\n BOUNDS1 = [np.sort(bound) for bound in BOUNDS1]\r\n\r\n A_bound2 = (0,-2*abs(y_range))\r\n y0_bound2 = (ymax-y_range/2,ymax+y_range)\r\n BOUNDS2 = [y0_bound1, A_bound1, lmd_bound] \r\n BOUNDS2 = [np.sort(bound) for bound in BOUNDS2]\r\n\r\n BOUNDS_LIST = [BOUNDS1,BOUNDS2]\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n bestChiSquared = np.inf\r\n for BOUNDS in BOUNDS_LIST:\r\n tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x\r\n tempChiSquared = calcChiSquared(tempParameters)\r\n if(tempChiSquared < bestChiSquared):\r\n bestChiSquared = tempChiSquared\r\n iniParameters = tempParameters\r\n\r\n elif(function=='Laplacian'):\r\n \r\n x_range = xmax - xmin\r\n y_range = ymax - ymin\r\n\r\n mu_bound = (xmin-x_range,xmax+x_range)\r\n b_bound = (0,x_range)\r\n\r\n A_bound1 = (abs(y_range)/3,2*abs(y_range)*2*x_range)\r\n y0_bound1 = (ymin-y_range,ymin+y_range/2)\r\n BOUNDS1 = [y0_bound1, A_bound1, mu_bound, b_bound]\r\n BOUNDS1 = [np.sort(bound) for bound in BOUNDS1]\r\n\r\n A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*2*x_range)\r\n y0_bound2 = (ymax-y_range/2,ymax+y_range)\r\n BOUNDS2 = [y0_bound2, A_bound2, mu_bound, b_bound]\r\n BOUNDS2 = [np.sort(bound) for bound in BOUNDS2]\r\n\r\n BOUNDS_LIST = [BOUNDS1,BOUNDS2]\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n bestChiSquared = np.inf\r\n for BOUNDS in BOUNDS_LIST:\r\n tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x\r\n tempChiSquared = calcChiSquared(tempParameters)\r\n if(tempChiSquared < bestChiSquared):\r\n bestChiSquared = tempChiSquared\r\n iniParameters = tempParameters\r\n\r\n elif(function=='Lorentzian'):\r\n\r\n x_range = xmax - xmin\r\n 
y_range = ymax - ymin\r\n\r\n x0_bound = (xmin-x_range,xmax+x_range)\r\n omg_bound = (0,x_range)\r\n\r\n A_bound1 = (abs(y_range)/3,2*abs(y_range)*np.pi/2*x_range)\r\n y0_bound1 = (ymin-y_range,ymin+y_range/2)\r\n BOUNDS1 = [y0_bound1, A_bound1, x0_bound, omg_bound]\r\n BOUNDS1 = [np.sort(bound) for bound in BOUNDS1]\r\n\r\n A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*np.pi/2*x_range)\r\n y0_bound2 = (ymax-y_range/2,ymax+y_range)\r\n BOUNDS2 = [y0_bound2, A_bound2, x0_bound, omg_bound]\r\n BOUNDS2 = [np.sort(bound) for bound in BOUNDS2]\r\n\r\n BOUNDS_LIST = [BOUNDS1,BOUNDS2]\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n bestChiSquared = np.inf\r\n for BOUNDS in BOUNDS_LIST:\r\n tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x\r\n tempChiSquared = calcChiSquared(tempParameters)\r\n if(tempChiSquared < bestChiSquared):\r\n bestChiSquared = tempChiSquared\r\n iniParameters = tempParameters\r\n\r\n elif(function=='Power'):\r\n\r\n lX = np.log(abs(x), where=x>0)\r\n lY = np.log(abs(y), where=x>0)\r\n\r\n with np.errstate(invalid='ignore'):\r\n b_est, logA_est = np.polyfit(lX,lY,w=np.exp(lX),deg=1)\r\n A_est = np.exp(logA_est)\r\n\r\n A_bound = (-A_est,A_est)\r\n b_bound = (b_est-0.5*abs(b_est),b_est+0.5*abs(b_est))\r\n\r\n BOUNDS = [A_bound,b_bound]\r\n BOUNDS = [np.sort(bound) for bound in BOUNDS]\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x\r\n\r\n elif(function=='Exponential'):\r\n\r\n #Inspired by https://github.com/scipy/scipy/pull/9158\r\n\r\n s = np.empty_like(y)\r\n s[0] = 0\r\n s[1:] = np.cumsum(0.5 * (y[1:] + y[:-1]) * np.diff(x))\r\n\r\n xn = np.array(x - x[0])\r\n yn = np.array(y - y[0])\r\n \r\n sx2 = np.sum(xn**2)\r\n sxs = np.sum(xn*s)\r\n sys = np.sum(yn*s)\r\n ss2 = np.sum(s**2)\r\n sxy = np.sum(xn*yn)\r\n \r\n _, [b] = linalg.inv([[sx2, sxs], [sxs, ss2]]).dot([[sxy], [sys]])\r\n \r\n ex = np.exp(b * x)\r\n \r\n se1 = np.sum(ex)\r\n se2 = np.sum(ex**2)\r\n sy0 = np.sum(y)\r\n sye = np.sum((y * ex))\r\n \r\n [y0], [A] = linalg.inv([[x.size, se1], [se1, se2]]).dot([[sy0], [sye]])\r\n\r\n iniParameters = [y0,A,b]\r\n\r\n elif(function=='Logarithm'):\r\n \r\n #Inspired by https://github.com/scipy/scipy/pull/9158\r\n\r\n s = np.empty_like(x)\r\n s[0] = 0\r\n s[1:] = np.cumsum(0.5 * (x[1:] + x[:-1]) * np.diff(y))\r\n\r\n xn = np.array(x - x[0])\r\n yn = np.array(y - y[0])\r\n \r\n sy2 = np.sum(yn**2)\r\n sys = np.sum(yn*s)\r\n sxs = np.sum(xn*s)\r\n ss2 = np.sum(s**2)\r\n syx = np.sum(xn*yn)\r\n \r\n _, [t1] = linalg.inv([[sy2, sys], [sys, ss2]]).dot([[syx], [sxs]])\r\n \r\n A = 1/t1\r\n\r\n ey = np.exp(t1 * y)\r\n \r\n se1 = np.sum(ey)\r\n se2 = np.sum(ey**2)\r\n sx0 = np.sum(x)\r\n sxe = np.sum((x * ey))\r\n \r\n [x0], [t2] = linalg.inv([[x.size, se1], [se1, se2]]).dot([[sx0], [sxe]])\r\n\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n y0 = -A*np.log(t2)\r\n \r\n iniParameters = [y0,A,x0]\r\n\r\n #If there is no initial guess\r\n if(iniParameters==[]):\r\n return 1\r\n\r\n #Sending the \"best guess\" parameters to the final fitting algorithm\r\n return fitFunction(iniParameters)\r\n\r\n'''\r\nThis function converts a string containing the guess parameters for fitting\r\nprovided by the user into an array of floats for the fitFunction() to use.\r\nIt comes into play when the user wants to fit the data manually.\r\n@Arguments:\r\niniParametersString - string 
containing initial parameters\r\n@Return value:\r\nReturns an integer denoting success (0) or failure (non 0) of the function.\r\n'''\r\ndef manualParameters(iniParametersString):\r\n\r\n #Access to global variables\r\n global numberOfParameters\r\n\r\n #Splitting string delimited by commas\r\n splitString = iniParametersString.split(',')\r\n\r\n #Trying to populate the iniParameters array\r\n iniParameters = []\r\n try:\r\n for i in splitString:\r\n #Further splitting each sub-string into \"right and left\" of the \"=\" sign\r\n temp1 = i.split('=')\r\n #Taking the value on the right side and converting to float\r\n temp2 = float(temp1[1])\r\n #Adding it to the iniParameters array\r\n iniParameters.append(temp2)\r\n except (ValueError,IndexError):\r\n return 3\r\n\r\n #Checking if the number of parameters expected and received match\r\n if(len(iniParameters)!=numberOfParameters):\r\n return 3\r\n\r\n #Sending the \"manually input\" parameters to the final fitting algorithm \r\n return fitFunction(iniParameters) \r\n\r\n'''\r\nThis function does the final fitting of the data. It takes an inital guess on the \r\nparameters and optimizes from there.\r\n@Arguments:\r\niniParameters - array containing initial guess\r\n@Return value:\r\nReturns an integer denoting success (0) or failure (non 0) of the function.\r\n''' \r\ndef fitFunction(iniParameters):\r\n \r\n #Access to global variables\r\n global function\r\n global x\r\n global y\r\n global y_err\r\n global numberOfDataPoints\r\n global numberOfParameters\r\n global fitStructure\r\n global fitParameters\r\n global fitErrors\r\n \r\n if(numberOfDataPoints<numberOfParameters):\r\n return 2\r\n\r\n #Doing the final fitting of the data\r\n try:\r\n #Ignoring runtime warnings (in case the optimization passes through invalid values) \r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n #Main optimization happens here\r\n #Note: curve_fit populates sigma with 1's as a default.\r\n #absolute_sigma = True is the flag that forces errors to not be used in a relative manner\r\n fitStructure = opt.curve_fit(functions[function].func,x,y,absolute_sigma=True,p0=iniParameters,sigma=y_err)\r\n\r\n #Catching errors\r\n except RuntimeError as e:\r\n #Optimization failed\r\n if (str(e).startswith('Optimal parameters not found: Number of calls to function has reached maxfev')):\r\n return 1\r\n #Something else went wrong\r\n else:\r\n raise\r\n\r\n #Filling in the fit parameters and errors on them (from the covariance matrix)\r\n fitParameters = fitStructure[0]\r\n fitErrors = np.sqrt(np.diag(fitStructure[1]))\r\n\r\n #Quantizing the goodness of fit\r\n calcGoodnessOfFit()\r\n\r\n #All ran correctly! 
\r\n return 0\r\n\r\n####################################################################################\r\n# PLOTTING FUNCTIONS #\r\n####################################################################################\r\n'''\r\nThis function plots the raw data (without the fit).\r\n@Arguments:\r\nplotTitle - string holding the title of the plot\r\nxTitle - string holding the label for the x axis\r\nyTitle - string holding the label for the y axis\r\nviewGrid - boolean denoting whether the user wants the plot to have gridlines\r\n@Return value:\r\n--\r\n''' \r\ndef plotRawData(plotTitle,xTitle,yTitle,viewGrid):\r\n\r\n #Access to global variables\r\n global x\r\n global y\r\n global y_err\r\n global ERR\r\n\r\n #Creating figure and adding subplot\r\n figure1 = plt.figure()\r\n axes1 = figure1.add_subplot(111)\r\n\r\n #Setting x and y axis labels\r\n axes1.set_title(plotTitle, fontsize='x-large')\r\n axes1.set_xlabel(xTitle, fontsize='large')\r\n axes1.set_ylabel(yTitle, fontsize='large')\r\n\r\n #Checking if user wants to add grid to plot and adding them\r\n if(viewGrid):\r\n axes1.minorticks_on()\r\n axes1.set_axisbelow(True)\r\n axes1.grid(b=True, which='major', alpha=0.5)\r\n axes1.grid(b=True, which='minor', alpha=0.2)\r\n\r\n #Plotting the raw data\r\n if(ERR):\r\n axes1.errorbar(x,y,y_err,fmt='.',color='midnightblue',ecolor='royalblue',capsize=2)\r\n else:\r\n axes1.scatter(x,y,color='midnightblue', label='Data')\r\n \r\n #Displaying the beauty\r\n figure1.show()\r\n\r\n'''\r\nThis function plots the raw data along with the fitting function and shows the \r\nfitting parameters if the user wants to see it.\r\n@Arguments:\r\nplotTitle - string holding the title of the plot\r\nxTitle - string holding the label for the x axis\r\nyTitle - string holding the label for the y axis\r\nviewGrid - boolean holding whether the user wants the plot to have gridlines\r\nviewParameters - boolean holding whether the user wants to see the fitting\r\nparameters\r\nviewResiduals - boolean holding whether the user wants to see the residuals\r\nplot\r\n@Return value:\r\n--\r\n''' \r\ndef plotFitData(plotTitle,xTitle,yTitle,viewGrid,viewParameters,viewResiduals):\r\n\r\n #Access to global variables\r\n global x\r\n global y\r\n global y_err\r\n global ERR\r\n global numberOfDataPoints\r\n global function\r\n global numberOfParameters\r\n global fitParameters\r\n global fitErrors\r\n global chiSquared\r\n global redChiSquared\r\n global redChiSquaredLimits\r\n\r\n #Creating figure and adding subplots\r\n figure2 = plt.figure()\r\n if(viewResiduals and viewParameters):\r\n gs = gridspec.GridSpec(2, 2, height_ratios=[3, 1], width_ratios=[4,1]) \r\n axes2 = figure2.add_subplot(gs[0,0])\r\n axes3 = figure2.add_subplot(gs[1,0])\r\n axes4 = figure2.add_subplot(gs[0,1])\r\n elif(viewResiduals and not viewParameters):\r\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) \r\n axes2 = figure2.add_subplot(gs[0])\r\n axes3 = figure2.add_subplot(gs[1])\r\n elif(not viewResiduals and viewParameters):\r\n gs = gridspec.GridSpec(1, 2, width_ratios=[4, 1])\r\n axes2 = figure2.add_subplot(gs[0])\r\n axes4 = figure2.add_subplot(gs[1])\r\n else:\r\n axes2 = figure2.add_subplot(111)\r\n\r\n #Setting axes titles\r\n axes2.set_title(plotTitle, fontsize='x-large')\r\n axes2.set_xlabel(xTitle, fontsize='large')\r\n axes2.set_ylabel(yTitle, fontsize='large')\r\n\r\n #Checking if user wants to add grid to plot and adding them\r\n if(viewGrid):\r\n axes2.minorticks_on()\r\n axes2.set_axisbelow(True)\r\n axes2.grid(b=True, 
which='major', alpha=0.5)\r\n axes2.grid(b=True, which='minor', alpha=0.2)\r\n if(viewResiduals):\r\n axes3.minorticks_on()\r\n axes3.set_axisbelow(True)\r\n axes3.grid(b=True, which='major', alpha=0.5)\r\n axes3.grid(b=True, which='minor', alpha=0.2)\r\n\r\n #Plotting the raw data\r\n if(ERR):\r\n axes2.errorbar(x,y,y_err, fmt='.', color='midnightblue', ecolor='royalblue', capsize=2, zorder=1, label='Data')\r\n else:\r\n axes2.scatter(x,y,color='midnightblue', label='Data')\r\n \r\n #Plotting the best fit\r\n xx = np.linspace(min(x),max(x),1000)\r\n yy = functions[function].func(xx,*fitParameters)\r\n axes2.plot(xx,yy,color='darkorange', zorder=2, label='Fit function')\r\n\r\n #Plotting the residuals\r\n if(viewResiduals):\r\n residuals = functions[function].func(x,*fitParameters) - y\r\n axes3.axhline(0,color='darkorange', zorder=2)\r\n if(ERR==True):\r\n axes3.errorbar(x,residuals,y_err,fmt='.', color='midnightblue', ecolor='royalblue', capsize=2, zorder=1)\r\n else:\r\n axes3.scatter(x,residuals,color='midnightblue')\r\n\r\n #Adding legend to the plot\r\n axes2.legend(markerscale=2, fontsize='large')\r\n\r\n #Displaying fit parameters if the user wants\r\n if(viewParameters):\r\n\r\n #Removing x and y axis\r\n axes4.set_axis_off()\r\n\r\n #Declaring the string array that holds everything displayed in the parameters box\r\n parametersStr = []\r\n\r\n #Adding function type to parameters box\r\n parametersStr.append(r\"$\\bf{Function:}$\")\r\n parametersStr.append(functions[function].name)\r\n parametersStr.append(functions[function].rawFuncStr)\r\n\r\n #Adding fit parameters to the parameters box\r\n parametersStr.append(\"\")\r\n parametersStr.append(r\"$\\bf{Fitting\\/parameters:}$\")\r\n for i in range(numberOfParameters):\r\n parametersStr.append(functions[function].rawParametersStr[i]+r' = {0:.5e} $\\pm$ {1:.5e}'.format(fitParameters[i],fitErrors[i]))\r\n\r\n #Adding some additional fitting details to the parameters box\r\n parametersStr.append(\"\")\r\n parametersStr.append(r\"$\\bf{Other\\/fitting\\/data:}$\")\r\n parametersStr.append(r'Number of data points = {0}'.format(numberOfDataPoints))\r\n parametersStr.append(r'Number of parameters = {0}'.format(numberOfParameters))\r\n parametersStr.append(r'$\\chi^2$ = {0:.5e}'.format(chiSquared))\r\n parametersStr.append(r'$\\chi_r^2$ = {0:.5e}'.format(redChiSquared))\r\n parametersStr.append(r'Acceptable range of $\\chi_r^2$ = ({0:.2f},{1:.2f})'.format(redChiSquaredLimits[0],redChiSquaredLimits[1]))\r\n\r\n #Adding an important note\r\n if(not ERR):\r\n parametersStr.append(\"\")\r\n parametersStr.append(r'$\\bf{Note}$: Errors and chi-squared estimates')\r\n parametersStr.append(r'here dont mean much since no errors')\r\n parametersStr.append(r'along y-axis are present!') \r\n\r\n #Joining all elements of the string array into a single string separated by \\n's \r\n parametersStr = '\\n'.join(parametersStr)\r\n\r\n #Placing the parameters box in the plot\r\n axes4.text(-0.35,1.0,parametersStr, bbox=dict(boxstyle=\"square\", fc=\"lemonchiffon\", ec=\"darkorange\", pad=0.5),\r\n va='top', ha='left', fontsize='large', linespacing=1.3)\r\n\r\n #Displaying the beauty\r\n figure2.show()\r\n\r\n'''\r\nAPPENDIX:\r\nCheck efficiency of differential evolution against other global minimization techniques:\r\niniParameters = opt.brute(calcChiSquared,ranges=[],finish=opt.fmin)\r\niniParameters = opt.basinhopping(calcChiSquared,x0=[])\r\n'''\r\n" ]
[ [ "scipy.special.gamma", "numpy.genfromtxt", "numpy.exp", "numpy.sort", "numpy.sin", "numpy.log", "scipy.stats.chi2.isf", "numpy.polyval", "numpy.polyfit", "numpy.sqrt", "numpy.empty_like", "matplotlib.use", "numpy.array", "matplotlib.pyplot.figure", "numpy.diff", "numpy.std", "scipy.linalg.inv", "matplotlib.gridspec.GridSpec", "numpy.isinf", "numpy.isnan", "scipy.optimize.curve_fit", "numpy.errstate", "scipy.optimize.differential_evolution", "numpy.sum", "numpy.any", "numpy.abs", "numpy.average", "numpy.diag" ] ]
DonnieKim411/triton-inference-server
[ "933e5afe1d40c4801186dabdc2ee613e4f0769c6" ]
[ "qa/L0_infer_variable/infer_variable_test.py" ]
[ "# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nsys.path.append(\"../common\")\n\nimport os\nimport unittest\nimport numpy as np\nimport infer_util as iu\nimport test_util as tu\n\nnp_dtype_string = np.dtype(object)\n\nTEST_SYSTEM_SHARED_MEMORY = bool(\n int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))\nTEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY',\n 0)))\n\n\nclass InferVariableTest(unittest.TestCase):\n\n def _full_exact(self,\n input_dtype,\n output0_dtype,\n output1_dtype,\n input_shape,\n output0_shape,\n output1_shape,\n output0_raw=True,\n output1_raw=True,\n swap=False):\n\n def _infer_exact_helper(tester,\n pf,\n tensor_shape,\n batch_size,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=True,\n output1_raw=True,\n model_version=None,\n swap=False,\n outputs=(\"OUTPUT0\", \"OUTPUT1\"),\n use_http=True,\n use_grpc=True,\n skip_request_id_check=False,\n use_streaming=True,\n correlation_id=0):\n for bs in (1, batch_size):\n # model that does not support batching\n if bs == 1:\n iu.infer_exact(\n tester,\n pf + \"_nobatch\",\n tensor_shape,\n bs,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw,\n output1_raw,\n model_version,\n swap,\n outputs,\n use_http,\n use_grpc,\n skip_request_id_check,\n use_streaming,\n correlation_id,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n # model that supports batching\n iu.infer_exact(\n tester,\n pf, (bs,) + tensor_shape,\n bs,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw,\n output1_raw,\n model_version,\n swap,\n outputs,\n use_http,\n use_grpc,\n skip_request_id_check,\n use_streaming,\n correlation_id,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n all_ensemble_prefix = [\"simple_\", \"sequence_\", \"fan_\"]\n ensemble_prefix = [\"\"]\n for prefix in all_ensemble_prefix:\n if tu.validate_for_ensemble_model(prefix, input_dtype,\n output0_dtype, output1_dtype,\n 
input_shape, input_shape,\n input_shape):\n ensemble_prefix.append(prefix)\n\n if tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,\n input_shape, output0_shape, output1_shape):\n for prefix in ensemble_prefix:\n for pf in [\"graphdef\", \"savedmodel\"]:\n _infer_exact_helper(self,\n prefix + pf,\n input_shape,\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n if tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,\n input_shape, output0_shape, output1_shape):\n for prefix in ensemble_prefix:\n if input_dtype == np.int8:\n _infer_exact_helper(self,\n prefix + 'plan',\n input_shape + (1, 1),\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n else:\n _infer_exact_helper(self,\n prefix + 'plan',\n input_shape,\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n if tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype,\n input_shape, output0_shape, output1_shape):\n for prefix in ensemble_prefix:\n _infer_exact_helper(self,\n prefix + 'netdef',\n input_shape,\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n # the custom model is src/custom/addsub... it does not swap\n # the inputs so always set to False\n if tu.validate_for_custom_model(input_dtype, output0_dtype,\n output1_dtype, input_shape,\n output0_shape, output1_shape):\n # No basic ensemble models are created against custom models\n _infer_exact_helper(self,\n 'custom',\n input_shape,\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=False)\n\n if tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,\n input_shape, output0_shape,\n output1_shape):\n # No basic ensemble models are created against custom models [TODO]\n _infer_exact_helper(self,\n 'onnx',\n input_shape,\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n if tu.validate_for_libtorch_model(input_dtype, output0_dtype,\n output1_dtype, input_shape,\n output0_shape, output1_shape):\n # No basic ensemble models are created against custom models [TODO]\n _infer_exact_helper(self,\n 'libtorch',\n input_shape,\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n def test_raw_fff(self):\n self._full_exact(np.float32, np.float32, np.float32, (16,), (16,),\n (16,))\n\n def test_raw_fii(self):\n self._full_exact(np.float32, np.int32, np.int32, (2, 8), (2, 8), (2, 8))\n\n def test_raw_fll(self):\n self._full_exact(np.float32, np.int64, np.int64, (8, 4), (8, 4), (8, 4))\n\n def test_raw_fil(self):\n self._full_exact(np.float32, np.int32, np.int64, (2, 8, 2), (2, 8, 2),\n (2, 8, 2))\n\n def test_raw_ffi(self):\n self._full_exact(np.float32, np.float32, np.int32, (16,), (16,), (16,))\n\n def test_raw_iii(self):\n self._full_exact(np.int32, np.int32, np.int32, (2, 8), (2, 8), (2, 8))\n\n def test_faw_iif(self):\n self._full_exact(np.int32, np.int32, np.float32, (2, 8, 2), (2, 8, 2),\n (2, 8, 2))\n\n def test_raw_ooo(self):\n self._full_exact(np_dtype_string, np_dtype_string, np_dtype_string,\n (16,), (16,), (16,))\n\n def test_raw_oii(self):\n self._full_exact(np_dtype_string, np.int32, np.int32, (2, 8), (2, 8),\n (2, 8))\n\n def 
test_raw_ooi(self):\n self._full_exact(np_dtype_string, np_dtype_string, np.int32, (8, 4),\n (8, 4), (8, 4))\n\n def test_raw_oio(self):\n self._full_exact(np_dtype_string, np.int32, np_dtype_string, (2, 8, 2),\n (2, 8, 2), (2, 8, 2))\n\n def test_class_fff(self):\n self._full_exact(np.float32,\n np.float32,\n np.float32, (16,), (16,), (16,),\n output0_raw=False,\n output1_raw=False)\n\n def test_class_fii(self):\n self._full_exact(np.float32,\n np.int32,\n np.int32, (2, 8), (2, 8), (2, 8),\n output0_raw=False,\n output1_raw=False)\n\n def test_class_fll(self):\n self._full_exact(np.float32,\n np.int64,\n np.int64, (8, 4), (8, 4), (8, 4),\n output0_raw=False,\n output1_raw=False)\n\n def test_class_fil(self):\n self._full_exact(np.float32,\n np.int32,\n np.int64, (2, 8, 2), (2, 8, 2), (2, 8, 2),\n output0_raw=False,\n output1_raw=False)\n\n def test_class_ffi(self):\n self._full_exact(np.float32,\n np.float32,\n np.int32, (16,), (16,), (16,),\n output0_raw=False,\n output1_raw=False)\n\n def test_class_iii(self):\n self._full_exact(np.int32,\n np.int32,\n np.int32, (2, 8), (2, 8), (2, 8),\n output0_raw=False,\n output1_raw=False)\n\n def test_class_iif(self):\n self._full_exact(np.int32,\n np.int32,\n np.float32, (2, 8, 2), (2, 8, 2), (2, 8, 2),\n output0_raw=False,\n output1_raw=False)\n\n def test_mix_ffi(self):\n self._full_exact(np.float32,\n np.float32,\n np.int32, (16,), (16,), (16,),\n output0_raw=True,\n output1_raw=False)\n\n def test_mix_iii(self):\n self._full_exact(np.int32,\n np.int32,\n np.int32, (2, 8), (2, 8), (2, 8),\n output0_raw=False,\n output1_raw=True)\n\n def test_mix_iif(self):\n self._full_exact(np.int32,\n np.int32,\n np.float32, (2, 8, 2), (2, 8, 2), (2, 8, 2),\n output0_raw=True,\n output1_raw=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.dtype" ] ]
MinHyung-Kang/Thesis
[ "846c28ec1ca0ffd8acfa3f1f6dce88cd6acb76ed" ]
[ "python/bayesian_nn_subset.py" ]
[ "import theano.tensor as T\nimport theano\nimport numpy as np\nfrom scipy.spatial.distance import pdist, squareform, cdist\nimport random\nimport time\n\n'''\n Sample code to reproduce our results for the Bayesian neural network example.\n Our settings are almost the same as Hernandez-Lobato and Adams (ICML15) https://jmhldotorg.files.wordpress.com/2015/05/pbp-icml2015.pdf\n Our implementation is also based on their Python code.\n\n p(y | W, X, \\gamma) = \\prod_i^N N(y_i | f(x_i; W), \\gamma^{-1})\n p(W | \\lambda) = \\prod_i N(w_i | 0, \\lambda^{-1})\n p(\\gamma) = Gamma(\\gamma | a0, b0)\n p(\\lambda) = Gamma(\\lambda | a0, b0)\n\n The posterior distribution is as follows:\n p(W, \\gamma, \\lambda) = p(y | W, X, \\gamma) p(W | \\lambda) p(\\gamma) p(\\lambda)\n To avoid negative values of \\gamma and \\lambda, we update loggamma and loglambda instead.\n\n Copyright (c) 2016, Qiang Liu & Dilin Wang\n All rights reserved.\n'''\n\nclass svgd_bayesnn:\n\n '''\n We define a one-hidden-layer-neural-network specifically. We leave extension of deep neural network as our future work.\n\n Input\n -- X_train: training dataset, features\n -- y_train: training labels\n -- batch_size: sub-sampling batch size\n -- max_iter: maximum iterations for the training procedure\n -- M: number of particles are used to fit the posterior distribution\n -- n_hidden: number of hidden units\n -- a0, b0: hyper-parameters of Gamma distribution\n -- master_stepsize, auto_corr: parameters of adgrad\n '''\n def __init__(self, X_train, y_train, X_test, y_text, batch_size = 100, max_iter = 1000, M = 20, n_hidden = 50,\n a0 = 1, b0 = 0.1, master_stepsize = 1e-3, auto_corr = 0.9, h=-1, alpha = 0.9,\n method = 'none',m=5, cf = False, uStat = True, regCoeff = 0.1, adver = False, adverMaxIter = 5,\n maxTime = 20, numTimeSteps = 20):\n self.n_hidden = n_hidden\n self.d = X_train.shape[1] # number of data, dimension\n self.M = M\n\n num_vars = self.d * n_hidden + n_hidden * 2 + 3 # w1: d*n_hidden; b1: n_hidden; w2 = n_hidden; b2 = 1; 2 variances\n self.theta = np.zeros([self.M, num_vars]) # particles, will be initialized later\n\n '''\n We keep the last 10% (maximum 500) of training data points for model developing\n '''\n size_dev = min(int(np.round(0.1 * X_train.shape[0])), 500)\n X_dev, y_dev = X_train[-size_dev:], y_train[-size_dev:]\n X_train, y_train = X_train[:-size_dev], y_train[:-size_dev]\n\n '''\n The data sets are normalized so that the input features and the targets have zero mean and unit variance\n '''\n self.std_X_train = np.std(X_train, 0)\n self.std_X_train[ self.std_X_train == 0 ] = 1\n self.mean_X_train = np.mean(X_train, 0)\n\n self.mean_y_train = np.mean(y_train)\n self.std_y_train = np.std(y_train)\n\n '''\n Theano symbolic variables\n Define the neural network here\n '''\n X = T.matrix('X') # Feature matrix\n y = T.vector('y') # labels\n\n w_1 = T.matrix('w_1') # weights between input layer and hidden layer\n b_1 = T.vector('b_1') # bias vector of hidden layer\n w_2 = T.vector('w_2') # weights between hidden layer and output layer\n b_2 = T.scalar('b_2') # bias of output\n\n N = T.scalar('N') # number of observations\n\n log_gamma = T.scalar('log_gamma') # variances related parameters\n log_lambda = T.scalar('log_lambda')\n\n ###\n prediction = T.dot(T.nnet.relu(T.dot(X, w_1)+b_1), w_2) + b_2\n\n ''' define the log posterior distribution '''\n log_lik_data = -0.5 * X.shape[0] * (T.log(2*np.pi) - log_gamma) - (T.exp(log_gamma)/2) * T.sum(T.power(prediction - y, 2))\n log_prior_data = (a0 - 1) * log_gamma 
- b0 * T.exp(log_gamma) + log_gamma\n log_prior_w = -0.5 * (num_vars-2) * (T.log(2*np.pi)-log_lambda) - (T.exp(log_lambda)/2)*((w_1**2).sum() + (w_2**2).sum() + (b_1**2).sum() + b_2**2) \\\n + (a0-1) * log_lambda - b0 * T.exp(log_lambda) + log_lambda\n\n # sub-sampling mini-batches of data, where (X, y) is the batch data, and N is the number of whole observations\n log_posterior = (log_lik_data * N / X.shape[0] + log_prior_data + log_prior_w)\n dw_1, db_1, dw_2, db_2, d_log_gamma, d_log_lambda = T.grad(log_posterior, [w_1, b_1, w_2, b_2, log_gamma, log_lambda])\n\n # automatic gradient\n logp_gradient = theano.function(\n inputs = [X, y, w_1, b_1, w_2, b_2, log_gamma, log_lambda, N],\n outputs = [dw_1, db_1, dw_2, db_2, d_log_gamma, d_log_lambda]\n )\n\n # prediction function\n self.nn_predict = theano.function(inputs = [X, w_1, b_1, w_2, b_2], outputs = prediction)\n\n '''\n Training with SVGD\n '''\n # normalization\n X_train, y_train = self.normalization(X_train, y_train)\n N0 = X_train.shape[0] # number of observations\n\n ''' initializing all particles '''\n for i in range(self.M):\n w1, b1, w2, b2, loggamma, loglambda = self.init_weights(a0, b0)\n # use better initialization for gamma\n ridx = np.random.choice(range(X_train.shape[0]), \\\n np.min([X_train.shape[0], 1000]), replace = False)\n y_hat = self.nn_predict(X_train[ridx,:], w1, b1, w2, b2)\n loggamma = -np.log(np.mean(np.power(y_hat - y_train[ridx], 2)))\n self.theta[i,:] = self.pack_weights(w1, b1, w2, b2, loggamma, loglambda)\n\n grad_theta = np.zeros([self.M, num_vars]) # gradient\n # adagrad with momentum\n fudge_factor = 1e-6\n historical_grad = 0\n self.y_historical_grad = 0\n self.h_historical_grad = 0\n\n self.rmse_overTime = np.zeros(numTimeSteps) # RMSE\n self.llh_overTime = np.zeros(numTimeSteps) # LLH\n self.iter_overTime = np.zeros(numTimeSteps) # LLH\n timeStepUnit = maxTime / numTimeSteps # Time to check every iteration\n timeInd = 0;\n\n start_time = time.time()\n for iter in range(max_iter):\n if method == 'subparticles':\n self.Sqy = np.zeros([m, num_vars]) # Sqy\n elif method == 'inducedPoints' or method == 'none':\n self.Sqx = np.zeros([self.M, num_vars]) # Sqx\n h = -1;\n # sub-sampling\n batch = [ i % N0 for i in range(iter * batch_size, (iter + 1) * batch_size) ]\n\n if method == 'none' or method =='inducedPoints':\n for i in range(self.M):\n w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i,:])\n dw1, db1, dw2, db2, dloggamma, dloglambda = logp_gradient(X_train[batch,:], y_train[batch], w1, b1, w2, b2, loggamma, loglambda, N0)\n self.Sqx[i,:] = self.pack_weights(dw1, db1, dw2, db2, dloggamma, dloglambda)\n\n if method == 'none':\n grad_theta = self.svgd_kernel(h=h)\n elif method == 'inducedPoints':\n self.yInd = np.random.choice(self.theta.shape[0], m, replace=False)\n self.y = self.theta[self.yInd]\n grad_theta = self.svgd_kernel_inducedPoints(h=h, uStat = uStat, regCoeff = regCoeff, adver=adver, adverMaxIter = adverMaxIter)\n\n elif method == 'subparticles':\n self.yInd = np.random.choice(self.theta.shape[0], m, replace=False)\n self.y = self.theta[self.yInd]\n\n for i in range(m):\n w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.y[i,:])\n dw1, db1, dw2, db2, dloggamma, dloglambda = logp_gradient(X_train[batch,:], y_train[batch], w1, b1, w2, b2, loggamma, loglambda, N0)\n self.Sqy[i,:] = self.pack_weights(dw1, db1, dw2, db2, dloggamma, dloglambda)\n\n grad_theta = self.svgd_kernel_subset(h=-1, cf=cf)\n\n [adj_grad, historical_grad] = self.get_adamUpdate(iter, 
grad_theta, historical_grad,master_stepsize, alpha, fudge_factor)\n self.theta = self.theta + adj_grad;\n elapsed_time = time.time() - start_time\n\n if elapsed_time > timeStepUnit:\n self.thetaCopy = np.copy(self.theta)\n\n # Evaluate and save\n '''\n Model selection by using a development set\n '''\n X_dev = self.normalization(X_dev)\n for i in range(self.M):\n w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.thetaCopy[i, :])\n pred_y_dev = self.nn_predict(X_dev, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train\n # likelihood\n def f_log_lik(loggamma): return np.sum( np.log(np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( -1 * (np.power(pred_y_dev - y_dev, 2) / 2) * np.exp(loggamma) )) )\n # The higher probability is better\n lik1 = f_log_lik(loggamma)\n # one heuristic setting\n loggamma = -np.log(np.mean(np.power(pred_y_dev - y_dev, 2)))\n lik2 = f_log_lik(loggamma)\n if lik2 > lik1:\n self.thetaCopy[i,-2] = loggamma # update loggamma\n\n svgd_rmse, svgd_ll = self.evaluation(X_test, y_test)\n self.rmse_overTime[timeInd] = svgd_rmse\n self.llh_overTime[timeInd] = svgd_ll\n self.iter_overTime[timeInd] = iter\n\n start_time = time.time()\n timeInd = timeInd + 1\n\n\n # Break after maxTime\n if timeInd >= numTimeSteps:\n print('Reached ', iter, 'iterations\\n')\n break\n\n\n def normalization(self, X, y = None):\n X = (X - np.full(X.shape, self.mean_X_train)) / \\\n np.full(X.shape, self.std_X_train)\n\n if y is not None:\n y = (y - self.mean_y_train) / self.std_y_train\n return (X, y)\n else:\n return X\n\n '''\n Initialize all particles\n '''\n def init_weights(self, a0, b0):\n w1 = 1.0 / np.sqrt(self.d + 1) * np.random.randn(self.d, self.n_hidden)\n b1 = np.zeros((self.n_hidden,))\n w2 = 1.0 / np.sqrt(self.n_hidden + 1) * np.random.randn(self.n_hidden)\n b2 = 0.\n loggamma = np.log(np.random.gamma(a0, b0))\n loglambda = np.log(np.random.gamma(a0, b0))\n return (w1, b1, w2, b2, loggamma, loglambda)\n\n '''\n Returns control functional weights\n '''\n def getWeights(self, KpMat):\n condNumber = self.getConditionNumber(KpMat)\n z = KpMat.shape[0]\n\n # Get weights\n KPrime = KpMat + condNumber * z * np.identity(z)\n num = np.matmul(np.ones(z),np.linalg.inv(KPrime))\n denom = 1 + np.matmul(num,np.ones(z))\n weights = num / denom\n\n weights = weights / sum(weights)\n\n return (weights)\n\n '''\n Given a kernel matrix K, let lambda be smallest power of 10 such that\n kernel matrix K0 + lamba*I has condition number lower than 10^10\n Note we use 2-norm for computing condition number\n '''\n def getConditionNumber(self, K):\n condNumber = 10e-10\n condA = 10e11\n matSize = K.shape[0]\n while condA > 10e10:\n condNumber = condNumber * 10\n A = K + condNumber * np.identity(matSize)\n condA = np.linalg.norm(A, ord=2) * np.linalg.norm(np.linalg.inv(A), ord=2)\n return (condNumber)\n\n '''\n Calculate kernel matrix and its gradient: K, \\nabla_x k\n '''\n def svgd_kernel(self, h = -1):\n n,d = self.theta.shape\n sq_dist = pdist(self.theta)\n pairwise_dists = squareform(sq_dist)**2\n if h < 0: # if h < 0, using median trick\n h = np.median(pairwise_dists)\n h = np.sqrt(0.5 * h / np.log(n+1))\n\n # compute the rbf kernel\n Kxy = np.exp( -pairwise_dists / h**2 / 2)\n\n dxkxy = -np.matmul(Kxy, self.theta)\n sumkxy = np.sum(Kxy, axis=1)\n for i in range(d):\n dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy)\n dxkxy = dxkxy / (h**2)\n\n grad_theta = (np.matmul(Kxy, self.Sqx) + dxkxy) / n\n\n return grad_theta\n\n '''\n Compute gradient update for theta using svgd 
random subset (with optional control functional)\n '''\n def svgd_kernel_subset(self, h=-1, cf = False):\n n,d = self.theta.shape\n m = self.y.shape[0]\n\n\n\n pairwise_dists = cdist(self.theta, self.y)**2\n\n if h < 0: # if h < 0, using median trick\n h = np.median(pairwise_dists)\n h = np.sqrt(0.5 * h / np.log(n+1))\n\n # compute the rbf kernel\n Kxy = np.exp( -pairwise_dists / h**2 / 2)\n\n if cf == True : # Using control functional\n sqxdy_part = np.array([np.sum(np.multiply(self.Sqy,self.y),axis=1),]*m).T\n sqxdy = -(np.matmul(self.Sqy,self.y.T)- sqxdy_part)/ h**2\n dxsqy = sqxdy.T\n dxdy = -pairwise_dists[self.yInd]/h**4 +d/h**2\n KxySub = Kxy[self.yInd]\n\n KpMat = (np.matmul(self.Sqy, self.Sqy.T) + sqxdy + dxsqy + dxdy)\n KpMat = np.multiply(KpMat, KxySub)\n\n weights = self.getWeights(KpMat)\n Kxy = np.multiply(Kxy, np.matlib.repmat(weights, n, 1))\n\n dxkxy = -np.matmul(Kxy, self.y)\n sumkxy = np.sum(Kxy, axis=1)\n for i in range(d):\n dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy)\n dxkxy = dxkxy / (h**2)\n\n grad_theta = (np.matmul(Kxy, self.Sqy) + dxkxy)\n if cf == False:\n grad_theta = grad_theta / m\n\n return grad_theta\n\n '''\n Perform a step of adam update\n '''\n def get_adamUpdate(self, iterInd, ori_grad, hist_grad, stepsize = 1e-3, alpha = 0.9, fudge_factor = 1e-6):\n if iterInd == 0:\n hist_grad = hist_grad + ori_grad ** 2\n else:\n hist_grad = alpha * hist_grad + (1 - alpha) * (ori_grad ** 2)\n\n adj_grad = np.divide(ori_grad, fudge_factor+np.sqrt(hist_grad))\n\n return (stepsize * adj_grad, hist_grad)\n\n '''\n Compute gradient update for y\n '''\n def svgd_kernel_grady(self, h=-1, uStat=True, regCoeff=0.1):\n m = self.y.shape[0]\n xAdverSubsetInd = np.random.choice(self.theta.shape[0], m, replace=False)\n self.thetaSubset = self.theta[xAdverSubsetInd,:]\n self.SqxSubset = self.Sqx[xAdverSubsetInd,:]\n\n #self.thetaSubset = np.copy(self.theta)\n #self.SqxSubset = np.copy(self.Sqx)\n n,d = self.thetaSubset.shape\n\n\n pairwise_dists = cdist(self.thetaSubset, self.y)**2\n\n if h < 0: # if h < 0, using median trick\n h = np.median(pairwise_dists)\n h = np.sqrt(0.5 * h / np.log(n+1))\n\n # compute the rbf kernel\n Kxy = np.exp( -pairwise_dists / h**2 / 2)\n\n yGrad = np.zeros((m,d));\n\n # Compute gradient\n for yInd in range(m):\n Kxy_cur = Kxy[:,yInd];\n xmy = (self.thetaSubset - np.tile(self.y[yInd,:],[n,1]))/h**2\n Sqxxmy = self.SqxSubset - xmy;\n back = np.tile(np.array([Kxy_cur]).T,(1,d)) * Sqxxmy\n inner = np.tile(np.array([np.sum(np.matmul(back, back.T),axis=1)]).T,[1,d])\n yGrad[yInd,:] = np.sum(xmy * inner,axis=0) + np.sum(back,axis=0) * np.sum(Kxy_cur)/h**2\n\n # For U-statistic\n if uStat:\n front_u = np.tile(np.array([(Kxy_cur**2) * np.sum(Sqxxmy **2,axis=1)]).T,[1,d]) * xmy;\n back_u = np.tile(np.array([Kxy_cur**2 / h**2]).T,[1,d]) * Sqxxmy\n\n yGrad[yInd,:] = yGrad[yInd,:] - np.sum(front_u + back_u,axis=0)\n\n if uStat:\n yGrad = yGrad * 2 / (n*(n-1)*m);\n else:\n yGrad = yGrad * 2 / (n**2 * m);\n\n if regCoeff > 0 :\n H_y = cdist(self.y, self.y)**2\n Kxy_y = np.exp( -H_y / h**2 / 2)\n sumKxy_y = np.sum(Kxy_y,axis=1)\n yReg = (self.y * np.tile(np.array([sumKxy_y]).T,[1,d]) - np.matmul(Kxy_y,self.y))/(h**2 * m)\n\n yGrad = yGrad + regCoeff * yReg\n return (yGrad)\n\n '''\n Compute gradient update for h\n '''\n def svgd_kernel_gradh(self, h=-1, uStat=True):\n n,d = self.thetaSubset.shape\n m = self.y.shape[0]\n\n H = cdist(self.thetaSubset, self.y)**2\n\n if h < 0: # if h < 0, using median trick\n h = np.median(H)\n h = np.sqrt(0.5 * h / 
np.log(n+1))\n\n # compute the rbf kernel\n Kxy = np.exp( -H / h**2 / 2)\n\n hGrad = 0;\n\n # For each induced point\n for yInd in range(m):\n Kxy_cur = Kxy[:,yInd]\n H_cur = H[:,yInd]\n xmy = (self.thetaSubset - np.tile(self.y[yInd,:],[n,1]))/h**2\n Sqxxmy = self.SqxSubset - xmy\n\n part2 = np.tile(np.array([Kxy_cur]).T,[1,d]) * Sqxxmy\n part1_1 = np.tile(np.array([H_cur/h**3]).T,[1,d]) * part2\n part1_2 = np.tile(np.array([Kxy_cur]).T,[1,d]) * (2*xmy / h**3)\n part = np.matmul(part1_1 + part1_2, part2.T)\n hGrad = hGrad + np.sum(np.sum(part,axis=1))\n\n if uStat:\n front_u = (Kxy_cur**2) * (H_cur/h**3) * np.sum(Sqxxmy**2, axis=1)\n back_u = np.sum((2*xmy/h**3) * Sqxxmy,axis=1)\n hGrad = hGrad - np.sum(Kxy_cur**2 * (front_u + back_u),axis=0)\n\n if uStat:\n hGrad = hGrad * 2 / (n*(n-1)*m);\n else:\n hGrad = hGrad * 2 / (n**2 * m);\n\n return (hGrad)\n\n '''\n Induced Points Method\n '''\n def svgd_kernel_inducedPoints(self, h=-1, uStat=True, regCoeff=0.1, adver = False, adverMaxIter = 5, stepsize = 1e-3, alpha = 0.9):\n\n n,d = self.theta.shape\n m = self.y.shape[0]\n\n # If we want to perform EM\n if adver == True:\n # Perform update emMaxIter number of times\n fudge_factor = 1e-6\n\n for adverIter in range(0,adverMaxIter):\n grad_y = self.svgd_kernel_grady(h=h,uStat=uStat, regCoeff=regCoeff)\n [update_y,hist_grad] = self.get_adamUpdate(adverIter, grad_y, self.y_historical_grad,stepsize = stepsize, alpha = alpha)\n self.y = self.y + update_y\n self.y_historical_grad = hist_grad\n\n grad_h = self.svgd_kernel_gradh(h=h,uStat=uStat)\n [update_h, hist_grad] = self.get_adamUpdate(adverIter, grad_h, self.h_historical_grad,stepsize = stepsize, alpha = alpha)\n h = h + update_h\n self.h_historical_grad = hist_grad\n\n pairwise_dists = cdist(self.theta, self.y)**2\n\n # compute the rbf kernel\n Kxy = np.exp( -pairwise_dists / h**2 / 2)\n\n innerTerm_1 = np.matmul(Kxy.T, (self.Sqx - self.theta/ h**2))\n sumkxy = np.sum(Kxy, axis=0)\n innerTerm_2 = np.multiply(np.tile(np.array([sumkxy]).T,(1,d)), self.y/h**2)\n innerTerm = (innerTerm_1 + innerTerm_2)/n\n\n gradTheta = np.matmul(Kxy, innerTerm)/m\n return (gradTheta)\n\n '''\n Pack all parameters in our model\n '''\n def pack_weights(self, w1, b1, w2, b2, loggamma, loglambda):\n params = np.concatenate([w1.flatten(), b1, w2, [b2], [loggamma],[loglambda]])\n return params\n\n '''\n Unpack all parameters in our model\n '''\n def unpack_weights(self, z):\n w = z\n w1 = np.reshape(w[:self.d*self.n_hidden], [self.d, self.n_hidden])\n b1 = w[self.d*self.n_hidden:(self.d+1)*self.n_hidden]\n\n w = w[(self.d+1)*self.n_hidden:]\n w2, b2 = w[:self.n_hidden], w[-3]\n\n # the last two parameters are log variance\n loggamma, loglambda= w[-2], w[-1]\n\n return (w1, b1, w2, b2, loggamma, loglambda)\n\n\n '''\n Evaluating testing rmse and log-likelihood, which is the same as in PBP\n Input:\n -- X_test: unnormalized testing feature set\n -- y_test: unnormalized testing labels\n '''\n def evaluation(self, X_test, y_test):\n # normalization\n X_test = self.normalization(X_test)\n\n # average over the output\n pred_y_test = np.zeros([self.M, len(y_test)])\n prob = np.zeros([self.M, len(y_test)])\n\n '''\n Since we have M particles, we use a Bayesian view to calculate rmse and log-likelihood\n '''\n for i in range(self.M):\n w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.thetaCopy[i, :])\n pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train\n prob[i, :] = np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( 
-1 * (np.power(pred_y_test[i, :] - y_test, 2) / 2) * np.exp(loggamma) )\n pred = np.mean(pred_y_test, axis=0)\n\n # evaluation\n svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))\n svgd_ll = np.mean(np.log(np.mean(prob, axis = 0)))\n\n return (svgd_rmse, svgd_ll)\n\n '''\n Returns the result of the iterations\n '''\n def getResults(self):\n return (self.rmse_overTime, self.llh_overTime, self.iter_overTime)\n\nif __name__ == '__main__':\n\n print ('Theano', theano.version.version) #our implementation is based on theano 0.8.2\n\n np.random.seed(1)\n ''' load data file '''\n\n for dataInd in range(0,1):\n if dataInd == 0:\n data = np.loadtxt('../data/boston_housing')\n datasetName = 'Boston Housing'\n elif dataInd == 1:\n data = np.loadtxt(open(\"../data/Concrete.csv\", \"rb\"), delimiter=\",\", skiprows=1) # Concrete\n datasetName = 'Concrete'\n elif dataInd == 2:\n data = np.loadtxt(open(\"../data/Energy.csv\", \"rb\"), delimiter=\",\", skiprows=1) # Energy\n datasetName = 'Energy'\n elif dataInd == 3:\n data = np.loadtxt(open(\"../data/kin8nm.csv\", \"rb\"), delimiter=\",\", skiprows=0) # Kin8nm Dataset\n datasetName = 'Kin8nm'\n print('-------------------',datasetName,'-------------------')\n\n if dataInd == 2:\n X_input = data[ :, range(data.shape[ 1 ] - 2) ]\n y_input = data[ :, data.shape[ 1 ] - 2 ]\n else:\n # Please make sure that the last column is the label and the other columns are features\n X_input = data[ :, range(data.shape[ 1 ] - 1) ]\n y_input = data[ :, data.shape[ 1 ] - 1 ]\n\n ''' build the training and testing data set'''\n train_ratio = 0.9 # We create the train and test sets with 90% and 10% of the data\n permutation = np.arange(X_input.shape[0])\n random.shuffle(permutation)\n\n size_train = int(np.round(X_input.shape[ 0 ] * train_ratio))\n index_train = permutation[ 0 : size_train]\n index_test = permutation[ size_train : ]\n\n X_train, y_train = X_input[ index_train, : ], y_input[ index_train ]\n X_test, y_test = X_input[ index_test, : ], y_input[ index_test ]\n\n #names = ['Base','Subset','Subset-CF','Induced Points'];\n names = ['Base','Subset','Subset-CF','Induced Points','Adversarial Induced Points'];\n #names = ['Base','Induced Points','Adversarial Induced Points'];\n numIter = 10\n maxTime = 100\n numTimeSteps = 20\n modelNum = len(names);\n\n svgd_rmse_final = np.zeros((modelNum, numTimeSteps))\n svgd_ll_final = np.zeros((modelNum, numTimeSteps))\n svgd_iter_final = np.zeros((modelNum, numTimeSteps))\n\n ''' Training Bayesian neural network with SVGD '''\n #batch_size, n_hidden, max_iter, numParticles = 100, 50, 2000, 30 # max_iter is a trade-off between running time and performance\n batch_size, n_hidden, max_iter, numParticles = 100, 50, 100000, 20 # max_iter is a trade-off between running time and performance\n max_iterRS = 100000\n max_iterRSCF = 100000\n max_iterIP = 100000\n max_iterAIP = 100000\n m, adverMaxIter = 10,1\n max_iters = [max_iter, max_iterRS, max_iterRSCF, max_iterIP];\n\n np.set_printoptions(precision=4)\n for modelInd in range(0,5):\n for t in range(0,numIter):\n np.random.seed(t)\n print(names[modelInd], ': Iteration ', t+1, '/', numIter)\n start = time.time()\n\n if modelInd == 0 :# base\n svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime,\n batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iter,\n method = 'none')\n elif modelInd == 1 : # Subset\n svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime,\n 
batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterRS,\n method = 'subparticles',m=m,cf=False)\n elif modelInd == 2 : # Subset (CF)\n svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime,\n batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterRSCF,\n method = 'subparticles',m=m,cf=True)\n elif modelInd == 3 : # Induced Points\n svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime,\n batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterIP,\n method = 'inducedPoints',m=m, uStat = True, adver=False)\n elif modelInd == 4 : # Induced Points (Adver)\n svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime,\n batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterAIP,\n method = 'inducedPoints',m=m, uStat = True, adver=True, adverMaxIter = adverMaxIter)\n\n [rmseResult, llResult, iterResult] = svgd.getResults()\n\n svgd_rmse_final[modelInd,:] = svgd_rmse_final[modelInd,:] + rmseResult / numIter\n svgd_ll_final[modelInd,:] = svgd_ll_final[modelInd,:] + llResult / numIter\n svgd_iter_final[modelInd,:] = svgd_iter_final[modelInd,:] + np.round(iterResult / numIter)\n\n\n np.save('./subset_1adver_rmseResult_'+datasetName,svgd_rmse_final)\n np.save('./subset_1adver_llResult_'+datasetName,svgd_ll_final)\n np.save('./subset_1adver_iterResult_'+datasetName,svgd_iter_final)\n\n #print('--------------------------------------------------------------------------------')\n #print('Dataset : ', datasetName)\n #print('[Options] : M=',numParticles, ', m=',m, ', max_iter=', max_iter, ', n_hidden=',n_hidden, ', batch_size=',batch_size)\n #print('--------------------------------------------------------------------------------')\n #for modelInd in range(0,modelNum):\n # print (names[modelInd],' [Average of', numIter, 'runs] : ', max_iters[modelInd], ' iterations')\n # print ('[rmse] Mean : ', \"%.4f\" % np.mean(svgd_rmse_final[modelInd,]), ' st.dev : ', \"%.4f\" % np.std(svgd_rmse_final[modelInd,]) )\n # print ('[llik] Mean : ', \"%.4f\" % np.mean(svgd_ll_final[modelInd,]), ' st.dev : ', \"%.4f\" % np.std(svgd_ll_final[modelInd,]) )\n # print ('[time] Mean : ', \"%.2f\" % np.mean(svgd_time_final[modelInd,]), ' st.dev : ', \"%.2f\" % np.std(svgd_time_final[modelInd,]), '\\n')\n" ]
[ [ "numpy.random.choice", "numpy.median", "numpy.set_printoptions", "numpy.copy", "numpy.tile", "numpy.exp", "numpy.mean", "numpy.min", "numpy.multiply", "numpy.full", "numpy.linalg.norm", "numpy.log", "numpy.save", "numpy.arange", "numpy.sqrt", "numpy.linalg.inv", "numpy.array", "numpy.matmul", "numpy.zeros", "numpy.reshape", "numpy.round", "scipy.spatial.distance.squareform", "numpy.random.randn", "numpy.identity", "numpy.std", "numpy.loadtxt", "numpy.power", "scipy.spatial.distance.cdist", "scipy.spatial.distance.pdist", "numpy.random.seed", "numpy.sum", "numpy.random.gamma", "numpy.ones", "numpy.matlib.repmat" ] ]
mehmetilker/spacy-stanfordnlp
[ "99d636fc19aa80f19ec111e8862b39323c038093" ]
[ "spacy_stanfordnlp/language.py" ]
[ "# coding: utf8\nfrom spacy.symbols import POS, TAG, DEP, LEMMA, HEAD\nfrom spacy.language import Language\nfrom spacy.tokens import Doc\nfrom spacy.util import get_lang_class\nimport numpy\nimport re\n\n\nclass StanfordNLPLanguage(Language):\n def __init__(self, snlp, meta=None, **kwargs):\n \"\"\"Initialize the Language class.\n\n Instead of \"en\" etc. we call the language \"stanfordnlp_en\" to not\n cause conflicts with spaCy's built-in languages. Using entry points,\n this also allows serializing and deserializing the language class\n and \"lang\": \"stanfordnlp_en\" in the meta.json will automatically\n instantiate this class if this package is available.\n\n snlp (stanfordnlp.Pipeline): The loaded StanfordNLP pipeline.\n kwargs: Optional config parameters.\n RETURNS (spacy.language.Language): The nlp object.\n \"\"\"\n lang = snlp.config[\"lang\"]\n self.lang = \"stanfordnlp_\" + lang\n self.Defaults = get_defaults(lang)\n self.vocab = self.Defaults.create_vocab()\n self.tokenizer = Tokenizer(snlp, self.vocab)\n self.pipeline = []\n self.max_length = kwargs.get(\"max_length\", 10 ** 6)\n self._meta = (\n {\"lang\": self.lang, \"stanfordnlp\": snlp.config}\n if meta is None\n else dict(meta)\n )\n self._path = None\n self._optimizer = None\n\n def make_doc(self, text):\n return self.tokenizer(text)\n\n\ndef get_defaults(lang):\n \"\"\"Get the language-specific defaults, if available in spaCy. This allows\n using lexical attribute getters that depend on static language data, e.g.\n Token.like_num, Token.is_stop, Doc.noun_chunks etc.\n\n lang (unicode): The language code.\n RETURNS (Language.Defaults): The language defaults.\n \"\"\"\n try:\n lang_cls = get_lang_class(lang)\n return lang_cls.Defaults\n except ImportError:\n return Language.Defaults\n\n\nclass Tokenizer(object):\n \"\"\"Because we're only running the StanfordNLP pipeline once and don't split\n it up into spaCy pipeline components, we'll set all the attributes within\n a custom tokenizer. The tokenizer is currently expected to\n implement serialization methods so we're mocking them up here. 
When loading\n the serialized nlp object back in, you can pass `snlp` to spacy.load:\n\n >>> nlp.to_disk('/path/to/model')\n >>> nlp = spacy.load('/path/to/model', snlp=snlp)\n \"\"\"\n\n to_disk = lambda self, *args, **kwargs: None\n from_disk = lambda self, *args, **kwargs: None\n to_bytes = lambda self, *args, **kwargs: None\n from_bytes = lambda self, *args, **kwargs: None\n _ws_pattern = re.compile(r\"\\s+\")\n\n def __init__(self, snlp, vocab):\n \"\"\"Initialize the tokenizer.\n\n snlp (stanfordnlp.Pipeline): The initialized StanfordNLP pipeline.\n vocab (spacy.vocab.Vocab): The vocabulary to use.\n RETURNS (Tokenizer): The custom tokenizer.\n \"\"\"\n self.snlp = snlp\n self.vocab = vocab\n\n def __call__(self, text):\n \"\"\"Convert a StanfordNLP Doc to a spaCy Doc.\n\n text (unicode): The text to process.\n RETURNS (spacy.tokens.Doc): The spaCy Doc object.\n \"\"\"\n snlp_doc = self.snlp(text)\n text = snlp_doc.text\n tokens, heads = self.get_tokens_with_heads(snlp_doc)\n if not len(tokens):\n raise ValueError(\"No tokens available.\")\n words = []\n spaces = []\n pos = []\n tags = []\n deps = []\n lemmas = []\n offset = 0\n is_aligned = self.check_aligned(text, tokens)\n for i, token in enumerate(tokens):\n span = text[offset:]\n if not len(span):\n break\n while len(span) and span[0].isspace():\n # If we encounter leading whitespace, skip one character ahead\n offset += 1\n span = text[offset:]\n words.append(token.text)\n # Make sure all strings are in the vocabulary\n pos.append(self.vocab.strings.add(token.upos or \"\"))\n tags.append(self.vocab.strings.add(token.xpos or \"\"))\n deps.append(self.vocab.strings.add(token.dependency_relation or \"\"))\n lemmas.append(self.vocab.strings.add(token.lemma or \"\"))\n offset += len(token.text)\n span = text[offset:]\n if i == len(tokens) - 1:\n spaces.append(False)\n elif not is_aligned:\n spaces.append(True)\n else:\n next_token = tokens[i + 1]\n spaces.append(not span.startswith(next_token.text))\n attrs = [POS, TAG, DEP, HEAD]\n array = numpy.array(list(zip(pos, tags, deps, heads)), dtype=\"uint64\")\n doc = Doc(self.vocab, words=words, spaces=spaces).from_array(attrs, array)\n # Overwrite lemmas separately to prevent them from being overwritten by spaCy\n lemma_array = numpy.array([[lemma] for lemma in lemmas], dtype=\"uint64\")\n doc.from_array([LEMMA], lemma_array)\n if any(pos) and any(tags):\n doc.is_tagged = True\n if any(deps):\n doc.is_parsed = True\n return doc\n\n def get_tokens_with_heads(self, snlp_doc):\n \"\"\"Flatten the tokens in the StanfordNLP Doc and extract the token indices\n of the sentence start tokens to set is_sent_start.\n\n snlp_doc (stanfordnlp.Document): The processed StanfordNLP doc.\n RETURNS (list): The tokens (words).\n \"\"\"\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.governor:\n head = word.governor + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads\n\n def check_aligned(self, text, tokens):\n token_texts = \"\".join(t.text for t in tokens)\n return re.sub(self._ws_pattern, \"\", text) == token_texts\n" ]
[ [ "numpy.array" ] ]
herjy/proxmin
[ "6dadff29e3e781986f4e38c56f40dbb31a58a428" ]
[ "proxmin/nmf.py" ]
[ "from __future__ import print_function, division\nimport logging\nimport numpy as np\nfrom . import operators\nfrom . import utils\nfrom . import algorithms\n\ndef delta_data(A, S, Y, W=1):\n return W*(A.dot(S) - Y)\n\ndef grad_likelihood_A(A, S, Y, W=1):\n D = delta_data(A, S, Y, W=W)\n return D.dot(S.T)\n\ndef grad_likelihood_S(S, A, Y, W=1):\n D = delta_data(A, S, Y, W=W)\n return A.T.dot(D)\n\n# executes one proximal step of likelihood gradient, followed by prox_g\ndef prox_likelihood_A(A, step, S=None, Y=None, prox_g=None, W=1):\n return prox_g(A - step*grad_likelihood_A(A, S, Y, W=W), step)\n\ndef prox_likelihood_S(S, step, A=None, Y=None, prox_g=None, W=1):\n return prox_g(S - step*grad_likelihood_S(S, A, Y, W=W), step)\n\ndef prox_likelihood(X, step, Xs=None, j=None, Y=None, WA=None, WS=None, prox_S=operators.prox_id, prox_A=operators.prox_id):\n if j == 0:\n return prox_likelihood_A(X, step, S=Xs[1], Y=Y, prox_g=prox_A, W=WA)\n else:\n return prox_likelihood_S(X, step, A=Xs[0], Y=Y, prox_g=prox_S, W=WS)\n\nclass Steps_AS:\n def __init__(self, WA=1, WS=1, slack=0.1, max_stride=100):\n \"\"\"Helper class to compute the Lipschitz constants of grad f.\n\n The __call__ function compute the spectral norms of A or S, which\n determine the Lipschitz constant of the respective update steps.\n\n If a weight matrix is used, the stepsize will be upper bounded by\n assuming the maximum value of the weights. In the case of varying\n weights, it is generally advised to normalize the weight matrix\n differently for the A and S updates, therefore two maximum numbers\n (WAMax, WSmax) can be set.\n\n Because the spectral norm is expensive to compute, it will only update\n the step_size if relative changes of L exceed slack/2.\n If not, which is usually the case after only a few iterations, it will\n report a previous value for the next several iterations. The stride\n between updates is set by\n stride -> stride * (slack/2 / rel_error\n i.e. 
it increases more strongly if the rel_error is much below the\n slack budget.\n \"\"\"\n import scipy.sparse\n if WA is 1:\n self.WA = WA\n else:\n self.WA = scipy.sparse.diags(WA.reshape(-1))\n if WS is 1:\n self.WS = WS\n else:\n self.WS = scipy.sparse.diags(WS.reshape(-1))\n\n # two independent caches for Lipschitz constants\n self._cb = [utils.ApproximateCache(self._one_over_lipschitzA, slack=slack, max_stride=max_stride),\n utils.ApproximateCache(self._one_over_lipschitzS, slack=slack, max_stride=max_stride)]\n\n def _one_over_lipschitzA(self, Xs):\n A,S = Xs\n if self.WA is 1:\n return 1./utils.get_spectral_norm(S.T)\n else: # full weight matrix, need to serialize S along k\n import scipy.sparse\n Ss = scipy.sparse.block_diag([S.T for b in range(len(A))])\n # Lipschitz constant for grad_A = || S Sigma_1 S.T||_s\n SSigma_1S = Ss.T.dot(self.WA.dot(Ss))\n LA = np.real(scipy.sparse.linalg.eigs(SSigma_1S, k=1, return_eigenvectors=False)[0])\n return 1./LA\n\n def _one_over_lipschitzS(self, Xs):\n A,S = Xs\n if self.WA is 1:\n return 1./utils.get_spectral_norm(A)\n else:\n import scipy.sparse\n N = S.shape[1]\n As = scipy.sparse.bmat([[scipy.sparse.identity(N) * A[b,k] for k in range(A.shape[1])] for b in range(A.shape[0])])\n ASigma_1A = As.T.dot(self.WS.dot(As))\n LS = np.real(scipy.sparse.linalg.eigs(ASigma_1A, k=1, return_eigenvectors=False)[0])\n return 1./LS\n\n def __call__(self, j, Xs):\n return self._cb[j](Xs)\n\ndef normalizeMatrix(M, axis):\n if axis == 1:\n norm = np.sum(M, axis=axis)\n norm = np.broadcast_to(norm, M.T.shape)\n norm = norm.T\n else:\n norm = np.sum(M, axis=axis)\n norm = np.broadcast_to(norm, M.shape)\n return norm\n\ndef nmf(Y, A, S, W=None, prox_A=operators.prox_plus, prox_S=operators.prox_plus, proxs_g=None, steps_g=None, Ls=None, slack=0.9, update_order=None, steps_g_update='steps_f', max_iter=1000, e_rel=1e-3, e_abs=0, traceback=None):\n \"\"\"Non-negative matrix factorization.\n\n This method solves the NMF problem\n minimize || Y - AS ||_2^2\n under an arbitrary number of constraints on A and/or S.\n\n Args:\n Y: target matrix MxN\n A: initial amplitude matrix MxK, will be updated\n S: initial source matrix KxN, will be updated\n W: (optional weight matrix MxN)\n prox_A: direct projection contraint of A\n prox_S: direct projection constraint of S\n proxs_g: list of constraints for A or S for ADMM-type optimization\n [[prox_A_0, prox_A_1...],[prox_S_0, prox_S_1,...]]\n steps_g: specific value of step size for proxs_g (experts only!)\n Ls: list of linear operators for the constraint functions proxs_g\n If set, needs to have same format as proxs_g.\n Matrices can be numpy.array, scipy.sparse, or None (for identity).\n slack: tolerance for (re)evaluation of Lipschitz constants\n See Steps_AS() for details.\n update_order: list of factor indices in update order\n j=0 -> A, j=1 -> S\n max_iter: maximum iteration number, irrespective of current residuals\n e_rel: relative error threshold for primal and dual residuals\n e_abs: absolute error threshold for primal and dual residuals\n traceback: utils.Traceback to hold variable histories\n\n Returns:\n converged: convence test for A,S\n errors: difference between latest and previous iterations for A,S\n\n See also:\n algorithms.bsdmm for update_order and steps_g_update\n utils.AcceleratedProxF for Nesterov acceleration\n\n Reference:\n Moolekamp & Melchior, 2017 (arXiv:1708.09066)\n\n \"\"\"\n\n # create stepsize callback, needs max of W\n if W is not None:\n # normalize in pixel and band directions to have 
similar update speeds\n WA = normalizeMatrix(W, 1)\n WS = normalizeMatrix(W, 0)\n else:\n WA = WS = 1\n steps_f = Steps_AS(WA=WA, WS=WS, slack=slack)\n\n # gradient step, followed by direct application of prox_S or prox_A\n from functools import partial\n f = partial(prox_likelihood, Y=Y, WA=WA, WS=WS, prox_S=prox_S, prox_A=prox_A)\n\n X = [A, S]\n # use accelerated block-PGM if there's no proxs_g\n if proxs_g is None or not utils.hasNotNone(proxs_g):\n return algorithms.bpgm(X, f, steps_f, accelerated=True, update_order=update_order, max_iter=max_iter, e_rel=e_rel, traceback=traceback)\n else:\n return algorithms.bsdmm(X, f, steps_f, proxs_g, steps_g=steps_g, Ls=Ls, update_order=update_order, steps_g_update=steps_g_update, max_iter=max_iter, e_rel=e_rel, e_abs=e_abs, traceback=traceback)\n" ]
[ [ "numpy.sum", "numpy.broadcast_to" ] ]
gehilley/GlobalSteepness
[ "62a1a5b66adb230d5bbbc004aa5d2c5b618a2fdd" ]
[ "GlobalDataset/bin/steepness_cdf.py" ]
[ "import numpy as np\nimport dem as d\nfrom numpy.fft import fft2, ifft2, ifftshift\n\ndef calc_cdf(ks_grids, area_grids, vmax=400, R2_cutoff = 0.0, area_cutoff = 2E6, density_weighting_distance = False):\n \n ks_vals = np.array([])\n n_vals = np.array([])\n R2_vals = np.array([])\n density_vals = np.array([])\n for (ks_grid, area_grid) in zip(ks_grids, area_grids): \n i = np.where(~np.isnan(ks_grid._griddata) & (ks_grid._griddata >= 0) & (area_grid._griddata >= area_cutoff))\n ks_vals = np.concatenate((ks_vals, ks_grid._griddata[i]))\n n_vals = np.concatenate((n_vals, ks_grid._n[i]))\n R2_vals = np.concatenate((R2_vals, ks_grid._r2[i]))\n if density_weighting_distance is not False:\n template_grid = np.zeros_like(ks_grid._griddata)\n (ny, nx) = template_grid.shape\n (cy, cx) = (ny/2.0, nx/2.0)\n dy, dx = np.meshgrid(np.arange(0,ny)-cy, np.arange(0,nx)-cx, indexing = 'ij')\n d = np.sqrt(np.power(dx,2) + np.power(dy,2))\n j = np.where(d <= density_weighting_distance)\n template_grid[j] = 1.0\n de = area_grid._area_per_pixel()\n ks_bin = (~np.isnan(ks_grid._griddata) & (area_grid >= area_cutoff)).astype(float)*de\n template_F = fft2(template_grid)\n density_weight = np.real(ifftshift(ifft2(template_F*fft2(de))) / ifftshift(ifft2(template_F*fft2(ks_bin))))\n density_vals = np.concatenate((density_vals, density_weight[i]))\n \n i = np.where(R2_vals >= R2_cutoff)\n ks_vals = ks_vals[i]\n n_vals = n_vals[i]\n if density_weighting_distance is not False:\n density_vals = density_vals[i]\n \n i = np.argsort(ks_vals)\n ks_vals = ks_vals[i]\n n_vals = n_vals[i]\n \n weights = 1 / n_vals\n \n if density_weighting_distance is not False:\n density_vals = density_vals[i]\n weights *= density_vals\n \n bins = np.concatenate((np.array([-0.5]), np.arange(0.5, vmax, 1),np.array([vmax])+0.5, np.array([np.max(ks_vals[:])])))\n hist, _ = np.histogram(ks_vals, bins = bins, weights = weights)\n bin_centers = np.concatenate((np.arange(0,vmax,1),np.array([vmax])))\n cdf = np.cumsum(hist)\n cdf /= cdf[-1]\n cdf = cdf[0:-1]\n return bin_centers, cdf" ]
[ [ "numpy.concatenate", "numpy.histogram", "numpy.array", "numpy.zeros_like", "numpy.max", "numpy.fft.fft2", "numpy.isnan", "numpy.where", "numpy.arange", "numpy.power", "numpy.argsort", "numpy.cumsum" ] ]
alexFilin/keras-maskrcnn
[ "fa3d7f8e81d0ffb036fde5e134dcdbf35c206fc1" ]
[ "keras_maskrcnn/bin/debug.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport cv2\nimport numpy as np\n\nfrom keras_retinanet.utils.transform import random_transform_generator\nfrom keras_retinanet.utils.visualization import draw_annotations, draw_boxes, draw_caption\nfrom keras_retinanet.utils.colors import label_color\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin\n __package__ = \"keras_maskrcnn.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom ..utils.visualization import draw_mask\n\n\ndef create_generator(args):\n # create random transform generator for augmenting training data\n transform_generator = random_transform_generator(\n # min_rotation=-0.1,\n # max_rotation=0.1,\n # min_translation=(-0.1, -0.1),\n # max_translation=(0.1, 0.1),\n # min_shear=-0.1,\n # max_shear=0.1,\n # min_scaling=(0.9, 0.9),\n # max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n # flip_y_chance=0.5,\n )\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n generator = CocoGenerator(\n args.coco_path,\n args.coco_set,\n transform_generator=transform_generator\n )\n elif args.dataset_type == 'csv':\n from ..preprocessing.csv_generator import CSVGenerator\n\n generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return generator\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description='Debug script for a RetinaNet-MaskRCNN network.')\n subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')\n subparsers.required = True\n\n coco_parser = subparsers.add_parser('coco')\n coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')\n coco_parser.add_argument('--coco-set', help='Name of the set to show (defaults to val2017).', default='val2017')\n\n csv_parser = subparsers.add_parser('csv')\n csv_parser.add_argument('annotations', help='Path to a CSV file containing annotations for evaluation.')\n csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')\n\n parser.add_argument('-l', '--loop', help='Loop forever, even if the dataset is exhausted.', action='store_true')\n parser.add_argument('--no-resize', help='Disable image resizing.', dest='resize', action='store_false')\n parser.add_argument('--anchors', help='Show positive anchors on the image.', action='store_true')\n parser.add_argument('--annotations', help='Show annotations on the image. 
Green annotations have anchors, red annotations don\\'t and therefore don\\'t contribute to training.', action='store_true')\n parser.add_argument('--masks', help='Show annotated masks on the image.', action='store_true')\n parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')\n\n return parser.parse_args(args)\n\n\ndef run(generator, args):\n # display images, one at a time\n for i in range(generator.size()):\n # load the data\n image = generator.load_image(i)\n annotations, masks = generator.load_annotations(i)\n\n # apply random transformations\n if args.random_transform:\n image, annotations, masks = generator.random_transform_group_entry(image, annotations, masks)\n\n # resize the image and annotations\n if args.resize:\n image, image_scale = generator.resize_image(image)\n annotations[:, :4] *= image_scale\n for m in range(len(masks)):\n masks[m], _ = generator.resize_image(masks[m])\n\n # draw anchors on the image\n if args.anchors:\n labels, _, anchors = generator.compute_anchor_targets(image.shape, annotations, generator.num_classes())\n draw_boxes(image, anchors[np.max(labels, axis=1) == 1], (255, 255, 0), thickness=1)\n\n # draw annotations on the image\n if args.annotations:\n # draw annotations in red\n draw_annotations(image, annotations, color=(0, 0, 255), label_to_name=generator.label_to_name)\n\n # draw regressed anchors in green to override most red annotations\n # result is that annotations without anchors are red, with anchors are green\n labels, boxes, _ = generator.compute_anchor_targets(image.shape, annotations, generator.num_classes())\n draw_boxes(image, boxes[np.max(labels, axis=1) == 1], (0, 255, 0))\n\n # Draw masks over the image with random colours\n if args.masks:\n for m in range(len(masks)):\n # crop the mask with the related bbox size, and then draw them\n box = annotations[m, :4].astype(int)\n mask = masks[m][box[1]:box[3], box[0]:box[2]]\n draw_mask(image, box, mask, label_color(annotations[m, 4].astype(int)))\n # add the label caption\n caption = '{}'.format(generator.label_to_name(annotations[m, 4]))\n draw_caption(image, box, caption)\n\n cv2.imshow('Image', image)\n if cv2.waitKey() == ord('q'):\n return False\n return True\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # create the generator\n generator = create_generator(args)\n\n # create the display window\n cv2.namedWindow('Image', cv2.WINDOW_NORMAL)\n\n if args.loop:\n while run(generator, args):\n pass\n else:\n run(generator, args)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.max" ] ]
Mithilesh1609/assembled-cnn
[ "e0227eecbf3a7fd4fe99a954068c85ffbed94c53" ]
[ "official/utils/logs/hooks_helper.py" ]
[ "# This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.\n# ==========================================================================================\n# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.\n# ==========================================================================================\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Hooks helper to return a list of TensorFlow hooks for training by name.\n\nMore hooks can be added to this set. To add a new hook, 1) add the new hook to\nthe registry in HOOKS, 2) add a corresponding function that parses out necessary\nparameters.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\nfrom official.utils.logs import hooks\nfrom official.utils.logs import logger\nfrom official.utils.logs import metric_hook\n\n_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate',\n 'cross_entropy',\n 'train_ece',\n 'train_accuracy'])\n\n\ndef get_train_hooks(name_list, use_tpu=False, **kwargs):\n \"\"\"Factory for getting a list of TensorFlow hooks for training by name.\n\n Args:\n name_list: a list of strings to name desired hook classes. Allowed:\n LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined\n as keys in HOOKS\n use_tpu: Boolean of whether computation occurs on a TPU. This will disable\n hooks altogether.\n **kwargs: a dictionary of arguments to the hooks.\n\n Returns:\n list of instantiated hooks, ready to be used in a classifier.train call.\n\n Raises:\n ValueError: if an unrecognized name is passed.\n \"\"\"\n\n if not name_list:\n return []\n\n if use_tpu:\n tf.logging.warning(\"hooks_helper received name_list `{}`, but a TPU is \"\n \"specified. No hooks will be used.\".format(name_list))\n return []\n\n train_hooks = []\n for name in name_list:\n hook_name = HOOKS.get(name.strip().lower())\n if hook_name is None:\n raise ValueError('Unrecognized training hook requested: {}'.format(name))\n else:\n train_hooks.append(hook_name(**kwargs))\n\n return train_hooks\n\n\ndef get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs): # pylint: disable=unused-argument\n \"\"\"Function to get LoggingTensorHook.\n\n Args:\n every_n_iter: `int`, print the values of `tensors` once every N local\n steps taken on the current worker.\n tensors_to_log: List of tensor names or dictionary mapping labels to tensor\n names. 
If not set, log _TENSORS_TO_LOG by default.\n **kwargs: a dictionary of arguments to LoggingTensorHook.\n\n Returns:\n Returns a LoggingTensorHook with a standard set of tensors that will be\n printed to stdout.\n \"\"\"\n if tensors_to_log is None:\n tensors_to_log = _TENSORS_TO_LOG\n\n return tf.train.LoggingTensorHook(\n tensors=tensors_to_log,\n every_n_iter=every_n_iter)\n\n\ndef get_profiler_hook(model_dir, save_steps=1000, **kwargs): # pylint: disable=unused-argument\n \"\"\"Function to get ProfilerHook.\n\n Args:\n model_dir: The directory to save the profile traces to.\n save_steps: `int`, print profile traces every N steps.\n **kwargs: a dictionary of arguments to ProfilerHook.\n\n Returns:\n Returns a ProfilerHook that writes out timelines that can be loaded into\n profiling tools like chrome://tracing.\n \"\"\"\n return tf.train.ProfilerHook(save_steps=save_steps, output_dir=model_dir)\n\n\ndef get_examples_per_second_hook(every_n_steps=100,\n batch_size=128,\n warm_steps=5,\n **kwargs): # pylint: disable=unused-argument\n \"\"\"Function to get ExamplesPerSecondHook.\n\n Args:\n every_n_steps: `int`, print current and average examples per second every\n N steps.\n batch_size: `int`, total batch size used to calculate examples/second from\n global time.\n warm_steps: skip this number of steps before logging and running average.\n **kwargs: a dictionary of arguments to ExamplesPerSecondHook.\n\n Returns:\n Returns a ProfilerHook that writes out timelines that can be loaded into\n profiling tools like chrome://tracing.\n \"\"\"\n return hooks.ExamplesPerSecondHook(\n batch_size=batch_size, every_n_steps=every_n_steps,\n warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())\n\n\ndef get_logging_metric_hook(tensors_to_log=None,\n every_n_secs=600,\n **kwargs): # pylint: disable=unused-argument\n \"\"\"Function to get LoggingMetricHook.\n\n Args:\n tensors_to_log: List of tensor names or dictionary mapping labels to tensor\n names. If not set, log _TENSORS_TO_LOG by default.\n every_n_secs: `int`, the frequency for logging the metric. Default to every\n 10 mins.\n\n Returns:\n Returns a LoggingMetricHook that saves tensor values in a JSON format.\n \"\"\"\n if tensors_to_log is None:\n tensors_to_log = _TENSORS_TO_LOG\n return metric_hook.LoggingMetricHook(\n tensors=tensors_to_log,\n metric_logger=logger.get_benchmark_logger(),\n every_n_secs=every_n_secs)\n\n\n# A dictionary to map one hook name and its corresponding function\nHOOKS = {\n 'loggingtensorhook': get_logging_tensor_hook,\n 'profilerhook': get_profiler_hook,\n 'examplespersecondhook': get_examples_per_second_hook,\n 'loggingmetrichook': get_logging_metric_hook,\n}\n" ]
[ [ "tensorflow.train.ProfilerHook", "tensorflow.train.LoggingTensorHook" ] ]
dorranh/airflow
[ "1a9a2cadcf8606cfcb729d1323dd33dfacc64633" ]
[ "tests/providers/amazon/aws/operators/test_hive_to_dynamodb.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport datetime\nimport json\nimport unittest\nfrom unittest import mock\n\nimport pandas as pd\n\nimport airflow.providers.amazon.aws.operators.hive_to_dynamodb\nfrom airflow import DAG\nfrom airflow.providers.amazon.aws.hooks.aws_dynamodb_hook import AwsDynamoDBHook\n\nDEFAULT_DATE = datetime.datetime(2015, 1, 1)\nDEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()\nDEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]\n\ntry:\n from moto import mock_dynamodb2\nexcept ImportError:\n mock_dynamodb2 = None\n\n\nclass TestHiveToDynamoDBTransferOperator(unittest.TestCase):\n\n def setUp(self):\n args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n dag = DAG('test_dag_id', default_args=args)\n self.dag = dag\n self.sql = 'SELECT 1'\n self.hook = AwsDynamoDBHook(\n aws_conn_id='aws_default', region_name='us-east-1')\n\n @staticmethod\n def process_data(data, *args, **kwargs):\n return json.loads(data.to_json(orient='records'))\n\n @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')\n @mock_dynamodb2\n def test_get_conn_returns_a_boto3_connection(self):\n hook = AwsDynamoDBHook(aws_conn_id='aws_default')\n self.assertIsNotNone(hook.get_conn())\n\n @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',\n return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))\n @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')\n @mock_dynamodb2\n def test_get_records_with_schema(self, mock_get_pandas_df):\n # this table needs to be created in production\n self.hook.get_conn().create_table(\n TableName='test_airflow',\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n },\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n\n operator = airflow.providers.amazon.aws.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(\n sql=self.sql,\n table_name=\"test_airflow\",\n task_id='hive_to_dynamodb_check',\n table_keys=['id'],\n dag=self.dag)\n\n operator.execute(None)\n\n table = self.hook.get_conn().Table('test_airflow')\n table.meta.client.get_waiter(\n 'table_exists').wait(TableName='test_airflow')\n self.assertEqual(table.item_count, 1)\n\n @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',\n return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))\n @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')\n @mock_dynamodb2\n def test_pre_process_records_with_schema(self, mock_get_pandas_df):\n # this table needs to be created in production\n self.hook.get_conn().create_table(\n 
TableName='test_airflow',\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n },\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n\n operator = airflow.providers.amazon.aws.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(\n sql=self.sql,\n table_name='test_airflow',\n task_id='hive_to_dynamodb_check',\n table_keys=['id'],\n pre_process=self.process_data,\n dag=self.dag)\n\n operator.execute(None)\n\n table = self.hook.get_conn().Table('test_airflow')\n table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')\n self.assertEqual(table.item_count, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.DataFrame" ] ]
oustling/dicom_profile_fitting
[ "65b148acf9e2167e0d80c723bbbfd16a5dd64c4c", "65b148acf9e2167e0d80c723bbbfd16a5dd64c4c" ]
[ "minimize/retic_xmm_2gauss/2minimize_4mv.py", "minimize/halves_1gauss/minimize_15mv.py" ]
[ "#!\\usr\\bin\\python\n\nfrom numpy import array\nfrom scipy.special import erf \nfrom scipy.optimize import minimize \nfrom math import pi, sin, cos, exp, sqrt\n#import dicom\n\nline_array = [] ## global\n\ndef read_line (file_name ):\n with open( file_name ) as f:\n for line in f:\n line_array.append( [float( line.split()[0] ), float( line.split()[2] )] )\n\n\n\nread_line(\"4mv_line.csv\")\n\nline_len_2 = int(len(line_array)*0.5) ## global\n\n\n\ndef pi(x, b): # 0 1 2 3 4 5 6\n # b is np.array of these parameters: [sigma_1, sigma_2, w_1, x_sh, bkg, B, b]\n s_1 = 0.5*b[5]/(abs(b[2])*b[0]+abs(1-abs(b[2]))*b[1])\n s_2 = abs(b[2])*b[0]*erf( (b[6]-x-b[3])/(sqrt(2)*b[0]) )\n s_3 = abs(b[2])*b[0]*erf( (-b[6]-x-b[3])/(sqrt(2)*b[0]) )\n s_4 = abs(1-abs(b[2]))*b[1]*erf( (b[6]-x-b[3])/(sqrt(2)*b[1]) )\n s_5 = abs(1-abs(b[2]))*b[1]*erf( (-b[6]-x-b[3])/(sqrt(2)*b[1]) )\n return s_1*(s_2 - s_3 + s_4 - s_5) + b[4] # x in mm\n\ndef s(b):\n n_points_checked = 190\n halv = int( n_points_checked*0.5 )\n temp = 0.0\n for i in range( n_points_checked ):\n x = (i-halv)*0.2481\n a = pi(x, b) - line_array[ line_len_2 - halv +i ][1] \n temp += a*a\n return temp\n\n\n# [sigma_1, sigma_2, w_1, x_sh, bkg, B, b ]\nx0 = array([1.58, 0.58, 0.08, -0.03, 1047.0, 15031.0, 1.40]) # initial values for minimize\n\n\nprint ( x0 )\nres = minimize(s, x0, method='nelder-mead', options={'xtol': 1e-2, 'disp': True, 'maxfev':1e5, 'maxiter':1e5} )\nprint (res.x)\nprint (res.fun * 1e-6)\n\n# print out the whole line\nfor i in range(190):\n x = (i-95)*0.2481 # x in milimiters\n print(x,\", \", line_array[line_len_2 - 95 + i][1],\", \",pi(x,res.x) )\n", "#!\\usr\\bin\\python\n\nfrom numpy import array\nfrom scipy.special import erf , erfc\nfrom scipy.optimize import minimize \nfrom math import pi, sin, cos, exp, sqrt\n\nline_array = [] ## global\n\ndef read_line (file_name ):\n with open( file_name ) as f:\n for line in f:\n line_array.append( [float( line.split()[0] ), float( line.split()[2] )] )\n\n\n\nread_line(\"line_15mv.csv\")\n\ndef pi(x, b): # 0 1 2 3\n # b is np.array of these parameters: [sigma_1, x_sh, bkg, A]\n s_1 = 0.5*b[3]\n s_2 = erfc( (-x-b[1])/(sqrt(2)*b[0]) )\n return s_1*s_2 + b[2] # x in mm\n\ndef s(b):\n n_points_checked = 400\n halv = int( n_points_checked*0.5 )\n temp = 0.0\n for i in range( n_points_checked ):\n x = (i-halv)*0.2481\n a = pi(x, b) - line_array[ 512 - halv +i ][1] \n temp += a*a\n return temp\n\n\n# [sigma_1, x_sh, bkg, B ]\nx0 = array([0.73, 0.0, 2000.0, 37000.00]) # initial values for minimize\n\n\nprint ( x0 )\nres = minimize(s, x0, method='nelder-mead', options={'xtol': 1e-2, 'disp': True, 'maxfev':1e5, 'maxiter':1e5} )\nprint (res.x)\nprint (res.fun * 1e-6)\n\n# print out the whole line\nfor i in range( 1024 ):\n x = (i-512)*0.2481 # x in milimiters\n print(x,\", \", line_array[i][1],\", \",pi(x,res.x) )\n\n" ]
[ [ "numpy.array", "scipy.optimize.minimize" ], [ "numpy.array", "scipy.optimize.minimize" ] ]
hr0nix/trackdays
[ "1889d4a6c003b14b4a07c414c95b09c4770e7c73" ]
[ "trackdays/training/utils.py" ]
[ "import imageio\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom tf_agents.replay_buffers import tf_uniform_replay_buffer\nfrom tf_agents.drivers import dynamic_step_driver\nfrom tf_agents.environments import tf_py_environment\n\n\ndef load_policy(path):\n return tf.compat.v2.saved_model.load(path)\n\n\ndef visualize_policy(environment, policy, output_filename, num_episodes=1, fps=5):\n rendering_environment = environment\n if isinstance(environment, tf_py_environment.TFPyEnvironment):\n # The inner env should be used for rendering\n rendering_environment = environment.pyenv.envs[0]\n\n with imageio.get_writer(output_filename, fps=fps) as video:\n font = ImageFont.load_default()\n total_reward = None\n\n def _add_environment_frame():\n rendered_env = rendering_environment.render()\n image = Image.fromarray(rendered_env.astype(np.uint8), mode='RGB')\n draw = ImageDraw.Draw(image)\n draw.text((5, 5), 'TR: %.1f' % total_reward, font=font)\n image_as_numpy = np.array(image.getdata()).reshape(rendered_env.shape).astype(np.uint8)\n video.append_data(image_as_numpy)\n\n for _ in range(num_episodes):\n total_reward = 0.0\n time_step = environment.reset()\n _add_environment_frame()\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n total_reward += time_step.reward.numpy()[0]\n _add_environment_frame()\n\n\ndef evaluate_policy(env, policy, num_episodes):\n total_return = 0.0\n total_num_steps = 0.0\n for _ in range(num_episodes):\n time_step = env.reset()\n episode_return = 0.0\n episode_num_steps = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = env.step(action_step.action)\n episode_return += time_step.reward\n episode_num_steps += 1\n total_return += episode_return\n total_num_steps += episode_num_steps\n\n return (total_return / num_episodes).numpy()[0], total_num_steps / num_episodes\n\n\ndef as_tf_env(env):\n return tf_py_environment.TFPyEnvironment(env)\n\n\ndef create_replay_buffer(agent, train_env, replay_buffer_size):\n return tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=agent.collect_data_spec,\n batch_size=train_env.batch_size,\n max_length=replay_buffer_size,\n )\n\n\ndef create_collect_driver(train_env, agent, replay_buffer, collect_steps):\n return dynamic_step_driver.DynamicStepDriver(\n train_env, agent.collect_policy,\n observers=[replay_buffer.add_batch],\n num_steps=collect_steps,\n )\n\n\ndef cudnn_workaround():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n" ]
[ [ "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.set_memory_growth", "tensorflow.compat.v2.saved_model.load" ] ]
Luisbaduy97/COVID-YUCATAN
[ "726ae071de0bc059a42136d9c99503bc4cdfd4b3" ]
[ "datos_yuc_actualizado.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 24 14:52:03 2020\n\n@author: DELL\n\"\"\"\n\n\nimport pandas as pd\n\ndata = pd.read_csv('http://187.191.75.115/gobmx/salud/datos_abiertos/datos_abiertos_covid19.zip', encoding = 'ANSI')\n\n\nres = data[data['ENTIDAD_RES'] == 31]\n\nres.to_csv('data_yuc_actualizado.csv', index = False)" ]
[ [ "pandas.read_csv" ] ]
qingfengwuhen/Paddle
[ "cff5e2c173afc4431085b9382c716be7a9b91759" ]
[ "python/paddle/fluid/executor.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport multiprocessing\nimport sys\nimport numpy as np\nfrom .wrapped_decorator import signature_safe_contextmanager\nimport six\nfrom .framework import Program, default_main_program, Variable\nfrom . import core\nfrom . import compiler\nfrom .. import compat as cpt\nfrom .trainer_factory import TrainerFactory\n\n__all__ = ['Executor', 'global_scope', 'scope_guard']\n\ng_scope = core.Scope()\nInferNativeConfig = core.NativeConfig\nInferAnalysisConfig = core.AnalysisConfig\n\n\ndef global_scope():\n \"\"\"\n Get the global/default scope instance. There are a lot of APIs use\n :code:`global_scope` as its default value, e.g., :code:`Executor.run`\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n fluid.global_scope().var(\"data\").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())\n numpy.array(fluid.global_scope().find_var(\"data\").get_tensor())\n\n Returns:\n Scope: The global/default scope instance.\n \"\"\"\n return g_scope\n\n\ndef _switch_scope(scope):\n global g_scope\n ex = g_scope\n g_scope = scope\n return ex\n\n\n@signature_safe_contextmanager\ndef scope_guard(scope):\n \"\"\"\n Change the global/default scope instance by Python `with` statement. All\n variable in runtime will assigned to the new scope.\n\n Args:\n scope: The new global/default scope.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n new_scope = fluid.Scope()\n with fluid.scope_guard(new_scope):\n fluid.global_scope().var(\"data\").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())\n numpy.array(new_scope.find_var(\"data\").get_tensor())\n \"\"\"\n\n ex = _switch_scope(scope)\n yield\n _switch_scope(ex)\n\n\ndef as_numpy(tensor):\n \"\"\"\n Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n new_scope = fluid.Scope()\n with fluid.scope_guard(new_scope):\n fluid.global_scope().var(\"data\").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())\n tensor = new_scope.find_var(\"data\").get_tensor()\n fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var(\"data\").get_tensor())\n\n Args:\n tensor(Variable): a instance of Tensor\n\n Returns:\n numpy.ndarray\n \"\"\"\n if isinstance(tensor, core.LoDTensorArray):\n return [as_numpy(t) for t in tensor]\n if isinstance(tensor, list):\n return [as_numpy(t) for t in tensor]\n assert isinstance(tensor, core.LoDTensor)\n lod = tensor.lod()\n if len(lod) > 0:\n raise RuntimeError(\"Some of your fetched tensors hold LoD information. \\\n They can not be completely cast to Python ndarray. 
\\\n Please set the parameter 'return_numpy' as 'False' to \\\n return LoDTensor itself directly.\")\n if tensor._is_initialized():\n return np.array(tensor)\n else:\n return None\n\n\ndef has_feed_operators(block, feed_targets, feed_holder_name):\n \"\"\" Check whether the block already has feed operators.\n\n Return false if the block does not have any feed operators.\n If some feed operators have been prepended to the block, check that\n the info contained in these feed operators matches the feed_targets\n and feed_holder_name. Raise exception when any mismatch is found.\n Return true when the block has feed operators with matching info.\n\n Args:\n block: a block instance (typically global block of a program)\n feed_targets: a dictionary of {feed_target_name: feed_target_data}\n feed_holder_name: the name of the variable that holds the data of\n all feed targets. The type of this feed_holder variable is\n FEED_MINIBATCH, which is essentially vector<LoDTensor>.\n\n Returns:\n A boolean value that indicates whether a block has feed operators\n that match the info contained in feed_targets and feed_holder_name.\n \"\"\"\n\n feed_count = 0\n for op in block.ops:\n if op.desc.type() == 'feed':\n feed_count += 1\n assert op.desc.input('X')[0] == feed_holder_name\n feed_target_name = op.desc.output('Out')[0]\n if feed_target_name not in feed_targets:\n raise Exception(\"'feed_targets' does not have {} variable\".\n format(feed_target_name))\n else:\n break\n if feed_count > 0 and feed_count != len(feed_targets):\n raise Exception(\n \"Feed operators in program desc do not match 'feed_targets'\")\n return feed_count > 0\n\n\ndef has_fetch_operators(block, fetch_targets, fetch_holder_name):\n \"\"\" Check whether the block already has fetch operators.\n\n Return false if the block does not have any fetch operators.\n If some fetch operators have been appended to the block, check that\n the info contained in these fetch operators matches the fetch_targets\n and fetch_holder_name. Raise exception when any mismatch is found.\n Return true when the block has fetch operators with matching info.\n\n Args:\n block: a block instance (typically global block of a program)\n fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}\n fetch_holder_name: the name of the variable that holds the data of\n all fetch targets. The type of this fetch_holder variable is\n FETCH_LIST, which is essentially vector<LoDTensor>.\n\n Return:\n A boolean value that indicates whether a block has fetch operators\n that match the info contained in fetch_targets and fetch_holder_name.\n \"\"\"\n\n fetch_count = 0\n for op in block.ops:\n if op.desc.type() == 'fetch':\n fetch_count += 1\n assert op.desc.output('Out')[0] == fetch_holder_name\n fetch_target_name = op.desc.input('X')[0]\n if fetch_target_name not in [\n var.desc.name() for var in fetch_targets\n ]:\n raise Exception(\"'fetch_targets' does not have {} variable\".\n format(fetch_target_name))\n idx = op.desc.attr('col')\n assert fetch_target_name == fetch_targets[idx].desc.name()\n if fetch_count > 0 and fetch_count != len(fetch_targets):\n raise Exception(\n \"Fetch operators in program desc do not match 'fetch_targets'\")\n return fetch_count > 0\n\n\ndef _fetch_var(name, scope=None, return_numpy=True):\n \"\"\"\n Fetch the value of the variable with the given name from the\n given scope.\n\n Args:\n name(str): name of the variable. 
Typically, only persistable variables\n can be found in the scope used for running the program.\n scope(core.Scope|None): scope object. It should be the scope where\n you pass to Executor.run() when running your program.\n If None, global_scope() will be used. Default None.\n return_numpy(bool): whether convert the tensor to numpy.ndarray.\n Default True.\n\n Returns:\n LodTensor|numpy.ndarray\n \"\"\"\n assert isinstance(name, str)\n if scope is None:\n scope = global_scope()\n assert isinstance(scope, core._Scope)\n\n var = scope.find_var(name)\n assert var is not None, (\n \"Cannot find \" + name + \" in scope. Perhaps you need to make the\"\n \" variable persistable by using var.persistable = True in your\"\n \" program.\")\n tensor = var.get_tensor()\n if return_numpy:\n tensor = as_numpy(tensor)\n return tensor\n\n\ndef _to_name_str(var):\n if isinstance(var, Variable):\n return var.desc.name()\n elif isinstance(var, str):\n return var\n elif isinstance(var, six.string_types):\n return str(var)\n else:\n raise TypeError(str(var) + \" should be Variable or str\")\n\n\ndef _get_strong_program_cache_key(program, feed, fetch_list):\n return str(id(program)) + _get_program_cache_key(feed, fetch_list)\n\n\ndef _get_program_cache_key(feed, fetch_list):\n feed_var_names = list(feed.keys())\n fetch_var_names = list(map(_to_name_str, fetch_list))\n\n return str(feed_var_names + fetch_var_names)\n\n\ndef _as_lodtensor(data, place):\n \"\"\"\n Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n >>> import paddle.fluid as fluid\n >>> place = fluid.CPUPlace()\n >>> exe = fluid.executor(place)\n >>> data = np.array(size=(100, 200, 300))\n >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)\n >>> ...\n\n Args:\n data(numpy.ndarray): a instance of array\n\n Returns:\n LoDTensor\n \"\"\"\n if isinstance(data, list):\n raise RuntimeError(\"Some of your feed data hold LoD information. \\\n They can not be completely cast from a list of Python \\\n ndarray to LoDTensor. Please convert data to LoDTensor \\\n directly before feeding the data.\\\n \")\n # single tensor case\n tensor = core.LoDTensor()\n tensor.set(data, place)\n return tensor\n\n\nclass Executor(object):\n \"\"\"\n An Executor in Python, supports single/multiple-GPU running,\n and single/multiple-CPU running. Python executor takes a program,\n adds feed operators and fetch operators to this program according\n to feed map and fetch_list. Feed map provides input data for the\n program. fetch_list provides the variables(or names) that user wants\n to get after program runs. Note: the executor will run all operators\n in the program but not only the operators dependent by the fetch_list.\n It stores the global variables into the global scope, and creates a\n local scope for the temporary variables. The contents in local scope\n may be discarded after every minibatch forward/backward finished.\n But the global scope variables will be persistent through different runs.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.compiler as compiler\n import numpy\n import os\n\n use_cuda = True\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(train_program, startup_program):\n data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)\n\n # Run the startup program once and only once.\n # Not need to optimize/compile the startup program.\n startup_program.random_seed=1\n exe.run(startup_program)\n\n # Run the main program directly without compile.\n x = numpy.random.random(size=(10, 1)).astype('float32')\n loss_data, = exe.run(train_program,\n feed={\"X\": x},\n fetch_list=[loss.name])\n\n # Or, compiled the program and run. See `CompiledProgram`\n # for more detail.\n # NOTE: If you use CPU to run the program, you need\n # to specify the CPU_NUM, otherwise, fluid will use\n # all the number of the logic core as the CPU_NUM,\n # in that case, the batch size of the input should be\n # greater than CPU_NUM, if not, the process will be\n # failed by an exception.\n if not use_cuda:\n os.environ['CPU_NUM'] = str(2)\n\n compiled_prog = compiler.CompiledProgram(\n train_program).with_data_parallel(\n loss_name=loss.name)\n loss_data, = exe.run(compiled_prog,\n feed={\"X\": x},\n fetch_list=[loss.name])\n\n Args:\n place(fluid.CPUPlace|fluid.CUDAPlace(n)): indicate the executor run on which device.\n\n \"\"\"\n\n def __init__(self, place):\n self.place = place\n self.program_caches = dict()\n self.ctx_caches = dict()\n self.scope_caches = dict()\n self.var_caches = dict()\n p = core.Place()\n p.set_place(self.place)\n self._default_executor = core.Executor(p)\n self._closed = False\n\n def _get_var_cache(self, program_cache_key):\n return self.var_caches.get(program_cache_key, None)\n\n def _get_scope_cache(self, program_cache_key):\n return self.scope_caches.get(program_cache_key, None)\n\n def _get_ctx_cache(self, program_cache_key):\n return self.ctx_caches.get(program_cache_key, None)\n\n def _get_program_cache(self, program_cache_key):\n return self.program_caches.get(program_cache_key, None)\n\n def _add_program_cache(self, program_cache_key, program):\n self.program_caches[program_cache_key] = program\n\n def _add_ctx_cache(self, ctx_cache_key, ctx):\n self.ctx_caches[ctx_cache_key] = ctx\n\n def _add_scope_cache(self, scope_cache_key, scope):\n self.scope_caches[scope_cache_key] = scope\n\n def _add_var_cache(self, var_cache_key, var):\n self.var_caches[var_cache_key] = var\n\n def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,\n fetch_var_name):\n tmp_program = program.clone()\n\n global_block = tmp_program.global_block()\n\n if feed_var_name in global_block.vars:\n feed_var = global_block.var(feed_var_name)\n else:\n feed_var = global_block.create_var(\n name=feed_var_name,\n type=core.VarDesc.VarType.FEED_MINIBATCH,\n persistable=True)\n\n if fetch_var_name in global_block.vars:\n fetch_var = global_block.var(fetch_var_name)\n else:\n fetch_var = global_block.create_var(\n name=fetch_var_name,\n type=core.VarDesc.VarType.FETCH_LIST,\n persistable=True)\n\n # prepend feed operators\n if not has_feed_operators(global_block, feed, feed_var_name):\n for i, name in enumerate(feed):\n out = global_block.var(name)\n 
global_block._prepend_op(\n type='feed',\n inputs={'X': [feed_var]},\n outputs={'Out': [out]},\n attrs={'col': i})\n\n # append fetch_operators\n if not has_fetch_operators(global_block, fetch_list, fetch_var_name):\n for i, var in enumerate(fetch_list):\n assert isinstance(var, Variable) or isinstance(\n var, six.string_types), (\n \"Wrong type for fetch_list[%s]: %s\" % (i, type(var)))\n global_block.append_op(\n type='fetch',\n inputs={'X': [var]},\n outputs={'Out': [fetch_var]},\n attrs={'col': i})\n\n return tmp_program\n\n def _feed_data(self, program, feed, feed_var_name, scope):\n # feed var to framework\n for op in program.global_block().ops:\n if op.desc.type() == 'feed':\n feed_target_name = op.desc.output('Out')[0]\n cur_feed = feed[feed_target_name]\n if not isinstance(cur_feed, core.LoDTensor):\n cur_feed = _as_lodtensor(cur_feed, self.place)\n idx = op.desc.attr('col')\n core.set_feed_variable(scope, cur_feed, feed_var_name, idx)\n else:\n break\n\n def _fetch_data(self, fetch_list, fetch_var_name, scope):\n outs = [\n core.get_fetch_variable(scope, fetch_var_name, i)\n for i in six.moves.range(len(fetch_list))\n ]\n return outs\n\n '''\n TODO(typhoonzero): Define \"no longer use\" meaning? Can user create\n a new Executor for the same program and run?\n TODO(panyx0718): Why ParallelExecutor doesn't have close?\n '''\n\n def close(self):\n \"\"\"\n Close this executor.\n\n You can no longer use this executor after calling this method.\n For the distributed training, this method would free the resource\n on PServers related to the current Trainer.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cpu = fluid.CPUPlace()\n exe = fluid.Executor(cpu)\n # execute training or testing\n exe.close()\n \"\"\"\n if not self._closed:\n self._default_executor.close()\n self._closed = True\n\n def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,\n return_numpy):\n exe = program._executor\n if isinstance(feed, dict):\n feed_tensor_dict = dict()\n for feed_name in feed:\n feed_tensor = feed[feed_name]\n if not isinstance(feed_tensor, core.LoDTensor):\n feed_tensor = core.LoDTensor()\n # always set to CPU place, since the tensor need to be splitted\n # it is fast in CPU\n feed_tensor.set(feed[feed_name], core.CPUPlace())\n feed_tensor_dict[feed_name] = feed_tensor\n\n exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)\n elif isinstance(feed, list) or isinstance(feed, tuple):\n if len(feed) != len(program._places):\n raise ValueError(\n \"Feed a list of tensor, the list should be the same size as places\"\n )\n\n res = list()\n for i, each in enumerate(feed):\n if not isinstance(each, dict):\n raise TypeError(\n \"Each element of feed list should be a dict\")\n res_dict = dict()\n for feed_name in each:\n tensor = each[feed_name]\n if not isinstance(tensor, core.LoDTensor):\n tmp = core.LoDTensor()\n tmp.set(tensor, program._places[i])\n tensor = tmp\n res_dict[feed_name] = tensor\n res.append(res_dict)\n exe.feed_tensors_into_local_scopes(res)\n\n fetch_var_names = list(map(_to_name_str, fetch_list))\n exe.run(fetch_var_names, fetch_var_name)\n arr = scope.find_var(fetch_var_name).get_lod_tensor_array()\n\n if return_numpy:\n return as_numpy(arr)\n return [arr[i] for i in range(len(arr))]\n\n def _check_fetch_vars_persistable(self, program, fetch_list):\n for var in fetch_list:\n if isinstance(var, Variable):\n persistable = var.persistable\n else:\n block_num = program.desc.num_blocks()\n persistable = None\n var_name = 
cpt.to_bytes(var)\n for i in six.moves.range(block_num):\n var_desc = program.desc.block(i).find_var(var_name)\n if var_desc:\n persistable = var_desc.persistable()\n break\n assert persistable is not None, \"Variable {} is not found\".format(\n var)\n\n if not persistable:\n logging.warn(\"\"\"\n Detect that build_strategy.memory_optimize = True, but the some variables in the fetch\n list is not persistable, you may get wrong fetched value, or an exeception may be thrown\n about cannot find variable of the fetch list. \n\n TO FIX this:\n # Sample\n conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None) \n # if you need to fetch conv1, then:\n conv1.persistable = True\n\n \"\"\")\n\n def run(self,\n program=None,\n feed=None,\n fetch_list=None,\n feed_var_name='feed',\n fetch_var_name='fetch',\n scope=None,\n return_numpy=True,\n use_program_cache=False):\n \"\"\"\n Run program by this Executor. Feed data by feed map, fetch result by\n fetch_list. Python executor takes a program, add feed operators and\n fetch operators to this program according to feed map and fetch_list.\n Feed map provides input data for the program. fetch_list provides\n the variables(or names) that user want to get after program run.\n\n Note: the executor will run all operators in the program but not\n only the operators dependent by the fetch_list.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n # First create the Executor.\n place = fluid.CPUPlace() # fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n adam = fluid.optimizer.Adam()\n adam.minimize(loss)\n\n # Run the startup program once and only once.\n exe.run(fluid.default_startup_program())\n\n x = numpy.random.random(size=(10, 1)).astype('float32')\n outs = exe.run(feed={'X': x},\n fetch_list=[loss.name])\n\n Args:\n program(Program|CompiledProgram): the program that need to run,\n if not provided, then default_main_program (not compiled) will be used.\n feed(dict): feed variable map, e.g. {\"image\": ImageData, \"label\": LabelData}\n fetch_list(list): a list of variable or variable names that user \n wants to get, this method will return them according to this list.\n feed_var_name(str): the name for the input variable of \n feed Operator.\n fetch_var_name(str): the name for the output variable of \n fetch Operator.\n scope(Scope): the scope used to run this program, you can switch \n it to different scope. default is global_scope\n return_numpy(bool): if convert the fetched tensor to numpy\n use_program_cache(bool): whether to use the cached program \n settings across batches. Setting it be true would be faster \n only when (1) the program is not compiled with data parallel, \n and (2) program, feed variable names and fetch_list variable \n names do not changed compared to the last step. 
\n \n Returns:\n\n list(numpy.array): fetch result according to fetch_list.\n \"\"\"\n try:\n return self._run_impl(\n program=program,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name,\n scope=scope,\n return_numpy=return_numpy,\n use_program_cache=use_program_cache)\n except Exception as e:\n if not isinstance(e, core.EOFException):\n print(\"An exception was thrown!\\n {}\".format(str(e)))\n raise e\n\n def _run_impl(self, program, feed, fetch_list, feed_var_name,\n fetch_var_name, scope, return_numpy, use_program_cache):\n\n if self._closed:\n raise RuntimeError(\"Attempted to use a closed Executor\")\n\n if scope is None:\n scope = global_scope()\n if fetch_list is None:\n fetch_list = []\n\n compiled = isinstance(program, compiler.CompiledProgram)\n # For backward compatibility, run directly.\n if not compiled:\n return self._run_program(\n program,\n self._default_executor,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name,\n scope=scope,\n return_numpy=return_numpy,\n use_program_cache=use_program_cache)\n else:\n if fetch_list and program._is_data_parallel and program._program and \\\n program._build_strategy._use_legacy_memory_optimize_strategy:\n self._check_fetch_vars_persistable(program._program, fetch_list)\n\n program._compile(scope, self.place)\n if program._is_data_parallel:\n return self._run_parallel(\n program,\n scope=scope,\n feed=feed,\n fetch_list=fetch_list,\n fetch_var_name=fetch_var_name,\n return_numpy=return_numpy)\n elif program._is_inference:\n return self._run_inference(program._executor, feed)\n else:\n # TODO(panyx0718): Can compile program to optimize executor\n # performance.\n # TODO(panyx0718): executor should be able to run graph.\n assert program._program, \"CompiledProgram is compiled from graph, can only run with_data_parallel.\"\n # use_program_cache is not valid with CompiledProgram\n return self._run_program(\n program._program,\n self._default_executor,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name,\n scope=scope,\n return_numpy=return_numpy,\n use_program_cache=False)\n\n def _run_program(self, program, exe, feed, fetch_list, feed_var_name,\n fetch_var_name, scope, return_numpy, use_program_cache):\n\n if feed is None:\n feed = {}\n elif isinstance(feed, (list, tuple)):\n assert len(feed) == 1, \"Not compiled with data parallel\"\n feed = feed[0]\n\n if not isinstance(feed, dict):\n raise TypeError(\n \"feed requires dict as its Parameter. But you passed in %s\" %\n (type(feed)))\n if program is None:\n program = default_main_program()\n\n if not isinstance(program, Program):\n raise TypeError(\n \"Executor requires Program as its Parameter. 
But you passed in %s\"\n % (type(program)))\n\n if use_program_cache:\n cache_key = _get_strong_program_cache_key(program, feed, fetch_list)\n cached_program = self._get_program_cache(cache_key)\n cached_ctx = self._get_ctx_cache(cache_key)\n cached_scope = self._get_scope_cache(cache_key)\n cached_var = self._get_var_cache(cache_key)\n if cached_program is None:\n cached_program = self._add_feed_fetch_ops(\n program=program,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name)\n self._add_program_cache(cache_key, cached_program)\n fetch_list_str = list(map(_to_name_str, fetch_list))\n cached_ctx = self._default_executor.prepare_ctx_cache(\n cached_program.desc, 0, fetch_list_str, False)\n cached_var = self._default_executor.create_variables(\n cached_program.desc, scope, 0)\n # currently, we cache program, vars, sub_scope here\n # we suppose that in a life cycle of training, a user\n # will not create many programs. So, here the basic\n # rule of caching is to cache all unseen (program, var, scope)\n # when a user use use_program_cache.\n cached_scope = scope.new_scope()\n self._add_ctx_cache(cache_key, cached_ctx)\n self._add_var_cache(cache_key, cached_var)\n self._add_scope_cache(cache_key, cached_scope)\n program = cached_program\n ctx = cached_ctx\n scope = cached_scope\n var = cached_var\n else:\n program = self._add_feed_fetch_ops(\n program=program,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name)\n\n self._feed_data(program, feed, feed_var_name, scope)\n if not use_program_cache:\n exe.run(program.desc, scope, 0, True, True, fetch_var_name)\n else:\n exe.run_cached_prepared_ctx(ctx, scope, False, False, False)\n outs = self._fetch_data(fetch_list, fetch_var_name, scope)\n if return_numpy:\n outs = as_numpy(outs)\n return outs\n\n def _run_inference(self, exe, feed):\n return exe.run(feed)\n\n def _dump_debug_info(self, program=None, trainer=None):\n with open(str(id(program)) + \"_train_desc.prototxt\", \"w\") as fout:\n fout.write(str(trainer))\n if program._fleet_opt:\n with open(\"fleet_desc.prototxt\", \"w\") as fout:\n fout.write(str(program._fleet_opt[\"fleet_desc\"]))\n\n def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):\n filelist_length = len(dataset.dataset.get_filelist())\n if filelist_length < pipeline_num:\n pipeline_num = filelist_length\n print(\n \"Pipeline training: setting the pipeline num to %d is enough because there are only %d files\"\n % (filelist_length, filelist_length))\n if filelist_length < pipeline_num * pipeline_opt[\"concurrency_list\"][0]:\n print(\n \"Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files\"\n % (filelist_length // pipeline_num, filelist_length))\n pipeline_opt[\"concurrency_list\"][\n 0] = filelist_length // pipeline_num\n dataset.set_thread(pipeline_opt[\"concurrency_list\"][0] * pipeline_num)\n return pipeline_num\n\n def _prepare_trainer(self,\n program=None,\n dataset=None,\n scope=None,\n thread=0,\n debug=False,\n fetch_list=None,\n fetch_info=None,\n print_period=100):\n if scope is None:\n scope = global_scope()\n if fetch_list is None:\n fetch_list = []\n if fetch_info is None:\n fetch_info = []\n assert len(fetch_list) == len(fetch_info)\n compiled = isinstance(program, compiler.CompiledProgram)\n if not compiled:\n # TODO: Need a better way to distinguish and specify different execution mode\n if program._pipeline_opt:\n trainer = 
TrainerFactory()._create_trainer(\n program._pipeline_opt)\n else:\n trainer = TrainerFactory()._create_trainer(program._fleet_opt)\n trainer._set_program(program)\n else:\n if program._pipeline_opt:\n trainer = TrainerFactory()._create_trainer(\n program.program._pipeline_opt)\n else:\n trainer = TrainerFactory()._create_trainer(\n program.program._fleet_opt)\n trainer._set_program(program.program)\n\n # The following thread_num-determined logic will be deprecated\n if thread <= 0:\n if dataset.thread_num <= 0:\n raise RuntimeError(\n \"You should set thread num first, either in Dataset\"\n \"or in Executor.train_from_dataset\")\n else:\n trainer._set_thread(dataset.thread_num)\n else:\n trainer._set_thread(thread)\n\n trainer._set_debug(debug)\n trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)\n return scope, trainer\n\n def infer_from_dataset(self,\n program=None,\n dataset=None,\n scope=None,\n thread=0,\n debug=False,\n fetch_list=None,\n fetch_info=None,\n print_period=100):\n \"\"\"\n The document of infer_from_dataset is almost the same as\n train_from_dataset, except that in distributed training,\n push gradients will be disabled in infer_from_dataset.\n infer_from_dataset() can be used for evaluation in multi-thread\n very easily.\n\n Args:\n program(Program|CompiledProgram): the program that needs to be run,\n if not provided, then default_main_program (not compiled) will be used.\n dataset(paddle.fluid.Dataset): dataset created outside this function,\n a user should provide a well-defined dataset before calling this function.\n Please check the document of Dataset if needed. default is None\n scope(Scope): the scope used to run this program, you can switch it to different scope\n for each run. default is global_scope\n thread(int): number of thread a user wants to run in this function. The actual number\n of thread will be min(Dataset.thread_num, thread) if thread > 0, default is 0\n debug(bool): whether a user wants to run infer_from_dataset, default is False\n fetch_list(Variable List): fetch variable list, each variable\n will be printed during training, default is None\n fetch_info(String List): print information for each variable, default is None\n print_period(int): the number of mini-batches for each print, default is 100\n\n Returns:\n None\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu\n exe = fluid.Executor(place)\n x = fluid.layers.data(name=\"x\", shape=[10, 10], dtype=\"int64\")\n y = fluid.layers.data(name=\"y\", shape=[1], dtype=\"int64\", lod_level=1)\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var([x, y])\n dataset.set_thread(1)\n filelist = [] # you should set your own filelist, e.g. 
filelist = [\"dataA.txt\"]\n dataset.set_filelist(filelist)\n exe.run(fluid.default_startup_program())\n exe.infer_from_dataset(program=fluid.default_main_program(),\n dataset=dataset) \n\n \"\"\"\n if dataset == None:\n raise RuntimeError(\"dataset is needed and should be initialized\")\n\n dataset._prepare_to_run()\n scope, trainer = self._prepare_trainer(\n program=program,\n dataset=dataset,\n scope=scope,\n thread=thread,\n debug=debug,\n fetch_list=fetch_list,\n fetch_info=fetch_info,\n print_period=print_period)\n trainer._set_infer(True)\n trainer._gen_trainer_desc()\n self._dump_debug_info(program=program, trainer=trainer)\n self._default_executor.run_from_dataset(program.desc, scope,\n dataset.dataset,\n trainer._desc())\n dataset._finish_to_run()\n return None\n\n def train_from_dataset(self,\n program=None,\n dataset=None,\n scope=None,\n thread=0,\n debug=False,\n fetch_list=None,\n fetch_info=None,\n print_period=100):\n \"\"\"\n Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.\n Given a program, either a program or compiled program, train_from_dataset will\n consume all data samples in dataset. Input scope can be given by users. By default,\n scope is global_scope(). The total number of thread run in training is `thread`.\n Thread number used in training will be minimum value of threadnum in Dataset and\n the value of thread in this interface. Debug can be set so that executor will display\n Run-Time for all operators and the throughputs of current training task.\n \n Note: train_from_dataset will destroy all resources created within executor for each run.\n\n Args:\n program(Program|CompiledProgram): the program that needs to be run,\n if not provided, then default_main_program (not compiled) will be used.\n dataset(paddle.fluid.Dataset): dataset created outside this function,\n a user should provide a well-defined dataset before calling this function.\n Please check the document of Dataset if needed.\n scope(Scope): the scope used to run this program, you can switch it to different scope\n for each run. default is global_scope\n thread(int): number of thread a user wants to run in this function. The actual number\n of thread will be min(Dataset.thread_num, thread)\n debug(bool): whether a user wants to run train_from_dataset \n fetch_list(Variable List): fetch variable list, each variable\n will be printed during training\n fetch_info(String List): print information for each variable\n print_period(int): the number of mini-batches for each print\n\n Returns:\n None\n \n Examples:\n \n .. code-block:: python\n\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu\n exe = fluid.Executor(place)\n x = fluid.layers.data(name=\"x\", shape=[10, 10], dtype=\"int64\")\n y = fluid.layers.data(name=\"y\", shape=[1], dtype=\"int64\", lod_level=1)\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var([x, y])\n dataset.set_thread(1)\n filelist = [] # you should set your own filelist, e.g. 
filelist = [\"dataA.txt\"]\n dataset.set_filelist(filelist)\n exe.run(fluid.default_startup_program())\n exe.train_from_dataset(program=fluid.default_main_program(),\n dataset=dataset)\n\n \"\"\"\n if dataset == None:\n raise RuntimeError(\"dataset is need and should be initialized\")\n\n if program._pipeline_opt:\n thread = self._adjust_pipeline_resource(program._pipeline_opt,\n dataset, thread)\n\n dataset._prepare_to_run()\n scope, trainer = self._prepare_trainer(\n program=program,\n dataset=dataset,\n scope=scope,\n thread=thread,\n debug=debug,\n fetch_list=fetch_list,\n fetch_info=fetch_info,\n print_period=print_period)\n trainer._gen_trainer_desc()\n self._dump_debug_info(program=program, trainer=trainer)\n self._default_executor.run_from_dataset(program.desc, scope,\n dataset.dataset,\n trainer._desc())\n dataset._finish_to_run()\n return None\n" ]
[ [ "numpy.array" ] ]
SilviaZirino/rl-baselines-zoo
[ "27e64ca3166bf9804cc14b8752c71a6c719137b3" ]
[ "utils/record_video.py" ]
[ "import os\nimport argparse\n\nimport gym\nfrom gym import envs\nimport numpy as np\n\nfrom skimage import transform\nfrom stable_baselines.common.atari_wrappers import WarpFrame\n\nfrom stable_baselines.common.vec_env import VecVideoRecorder, VecFrameStack, VecNormalize\n\nfrom .utils import ALGOS, create_test_env, get_saved_hyperparams, get_latest_run_id, find_saved_model\n\n\n#-----------------------------------------\nimport toy_simulator\n#import dVRL_simulator\n\nfrom skimage import transform\nfrom gym.spaces import Box\nimport cv2\n#-----------------------------------------\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', help='environment ID', type=str, default='CartPole-v1')\n parser.add_argument('-f', '--folder', help='Log folder', type=str, default='trained_agents')\n parser.add_argument('-o', '--output-folder', help='Output folder', type=str, default='logs/videos/')\n parser.add_argument('--algo', help='RL Algorithm', default='ppo2',\n type=str, required=False, choices=list(ALGOS.keys()))\n parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000,\n type=int)\n parser.add_argument('--n-envs', help='number of environments', default=1,\n type=int)\n parser.add_argument('--deterministic', action='store_true', default=False,\n help='Use deterministic actions')\n parser.add_argument('--seed', help='Random generator seed', type=int, default=0)\n parser.add_argument('--no-render', action='store_true', default=False,\n help='Do not render the environment (useful for tests)')\n parser.add_argument('--exp-id', help='Experiment ID (default: -1, no exp folder, 0: latest)', default=-1,\n type=int)\n args = parser.parse_args()\n\n env_id = args.env\n algo = args.algo\n folder = args.folder\n video_folder = args.output_folder\n seed = args.seed\n deterministic = args.deterministic\n video_length = args.n_timesteps\n n_envs = args.n_envs\n\n if args.exp_id == 0:\n args.exp_id = get_latest_run_id(os.path.join(folder, algo), env_id)\n print('Loading latest experiment, id={}'.format(args.exp_id))\n # Sanity checks\n if args.exp_id > 0:\n log_path = os.path.join(folder, algo, '{}_{}'.format(env_id, args.exp_id))\n else:\n log_path = os.path.join(folder, algo)\n\n model_path = find_saved_model(algo, log_path, env_id)\n\n stats_path = os.path.join(log_path, env_id)\n hyperparams, stats_path = get_saved_hyperparams(stats_path)\n\n\n is_atari = 'NoFrameskip' in env_id\n\n env = create_test_env(env_id, n_envs=n_envs, is_atari=is_atari,\n stats_path=stats_path, seed=seed, log_dir=None,\n should_render=not args.no_render, hyperparams=hyperparams)\n\n #env = RGBobs(env)\n\n model = ALGOS[algo].load(model_path)\n\n\n obs = env.reset()\n\n #obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY) #ADDED 2\n #obs = cv2.resize(obs, (84,84), interpolation=cv2.INTER_AREA) #ADDED 2\n\n\n #obs_dummy = env.reset() #ADDED 1\n #obs = transform.resize(obs_dummy, (84,84)) #ADDED 1\n #env.observation_space = Box(low=0, high=255, shape=obs.shape, dtype=np.uint8) #ADDED 1\n #obs = obs[:,:, None]*255 #ADDED 1\n\n\n # Note: apparently it renders by default\n env = VecVideoRecorder(env, video_folder,\n record_video_trigger=lambda x: x == 0, video_length=video_length,\n name_prefix=\"{}-{}\".format(algo, env_id))\n\n env.reset()\n for _ in range(video_length + 1):\n # action = [env.action_space.sample()]\n action, _ = model.predict(obs, deterministic=deterministic)\n if isinstance(env.action_space, gym.spaces.Box):\n action = np.clip(action, 
env.action_space.low, env.action_space.high)\n obs, _, _, _ = env.step(action)\n\n # Workaround for https://github.com/openai/gym/issues/893\n if n_envs == 1 and 'Bullet' not in env_id and not is_atari:\n env = env.venv\n # DummyVecEnv\n while isinstance(env, VecNormalize) or isinstance(env, VecFrameStack):\n env = env.venv\n env.envs[0].env.close()\n else:\n # SubprocVecEnv\n env.close()\n" ]
[ [ "numpy.clip" ] ]
likelyzhao/dino
[ "ad019889b0e4c103f0471d085f79bba42c817d1b" ]
[ "vision_transformer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nMostly copy-paste from timm library.\nhttps://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n\"\"\"\nimport math\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\n\nfrom utils import trunc_normal_\nfrom swin_transformer import SwinTransformer\n\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False):\n if drop_prob == 0. or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets\n random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n random_tensor.floor_() # binarize\n output = x.div(keep_prob) * random_tensor\n return output\n\n\nclass DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x, attn\n\n\nclass Block(nn.Module):\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x, return_attention=False):\n y, attn = self.attn(self.norm1(x))\n if return_attention:\n return attn\n x = x + self.drop_path(y)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\nclass PatchEmbed(nn.Module):\n \"\"\" Image to Patch Embedding\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n num_patches = (img_size // patch_size) * (img_size // patch_size)\n self.img_size = img_size\n self.patch_size = patch_size\n self.num_patches = num_patches\n\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n\n def forward(self, x):\n B, C, H, W = x.shape\n x = self.proj(x).flatten(2).transpose(1, 2)\n return x\n\n\nclass VisionTransformer(nn.Module):\n \"\"\" Vision Transformer \"\"\"\n def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,\n drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):\n super().__init__()\n self.num_features = self.embed_dim = embed_dim\n\n self.patch_embed = PatchEmbed(\n img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)\n for i in range(depth)])\n self.norm = norm_layer(embed_dim)\n\n # Classifier head\n self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n def interpolate_pos_encoding(self, x, w, h):\n npatch = x.shape[1] - 1\n N = self.pos_embed.shape[1] - 1\n if npatch == N and w == h:\n return self.pos_embed\n class_pos_embed = self.pos_embed[:, 0]\n patch_pos_embed = self.pos_embed[:, 1:]\n dim = x.shape[-1]\n w0 = w // self.patch_embed.patch_size\n h0 = h // self.patch_embed.patch_size\n # we add a small number to avoid floating point error in the interpolation\n # see discussion at https://github.com/facebookresearch/dino/issues/8\n w0, h0 = w0 + 0.1, h0 + 0.1\n patch_pos_embed = nn.functional.interpolate(\n patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),\n scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),\n mode='bicubic',\n )\n assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]\n patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)\n return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), 
dim=1)\n\n def prepare_tokens(self, x):\n B, nc, w, h = x.shape\n x = self.patch_embed(x) # patch linear embedding\n\n # add the [CLS] token to the embed patch tokens\n cls_tokens = self.cls_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n\n # add positional encoding to each token\n x = x + self.interpolate_pos_encoding(x, w, h)\n\n return self.pos_drop(x)\n\n def forward(self, x):\n x = self.prepare_tokens(x)\n for blk in self.blocks:\n x = blk(x)\n x = self.norm(x)\n return x[:, 0]\n\n def get_last_selfattention(self, x):\n x = self.prepare_tokens(x)\n for i, blk in enumerate(self.blocks):\n if i < len(self.blocks) - 1:\n x = blk(x)\n else:\n # return attention of the last block\n return blk(x, return_attention=True)\n\n def get_intermediate_layers(self, x, n=1):\n x = self.prepare_tokens(x)\n # we return the output tokens from the `n` last blocks\n output = []\n for i, blk in enumerate(self.blocks):\n x = blk(x)\n if len(self.blocks) - i <= n:\n output.append(self.norm(x))\n return output\n\n\ndef vit_tiny(patch_size=16, **kwargs):\n model = VisionTransformer(\n patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n return model\n\n\ndef vit_small(patch_size=16, **kwargs):\n model = VisionTransformer(\n patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n return model\n\n\ndef vit_base(patch_size=16, **kwargs):\n model = VisionTransformer(\n patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n return model\n\ndef swin_t(**kwargs):\n from config import get_config_swin_t\n config = get_config_swin_t()\n model = SwinTransformer(img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT)\n return model\n\n\nclass DINOHead(nn.Module):\n def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):\n super().__init__()\n nlayers = max(nlayers, 1)\n if nlayers == 1:\n self.mlp = nn.Linear(in_dim, bottleneck_dim)\n else:\n layers = [nn.Linear(in_dim, hidden_dim)]\n if use_bn:\n layers.append(nn.BatchNorm1d(hidden_dim))\n layers.append(nn.GELU())\n for _ in range(nlayers - 2):\n layers.append(nn.Linear(hidden_dim, hidden_dim))\n if use_bn:\n layers.append(nn.BatchNorm1d(hidden_dim))\n layers.append(nn.GELU())\n layers.append(nn.Linear(hidden_dim, bottleneck_dim))\n self.mlp = nn.Sequential(*layers)\n self.apply(self._init_weights)\n self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))\n self.last_layer.weight_g.data.fill_(1)\n if norm_last_layer:\n self.last_layer.weight_g.requires_grad = False\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n 
nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n x = self.mlp(x)\n x = nn.functional.normalize(x, dim=-1, p=2)\n x = self.last_layer(x)\n return x\n" ]
[ [ "torch.nn.Linear", "torch.rand", "torch.nn.Dropout", "torch.cat", "torch.nn.functional.normalize", "torch.nn.Identity", "torch.zeros", "torch.nn.init.constant_", "torch.nn.Sequential", "torch.linspace", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.nn.GELU" ] ]
tkortz/carla-car-detection
[ "37c00758e4396ec3cbca140722ba7cf4fd44c836" ]
[ "detection.py" ]
[ "import sys\r\nfrom pathlib import Path, PurePath\r\nsys.path.append(\"./models/research/object_detection/\")\r\nsys.path.append(\"./models/research/\")\r\n\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom utils import label_map_util\r\nfrom utils import visualization_utils as vis_util\r\nfrom image_to_video_converter import images_to_video\r\nfrom PIL import Image\r\n\r\nclass detector:\r\n def __init__(self, model_directory):\r\n model_path = os.path.join(model_directory, 'frozen_inference_graph.pb')\r\n labelmap_path = os.path.join(model_directory, 'labelmap.pbtxt')\r\n self.num_classes = 5\r\n self.label_map = label_map_util.load_labelmap(labelmap_path)\r\n self.categories = label_map_util.convert_label_map_to_categories(self.label_map,\r\n max_num_classes=self.num_classes,\r\n use_display_name=True)\r\n self.category_index = label_map_util.create_category_index(self.categories)\r\n self.detection_graph = tf.Graph()\r\n with self.detection_graph.as_default():\r\n self.od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(model_path, 'rb') as fid:\r\n self.serialized_graph = fid.read()\r\n self.od_graph_def.ParseFromString(self.serialized_graph)\r\n tf.import_graph_def(self.od_graph_def, name='')\r\n \r\n self.sess = tf.Session(graph=self.detection_graph)\r\n # Define input and output tensors (i.e. data) for the object detection classifier\r\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\r\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\r\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\r\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\r\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n def draw_boxes_for_image(self, frame, min_score_threshold):\r\n frame_expanded = np.expand_dims(frame, axis=0) \r\n (boxes, scores, classes, num) = self.sess.run(\r\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\r\n feed_dict={self.image_tensor: frame_expanded})\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n frame,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n self.category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=2,\r\n min_score_thresh=min_score_threshold)\r\n \r\n \"\"\"\r\n print(\"Self cateogry index\")\r\n print(self.category_index)\r\n print(\"Score/Classes\")\r\n for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores),np.squeeze(classes).astype(np.int32)):\r\n print(score, cls, self.category_index[cls])\r\n \"\"\"\r\n \r\n good_boxes = [box\r\n for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes).astype(np.int32))\r\n if score >= min_score_threshold and 'traffic' not in self.category_index[cls]['name']]\r\n return frame, good_boxes\r\n\r\n @staticmethod\r\n def denormalize(box, width, height):\r\n # Order taken from: https://www.tensorflow.org/api_docs/python/tf/image/draw_bounding_boxes\r\n y_min, x_min, y_max, x_max = box[0], box[1], box[2], box[3]\r\n x_min *= width\r\n x_max *= width\r\n y_min *= height\r\n y_max *= height\r\n return [x_min, x_max, y_min, y_max]\r\n\r\n @staticmethod\r\n def log_boxes(frame_number, boxes, ofile, width, height):\r\n for box in boxes:\r\n box = detector.denormalize(box, width, height)\r\n # Cast float coordinates to integers\r\n box = map(int, box)\r\n box = 
[frame_number] + list(box)\r\n line = \"|\".join(map(str, box))\r\n print(line, file=ofile)\r\n \r\n def process_image(self, video_name, frame_number, image_path,\r\n min_score_threshold, output_path, save_images):\r\n image = cv2.imread(image_path)\r\n image_name = Path(image_path).stem\r\n result_frame = None\r\n # Set up logging file\r\n log_name = os.path.join(output_path, f\"{video_name}_log.txt\")\r\n with open(log_name, 'a') as log_file:\r\n print(\"At Frame:\", frame_number)\r\n frame = np.array(image)\r\n # Draw boxes\r\n frame, boxes = self.draw_boxes_for_image(frame, min_score_threshold)\r\n height, width, layers = frame.shape\r\n # Log boxes\r\n detector.log_boxes(frame_number, boxes, log_file, width, height)\r\n # Save frame with boxes for output\r\n result_frame = frame\r\n # Save frame with boxes\r\n if save_images:\r\n frame_path = os.path.join(output_path, f\"{video_name}_frame_{image_name}.png\")\r\n print(\"Saving image at\", frame_path)\r\n vis_util.save_image_array_as_png(frame, frame_path)\r\n return result_frame\r\n\r\n def process_image_folder(self, folder_path, min_score_threshold, output_path, save_images):\r\n folder_name = Path(folder_path).stem\r\n frames = []\r\n file_names = os.listdir(folder_path)\r\n file_names.sort()\r\n for f in file_names:\r\n image_path = os.path.join(folder_path, f)\r\n if os.path.isfile(image_path):\r\n frame_number = len(frames)\r\n next_frame = self.process_image(folder_name, frame_number, image_path,\r\n min_score_threshold, output_path, save_images)\r\n frames.append(next_frame)\r\n if save_images:\r\n video_path = os.path.join(output_path, folder_name)\r\n video = cv2.VideoCapture(video_path)\r\n print(\"Saving video at\", video_path)\r\n images_to_video(frames, video_path, 30)\r\n \r\n def process_video(self, video_path, min_score_threshold, output_path, save_images):\r\n video_name = Path(video_path).stem\r\n # Open video file\r\n video = cv2.VideoCapture(video_path)\r\n # Set up logging file\r\n log_name = os.path.join(output_path, f\"{video_name}_log.txt\")\r\n with open(log_name, 'a') as log_file:\r\n frames = []\r\n while(video.isOpened()):\r\n ret, frame = video.read()\r\n if not ret:\r\n break\r\n frame_number = len(frames)\r\n print(\"At Frame:\", frame_number)\r\n # Draw boxes\r\n frame, boxes = self.draw_boxes_for_image(frame, min_score_threshold)\r\n height, width, layers = frame.shape\r\n # Log boxes\r\n detector.log_boxes(frame_number, boxes, log_file, width, height)\r\n # Save frame with boxes\r\n if save_images:\r\n frame_path = os.path.join(output_path, f\"{video_name}_frame_{frame_number}.png\")\r\n print(\"Saving image at\", frame_path)\r\n vis_util.save_image_array_as_png(frame, frame_path)\r\n frames.append(frame) \r\n # Save as video\r\n if save_images:\r\n out_video_path = os.path.join(output_path, f\"{video_name}.avi\")\r\n print(\"Saving video at\", out_video_path)\r\n images_to_video(frames, out_video_path, 30)\r\n # Clean up\r\n video.release()\r\n cv2.destroyAllWindows()\r\n \r\ndef default_detector():\r\n det = detector(\"./trained_model/detectors/\")\r\n return det\r\n\r\ndef default_inference():\r\n det = default_detector()\r\n det.process_video(\"./data/SignaledJunctionRightTurn_1.avi\", 0.70, \"./output/temp/\", False)\r\n return det\r\n\r\nif __name__ == \"__main__\":\r\n import argparse\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--model_path', help='Path to the frozen inference graph and labelmap files',\r\n required=True)\r\n parser.add_argument('--video_path', help='Path 
to the video', required=True)\r\n parser.add_argument('--min_threshold', type=float, help='Minimum score threshold for a bounding box to be drawn', default=0.7)\r\n parser.add_argument('--output_path', help='Path for storing output images and/or logs', required=True)\r\n parser.add_argument('--save_images', action='store_true')\r\n\r\n args = parser.parse_args()\r\n\r\n det = detector(args.model_path)\r\n det.process_video(args.video_path, args.min_threshold, args.output_path, args.save_images)\r\n" ]
[ [ "numpy.array", "tensorflow.Graph", "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.GraphDef", "tensorflow.gfile.GFile", "numpy.squeeze", "numpy.expand_dims" ] ]
jemorrison/prospector
[ "c34b7dd64e55dfc66c8596ea87633a73a7088730" ]
[ "prospect/likelihood/kernels.py" ]
[ "import numpy as np\n\n__all__ = [\"Kernel\", \"Uncorrelated\", \"ExpSquared\", \"Matern\"]\n\n\nclass Kernel(object):\n\n def __init__(self, parnames=[], name=''):\n \"\"\"\n :param parnames:\n A list of names of the kernel params, used to alias the intrinsic\n parameter names. This way different instances of the same kernel\n can have different parameter names.\n \"\"\"\n if len(parnames) == 0:\n parnames = self.kernel_params\n assert len(parnames) == len(self.kernel_params)\n self.param_alias = dict(zip(self.kernel_params, parnames))\n self.params = {}\n self.name = name\n\n def __repr__(self):\n return '{}({})'.format(self.__class__, self.param_alias.items())\n\n def update(self, **kwargs):\n \"\"\"Take a dictionary of parameters, pick out the properly named\n parameters according to the alias, and put them in the param state\n dictionary.\n \"\"\"\n for k in self.kernel_params:\n self.params[k] = kwargs[self.param_alias[k]]\n\n def __call__(self, metric, weights=None, ndim=2, **extras):\n \"\"\"Return a covariance matrix, given a metric. Optionally, multiply\n the output kernel by a weight function to induce non-stationarity.\n \"\"\"\n k = self.construct_kernel(metric)\n if ndim != k.ndim:\n # Either promote to 2 dimensions or demote to 1.\n # The latter should never happen...\n k = np.diag(k)\n if weights is None:\n return k\n elif ndim == 2:\n Sigma = weights[None, :] * k * weights[:, None]\n else:\n Sigma = k * weights**2\n return Sigma\n\n\nclass Uncorrelated(Kernel):\n\n # Simple uncorrelated noise model\n ndim = 1\n kernel_params = ['amplitude']\n\n def construct_kernel(self, metric):\n s = metric.shape[0]\n jitter = self.params['amplitude']**2 * np.ones(s)\n if metric.ndim == 2:\n return np.diag(jitter)\n elif metric.ndim == 1:\n return jitter\n else:\n raise(NotImplementedError)\n\n\nclass ExpSquared(Kernel):\n\n ndim = 2\n npars = 2\n kernel_params = ['amplitude', 'length']\n\n def construct_kernel(self, metric):\n \"\"\"Construct an exponential squared covariance matrix.\n \"\"\"\n a, l = self.params['amplitude'], self.params['length']\n Sigma = a**2 * np.exp(-(metric[:, None] - metric[None, :])**2 / (2 * l**2))\n return Sigma\n\n\nclass Matern(Kernel):\n\n ndim = 2\n npars = 2\n kernel_params = ['amplitude', 'length']\n\n def construct_kernel(self, metric):\n \"\"\"Construct a Matern kernel covariance matrix, for \\nu=3/2.\n \"\"\"\n a, l = self.params['amplitude'], self.params['length']\n Sigma = np.sqrt(3) * np.abs(metric[:, None] - metric[None, :]) / l\n Sigma = a**2 * (1 + Sigma) * np.exp(-Sigma)\n return Sigma\n\n\nclass Outliers(Kernel):\n kernel_params = ['amplitude', 'location']\n\n def construct_kernel(self, metric):\n raise(NotImplementedError)\n" ]
[ [ "numpy.ones", "numpy.exp", "numpy.abs", "numpy.sqrt", "numpy.diag" ] ]
YevheniiSemendiak/pytorch-tools
[ "11f895ac7af796ca786a3d94bb46de70d7fce87a" ]
[ "pytorch_tools/losses/angular.py" ]
[ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .base import Loss\n\n\nclass AdaCos(Loss):\n \"\"\"PyTorch implementation of AdaCos. See Ref[1] for paper\n\n This implementation is different from the most open-source implementations in following ways:\n 1) expects raw logits of size (bs x num_classes) not (bs, embedding_size)\n 2) despite AdaCos being dynamic, still add an optional margin parameter\n 3) calculate running average stats of B and θ, not batch-wise stats as in original paper\n 4) normalize input logits, not embeddings and weights\n\n Args:\n margin (float): margin in radians\n momentum (float): momentum for running average of B and θ\n\n Input:\n y_pred (torch.Tensor): shape BS x N_classes\n y_true (torch.Tensor): one-hot encoded. shape BS x N_classes\n Reference:\n [1] Adaptively Scaling Cosine Logits for Effectively Learning Deep Face Representations\n\n \"\"\"\n\n def __init__(self, embedding_size, num_classes, final_criterion, margin=0, momentum=0.95):\n super(AdaCos, self).__init__()\n self.final_criterion = final_criterion\n self.margin = margin\n self.momentum = momentum\n self.prev_s = 10\n self.running_B = 1000 # default value is chosen so that initial S is ~10\n self.running_theta = math.pi / 4\n self.eps = 1e-7\n self.register_parameter(\"weight\", torch.nn.Parameter(torch.zeros(num_classes, embedding_size)))\n nn.init.xavier_uniform_(self.weight)\n\n self.idx = 0\n\n def forward(self, embedding, y_true):\n\n cos_theta = F.linear(F.normalize(embedding), F.normalize(self.weight)).clamp(-1 + self.eps, 1 - self.eps)\n # cos_theta = torch.cos(torch.acos(cos_theta + self.margin))\n\n if y_true.dim() != 1:\n y_true_one_hot = y_true.float()\n else:\n y_true_one_hot = torch.zeros_like(cos_theta)\n y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1.0)\n\n with torch.no_grad():\n B_batch = cos_theta[y_true_one_hot.eq(0)].mul(self.prev_s).exp().sum().div(embedding.size(0))\n self.running_B = self.running_B * self.momentum + B_batch * (1 - self.momentum)\n theta = torch.acos(cos_theta.clamp(-1 + self.eps, 1 - self.eps))\n # originally authors use median, but I use mean\n theta_batch = theta[y_true_one_hot.ne(0)].mean().clamp_max(math.pi / 4)\n self.running_theta = self.running_theta * self.momentum + theta_batch * (1 - self.momentum)\n self.prev_s = self.running_B.log() / torch.cos(self.running_theta)\n\n self.idx += 1\n if self.idx % 1000 == 0:\n print(\n f\"\\nRunning B: {self.running_B:.2f}. Running theta: {self.running_theta:.2f}. Running S: {self.prev_s:.2f}\"\n )\n\n return self.final_criterion(cos_theta * self.prev_s, y_true_one_hot)\n" ]
[ [ "torch.zeros", "torch.cos", "torch.nn.functional.normalize", "torch.no_grad", "torch.nn.init.xavier_uniform_", "torch.zeros_like" ] ]
zchuruk/sandwichbot
[ "45879fae289c68e3a6a649958aa338e4cc307f77" ]
[ "sandwichbot/survey_processor.py" ]
[ "import csv\nimport os\nimport numpy as np\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsurvey_path = os.path.join(dir_path, '../data/test_two_entries.csv')\n\nNUM_QUESTIONS = 8\nRESPONSE_PERSON = ['pat', 'jeremy', 'zach']\nTASTE_PROFILE_TYPES = ['deliciousness', 'heaviness', 'reliability', 'frequency', 'between']\n\ni = 0\nperson_responses = []\n\nwith open(survey_path) as f:\n data = csv.reader(f, delimiter=',', quotechar='|')\n for row in data:\n if i == 1:\n sando_type_row = row\n if i > 1:\n person_responses.append(row)\n i += 1\n\nnum_sando_types = int(\n (len(sando_type_row) - 3)\n / NUM_QUESTIONS\n)\n\nend_index = 2 + num_sando_types\nsando_types = sando_type_row[2:end_index]\n\nglobal_taste_profile = {}\n\nj = 0\nfor response in person_responses:\n taste_profile = {}\n name = RESPONSE_PERSON[j]\n\n ## Loop through deliciousness, heaviness, etc.\n ## Pull out deliciousness, etc. scores and store in taste_profile[type]\n for data_type in TASTE_PROFILE_TYPES:\n start_index = 2 + (1 + TASTE_PROFILE_TYPES.index(data_type)) * num_sando_types\n end_index = start_index + num_sando_types\n raw_profile = np.array(response[start_index:end_index])\n if data_type in ['deliciousness', 'heaviness', 'reliability']:\n float_profile = raw_profile.astype(np.float) * 0.01\n taste_profile[data_type] = float_profile\n else:\n int_profile = raw_profile.astype(np.int)\n taste_profile[data_type] = int_profile\n\n profile_csv_path = os.path.join(dir_path, '../data/users/profiles', (name + '.csv'))\n\n with open(profile_csv_path, 'w') as f:\n profile_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n header = ['sando_type']\n for data_type in TASTE_PROFILE_TYPES:\n header.append(data_type)\n profile_writer.writerow(header)\n\n ## Loop through sando types and dump to CSV\n for sando in sando_types:\n sando_index = sando_types.index(sando)\n sando_row = [sando]\n for data_type in TASTE_PROFILE_TYPES:\n sando_row.append(taste_profile[data_type][sando_index])\n profile_writer.writerow(sando_row)\n" ]
[ [ "numpy.array" ] ]
kcetskcaz/detr_package
[ "0f5cad16c72ec37d7b596d37e12dc32cfb5ef6aa", "0f5cad16c72ec37d7b596d37e12dc32cfb5ef6aa" ]
[ "detr/hubconf/hubconf.py", "detr/models/detr.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport torch\n\nfrom detr.models.backbone import Backbone, Joiner\nfrom detr.models.detr import DETR, PostProcess\nfrom detr.models.position_encoding import PositionEmbeddingSine\nfrom detr.models.segmentation import DETRsegm, PostProcessPanoptic\nfrom detr.models.transformer import Transformer\n\ndependencies = [\"torch\", \"torchvision\"]\n\n\ndef _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):\n hidden_dim = 256\n backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)\n pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)\n backbone_with_pos_enc = Joiner(backbone, pos_enc)\n backbone_with_pos_enc.num_channels = backbone.num_channels\n transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)\n detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)\n if mask:\n return DETRsegm(detr)\n return detr\n\n\ndef detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):\n \"\"\"\n DETR R50 with 6 encoder and 6 decoder layers.\n\n Achieves 42/62.4 AP/AP50 on COCO val5k.\n \"\"\"\n model = _make_detr(\"resnet50\", dilation=False, num_classes=num_classes)\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth\", map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n if return_postprocessor:\n return model, PostProcess()\n return model\n\n\ndef detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):\n \"\"\"\n DETR-DC5 R50 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-50 has dilation to increase\n output resolution.\n Achieves 43.3/63.1 AP/AP50 on COCO val5k.\n \"\"\"\n model = _make_detr(\"resnet50\", dilation=True, num_classes=num_classes)\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth\", map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n if return_postprocessor:\n return model, PostProcess()\n return model\n\n\ndef detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):\n \"\"\"\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n Achieves 43.5/63.8 AP/AP50 on COCO val5k.\n \"\"\"\n model = _make_detr(\"resnet101\", dilation=False, num_classes=num_classes)\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth\", map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n if return_postprocessor:\n return model, PostProcess()\n return model\n\n\ndef detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):\n \"\"\"\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-101 has dilation to increase\n output resolution.\n Achieves 44.9/64.7 AP/AP50 on COCO val5k.\n \"\"\"\n model = _make_detr(\"resnet101\", dilation=True, num_classes=num_classes)\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth\", map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n if return_postprocessor:\n return model, PostProcess()\n return model\n\n\ndef detr_resnet50_panoptic(\n pretrained=False, 
num_classes=250, threshold=0.85, return_postprocessor=False\n):\n \"\"\"\n DETR R50 with 6 encoder and 6 decoder layers.\n Achieves 43.4 PQ on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n \"\"\"\n model = _make_detr(\"resnet50\", dilation=False, num_classes=num_classes, mask=True)\n is_thing_map = {i: i <= 90 for i in range(250)}\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth\",\n map_location=\"cpu\",\n check_hash=True,\n )\n model.load_state_dict(checkpoint[\"model\"])\n if return_postprocessor:\n return model, PostProcessPanoptic(is_thing_map, threshold=threshold)\n return model\n\n\ndef detr_resnet50_dc5_panoptic(\n pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False\n):\n \"\"\"\n DETR-DC5 R50 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-50 has dilation to increase\n output resolution.\n Achieves 44.6 on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n \"\"\"\n model = _make_detr(\"resnet50\", dilation=True, num_classes=num_classes, mask=True)\n is_thing_map = {i: i <= 90 for i in range(250)}\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth\",\n map_location=\"cpu\",\n check_hash=True,\n )\n model.load_state_dict(checkpoint[\"model\"])\n if return_postprocessor:\n return model, PostProcessPanoptic(is_thing_map, threshold=threshold)\n return model\n\n\ndef detr_resnet101_panoptic(\n pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False\n):\n \"\"\"\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n Achieves 45.1 PQ on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n \"\"\"\n model = _make_detr(\"resnet101\", dilation=False, num_classes=num_classes, mask=True)\n is_thing_map = {i: i <= 90 for i in range(250)}\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth\",\n map_location=\"cpu\",\n check_hash=True,\n )\n model.load_state_dict(checkpoint[\"model\"])\n if return_postprocessor:\n return model, PostProcessPanoptic(is_thing_map, threshold=threshold)\n return model\n", "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nDETR model and criterion classes.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom detr.util import box_ops\nfrom detr.util.misc import (NestedTensor, nested_tensor_from_tensor_list,\n accuracy, get_world_size, interpolate,\n is_dist_avail_and_initialized)\n\nfrom detr.models.backbone import build_backbone\nfrom detr.models.matcher import build_matcher\nfrom detr.models.segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,\n dice_loss, sigmoid_focal_loss)\nfrom detr.models.transformer import build_transformer\n\n\nclass DETR(nn.Module):\n \"\"\" This is the DETR module that performs object detection \"\"\"\n def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):\n \"\"\" Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. 
See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n DETR can detect in a single image. For COCO, we recommend 100 queries.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n \"\"\"\n super().__init__()\n self.num_queries = num_queries\n self.transformer = transformer\n hidden_dim = transformer.d_model\n self.class_embed = nn.Linear(hidden_dim, num_classes + 1)\n self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n self.query_embed = nn.Embedding(num_queries, hidden_dim)\n self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)\n self.backbone = backbone\n self.aux_loss = aux_loss\n\n def forward(self, samples: NestedTensor):\n \"\"\" The forward expects a NestedTensor, which consists of:\n - samples.tensor: batched images, of shape [batch_size x 3 x H x W]\n - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels\n\n It returns a dict with the following elements:\n - \"pred_logits\": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - \"pred_boxes\": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - \"aux_outputs\": Optional, only returned when auxilary losses are activated. It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n \"\"\"\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.backbone(samples)\n\n src, mask = features[-1].decompose()\n assert mask is not None\n hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]\n\n outputs_class = self.class_embed(hs)\n outputs_coord = self.bbox_embed(hs).sigmoid()\n out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}\n if self.aux_loss:\n out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{'pred_logits': a, 'pred_boxes': b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]\n\n\nclass SetCriterion(nn.Module):\n \"\"\" This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):\n \"\"\" Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. 
See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)\n\n def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)\n losses = {'loss_ce': loss_ce}\n\n if log:\n # TODO this should probably be a separate loss, not hacked in this one here\n losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses\n\n @torch.no_grad()\n def loss_cardinality(self, outputs, targets, indices, num_boxes):\n \"\"\" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients\n \"\"\"\n pred_logits = outputs['pred_logits']\n device = pred_logits.device\n tgt_lengths = torch.as_tensor([len(v[\"labels\"]) for v in targets], device=device)\n # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n losses = {'cardinality_error': card_err}\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n \"\"\"\n assert 'pred_boxes' in outputs\n idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs['pred_boxes'][idx]\n target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)\n\n loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')\n\n losses = {}\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(\n box_ops.box_cxcywh_to_xyxy(src_boxes),\n box_ops.box_cxcywh_to_xyxy(target_boxes)))\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n return losses\n\n def loss_masks(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n tgt_idx = self._get_tgt_permutation_idx(indices)\n src_masks = outputs[\"pred_masks\"]\n src_masks = src_masks[src_idx]\n masks = [t[\"masks\"] for t in targets]\n # TODO use valid to mask invalid areas due to padding in loss\n target_masks, valid = 
nested_tensor_from_tensor_list(masks).decompose()\n target_masks = target_masks.to(src_masks)\n target_masks = target_masks[tgt_idx]\n\n # upsample predictions to the target size\n src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],\n mode=\"bilinear\", align_corners=False)\n src_masks = src_masks[:, 0].flatten(1)\n\n target_masks = target_masks.flatten(1)\n target_masks = target_masks.view(src_masks.shape)\n losses = {\n \"loss_mask\": sigmoid_focal_loss(src_masks, target_masks, num_boxes),\n \"loss_dice\": dice_loss(src_masks, target_masks, num_boxes),\n }\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n 'labels': self.loss_labels,\n 'cardinality': self.loss_cardinality,\n 'boxes': self.loss_boxes,\n 'masks': self.loss_masks\n }\n assert loss in loss_map, f'do you really want to compute {loss} loss?'\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs, targets):\n \"\"\" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs = {'log': False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses\n\n\nclass PostProcess(nn.Module):\n \"\"\" This module converts the model's output into the format expected by the coco api\"\"\"\n @torch.no_grad()\n def forward(self, outputs, target_sizes):\n \"\"\" Perform the computation\n Parameters:\n outputs: raw outputs of the model\n 
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch\n For evaluation, this must be the original image size (before any data augmentation)\n For visualization, this should be the image size after data augment, but before padding\n \"\"\"\n out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']\n\n assert len(out_logits) == len(target_sizes)\n assert target_sizes.shape[1] == 2\n\n prob = F.softmax(out_logits, -1)\n scores, labels = prob[..., :-1].max(-1)\n\n # convert to [x0, y0, x1, y1] format\n boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)\n # and from relative [0, 1] to absolute [0, height] coordinates\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n boxes = boxes * scale_fct[:, None, :]\n\n results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n\n return results\n\n\nclass MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\ndef build(args):\n # the `num_classes` naming here is somewhat misleading.\n # it indeed corresponds to `max_obj_id + 1`, where max_obj_id\n # is the maximum id for a class in your dataset. For example,\n # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.\n # As another example, for a dataset that has a single class with id 1,\n # you should pass `num_classes` to be 2 (max_obj_id + 1).\n # For more details on this, check the following discussion\n # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223\n num_classes = 20 if args.dataset_file != 'coco' else 91\n if args.dataset_file == \"coco_panoptic\":\n # for panoptic, we just add a num_classes that is large enough to hold\n # max_obj_id + 1, but the exact value doesn't really matter\n num_classes = 250\n device = torch.device(args.device)\n\n backbone = build_backbone(args)\n\n transformer = build_transformer(args)\n\n model = DETR(\n backbone,\n transformer,\n num_classes=num_classes,\n num_queries=args.num_queries,\n aux_loss=args.aux_loss,\n )\n if args.masks:\n model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))\n matcher = build_matcher(args)\n weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}\n weight_dict['loss_giou'] = args.giou_loss_coef\n if args.masks:\n weight_dict[\"loss_mask\"] = args.mask_loss_coef\n weight_dict[\"loss_dice\"] = args.dice_loss_coef\n # TODO this is a hack\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n losses = ['labels', 'boxes', 'cardinality']\n if args.masks:\n losses += [\"masks\"]\n criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,\n eos_coef=args.eos_coef, losses=losses)\n criterion.to(device)\n postprocessors = {'bbox': PostProcess()}\n if args.masks:\n postprocessors['segm'] = PostProcessSegm()\n if args.dataset_file == \"coco_panoptic\":\n is_thing_map = {i: i <= 90 for i in range(201)}\n postprocessors[\"panoptic\"] = 
PostProcessPanoptic(is_thing_map, threshold=0.85)\n\n return model, criterion, postprocessors\n" ]
[ [ "torch.hub.load_state_dict_from_url" ], [ "torch.nn.Linear", "torch.device", "torch.cat", "torch.stack", "torch.nn.functional.l1_loss", "torch.no_grad", "torch.ones", "torch.full_like", "torch.nn.Conv2d", "torch.full", "torch.distributed.all_reduce", "torch.nn.functional.softmax", "torch.nn.Embedding" ] ]
yamaneco28/PaDiM-Anomaly-Detection-Localization-master
[ "79de8ee6472380a2a8f556f21f3b97720697fc97" ]
[ "main.py" ]
[ "import random\nfrom random import sample\nimport argparse\nimport numpy as np\nimport os\nimport pickle\nfrom tqdm import tqdm\nfrom collections import OrderedDict\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.covariance import LedoitWolf\nfrom scipy.spatial.distance import mahalanobis\nfrom scipy.ndimage import gaussian_filter\nfrom skimage import morphology\nfrom skimage.segmentation import mark_boundaries\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision.models import wide_resnet50_2, resnet18\nimport datasets.mvtec as mvtec\n\n\n# device setup\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if use_cuda else 'cpu')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('PaDiM')\n parser.add_argument('--data_path', type=str, default='D:/dataset/mvtec_anomaly_detection')\n parser.add_argument('--save_path', type=str, default='./mvtec_result')\n parser.add_argument('--arch', type=str, choices=['resnet18', 'wide_resnet50_2'], default='wide_resnet50_2')\n return parser.parse_args()\n\n\ndef main():\n\n args = parse_args()\n\n # load model\n if args.arch == 'resnet18':\n model = resnet18(pretrained=True, progress=True)\n t_d = 448\n d = 100\n elif args.arch == 'wide_resnet50_2':\n model = wide_resnet50_2(pretrained=True, progress=True)\n t_d = 1792\n d = 550\n model.to(device)\n model.eval()\n random.seed(1024)\n torch.manual_seed(1024)\n if use_cuda:\n torch.cuda.manual_seed_all(1024)\n\n idx = torch.tensor(sample(range(0, t_d), d))\n\n # set model's intermediate outputs\n outputs = []\n\n def hook(module, input, output):\n outputs.append(output)\n\n model.layer1[-1].register_forward_hook(hook)\n model.layer2[-1].register_forward_hook(hook)\n model.layer3[-1].register_forward_hook(hook)\n\n os.makedirs(os.path.join(args.save_path, 'temp_%s' % args.arch), exist_ok=True)\n # fig, ax = plt.subplots(1, 2, figsize=(20, 10))\n # fig_img_rocauc = ax[0]\n # fig_pixel_rocauc = ax[1]\n\n total_roc_auc = []\n total_pixel_roc_auc = []\n\n for class_name in mvtec.CLASS_NAMES:\n\n train_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=True)\n train_dataloader = DataLoader(train_dataset, batch_size=32, pin_memory=True)\n test_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=False)\n test_dataloader = DataLoader(test_dataset, batch_size=32, pin_memory=True)\n\n train_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])\n test_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])\n\n # extract train set features\n train_feature_filepath = os.path.join(args.save_path, 'temp_%s' % args.arch, 'train_%s.pkl' % class_name)\n if not os.path.exists(train_feature_filepath):\n for (x, _, _) in tqdm(train_dataloader, '| feature extraction | train | %s |' % class_name):\n # model prediction\n with torch.no_grad():\n _ = model(x.to(device))\n # get intermediate layer outputs\n for k, v in zip(train_outputs.keys(), outputs):\n train_outputs[k].append(v.cpu().detach())\n # initialize hook outputs\n outputs = []\n for k, v in train_outputs.items():\n train_outputs[k] = torch.cat(v, 0)\n\n # Embedding concat\n embedding_vectors = train_outputs['layer1']\n for layer_name in ['layer2', 'layer3']:\n embedding_vectors = embedding_concat(embedding_vectors, train_outputs[layer_name])\n\n # randomly select 
d dimension\n embedding_vectors = torch.index_select(embedding_vectors, 1, idx)\n # calculate multivariate Gaussian distribution\n B, C, H, W = embedding_vectors.size()\n embedding_vectors = embedding_vectors.view(B, C, H * W)\n mean = torch.mean(embedding_vectors, dim=0).numpy()\n cov = torch.zeros(C, C, H * W).numpy()\n I = np.identity(C)\n for i in range(H * W):\n # cov[:, :, i] = LedoitWolf().fit(embedding_vectors[:, :, i].numpy()).covariance_\n cov[:, :, i] = np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * I\n # save learned distribution\n train_outputs = [mean, cov]\n with open(train_feature_filepath, 'wb') as f:\n pickle.dump(train_outputs, f)\n else:\n print('load train set feature from: %s' % train_feature_filepath)\n with open(train_feature_filepath, 'rb') as f:\n train_outputs = pickle.load(f)\n\n gt_list = []\n gt_mask_list = []\n test_imgs = []\n\n # extract test set features\n for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):\n test_imgs.extend(x.cpu().detach().numpy())\n gt_list.extend(y.cpu().detach().numpy())\n gt_mask_list.extend(mask.cpu().detach().numpy())\n # model prediction\n with torch.no_grad():\n _ = model(x.to(device))\n # get intermediate layer outputs\n for k, v in zip(test_outputs.keys(), outputs):\n test_outputs[k].append(v.cpu().detach())\n # initialize hook outputs\n outputs = []\n for k, v in test_outputs.items():\n test_outputs[k] = torch.cat(v, 0)\n\n # Embedding concat\n embedding_vectors = test_outputs['layer1']\n for layer_name in ['layer2', 'layer3']:\n embedding_vectors = embedding_concat(embedding_vectors, test_outputs[layer_name])\n\n # randomly select d dimension\n embedding_vectors = torch.index_select(embedding_vectors, 1, idx)\n\n # calculate distance matrix\n B, C, H, W = embedding_vectors.size()\n embedding_vectors = embedding_vectors.view(B, C, H * W).numpy()\n dist_list = []\n for i in range(H * W):\n mean = train_outputs[0][:, i]\n conv_inv = np.linalg.inv(train_outputs[1][:, :, i])\n dist = [mahalanobis(sample[:, i], mean, conv_inv) for sample in embedding_vectors]\n dist_list.append(dist)\n\n dist_list = np.array(dist_list).transpose(1, 0).reshape(B, H, W)\n\n # upsample\n dist_list = torch.tensor(dist_list)\n score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear',\n align_corners=False).squeeze().numpy()\n\n # apply gaussian smoothing on the score map\n for i in range(score_map.shape[0]):\n score_map[i] = gaussian_filter(score_map[i], sigma=4)\n\n # Normalization\n max_score = score_map.max()\n min_score = score_map.min()\n scores = (score_map - min_score) / (max_score - min_score)\n\n # calculate image-level ROC AUC score\n img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)\n gt_list = np.asarray(gt_list)\n fpr, tpr, _ = roc_curve(gt_list, img_scores)\n img_roc_auc = roc_auc_score(gt_list, img_scores)\n total_roc_auc.append(img_roc_auc)\n print('image ROCAUC: %.3f' % (img_roc_auc))\n fig_img_rocauc.plot(fpr, tpr, label='%s img_ROCAUC: %.3f' % (class_name, img_roc_auc))\n\n # get optimal threshold\n gt_mask = np.asarray(gt_mask_list)\n precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), scores.flatten())\n a = 2 * precision * recall\n b = precision + recall\n f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)\n threshold = thresholds[np.argmax(f1)]\n\n # calculate per-pixel level ROCAUC\n fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())\n per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), 
scores.flatten())\n total_pixel_roc_auc.append(per_pixel_rocauc)\n print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))\n\n # fig_pixel_rocauc.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))\n # save_dir = args.save_path + '/' + f'pictures_{args.arch}'\n # os.makedirs(save_dir, exist_ok=True)\n # plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name)\n\n print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))\n fig_img_rocauc.title.set_text('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))\n fig_img_rocauc.legend(loc=\"lower right\")\n\n print('Average pixel ROCUAC: %.3f' % np.mean(total_pixel_roc_auc))\n fig_pixel_rocauc.title.set_text('Average pixel ROCAUC: %.3f' % np.mean(total_pixel_roc_auc))\n fig_pixel_rocauc.legend(loc=\"lower right\")\n\n fig.tight_layout()\n fig.savefig(os.path.join(args.save_path, 'roc_curve.png'), dpi=100)\n\n\ndef plot_fig(test_img, scores, gts, threshold, save_dir, class_name):\n num = len(scores)\n vmax = scores.max() * 255.\n vmin = scores.min() * 255.\n for i in range(num):\n img = test_img[i]\n img = denormalization(img)\n gt = gts[i].transpose(1, 2, 0).squeeze()\n heat_map = scores[i] * 255\n mask = scores[i]\n mask[mask > threshold] = 1\n mask[mask <= threshold] = 0\n kernel = morphology.disk(4)\n mask = morphology.opening(mask, kernel)\n mask *= 255\n vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')\n fig_img, ax_img = plt.subplots(1, 5, figsize=(12, 3))\n fig_img.subplots_adjust(right=0.9)\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n for ax_i in ax_img:\n ax_i.axes.xaxis.set_visible(False)\n ax_i.axes.yaxis.set_visible(False)\n ax_img[0].imshow(img)\n ax_img[0].title.set_text('Image')\n ax_img[1].imshow(gt, cmap='gray')\n ax_img[1].title.set_text('GroundTruth')\n ax = ax_img[2].imshow(heat_map, cmap='jet', norm=norm)\n ax_img[2].imshow(img, cmap='gray', interpolation='none')\n ax_img[2].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')\n ax_img[2].title.set_text('Predicted heat map')\n ax_img[3].imshow(mask, cmap='gray')\n ax_img[3].title.set_text('Predicted mask')\n ax_img[4].imshow(vis_img)\n ax_img[4].title.set_text('Segmentation result')\n left = 0.92\n bottom = 0.15\n width = 0.015\n height = 1 - 2 * bottom\n rect = [left, bottom, width, height]\n cbar_ax = fig_img.add_axes(rect)\n cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)\n cb.ax.tick_params(labelsize=8)\n font = {\n 'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 8,\n }\n cb.set_label('Anomaly Score', fontdict=font)\n\n fig_img.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)), dpi=100)\n plt.close()\n\n\ndef denormalization(x):\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n x = (((x.transpose(1, 2, 0) * std) + mean) * 255.).astype(np.uint8)\n\n return x\n\n\ndef embedding_concat(x, y):\n B, C1, H1, W1 = x.size()\n _, C2, H2, W2 = y.size()\n s = int(H1 / H2)\n x = F.unfold(x, kernel_size=s, dilation=1, stride=s)\n x = x.view(B, C1, -1, H2, W2)\n z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)\n for i in range(x.size(2)):\n z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)\n z = z.view(B, -1, H2 * W2)\n z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)\n\n return z\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.functional.unfold", "torch.cat", "numpy.mean", "torch.cuda.is_available", "matplotlib.pyplot.colorbar", "numpy.zeros_like", "matplotlib.pyplot.subplots", "torch.manual_seed", "torch.tensor", "torch.utils.data.DataLoader", "numpy.argmax", "torch.index_select", "numpy.linalg.inv", "torch.zeros", "torch.device", "numpy.array", "torch.cuda.manual_seed_all", "scipy.ndimage.gaussian_filter", "matplotlib.pyplot.close", "numpy.identity", "sklearn.metrics.roc_auc_score", "torch.nn.functional.fold", "numpy.asarray", "scipy.spatial.distance.mahalanobis", "torch.no_grad", "matplotlib.colors.Normalize", "torch.mean", "sklearn.metrics.roc_curve" ] ]
thomps9012/noraML
[ "1a1a74cc9b2170ce2cab5a339dfd9b33116cd910" ]
[ "demographics.py" ]
[ "import pandas as pd\nimport pprint\n\n\nall_client_diagnoses = pd.read_csv('2021_encounters_with_diagnoses.csv')\nprint(all_client_diagnoses.columns)\nnora_clients = all_client_diagnoses.drop_duplicates('Pid').drop(columns=['Date Of Service', 'Encounter', 'Age', 'Service Code'])\n\nnora_gender = nora_clients[nora_clients.Facility == 'Northern Ohio Recovery Association'].groupby('Gender').count()\n\nlorain_gender = nora_clients[nora_clients.Facility == 'Lorain'].groupby('Gender').count()\nprint('------------------------------------')\nprint('NORA All Client Gender Breakdown')\nprint('-------------------------------------')\npprint.pprint(nora_gender)\nprint('------------------------------------')\nprint('Lorain All Client Gender Breakdown')\nprint('-------------------------------------')\npprint.pprint(lorain_gender)\nprint('------------------------------------')" ]
[ [ "pandas.read_csv" ] ]
Cambricon/catch
[ "2625da389f25a67066d20fb6b0c38250ef98f8ab" ]
[ "test/cnnl/op_test/test_type.py" ]
[ "from __future__ import print_function\n\nimport sys\nimport logging\nimport os\nos.environ['ENABLE_CNNL_TRYCATCH'] = 'OFF' # pylint: disable=C0413\nfrom itertools import product\nimport unittest\nimport torch\nimport torch_mlu.core.mlu_model as ct\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(cur_dir + \"/../../\")\nfrom common_utils import testinfo, TestCase # pylint: disable=C0413,C0411\nlogging.basicConfig(level=logging.DEBUG)\n\nclass TestTypeOp(TestCase):\n # @unittest.skip(\"not test\")\n @testinfo()\n def test_type_param_empty(self):\n shape_list = [(512, 1024, 2, 2, 4), (2, 3, 4),\n (254, 254, 112, 1, 1, 3), (1000,), ()]\n dtype_list = [torch.half, torch.float,\n torch.uint8, torch.int8, torch.short,\n torch.int, torch.long, torch.bool]\n for shape, src_type in product(shape_list, dtype_list):\n if src_type in [torch.half, torch.float]:\n x = torch.randn(shape, dtype=src_type)\n elif src_type == torch.uint8:\n x = torch.randint(0, 255, shape).to(src_type)\n else:\n x = torch.randint(-128, 128, shape).to(src_type)\n out_cpu_type = x.type()\n out_mlu_type = x.to(ct.mlu_device()).type()\n l_tmp = out_cpu_type.split('.')\n l_tmp.insert(1, 'mlu')\n self.assertEqual('.'.join(l_tmp), out_mlu_type)\n\n # @unittest.skip(\"not test\")\n @testinfo()\n def test_type_param_empty_channels_last(self):\n shape_list = [(512, 1024, 2, 2), (2, 3, 4, 5),\n (254, 254, 112, 1), (2, 3, 24, 30), (1, 1, 1, 30)]\n dtype_list = [torch.half, torch.float,\n torch.uint8, torch.int8, torch.short,\n torch.int, torch.long, torch.bool]\n for shape, src_type in product(shape_list, dtype_list):\n if src_type in [torch.half, torch.float]:\n x = torch.randn(shape, dtype=src_type).to(memory_format = torch.channels_last)\n elif src_type == torch.uint8:\n x = torch.randint(0, 255, shape).to(src_type).to(\n memory_format = torch.channels_last)\n else:\n x = torch.randint(-128, 128, shape).to(src_type).to(\n memory_format = torch.channels_last)\n out_cpu_type = x.type()\n out_mlu_type = x.to(ct.mlu_device()).type()\n l_tmp = out_cpu_type.split('.')\n l_tmp.insert(1, 'mlu')\n self.assertEqual('.'.join(l_tmp), out_mlu_type)\n\n # @unittest.skip(\"not test\")\n @testinfo()\n def test_type_param_empty_not_dense(self):\n shape_list = [(16, 32, 2, 30), (2, 3, 4, 32),\n (24, 26, 112, 64), (2, 3, 24, 30), (1, 1, 1, 30)]\n dtype_list = [torch.half, torch.float,\n torch.uint8, torch.int8, torch.short,\n torch.int, torch.long, torch.bool]\n for shape, src_type in product(shape_list, dtype_list):\n if src_type in [torch.half, torch.float]:\n x = torch.randn(shape, dtype=src_type)[:, :, :, :15]\n elif src_type == torch.uint8:\n x = torch.randint(0, 255, shape).to(src_type)[:, :, :, :15]\n else:\n x = torch.randint(-128, 128, shape).to(src_type)[:, :, :, :15]\n out_cpu_type = x.type()\n out_mlu_type = x.to(ct.mlu_device()).type()\n l_tmp = out_cpu_type.split('.')\n l_tmp.insert(1, 'mlu')\n self.assertEqual('.'.join(l_tmp), out_mlu_type)\n\n # @unittest.skip(\"not test\")\n @testinfo()\n def test_type_param_dtype(self):\n shape_list = [(512, 1024, 2, 2, 4), (2, 3, 4),\n (254, 254, 112, 1, 1, 3), (1000,), ()]\n cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool},\n torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool},\n torch.long: {torch.float, torch.half, torch.short, torch.int8},\n torch.int: {torch.float, torch.half, torch.short, torch.int8},\n torch.short: {torch.float, torch.half, torch.int},\n torch.int8: {torch.float, torch.half, torch.int},\n 
torch.uint8: {torch.float, torch.half},\n torch.bool: {torch.float, torch.half, torch.int},\n }\n for shape, src_type in product(shape_list, cast_map.keys()):\n for dst_type in cast_map[src_type]:\n if src_type in [torch.half, torch.float]:\n x = torch.randn(shape, dtype=src_type)\n elif src_type == torch.uint8:\n x = torch.randint(0, 255, shape).to(src_type)\n else:\n x = torch.randint(-128, 128, shape).to(src_type)\n for is_async in [False, True]:\n out_cpu = x.type(dst_type, non_blocking=is_async)\n out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async)\n self.assertEqual(out_mlu.dtype, dst_type)\n self.assertEqual(out_cpu, out_mlu.cpu())\n\n # @unittest.skip(\"not test\")\n @testinfo()\n def test_type_param_dtype_channels_last(self):\n shape_list = [(512, 1024, 2, 2), (2, 3, 4, 16),\n (254, 254, 112, 1), (2, 3, 24, 30), (1, 1, 1, 30)]\n cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool},\n torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool},\n torch.long: {torch.float, torch.half, torch.short, torch.int8},\n torch.int: {torch.float, torch.half, torch.short, torch.int8},\n torch.short: {torch.float, torch.half, torch.int},\n torch.int8: {torch.float, torch.half, torch.int},\n torch.uint8: {torch.float, torch.half},\n torch.bool: {torch.float, torch.half, torch.int},\n }\n for shape, src_type in product(shape_list, cast_map.keys()):\n for dst_type in cast_map[src_type]:\n if src_type in [torch.half, torch.float]:\n x = torch.randn(shape, dtype=src_type).to(memory_format = torch.channels_last)\n elif src_type == torch.uint8:\n x = torch.randint(0, 255, shape).to(src_type).to(\n memory_format = torch.channels_last)\n else:\n x = torch.randint(-128, 128, shape).to(src_type).to(\n memory_format = torch.channels_last)\n for is_async in [False, True]:\n out_cpu = x.type(dst_type, non_blocking=is_async)\n out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async)\n self.assertEqual(out_mlu.dtype, dst_type)\n self.assertEqual(out_cpu, out_mlu.cpu())\n\n # @unittest.skip(\"not test\")\n @testinfo()\n def test_type_param_dtype_not_dense(self):\n shape_list = [(16, 32, 2, 30), (2, 3, 4, 32),\n (24, 26, 112, 64), (2, 3, 24, 30), (1, 1, 1, 30)]\n cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool},\n torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool},\n torch.long: {torch.float, torch.half, torch.short, torch.int8},\n torch.int: {torch.float, torch.half, torch.short, torch.int8},\n torch.short: {torch.float, torch.half, torch.int},\n torch.int8: {torch.float, torch.half, torch.int},\n torch.uint8: {torch.float, torch.half},\n torch.bool: {torch.float, torch.half, torch.int},\n }\n for shape, src_type in product(shape_list, cast_map.keys()):\n for dst_type in cast_map[src_type]:\n if src_type in [torch.half, torch.float]:\n x = torch.randn(shape, dtype=src_type)[:, :, :, :15]\n elif src_type == torch.uint8:\n x = torch.randint(0, 255, shape).to(src_type)[:, :, :, :15]\n else:\n x = torch.randint(-128, 128, shape).to(src_type)[:, :, :, :15]\n for is_async in [False, True]:\n out_cpu = x.type(dst_type, non_blocking=is_async)\n out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async)\n self.assertEqual(out_mlu.dtype, dst_type)\n self.assertEqual(out_cpu, out_mlu.cpu())\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.randint", "torch.randn" ] ]
philipp-leitl/aapy
[ "5e4688191f3eda60b86c84721db07d8dc2323968" ]
[ "tests/test_js.py" ]
[ "import json\nfrom datetime import datetime\n\nimport jsonschema\nimport mock\nimport numpy\nimport pytest\nimport utils\n\nfrom aa import js\n\nEARLY_DATE = datetime(2001, 1, 1, 1, 1)\nLATE_DATE = datetime(2010, 2, 3, 4, 5)\nEMPTY_ARRAY = numpy.array((0,))\n\n\n@pytest.fixture\ndef json_fetcher():\n return js.JsonFetcher(\"localhost\", 5000)\n\n\n@pytest.mark.parametrize(\"filename\", [\"event\", \"string_event\", \"waveform\"])\ndef test_json_matches_schema(filename):\n schema_string = utils.load_from_file(\"aa_schema.json\")\n schema_obj = json.loads(schema_string)\n json_string = utils.load_from_file(filename + \".json\")\n json_obj = json.loads(json_string)\n jsonschema.validate(json_obj, schema_obj)\n\n\ndef test_JsonFetcher_constructs_url_correctly(json_fetcher):\n assert json_fetcher._url == \"http://localhost:5000/retrieval/data/getData.json\"\n\n\ndef test_JsonFetcher_decodes_empty_json_correctly(dummy_pv, empty_data, json_fetcher):\n mock_response = utils.mock_response(json_str=\"[]\")\n json_fetcher._fetch_data = mock_response\n aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)\n assert aa_data == empty_data\n\n\ndef test_JsonFetcher_decodes_single_event_correctly(dummy_pv, json_fetcher):\n event_json = utils.load_from_file(\"event.json\")\n mock_response = utils.mock_response(json_str=event_json)\n json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)\n aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)\n assert aa_data.pv == dummy_pv\n assert aa_data.values[0] == 1.23\n assert aa_data.timestamps[0] == 1502963093.000000123\n assert aa_data.severities[0] == 1\n\n\ndef test_JsonFetcher_decodes_string_event_correctly(dummy_pv, json_fetcher):\n event_json = utils.load_from_file(\"string_event.json\")\n mock_response = utils.mock_response(json_str=event_json)\n json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)\n aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)\n assert aa_data.pv == dummy_pv\n assert aa_data.values[0] == \"2015-01-08 19:47:01 UTC\"\n assert aa_data.timestamps[0] == 1507712433.235971000\n assert aa_data.severities[0] == 0\n\n\ndef test_JsonFetcher_decodes_waveform_events_correctly(\n dummy_pv, json_fetcher, data_2d_2_events\n):\n waveform_json = utils.load_from_file(\"waveform.json\")\n mock_response = utils.mock_response(json_str=waveform_json)\n json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)\n aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)\n assert aa_data == data_2d_2_events\n\n\ndef test_JsonFetcher_decodes_enum_events_correctly(dummy_pv, json_fetcher):\n enum_json = utils.load_from_file(\"enum_event.json\")\n mock_response = utils.mock_response(json_str=enum_json)\n json_fetcher._fetch_data = mock.MagicMock(return_value=mock_response)\n aa_data = json_fetcher.get_values(dummy_pv, EARLY_DATE, LATE_DATE)\n assert aa_data.enum_options[5] == \"Special\"\n assert aa_data.enum_strings[0] == \"User\"\n assert aa_data[0].enum_string[0] == \"User\"\n assert aa_data.values[0] == 4\n" ]
[ [ "numpy.array" ] ]
reverendbedford/scikit-fem
[ "bc57d968e56e6b89a99e35eac26ef7bc81b7a46a" ]
[ "skfem/io/meshio.py" ]
[ "\"\"\"Import/export any formats supported by meshio.\"\"\"\n\nimport meshio\nimport numpy as np\nimport skfem\n\n\nMESH_TYPE_MAPPING = {\n 'tetra': skfem.MeshTet1,\n 'tetra10': skfem.MeshTet2,\n 'hexahedron': skfem.MeshHex1,\n 'hexahedron27': skfem.MeshHex2,\n 'wedge': skfem.MeshWedge1,\n 'triangle': skfem.MeshTri1,\n 'triangle6': skfem.MeshTri2,\n 'quad': skfem.MeshQuad1,\n 'quad9': skfem.MeshQuad2,\n 'line': skfem.MeshLine1,\n}\n\nBOUNDARY_TYPE_MAPPING = {\n 'line': 'vertex',\n 'triangle': 'line',\n 'quad': 'line',\n 'tetra': 'triangle',\n 'hexahedron': 'quad',\n 'tetra10': 'triangle', # TODO support quadratic facets\n 'triangle6': 'line', # TODO\n 'quad9': 'line', # TODO\n 'hexahedron27': 'quad', # TODO\n}\n\nTYPE_MESH_MAPPING = {MESH_TYPE_MAPPING[k]: k\n for k in dict(reversed(list(MESH_TYPE_MAPPING.items())))}\n\n\nHEX_MAPPING = [0, 3, 6, 2, 1, 5, 7, 4,\n 10, 16, 14, 9, 12, 18, 17, 11, 8, 15, 19, 13,\n 20, 25, 22, 23, 21, 24,\n 26]\nINV_HEX_MAPPING = [HEX_MAPPING.index(i)\n for i in range(len(HEX_MAPPING))]\n\n\ndef from_meshio(m,\n out=None,\n int_data_to_sets=False,\n force_meshio_type=None):\n\n cells = m.cells_dict\n meshio_type = None\n\n if force_meshio_type is None:\n # detect 3D\n for k in cells:\n if k in {'tetra',\n 'hexahedron',\n 'tetra10',\n 'hexahedron27',\n 'wedge'}:\n meshio_type = k\n break\n\n if meshio_type is None:\n # detect 2D\n for k in cells:\n if k in {'triangle',\n 'quad',\n 'triangle6',\n 'quad9'}:\n meshio_type = k\n break\n\n if meshio_type is None:\n # detect 1D\n for k in cells:\n if k == 'line':\n meshio_type = k\n break\n else:\n meshio_type = force_meshio_type\n\n if meshio_type is None:\n raise NotImplementedError(\"Mesh type(s) not supported \"\n \"in import: {}.\".format(cells.keys()))\n\n mesh_type = MESH_TYPE_MAPPING[meshio_type]\n\n # create p and t\n p = np.ascontiguousarray(mesh_type.strip_extra_coordinates(m.points).T)\n t = np.ascontiguousarray(cells[meshio_type].T)\n\n # reorder t if needed\n if meshio_type == 'hexahedron':\n t = t[INV_HEX_MAPPING[:8]]\n elif meshio_type == 'hexahedron27':\n t = t[INV_HEX_MAPPING]\n\n if int_data_to_sets:\n m.int_data_to_sets()\n\n subdomains = {}\n boundaries = {}\n\n # parse any subdomains from cell_sets\n if m.cell_sets:\n subdomains = {k: v[meshio_type]\n for k, v in m.cell_sets_dict.items()\n if meshio_type in v}\n\n # create temporary mesh for matching boundary elements\n mtmp = mesh_type(p, t)\n bnd_type = BOUNDARY_TYPE_MAPPING[meshio_type]\n\n # parse boundaries from cell_sets\n if m.cell_sets and bnd_type in m.cells_dict:\n facets = {\n k: [tuple(f) for f in np.sort(m.cells_dict[bnd_type][v[bnd_type]])]\n for k, v in m.cell_sets_dict.items()\n if bnd_type in v and k.split(\":\")[0] != \"gmsh\"\n }\n boundaries = {k: np.array([i for i, f in\n enumerate(map(tuple, mtmp.facets.T))\n if f in v])\n for k, v in facets.items()}\n\n # MSH 2.2 tag parsing\n if m.cell_data and m.field_data:\n try:\n elements_tag = m.cell_data_dict['gmsh:physical'][meshio_type]\n subdomains = {}\n tags = np.unique(elements_tag)\n\n def find_tagname(tag):\n for key in m.field_data:\n if m.field_data[key][0] == tag:\n return key\n return None\n\n for tag in tags:\n t_set = np.nonzero(tag == elements_tag)[0]\n subdomains[find_tagname(tag)] = t_set\n\n # find tagged boundaries\n if bnd_type in m.cell_data_dict['gmsh:physical']:\n facets = m.cells_dict[bnd_type]\n facets_tag = m.cell_data_dict['gmsh:physical'][bnd_type]\n\n # put meshio facets to dict\n dic = {tuple(np.sort(facets[i])): facets_tag[i]\n for i in 
range(facets.shape[0])}\n\n # get index of corresponding Mesh.facets for each meshio\n # facet found in the dict\n index = np.array([[dic[tuple(np.sort(mtmp.facets[:, i]))], i]\n for i in mtmp.boundary_facets()\n if tuple(np.sort(mtmp.facets[:, i])) in dic])\n\n # read meshio tag numbers and names\n tags = index[:, 0]\n boundaries = {}\n for tag in np.unique(tags):\n tagindex = np.nonzero(tags == tag)[0]\n boundaries[find_tagname(tag)] = index[tagindex, 1]\n\n except Exception:\n pass\n\n # attempt parsing skfem tags\n if m.cell_data:\n _boundaries, _subdomains = mtmp._decode_cell_data(m.cell_data)\n boundaries.update(_boundaries)\n subdomains.update(_subdomains)\n\n # export mesh data\n if out is not None and isinstance(out, list):\n for i, field in enumerate(out):\n out[i] = getattr(m, field)\n\n return mesh_type(\n p,\n t,\n None if len(boundaries) == 0 else boundaries,\n None if len(subdomains) == 0 else subdomains,\n )\n\n\ndef from_file(filename, out, **kwargs):\n return from_meshio(meshio.read(filename), out, **kwargs)\n\n\ndef to_meshio(mesh,\n point_data=None,\n cell_data=None,\n encode_cell_data=True,\n encode_point_data=False):\n\n t = mesh.dofs.element_dofs.copy()\n if isinstance(mesh, skfem.MeshHex2):\n t = t[HEX_MAPPING]\n elif isinstance(mesh, skfem.MeshHex):\n t = t[HEX_MAPPING[:8]]\n\n mtype = TYPE_MESH_MAPPING[type(mesh)]\n cells = {mtype: t.T}\n\n if encode_cell_data:\n if cell_data is None:\n cell_data = {}\n cell_data.update(mesh._encode_cell_data())\n\n if encode_point_data:\n if point_data is None:\n point_data = {}\n point_data.update(mesh._encode_point_data())\n\n mio = meshio.Mesh(\n mesh.p.T,\n cells,\n point_data=point_data,\n cell_data=cell_data,\n )\n\n return mio\n\n\ndef to_file(mesh,\n filename,\n point_data=None,\n cell_data=None,\n encode_cell_data=True,\n encode_point_data=False,\n **kwargs):\n\n meshio.write(filename,\n to_meshio(mesh,\n point_data,\n cell_data,\n encode_cell_data,\n encode_point_data),\n **kwargs)\n" ]
[ [ "numpy.ascontiguousarray", "numpy.sort", "numpy.unique", "numpy.nonzero" ] ]
yudongcao/OpenFermion
[ "6d3e81f0ea382c69d6e5b376afd0e8f88df8b2fa" ]
[ "src/openfermion/transforms/_jordan_wigner_test.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests _jordan_wigner.py.\"\"\"\nfrom __future__ import absolute_import\nimport numpy\n\nimport unittest\n\nfrom openfermion.ops import (FermionOperator,\n hermitian_conjugated,\n InteractionOperator,\n normal_ordered,\n number_operator,\n QubitOperator)\nfrom openfermion.transforms import (get_interaction_operator,\n reverse_jordan_wigner)\nfrom openfermion.transforms._jordan_wigner import (\n jordan_wigner, jordan_wigner_one_body, jordan_wigner_two_body,\n jordan_wigner_interaction_op)\n\n\nclass JordanWignerTransformTest(unittest.TestCase):\n def setUp(self):\n self.n_qubits = 5\n\n def test_bad_input(self):\n with self.assertRaises(TypeError):\n jordan_wigner(3)\n\n def test_transm_raise3(self):\n raising = jordan_wigner(FermionOperator(((3, 1),)))\n self.assertEqual(len(raising.terms), 2)\n\n correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, -0.5j)\n\n self.assertEqual(raising.terms[correct_operators_x], 0.5)\n self.assertEqual(raising.terms[correct_operators_y], -0.5j)\n self.assertTrue(raising.isclose(qtermx + qtermy))\n\n def test_transm_raise1(self):\n raising = jordan_wigner(FermionOperator(((1, 1),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, -0.5j)\n\n self.assertEqual(raising.terms[correct_operators_x], 0.5)\n self.assertEqual(raising.terms[correct_operators_y], -0.5j)\n self.assertTrue(raising.isclose(qtermx + qtermy))\n\n def test_transm_lower3(self):\n lowering = jordan_wigner(FermionOperator(((3, 0),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering.isclose(qtermx + qtermy))\n\n def test_transm_lower2(self):\n lowering = jordan_wigner(FermionOperator(((2, 0),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering.isclose(qtermx + qtermy))\n\n def test_transm_lower1(self):\n lowering = jordan_wigner(FermionOperator(((1, 0),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n 
self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering.isclose(qtermx + qtermy))\n\n def test_transm_lower0(self):\n lowering = jordan_wigner(FermionOperator(((0, 0),)))\n\n correct_operators_x = ((0, 'X'),)\n correct_operators_y = ((0, 'Y'),)\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering.isclose(qtermx + qtermy))\n\n def test_transm_raise3lower0(self):\n # recall that creation gets -1j on Y and annihilation gets +1j on Y.\n term = jordan_wigner(FermionOperator(((3, 1), (0, 0))))\n self.assertEqual(term.terms[((0, 'X'), (1, 'Z'), (2, 'Z'), (3, 'Y'))],\n 0.25 * 1 * -1j)\n self.assertEqual(term.terms[((0, 'Y'), (1, 'Z'), (2, 'Z'), (3, 'Y'))],\n 0.25 * 1j * -1j)\n self.assertEqual(term.terms[((0, 'Y'), (1, 'Z'), (2, 'Z'), (3, 'X'))],\n 0.25 * 1j * 1)\n self.assertEqual(term.terms[((0, 'X'), (1, 'Z'), (2, 'Z'), (3, 'X'))],\n 0.25 * 1 * 1)\n\n def test_transm_number(self):\n n = number_operator(self.n_qubits, 3)\n n_jw = jordan_wigner(n)\n self.assertEqual(n_jw.terms[((3, 'Z'),)], -0.5)\n self.assertEqual(n_jw.terms[()], 0.5)\n self.assertEqual(len(n_jw.terms), 2)\n\n def test_ccr_offsite_even_ca(self):\n c2 = FermionOperator(((2, 1),))\n a4 = FermionOperator(((4, 0),))\n\n self.assertTrue(normal_ordered(c2 * a4).isclose(\n normal_ordered(-a4 * c2)))\n self.assertTrue(jordan_wigner(c2 * a4).isclose(\n jordan_wigner(-a4 * c2)))\n\n def test_ccr_offsite_odd_ca(self):\n c1 = FermionOperator(((1, 1),))\n a4 = FermionOperator(((4, 0),))\n self.assertTrue(normal_ordered(c1 * a4).isclose(\n normal_ordered(-a4 * c1)))\n\n self.assertTrue(jordan_wigner(c1 * a4).isclose(\n jordan_wigner(-a4 * c1)))\n\n def test_ccr_offsite_even_cc(self):\n c2 = FermionOperator(((2, 1),))\n c4 = FermionOperator(((4, 1),))\n self.assertTrue(normal_ordered(c2 * c4).isclose(\n normal_ordered(-c4 * c2)))\n\n self.assertTrue(jordan_wigner(c2 * c4).isclose(\n jordan_wigner(-c4 * c2)))\n\n def test_ccr_offsite_odd_cc(self):\n c1 = FermionOperator(((1, 1),))\n c4 = FermionOperator(((4, 1),))\n self.assertTrue(normal_ordered(c1 * c4).isclose(\n normal_ordered(-c4 * c1)))\n\n self.assertTrue(jordan_wigner(c1 * c4).isclose(\n jordan_wigner(-c4 * c1)))\n\n def test_ccr_offsite_even_aa(self):\n a2 = FermionOperator(((2, 0),))\n a4 = FermionOperator(((4, 0),))\n self.assertTrue(normal_ordered(a2 * a4).isclose(\n normal_ordered(-a4 * a2)))\n\n self.assertTrue(jordan_wigner(a2 * a4).isclose(\n jordan_wigner(-a4 * a2)))\n\n def test_ccr_offsite_odd_aa(self):\n a1 = FermionOperator(((1, 0),))\n a4 = FermionOperator(((4, 0),))\n self.assertTrue(normal_ordered(a1 * a4).isclose(\n normal_ordered(-a4 * a1)))\n\n self.assertTrue(jordan_wigner(a1 * a4).isclose(\n jordan_wigner(-a4 * a1)))\n\n def test_ccr_onsite(self):\n c1 = FermionOperator(((1, 1),))\n a1 = hermitian_conjugated(c1)\n self.assertTrue(normal_ordered(c1 * a1).isclose(\n FermionOperator(()) - normal_ordered(a1 * c1)))\n self.assertTrue(jordan_wigner(c1 * a1).isclose(\n QubitOperator(()) - jordan_wigner(a1 * c1)))\n\n def test_jordan_wigner_transm_op(self):\n n = number_operator(self.n_qubits)\n n_jw = jordan_wigner(n)\n self.assertEqual(self.n_qubits + 1, len(n_jw.terms))\n self.assertEqual(self.n_qubits / 2., n_jw.terms[()])\n for qubit in range(self.n_qubits):\n operators = ((qubit, 
'Z'),)\n self.assertEqual(n_jw.terms[operators], -0.5)\n\n\nclass InteractionOperatorsJWTest(unittest.TestCase):\n\n def setUp(self):\n self.n_qubits = 5\n self.constant = 0.\n self.one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)\n self.two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n self.interaction_operator = InteractionOperator(self.constant,\n self.one_body,\n self.two_body)\n\n def test_jordan_wigner_one_body(self):\n # Make sure it agrees with jordan_wigner(FermionTerm).\n for p in range(self.n_qubits):\n for q in range(self.n_qubits):\n # Get test qubit operator.\n test_operator = jordan_wigner_one_body(p, q)\n\n # Get correct qubit operator.\n fermion_term = FermionOperator(((p, 1), (q, 0)))\n correct_op = jordan_wigner(fermion_term)\n hermitian_conjugate = hermitian_conjugated(fermion_term)\n if not fermion_term.isclose(hermitian_conjugate):\n correct_op += jordan_wigner(hermitian_conjugate)\n\n self.assertTrue(test_operator.isclose(correct_op))\n\n def test_jordan_wigner_two_body(self):\n # Make sure it agrees with jordan_wigner(FermionTerm).\n for p in range(self.n_qubits):\n for q in range(self.n_qubits):\n for r in range(self.n_qubits):\n for s in range(self.n_qubits):\n # Get test qubit operator.\n test_operator = jordan_wigner_two_body(p, q, r, s)\n\n # Get correct qubit operator.\n fermion_term = FermionOperator(((p, 1), (q, 1),\n (r, 0), (s, 0)))\n correct_op = jordan_wigner(fermion_term)\n hermitian_conjugate = hermitian_conjugated(\n fermion_term)\n if not fermion_term.isclose(hermitian_conjugate):\n if p == r and q == s:\n pass\n else:\n correct_op += jordan_wigner(\n hermitian_conjugate)\n\n self.assertTrue(test_operator.isclose(correct_op),\n str(test_operator - correct_op))\n\n def test_jordan_wigner_twobody_interaction_op_allunique(self):\n test_op = FermionOperator('1^ 2^ 3 4')\n test_op += hermitian_conjugated(test_op)\n\n retransformed_test_op = reverse_jordan_wigner(jordan_wigner(\n get_interaction_operator(test_op)))\n\n self.assertTrue(normal_ordered(retransformed_test_op).isclose(\n normal_ordered(test_op)))\n\n def test_jordan_wigner_twobody_interaction_op_reversal_symmetric(self):\n test_op = FermionOperator('1^ 2^ 2 1')\n test_op += hermitian_conjugated(test_op)\n self.assertTrue(jordan_wigner(test_op).isclose(\n jordan_wigner(get_interaction_operator(test_op))))\n\n def test_jordan_wigner_interaction_op_too_few_n_qubits(self):\n with self.assertRaises(ValueError):\n jordan_wigner_interaction_op(self.interaction_operator,\n self.n_qubits - 2)\n\n def test_jordan_wigner_interaction_op_with_zero_term(self):\n test_op = FermionOperator('1^ 2^ 3 4')\n test_op += hermitian_conjugated(test_op)\n\n interaction_op = get_interaction_operator(test_op)\n interaction_op.constant = 0.0\n\n retransformed_test_op = reverse_jordan_wigner(jordan_wigner(\n interaction_op))\n\n\nclass GetInteractionOperatorTest(unittest.TestCase):\n\n def setUp(self):\n self.n_qubits = 5\n self.constant = 0.\n self.one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)\n self.two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n\n def test_get_interaction_operator_identity(self):\n interaction_operator = InteractionOperator(-2j, self.one_body,\n self.two_body)\n qubit_operator = jordan_wigner(interaction_operator)\n self.assertTrue(qubit_operator.isclose(-2j * QubitOperator(())))\n self.assertEqual(interaction_operator,\n get_interaction_operator(reverse_jordan_wigner(\n 
qubit_operator), self.n_qubits))\n\n def test_get_interaction_operator_one_body(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('2^ 2'), self.n_qubits)\n one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)\n one_body[2, 2] = 1.\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, one_body, self.two_body))\n\n def test_get_interaction_operator_one_body_twoterm(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('2^ 3', -2j) + FermionOperator('3^ 2', 3j),\n self.n_qubits)\n one_body = numpy.zeros((self.n_qubits, self.n_qubits), complex)\n one_body[2, 3] = -2j\n one_body[3, 2] = 3j\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, one_body, self.two_body))\n\n def test_get_interaction_operator_two_body(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('2^ 2 3^ 4'), self.n_qubits)\n two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n two_body[3, 2, 4, 2] = -1.\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, self.one_body, two_body))\n\n def test_get_interaction_operator_two_body_distinct(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('0^ 1^ 2 3'), self.n_qubits)\n two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n two_body[1, 0, 3, 2] = 1.\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, self.one_body, two_body))\n" ]
[ [ "numpy.zeros" ] ]
timryanb/mphys
[ "74560a163034a0006a17811ba1206bab00f1f775" ]
[ "mphys/integrated_forces.py" ]
[ "import numpy as np\nimport openmdao.api as om\n\nclass IntegratedSurfaceForces(om.ExplicitComponent):\n def setup(self):\n self.add_input('aoa',desc = 'angle of attack', units='rad',tags=['mphys_input'])\n self.add_input('yaw',desc = 'yaw angle',units='rad',tags=['mphys_input'])\n self.add_input('ref_area', val = 1.0,tags=['mphys_input'])\n self.add_input('moment_center',shape=3,tags=['mphys_input'])\n self.add_input('ref_length', val = 1.0,tags=['mphys_input'])\n self.add_input('q_inf', val = 1.0,tags=['mphys_input'])\n\n self.add_input('x_aero', shape_by_conn=True,\n distributed=True,\n desc = 'surface coordinates',\n tags=['mphys_coupling'])\n self.add_input('f_aero', shape_by_conn=True,\n distributed=True,\n desc = 'dimensional forces at nodes',\n tags=['mphys_coupling'])\n\n self.add_output('C_L', desc = 'Lift coefficient', tags=['mphys_result'])\n self.add_output('C_D', desc = 'Drag coefficient', tags=['mphys_result'])\n self.add_output('C_X', desc = 'X Force coefficient', tags=['mphys_result'])\n self.add_output('C_Y', desc = 'Y Force coefficient', tags=['mphys_result'])\n self.add_output('C_Z', desc = 'Z Force coefficient', tags=['mphys_result'])\n self.add_output('CM_X', desc = 'X Moment coefficient', tags=['mphys_result'])\n self.add_output('CM_Y', desc = 'Y Moment coefficient', tags=['mphys_result'])\n self.add_output('CM_Z', desc = 'Z Moment coefficient', tags=['mphys_result'])\n\n self.add_output('Lift', desc = 'Total Lift', tags=['mphys_result'])\n self.add_output('Drag', desc = 'Total Drag', tags=['mphys_result'])\n self.add_output('F_X', desc = 'Total X Force', tags=['mphys_result'])\n self.add_output('F_Y', desc = 'Total Y Force', tags=['mphys_result'])\n self.add_output('F_Z', desc = 'Total Z Force', tags=['mphys_result'])\n self.add_output('M_X', desc = 'Total X Moment', tags=['mphys_result'])\n self.add_output('M_Y', desc = 'Total Y Moment', tags=['mphys_result'])\n self.add_output('M_Z', desc = 'Total Z Moment', tags=['mphys_result'])\n\n def compute(self,inputs,outputs):\n aoa = inputs['aoa']\n yaw = inputs['yaw']\n area = inputs['ref_area']\n q_inf = inputs['q_inf']\n xc = inputs['moment_center'][0]\n yc = inputs['moment_center'][1]\n zc = inputs['moment_center'][2]\n c = inputs['ref_length']\n\n x = inputs['x_aero'][0::3]\n y = inputs['x_aero'][1::3]\n z = inputs['x_aero'][2::3]\n\n fx = inputs['f_aero'][0::3]\n fy = inputs['f_aero'][1::3]\n fz = inputs['f_aero'][2::3]\n\n fx_total = self.comm.allreduce(np.sum(fx))\n fy_total = self.comm.allreduce(np.sum(fy))\n fz_total = self.comm.allreduce(np.sum(fz))\n\n outputs['F_X'] = fx_total\n outputs['F_Y'] = fy_total\n outputs['F_Z'] = fz_total\n outputs['C_X'] = fx_total / (q_inf * area)\n outputs['C_Y'] = fy_total / (q_inf * area)\n outputs['C_Z'] = fz_total / (q_inf * area)\n\n outputs['Lift'] = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa)\n outputs['Drag'] = ( fx_total * np.cos(aoa) * np.cos(yaw)\n - fy_total * np.sin(yaw)\n + fz_total * np.sin(aoa) * np.cos(yaw)\n )\n\n outputs['C_L'] = outputs['Lift'] / (q_inf * area)\n outputs['C_D'] = outputs['Drag'] / (q_inf * area)\n\n m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc)))\n m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc)))\n m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc)))\n\n outputs['M_X'] = m_x\n outputs['M_Y'] = m_y\n outputs['M_Z'] = m_z\n\n outputs['CM_X'] = m_x / (q_inf * area * c)\n outputs['CM_Y'] = m_y / (q_inf * area * c)\n outputs['CM_Z'] = m_z / (q_inf * area * c)\n\n def 
compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n aoa = inputs['aoa']\n yaw = inputs['yaw']\n area = inputs['ref_area']\n q_inf = inputs['q_inf']\n xc = inputs['moment_center'][0]\n yc = inputs['moment_center'][1]\n zc = inputs['moment_center'][2]\n c = inputs['ref_length']\n\n x = inputs['x_aero'][0::3]\n y = inputs['x_aero'][1::3]\n z = inputs['x_aero'][2::3]\n\n fx = inputs['f_aero'][0::3]\n fy = inputs['f_aero'][1::3]\n fz = inputs['f_aero'][2::3]\n\n fx_total = self.comm.allreduce(np.sum(fx))\n fy_total = self.comm.allreduce(np.sum(fy))\n fz_total = self.comm.allreduce(np.sum(fz))\n\n lift = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa)\n drag = ( fx_total * np.cos(aoa) * np.cos(yaw)\n - fy_total * np.sin(yaw)\n + fz_total * np.sin(aoa) * np.cos(yaw)\n )\n\n m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc)))\n m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc)))\n m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc)))\n\n if mode == 'fwd':\n if 'aoa' in d_inputs:\n daoa_rad = d_inputs['aoa']\n if 'Lift' in d_outputs or 'C_L' in d_outputs:\n d_lift_d_aoa = ( - fx_total * np.cos(aoa) * daoa_rad\n - fz_total * np.sin(aoa) * daoa_rad )\n\n if 'Lift' in d_outputs:\n d_outputs['Lift'] += d_lift_d_aoa\n if 'C_L' in d_outputs:\n d_outputs['C_L'] += d_lift_d_aoa / (q_inf * area)\n if 'Drag' in d_outputs or 'C_D' in d_outputs:\n d_drag_d_aoa = ( fx_total * (-np.sin(aoa) * daoa_rad) * np.cos(yaw)\n + fz_total * ( np.cos(aoa) * daoa_rad) * np.cos(yaw))\n if 'Drag' in d_outputs:\n d_outputs['Drag'] += d_drag_d_aoa\n if 'C_D' in d_outputs:\n d_outputs['C_D'] += d_drag_d_aoa / (q_inf * area)\n\n if 'yaw' in d_inputs:\n dyaw_rad = d_inputs['yaw']\n if 'Drag' in d_outputs or 'C_D' in d_outputs:\n d_drag_d_yaw = ( fx_total * np.cos(aoa) * (-np.sin(yaw) * dyaw_rad)\n - fy_total * np.cos(yaw) * dyaw_rad\n + fz_total * np.sin(aoa) * (-np.sin(yaw) * dyaw_rad)\n )\n if 'Drag' in d_outputs:\n d_outputs['Drag'] += d_drag_d_yaw\n if 'C_D' in d_outputs:\n d_outputs['C_D'] += d_drag_d_yaw / (q_inf * area)\n\n if 'ref_area' in d_inputs:\n d_nondim = - d_inputs['ref_area'] / (q_inf * area**2.0)\n if 'C_X' in d_outputs:\n d_outputs['C_X'] += fx_total * d_nondim\n if 'C_Y' in d_outputs:\n d_outputs['C_Y'] += fy_total * d_nondim\n if 'C_Z' in d_outputs:\n d_outputs['C_Z'] += fz_total * d_nondim\n if 'C_L' in d_outputs:\n d_outputs['C_L'] += lift * d_nondim\n if 'C_D' in d_outputs:\n d_outputs['C_D'] += drag * d_nondim\n if 'CM_X' in d_outputs:\n d_outputs['CM_X'] += m_x * d_nondim / c\n if 'CM_X' in d_outputs:\n d_outputs['CM_Y'] += m_y * d_nondim / c\n if 'CM_Z' in d_outputs:\n d_outputs['CM_Z'] += m_z * d_nondim / c\n if 'moment_center' in d_inputs:\n dxc = d_inputs['moment_center'][0]\n dyc = d_inputs['moment_center'][1]\n dzc = d_inputs['moment_center'][2]\n if 'M_X' in d_outputs:\n d_outputs['M_X'] += -fz_total * dyc + fy_total * dzc\n if 'M_Y' in d_outputs:\n d_outputs['M_Y'] += fz_total * dxc - fx_total * dzc\n if 'M_Z' in d_outputs:\n d_outputs['M_Z'] += -fy_total * dxc + fx_total * dyc\n if 'CM_X' in d_outputs:\n d_outputs['CM_X'] += (-fz_total * dyc + fy_total * dzc) / (q_inf * area * c)\n if 'CM_Y' in d_outputs:\n d_outputs['CM_Y'] += ( fz_total * dxc - fx_total * dzc) / (q_inf * area * c)\n if 'CM_Z' in d_outputs:\n d_outputs['CM_Z'] += (-fy_total * dxc + fx_total * dyc) / (q_inf * area * c)\n\n if 'ref_length' in d_inputs:\n d_nondim = - d_inputs['ref_length'] / (q_inf * area * c**2.0)\n if 'CM_X' in d_outputs:\n d_outputs['CM_X'] += m_x * 
d_nondim\n if 'CM_X' in d_outputs:\n d_outputs['CM_Y'] += m_y * d_nondim\n if 'CM_Z' in d_outputs:\n d_outputs['CM_Z'] += m_z * d_nondim\n\n if 'q_inf' in d_inputs:\n d_nondim = - d_inputs['q_inf'] / (q_inf**2.0 * area)\n if 'C_X' in d_outputs:\n d_outputs['C_X'] += fx_total * d_nondim\n if 'C_Y' in d_outputs:\n d_outputs['C_Y'] += fy_total * d_nondim\n if 'C_Z' in d_outputs:\n d_outputs['C_Z'] += fz_total * d_nondim\n if 'C_L' in d_outputs:\n d_outputs['C_L'] += lift * d_nondim\n if 'C_D' in d_outputs:\n d_outputs['C_D'] += drag * d_nondim\n if 'CM_X' in d_outputs:\n d_outputs['CM_X'] += m_x * d_nondim / c\n if 'CM_X' in d_outputs:\n d_outputs['CM_Y'] += m_y * d_nondim / c\n if 'CM_Z' in d_outputs:\n d_outputs['CM_Z'] += m_z * d_nondim / c\n\n if 'x_aero' in d_inputs:\n dx = d_inputs['x_aero'][0::3]\n dy = d_inputs['x_aero'][1::3]\n dz = d_inputs['x_aero'][2::3]\n if 'M_X' in d_outputs:\n d_outputs['M_X'] += np.dot(fz,dy) - np.dot(fy,dz)\n if 'M_Y' in d_outputs:\n d_outputs['M_Y'] += -np.dot(fz,dx) + np.dot(fx,dz)\n if 'M_Z' in d_outputs:\n d_outputs['M_Z'] += np.dot(fy,dx) - np.dot(fx,dy)\n if 'CM_X' in d_outputs:\n d_outputs['CM_X'] += ( np.dot(fz,dy) - np.dot(fy,dz)) / (q_inf * area * c)\n if 'CM_Y' in d_outputs:\n d_outputs['CM_Y'] += (-np.dot(fz,dx) + np.dot(fx,dz)) / (q_inf * area * c)\n if 'CM_Z' in d_outputs:\n d_outputs['CM_Z'] += ( np.dot(fy,dx) - np.dot(fx,dy)) / (q_inf * area * c)\n\n if 'f_aero' in d_inputs:\n dfx = d_inputs['f_aero'][0::3]\n dfy = d_inputs['f_aero'][1::3]\n dfz = d_inputs['f_aero'][2::3]\n dfx_total = np.sum(dfx)\n dfy_total = np.sum(dfy)\n dfz_total = np.sum(dfz)\n if 'F_X' in d_outputs:\n d_outputs['F_X'] += dfx_total\n if 'F_Y' in d_outputs:\n d_outputs['F_Y'] += dfy_total\n if 'F_Z' in d_outputs:\n d_outputs['F_Z'] += dfz_total\n if 'C_X' in d_outputs:\n d_outputs['C_X'] += dfx_total / (q_inf * area)\n if 'C_Y' in d_outputs:\n d_outputs['C_Y'] += dfy_total / (q_inf * area)\n if 'C_Z' in d_outputs:\n d_outputs['C_Z'] += dfz_total / (q_inf * area)\n if 'Lift' in d_outputs:\n d_outputs['Lift'] += -dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa)\n if 'Drag' in d_outputs:\n d_outputs['Drag'] += ( dfx_total * np.cos(aoa) * np.cos(yaw)\n - dfy_total * np.sin(yaw)\n + dfz_total * np.sin(aoa) * np.cos(yaw)\n )\n if 'C_L' in d_outputs:\n d_outputs['C_L'] += (-dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa)) / (q_inf * area)\n if 'C_D' in d_outputs:\n d_outputs['C_D'] += ( dfx_total * np.cos(aoa) * np.cos(yaw)\n - dfy_total * np.sin(yaw)\n + dfz_total * np.sin(aoa) * np.cos(yaw)\n ) / (q_inf * area)\n\n if 'M_X' in d_outputs:\n d_outputs['M_X'] += np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc))\n if 'M_Y' in d_outputs:\n d_outputs['M_Y'] += -np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc))\n if 'M_Z' in d_outputs:\n d_outputs['M_Z'] += np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc))\n if 'CM_X' in d_outputs:\n d_outputs['CM_X'] += ( np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc))) / (q_inf * area * c)\n if 'CM_Y' in d_outputs:\n d_outputs['CM_Y'] += (-np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc))) / (q_inf * area * c)\n if 'CM_Z' in d_outputs:\n d_outputs['CM_Z'] += ( np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc))) / (q_inf * area * c)\n\n elif mode == 'rev':\n if 'aoa' in d_inputs:\n if 'Lift' in d_outputs or 'C_L' in d_outputs:\n d_lift = d_outputs['Lift'] if 'Lift' in d_outputs else 0.0\n d_cl = d_outputs['C_L'] if 'C_L' in d_outputs else 0.0\n d_inputs['aoa'] += ( - fx_total * np.cos(aoa)\n - fz_total * np.sin(aoa)\n ) * (d_lift + d_cl / (q_inf * area))\n\n if 'Drag' in d_outputs or 'C_D' in 
d_outputs:\n d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0\n d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0\n d_inputs['aoa'] += ( fx_total * (-np.sin(aoa)) * np.cos(yaw)\n + fz_total * ( np.cos(aoa)) * np.cos(yaw)\n ) * (d_drag + d_cd / (q_inf * area))\n if 'yaw' in d_inputs:\n if 'Drag' in d_outputs or 'C_D' in d_outputs:\n d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0\n d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0\n d_inputs['yaw'] += ( fx_total * np.cos(aoa) * (-np.sin(yaw))\n - fy_total * np.cos(yaw)\n + fz_total * np.sin(aoa) * (-np.sin(yaw))\n ) * (d_drag + d_cd / (q_inf * area))\n\n if 'ref_area' in d_inputs:\n d_nondim = - 1.0 / (q_inf * area**2.0)\n if 'C_X' in d_outputs:\n d_inputs['ref_area'] += d_outputs['C_X'] * fx_total * d_nondim\n if 'C_Y' in d_outputs:\n d_inputs['ref_area'] += d_outputs['C_Y'] * fy_total * d_nondim\n if 'C_Z' in d_outputs:\n d_inputs['ref_area'] += d_outputs['C_Z'] * fz_total * d_nondim\n if 'C_L' in d_outputs:\n d_inputs['ref_area'] += d_outputs['C_L'] * lift * d_nondim\n if 'C_D' in d_outputs:\n d_inputs['ref_area'] += d_outputs['C_D'] * drag * d_nondim\n if 'CM_X' in d_outputs:\n d_inputs['ref_area'] += d_outputs['CM_X'] * m_x * d_nondim / c\n if 'CM_X' in d_outputs:\n d_inputs['ref_area'] += d_outputs['CM_Y'] * m_y * d_nondim / c\n if 'CM_Z' in d_outputs:\n d_inputs['ref_area'] += d_outputs['CM_Z'] * m_z * d_nondim / c\n\n if 'moment_center' in d_inputs:\n if 'M_X' in d_outputs:\n d_inputs['moment_center'][1] += -fz_total * d_outputs['M_X']\n d_inputs['moment_center'][2] += fy_total * d_outputs['M_X']\n if 'M_Y' in d_outputs:\n d_inputs['moment_center'][0] += fz_total * d_outputs['M_Y']\n d_inputs['moment_center'][2] += -fx_total * d_outputs['M_Y']\n if 'M_Z' in d_outputs:\n d_inputs['moment_center'][0] += -fy_total * d_outputs['M_Z']\n d_inputs['moment_center'][1] += fx_total * d_outputs['M_Z']\n if 'CM_X' in d_outputs:\n d_inputs['moment_center'][1] += -fz_total * d_outputs['CM_X'] / (q_inf * area * c)\n d_inputs['moment_center'][2] += fy_total * d_outputs['CM_X'] / (q_inf * area * c)\n if 'CM_Y' in d_outputs:\n d_inputs['moment_center'][0] += fz_total * d_outputs['CM_Y'] / (q_inf * area * c)\n d_inputs['moment_center'][2] += -fx_total * d_outputs['CM_Y'] / (q_inf * area * c)\n if 'CM_Z' in d_outputs:\n d_inputs['moment_center'][0] += -fy_total * d_outputs['CM_Z'] / (q_inf * area * c)\n d_inputs['moment_center'][1] += fx_total * d_outputs['CM_Z'] / (q_inf * area * c)\n if 'ref_length' in d_inputs:\n d_nondim = - 1.0 / (q_inf * area * c**2.0)\n if 'CM_X' in d_outputs:\n d_inputs['ref_length'] += m_x * d_nondim * d_outputs['CM_X']\n if 'CM_X' in d_outputs:\n d_inputs['ref_length'] += m_y * d_nondim * d_outputs['CM_Y']\n if 'CM_Z' in d_outputs:\n d_inputs['ref_length'] += m_z * d_nondim * d_outputs['CM_Z']\n\n if 'q_inf' in d_inputs:\n d_nondim = - 1.0 / (q_inf**2.0 * area)\n if 'C_X' in d_outputs:\n d_inputs['q_inf'] += d_outputs['C_X'] * fx_total * d_nondim\n if 'C_Y' in d_outputs:\n d_inputs['q_inf'] += d_outputs['C_Y'] * fy_total * d_nondim\n if 'C_Z' in d_outputs:\n d_inputs['q_inf'] += d_outputs['C_Z'] * fz_total * d_nondim\n if 'C_L' in d_outputs:\n d_inputs['q_inf'] += d_outputs['C_L'] * lift * d_nondim\n if 'C_D' in d_outputs:\n d_inputs['q_inf'] += d_outputs['C_D'] * drag * d_nondim\n if 'CM_X' in d_outputs:\n d_inputs['q_inf'] += d_outputs['CM_X'] * m_x * d_nondim / c\n if 'CM_X' in d_outputs:\n d_inputs['q_inf'] += d_outputs['CM_Y'] * m_y * d_nondim / c\n if 'CM_Z' in d_outputs:\n 
d_inputs['q_inf'] += d_outputs['CM_Z'] * m_z * d_nondim / c\n\n if 'x_aero' in d_inputs:\n nondim = 1.0 / (q_inf * area * c)\n dm_x = d_outputs['M_X'] if 'M_X' in d_outputs else 0.0\n dm_y = d_outputs['M_Y'] if 'M_Y' in d_outputs else 0.0\n dm_z = d_outputs['M_Z'] if 'M_Z' in d_outputs else 0.0\n dcm_x = d_outputs['CM_X']*nondim if 'CM_X' in d_outputs else 0.0\n dcm_y = d_outputs['CM_Y']*nondim if 'CM_Y' in d_outputs else 0.0\n dcm_z = d_outputs['CM_Z']*nondim if 'CM_Z' in d_outputs else 0.0\n d_inputs['x_aero'][0::3] += -fz * (dm_y + dcm_y) + fy * (dm_z + dcm_z)\n d_inputs['x_aero'][1::3] += fz * (dm_x + dcm_x) - fx * (dm_z + dcm_z)\n d_inputs['x_aero'][2::3] += -fy * (dm_x + dcm_x) + fx * (dm_y + dcm_y)\n\n if 'f_aero' in d_inputs:\n if 'F_X' in d_outputs:\n d_inputs['f_aero'][0::3] += d_outputs['F_X']\n if 'F_Y' in d_outputs:\n d_inputs['f_aero'][1::3] += d_outputs['F_Y']\n if 'F_Z' in d_outputs:\n d_inputs['f_aero'][2::3] += d_outputs['F_Z']\n if 'C_X' in d_outputs:\n d_inputs['f_aero'][0::3] += d_outputs['C_X'] / (q_inf * area)\n if 'C_Y' in d_outputs:\n d_inputs['f_aero'][1::3] += d_outputs['C_Y'] / (q_inf * area)\n if 'C_Z' in d_outputs:\n d_inputs['f_aero'][2::3] += d_outputs['C_Z'] / (q_inf * area)\n if 'Lift' in d_outputs:\n d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['Lift']\n d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['Lift']\n if 'Drag' in d_outputs:\n d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['Drag']\n d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['Drag']\n d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['Drag']\n if 'C_L' in d_outputs:\n d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['C_L'] / (q_inf * area)\n d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['C_L'] / (q_inf * area)\n if 'C_D' in d_outputs:\n d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area)\n d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['C_D'] / (q_inf * area)\n d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area)\n\n if 'M_X' in d_outputs:\n d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['M_X']\n d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['M_X']\n if 'M_Y' in d_outputs:\n d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['M_Y']\n d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['M_Y']\n if 'M_Z' in d_outputs:\n d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['M_Z']\n d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['M_Z']\n if 'CM_X' in d_outputs:\n d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['CM_X'] / (q_inf * area * c)\n d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['CM_X'] / (q_inf * area * c)\n if 'CM_Y' in d_outputs:\n d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['CM_Y'] / (q_inf * area * c)\n d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['CM_Y'] / (q_inf * area * c)\n if 'CM_Z' in d_outputs:\n d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['CM_Z'] / (q_inf * area * c)\n d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['CM_Z'] / (q_inf * area * c)\n\ndef check_integrated_surface_force_partials():\n nnodes = 3\n prob = om.Problem()\n ivc = om.IndepVarComp()\n ivc.add_output('aoa',val=45.0, units='deg')\n ivc.add_output('yaw',val=135.0, units='deg')\n ivc.add_output('ref_area',val=0.2)\n ivc.add_output('moment_center',shape=3,val=np.zeros(3))\n ivc.add_output('ref_length', val = 3.0)\n ivc.add_output('q_inf',val=10.0)\n ivc.add_output('x_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True)\n 
ivc.add_output('f_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True)\n prob.model.add_subsystem('ivc',ivc,promotes_outputs=['*'])\n prob.model.add_subsystem('forces',IntegratedSurfaceForces(),\n promotes_inputs=['*'])\n\n prob.setup(force_alloc_complex=True)\n prob.run_model()\n prob.check_partials(compact_print=True, method='cs')\n\nif __name__ == '__main__':\n check_integrated_surface_force_partials()\n" ]
[ [ "numpy.sin", "numpy.dot", "numpy.random.rand", "numpy.zeros", "numpy.sum", "numpy.cos" ] ]
adityamanglik/Algorithm-Implementations
[ "780911ff05c1956ee005729fc8d5c916c5506838" ]
[ "Machine Learning/Sklearn Implementations/Reinforcement Learning/Upper_Confidence_Bound.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 27 17:18:43 2020\n\n@author: admangli\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n\ndataset = pd.read_csv('Ads_CTR_Optimisation.csv').values\n\n#%%\n\nslot_machines = 10\n\n#%% Random ad selection reward\nimport random\n\nrandom_reward = 0\n\nfor i in range(len(dataset)):\n random_reward += dataset[i, random.randint(0, slot_machines - 1)]\n \n#%%\nnumber_of_ad_selections = [0]*slot_machines\nreward_sums = [0]*slot_machines\nad_selection_sequence = []\nUCB_range = np.zeros((slot_machines, 2)) # To get an idea of underlying distributino\n\n# Generate initial seed, selecting each machine at least once randomly\nfor round in range(0, slot_machines):\n target_ad = random.randint(0, slot_machines - 1)\n while (number_of_ad_selections[target_ad] == 1):\n target_ad = random.randint(0, slot_machines - 1)\n number_of_ad_selections[target_ad] += 1\n reward_sums[target_ad] += dataset[round][target_ad]\n ad_selection_sequence.append(target_ad)\n \nfor round in range(slot_machines, len(dataset)):\n # Calculate Ri and Delta for each ad for the current round\n Ri = [0]*slot_machines\n Deltai = [0]*slot_machines\n max_UCB = 0\n target_ad = -1\n for ad in range(0, slot_machines):\n Ri[ad] = reward_sums[ad] / number_of_ad_selections[ad]\n Deltai[ad] = math.sqrt(1.5 * math.log(round + 1)/number_of_ad_selections[ad])\n UCB_range[ad, 0] = Ri[ad] + Deltai[ad]\n UCB_range[ad, 1] = Ri[ad] - Deltai[ad]\n if UCB_range[ad, 0] > max_UCB: # Pick the ad with maximum UCB = Ri + Delta for current round\n max_UCB = UCB_range[ad, 0]\n target_ad = ad\n \n # Increment selected ad's reward and number of selections\n if target_ad != -1:\n number_of_ad_selections[target_ad] += 1\n reward_sums[target_ad] += dataset[round][target_ad]\n ad_selection_sequence.append(target_ad)\n \n#%% Visualize results\n\n# Plot a histogram showing how many times each ad was selected\nplt.hist(ad_selection_sequence)\nplt.xlabel('Ad Number')\nplt.ylabel('Number of selections')\nplt.title('Ad selection comparision')\nplt.show()" ]
[ [ "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
Gravifer/pylabyk
[ "91922907c5ecaa832bdc6ee6cb285095905f4cac" ]
[ "demo/demo_min_max_distrib.py" ]
[ "# Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.\n\nimport torch\nimport matplotlib.pyplot as plt\nfrom lib.pylabyk import numpytorch as npt\nfrom lib.pylabyk.numpytorch import npy, npys\n\n\ndef print_demo(p, fun):\n\n out = fun(p)\n\n print('-----')\n print('fun: %s' % fun.__name__)\n print('p:')\n print(p)\n print('out[0]:')\n print(out[0])\n\n print('out[1]:')\n print(out[1])\n\n print('out[0].sum(), out[1].sum()')\n print(out[0].sum(), out[1].sum())\n\n\nif __name__ == '__main__':\n for p, fun in [\n (torch.tensor([\n [0., 1.],\n [0.5, 0.5]\n ]) * 1., npt.min_distrib),\n (torch.tensor([\n [1., 0.],\n [0.5, 0.5]\n ]) * 1., npt.min_distrib),\n (torch.tensor([\n [0.5, 0.5],\n [0.5, 0.5]\n ]) * 0.1, npt.min_distrib),\n (torch.tensor([\n [0., 1.],\n [0.5, 0.5]\n ]) * 1., npt.max_distrib),\n (torch.tensor([\n [1., 0.],\n [0.5, 0.5]\n ]) * 1., npt.max_distrib),\n (torch.tensor([\n [0.5, 0.5],\n [0.5, 0.5]\n ]) * 0.1, npt.max_distrib),\n ]:\n print_demo(p, fun)\n" ]
[ [ "torch.tensor" ] ]
ActuarialIntelligence/Base
[ "ede66e263960d1784c0dea1131706472fc33eaf3" ]
[ "src/ActuarialIntelligence.Infrastructure.PythonScripts/StreamFootageAnalyse.py" ]
[ "from keras.preprocessing.image import img_to_array\nimport imutils\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nimport geocoder\nimport streamlink\n#import mysql.connector as con\n\n#mydb = con.connect(\n# host=\"localhost\",\n# user=\"yourusername\",\n# passwd=\"yourpassword\",\n# database=\"mydatabase\"\n#)\n#mycursor = mydb.cursor()\n\ng = geocoder.ip('me')\n\n# parameters for loading data and images\ndetection_model_path = 'C:\\\\Users\\\\rajiyer\\\\Documents\\\\Test Data\\\\Sentiment Analysis\\\\Emotion-recognition-master\\\\haarcascade_files\\\\haarcascade_frontalface_default.xml'\nemotion_model_path = 'C:\\\\Users\\\\rajiyer\\\\Documents\\\\Test Data\\\\Sentiment Analysis\\\\Emotion-recognition-master\\\\models\\\\_mini_XCEPTION.102-0.66.hdf5'\n\n# hyper-parameters for bounding boxes shape\n# loading models\nface_detection = cv2.CascadeClassifier(detection_model_path)\nemotion_classifier = load_model(emotion_model_path, compile=False)\nEMOTIONS = [\"angry\" ,\"disgust\",\"scared\", \"happy\", \"sad\", \"surprised\",\n \"neutral\"]\n\n\n#feelings_faces = []\n#for index, emotion in enumerate(EMOTIONS):\n # feelings_faces.append(cv2.imread('emojis/' + emotion + '.png', -1))\n\n# starting video streaming\nurl = 'https://youtu.be/Bchx0mS7XOY'\nstreams = streamlink.streams(url)\ncv2.namedWindow('Live Footage')\ncamera = cv2.VideoCapture(streams[\"360p\"].url)\nf= open(\"C:\\\\Users\\\\rajiyer\\\\Documents\\\\Test Data\\\\Probability.txt\",\"a+\")\n\nwhile True:\n frame = camera.read()[1]\n #reading the frame\n frame = imutils.resize(frame,width=300)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_detection.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)\n \n canvas = np.zeros((250, 300, 3), dtype=\"uint8\")\n frameClone = frame.copy()\n if len(faces) > 0:\n faces = sorted(faces, reverse=True,\n key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]\n (fX, fY, fW, fH) = faces\n # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare\n # the ROI for classification via the CNN\n roi = gray[fY:fY + fH, fX:fX + fW]\n roi = cv2.resize(roi, (64, 64))\n roi = roi.astype(\"float\") / 255.0\n roi = img_to_array(roi)\n roi = np.expand_dims(roi, axis=0)\n \n \n preds = emotion_classifier.predict(roi)[0]\n emotion_probability = np.max(preds)\n label = EMOTIONS[preds.argmax()]\n else: continue\n\n \n for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):\n # construct the label text\n text = \"{}: {:.2f}%\".format(emotion, prob * 100)\n #sql = \"INSERT INTO predData (Metadata, Probability) VALUES (%s, %s)\"\n #val = (\"Meta\", prob * 100)\n f.write(text)\n str1 = ''.join(str(e) for e in g.latlng)\n f.write(str1)\n #mycursor.execute(sql, val)\n #mydb.commit()\n # draw the label + probability bar on the canvas\n # emoji_face = feelings_faces[np.argmax(preds)]\n\n \n w = int(prob * 300)\n cv2.rectangle(canvas, (7, (i * 35) + 5),\n (w, (i * 35) + 35), (0, 0, 255), -1)\n cv2.putText(canvas, text, (10, (i * 35) + 23),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45,\n (255, 255, 255), 2)\n cv2.putText(frameClone, label, (fX, fY - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),\n (0, 0, 255), 2)\n\n\n# for c in range(0, 3):\n# frame[200:320, 10:130, c] = emoji_face[:, :, c] * \\\n# (emoji_face[:, :, 3] / 255.0) + frame[200:320,\n# 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)\n\n\n cv2.imshow('your_face', 
frameClone)\n cv2.imshow(\"Probabilities\", canvas)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncamera.release()\ncv2.destroyAllWindows()\n\n" ]
[ [ "numpy.expand_dims", "numpy.max", "numpy.zeros" ] ]
tecklun/eeg-notebooks
[ "6026d8e03234523f1929e4893b746411e496068e" ]
[ "eegnb/experiments/auditory_ssaep/ssaep.py" ]
[ "\"\"\"\nGenerate Steady-State Auditory Evoked Potential (SSAEP)\n=======================================================\n\nSteady-State Auditory Evoked Potential (SSAEP) - also known as Auditory\nSteady-State Response (ASSR) - stimulus presentation.\n\n\"\"\"\n\n\nfrom time import time\n\nimport numpy as np\nfrom pandas import DataFrame\nfrom psychopy import visual, core, event, sound\nfrom scipy import stats\n\n__title__ = \"Auditory SSAEP (orig)\"\n\n\ndef present(\n save_fn: str,\n duration=120,\n n_trials=2010,\n iti=0.5,\n soa=3.0,\n jitter=0.2,\n volume=0.8,\n random_state=42,\n eeg=None,\n cf1=900,\n amf1=45,\n cf2=770,\n amf2=40.018,\n sample_rate=44100,\n):\n\n \"\"\"\n\n Auditory SSAEP Experiment\n ===========================\n\n\n Parameters:\n -----------\n\n duration - duration of the recording in seconds (default 10)\n\n n_trials - number of trials (default 10)\n\n iti - intertrial interval (default 0.3)\n\n soa - stimulus onset asynchrony, = interval between end of stimulus\n and next trial (default 0.2)\n\n jitter - jitter in the intertrial intervals (default 0.2)\n\n secs - duration of the sound in seconds (default 0.2)\n\n volume - volume of the sounds in [0,1] (default 0.8)\n\n random_state - random seed (default 42)\n\n\n \"\"\"\n\n # Set up trial parameters\n np.random.seed(random_state)\n markernames = [1, 2]\n record_duration = np.float32(duration)\n\n # Initialize stimuli\n am1 = generate_am_waveform(cf1, amf1, secs=soa, sample_rate=sample_rate)\n am2 = generate_am_waveform(cf2, amf2, secs=soa, sample_rate=sample_rate)\n\n aud1 = sound.Sound(am1, sampleRate=sample_rate)\n aud1.setVolume(volume)\n aud2 = sound.Sound(am2, sampleRate=sample_rate)\n aud2.setVolume(volume)\n auds = [aud1, aud2]\n\n # Set up trial list\n stim_freq = np.random.binomial(1, 0.5, n_trials)\n itis = iti + np.random.rand(n_trials) * jitter\n trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))\n trials[\"iti\"] = itis\n trials[\"soa\"] = soa\n\n # Setup graphics\n mywin = visual.Window(\n [1920, 1080], monitor=\"testMonitor\", units=\"deg\", fullscr=True\n )\n fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])\n fixation.setAutoDraw(True)\n mywin.flip()\n\n # Show the instructions screen\n show_instructions(10)\n\n # Start EEG Stream, wait for signal to settle, and then pull timestamp for start point\n if eeg:\n eeg.start(save_fn, duration=record_duration)\n start = time()\n\n # Iterate through the events\n for ii, trial in trials.iterrows():\n\n # Intertrial interval\n core.wait(trial[\"iti\"] + np.random.randn() * jitter)\n\n # Select stimulus frequency\n ind = trials[\"stim_freq\"].iloc[ii]\n\n auds[ind].stop()\n auds[ind].play()\n\n # Push sample\n if eeg:\n timestamp = time()\n if eeg.backend == \"muselsl\":\n marker = [markernames[ind]]\n marker = list(map(int, marker))\n else:\n marker = markernames[ind]\n\n eeg.push_sample(marker=marker, timestamp=timestamp)\n\n mywin.flip()\n\n # Offset\n core.wait(soa)\n if len(event.getKeys()) > 0:\n break\n if (time() - start) > record_duration:\n break\n event.clearEvents()\n\n # Cleanup\n if eeg:\n eeg.stop()\n\n mywin.close()\n\n\ndef show_instructions(duration):\n\n instruction_text = \"\"\"\n Welcome to the aMMN experiment!\n\n Stay still, focus on the centre of the screen, and try not to blink.\n\n This block will run for %s seconds.\n\n Press spacebar to continue.\n\n \"\"\"\n instruction_text = instruction_text % duration\n\n # graphics\n mywin = visual.Window([1600, 900], 
monitor=\"testMonitor\", units=\"deg\", fullscr=True)\n\n mywin.mouseVisible = False\n\n # Instructions\n text = visual.TextStim(win=mywin, text=instruction_text, color=[-1, -1, -1])\n text.draw()\n mywin.flip()\n event.waitKeys(keyList=\"space\")\n\n mywin.mouseVisible = True\n mywin.close()\n\n\ndef generate_am_waveform(\n carrier_freq,\n am_freq,\n secs=1,\n sample_rate=None,\n am_type=\"gaussian\",\n gaussian_std_ratio=8,\n):\n \"\"\"Generate an amplitude-modulated waveform.\n\n Generate a sine wave amplitude-modulated by a second sine wave or a\n Gaussian envelope with standard deviation = period_AM/8.\n\n Args:\n carrier_freq (float): carrier wave frequency, in Hz\n am_freq (float): amplitude modulation frequency, in Hz\n\n Keyword Args:\n secs (float): duration of the stimulus, in seconds\n sample_rate (float): sampling rate of the sound, in Hz\n am_type (str): amplitude-modulation type\n 'gaussian' -> Gaussian with std defined by `gaussian_std`\n 'sine' -> sine wave\n gaussian_std_ratio (float): only used if `am_type` is 'gaussian'.\n Ratio between AM period and std of the Gaussian envelope. E.g.,\n gaussian_std = 8 means the Gaussian window has 8 standard\n deviations around its mean inside one AM period.\n\n Returns:\n (numpy.ndarray): sound samples\n \"\"\"\n t = np.arange(0, secs, 1.0 / sample_rate)\n\n if am_type == \"gaussian\":\n period = int(sample_rate / am_freq)\n std = period / gaussian_std_ratio\n norm_window = stats.norm.pdf(np.arange(period), period / 2, std)\n norm_window /= np.max(norm_window)\n n_windows = int(np.ceil(secs * am_freq))\n am = np.tile(norm_window, n_windows)\n am = am[: len(t)]\n\n elif am_type == \"sine\":\n am = np.sin(2 * np.pi * am_freq * t)\n\n carrier = 0.5 * np.sin(2 * np.pi * carrier_freq * t) + 0.5\n am_out = carrier * am\n\n return am_out\n" ]
[ [ "numpy.max", "numpy.ceil", "numpy.random.binomial", "numpy.random.rand", "numpy.sin", "numpy.zeros", "numpy.random.seed", "numpy.tile", "numpy.random.randn", "numpy.float32", "numpy.arange" ] ]
giuseppefutia/word2vec
[ "c8a26537fe6ac0bb9b0fe979696d1457d2bc4092" ]
[ "utils/init_parameters.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\n\n\ndef initialize_hyper_parameters(layer_acts, learning_rate):\n \"\"\"\n Initialize parameters for different levels of the network\n\n Arguments:\n layer_acts -- python array (list) containing the activation functions of each layer in the network\n learning_rate -- float value used as constant for gradient descent\n\n Returns:\n hyper_parameters -- python dictionary containing hyper_parameters (can be further extended)\n\n \"\"\"\n hyper_parameters = {}\n activations = {}\n L = len(layer_acts) # number of layers in the network\n for l in range(0, L):\n activations[l+1] = layer_acts[l]\n hyper_parameters[\"activations\"] = activations\n hyper_parameters[\"learning_rate\"] = learning_rate\n\n return hyper_parameters\n\n\ndef test_initialize_hyper_parameters():\n print(\"\\033[92m\" + \"\\nTest initialize_hyper_parameters() ...\" + \"\\033[0m\")\n layer_acts = [\"relu\", \"relu\", \"sigmoid\"]\n learning_rate = 0.0075\n hyper_parameters = initialize_hyper_parameters(layer_acts, learning_rate)\n print(hyper_parameters[\"activations\"])\n\n assert len(hyper_parameters[\"activations\"]) == 3\n assert hyper_parameters[\"activations\"][1] == \"relu\"\n\n print(\"\\033[92m\" + \"... end test\" + \"\\033[0m\")\n\n\ndef initialize_parameters(layer_dims):\n \"\"\"\n Initialize parameters for different levels of the network\n\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in the network\n\n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\", ...:\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n\n np.random.seed(1)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n return parameters\n\n\ndef test_initialize_parameters():\n print(\"\\n\" + \"\\033[92m\" + \"Test initialize_parameters() ...\" + \"\\033[0m\")\n\n np.random.seed(1)\n parameters = initialize_parameters([3,2,1])\n\n print(\"W1 = \" + str(parameters[\"W1\"]))\n print(\"b1 = \" + str(parameters[\"b1\"]))\n print(\"W2 = \" + str(parameters[\"W2\"]))\n print(\"b2 = \" + str(parameters[\"b2\"]))\n\n W1 = parameters[\"W1\"]\n W1_expected = np.array([[0.01624345,-0.00611756,-0.00528172],[-0.01072969,0.00865408,-0.02301539]])\n assert np.allclose(W1, W1_expected, rtol=1e-05, atol=1e-06)\n\n b1 = parameters[\"b1\"]\n b1_expected = np.array([[0.],[0.]])\n assert np.allclose(b1, b1_expected, rtol=1e-05, atol=1e-06)\n\n W2 = parameters[\"W2\"]\n W2_expected = np.array([[0.01744812, -0.00761207]])\n assert np.allclose(W2, W2_expected, rtol=1e-05, atol=1e-06)\n\n b2 = parameters[\"b2\"]\n b2_expected = np.array([[ 0.]])\n assert np.allclose(b2, b2_expected, rtol=1e-05, atol=1e-06)\n\n print(\"\\033[92m\" + \"... end test\" + \"\\033[0m\")\n\n\nif __name__ == \"__main__\":\n test_initialize_hyper_parameters()\n test_initialize_parameters()\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.seed", "numpy.random.randn", "numpy.allclose" ] ]
kavanase/aiida-vasp
[ "f9edc032fb0845622c5b0bbe7e1a5bf51205dae5" ]
[ "aiida_vasp/utils/aiida_utils.py" ]
[ "\"\"\"\nUtils for AiiDA.\n\n----------------\nUtilities for making working against AiiDA a bit easier. Mostly here due to\nhistorical reasons when AiiDA was rapidly developed. In the future most routines\nthat have now standardized in AiiDA will be removed.\n\"\"\"\n# pylint: disable=import-outside-toplevel\nimport numpy as np\nfrom packaging import version\n\nfrom aiida.orm import User\n\nfrom aiida.cmdline.utils.decorators import with_dbenv\n\nBASIC_DATA_TYPES = ['bool', 'float', 'int', 'list', 'str', 'dict']\n\n\ndef get_data_node(data_type, *args, **kwargs):\n return get_data_class(data_type)(*args, **kwargs)\n\n\ndef querybuild(cls, **kwargs):\n \"\"\"\n Instantiates and returns a QueryBuilder instance.\n\n The QueryBuilder's path has one vertice so far, namely this class.\n Additional parameters (e.g. filters or a label),\n can be passes as keyword arguments.\n\n :param label: Label to give\n :param filters: filters to apply\n :param project: projections\n :returns: a QueryBuilder instance.\n \"\"\"\n\n from aiida.orm import QueryBuilder\n\n query_builder = QueryBuilder()\n filters = kwargs.pop('filters', {})\n query_builder.append(cls, filters=filters, **kwargs)\n\n return query_builder\n\n\n@with_dbenv()\ndef get_data_class(data_type):\n \"\"\"Provide access to the orm.data classes with deferred dbenv loading.\"\"\"\n from aiida.plugins import DataFactory\n from aiida.common.exceptions import MissingEntryPointError\n\n data_cls = None\n try:\n data_cls = DataFactory(data_type)\n except MissingEntryPointError as err:\n raise err\n return data_cls\n\n\ndef get_current_user():\n \"\"\"Get current user.\"\"\"\n current_user = User.objects.get_default()\n return current_user\n\n\ndef copy_parameter(old_parameter):\n \"\"\"Assemble a new Dict.\"\"\"\n return get_data_node('dict', dict=old_parameter.get_dict())\n\n\ndef displaced_structure(structure, displacement, entry):\n disp_structure = structure.clone()\n displace_position(disp_structure, displacement, entry)\n return disp_structure\n\n\ndef compressed_structure(structure, volume_change):\n comp_structure = structure.clone()\n compress_cell(comp_structure, volume_change)\n return comp_structure\n\n\ndef displace_position(structure, displacement, entry):\n \"\"\"Displace a position in the StructureData.\"\"\"\n sites = structure.sites\n positions = []\n for site in sites:\n positions.append(site.position)\n new_position = np.asarray(positions[entry - 1]) + displacement\n new_position = new_position.tolist()\n positions[entry - 1] = tuple(new_position)\n structure.reset_sites_positions(positions)\n\n\ndef compress_cell(structure, volume_change):\n \"\"\"Apply compression or tensile forces to the unit cell.\"\"\"\n cell = structure.cell\n new_cell = np.array(cell) * volume_change\n structure.reset_cell(new_cell.tolist())\n\n\ndef aiida_version():\n from aiida import __version__ as aiida_version_\n return version.parse(aiida_version_)\n\n\ndef cmp_version(string):\n return version.parse(string)\n\n\ndef cmp_load_verdi_data():\n \"\"\"Load the verdi data click command group for any version since 0.11.\"\"\"\n verdi_data = None\n import_errors = []\n\n try:\n from aiida.cmdline.commands import data_cmd as verdi_data\n except ImportError as err:\n import_errors.append(err)\n\n if not verdi_data:\n try:\n from aiida.cmdline.commands import verdi_data\n except ImportError as err:\n import_errors.append(err)\n\n if not verdi_data:\n try:\n from aiida.cmdline.commands.cmd_data import verdi_data\n except ImportError as err:\n 
import_errors.append(err)\n\n if not verdi_data:\n err_messages = '\\n'.join([' * {}'.format(err) for err in import_errors])\n raise ImportError('The verdi data base command group could not be found:\\n' + err_messages)\n\n return verdi_data\n\n\ndef create_authinfo(computer, store=False):\n \"\"\"Allow the current user to use the given computer.\"\"\"\n from aiida.orm import AuthInfo\n authinfo = AuthInfo(computer=computer, user=get_current_user())\n if store:\n authinfo.store()\n return authinfo\n\n\ndef cmp_get_authinfo(computer):\n \"\"\"Get an existing authinfo or None for the given computer and current user.\"\"\"\n return computer.get_authinfo(get_current_user())\n\n\ndef cmp_get_transport(computer):\n if hasattr(computer, 'get_transport'):\n return computer.get_transport()\n authinfo = cmp_get_authinfo(computer)\n return authinfo.get_transport()\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
ChaseMonsterAway/vedacls
[ "91657f688dcaf3f9f4c58eb40a8f5c8f34a4bd73", "91657f688dcaf3f9f4c58eb40a8f5c8f34a4bd73" ]
[ "vedacls/models/builder.py", "vedacls/metrics/accuracy.py" ]
[ "import logging\nimport torch.nn as nn\n\nfrom . import arch as archs\n\nlogger = logging.getLogger()\n\n\ndef build_model(cfg_model):\n if cfg_model.get('pretrained', False):\n info = \"=> building pre-trained model {}\".format(cfg_model['arch'])\n model = archs.__dict__[cfg_model.arch](pretrained=True)\n in_features = model.fc.in_features\n model.fc = nn.Linear(in_features, cfg_model.num_classes)\n else:\n info = \"=> building model {}\".format(cfg_model.arch)\n model = archs.__dict__[cfg_model.arch](num_classes=cfg_model.num_classes)\n logger.info(info)\n\n return model\n", "from collections import OrderedDict\n\nimport numpy as np\n\nfrom .base import Base\nfrom .registry import METRICS\n\n\n@METRICS.register_module\nclass Accuracy(Base):\n def __init__(self, topk=(1,)):\n super(Accuracy, self).__init__()\n\n self.topk = topk\n self.maxk = max(topk)\n self.count = 0\n self.tp = {k: 0 for k in self.topk}\n\n def add(self, pred, gt):\n if gt.ndim == 1:\n gt = gt[:, None]\n\n mask = np.argsort(pred)[:, -self.maxk:][:, ::-1] == gt\n for k in self.topk:\n self.tp[k] += np.sum(mask[:, :k])\n self.count += len(gt)\n\n def reset(self):\n self.count = 0\n self.tp = {k: 0 for k in self.topk}\n\n def result(self):\n res = OrderedDict()\n for k in self.topk:\n res['top_{}'.format(k)] = self.tp[k] / self.count\n\n return res\n" ]
[ [ "torch.nn.Linear" ], [ "numpy.sum", "numpy.argsort" ] ]
opticverge/scikit-evolution
[ "fdd69468c0aacfba4c1ff82a6619f20abfd5f77b" ]
[ "opticverge/examples/machine_learning/regression/red_wine_quality/problem.py" ]
[ "from typing import Any, Dict\n\nimport pandas\nimport numpy as np\nfrom sklearn import datasets\n\nfrom opticverge.core.chromosome.abstract_chromosome import AbstractChromosome\nfrom opticverge.core.enum.objective import Objective\nfrom opticverge.core.log.logger import data_logger, DATA\nfrom opticverge.core.solver.abstract_solver import AbstractSolver\nfrom opticverge.external.scikit.enum.normaliser import Normaliser\nfrom opticverge.external.scikit.enum.scoring_function import Scoring\nfrom opticverge.external.scikit.problem.abstract_regression_problem import AbstractRegressionProblem\n\n\nclass RedWineQualityPredictionProblem(AbstractRegressionProblem):\n def __init__(self, scoring_function: Scoring, normaliser: Normaliser = None, folds: int = 1):\n\n df = pandas.read_csv(\"./winequality-red.csv\", sep=\";\", usecols=[\n \"fixed acidity\", \"volatile acidity\", \"citric acid\", \"residual sugar\", \"chlorides\", \"free sulfur dioxide\",\n \"total sulfur dioxide\", \"density\", \"pH\", \"sulphates\", \"alcohol\", \"quality\"\n ])\n\n data = np.array(df[[\"fixed acidity\", \"volatile acidity\", \"citric acid\", \"residual sugar\", \"chlorides\", \"free sulfur dioxide\",\n \"total sulfur dioxide\", \"density\", \"pH\", \"sulphates\", \"alcohol\"]])\n target = np.array(df[\"quality\"])\n\n super(RedWineQualityPredictionProblem, self).__init__(\n Objective.Minimisation,\n \"Red Wine Quality Prediction\",\n data_x=data,\n target_x=target,\n normaliser=normaliser,\n folds=folds,\n scoring_function=scoring_function\n )\n\n def log_chromosome(self, chromosome: AbstractChromosome, solver: AbstractSolver,\n additional_data: Dict[str, Any] = None, separator=\"|\"):\n data_str = super(RedWineQualityPredictionProblem, self).log_chromosome(\n chromosome,\n solver,\n None\n )\n\n data_logger.log(DATA, data_str)\n\n def objective_function(self, chromosome: AbstractChromosome):\n super(RedWineQualityPredictionProblem, self).objective_function(chromosome)\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
ficusoftdeveloper/bluedome
[ "3dc04fd749bee22b4f5eb94db85a5c15d7ef8ea0" ]
[ "scripts/image/crack_detection_fast.py" ]
[ "from __future__ import (absolute_import, division,print_function, unicode_literals)\r\nfrom builtins import *\r\nimport numpy as np\r\nimport cv2\r\nimport SimpleITK as sitk\r\nfrom builtins import *\r\nfrom scipy.spatial import distance\r\nimport sys\r\nimport time\r\n############### FUNCTIONS ##########################\r\ndef imcomplement(im):\r\n if np.max(im)>1:\r\n imout=255-im\r\n else:\r\n imout=1-im\r\n return imout\r\n\r\ndef mat2gray(img):\r\n max_img=np.max(img)\r\n min_img=np.min(img)\r\n imgout=(img-min_img)/(max_img-min_img)\r\n return imgout\r\n\r\ndef im2double(img):\r\n imgout=img.astype('float32')\r\n imgout= mat2gray(imgout)\r\n return imgout\r\n\r\ndef imreconstruct(marker,mask):\r\n markeritk=sitk.GetImageFromArray(marker)\r\n maskitk=sitk.GetImageFromArray(mask)\r\n recfilt=sitk.ReconstructionByDilationImageFilter()\r\n rectoutitk=recfilt.Execute(markeritk,maskitk)\r\n rectout=sitk.GetArrayFromImage(rectoutitk)\r\n return rectout\r\n\r\ndef eigen_cov(x,y):\r\n mx=np.mean(x)\r\n my=np.mean(y)\r\n x=x-mx\r\n y=y-my\r\n cxx=np.var(x)\r\n cxy=0\r\n cyy=np.var(y);\r\n nx=len(x)\r\n for ct in range(nx):\r\n cxy=cxy+x[ct]*y[ct];\r\n cxy=cxy/nx;\r\n C=np.zeros((2,2))\r\n C[0,0]=cxx\r\n C[0,1]=cxy\r\n C[1,0]=cxy\r\n C[1,1]=cyy\r\n D,V=np.linalg.eig(C)\r\n return V,D\r\n\r\ndef improfile(img,x,y,n):\r\n xm=x[0]\r\n x0=x[1]\r\n ym=y[0]\r\n y0=y[1]\r\n\r\n a = np.arctan((y0 - ym) / (x0 - xm))\r\n i=range(0,100,int(100/n))\r\n cx=np.squeeze(np.zeros((1,len(i))))\r\n cy=np.squeeze(np.zeros((1,len(i))))\r\n c=np.squeeze(np.zeros((1,len(i))))\r\n ct=0\r\n for t in range(0,100,int(100/30)):\r\n tf=t/100.0\r\n cx[ct] = int(xm + (x0 - xm)*tf)\r\n cy[ct] = int(ym + (y0 - ym)*tf)\r\n c[ct]=img[int(cy[ct]), int(cx[ct])]\r\n ct=ct+1\r\n return c,cx,cy\r\ndef filter_result3(img,bw_result,ths,thm):\r\n bw_result_orig=np.copy(bw_result);\r\n points=np.where(bw_result>0)\r\n points=np.reshape(points,np.shape(points))\r\n points=np.transpose(points)\r\n npoints=np.shape(points)[0]\r\n k=20\r\n step=5\r\n hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n sat=hsv[:,:,1]/255\r\n bw_result_filter=np.zeros(np.shape(bw_result))\r\n xc=points[:,1]\r\n yc=points[:,0]\r\n for ct in range(0,npoints,step):\r\n #print(ct/npoints)\r\n ystart=max(0,yc[ct]-k);\r\n xstart=max(0,xc[ct]-k);\r\n yend=min(np.shape(img)[0],yc[ct]+k);\r\n xend=min(np.shape(img)[1],xc[ct]+k);\r\n\r\n\r\n p=points[ct,:]\r\n p=np.reshape(p,(1,2))\r\n Dpoints=distance.cdist(p,points)\r\n Dpoints=np.squeeze(Dpoints)\r\n ipoints=np.squeeze(np.where(Dpoints<40))\r\n xneigh=points[ipoints,1];\r\n yneigh=points[ipoints,0];\r\n V,D=eigen_cov(xneigh,yneigh)\r\n vmin=V[:,0];\r\n if D[1]<D[0]:\r\n vmin=V[:,1];\r\n\r\n x1=xc[ct]-k*vmin[0];\r\n y1=yc[ct]-k*vmin[1];\r\n\r\n x2=xc[ct]+k*vmin[0];\r\n y2=yc[ct]+k*vmin[1];\r\n p,px,py=improfile(sat,np.array([x1,x2]),np.array([y1,y2]),30);\r\n s=np.abs(np.mean(p[0:5])-np.mean(p[len(p)-5:len(p)]));\r\n s=round(s*100);\r\n m=np.max([p[0:5],p[len(p)-5:len(p)]]);\r\n if(s<ths and m<thm):\r\n bw_result_filter[ystart:yend,xstart:xend]=bw_result_orig[ystart:yend,xstart:xend];\r\n return bw_result_filter\r\ndef min_openings(im,LEN,DEG_NUM):\r\n imo=[];\r\n for i in range(DEG_NUM):\r\n #DEG=(i)*((360/DEG_NUM)/2)\r\n filtername=str(i+1)+'se.txt'\r\n se=np.loadtxt('filters/images/filters/'+filtername)\r\n if(i==0):\r\n se=np.reshape(se,(1,len(se)))\r\n if(i==6):\r\n se=np.reshape(se,(len(se),1))\r\n se=se.astype('uint8')\r\n imoi=cv2.erode(im,se)\r\n imoi=cv2.dilate(imoi,se)\r\n imo.append(imoi)\r\n imB=imo[0]\r\n for 
i in range(DEG_NUM-1):\r\n k=i+1\r\n imB=np.minimum(imB,imo[k])\r\n\r\n\r\n return imB\r\n\r\ndef smooth_cross_section(imV,LEN_diff,DEG_NUM):\r\n imV_c=imcomplement(imV)\r\n imd=[]\r\n for i in range(12):\r\n k=i+1\r\n se1=np.loadtxt('filters/images/filters/'+str(k)+'linekernel1.txt')\r\n se2=np.loadtxt('filters/images/filters/'+str(k)+'linekernel2.txt')\r\n if(i==0):\r\n se1=np.reshape(se1,(1,len(se1)))\r\n se2=np.reshape(se2,(len(se2),1))\r\n if(i==6):\r\n se1=np.reshape(se1,(len(se1),1))\r\n se2=np.reshape(se2,(1,len(se2)))\r\n\r\n temp=cv2.filter2D(imV_c.astype('float32'),-1,se1)\r\n imdi=cv2.filter2D(temp,-1,se2)\r\n imdi[imdi<0]=0\r\n imd.append(imdi)\r\n imDiff=imd[0]\r\n for i in range(11):\r\n k=i+1\r\n imDiff=np.maximum(imDiff,imd[k])\r\n imDiff=mat2gray(imDiff)\r\n return imDiff\r\n\r\ndef reconstruction_by_dilation(im,LEN,DEG_NUM):\r\n imo=[];\r\n for i in range(DEG_NUM):\r\n #DEG=(i)*((360/DEG_NUM)/2)\r\n filtername=str(i+1)+'se.txt'\r\n se=np.loadtxt('filters/images/filters/'+filtername)\r\n if(i==0):\r\n se=np.reshape(se,(1,len(se)))\r\n if(i==6):\r\n se=np.reshape(se,(len(se),1))\r\n se=se.astype('uint8')\r\n imoi=cv2.erode(im,se)\r\n imoi=cv2.dilate(imoi,se)\r\n imo.append(imoi)\r\n imC=imo[0]\r\n for i in range(DEG_NUM-1):\r\n k=i+1\r\n imC=np.maximum(imC,imo[k])\r\n\r\n imC2=imreconstruct(imC,im)\r\n imC2=mat2gray(imC2)\r\n return imC2\r\n\r\ndef reconstruction_by_erosion(im,LEN,DEG_NUM):\r\n im_close=[];\r\n for i in range(DEG_NUM):\r\n #DEG=(i)*((360/DEG_NUM)/2)\r\n filtername=str(i+1)+'se.txt'\r\n se=np.loadtxt('filters/images/filters/'+filtername)\r\n if(i==0):\r\n se=np.reshape(se,(1,len(se)))\r\n if(i==6):\r\n se=np.reshape(se,(len(se),1))\r\n se=se.astype('uint8')\r\n im_closei=cv2.dilate(im,se)\r\n im_closei=cv2.erode(im_closei,se)\r\n im_close.append(im_closei);\r\n imTemp39=im_close[0]\r\n for i in range(DEG_NUM-1):\r\n k=i+1\r\n imTemp39=np.minimum(imTemp39,im_close[k])\r\n\r\n marker=imcomplement(imTemp39)\r\n mask=imcomplement(im)\r\n imF=imreconstruct(marker,mask)\r\n imF=mat2gray(imF)\r\n imF=imcomplement(imF)\r\n return imF\r\n\r\n############ MAIN ##############\r\nif len(sys.argv)<2:\r\n print('missing input file')\r\n sys.exit(-1)\r\n\r\nif len(sys.argv)==4:\r\n img_file_out=sys.argv[2]\r\n img_file_out_bin=sys.argv[3]\r\nelse:\r\n img_file_out='output.png'\r\n img_file_out_bin='output.png'\r\n\r\nimg_file=sys.argv[1]\r\nprint('processing '+img_file)\r\nimgorig=cv2.imread(img_file)\r\nstart_time = time.time()\r\n\r\nsize_orig=np.shape(imgorig)\r\nprint(size_orig)\r\n## resize if the original size is different from dataset images\r\n## so we can keep the same parameters for the filters\r\nscale=2\r\nrows_dataset=int(2448/scale)\r\ncols_dataset=int(3264/scale)\r\nimg_blur = cv2.bilateralFilter(cv2.resize(imgorig,(cols_dataset,rows_dataset)) ,int(51/scale),int(201),int(201/scale))\r\nimg_blur=cv2.resize(img_blur,(size_orig[1],size_orig[0]))\r\n##\r\nprint(\"bilateral filter --- %s seconds ---\" % (time.time() - start_time))\r\nimg=cv2.resize(img_blur,(653,490))\r\nhsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\nim=hsv[:,:,2]\r\nbw_mask=np.zeros(np.shape(im))\r\nbw_mask_offr=round(np.shape(im)[0]/20)\r\nbw_mask_offc=round(np.shape(im)[1]/20)\r\nbw_mask[bw_mask_offr:np.shape(im)[0]-bw_mask_offr, 
bw_mask_offc:np.shape(im)[1]-bw_mask_offc]=1;\r\nim=mat2gray(im)*mat2gray(bw_mask)\r\n\r\nim=imcomplement(im)\r\nim=im2double(im)\r\nDEG_NUM=12;\r\nLEN_c=11;\r\nLEN_o=11;\r\nLEN_diff=7;\r\nic1=reconstruction_by_dilation(im,LEN_c,DEG_NUM)\r\nio1=min_openings(im,LEN_o,DEG_NUM)\r\niv=mat2gray(ic1-io1)\r\nimDiff=smooth_cross_section(iv,LEN_diff,LEN_c)\r\nimL=reconstruction_by_dilation(imDiff,LEN_c,DEG_NUM)\r\nimF=reconstruction_by_erosion(imL,LEN_c,DEG_NUM)\r\nTH_LOW=0.12;\r\nTH_HIGH=0.2;\r\nmin_obj=20;\r\nmin_hole=10;\r\nmask=np.zeros(np.shape(imF))\r\nmarker=np.zeros(np.shape(imF))\r\nmask[imF>TH_LOW]=1\r\nmarker[imF>TH_HIGH]=1\r\nbw_result=imreconstruct(marker,mask)\r\nprint(\"bw result --- %s seconds ---\" % (time.time() - start_time))\r\nbw_result=filter_result3(img,bw_result,4,0.2)\r\nprint(\"filter result --- %s seconds ---\" % (time.time() - start_time))\r\nbw_result=cv2.resize(bw_result,(size_orig[1],size_orig[0]))\r\n\r\nimgr=imgorig[:,:,2];\r\nimgr[bw_result>0]=255;\r\nimgorig[:,:,2]=imgr;\r\n\r\nprint('saving output file: '+img_file_out)\r\ncv2.imwrite(img_file_out,imgorig)\r\ncv2.imwrite(img_file_out_bin,bw_result*255)\r\nprint('done ')\r\n" ]
[ [ "numpy.max", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.squeeze", "numpy.minimum", "numpy.copy", "numpy.min", "numpy.mean", "numpy.shape", "numpy.where", "numpy.arctan", "numpy.linalg.eig", "numpy.transpose", "numpy.loadtxt", "scipy.spatial.distance.cdist", "numpy.var", "numpy.maximum" ] ]
rddunphy/pyHSI
[ "b55c2a49568e04e0a2fb39da01cfe1f129bc86a4" ]
[ "test/test_cameras.py" ]
[ "from datetime import datetime\nimport unittest\nfrom unittest.mock import MagicMock\n\nimport numpy as np\n\nfrom pyhsi.cameras import BaslerCamera\n\n\nclass MockGrab:\n def __init__(self, data):\n self.Array = data\n\n def GrabSucceeded(self):\n return True\n\n def Release(self):\n pass\n\n\nclass TestBaslerCamera(unittest.TestCase):\n def setUp(self):\n self.mock_device = MagicMock()\n self.mock_stage = MagicMock()\n self.mock_stage.default_velocity = 20\n self.cam = BaslerCamera(device=self.mock_device)\n\n def test_capture(self):\n self.mock_device.RetrieveResult = MagicMock(side_effect=[\n MockGrab([[0, 12], [3, 100]]),\n MockGrab([[9, 8], [31, 5]])\n ])\n self.mock_stage.is_moving = MagicMock(side_effect=[True, True, False])\n data = self.cam.capture(self.mock_stage, [0, 100])\n target = np.array([[[12, 100], [0, 3]], [[8, 5], [9, 31]]])\n np.testing.assert_array_equal(data, target)\n\n def test_file_name_basic(self):\n fn = \"test_sample\"\n out = self.cam._process_file_name(fn, datetime(2020, 6, 20),\n 0, 100, 10, (227, 300, 400))\n self.assertEqual(out, \"test_sample.hdr\")\n\n def test_file_name_fields(self):\n fn = \"sample_{date}_{time}_exp={exp}_{frames}_frames\"\n out = self.cam._process_file_name(fn, datetime(2020, 6, 20, 13, 40),\n 0, 100, 10, (227, 300, 400))\n target = \"sample_2020-06-20_13:40:00_exp=4000_227_frames.hdr\"\n self.assertEqual(out, target)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.testing.assert_array_equal" ] ]
bchaselab/Toxopy
[ "30fc03b0dc8e83916b1550a29f14fc899c22d068" ]
[ "toxopy/dlcboxplot.py" ]
[ "\"\"\"\nToxopy (https://github.com/bchaselab/Toxopy)\n© M. Alyetama, University of Nebraska at Omaha\nLicensed under the terms of the MIT license\n\"\"\"\n\nfrom toxopy import fwarnings, trials\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef dlcboxplot(file,\n variable,\n ylab,\n comparison,\n jitter=False,\n colors=False,\n title=False,\n save=False,\n output_dir=None):\n \"\"\"\n file is typically 'dlc_all_avgs_updated.csv'\n variable is either 'cat_ditance' or 'vel'\n ylab is the y-axis label\n colors is a list of two colors (e.g., [\"#0062FF\", \"#DB62FF\"])\n output_dir to save the plot in a specific dir when save is True\n \"\"\"\n\n df = pd.read_csv(file)\n tls = trials()\n new = ['FT', 'ALONE1', 'SALINE1', 'ALONE2', 'URINE1',\n 'ALONE3', 'SALINE2', 'ALONE4', 'URINE2', 'ALONE5']\n if variable == 'distance':\n df = df[(df['trial'].isin(tls[0::2]))]\n d = {}\n\n for i, j in zip(new, tls):\n d[j] = i\n\n df = df.replace(d)\n df = df[df['var'] == variable]\n\n sns.set(style='ticks', font_scale=1)\n\n plt.figure(figsize=(13, 5), dpi=100)\n\n if comparison == 'infection_status':\n test, control = 'Infected', 'Control'\n comparing = 'infection_status'\n legend = 'Infection Status'\n elif comparison == 'indoor_outdoor_status':\n test, control = 'Indoor-outdoor', 'Indoor'\n comparing = 'indoor_outdoor_status'\n legend = 'Indoor-outdoor Status'\n\n if colors is False:\n my_pal = {control: '#00FFFF', test: '#E60E3C'}\n else:\n my_pal = {control: colors[0], test: colors[1]}\n\n ax = sns.boxplot(x='trial',\n y='value',\n data=df,\n hue=comparing,\n palette=my_pal)\n\n if jitter is True:\n sns.stripplot(x='trial',\n y='value',\n data=df,\n color='black',\n size=3,\n jitter=1)\n\n if variable != 'distance':\n for i in range(len(df['trial'].unique())-1):\n if variable == 'vel':\n plt.vlines(i+.5, 10, 45, linestyles='solid',\n colors='black', alpha=0.2)\n elif variable == 'cat_distance':\n plt.vlines(i+.5, 0, 1.3, linestyles='solid',\n colors='black', alpha=0.2)\n\n if title is not False:\n plt.title(title, fontsize=12)\n else:\n pass\n\n ax.set_xlabel('Trial', fontsize=12)\n ax.set_ylabel(ylab, fontsize=12)\n\n ax.legend(title=legend)\n\n plt.legend(title=legend)\n '''add significance bars and asterisks between boxes.\n [first pair, second pair], ..., [|, –], ...'''\n if variable == 'vel':\n l = [[7.75, 5.75], [8.25, 6.25], [26, 28], [31, 33]]\n elif variable == 'cat_distance':\n l = [[7.75, 5.75], [8.25, 6.25], [0.85, 0.9], [0.95, 1]]\n\n for x1, x2, y1, y2 in zip(l[0], l[1], l[2], l[3]):\n sig = plt.plot([x1, x1, x2, x2], [y1, y2, y2, y1],\n linewidth=1,\n color='k')\n plt.text((x1 + x2) * .5, y2 + 0, \"*\",\n ha='center', va='bottom', fontsize=18)\n\n plt.show()\n\n fig = ax.get_figure()\n\n if save is True:\n\n def sav(myString):\n return fig.savefig(myString,\n bbox_inches='tight',\n dpi=100,\n pad_inches=0.1)\n\n if output_dir is not None:\n sav(f'{output_dir}/{variable}.png')\n else:\n sav(f'{variable}.png')\n" ]
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "matplotlib.pyplot.vlines", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
BOURSa/mmdetection3d
[ "27d0001e873b3102a828a27e1372873fcf81ed7e" ]
[ "mmdet3d/models/model_utils/vote_module.py" ]
[ "import torch\nfrom mmcv.cnn import ConvModule\nfrom torch import nn as nn\n\nfrom mmdet3d.models.builder import build_loss\n\n\nclass VoteModule(nn.Module):\n \"\"\"Vote module.\n\n Generate votes from seed point features.\n\n Args:\n in_channels (int): Number of channels of seed point features.\n vote_per_seed (int): Number of votes generated from each seed point.\n gt_per_seed (int): Number of ground truth votes generated\n from each seed point.\n conv_channels (tuple[int]): Out channels of vote\n generating convolution.\n conv_cfg (dict): Config of convolution.\n Default: dict(type='Conv1d').\n norm_cfg (dict): Config of normalization.\n Default: dict(type='BN1d').\n norm_feats (bool): Whether to normalize features.\n Default: True.\n vote_loss (dict): Config of vote loss.\n \"\"\"\n\n def __init__(self,\n in_channels,\n vote_per_seed=1,\n gt_per_seed=3,\n conv_channels=(16, 16),\n conv_cfg=dict(type='Conv1d'),\n norm_cfg=dict(type='BN1d'),\n norm_feats=True,\n vote_loss=None):\n super().__init__()\n self.in_channels = in_channels\n self.vote_per_seed = vote_per_seed\n self.gt_per_seed = gt_per_seed\n self.norm_feats = norm_feats\n self.vote_loss = build_loss(vote_loss)\n\n prev_channels = in_channels\n vote_conv_list = list()\n for k in range(len(conv_channels)):\n vote_conv_list.append(\n ConvModule(\n prev_channels,\n conv_channels[k],\n 1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n bias=True,\n inplace=True))\n prev_channels = conv_channels[k]\n self.vote_conv = nn.Sequential(*vote_conv_list)\n\n # conv_out predicts coordinate and residual features\n out_channel = (3 + in_channels) * self.vote_per_seed\n self.conv_out = nn.Conv1d(prev_channels, out_channel, 1)\n\n def forward(self, seed_points, seed_feats):\n \"\"\"forward.\n\n Args:\n seed_points (torch.Tensor): Coordinate of the seed\n points in shape (B, N, 3).\n seed_feats (torch.Tensor): Features of the seed points in shape\n (B, C, N).\n\n Returns:\n tuple[torch.Tensor]:\n\n - vote_points: Voted xyz based on the seed points \\\n with shape (B, M, 3), ``M=num_seed*vote_per_seed``.\n - vote_features: Voted features based on the seed points with \\\n shape (B, C, M) where ``M=num_seed*vote_per_seed``, \\\n ``C=vote_feature_dim``.\n \"\"\"\n batch_size, feat_channels, num_seed = seed_feats.shape\n num_vote = num_seed * self.vote_per_seed\n x = self.vote_conv(seed_feats)\n # (batch_size, (3+out_dim)*vote_per_seed, num_seed)\n votes = self.conv_out(x)\n\n votes = votes.transpose(2, 1).view(batch_size, num_seed,\n self.vote_per_seed, -1)\n offset = votes[:, :, :, 0:3]\n res_feats = votes[:, :, :, 3:]\n\n vote_points = (seed_points.unsqueeze(2) + offset).contiguous()\n vote_points = vote_points.view(batch_size, num_vote, 3)\n vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) +\n res_feats).contiguous()\n vote_feats = vote_feats.view(batch_size, num_vote,\n feat_channels).transpose(2,\n 1).contiguous()\n\n if self.norm_feats:\n features_norm = torch.norm(vote_feats, p=2, dim=1)\n vote_feats = vote_feats.div(features_norm.unsqueeze(1))\n return vote_points, vote_feats\n\n def get_loss(self, seed_points, vote_points, seed_indices,\n vote_targets_mask, vote_targets):\n \"\"\"Calculate loss of voting module.\n\n Args:\n seed_points (torch.Tensor): Coordinate of the seed points.\n vote_points (torch.Tensor): Coordinate of the vote points.\n seed_indices (torch.Tensor): Indices of seed points in raw points.\n vote_targets_mask (torch.Tensor): Mask of valid vote targets.\n vote_targets (torch.Tensor): Targets of 
votes.\n\n Returns:\n torch.Tensor: Weighted vote loss.\n \"\"\"\n batch_size, num_seed = seed_points.shape[:2]\n\n seed_gt_votes_mask = torch.gather(vote_targets_mask, 1,\n seed_indices).float()\n\n seed_indices_expand = seed_indices.unsqueeze(-1).repeat(\n 1, 1, 3 * self.gt_per_seed)\n seed_gt_votes = torch.gather(vote_targets, 1, seed_indices_expand)\n seed_gt_votes += seed_points.repeat(1, 1, 3)\n\n weight = seed_gt_votes_mask / (torch.sum(seed_gt_votes_mask) + 1e-6)\n distance = self.vote_loss(\n vote_points.view(batch_size * num_seed, -1, 3),\n seed_gt_votes.view(batch_size * num_seed, -1, 3),\n dst_weight=weight.view(batch_size * num_seed, 1))[1]\n vote_loss = torch.sum(torch.min(distance, dim=1)[0])\n\n return vote_loss\n" ]
[ [ "torch.min", "torch.gather", "torch.nn.Sequential", "torch.norm", "torch.nn.Conv1d", "torch.sum" ] ]
oldshuren/Hierarchical-Localization
[ "334327956138c98af209e5ae9e13a0e222e97c2c" ]
[ "hloc/extract_features.py" ]
[ "import argparse\nimport torch\nfrom pathlib import Path\nimport h5py\nimport logging\nfrom types import SimpleNamespace\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\nimport pprint\n\nfrom . import extractors\nfrom .utils.base_model import dynamic_load\nfrom .utils.tools import map_tensor\n\n\n'''\nA set of standard configurations that can be directly selected from the command\nline using their name. Each is a dictionary with the following entries:\n - output: the name of the feature file that will be generated.\n - model: the model configuration, as passed to a feature extractor.\n - preprocessing: how to preprocess the images read from disk.\n'''\nconfs = {\n 'superpoint_aachen': {\n 'output': 'feats-superpoint-n4096-r1024',\n 'model': {\n 'name': 'superpoint',\n 'nms_radius': 3,\n 'max_keypoints': 4096,\n },\n 'preprocessing': {\n 'grayscale': True,\n 'resize_max': 1024,\n },\n },\n 'superpoint_inloc': {\n 'output': 'feats-superpoint-n4096-r1600',\n 'model': {\n 'name': 'superpoint',\n 'nms_radius': 4,\n 'max_keypoints': 4096,\n },\n 'preprocessing': {\n 'grayscale': True,\n 'resize_max': 1600,\n },\n },\n 'hfnet_superpoint': {\n 'output': 'feats-superpoint',\n 'model': {\n 'name': 'superpoint',\n 'nms_radius': 4,\n 'max_keypoints': 4096,\n },\n 'preprocessing': {\n 'grayscale': True,\n 'resize_max': 1600,\n },\n },\n 'd2net-ss': {\n 'output': 'feats-d2net-ss',\n 'model': {\n 'name': 'd2net',\n 'multiscale': False,\n },\n 'preprocessing': {\n 'grayscale': False,\n 'resize_max': 1600,\n },\n },\n}\n\n\nclass ImageDataset(torch.utils.data.Dataset):\n default_conf = {\n 'globs': ['*.jpg', '*.png', '*.jpeg', '*.JPG', '*.PNG'],\n 'grayscale': False,\n 'resize_max': None,\n }\n\n def __init__(self, root, conf):\n self.conf = conf = SimpleNamespace(**{**self.default_conf, **conf})\n self.root = root\n\n self.paths = []\n for g in conf.globs:\n self.paths += list(Path(root).glob('**/'+g))\n if len(self.paths) == 0:\n raise ValueError(f'Could not find any image in root: {root}.')\n self.paths = sorted(list(set(self.paths)))\n self.paths = [i.relative_to(root) for i in self.paths]\n logging.info(f'Found {len(self.paths)} images in root {root}.')\n\n def __getitem__(self, idx):\n path = self.paths[idx]\n if self.conf.grayscale:\n mode = cv2.IMREAD_GRAYSCALE\n else:\n mode = cv2.IMREAD_COLOR\n image = cv2.imread(str(self.root / path), mode)\n if not self.conf.grayscale:\n image = image[:, :, ::-1] # BGR to RGB\n if image is None:\n raise ValueError(f'Cannot read image {str(path)}.')\n image = image.astype(np.float32)\n size = image.shape[:2][::-1]\n w, h = size\n\n if self.conf.resize_max and max(w, h) > self.conf.resize_max:\n scale = self.conf.resize_max / max(h, w)\n h_new, w_new = int(round(h*scale)), int(round(w*scale))\n image = cv2.resize(\n image, (w_new, h_new), interpolation=cv2.INTER_LINEAR)\n\n if self.conf.grayscale:\n image = image[None]\n else:\n image = image.transpose((2, 0, 1)) # HxWxC to CxHxW\n image = image / 255.\n\n data = {\n 'name': path.as_posix(),\n 'image': image,\n 'original_size': np.array(size),\n }\n return data\n\n def __len__(self):\n return len(self.paths)\n\nclass FeatureExtractor(object):\n def __init__(self, conf):\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n Model = dynamic_load(extractors, conf['model']['name'])\n self.model = Model(conf['model']).eval().to(self.device)\n\n def extract(self, image):\n image = image.astype(np.float32)\n size = image.shape[:2][::-1]\n image = image.transpose((2, 0, 1)) # HxWxC to CxHxW\n image = 
image / 255.\n\n data = {\n 'image': image,\n 'original_size': np.array(size),\n }\n pred = model(map_tensor(data, lambda x: x.to(self.device)))\n pred = {k: v[0].cpu().numpy() for k, v in pred.items()}\n\n pred['image_size'] = original_size = data['original_size'][0].numpy()\n if 'keypoints' in pred:\n size = np.array(data['image'].shape[-2:][::-1])\n scales = (original_size / size).astype(np.float32)\n pred['keypoints'] = (pred['keypoints'] + .5) * scales[None] - .5\n\n return pred\n\n@torch.no_grad()\ndef main(conf, image_dir, export_dir, as_half=False):\n logging.info('Extracting local features with configuration:'\n f'\\n{pprint.pformat(conf)}')\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n Model = dynamic_load(extractors, conf['model']['name'])\n model = Model(conf['model']).eval().to(device)\n\n loader = ImageDataset(image_dir, conf['preprocessing'])\n loader = torch.utils.data.DataLoader(loader, num_workers=1)\n\n feature_path = Path(export_dir, conf['output']+'.h5')\n feature_path.parent.mkdir(exist_ok=True, parents=True)\n feature_file = h5py.File(str(feature_path), 'a')\n\n for data in tqdm(loader):\n pred = model(map_tensor(data, lambda x: x.to(device)))\n pred = {k: v[0].cpu().numpy() for k, v in pred.items()}\n\n pred['image_size'] = original_size = data['original_size'][0].numpy()\n if 'keypoints' in pred:\n size = np.array(data['image'].shape[-2:][::-1])\n scales = (original_size / size).astype(np.float32)\n pred['keypoints'] = (pred['keypoints'] + .5) * scales[None] - .5\n\n if as_half:\n for k in pred:\n dt = pred[k].dtype\n if (dt == np.float32) and (dt != np.float16):\n pred[k] = pred[k].astype(np.float16)\n\n grp = feature_file.create_group(data['name'][0])\n for k, v in pred.items():\n grp.create_dataset(k, data=v)\n\n del pred\n\n feature_file.close()\n logging.info('Finished exporting features.')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--image_dir', type=Path, required=True)\n parser.add_argument('--export_dir', type=Path, required=True)\n parser.add_argument('--conf', type=str, default='superpoint_aachen',\n choices=list(confs.keys()))\n args = parser.parse_args()\n main(confs[args.conf], args.image_dir, args.export_dir)\n" ]
[ [ "numpy.array", "torch.no_grad", "torch.cuda.is_available", "torch.utils.data.DataLoader" ] ]
tonino102008/openfast
[ "cfb401af163f4e0b6bb8588c23374e1534ad8d87" ]
[ "ExampleCases/OpFAST_WF1x1/globalDISCON.py" ]
[ "import numpy\n\nGenSpeedF = 0\nIntSpdErr = 0 \nLastGenTrq = 0 \nLastTime = 0 \nLastTimePC = 0 \nLastTimeVS = 0 \nPitCom = numpy.zeros(3)\nVS_Slope15 = 0 \nVS_Slope25 = 0 \nVS_SySp = 0 \nVS_TrGnSp = 0" ]
[ [ "numpy.zeros" ] ]
SharanRajani/SoundQX
[ "22b3fcfab394c0c7ce0819c634d666ca25dc7b54" ]
[ "test_gen_spec.py" ]
[ "from keras.models import Sequential, load_model\nfrom keras.layers.core import Dense, Dropout, Activation,Flatten\nfrom keras.layers.recurrent import LSTM, GRU, SimpleRNN\nfrom keras.layers.convolutional import Convolution2D, Convolution1D, MaxPooling2D, MaxPooling1D, AveragePooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import ELU, PReLU, LeakyReLU\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.optimizers import SGD, Adagrad, RMSprop\nfrom keras.callbacks import Callback, ModelCheckpoint, EarlyStopping\nfrom keras.utils.io_utils import HDF5Matrix\nfrom scipy import signal\nimport scipy.io\nimport scipy.io.wavfile as wav\nimport numpy as np\nimport h5py\nimport librosa\nimport sys\nimport os\n\ndef make_spectrum_phase(y, FRAMESIZE, OVERLAP, FFTSIZE):\n D=librosa.stft(y,n_fft=FRAMESIZE,hop_length=OVERLAP,win_length=FFTSIZE,window=scipy.signal.hamming)\n Sxx = np.log10(abs(D)**2) \n print(str(D) + \" the value for D\")\n phase = np.exp(1j * np.angle(D))\n print(str(phase) + \" the value of phase\")\n mean = np.mean(Sxx, axis=1).reshape((257,1))\n std = np.std(Sxx, axis=1).reshape((257,1))+1e-12\n Sxx = (Sxx-mean)/std \n return Sxx, phase, mean, std\n\ndef recons_spec_phase(Sxx_r, phase):\n Sxx_r = np.sqrt(10**Sxx_r)\n R = np.multiply(Sxx_r , phase)\n result = librosa.istft(R,\n hop_length=256,\n win_length=512,\n window=scipy.signal.hamming)\n return result\n\n\n\ndef predict(modelpath, noisylistpath):\n\tmodel=load_model(modelpath) #\"weights/DNN_spec_20160425v2.hdf5\"\n\tFRAMESIZE = 512\n\tOVERLAP = 256\n\tFFTSIZE = 512\n\tRATE = 16000\n\tFRAMEWIDTH = 2\n\tFBIN = FRAMESIZE//2+1\n\t# noisylistpath = sys.argv[2]\n\tnoisylistpath = noisylistpath\n\n\twith open(noisylistpath, 'r') as f:\n\t for line in f:\n\t print(line)\n\t filename = line.split('/')[-1][:]\n\t print(filename)\n\t y,sr=librosa.load(line[:],sr=RATE)\n\t training_data = np.empty((10000, FBIN, FRAMEWIDTH*2+1)) # For Noisy data\n\n\t Sxx, phase, mean, std = make_spectrum_phase(y, FRAMESIZE, OVERLAP, FFTSIZE)\n\t idx = 0 \n\t for i in range(FRAMEWIDTH, Sxx.shape[1]-FRAMEWIDTH): # 5 Frmae\n\t training_data[idx,:,:] = Sxx[:,i-FRAMEWIDTH:i+FRAMEWIDTH+1] # For Noisy data\n\t idx = idx + 1\n\n\t X_train = training_data[:idx]\n\t X_train = np.reshape(X_train,(idx,-1))\n\t predict = model.predict(X_train)\n\t count=0\n\t for i in range(FRAMEWIDTH, Sxx.shape[1]-FRAMEWIDTH):\n\t Sxx[:,i] = predict[count]\n\t count+=1\n\t # # The un-enhanced part of spec should be un-normalized\n\t Sxx[:, :FRAMEWIDTH] = (Sxx[:, :FRAMEWIDTH] * std) + mean\n\t Sxx[:, -FRAMEWIDTH:] = (Sxx[:, -FRAMEWIDTH:] * std) + mean \n\n\t recons_y = recons_spec_phase(Sxx, phase)\n\t output = librosa.util.fix_length(recons_y, y.shape[0])\n\t wav.write(\"static/wav/enhanced.wav\",RATE,np.int16(output*32767))\n\t return os.path.join(\"static\",\"wav\",\"enhanced.wav\")\n\n\n\n" ]
[ [ "numpy.empty", "numpy.angle", "numpy.reshape", "numpy.mean", "numpy.multiply", "numpy.std", "numpy.sqrt", "numpy.int16" ] ]
shirtsgroup/LLC_Membranes
[ "e94694f298909352d7e9d912625314a1e46aa5b6", "e94694f298909352d7e9d912625314a1e46aa5b6", "e94694f298909352d7e9d912625314a1e46aa5b6" ]
[ "Ben_Manuscripts/stochastic_transport/figures/pore_water_tcl.py", "Ben_Manuscripts/transport/figures/totals.py", "Ben_Manuscripts/transport/figures/tailvpore_hbonding.py" ]
[ "#!/usr/bin/env python\n\nimport mdtraj as md\nimport numpy as np\n\nfrom LLC_Membranes.llclib import physical, topology\n\nr = 1\n\nt = md.load('initial.gro')\nkeep = [a.index for a in t.topology.atoms if a.residue.name == 'HOH']\nres_start = keep[0]\n\n\ncom = physical.center_of_mass(t.xyz[:, keep, :], [18., 1., 1.])\nmembrane = topology.LC('HII') # object w/ attributes of LC making up membrane\nhg = [a.index for a in t.topology.atoms if a.name in membrane.pore_defining_atoms and a.residue.name\n == membrane.name]\npore_centers = physical.avg_pore_loc(4, t.xyz[:, hg, :], t.unitcell_vectors, buffer=0, spline=False)\n\npartition = physical.partition(com, pore_centers, r, unitcell=t.unitcell_vectors,\n spline=False)\n\npore_indices = [res_start + 3 * i for i in np.where(partition[0, :])[0]]\ntail_indices = [res_start + 3 * i for i in np.where(partition[0, :] == False)[0]] # have to use double equals sign. Using is doesn't work with np.where\n\nwith open('partition.tcl', 'w') as f:\n f.write('color Display Background white\\n')\n f.write('mol addrep 0\\n')\n f.write('mol modselect 0 0 index')\n for i in pore_indices:\n end = i + 3\n f.write(' %s to %s' % (i, end - 1))\n f.write('\\n')\n f.write('mol modcolor 0 0 ColorID 0\\n')\n f.write('mol modstyle 0 0 CPK 2.0 0.3 12.0 12.0\\n')\n f.write('mol addrep 0\\n')\n f.write('mol modselect 1 0 index')\n for i in tail_indices:\n end = i + 3\n f.write(' %s to %s' % (i, end - 1))\n f.write('\\n')\n f.write('mol modstyle 1 0 CPK 2.0 0.3 12.0 12.0\\n')\n f.write('mol modcolor 1 0 ColorID 1\\n')\n\n", "#!/usr/bin/env python\n\nimport numpy as np\nfrom LLC_Membranes.analysis.rdf import System\nfrom LLC_Membranes.llclib import file_rw\nimport matplotlib.pyplot as plt\nimport names\n\ndef calculate_rdf(res, path, gro='berendsen.gro', traj='PR_nojump.xtc', atoms=None):\n\n\tprint('Calculating RDF of residue %s' % r)\n\tif atoms is not None:\n\t\trdf = System('%s/%s' %(path, gro), '%s/%s' %(path, traj), r, 'HII', atoms=atoms)\n\telse:\n\t\trdf = System('%s/%s' %(path, gro), '%s/%s' %(path, traj), r, 'HII')\n\n\trdf.radial_distribution_function(bins=50, spline=True, npts_spline=10, cut=1.5)\n\n\trdf.bootstrap(200)\n\t\n\tfile_rw.save_object(rdf, '%s/rdf_%s.pl' % (path, res))\n\n\treturn rdf\n\nrecalculate = False \nresidues = [\"DMS\", \"ATO\"]\nresidues = [\"SOH\", \"GCL\"]\nresidues = [\"THF\", \"PCB\", \"EAC\", \"DMF\"]\nresidues = [\"MET\", \"BUT\"]\nwt=10\nn = np.zeros(len(residues))\n\nfor i, r in enumerate(residues):\n\tprint(i, r)\n\tpath = \"/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11/%s/%dwt\" %(r,wt)\n\n\tif recalculate:\n\t\trdf = calculate_rdf(r, path)\n\telse:\n\t\ttry:\n\t\t\trdf = file_rw.load_object('%s/rdf_%s.pl' %(path, r))\n\t\texcept FileNotFoundError:\n\t\t\trdf = calculate_rdf(r, path)\n\n\tzbox = rdf.t.unitcell_vectors[:, 2, 2].mean()\n\tmean = rdf.density.mean(axis=0)\n\tV = np.array([zbox * mean[i] * np.pi*(rdf.r[i + 1] ** 2 - rdf.r[i] ** 2) for i in range(len(rdf.r) - 1)])\n\tplt.plot(V)\n\nplt.ylabel('Density', fontsize=14)\nplt.xlabel('Distance from pore center (nm)', fontsize=14)\nplt.gcf().get_axes()[0].tick_params(labelsize=14)\nplt.legend(fontsize=14)\nplt.tight_layout()\nplt.show()\n", "#!/usr/bin/env python\n\nimport numpy as np\nfrom LLC_Membranes.analysis import hbonds\nfrom LLC_Membranes.llclib import topology, physical\n\npath = \"/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11/pure_water\"\nspline = True \ntails = False # look at hbonds in the tails\nhead_groups = True\nif head_groups:\n\ttails = False 
#water molecules in the tails will not be hydrogen bonding with head groups\nwt = 5\nr = 1.5 # cut-off between distal tails\nnpores = 4\ndwell_fraction = 0.95\n\nfull_path = path + '/%swt' % wt\n\nhb = hbonds.System('%s/PR_nojump.xtc' % full_path, '%s/berendsen.gro' % full_path)\n\nif head_groups:\n\thb.set_eligible('HOH', 'all', donors_only=True)\n\thb.set_eligible('HII', 'all')\nelse:\n\thb.set_eligible('HOH', 'all')\n\n# find pore centers\npore_defining_atoms = topology.LC('NAcarb11V').pore_defining_atoms\npore_atoms = [a.index for a in hb.t.topology.atoms if a.name in pore_defining_atoms]\n\nif spline:\n print('Creating pore splines')\n pore_centers = physical.trace_pores(hb.t.xyz[:, pore_atoms, :], hb.t.unitcell_vectors, 10)[0]\nelse:\n pore_centers = physical.avg_pore_loc(npores, hb.t.xyz[:, pore_atoms, :], hb.t.unitcell_vectors)[0]\n\noxygen = [a.index for a in hb.t.topology.atoms if a.residue.name == 'HOH' and a.name == 'OW']\ninregion = physical.partition(hb.t.xyz[:, oxygen, :], pore_centers, r, buffer=0, unitcell=hb.t.unitcell_vectors, npores=npores, spline=spline)\n\nif tails:\n inregion = ~inregion # '~' flips True and False\n\ndwell = np.full((hb.t.n_frames, len(oxygen)), False, dtype=bool)\n\nfor t in range(hb.t.n_frames):\n dwell[t, inregion[t]] = True\n\nfraction_dwelled = np.sum(dwell, axis=0) / hb.t.n_frames # fraction of total time spend in region of interest\n\nkeep = np.where(fraction_dwelled >= dwell_fraction)[0]\n\n# oxygen, hb.A, and hb.D are ordered the same way\nhb.A = np.array(hb.A)[keep]\nhb.D = np.array(hb.D)[keep]\nfrom itertools import chain\nhkeep = list(chain.from_iterable((2*i, 2*i + 1) for i in keep)) # since there are two H's per O\nhb.H = np.array(hb.H)[hkeep]\n\ntotal_waters = len(hb.A)\n\nhb.identify_hbonds(0.35, 30)\nn = [a.shape[1] for a in hb.hbonds]\n\nprint('Average number of water molecules hbonded per frame: %s' % np.mean(n))\nprint('Fraction of water molecules involved in an hbond: %s' % (np.mean(n) / total_waters))\n" ]
[ [ "numpy.where" ], [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gcf", "matplotlib.pyplot.show" ], [ "numpy.sum", "numpy.array", "numpy.mean", "numpy.where" ] ]
ddayzzz/federated
[ "283ea72f555da78ff4b084c1d94731ef8f16b363", "283ea72f555da78ff4b084c1d94731ef8f16b363" ]
[ "tensorflow_federated/python/core/impl/executors/eager_tf_executor.py", "tensorflow_federated/python/research/adaptive_lr_decay/run_federated_stackoverflow.py" ]
[ "# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A simple executor that operates synchronously in eager TensorFlow mode.\"\"\"\n\nfrom typing import Any, MutableMapping, Optional\n\nimport cachetools\nimport tensorflow as tf\n\nfrom tensorflow_federated.proto.v0 import computation_pb2 as pb\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.common_libs import serialization_utils\nfrom tensorflow_federated.python.common_libs import structure\nfrom tensorflow_federated.python.common_libs import tracing\nfrom tensorflow_federated.python.core.api import computation_base\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import typed_object\nfrom tensorflow_federated.python.core.impl import computation_impl\nfrom tensorflow_federated.python.core.impl import type_utils\nfrom tensorflow_federated.python.core.impl.compiler import building_blocks\nfrom tensorflow_federated.python.core.impl.executors import executor_base\nfrom tensorflow_federated.python.core.impl.executors import executor_value_base\nfrom tensorflow_federated.python.core.impl.types import type_analysis\nfrom tensorflow_federated.python.core.impl.types import type_conversions\nfrom tensorflow_federated.python.core.impl.types import type_serialization\nfrom tensorflow_federated.python.core.impl.utils import tensorflow_utils\nfrom tensorflow_federated.python.tensorflow_libs import graph_merge\n\n# Cache size here is simply heuristic, no formal analysis.\n_TF_FUNCTION_CACHE_SIZE = 100\n\n\ndef _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu, param_type,\n device):\n \"\"\"Extracts the TensorFlow function from serialized computation.\n\n Args:\n comp: An instance of `pb.Computation`.\n must_pin_function_to_cpu: A boolean flag to indicate if the computation is\n forced to be on CPUs.\n param_type: A `tff.Type` instance or None.\n device: A `tf.config.LogicalDevice` or None.\n\n Returns:\n A TensorFlow ConcreteFunction.\n \"\"\"\n\n def function_to_wrap():\n \"\"\"No-arg function to import graph def.\n\n We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid\n the leftover placeholders that can result from binding arguments to the\n imported graphdef via `input_map`. 
The correct signature will be added to\n this function later, via the `prune` call below.\n\n Returns:\n Result of importing graphdef backing `comp`.\n \"\"\"\n graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def)\n init_op = comp.tensorflow.initialize_op\n if init_op:\n graph_def = tensorflow_utils.add_control_deps_for_init_op(\n graph_def, init_op)\n\n def _import_fn():\n return tf.import_graph_def(\n graph_merge.uniquify_shared_names(graph_def), name='')\n\n if must_pin_function_to_cpu:\n with tf.device('cpu'):\n return _import_fn()\n elif device is not None:\n with tf.device(device.name):\n return _import_fn()\n else:\n return _import_fn()\n\n wrapped_noarg_fn = tf.compat.v1.wrap_function(function_to_wrap, signature=[])\n\n if param_type is not None:\n input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(\n comp.tensorflow.parameter)\n else:\n input_tensor_names = []\n output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(\n comp.tensorflow.result)\n import_graph = wrapped_noarg_fn.graph\n try:\n wrapped_fn = wrapped_noarg_fn.prune(\n feeds=tf.nest.map_structure(import_graph.as_graph_element,\n input_tensor_names),\n fetches=tf.nest.map_structure(import_graph.as_graph_element,\n output_tensor_names),\n )\n except KeyError as e:\n raise TypeError(\n 'Caught exception trying to prune graph `{g}` with '\n 'feeds {feeds} and fetches {fetches}. This indicates that these '\n 'names may not refer to tensors in the graph. .\\nException: {e}'.format(\n g=import_graph,\n feeds=input_tensor_names,\n fetches=output_tensor_names,\n e=e))\n return wrapped_fn\n\n\ndef embed_tensorflow_computation(comp, type_spec=None, device=None):\n \"\"\"Embeds a TensorFlow computation for use in the eager context.\n\n Args:\n comp: An instance of `pb.Computation`.\n type_spec: An optional `tff.Type` instance or something convertible to it.\n device: An optional `tf.config.LogicalDevice`.\n\n Returns:\n Either a one-argument or a zero-argument callable that executes the\n computation in eager mode.\n\n Raises:\n TypeError: If arguments are of the wrong types, e.g., in `comp` is not a\n TensorFlow computation.\n \"\"\"\n # TODO(b/134543154): Decide whether this belongs in `tensorflow_utils.py`\n # since it deals exclusively with eager mode. Incubate here, and potentially\n # move there, once stable.\n\n py_typecheck.check_type(comp, pb.Computation)\n comp_type = type_serialization.deserialize_type(comp.type)\n type_spec = computation_types.to_type(type_spec)\n if type_spec is not None:\n if not type_spec.is_equivalent_to(comp_type):\n raise TypeError('Expected a computation of type {}, got {}.'.format(\n type_spec, comp_type))\n else:\n type_spec = comp_type\n # TODO(b/155198591): Currently, TF will raise on any function returning a\n # `tf.data.Dataset` not pinned to CPU. 
We should follow up here and remove\n # this gating when we can.\n must_pin_function_to_cpu = type_analysis.contains(type_spec.result,\n lambda t: t.is_sequence())\n which_computation = comp.WhichOneof('computation')\n if which_computation != 'tensorflow':\n unexpected_building_block = building_blocks.ComputationBuildingBlock.from_proto(\n comp)\n raise TypeError('Expected a TensorFlow computation, found {}.'.format(\n unexpected_building_block))\n\n if type_spec.is_function():\n param_type = type_spec.parameter\n result_type = type_spec.result\n else:\n param_type = None\n result_type = type_spec\n\n wrapped_fn = _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu,\n param_type, device)\n\n param_fns = []\n if param_type is not None:\n for spec in structure.flatten(type_spec.parameter):\n if spec.is_tensor():\n param_fns.append(lambda x: x)\n else:\n py_typecheck.check_type(spec, computation_types.SequenceType)\n param_fns.append(tf.data.experimental.to_variant)\n\n result_fns = []\n for spec in structure.flatten(result_type):\n if spec.is_tensor():\n result_fns.append(lambda x: x)\n else:\n py_typecheck.check_type(spec, computation_types.SequenceType)\n tf_structure = type_conversions.type_to_tf_structure(spec.element)\n\n def fn(x, tf_structure=tf_structure):\n return tf.data.experimental.from_variant(x, tf_structure)\n\n result_fns.append(fn)\n\n def _fn_to_return(arg, param_fns, wrapped_fn): # pylint:disable=missing-docstring\n param_elements = []\n if arg is not None:\n arg_parts = structure.flatten(arg)\n if len(arg_parts) != len(param_fns):\n raise RuntimeError('Expected {} arguments, found {}.'.format(\n len(param_fns), len(arg_parts)))\n for arg_part, param_fn in zip(arg_parts, param_fns):\n param_elements.append(param_fn(arg_part))\n result_parts = wrapped_fn(*param_elements)\n\n # There is a tf.wrap_function(...) issue b/144127474 that variables created\n # from tf.import_graph_def(...) inside tf.wrap_function(...) is not\n # destroyed. 
So get all the variables from `wrapped_fn` and destroy\n # manually.\n # TODO(b/144127474): Remove this manual cleanup once tf.wrap_function(...)\n # is fixed.\n resources = []\n for op in wrapped_fn.graph.get_operations():\n if op.type == 'VarHandleOp':\n resources += op.outputs\n if resources:\n for resource in wrapped_fn.prune(feeds={}, fetches=resources)():\n tf.raw_ops.DestroyResourceOp(resource=resource)\n\n result_elements = []\n for result_part, result_fn in zip(result_parts, result_fns):\n result_elements.append(result_fn(result_part))\n return structure.pack_sequence_as(result_type, result_elements)\n\n fn_to_return = lambda arg, p=param_fns, w=wrapped_fn: _fn_to_return(arg, p, w)\n\n # pylint: disable=function-redefined\n if must_pin_function_to_cpu:\n old_fn_to_return = fn_to_return\n\n def fn_to_return(x):\n with tf.device('cpu'):\n return old_fn_to_return(x)\n elif device is not None:\n old_fn_to_return = fn_to_return\n\n def fn_to_return(x):\n with tf.device(device.name):\n return old_fn_to_return(x)\n\n # pylint: enable=function-redefined\n\n if param_type is not None:\n return lambda arg: fn_to_return(arg) # pylint: disable=unnecessary-lambda\n else:\n return lambda: fn_to_return(None)\n\n\ndef to_representation_for_type(\n value: Any,\n tf_function_cache: MutableMapping[str, Any],\n type_spec: Optional[computation_types.Type] = None,\n device: Optional[tf.config.LogicalDevice] = None) -> Any:\n \"\"\"Verifies or converts the `value` to an eager object matching `type_spec`.\n\n WARNING: This function is only partially implemented. It does not support\n data sets at this point.\n\n The output of this function is always an eager tensor, eager dataset, a\n representation of a TensorFlow computation, or a nested structure of those\n that matches `type_spec`, and when `device` has been specified, everything\n is placed on that device on a best-effort basis.\n\n TensorFlow computations are represented here as zero- or one-argument Python\n callables that accept their entire argument bundle as a single Python object.\n\n Args:\n value: The raw representation of a value to compare against `type_spec` and\n potentially to be converted.\n tf_function_cache: A cache obeying `dict` semantics that can be used to look\n up previously embedded TensorFlow functions.\n type_spec: An instance of `tff.Type`, can be `None` for values that derive\n from `typed_object.TypedObject`.\n device: An optional `tf.config.LogicalDevice` to place the value on (for\n tensor-level values).\n\n Returns:\n Either `value` itself, or a modified version of it.\n\n Raises:\n TypeError: If the `value` is not compatible with `type_spec`.\n \"\"\"\n type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec)\n if isinstance(value, computation_base.Computation):\n return to_representation_for_type(\n computation_impl.ComputationImpl.get_proto(value), tf_function_cache,\n type_spec, device)\n elif isinstance(value, pb.Computation):\n key = (value.SerializeToString(), str(type_spec),\n device.name if device else None)\n cached_fn = tf_function_cache.get(key)\n if cached_fn is not None:\n return cached_fn\n embedded_fn = embed_tensorflow_computation(value, type_spec, device)\n tf_function_cache[key] = embedded_fn\n return embedded_fn\n elif type_spec.is_struct():\n type_elem = structure.to_elements(type_spec)\n value_elem = (structure.to_elements(structure.from_container(value)))\n result_elem = []\n if len(type_elem) != len(value_elem):\n raise TypeError('Expected a {}-element tuple, found {} 
elements.'.format(\n len(type_elem), len(value_elem)))\n for (t_name, el_type), (v_name, el_val) in zip(type_elem, value_elem):\n if t_name != v_name:\n raise TypeError(\n 'Mismatching element names in type vs. value: {} vs. {}.'.format(\n t_name, v_name))\n el_repr = to_representation_for_type(el_val, tf_function_cache, el_type,\n device)\n result_elem.append((t_name, el_repr))\n return structure.Struct(result_elem)\n elif device is not None:\n py_typecheck.check_type(device, tf.config.LogicalDevice)\n with tf.device(device.name):\n return to_representation_for_type(\n value, tf_function_cache, type_spec=type_spec, device=None)\n elif isinstance(value, EagerValue):\n return value.internal_representation\n elif isinstance(value, executor_value_base.ExecutorValue):\n raise TypeError(\n 'Cannot accept a value embedded within a non-eager executor.')\n elif type_spec.is_tensor():\n if not tf.is_tensor(value):\n value = tf.convert_to_tensor(value, dtype=type_spec.dtype)\n elif hasattr(value, 'read_value'):\n # a tf.Variable-like result, get a proper tensor.\n value = value.read_value()\n value_type = (\n computation_types.TensorType(value.dtype.base_dtype, value.shape))\n if not type_spec.is_assignable_from(value_type):\n raise TypeError(\n 'The apparent type {} of a tensor {} does not match the expected '\n 'type {}.'.format(value_type, value, type_spec))\n return value\n elif type_spec.is_sequence():\n if isinstance(value, list):\n value = tensorflow_utils.make_data_set_from_elements(\n None, value, type_spec.element)\n py_typecheck.check_type(value,\n type_conversions.TF_DATASET_REPRESENTATION_TYPES)\n element_type = computation_types.to_type(value.element_spec)\n value_type = computation_types.SequenceType(element_type)\n type_spec.check_assignable_from(value_type)\n return value\n else:\n raise TypeError('Unexpected type {}.'.format(type_spec))\n\n\nclass EagerValue(executor_value_base.ExecutorValue):\n \"\"\"A representation of an eager value managed by the eager executor.\"\"\"\n\n def __init__(self, value, tf_function_cache, type_spec=None, device=None):\n \"\"\"Creates an instance of a value in this executor.\n\n Args:\n value: Depending on `type_spec`, either a `tf.Tensor`, `tf.data.Dataset`,\n or a nested structure of these stored in an `Struct`.\n tf_function_cache: A cache obeying `dict` semantics that can be used to\n look up previously embedded TensorFlow functions.\n type_spec: An instance of `tff.Type` that represents a tensor, a dataset,\n or a nested structure of these.\n device: An optional `tf.config.LogicalDevice` on which to place the value.\n \"\"\"\n if type_spec is None:\n py_typecheck.check_type(value, typed_object.TypedObject)\n type_spec = value.type_signature\n else:\n type_spec = computation_types.to_type(type_spec)\n py_typecheck.check_type(type_spec, computation_types.Type)\n self._type_signature = type_spec\n self._value = to_representation_for_type(value, tf_function_cache,\n type_spec, device)\n\n @property\n def internal_representation(self):\n \"\"\"Returns a representation of the eager value embedded in the executor.\n\n This property is only intended for use by the eager executor and tests. 
Not\n for consumption by consumers of the executor interface.\n \"\"\"\n return self._value\n\n @property\n def type_signature(self):\n return self._type_signature\n\n @tracing.trace\n async def compute(self):\n return self._value\n\n\nclass EagerTFExecutor(executor_base.Executor):\n \"\"\"The eager executor only runs TensorFlow, synchronously, in eager mode.\n\n TODO(b/134764569): Add support for data as a building block.\n\n This executor understands the following TFF types: tensors, sequences, named\n tuples, and functions. It does not understand placements, federated, or\n abstract types.\n\n This executor understands the following kinds of TFF computation building\n blocks: tensorflow computations, and external data. It does not understand\n lambda calculus or any compositional constructs. Tuples and selections can\n only be created using `create_struct()` and `create_selection()` in the API.\n\n The arguments to be ingested can be Python constants of simple types, nested\n structures of those, as well as eager tensors and eager datasets.\n\n The external data references must identify files available in the executor's\n filesystem. The exact format is yet to be documented.\n\n The executor will be able to place work on specific devices (e.g., on GPUs).\n In contrast to the reference executor, it handles data sets in a pipelined\n fashion, and does not place limits on the data set sizes. It also avoids\n marshaling TensorFlow values in and out between calls.\n\n It does not deal with multithreading, checkpointing, federated computations,\n and other concerns to be covered by separate executor components. It runs the\n operations it supports in a synchronous fashion. Asynchrony and other aspects\n not supported here should be handled by composing this executor with other\n executors into a complex executor stack, rather than mixing in all the logic.\n \"\"\"\n\n def __init__(self, device=None):\n \"\"\"Creates a new instance of an eager executor.\n\n Args:\n device: An optional `tf.config.LogicalDevice` that this executor will\n schedule all of its operations to run on. For example, the list of\n logical devices can be obtained using\n `tf.config.list_logical_devices()`.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the device is not a `tf.config.LogicalDevice`.\n ValueError: If there is no device `device`.\n \"\"\"\n if not tf.executing_eagerly():\n raise RuntimeError('The eager executor may only be used in eager mode.')\n if device is not None:\n py_typecheck.check_type(device, tf.config.LogicalDevice)\n self._device = device\n else:\n self._device = None\n self._tf_function_cache = cachetools.LRUCache(_TF_FUNCTION_CACHE_SIZE)\n\n @tracing.trace(span=True)\n async def create_value(self, value, type_spec=None):\n \"\"\"Embeds `value` of type `type_spec` within this executor.\n\n Args:\n value: An object that represents the value to embed within the executor.\n type_spec: The `tff.Type` of the value represented by this object, or\n something convertible to it. 
Can optionally be `None` if `value` is an\n instance of `typed_object.TypedObject`.\n\n Returns:\n An instance of `EagerValue`.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the arguments are of the wrong types.\n ValueError: If the type was not specified and cannot be determined from\n the value.\n \"\"\"\n if not tf.executing_eagerly():\n raise RuntimeError('The eager executor may only be used in eager mode.')\n\n return EagerValue(value, self._tf_function_cache, type_spec, self._device)\n\n @tracing.trace\n async def create_call(self, comp, arg=None):\n \"\"\"Creates a call to `comp` with optional `arg`.\n\n Args:\n comp: As documented in `executor_base.Executor`.\n arg: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` representing the result of the call.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the arguments are of the wrong types.\n \"\"\"\n py_typecheck.check_type(comp, EagerValue)\n if arg is not None:\n py_typecheck.check_type(arg, EagerValue)\n if not comp.type_signature.is_function():\n raise TypeError('Expected a functional type, found {}'.format(\n comp.type_signature))\n if comp.type_signature.parameter is not None:\n return EagerValue(\n comp.internal_representation(arg.internal_representation), # pytype: disable=attribute-error\n self._tf_function_cache,\n comp.type_signature.result,\n self._device)\n elif arg is None:\n return EagerValue(comp.internal_representation(), self._tf_function_cache,\n comp.type_signature.result, self._device)\n else:\n raise TypeError('Cannot pass an argument to a no-argument function.')\n\n @tracing.trace\n async def create_struct(self, elements):\n \"\"\"Creates a tuple of `elements`.\n\n Args:\n elements: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` that represents the constructed tuple.\n \"\"\"\n elements = structure.to_elements(structure.from_container(elements))\n val_elements = []\n type_elements = []\n for k, v in elements:\n py_typecheck.check_type(v, EagerValue)\n val_elements.append((k, v.internal_representation))\n type_elements.append((k, v.type_signature))\n return EagerValue(\n structure.Struct(val_elements), self._tf_function_cache,\n computation_types.StructType([\n (k, v) if k is not None else v for k, v in type_elements\n ]))\n\n @tracing.trace\n async def create_selection(self, source, index=None, name=None):\n \"\"\"Creates a selection from `source`.\n\n Args:\n source: As documented in `executor_base.Executor`.\n index: As documented in `executor_base.Executor`.\n name: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` that represents the constructed selection.\n\n Raises:\n TypeError: If arguments are of the wrong types.\n ValueError: If either both, or neither of `name` and `index` are present.\n \"\"\"\n py_typecheck.check_type(source, EagerValue)\n py_typecheck.check_type(source.type_signature, computation_types.StructType)\n py_typecheck.check_type(source.internal_representation, structure.Struct)\n if index is not None:\n py_typecheck.check_type(index, int)\n if name is not None:\n raise ValueError(\n 'Cannot simultaneously specify name {} and index {}.'.format(\n name, index))\n else:\n return EagerValue(source.internal_representation[index],\n self._tf_function_cache, source.type_signature[index])\n elif name is not None:\n py_typecheck.check_type(name, str)\n return EagerValue(\n getattr(source.internal_representation, str(name)),\n 
self._tf_function_cache, getattr(source.type_signature, str(name)))\n else:\n raise ValueError('Must specify either name or index.')\n\n def close(self):\n pass\n", "# Copyright 2020, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trains and evaluates on Stackoverflow NWP with adaptive LR decay.\"\"\"\n\nimport functools\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.research.adaptive_lr_decay import adaptive_fed_avg\nfrom tensorflow_federated.python.research.adaptive_lr_decay import decay_iterative_process_builder\nfrom tensorflow_federated.python.research.optimization.shared import keras_metrics\nfrom tensorflow_federated.python.research.utils import training_loop\nfrom tensorflow_federated.python.research.utils import training_utils\nfrom tensorflow_federated.python.research.utils import utils_impl\nfrom tensorflow_federated.python.research.utils.datasets import stackoverflow_dataset\nfrom tensorflow_federated.python.research.utils.models import stackoverflow_models\n\nwith utils_impl.record_new_flags() as hparam_flags:\n # Training hyperparameters\n flags.DEFINE_integer('clients_per_round', 10,\n 'How many clients to sample per round.')\n flags.DEFINE_integer('client_epochs_per_round', 1,\n 'Number of epochs in the client to take per round.')\n flags.DEFINE_integer('client_batch_size', 16,\n 'Batch size used on the client.')\n flags.DEFINE_integer('sequence_length', 20, 'Max sequence length to use.')\n flags.DEFINE_integer('max_elements_per_user', 160, 'Max number of training '\n 'sentences to use per user.')\n flags.DEFINE_integer(\n 'max_batches_per_client', -1, 'Maximum number of batches to process at '\n 'each client in a given round. If set to -1, we take the full dataset.')\n flags.DEFINE_integer(\n 'num_validation_examples', 10000, 'Number of examples '\n 'to use from test set for per-round validation.')\n flags.DEFINE_enum(\n 'client_weight', 'uniform', ['num_tokens', 'uniform'],\n 'Weighting scheme for the client model deltas. Currently, this can '\n 'either weight according to the number of tokens on a client '\n '(num_tokens) or uniformly (uniform).')\n\n # Modeling flags\n flags.DEFINE_integer('vocab_size', 10000, 'Size of vocab to use.')\n flags.DEFINE_integer('embedding_size', 96,\n 'Dimension of word embedding to use.')\n flags.DEFINE_integer('latent_size', 670,\n 'Dimension of latent size to use in recurrent cell')\n flags.DEFINE_integer('num_layers', 1,\n 'Number of stacked recurrent layers to use.')\n flags.DEFINE_boolean(\n 'shared_embedding', False,\n 'Boolean indicating whether to tie input and output embeddings.')\n\n flags.DEFINE_integer(\n 'client_datasets_random_seed', 1, 'The random seed '\n 'governing the selection of clients that participate in each training '\n 'round. 
The seed is used to generate the starting point for a Lehmer '\n 'pseudo-random number generator, the outputs of which are used as seeds '\n 'for the client sampling.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Expected no command-line arguments, '\n 'got: {}'.format(argv))\n\n model_builder = functools.partial(\n stackoverflow_models.create_recurrent_model,\n vocab_size=FLAGS.vocab_size,\n embedding_size=FLAGS.embedding_size,\n latent_size=FLAGS.latent_size,\n num_layers=FLAGS.num_layers,\n shared_embedding=FLAGS.shared_embedding)\n\n loss_builder = functools.partial(\n tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)\n\n special_tokens = stackoverflow_dataset.get_special_tokens(FLAGS.vocab_size)\n\n pad_token = special_tokens.pad\n oov_tokens = special_tokens.oov\n eos_token = special_tokens.eos\n\n def metrics_builder():\n return [\n keras_metrics.MaskedCategoricalAccuracy(\n name='accuracy_with_oov', masked_tokens=[pad_token]),\n keras_metrics.MaskedCategoricalAccuracy(\n name='accuracy_no_oov', masked_tokens=[pad_token] + oov_tokens),\n # Notice BOS never appears in ground truth.\n keras_metrics.MaskedCategoricalAccuracy(\n name='accuracy_no_oov_or_eos',\n masked_tokens=[pad_token, eos_token] + oov_tokens),\n keras_metrics.NumBatchesCounter(),\n keras_metrics.NumTokensCounter(masked_tokens=[pad_token])\n ]\n\n train_set, validation_set, test_set = stackoverflow_dataset.construct_word_level_datasets(\n FLAGS.vocab_size,\n FLAGS.client_batch_size,\n FLAGS.client_epochs_per_round,\n FLAGS.sequence_length,\n FLAGS.max_elements_per_user,\n FLAGS.num_validation_examples,\n max_batches_per_user=FLAGS.max_batches_per_client)\n\n input_spec = validation_set.element_spec\n\n if FLAGS.client_weight == 'uniform':\n\n def client_weight_fn(local_outputs):\n del local_outputs\n return 1.0\n\n elif FLAGS.client_weight == 'num_tokens':\n\n def client_weight_fn(local_outputs):\n # Num_tokens is a tensor with type int64[1], to use as a weight need\n # a float32 scalar.\n return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)\n\n else:\n raise ValueError('Unsupported client_weight flag [{!s}]. Currently only '\n '`uniform` and `num_tokens` are supported.'.format(\n FLAGS.client_weight))\n\n training_process = decay_iterative_process_builder.from_flags(\n input_spec=input_spec,\n model_builder=model_builder,\n loss_builder=loss_builder,\n metrics_builder=metrics_builder,\n client_weight_fn=client_weight_fn)\n\n client_datasets_fn = training_utils.build_client_datasets_fn(\n train_set,\n FLAGS.clients_per_round,\n random_seed=FLAGS.client_datasets_random_seed)\n\n assign_weights_fn = adaptive_fed_avg.ServerState.assign_weights_to_keras_model\n\n evaluate_fn = training_utils.build_evaluate_fn(\n model_builder=model_builder,\n eval_dataset=validation_set,\n loss_builder=loss_builder,\n metrics_builder=metrics_builder,\n assign_weights_to_keras_model=assign_weights_fn)\n\n test_fn = training_utils.build_evaluate_fn(\n model_builder=model_builder,\n # Use both val and test for symmetry with other experiments, which\n # evaluate on the entire test set.\n eval_dataset=validation_set.concatenate(test_set),\n loss_builder=loss_builder,\n metrics_builder=metrics_builder,\n assign_weights_to_keras_model=assign_weights_fn)\n\n logging.info('Training model:')\n logging.info(model_builder().summary())\n\n training_loop.run(\n training_process, client_datasets_fn, evaluate_fn, test_fn=test_fn)\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.raw_ops.DestroyResourceOp", "tensorflow.compat.v1.wrap_function", "tensorflow.data.experimental.from_variant", "tensorflow.nest.map_structure", "tensorflow.executing_eagerly", "tensorflow.device", "tensorflow.is_tensor" ], [ "tensorflow.squeeze" ] ]
rcm2dev/circular_buffer_numpy
[ "25cf7567f93f866bc9456d250a4f132e3335c068" ]
[ "circular_buffer_numpy/tests/test_circular_buffer.py" ]
[ "#!/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"test Queue\r\n by Valentyn Stadnytskyi\r\n created: August 2, 2019\r\n\r\n This is a test library to evaluate the performance of the code.\r\n Queue is an abstract data structure, somewhat similar to Stacks.\r\n Unlike stacks, a queue is open at both its ends.\r\n One end is always used to insert data (enqueue) and the other is used to remove data (dequeue)..\r\n\r\n to run unittest: python3 -m unittest test_queue\r\n\"\"\"\r\nimport unittest\r\nfrom numpy.testing import assert_array_equal\r\n\r\nclass CircularBufferTest(unittest.TestCase):\r\n def test_queue_end(self):\r\n \"\"\"\r\n test if the default pointer in the buffer is -1.\r\n \"\"\"\r\n from ..circular_buffer import CircularBuffer\r\n buffer = CircularBuffer(shape=(100, 2))\r\n self.assertEqual(buffer.pointer, -1)\r\n\r\n def test_queue_end_two(self):\r\n \"\"\"\r\n test if the default pointer in the buffer is -1.\r\n \"\"\"\r\n from ..circular_buffer import CircularBuffer\r\n buffer = CircularBuffer(shape=(100, 2))\r\n self.assertEqual(buffer.pointer, -1)\r\n\r\n def test_1(self):\r\n from numpy import random\r\n from ..circular_buffer import CircularBuffer\r\n buffer = CircularBuffer(shape=(100, 2, 4))\r\n data = random.randint(1024, size=(5, 2, 4))\r\n buffer.packet_length = 5\r\n buffer.append(data)\r\n self.assertEqual(buffer.pointer, 4)\r\n self.assertEqual(buffer.g_pointer, 4)\r\n self.assertEqual(buffer.packet_pointer, 0)\r\n self.assertEqual(buffer.g_packet_pointer, 0)\r\n\r\n def test_attributes(self):\r\n from ..circular_buffer import CircularBuffer\r\n from numpy import random\r\n buffer = CircularBuffer(shape=(100, 2), dtype='int16')\r\n data = random.randint(1024, size=(5, 2))\r\n buffer.append(data)\r\n self.assertEqual(buffer.shape, (100, 2))\r\n self.assertEqual(buffer.size, 100*2)\r\n self.assertEqual(buffer.dtype, 'int16')\r\n\r\n def test_full(self):\r\n from ..circular_buffer import CircularBuffer\r\n from numpy import random, sum\r\n buffer = CircularBuffer(shape=(100, 2, 3), dtype='float64')\r\n data = random.randint(1024, size=(50, 2, 3))\r\n buffer.append(data)\r\n assert buffer.pointer == 49\r\n assert buffer.g_pointer == 49\r\n assert buffer.shape == (100, 2, 3)\r\n assert buffer.size == buffer.buffer.shape[0]*buffer.buffer.shape[1]*buffer.buffer.shape[2]\r\n assert buffer.dtype == 'float64'\r\n assert sum(buffer.get_i_j(i=5, j=6)) == sum(buffer.buffer[5])\r\n # get data between pointers 5 and 10 and compare to get 5 points from pointer M\r\n assert sum(buffer.get_i_j(i=5, j=10)) == sum(buffer.get_N(N=5, M=9))\r\n\r\n def test_vector_append(self):\r\n from ..circular_buffer import CircularBuffer\r\n from numpy import random, sum, zeros, concatenate\r\n buffer = CircularBuffer(shape=(1000, 3))\r\n vec1 = zeros((1, 3))\r\n vec2 = zeros((1, 3))\r\n vec1[0, 0] = 0.0\r\n vec1[0, 1] = 1.0\r\n vec1[0, 2] = 2.0\r\n buffer.append(vec1)\r\n vec2[0, 0] = 3.0\r\n vec2[0, 1] = 4.0\r\n vec2[0, 2] = 5.0\r\n buffer.append(vec2)\r\n assert_array_equal(buffer.get_last_value(), vec2)\r\n assert_array_equal(buffer.get_last_N(2),concatenate((vec1, vec2)))\r\n\r\n\r\n def test_get_data(self):\r\n from ..circular_buffer import CircularBuffer\r\n from numpy import random, sum, zeros, concatenate, array\r\n buffer = CircularBuffer(shape=(1000, 3))\r\n res_buffer = []\r\n\r\n j = 0\r\n for i in range(5):\r\n vec = zeros((3,))\r\n vec[0] = j\r\n vec[1] = j**2\r\n vec[2] = j**3\r\n buffer.append(vec)\r\n res_buffer.append(vec)\r\n j+=1\r\n 
assert_array_equal(array(res_buffer),buffer.get_data())\r\n\r\n for i in range(555):\r\n vec = zeros((3,))\r\n vec[0] = j\r\n vec[1] = j**2\r\n vec[2] = j**3\r\n buffer.append(vec)\r\n res_buffer.append(vec)\r\n j+=1\r\n assert_array_equal(array(res_buffer),buffer.get_data())\r\n\r\n #the 1000-long buffer spills over and overwrites existing values. The function get_data returns only the most recent 1000 entries\r\n for i in range(1300):\r\n vec = zeros((3,))\r\n vec[0] = j\r\n vec[1] = j**2\r\n vec[2] = j**3\r\n buffer.append(vec)\r\n res_buffer.append(vec)\r\n j+=1\r\n assert_array_equal(array(res_buffer[-1000:]),buffer.get_data())\r\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
na018/DeepLearning
[ "f9f44bdf9afe7fa211a0899407534faf309bc0e6" ]
[ "00_MNIST-label/train.py" ]
[ "import argparse\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nfrom net.tf_net import \\\n calculate_accuracy, calculate_loss, \\\n create_simple_cnn_model, optimize_weights\nfrom net.keras_net import simple_cnn\n\n\ndef train_keras(batch_size, epochs, n_classes):\n # x_train returns data with shape (60,000,28,28)\n # y_train returns data with shape (60,000,)\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n # add one dimension for color chanel (only gray values)\n x_train = x_train.reshape(x_train.shape[0], image_height, image_width, 1)\n x_test = x_test.reshape(x_test.shape[0], image_height, image_width, 1)\n\n # define input shape of image\n input_shape = (image_height, image_width, 1)\n\n # convert tensor to float\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n # normalize data: divide by 255 (max color value) to receive values between 0 and 1\n x_train /= 255\n x_test /= 255\n\n # one-hot encoding: converts into array of length 'n_classes' and sets one where true\n # e.g. label = 5 y_train[4]=1, rest is 0\n y_train = tf.keras.utils.to_categorical(y_train, n_classes)\n y_test = tf.keras.utils.to_categorical(y_test, n_classes)\n\n simple_cnn_model = simple_cnn(input_shape)\n\n simple_cnn_model.fit(x_train, y_train, batch_size,\n epochs, (x_test, y_test))\n\n train_loss, train_accuracy = simple_cnn_model.evaluate(\n x_train, y_train, verbose=0)\n print('Train data loss:', train_loss)\n print('Train data accuracy:', train_accuracy)\n\n test_loss, test_accuracy = simple_cnn_model.evaluate(\n x_test, y_test, verbose=0)\n print('Test data loss:', test_loss)\n print('Test data accuracy:', test_accuracy)\n\n\ndef train_tensorflow(batch_size, epochs, n_classes):\n mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)\n test_images, test_labels = mnist_data.test.images, mnist_data.test.labels\n input_size = 784\n\n # declare placeholder\n x_input = tf.placeholder(tf.float32, shape=[None, input_size])\n y_input = tf.placeholder(tf.float32, shape=[None, n_classes])\n # if test set dropout to false\n bool_dropout = tf.placeholder(tf.bool)\n\n # create neural net and receive logits\n logits = create_simple_cnn_model(x_input, y_input, bool_dropout)\n # calculate loss, optimize weights and calculate accuracy\n loss_operation = calculate_loss(logits, y_input)\n optimizer = optimize_weights(loss_operation)\n accuracy_operation = calculate_accuracy(logits, y_input)\n\n # start training\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n\n # merge all summary for tensorboard\n merged_summary_operation = tf.summary.merge_all()\n train_summary_writer = tf.summary.FileWriter('/tmp/train', session.graph)\n test_summary_writer = tf.summary.FileWriter('/tmp/test')\n\n for batch_n in range(epochs):\n mnist_batch = mnist_data.train.next_batch(batch_size)\n train_images, train_labels = mnist_batch[0], mnist_batch[1]\n\n _, merged_summary = session.run([optimizer, merged_summary_operation],\n feed_dict={\n x_input: train_images,\n y_input: train_labels,\n bool_dropout: True\n })\n\n train_summary_writer.add_summary(merged_summary, batch_n)\n\n if batch_n % 10 == 0:\n merged_summary, _ = session.run([merged_summary_operation, accuracy_operation],\n feed_dict={\n x_input: test_images,\n y_input: test_labels,\n bool_dropout: False\n })\n\n test_summary_writer.add_summary(merged_summary, batch_n)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n 
description='Train a simple neural net to recognize number images from the MNIST dataset and apply the correct labels')\n parser.add_argument('--epochs', default=200,\n help='Amount of batches the net trains on')\n parser.add_argument('--batch_size', default=100,\n help='Number of training samples inside one batch')\n parser.add_argument('--tf', default=True,\n help='Tensorflow or Keras implementation')\n args = parser.parse_args()\n\n if(args.tf):\n train_tensorflow(args.batch_size, args.epochs, 10)\n else:\n train_keras(args.batch_size, args.epochs, 10) # MNIST has 10 classes; no --n_classes flag is defined\n" ]
[ [ "tensorflow.keras.utils.to_categorical", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.Session", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.global_variables_initializer" ] ]
cbschaff/nlimb
[ "f0564b00bab1b3367aaa88163e49bebc88f349bb" ]
[ "rl/algorithms/core.py" ]
[ "from deeplearning import logger, tf_util as U\nimport tensorflow as tf\nfrom rl.runner import Runner\nfrom rl.vec_env.subproc_vec_env import SubprocVecEnv\nfrom collections import namedtuple\nimport os, time\n\nclass RLExperiment(U.Experiment):\n def load_env_fn(self):\n fname = os.path.join(self.logdir, 'checkpoints/env_fn.pkl')\n assert os.path.exists(fname), \"No env function saved.\"\n return U.load(fname)\n\n def save_env_fn(self, env_fn):\n fname = os.path.join(self.logdir, 'checkpoints/env_fn.pkl')\n U.save(fname, env_fn)\n\n\n\nclass OnlineRLAlgorithm(object):\n def __init__(self, logdir, env_fn, model_fn, nenv, rollout_length, batch_size, callback=None, runner_flags=[], **kwargs):\n self.exp = RLExperiment(logdir)\n self.exp.save_model_fn(model_fn)\n self.exp.save_env_fn(env_fn)\n logger.configure(os.path.join(logdir, 'logs'), ['stdout', 'log', 'json'])\n self.logdir = logdir\n self.batch_size = batch_size\n self.rollout_length = rollout_length\n self.args = namedtuple('Args', kwargs.keys())(**kwargs)\n\n self.nenv = nenv\n self.timesteps_per_step = self.nenv * self.rollout_length\n self.env = self._make_env(env_fn, nenv)\n\n self.actor = model_fn(self.env)\n self.actor.build('model', self.nenv, 1)\n\n self.loss = self._def_loss(model_fn, self.env)\n self.opt = self._def_opt(self.loss)\n self.opt.build('model', self.nenv, batch_size, reuse=tf.AUTO_REUSE)\n\n self.runner = Runner(self.env, self.actor, rollout_length, *runner_flags)\n\n self.callback = callback\n if callback is not None:\n assert callable(callback)\n\n self.init_session()\n self.load()\n\n def _make_env(self, env_fn, nenv):\n def make_env(rank):\n def _env():\n return env_fn(rank)\n return _env\n return SubprocVecEnv([make_env(i) for i in range(nenv)])\n\n def _def_loss(self, model_fn, env):\n \"\"\"\n returns a module for and the loss\n \"\"\"\n raise NotImplementedError\n\n def _def_opt(self, loss):\n \"\"\"\n returns a module for and the optimizer\n \"\"\"\n raise NotImplementedError\n\n def _before_step(self):\n pass\n\n def _process_rollout(self, rollout):\n raise NotImplementedError\n\n def _update_model(self, data):\n raise NotImplementedError\n\n def _after_step(self, rollout, data, update_out):\n pass\n\n def step(self):\n if self.callback is not None:\n self.callback(locals(), globals())\n self._before_step()\n rollout = self.runner.rollout()\n self.t += self.timesteps_per_step\n data = self._process_rollout(rollout)\n outs = self._update_model(data)\n self._after_step(rollout, data, outs)\n\n def train(self, maxtimesteps=None, maxseconds=None, save_freq=None):\n assert maxtimesteps is not None or maxseconds is not None\n start_time = time.time()\n while True:\n if maxtimesteps is not None and self.t >= maxtimesteps:\n break\n if maxseconds is not None and time.time() - start_time >= maxtimesteps:\n break\n t = self.t\n self.step()\n if save_freq and t // save_freq != self.t // save_freq:\n self.save()\n self.save()\n\n def save(self):\n self.exp.save(self.t)\n\n def load(self, t=None):\n self.t = self.exp.load(t)\n\n def init_session(self):\n if tf.get_default_session() is None:\n U.make_session().__enter__()\n U.initialize()\n\n def close(self):\n if hasattr(self.env, 'close'):\n self.env.close()\n tf.get_default_session().__exit__(None, None, None)\n logger.reset()\n\n\n\n\nif __name__=='__main__':\n from deeplearning.layers import Adam, Placeholder\n from deeplearning.module import Module\n from rl.rl_module import Policy\n import tensorflow as tf\n import gym\n from rl import util\n\n class 
TestAlg(OnlineRLAlgorithm):\n def _def_loss(self, model_fn):\n class Ent(Module):\n def _build(self, inputs):\n return self.modules[0]._entropy\n return Ent('l', model_fn(self.env))\n\n def _def_opt(self, loss):\n return Adam('opt', loss)\n\n def _before_step(self):\n logger.log(\"Before Step\")\n\n def _process_rollout(self, rollout):\n return rollout.numpy()\n\n def _update_model(self, data):\n self.opt.update(util.swap01andflatten(data['obs']))\n\n def _after_step(self, rollout, data, update_outs):\n logger.log(\"After Step\")\n\n def model_fn(env):\n x = Placeholder(tf.float32, env.observation_space.shape, 'x')\n return Policy('pi', x, ac_space=env.action_space)\n\n def env_fn(rank):\n env = gym.make('CartPole-v1')\n env.seed(rank)\n return env\n\n alg = TestAlg('./test_logs', env_fn, model_fn, 2, 64, 64)\n\n alg.train(1024, save_freq=128)\n" ]
[ [ "tensorflow.get_default_session" ] ]
GIS-PuppetMaster/TENSILE
[ "e19f973bb30fba69a23644389c82a4471ee5a241" ]
[ "pycode/tinyflow/Scheduler.py" ]
[ "import copy\nfrom enum import Enum\nimport multiprocessing\nimport numpy as np\nfrom functools import cmp_to_key\nimport plotly as py\nimport plotly.figure_factory as ff\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport plotly\nfrom collections import defaultdict\nimport os\nfrom pynvml import *\nimport time\nimport matplotlib\n# matplotlib.use('Agg')\nimport pickle\nimport numpy as np\nfrom pynvml import *\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Conv1D, MaxPool1D, Dropout, Flatten\nfrom matplotlib import cm\nfrom tensorboard.plugins.hparams import keras\nfrom line_profiler import LineProfiler\nfrom typing import List\n\n\ndef get_PCIE_bandwidth():\n # if not debug_mod:\n # PCIE_bandwidth = nvmlDeviceGetPcieThroughput(handle, NVML_PCIE_UTIL_COUNT) # KB/s => MB/ms\n # PCIE_bandwidth /= 1000000\n # else:\n PCIE_bandwidth = 12\n return PCIE_bandwidth\n\n\nGPU = int(os.environ['CUDA_VISIBLE_DEVICES'])\ndebug_mod = False\nif not debug_mod:\n nvmlInit()\n handle = nvmlDeviceGetHandleByIndex(GPU)\npyplt = py.offline.plot\nPCIE_bandwidth = get_PCIE_bandwidth()\nload_list = ['convolution_2d_forward_VALID', 'convolution_backward_filter_2d_VALID', 'convolution_backward_data_2d_VALID',\n 'convolution_2d_forward_SAME', 'convolution_backward_filter_2d_SAME', 'convolution_backward_data_2d_SAME',\n 'dropout_forward', 'dropout_backward', 'broadcast_to_NHWC',\n 'broadcast_to_NCHW', 'reduce_sum_new_NHWC', 'reduce_sum_new_NCHW',\n 'bn_forward_pre_activation', 'bn_backward_pre_activation', 'activation_forward_relu',\n 'activation_backward_relu', 'activation_forward_softmax', 'activation_backward_softmax',\n 'pooling_2d_forward_max', 'pooling_2d_backward_max', 'pooling_2d_forward_mean',\n 'pooling_2d_backward_mean', 'matrix_multiply', 'matrix_elementwise_multiply_by_const', 'matrix_elementwise_add',\n 'array_set', 'concat_forward', 'concat_a_backward',\n 'concat_b_backward', 'sgd_update', 'cross', 'cross_backward', 'adam_mv', 'adam_compute']\noptimizer_op = ['AdamOp']\n\n\nclass TaskType(Enum):\n swap_out = 0\n swap_in = 1\n\n\nclass AccessType(Enum):\n output = 0\n input = 1\n\n\nclass Tensor:\n def __init__(self, tensor_id, job_id, size, shape, recomputation_time, source_tensors=None, is_parameter=False, is_input_or_output=False):\n self.tensor_id = tensor_id\n self.job_id = job_id\n self.size = size\n self.swap_time = self.size / PCIE_bandwidth\n self.source_tensors = source_tensors if source_tensors is not None else []\n self.recomputation_time = recomputation_time\n self.recomputation_metric = self.size / self.recomputation_time\n self.is_parameter = is_parameter\n self.shape = shape\n if self.is_parameter or is_input_or_output:\n self.in_gpu_at_beginning = True\n else:\n self.in_gpu_at_beginning = False\n\n def __repr__(self):\n return f'tensor_id:{self.tensor_id}, job_id\":{self.job_id}, size:{self.size}'\n\n def update_swap_time(self):\n PCIE_bandwidth = get_PCIE_bandwidth()\n # print(f'PCIE_bandwidth:{PCIE_bandwidth}')\n self.swap_time = self.size / PCIE_bandwidth\n\n\nclass TensorAccess:\n def __init__(self, tensor, time, run_time, access_type, operation_id, operation_name):\n self.tensor = tensor\n self.access_id = None\n self.start_time = None\n self.end_time = None\n self.time = time\n self.run_time = run_time\n self.access_type = access_type\n if self.access_type == AccessType.output:\n self.end_time = self.time\n self.start_time = self.time - self.run_time\n else:\n self.start_time = self.time\n self.end_time = 
self.time + self.run_time\n self.release_flag = False\n self.operation_id = operation_id\n self.operation_name = operation_name\n self.release_for_recomputation = []\n\n def to_tuple(self):\n return (self.tensor.tensor_id, self.time)\n\n def __repr__(self):\n return f'id={self.tensor.tensor_id}, start_time={self.start_time}, end_time={self.end_time}, time={self.time}, access_type={self.access_type}, release_flag={self.release_flag}'\n\n\nclass SwapTask(object):\n '''Date weighted interval'''\n\n def __init__(self, tensor, time, time_cost, task_type: TaskType, front_boundary=None, back_boundary=None):\n self.tensor = tensor\n self.time_cost = time_cost\n self.data_type = np.float64\n self.task_type = task_type\n self.swap_task_id = None\n assert not (front_boundary is None and back_boundary is None)\n # 最早开始时间\n self.front_boundary = front_boundary\n # 最晚结束时间\n self.back_boundary = back_boundary\n self.time = time\n self.execute_time = None\n self.execute_ref = None\n self.start_time_ = None\n self.end_time_ = None\n\n @property\n def start_time(self):\n return self.start_time_\n\n @start_time.setter\n def start_time(self, value):\n self.start_time_ = value\n if self.task_type == TaskType.swap_out:\n self.time = self.start_time_\n\n @property\n def end_time(self):\n return self.end_time_\n\n @end_time.setter\n def end_time(self, value):\n self.end_time_ = value\n if self.task_type == TaskType.swap_in:\n self.time = self.end_time_\n\n @classmethod\n def from_access(cls, access: TensorAccess, weight, task_type, front_boundary=None, back_boundary=None):\n return cls(access.tensor, weight, access.time, access.tensor.swap_time, task_type, front_boundary=front_boundary, back_boundary=back_boundary)\n\n def __repr__(self):\n return f'id={self.tensor}, type={self.task_type}, start_time={self.start_time}, end_time={self.end_time}, time={self.time}'\n\n\ndef numpy_ewma_vectorized(data, window):\n alpha = 2 / (window + 1.0)\n alpha_rev = 1 - alpha\n n = data.shape[0]\n\n pows = alpha_rev ** (np.arange(n + 1))\n\n scale_arr = 1 / pows[:-1]\n offset = data[0] * pows[1:]\n pw0 = alpha * alpha_rev ** (n - 1)\n\n mult = data * pw0 * scale_arr\n cumsums = mult.cumsum()\n out = offset + cumsums * scale_arr[::-1]\n return out\n\n\ndebug_num = 0\n\n\ndef create_model(n):\n model = Sequential()\n model.add(Dense(units=2048, activation='tanh', input_dim=n))\n model.add(Dense(units=2048, activation='tanh'))\n model.add(Dense(units=1, activation='relu'))\n return model\n\n\ndef load(opname, n):\n model = create_model(n)\n model.load_weights('model_parameter/' + opname + '_model.hdf5', by_name=True, skip_mismatch=True)\n return model\n\n\ndef get_predicted_execution_time(op_name, inputs_of_model, logged_time: list):\n return logged_time[0]\n\n\ndef liveness_analysis(tensor_access_list):\n global tensor_access_by_tensor\n # 活跃性分析结果生成\n for job_id in range(len(tensor_access_list)):\n tmp = set()\n for i in range(len(tensor_access_list[job_id]) - 1, -1, -1):\n tensor_access = tensor_access_list[job_id][i]\n accesses_of_tensor = tensor_access_by_tensor[tensor_access.tensor.job_id][tensor_access.tensor]\n if tensor_access.tensor not in tmp and len(accesses_of_tensor) > 1 and tensor_access == accesses_of_tensor[-1]:\n # 参数不会释放\n if not tensor_access.tensor.is_parameter:\n tmp.add(tensor_access.tensor)\n tensor_access.release_flag = True\n\n\ndef is_overlap(task: SwapTask, target: SwapTask):\n return task != target and (\n target.start_time < task.end_time < target.end_time or target.start_time < task.start_time < 
target.end_time or task.start_time < target.end_time < task.end_time or task.start_time < target.start_time < task.end_time)\n\n\ndef get_free_intervals(target_task, swap_schedule, access_of_target_tensor, key=0, asc=True):\n target_task.tensor.update_swap_time()\n # 列出在可行区间内的所有空白时间区间,并按区间排序\n if target_task.back_boundary - target_task.front_boundary < target_task.time_cost:\n return []\n intervals = []\n for task in swap_schedule:\n # if target_task.back_boundary < task.start_time:\n # continue\n # elif task.end_time < target_task.front_boundary:\n # break\n if target_task.front_boundary <= task.start_time < task.end_time <= target_task.back_boundary:\n intervals.append((task.start_time, task.end_time))\n elif task.start_time < target_task.front_boundary < task.end_time < target_task.back_boundary:\n intervals.append((target_task.front_boundary, task.end_time))\n elif target_task.front_boundary < task.start_time < target_task.back_boundary < task.end_time:\n intervals.append((task.start_time, target_task.back_boundary))\n elif task.start_time < target_task.front_boundary < target_task.back_boundary < task.end_time:\n return []\n intervals = sorted(intervals, key=lambda x: x[0])\n # 区间融合,确保区间之间无交集\n occupied_intervals = []\n i = 0\n while i < len(intervals):\n interval = intervals[i]\n l = interval[0]\n r = interval[1]\n flag = False\n while i < len(intervals) - 1 and intervals[i + 1][0] <= r:\n r = max(r, intervals[i + 1][1])\n flag = True\n i += 1\n occupied_intervals.append((l, r))\n if not flag:\n i += 1\n not_occupied_intervals = []\n s = target_task.front_boundary\n for interval in occupied_intervals:\n if s < interval[0]:\n not_occupied_intervals.append((s, interval[0]))\n s = interval[1]\n if s < target_task.back_boundary:\n not_occupied_intervals.append((s, target_task.back_boundary))\n if len(not_occupied_intervals) == 0:\n return []\n i = 0\n j = 0\n # 按照区间起点排序\n not_occupied_intervals = sorted(not_occupied_intervals, key=lambda x: x[key], reverse=False)\n # 防止区间与被调度张量的access重合\n while j < len(access_of_target_tensor):\n if i >= len(not_occupied_intervals):\n break\n access = access_of_target_tensor[j]\n start, end = not_occupied_intervals[i]\n if start < access.start_time < end <= access.end_time:\n not_occupied_intervals[i] = (start, access.start_time)\n i += 1\n elif start < access.start_time < access.end_time < end:\n not_occupied_intervals[i] = (start, access.start_time)\n not_occupied_intervals.insert(i + 1, (access.end_time, end))\n i += 1\n j += 1\n elif start == access.start_time < end < access.end_time:\n not_occupied_intervals.pop(i)\n j += 1\n elif access.start_time <= start < access.end_time < end:\n not_occupied_intervals[i] = (access.end_time, end)\n j += 1\n elif access.start_time <= start < end <= access.end_time:\n not_occupied_intervals.pop(i)\n else:\n j += 1\n # 按照区间终点排序\n if not asc:\n not_occupied_intervals = sorted(not_occupied_intervals, key=lambda x: x[key], reverse=not asc)\n return not_occupied_intervals\n\n\ndef generate_swap_recomputation_release_order(tensor_access_by_tensor, swap_scheduler, recomputations, job_num):\n swap_orders = defaultdict(list)\n release_orders = defaultdict(list)\n recomp_orders = defaultdict(list)\n for job_id in range(job_num):\n # 按id排序\n tensor_accesses = sorted([i for tmp in tensor_access_by_tensor[job_id].values() for i in tmp], key=lambda x: x.tensor.tensor_id)\n # 按起始时间排序\n swap_tasks = sorted(swap_scheduler[job_id], key=lambda x: x.start_time)\n for i in range(len(swap_tasks)):\n swap_tasks[i].swap_task_id = i\n 
releases = []\n swaps = []\n recomps = []\n for access in tensor_accesses:\n if access.release_flag:\n releases.append((access.operation_id, access.tensor.tensor_id))\n release_orders[job_id] = releases\n for access in recomputations:\n recomps.append((access.operation_id, access.tensor.tensor_id, access.release_for_recomputation))\n recomp_orders[job_id] = recomps\n for task in swap_tasks:\n # if task.task_type==TaskType.swap_out:\n # (task_id, node_id(tensor_id), start_time, start_node, move_to_gpu, start_node_type)\n ref = task.execute_ref.operation_id\n swaps.append([task.tensor.tensor_id, task.execute_time, ref, 0 if task.task_type == TaskType.swap_out else 1, 1, task.start_time])\n swap_orders[job_id] = list(map(lambda x: x[:-1], sorted(swaps, key=lambda x: x[-1])))\n return release_orders, swap_orders, recomp_orders\n\n\ndef draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num):\n for job_id in range(job_num):\n tmp = list(tensor_access_by_tensor[job_id].values())\n res = []\n for sub_list in tmp:\n res.extend(sub_list)\n draw(sorted(res, key=lambda x: x.start_time), swap_scheduler[job_id])\n\n\nclass MemoryAnalyzer:\n def __init__(self, tensor_access_list, tensors):\n self.tensor_access_list = tensor_access_list\n self.tensors = tensors\n self.next_swap_tasks_index = 0\n\n def insert_sort(self, list_with_order: list, list_b: list, cmp):\n # 升序\n for obj_b in list_b:\n i = 0\n mid = 0\n j = len(list_with_order) - 1\n while i < j:\n mid = (i + j) // 2\n obj_mid = list_with_order[mid]\n flag = cmp(obj_mid, obj_b)\n if flag == -1:\n # mid<b\n if mid == i:\n # i=mid<=j, mid<b, 比较b和j\n flag2 = cmp(list_with_order[j], obj_b)\n if flag2 == -1:\n # i=mid<=j<b, 插入位置在j+1\n mid = j\n elif flag2 == 1:\n # i=mid<b<j, 插入位置在j\n mid = j - 1\n else:\n # i=mid<=j=b, 插入位置在j+1\n mid = j\n break\n i = mid\n elif flag == 1:\n # b<mid\n if mid == j:\n # i<=mid=j, b<mid, 比较i和b\n flag2 = cmp(list_with_order[i], obj_b)\n if flag2 == -1:\n # i<b<mid=j, 插入位置在i+1\n mid = i\n elif flag2 == 1:\n # b<i<mid=j, 插入位置在i\n mid = i - 1\n else:\n # i=b<mid=j, 插入位置在i+1\n mid = i\n break\n j = mid\n elif flag == 0:\n # b==mid,插入位置在mid+1\n break\n list_with_order.insert(mid + 1, obj_b)\n return list_with_order\n\n def custom_cmp(self, x, y):\n if x.time < y.time:\n return -1\n elif x.time > y.time:\n return 1\n else:\n if x.start_time < y.start_time:\n return -1\n elif x.start_time > y.start_time:\n return 1\n else:\n # if isinstance(x,TensorAccess) and isinstance(y, SwapTask):\n # return 1\n # elif isinstance(x, SwapTask) and isinstance(y, TensorAccess):\n # return -1\n return 0\n\n def custom_cmp_end_time(self, x, y):\n if x.end_time < y.end_time:\n return -1\n elif x.end_time > y.end_time:\n return 1\n else:\n return 0\n\n def get_max_memory_used(self, swap_tasks, swapped_out_tensor):\n delta = len(swap_tasks)\n if self.next_swap_tasks_index == 0:\n # 初始化时间轴\n tmp = copy.copy(self.tensor_access_list)\n tmp.extend(swap_tasks)\n self.time_axis = sorted(tmp, key=cmp_to_key(self.custom_cmp))\n self.end_time_axis = sorted(copy.copy(tmp), key=cmp_to_key(self.custom_cmp_end_time))\n # self.last_unused_swap_tasks = copy.copy(swap_tasks)\n else:\n # 更新时间轴\n # assert swap_tasks[:self.next_swap_tasks_index] == self.last_unused_swap_tasks\n # self.last_unused_swap_tasks = copy.copy(swap_tasks)\n swap_tasks = swap_tasks[self.next_swap_tasks_index:]\n self.time_axis = self.insert_sort(self.time_axis, swap_tasks, self.custom_cmp)\n self.end_time_axis = self.insert_sort(self.end_time_axis, swap_tasks, 
self.custom_cmp_end_time)\n self.index_of_end_time_axis = {self.end_time_axis[i]: i for i in range(len(self.end_time_axis))}\n # 计算显存开销\n # occupied by handle, cudnn, cuda stream and cudart\n memory_used = 0\n max_memory_actual = float('-inf')\n in_gpu_tensors = set()\n max_memory_tensors = set()\n last_input_tensor_access = None\n max_last_access = None\n wait_to_be_released = []\n max_time = None\n # foot_print = {}\n # 首先把输入的x,y以及所有没被swap out的参数载入显存,因为他们从上轮迭代结束时就一直在显存里面\n for tensor in self.tensors:\n if tensor.in_gpu_at_beginning and tensor not in swapped_out_tensor:\n in_gpu_tensors.add(tensor)\n memory_used += tensor.size\n for time_index, event in enumerate(self.time_axis):\n i = len(wait_to_be_released) - 1\n while i >= 0:\n access = wait_to_be_released[i]\n # 如果此刻时间已经过了释放时间,则释放该访问的附带影响\n if event.time >= access.end_time:\n wait_to_be_released.pop(i)\n memory_used -= access.tensor.size\n in_gpu_tensors.remove(access.tensor)\n i -= 1\n if isinstance(event, TensorAccess):\n if event.access_type == AccessType.output:\n if event.tensor not in in_gpu_tensors:\n # 新参数不额外占用空间\n if event.operation_name not in optimizer_op:\n memory_used += event.tensor.size\n in_gpu_tensors.add(event.tensor)\n else:\n # 用完即释放的\n # input本身并不增加gpu使用,swap in增加\n if event.release_flag:\n wait_to_be_released.append(event)\n else:\n last_input_tensor_access = event\n elif isinstance(event, SwapTask):\n # 使用按照结束时间排序的时间轴进行倒序查找\n last_event = None\n # idx = end_time_axis.index(event)\n idx = self.index_of_end_time_axis[event]\n for j in range(idx - 1, -1, -1):\n if isinstance(self.end_time_axis[j], TensorAccess) and self.end_time_axis[j].end_time <= event.start_time:\n last_event = self.end_time_axis[j]\n break\n if last_event is None:\n last_event = self.tensor_access_list[0]\n event.execute_ref = last_event\n event.execute_time = event.start_time - last_event.end_time\n if event.task_type == TaskType.swap_in:\n memory_used += event.tensor.size\n in_gpu_tensors.add(event.tensor)\n else:\n memory_used -= event.tensor.size\n in_gpu_tensors.remove(event.tensor)\n # foot_print[time] = memory_used\n if memory_used > max_memory_actual:\n # max_memory_actual与是否有考虑价值无关,单纯计量峰值\n max_memory_actual = memory_used\n max_memory_tensors = copy.copy(in_gpu_tensors)\n max_last_access = last_input_tensor_access\n max_time = event.time\n self.next_swap_tasks_index = delta\n return max_memory_actual, max_memory_tensors, max_last_access, max_time, self.time_axis\n\n\ndef run_global_memory_analysis(swap_tasks, swapped_out_tensor):\n global job_num\n global global_memory_analyzer\n max_memory = 0\n max_memory_tensors = []\n last_input_accesses = []\n max_time = []\n # foot_prints = []\n time_axis = []\n for job_id in range(job_num):\n job_max_memory, job_max_memory_tensors, last_input_access, now_time, t_axis = global_memory_analyzer[job_id].get_max_memory_used(swap_tasks[job_id], swapped_out_tensor)\n time_axis.append(t_axis)\n # foot_prints.append(foot_print)\n max_memory_tensors.extend(job_max_memory_tensors)\n last_input_accesses.append(last_input_access)\n max_time.append(now_time)\n max_memory += job_max_memory\n return max_memory, max_memory_tensors, last_input_accesses, max_time, time_axis\n\n\ndef draw(tensor_access_list, swap_schedule):\n df = []\n id_color = {'OTA': 'rgb(255, 0, 102)', 'ITA': 'rgb(68, 114, 196)', 'Swap In': 'rgb(237, 137, 69)', 'Swap Out': 'rgb(112, 173, 71)'}\n for tensor_access in tensor_access_list:\n # input 蓝色,output红色\n df.append(dict(Task=f'tensor_id:{tensor_access.tensor.tensor_id}, 
size:{tensor_access.tensor.size}', Start=tensor_access.start_time, Finish=tensor_access.end_time,\n Resource='OTA' if tensor_access.access_type == AccessType.output else 'ITA'))\n for task in swap_schedule:\n df.append(dict(Task=f'tensor_id:{task.tensor.tensor_id}, size:{task.tensor.size}', Start=task.start_time, Finish=task.end_time, Resource='Swap In' if task.task_type == TaskType.swap_in else 'Swap Out'))\n\n fig = ff.create_gantt(df, colors=id_color, index_col='Resource', group_tasks=True, show_colorbar=True, showgrid_x=True, showgrid_y=True, title=f'ratio={ratio}')\n fig['layout']['xaxis'].update({'type': None})\n fig.update_layout(\n height=900,\n width=1600,\n )\n pyplt(fig, filename=f'../../pic/job{tensor_access_list[0].tensor.job_id}.html', auto_open=True)\n\n\ndef try_swap_in(swap_in_task: SwapTask, swap_scheduler, access_of_target_tensor):\n # swap_in越晚越好,按结束时间降序排序\n free_intervals = get_free_intervals(swap_in_task, swap_scheduler[swap_in_task.tensor.job_id], access_of_target_tensor, 1, asc=False)\n succeed = False\n for interval in free_intervals:\n if interval[1] - interval[0] >= swap_in_task.time_cost:\n swap_in_task.end_time = interval[1]\n swap_in_task.start_time = swap_in_task.end_time - swap_in_task.time_cost\n swap_scheduler[swap_in_task.tensor.job_id].append(swap_in_task)\n succeed = True\n break\n if not succeed:\n return False\n else:\n return True\n\n\ndef can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):\n # 至少将第一个访问swap in才算成功,后续的能换入的话,则把前一个的release_flag设为True\n access = all_access_of_tensor[i]\n swap_in_task = SwapTask(access.tensor, access.time, access.tensor.swap_time, TaskType.swap_in,\n front_boundary=swap_out_task.end_time if swap_out_task.end_time > all_access_of_tensor[i - 1].end_time else all_access_of_tensor[i - 1].end_time,\n back_boundary=access.time)\n return try_swap_in(swap_in_task, swap_scheduler, tensor_access_by_tensor[swap_in_task.tensor.job_id][swap_in_task.tensor])\n\n\ndef get_framework_info(info, logged_time, job_id):\n global global_tensors\n tensors = {}\n tensor_access_list = []\n global_time = 0\n parameter = []\n # tensor_id: execution time of operator which generate the tensor\n operator_execution_time = []\n # for output_tensor_id, input_tensor_id, output_tensor_size, operation_name, is_parameter, shape, inputs_of_model in info:\n for tensor_info, input_tensor_id, operation_name, operation_id, is_parameter, inputs_of_model, _ in info:\n # is_parameter: 生成的张量是否为参数\n # 输入的为Byte\n # 转换为MB\n input_tensors = []\n for tensor_id in input_tensor_id:\n input_tensor = tensors[tensor_id]\n input_tensors.append(input_tensor)\n time_cost = get_predicted_execution_time(operation_name, inputs_of_model, logged_time[operation_id])\n for output_tensor_id, output_tensor_size, shape in tensor_info:\n output_tensor_size = output_tensor_size / 1000000\n operator_execution_time.append(time_cost)\n if operation_name in optimizer_op:\n is_parameter = 1\n output_tensor = Tensor(tensor_id=output_tensor_id, job_id=job_id, size=output_tensor_size, source_tensors=input_tensors, recomputation_time=time_cost, is_parameter=is_parameter, shape=shape)\n output_access = TensorAccess(tensor=output_tensor, time=global_time + time_cost, run_time=time_cost, access_type=AccessType.output, operation_id=operation_id, operation_name=operation_name)\n tensor_access_list.append(output_access)\n tensors[output_tensor.tensor_id] = output_tensor\n if is_parameter:\n parameter.append(output_tensor)\n for tensor_id in input_tensor_id:\n input_tensor 
= tensors[tensor_id]\n input_access = TensorAccess(tensor=input_tensor, time=global_time, run_time=time_cost, access_type=AccessType.input, operation_id=operation_id, operation_name=operation_name)\n tensor_access_list.append(input_access)\n global_time += time_cost\n\n tensors = list(tensors.values())\n global_tensors[job_id] = tensors\n tensor_access_list = sorted(tensor_access_list, key=lambda x: x.time)\n dic = defaultdict(list)\n for access in tensor_access_list:\n dic[access.tensor].append(access)\n for k, v in dic.items():\n dic[k] = sorted(v, key=lambda x: x.time)\n tensor_access_by_tensor[job_id] = dic\n\n swap_scheduler = []\n # 对参数进行swap in调度\n # earliest_swap = None\n # earliest_time = float('inf')\n # 从最早的参数开始安排\n parameter = sorted(parameter, key=lambda x: dic[x][0].start_time)\n return tensor_access_list, swap_scheduler, parameter, operator_execution_time\n\n\n# 随机生成数据用的参数\ntimes = 150\ntensors = 50\ntime_scale = times\nratio = 1\n\n# 全局变量\njob_num = 0\nglobal_tensor_access = [[]]\ntensor_access_by_tensor = []\nweight = 1\njobs_weights = []\n# jobs_weight = [1, 1, 1, 1, 1]\ntotal_memory = 0\nenable_recomputation = True\nglobal_graphs = []\nglobal_tensors = {}\nswap_scheduler = []\nparameters = []\nmodels = {}\nglobal_memory_analyzer = []\n\n\n# load_all_model()\n\n\ndef init(logged_times: list, gpu: int):\n global job_num\n global global_tensor_access\n global tensor_access_by_tensor\n global total_memory\n global handle\n global jobs_weights\n global global_graphs\n global global_tensors\n global swap_scheduler\n global parameters\n global global_memory_analyzer\n global_tensor_access = [[]]\n tensor_access_by_tensor = []\n global_tensors = {}\n swap_scheduler = []\n parameters = []\n global_memory_analyzer = []\n graphs = global_graphs\n jobs_weights = [weight for _ in range(len(graphs))]\n tensor_access_by_tensor = [[] for _ in range(job_num)]\n # 获取当前剩余显存总量\n if not debug_mod:\n nvmlInit()\n handle = nvmlDeviceGetHandleByIndex(gpu)\n total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000\n else:\n total_memory = 6000\n job_num = len(graphs)\n tmp = [get_framework_info(graphs[i], logged_times[i], i) for i in range(job_num)]\n global_tensor_access = [tmp[i][0] for i in range(job_num)]\n swap_scheduler = [tmp[i][1] for i in range(job_num)]\n parameters = [tmp[i][2] for i in range(job_num)]\n for i in range(job_num):\n global_memory_analyzer.append(MemoryAnalyzer(global_tensor_access[i], global_tensors[i]))\n\n\ndef add_job(graph, job_id, gpu: int):\n global global_graphs\n assert job_id == len(global_graphs) or global_graphs[job_id] is None\n if job_id == len(global_graphs):\n global_graphs.append(graph)\n else:\n global_graphs[job_id] = graph\n init([[] for _ in range(job_num)], gpu)\n\n\ndef remove_job(job_id, gpu: int):\n global global_graphs\n global_graphs[job_id] = None\n init([], gpu)\n\n\ndef generate_scheduling_plan(logged_times, gpu: int):\n # 如果是此时logged_times已经清空,则\n # logged_times: [[(operation_id, [time, time, time])]],外层索引为job_id\n global total_memory\n global global_tensors\n init(logged_times, gpu)\n # 指数加权平均更新估计时间\n tensor_nums = list(map(lambda x: len(x), tensor_access_by_tensor))\n swap_out_number_limits = [int(weight * tensor_num) for weight, tensor_num in zip(jobs_weights, tensor_nums)]\n swap_out_number = [0 for _ in tensor_nums]\n swapped_out_tensor = set()\n swapped_in_source_tensor = set()\n swap_out_dict = {}\n swapped_in_access = set()\n recomputations = []\n recomputation_tensor = set()\n # 
key:tensor,value:[所有释放这个张量的重计算对应的在recomputations中的index]\n # 上一轮没有成功的swap_out时为False\n swapped_flag = True\n recomputation_flag = True\n iter = 0\n original_memory_used = 0\n last_memory_used = 0\n job_id_ordered_by_weights = list(map(lambda x: x[0], sorted([(job_id, weights) for job_id, weights in enumerate(jobs_weights)], key=lambda x: x[1], reverse=True)))\n max_memory_footprint = []\n # draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num)\n while swapped_flag or (recomputation_flag and enable_recomputation):\n # MB\n if not debug_mod:\n total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000\n else:\n total_memory = 6000\n max_memory, max_tensors, last_input_accesses, max_time, time_axis = run_global_memory_analysis(swap_scheduler, swapped_out_tensor)\n max_memory_footprint.append(max_memory)\n # 最后三次迭代的峰值,做一阶差分,结果的最大值大于上一次峰值的0.05%以上或迭代次数小于100轮才继续~`\n if len(max_memory_footprint) > 3 and max([max_memory_footprint[i] - max_memory_footprint[i + 1] for i in range(len(max_memory_footprint) - 3, len(max_memory_footprint) - 1)]) < max_memory_footprint[\n -1] * 0.0005 and iter > 100:\n break\n if iter == 0:\n original_memory_used = max_memory\n liveness_analysis(global_tensor_access)\n else:\n last_memory_used = max_memory\n # print(f'iter:{iter}, max_memory:{max_memory}')\n max_tensors = sorted(max_tensors, key=lambda x: x.size, reverse=True)\n if swapped_flag:\n swapped_flag = False\n for tensor in max_tensors:\n # 对该张量进行swap_out计划的安排\n is_new_parameter = tensor.is_parameter and tensor_access_by_tensor[tensor.job_id][tensor][0].operation_name in optimizer_op and len(tensor_access_by_tensor[tensor.job_id][tensor]) == 1\n if not is_new_parameter:\n if swap_out_number[tensor.job_id] <= swap_out_number_limits[tensor.job_id] and len(tensor_access_by_tensor[tensor.job_id][tensor]) > 1:\n # swapped_out表示所有可能的swap_in已经调度过了\n if tensor not in swapped_out_tensor:\n all_access_of_tensor = tensor_access_by_tensor[tensor.job_id][tensor][1:]\n # 首先确定swap_out的时间范围,最迟不能超过此时此刻,最早不能超过第一次访问结束时刻\n output_access = tensor_access_by_tensor[tensor.job_id][tensor][0]\n assert output_access.access_type == AccessType.output\n if last_input_accesses[tensor.job_id] is not None:\n # 此时此刻\n back_boundary = last_input_accesses[tensor.job_id].time\n else:\n last_time_access = tensor_access_by_tensor[tensor.job_id][tensor][-1]\n back_boundary = last_time_access.time + tensor.swap_time\n succeed = False\n front_boundary = output_access.time\n # failed_input_access = []\n swap_out_succeed = True\n have_next_ITA = True\n # 如果是因为swap out放不下,则不用继续更新可行区间了,直接break\n while not succeed and front_boundary < back_boundary and swap_out_succeed and have_next_ITA:\n swap_out_task = SwapTask(tensor, output_access.time, tensor.swap_time, TaskType.swap_out, front_boundary=front_boundary, back_boundary=back_boundary)\n free_intervals = get_free_intervals(swap_out_task, swap_scheduler[swap_out_task.tensor.job_id], tensor_access_by_tensor[tensor.job_id][tensor])\n selected_first_access_index = None\n # 选出能容纳该任务的剩余空间\n swap_out_succeed = False\n have_next_ITA = False\n for interval in free_intervals:\n if interval[1] - interval[0] >= swap_out_task.time_cost:\n swap_out_succeed = True\n swap_out_task.start_time = interval[0]\n swap_out_task.end_time = swap_out_task.start_time + swap_out_task.time_cost\n swap_scheduler[swap_out_task.tensor.job_id].append(swap_out_task)\n # 看一下后面第一个swap_in能否放下\n for i, access in enumerate(all_access_of_tensor):\n # 找到后面第一个访问\n if access.start_time >= swap_out_task.end_time:\n have_next_ITA = 
True\n if can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):\n swapped_out_tensor.add(tensor)\n swap_out_dict[tensor] = swap_out_task\n swapped_in_access.add(access)\n swap_out_number[tensor.job_id] += 1\n selected_first_access_index = i\n succeed = True\n swapped_flag = True\n else:\n # failed_input_access.append(access)\n swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)\n # 修正swap_out_task前向限制为这个失败的input_access的结束时间\n front_boundary = access.end_time\n assert tensor not in swapped_out_tensor\n # swapped_out_tensor.remove(tensor)\n break\n if not succeed:\n if swap_out_task in swap_scheduler[swap_out_task.tensor.job_id]:\n swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)\n # 如果不是因为swap out没安排下则重新生成区间\n break\n else:\n break\n # 安排失败\n if not succeed:\n continue\n if not is_new_parameter:\n # 后续的能换入的话,则把前一个的release_flag设为True\n for i in range(selected_first_access_index + 1, len(all_access_of_tensor)):\n access = all_access_of_tensor[i]\n if i == 0 or access in swapped_in_access:\n continue\n else:\n if can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):\n # print(f'成功{access}')\n swapped_in_access.add(access)\n if all_access_of_tensor[i - 1].start_time > swap_out_task.end_time:\n all_access_of_tensor[i - 1].release_flag = True\n if swapped_flag:\n break\n # 如果是新参数,则尝试对新参数进行swap out,对对应的旧参数进行swap in\n else:\n if tensor not in swapped_out_tensor:\n output_access = tensor_access_by_tensor[tensor.job_id][tensor][0]\n assert output_access.access_type == AccessType.output\n swap_out_task = SwapTask(tensor, time=output_access.time, time_cost=tensor.swap_time, task_type=TaskType.swap_out, front_boundary=output_access.end_time, back_boundary=float('inf'))\n free_intervals = get_free_intervals(swap_out_task, swap_scheduler[swap_out_task.tensor.job_id], tensor_access_by_tensor[tensor.job_id][tensor])\n for interval in free_intervals:\n if interval[1] - interval[0] >= swap_out_task.time_cost:\n swap_out_task.start_time = interval[0]\n swap_out_task.end_time = swap_out_task.start_time + swap_out_task.time_cost\n swap_scheduler[swap_out_task.tensor.job_id].append(swap_out_task)\n # 找到对应的旧参数张量\n # 由于二者可行域无关,所以直接查看对应的swap in 能否调度\n for t in tensor.source_tensors:\n if t.is_parameter and t not in swapped_in_source_tensor:\n # 试图swap in\n # 找到第一次input访问(feed_dict不实际使用)\n first_access = tensor_access_by_tensor[t.job_id][t][1]\n assert first_access.access_type == AccessType.input\n swap_in_task = SwapTask(t, first_access.time, first_access.tensor.swap_time, TaskType.swap_in, front_boundary=0, back_boundary=first_access.start_time)\n res = try_swap_in(swap_in_task, swap_scheduler, tensor_access_by_tensor[t.job_id][t])\n # assert not res, f'swap in parameter:{t} failed'\n if res:\n swapped_in_source_tensor.add(t)\n swapped_out_tensor.add(tensor)\n swap_out_dict[tensor] = swap_out_task\n swapped_in_access.add(first_access)\n swap_out_number[tensor.job_id] += 1\n swapped_flag = True\n else:\n swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)\n assert tensor not in swapped_out_tensor\n break\n break\n elif enable_recomputation:\n recomputation_flag = False\n # 需要重计算\n if max_memory >= total_memory:\n for job_id in job_id_ordered_by_weights:\n max_tensors_filtered = []\n for tensor in max_tensors:\n # 张量不是参数,没被逐出过,且他的所有源张量从未被swap或recomputation\n if not tensor.is_parameter and tensor not in swapped_out_tensor and tensor.source_tensors is not None and len(tensor.source_tensors) > 0 and \\\n False 
not in [t not in swapped_out_tensor for t in tensor.source_tensors] and False not in [t not in recomputations for t in tensor.source_tensors]:\n max_tensors_filtered.append(tensor)\n if len(max_tensors_filtered) == 0:\n continue\n max_tensors_by_metric = sorted(max_tensors_filtered, key=lambda x: x.recomputation_metric, reverse=True)\n # 选取metric最大的张量\n tensor = max_tensors_by_metric[0]\n # 找到此刻对应的下一个访问\n now_time = max_time[job_id]\n all_access_of_tensor = tensor_access_by_tensor[tensor.job_id][tensor]\n for i, access in enumerate(all_access_of_tensor):\n if access.access_type == AccessType.input and access not in recomputations:\n if access.start_time >= now_time:\n for source_tensor in access.tensor.source_tensors:\n accesses = tensor_access_by_tensor[source_tensor.job_id][source_tensor]\n for temp_acc in accesses:\n #  确保source被release过的不进行重计算\n if temp_acc.release_flag and temp_acc.end_time <= access.start_time:\n break\n else:\n recomputations.append(access)\n all_access_of_tensor[i - 1].release_flag = True\n recomputation_flag = True\n recomputation_tensor.add(access.tensor)\n break\n break\n iter += 1\n # fig = go.Figure(data=[go.Scatter(x=list(original_memory_footprint[0].keys()), y=list(original_memory_footprint[0].values())), go.Scatter(x=list(foot_prints[0].keys()), y=list(foot_prints[0].values()))])\n # plotly.offline.plot(fig, filename='../../pic/footprint.html')\n # if not debug_mod:\n # total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000\n # else:\n # total_memory = 6000\n # stats = 'succeed' if max_memory < total_memory else ' failure'\n # print(f'scheduling {stats}')\n # draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num)\n memory_saved_ratio = format((1 - last_memory_used / original_memory_used) * 100, '.2f')\n print(f'memory_saved_ratio:{memory_saved_ratio}%')\n print(f'swap ratio:{len(swap_scheduler[0]) / len(global_tensors)}')\n # print(f'recomputations:{recomputations}')\n return generate_swap_recomputation_release_order(tensor_access_by_tensor, swap_scheduler, recomputations, job_num)\n\n\ndef multiprocess_init(global_message_queue: multiprocessing.Queue, global_control_queue: multiprocessing.Queue, total_job_number):\n # swap_order = [(20, 0, 20, 0)]\n # control_messages = []\n # control_message = [swap_order, [], []]\n # control_messages.append(control_message)\n # global_control_queue.put(control_messages)\n logged_times = []\n log_repeat = 0\n alpha = 0.9\n second_schedule_finished = False\n\n # todo 设置从executor到algorithm的job_id的映射\n map_out_to_in = {}\n map_in_to_out = {}\n global job_num\n job_num = 0\n\n while True:\n if not global_message_queue.empty():\n global_message = global_message_queue.get()\n job_id = global_message[0]\n message_type = global_message[1][0]\n message_graph = global_message[1][1]\n\n if message_type == 0:\n\n # print(\"job_id =\", job_id)\n\n job_num += 1\n map_out_to_in[job_id] = job_num - 1\n map_in_to_out[job_num - 1] = job_id\n job_id_in = job_num - 1\n\n logged_times.append([])\n global_graphs.append(message_graph)\n tensor_num = len(message_graph)\n\n # with open(\"../../global_graphs\", \"wb\") as f1:\n # pickle.dump(global_graphs, f1)\n\n for i in range(tensor_num):\n # print(message_graph[i][6])\n logged_times[job_id_in].append([message_graph[i][6]])\n\n s = time.time()\n if job_num == total_job_number:\n release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)\n print(f'time:{time.time() - s}')\n control_messages = {}\n for i in range(job_num):\n # print(swap_order)\n 
control_message = [swap_order[i], release_order[i], recomputation_order[i]]\n control_messages[map_in_to_out[i]] = control_message\n global_control_queue.put(control_messages)\n else:\n\n job_id_in = map_out_to_in[job_id]\n\n total_time_old = 0\n for run_time in logged_times[job_id_in]:\n total_time_old += run_time[0]\n total_time_new = 0\n for run_time in message_graph:\n total_time_new += run_time[1]\n change_rate = abs(total_time_new - total_time_old) / total_time_old\n print(\"change rate is \", change_rate)\n # print(\"total time new is\", total_time_new)\n # print(\"total time old is\", total_time_old)\n\n if change_rate > 0.3:\n is_replan = True\n else:\n is_replan = False\n\n # with open(\"./log/total_time.txt\", \"a\") as f1:\n # print(total_time_new, file=f1)\n\n # todo 此处控制了在一定轮数之后才进行决策\n log_repeat += 1\n if log_repeat > 0 and (is_replan or (not second_schedule_finished)):\n\n second_schedule_finished = True\n # with open(\"../../logged_times\", \"wb\") as f1:\n # pickle.dump(logged_times, f1)\n\n for node_message in message_graph:\n time_new = node_message[1] * alpha + logged_times[job_id_in][node_message[0]][0] * (1 - alpha)\n logged_times[job_id_in][node_message[0]] = [time_new]\n\n release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)\n\n print(logged_times)\n\n control_messages = {}\n\n for i in range(job_num):\n print(swap_order)\n control_message = [swap_order[i], release_order[i], recomputation_order[i]]\n control_messages[map_in_to_out[i]] = control_message\n global_control_queue.put(control_messages)\n # print(logged_times[0])\n\n\nif debug_mod and __name__ == '__main__':\n import pickle\n\n with open('../../global_graphs', 'rb') as f:\n g = pickle.load(f)\n global_graphs = g\n with open('../../logged_times', 'rb') as f:\n logged_times = pickle.load(f)\n job_num = 1\n # profiler = LineProfiler()\n # profiler.add_function(get_free_intervals)\n # # profiler.add_function(get_occupied_intervals)\n # # profiler.add_function(MemoryAnalyzer.get_max_memory_used)\n # # profiler.add_function(run_global_memory_analysis)\n # profiler_wrapper = profiler(generate_scheduling_plan)\n # res = profiler_wrapper(logged_times, 0)\n # profiler.print_stats()\n release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)\n" ]
[ [ "numpy.arange" ] ]
gstoica27/tacred-exploration
[ "49ab2ebf54d38173ce08db372c0f940c34e1954f" ]
[ "ensemble.py" ]
[ "\"\"\"\nEnsemble the predictions from different model outputs.\n\"\"\"\nimport argparse\nimport json\nimport pickle\nimport numpy as np\nfrom collections import Counter\n\nfrom data.loader import DataLoader\nfrom utils import scorer, constant\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('pred_files', nargs='+', help='A list of prediction files written by eval.py.')\n parser.add_argument('--data_dir', default='dataset/tacred')\n parser.add_argument('--dataset', default='test', help='Evaluate on dev or test set.')\n parser.add_argument('--weights', default='')\n args = parser.parse_args()\n return args\n\ndef main():\n args = parse_args()\n print(\"Loading data file...\")\n filename = args.data_dir + '/{}.json'.format(args.dataset)\n with open(filename, 'r') as infile:\n data = json.load(infile, encoding='utf8')\n labels = [d['relation'] for d in data]\n\n # read predictions\n print(\"Loading {} prediction files...\".format(len(args.pred_files)))\n scores_list = []\n for path in args.pred_files:\n with open(path, 'rb') as infile:\n scores = pickle.load(infile)\n scores_list += [scores]\n \n print(\"Calculating ensembled predictions...\")\n predictions = []\n scores_by_examples = list(zip(*scores_list))\n assert len(scores_by_examples) == len(data)\n for scores in scores_by_examples:\n if len(args.weights) == 0:\n pred = ensemble(scores)\n else:\n pred = weight_sum(scores, args.weights)\n predictions += [pred]\n id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])\n predictions = [id2label[p] for p in predictions]\n scorer.score(labels, predictions, verbose=True)\n\ndef ensemble(scores):\n \"\"\"\n Ensemble by majority vote.\n \"\"\"\n c = Counter()\n for probs in zip(scores):\n idx = int(np.argmax(np.array(probs)))\n c.update([idx])\n best = c.most_common(1)[0][0]\n return best\n\ndef weight_sum(scores, weights):\n weights = list(map(lambda x: float(x), weights.split(' ')))\n aggregate_scores = np.zeros(len(scores[0]))\n for model_scores, weight in zip(scores, weights):\n scores_weights = np.array(model_scores) * weight\n aggregate_scores += scores_weights\n best = int(np.argmax(aggregate_scores))\n return best\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
GlobalFishingWatch/pipe-segment
[ "4992719d9244901baed7c5db88f434cb87ebb179" ]
[ "pipe_segment/stats/stats.py" ]
[ "import pandas as pd\nimport numpy as np\nimport itertools as it\nfrom collections import defaultdict\nfrom collections import Counter\nfrom six.moves import map as imap\n\n\ndef dict_subset(d, fields):\n # return a subset of the provided dict containing only the\n # fields specified in fields\n return {k: d[k] for k in d if k in fields and d[k] is not None}\n\n\nclass MessageFieldCounter:\n \"\"\"\n Count occurrences of values in a stream of messages for a specified set of fields\n Usage:\n\n messages = [\n {'a': 'apple', 'b': 'boat'},\n {'a': 'pear', 'b': 'boat'},\n {'a': 'apple', 'b': 'boat'},\n ]\n fields = ['a', 'b']\n mfc = MessageFieldCounter(messages, fields)\n # this class is designed to pass through a long stream of messages\n # so we have to pull them through in order to count them\n for msg in mcf:\n pass\n print mfc.most_common('a')\n >>> [('apple', 2)]\n \"\"\"\n\n def __init__(self, messages, fields):\n self.fields = set(fields)\n self.messages = messages\n self.counters = defaultdict(Counter)\n\n def __iter__(self):\n return self.process()\n\n def process(self):\n for msg in self.messages:\n for key in self.fields:\n value = msg.get(key, None)\n if value is not None:\n self.counters[key][value] += 1\n\n yield msg\n\n def most_common(self, field, n=1):\n return self.counters[field].most_common(n)\n\n\nclass MessageStats():\n \"\"\"\n Extract a set of stats from as stream of messages.\n\n numeric_fields: list of field names to compute numeric stats (eg min, max, avg)\n frequency_fields: list of field names to compute frequency of values\n \"\"\"\n NUMERIC_STATS = ['min', 'max', 'first', 'last', 'count']\n FREQUENCY_STATS = ['most_common', 'most_common_count']\n\n def __init__(self, messages, numeric_fields, frequency_fields):\n self._numeric_fields = numeric_fields\n self._frequency_fields = frequency_fields\n\n self.counter = MessageFieldCounter(messages, frequency_fields)\n messages = self.counter.process()\n messages = imap(dict_subset, messages, it.repeat(numeric_fields))\n # DataFrame won't take an iterator, but it will take a generator\n messages = (m for m in messages)\n self.df = pd.DataFrame(messages)\n\n @property\n def numeric_fields(self):\n return self._numeric_fields\n\n @property\n def frequency_fields(self):\n return self._frequency_fields\n\n @property\n def frequency_counter(self):\n return self.counter\n\n @property\n def data_frame(self):\n return self.df\n\n def numeric_stats(self, field):\n def first(col):\n idx = col.first_valid_index()\n return col[idx] if idx is not None else None\n\n def last(col):\n idx = col.last_valid_index()\n return col[idx] if idx is not None else None\n\n assert field in self.numeric_fields\n if field in self.df:\n col = self.df[field]\n return dict(\n min=np.nanmin(col),\n max=np.nanmax(col),\n first=first(col),\n last=last(col),\n count=np.count_nonzero(~np.isnan(col)),\n )\n else:\n return {}\n\n def frequency_stats(self, field):\n assert field in self.frequency_fields\n stat = self.frequency_counter.most_common(field)\n if stat:\n value, count = stat[0]\n return dict(\n most_common=value,\n most_common_count=count\n )\n else:\n return {}\n\n def field_stats(self, field):\n stats = {}\n if field in self.numeric_fields:\n stats.update(self.numeric_stats(field))\n if field in self.frequency_fields:\n stats.update(self.frequency_stats(field))\n return stats" ]
[ [ "pandas.DataFrame", "numpy.nanmax", "numpy.nanmin", "numpy.isnan" ] ]
Victorwz/Generative-Hippocampal-entorhinal-System
[ "5f38b0fea364c1974ebaf25f16576777a35295e3" ]
[ "generate.py" ]
[ "import os\nimport sys\nimport numpy as np\nimport torch\nimport pickle \n\nimport logging\nlog = logging.getLogger(__name__)\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO\n)\n\n\nclass Graph4D():\n\n def __init__(self, num_envs=4096, env_size=(4,4), steps=128, save=False, data_root='./data/', num_categories=None, verbose=False):\n self.num_envs = num_envs\n self.steps = steps\n self.env_size = env_size\n self.save = False\n self.data_root = data_root\n self.num_categories = num_categories\n self.generate_data(verbose=verbose)\n log.info('''Generated Data: \n \\t\\t\\t {} Environments...\n \\t\\t\\t {} env size...\n \\t\\t\\t {} steps in each... \n \\t\\t\\t {} observable one hot categories... '''.format(\n num_envs, env_size, steps, self.num_categories\n ))\n\n\n def square_env(self):\n \"\"\"\n Generate map where each vertex has a one hot categorical distribution\n Returns:\n (N,N,num_categories) matrix with one-hot categorical observations\n \"\"\"\n env_size = self.env_size\n env = np.zeros((env_size[0], env_size[1], self.num_categories))\n for i in range(env_size[0]):\n # Randomly assign categories to each vertex in a row\n category = np.random.randint(0, self.num_categories, env_size[1])\n # One hot encode them\n env[i, np.arange(category.size), category] = 1\n \n return env\n\n \n def update_location_4way(self, env, loc):\n \"\"\"\n Samples a valid four-way action and updates location \n \"\"\"\n length = env.shape[0]\n valid = False\n # print(loc, end=' --> ')\n while not valid:\n # Sample action\n action = np.random.randint(0, 4)\n # Move up\n if action == 0:\n if loc[0] - 1 >= 0:\n # print('Moving up', end=' --> ')\n loc[0] -= 1\n valid = True\n # Move right\n elif action == 1:\n if loc[1] + 1 < length:\n # print('Moving Right', end=' --> ')\n loc[1] += 1\n valid = True\n # Move down\n elif action == 2:\n if loc[0] + 1 < length:\n # print('Moving Down', end=' --> ')\n loc[0] += 1\n valid = True\n # Move left\n elif action == 3:\n if loc[1] - 1 >= 0:\n # print('Moving Left', end=' --> ')\n loc[1] -= 1\n valid = True\n \n # One hot encode action\n act = np.zeros(4)\n act[action] = 1\n return act, loc\n\n\n def trajectory_4way(self, env):\n \"\"\"\n Generate trajectory of agent diffusing through 4-way connected graph\n At each point we sample the one-hot observation and take an action\n 0 = up\n 1 = right\n 2 = down\n 3 = left \n\n Params:\n steps (int): Number of steps to take\n env (3d np array): environment in which to wander (NxNx(num_categories))\n Returns \n Observations (steps, num_categories), Actions (steps, 4) \n \"\"\" \n observations = np.zeros((self.steps, self.num_categories))\n actions = np.zeros((self.steps, 4))\n positions = np.zeros((self.steps, 2))\n\n loc = np.random.randint(0, env.shape[0], 2) # Initial Location\n\n for step in range(self.steps):\n positions[step] = loc\n obs = env[loc[0], loc[1]] # Observe scene\n action, loc = self.update_location_4way(env, loc) # Sample action and new location\n observations[step] = obs \n actions[step] = action\n\n return observations, actions, positions \n \n def generate_data(self, verbose=False):\n \"\"\"\n Generates N square environments and trajectories ((observation, action) pairs)\n for each environment\n\n Params:\n envs (int): number of environments to generate\n steps (int): how many steps an agent initially takes in each environment\n env_size (tuple): size of environment (should be something like (4,4), (9,9), 
etc...)\n save (bool): whether or not to save the dataset\n \n Returns:\n Dict of \"environments, observations, actions\", each corresponding to: \n environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories), \n observations: Array shape: (num_envs, steps, num_categories),\n actions: Array shape: (num_envs, steps, 4)\n \"\"\"\n env_size = self.env_size\n if self.num_categories == None:\n self.num_categories = env_size[0] * env_size[1]\n \n self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories))\n self.observations = np.zeros((self.num_envs, self.steps, self.num_categories)) \n self.actions = np.zeros((self.num_envs, self.steps, 4)) \n self.positions = np.zeros((self.num_envs, self.steps, 2))\n \n for i in range(self.num_envs):\n env = self.square_env() # Generate new environment\n obs, acts, pos = self.trajectory_4way(env) # Generate random walk for that environment \n\n self.environments[i] = env\n self.observations[i] = obs\n self.actions[i] = acts\n self.positions[i] = pos\n\n self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions}\n if self.save:\n name = os.path.join(self.data_root, 'four_way_graph.pickle')\n with open(name, 'wb') as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n \nif __name__=='__main__':\n print('Generating 20 (8,8) environments with 256 random steps in each.')\n graph = Graph4D(num_envs=20, env_size=(8,8), steps=256)\n data = graph.data\n envs = graph.environments\n observations = graph.observations\n actions = graph.actions\n positions = graph.positions\n print('Envs,', envs.shape)\n print('Obs', observations.shape)\n print('Acts', actions.shape)\n print('Pos', positions.shape)" ]
[ [ "numpy.arange", "numpy.random.randint", "numpy.zeros" ] ]
runauto/nni
[ "30152b04c4739f5b4f95087dee5f1e66ee893078" ]
[ "src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n\"\"\"\nA family of functions used by CurvefittingAssessor\n\"\"\"\n\nimport numpy as np\n\nall_models = {}\nmodel_para = {}\nmodel_para_num = {}\n\ncurve_combination_models = ['vap', 'pow3', 'linear', 'logx_linear', 'dr_hill_zero_background', 'log_power', 'pow4', 'mmf',\n 'exp4', 'ilog2', 'weibull', 'janoschek']\n\ndef vap(x, a, b, c):\n \"\"\"Vapor pressure model\n\n Parameters\n ----------\n x : int\n a : float\n b : float\n c : float\n\n Returns\n -------\n float\n np.exp(a+b/x+c*np.log(x))\n \"\"\"\n return np.exp(a+b/x+c*np.log(x))\n\nall_models['vap'] = vap\nmodel_para['vap'] = [-0.622028, -0.470050, 0.042322]\nmodel_para_num['vap'] = 3\n\ndef pow3(x, c, a, alpha):\n \"\"\"pow3\n\n Parameters\n ----------\n x : int\n c : float\n a : float\n alpha : float\n\n Returns\n -------\n float\n c - a * x**(-alpha)\n \"\"\"\n return c - a * x**(-alpha)\n\nall_models['pow3'] = pow3\nmodel_para['pow3'] = [0.84, 0.52, 0.01]\nmodel_para_num['pow3'] = 3\n\ndef linear(x, a, b):\n \"\"\"linear\n\n Parameters\n ----------\n x : int\n a : float\n b : float\n\n Returns\n -------\n float\n a*x + b\n \"\"\"\n return a*x + b\n\nall_models['linear'] = linear\nmodel_para['linear'] = [1., 0]\nmodel_para_num['linear'] = 2\n\ndef logx_linear(x, a, b):\n \"\"\"logx linear\n\n Parameters\n ----------\n x : int\n a : float\n b : float\n\n Returns\n -------\n float\n a * np.log(x) + b\n \"\"\"\n x = np.log(x)\n return a*x + b\n\nall_models['logx_linear'] = logx_linear\nmodel_para['logx_linear'] = [0.378106, 0.046506]\nmodel_para_num['logx_linear'] = 2\n\ndef dr_hill_zero_background(x, theta, eta, kappa):\n \"\"\"dr hill zero background\n\n Parameters\n ----------\n x : int\n theta : float\n eta : float\n kappa : float\n\n Returns\n -------\n float\n (theta* x**eta) / (kappa**eta + x**eta)\n \"\"\"\n return (theta* x**eta) / (kappa**eta + x**eta)\n\nall_models['dr_hill_zero_background'] = dr_hill_zero_background\nmodel_para['dr_hill_zero_background'] = [0.772320, 0.586449, 2.460843]\nmodel_para_num['dr_hill_zero_background'] = 3\n\ndef log_power(x, a, b, c):\n \"\"\"\"logistic power\n\n Parameters\n ----------\n x : int\n a : float\n b : float\n c : float\n\n Returns\n -------\n float\n a/(1.+(x/np.exp(b))**c)\n \"\"\"\n return a/(1.+(x/np.exp(b))**c)\n\nall_models['log_power'] = log_power\nmodel_para['log_power'] = [0.77, 2.98, -0.51]\nmodel_para_num['log_power'] = 3\n\ndef pow4(x, alpha, a, b, c):\n \"\"\"pow4\n\n Parameters\n ----------\n x : int\n alpha : float\n a : float\n b : float\n c : float\n\n Returns\n -------\n float\n c - (a*x+b)**-alpha\n \"\"\"\n return c - (a*x+b)**-alpha\n\nall_models['pow4'] = pow4\nmodel_para['pow4'] = [0.1, 200, 0., 0.8]\nmodel_para_num['pow4'] = 4\n\ndef mmf(x, alpha, beta, kappa, delta):\n \"\"\"Morgan-Mercer-Flodin\n http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm\n\n Parameters\n ----------\n x : int\n alpha : float\n beta : float\n kappa : float\n delta : float\n\n Returns\n -------\n float\n alpha - (alpha - beta) / (1. + (kappa * x)**delta)\n \"\"\"\n return alpha - (alpha - beta) / (1. 
+ (kappa * x)**delta)\n\nall_models['mmf'] = mmf\nmodel_para['mmf'] = [0.7, 0.1, 0.01, 5]\nmodel_para_num['mmf'] = 4\n\ndef exp4(x, c, a, b, alpha):\n \"\"\"exp4\n\n Parameters\n ----------\n x : int\n c : float\n a : float\n b : float\n alpha : float\n\n Returns\n -------\n float\n c - np.exp(-a*(x**alpha)+b)\n \"\"\"\n return c - np.exp(-a*(x**alpha)+b)\n\nall_models['exp4'] = exp4\nmodel_para['exp4'] = [0.7, 0.8, -0.8, 0.3]\nmodel_para_num['exp4'] = 4\n\ndef ilog2(x, c, a):\n \"\"\"ilog2\n\n Parameters\n ----------\n x : int\n c : float\n a : float\n\n Returns\n -------\n float\n c - a / np.log(x)\n \"\"\"\n return c - a / np.log(x)\n\nall_models['ilog2'] = ilog2\nmodel_para['ilog2'] = [0.78, 0.43]\nmodel_para_num['ilog2'] = 2\n\ndef weibull(x, alpha, beta, kappa, delta):\n \"\"\"Weibull model\n http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm\n\n Parameters\n ----------\n x : int\n alpha : float\n beta : float\n kappa : float\n delta : float\n\n Returns\n -------\n float\n alpha - (alpha - beta) * np.exp(-(kappa * x)**delta)\n \"\"\"\n return alpha - (alpha - beta) * np.exp(-(kappa * x)**delta)\n\nall_models['weibull'] = weibull\nmodel_para['weibull'] = [0.7, 0.1, 0.01, 1]\nmodel_para_num['weibull'] = 4\n\ndef janoschek(x, a, beta, k, delta):\n \"\"\"http://www.pisces-conservation.com/growthhelp/janoschek.htm\n\n Parameters\n ----------\n x : int\n a : float\n beta : float\n k : float\n delta : float\n\n Returns\n -------\n float\n a - (a - beta) * np.exp(-k*x**delta)\n \"\"\"\n return a - (a - beta) * np.exp(-k*x**delta)\n\nall_models['janoschek'] = janoschek\nmodel_para['janoschek'] = [0.73, 0.07, 0.355, 0.46]\nmodel_para_num['janoschek'] = 4\n" ]
[ [ "numpy.exp", "numpy.log" ] ]
jkcso/oss2021
[ "f5c2dc76670027ec4d508b46a81345ce711ae5e8" ]
[ "notebooks/api/oss_hugo/OSS_Schedule.py" ]
[ "import pandas as pd\nfrom oss_hugo.API_Hugo_OSS import API_Hugo_OSS\n\nclass OSS_Schedule:\n def __init__(self):\n self.hugo = API_Hugo_OSS()\n\n def sessions_mapped_by_size(self):\n\n mapping = []\n for path, session in self.hugo.sessions().items():\n content = session.get('content')\n metadata = session.get('metadata')\n page_type = metadata.get('type')\n title = metadata.get('title')\n track = metadata.get('track')\n organizers = metadata.get('organizers')\n participants = metadata.get('participants')\n if not organizers: organizers = []\n if not participants: participants = []\n if type(organizers) is str: organizers = organizers.split(',')\n if type(participants) is str: participants = participants.split(',')\n if 'TBD' in organizers: organizers.remove('TBD')\n if 'Pending' in organizers: organizers.remove('Pending')\n if 'you ?' in participants: participants.remove('you ?')\n\n if title and page_type:\n item = {\n 'title': title,\n 'track': track,\n 'page_type': page_type,\n 'organizers': organizers,\n 'participants': participants,\n 'content': len(content),\n 'path': path\n }\n mapping.append(item)\n\n df_mappings = pd.DataFrame(mapping)\n df_mappings = df_mappings[['title', 'track', 'page_type', 'content', 'organizers', 'participants']]\n df_sessions = df_mappings[df_mappings['page_type'] != 'track']\n df_sessions = df_sessions.sort_values(['content'], ascending=False).reset_index(drop=True)\n return df_sessions\n\n #todo get the result below using pandas\n def df_sessions_registered_participants(self):\n results = {}\n for key, value in self.hugo.df_participants().to_dict(orient='index').items():\n title = value.get('title')\n sessions = value.get('sessions')\n for session in sessions:\n if results.get(session) is None: results[session] = []\n results[session].append(title)\n mappings = []\n for key, value in results.items():\n mappings.append({'title': key, 'participants': value, 'participants_count': len(value)})\n df_mappings = pd.DataFrame(mappings)\n df_mappings = df_mappings[['title', 'participants_count', 'participants']].sort_values(['participants_count'], ascending=False)\n return df_mappings" ]
[ [ "pandas.DataFrame" ] ]
ZeyuSun/flare-prediction-smarp
[ "ad60163eb83b47ba39e898beb387031d349e2ed6", "ad60163eb83b47ba39e898beb387031d349e2ed6" ]
[ "arnet/modeling/learner.py", "arnet/fusion.py" ]
[ "import os\nimport logging\nfrom typing import Dict, Union\nfrom datetime import timedelta\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport mlflow\nimport torch\nimport pytorch_lightning as pl\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\n\nfrom arnet import utils\nfrom arnet.modeling.models import build_model\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_test_logger(logged_learner):\n logger = pl.loggers.TensorBoardLogger(\n logged_learner.logger_save_dir,\n name=logged_learner.logger_name,\n version=logged_learner.logger_version + '_test'\n )\n return logger\n\n\nclass Learner(pl.LightningModule):\n def __init__(self, cfg):\n \"\"\"\n model: torch.nn.Module\n cfg: model-agnostic experiment configs\n \"\"\"\n #super(Learner, self).__init__()\n super().__init__()\n self.cfg = cfg\n self.image = 'MAGNETOGRAM' in cfg.DATA.FEATURES\n self.model = build_model(cfg)\n self.save_hyperparameters() # write to self.hparams. when save model, they are # responsible for tensorboard hp_metric\n\n def forward(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def on_load_checkpoint(self, checkpoint) -> None:\n # log_dev / lightning_logs / version_0 / checkpoints / epoch=0-step=4.ckpt\n # =======================================\n # save_dir / (name) (version)\n # ------- root_dir ---------/\n # ------------ log_dir ----------------/\n # ckpt_list = checkpoint['hyper_parameters']['cfg']['LEARNER']['CHECKPOINT'].split('/')\n # self.logger_save_dir, self.logger_name, self.logger_version = (\n # ckpt_list[-5], ckpt_list[-4], ckpt_list[-3])\n # I gave up modifying test log dir because it requires checkpoint['callbacks'][\"ModelCheckpoint{'monitor': 'validation0/tss', 'mode': 'max', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': True}\"]['best_model_path']\n pass\n\n def grad_norm(self, norm_type: Union[float, int, str]) -> Dict[str, float]:\n \"\"\"Compute each parameter's gradient's norm and their overall norm.\n\n The overall norm is computed over all gradients together, as if they\n were concatenated into a single vector.\n\n Args:\n norm_type: The type of the used p-norm, cast to float if necessary.\n Can be ``'inf'`` for infinity norm.\n\n Return:\n norms: The dictionary of p-norms of each parameter's gradient and\n a special entry for the total p-norm of the gradients viewed\n as a single vector.\n \"\"\"\n #norm_type = float(norm_type)\n\n norms, all_norms = {}, []\n for name, p in self.named_parameters():\n if name.split('.')[0] == 'model':\n name = name[6:]\n\n if p.grad is None:\n continue\n\n param_norm = float(p.data.norm(norm_type))\n grad_norm = float(p.grad.data.norm(norm_type))\n norms[f'grad_{norm_type}_norm/{name}'] = {\n 'param': param_norm,\n 'grad': grad_norm,\n }\n\n all_norms.append(param_norm)\n\n total_norm = float(torch.tensor(all_norms).norm(norm_type))\n norms[f'grad_{norm_type}_norm/total'] = round(total_norm, 3)\n\n return norms\n\n def _check_nan_loss(self, loss):\n if torch.isnan(loss):\n norms = self.grad_norm(1)\n import json\n print(json.dumps(norms, indent=2))\n\n def training_step(self, batch, batch_idx):\n loss = self.model.get_loss(batch)\n self._check_nan_loss(loss)\n\n # Scalar(s)\n self.log('train/loss', loss)\n mlflow.log_metric('train/loss', loss.item(), step=self.global_step)\n mlflow.log_metric('train/epoch', self.trainer.current_epoch, step=self.global_step)\n\n if self.image:\n # Text\n if 
self.global_step in [0] or batch_idx == 0:\n self.log_meta(self.model.result)\n\n # Input videos (padded)\n if False: #self.global_step in [0] or batch_idx == 0:\n self.log_video('train/inputs', x)\n\n # Layer weight\n # not changing fast enough within first epoch\n if False: #self.current_epoch == 0 and batch_idx in [0, 1, 2, 5, 10, 20, 50, 100]:\n self.log_layer_weights('weight', ['convs.conv1'])\n\n # Middle layer features\n if False: #self.global_step in [0] or batch_idx == 0:\n self.log_layer_activations('train features', self.model.result['video'], self.cfg.LEARNER.VIS.ACTIVATIONS)\n\n # Weight histograms\n if True: #self.global_step in [0] or batch_idx == 0:\n for layer_name in self.cfg.LEARNER.VIS.HISTOGRAM:\n self.logger.experiment.add_histogram(\"weights/{} kernel\".format(layer_name),\n utils.get_layer(self.model, layer_name).weight, self.global_step)\n\n self.logger.experiment.flush()\n return {'loss': loss}\n\n def validation_step(self, batch, batch_idx, dataloader_idx):\n loss = self.model.get_loss(batch)\n\n result = self.model.result\n result.update({'val_loss': loss})\n return result\n\n def validation_epoch_end(self, outputs):\n for dataloader_idx, dataloader_outputs in enumerate(outputs):\n tag = f'validation{dataloader_idx}'\n avg_val_loss = torch.stack([out['val_loss'] for out in dataloader_outputs]).mean()\n self.log(tag + '/loss', avg_val_loss)\n mlflow.log_metric(tag + '/loss', avg_val_loss.item(), step=self.global_step)\n\n if True:\n #step = -1 if self.global_step == 0 else None # before training\n step = None # use global_step\n self.log_layer_weights('weight', ['convs.conv1'], step=step)\n\n y_true = torch.cat([out['y_true'] for out in dataloader_outputs])\n y_prob = torch.cat([out['y_prob'] for out in dataloader_outputs])\n self.trainer.datamodule.fill_prob(tag, self.global_step, y_prob.detach().cpu().numpy())\n scores, cm2, _ = utils.get_metrics_probabilistic(y_true, y_prob, criterion=None)\n self.log_scores(tag, scores, step=self.global_step) # pp.pprint(scores)\n self.log_cm(tag + '/cm2', cm2, step=self.global_step)\n self.log_eval_plots(tag, y_true, y_prob, step=self.global_step)\n mlflow.log_artifacts(self.logger.log_dir, 'tensorboard/train_val')\n\n def test_step(self, batch, batch_idx):\n loss = self.model.get_loss(batch)\n result = self.model.result\n result.update({'test_loss': loss})\n return result\n\n def test_epoch_end(self, outputs):\n avg_test_loss = torch.stack([out['test_loss'] for out in outputs]).mean()\n self.log('test/loss', avg_test_loss)\n y_true = torch.cat([out['y_true'] for out in outputs])\n y_prob = torch.cat([out['y_prob'] for out in outputs])\n self.trainer.datamodule.fill_prob('test', self.global_step, y_prob.detach().cpu().numpy())\n scores, cm2, thresh = utils.get_metrics_probabilistic(y_true, y_prob, criterion=None)\n #self.thresh = thresh\n logger.info(scores)\n logger.info(cm2)\n self.log_scores('test', scores)\n self.log_cm('test/cm2', cm2)\n self.log_eval_plots('test', y_true, y_prob)\n mlflow.log_artifacts(self.logger.log_dir, 'tensorboard/test')\n\n def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None):\n _ = self.model.get_loss(batch)\n y_prob = self.model.result['y_prob']\n ###\n #self.thresh = 0.5\n ###\n return y_prob #y_prob >= 0.5 #self.thresh\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.cfg.LEARNER.LEARNING_RATE)\n\n def on_train_end(self):\n for tag, df in self.trainer.datamodule.val_history.items():\n if tag == 'test':\n continue # 
val_history['test'] does not update every epoch.\n tmp_path = 'outputs/val_predictions.csv'\n df.to_csv(tmp_path)\n mlflow.log_artifact(tmp_path, tag) # tag in ['validation0', ..., 'test']\n\n def on_test_end(self):\n tmp_path = 'outputs/test_predictions.csv'\n self.trainer.datamodule.val_history['test'].to_csv(tmp_path)\n mlflow.log_artifact(tmp_path, 'test')\n\n def log_meta(self, outputs, step=None):\n video = outputs['video']\n meta = outputs['meta']\n video = video.detach().cpu().numpy()\n y_true = outputs['y_true'].detach().cpu().numpy()\n y_prob = outputs['y_prob'].detach().cpu().numpy()\n info = utils.generate_batch_info_classification(video, meta, y_true=y_true, y_prob=y_prob)\n step = step or self.global_step\n self.logger.experiment.add_text(\"batch info\", info.to_markdown(), step)\n return info\n\n def log_video(self, tag, video, size=None, normalized=False, step=None):\n from skimage.transform import resize\n size = np.round(size.detach().cpu().numpy() * [38, 78] + [78, 157]).astype(int)\n\n # video: [N, C, T, H, W]\n if video.shape[0] > 8:\n video = video[:8]\n video = video.detach().permute(0, 2, 1, 3, 4).to('cpu', non_blocking=True) # convert to numpy may not be efficient in production\n # (N,C,D,H,W) -> (N,T,C,H,W)\n step = step or self.global_step\n if not normalized:\n video = utils.array_to_float_video(video * 50, low=-200, high=200, perc=False)\n self.logger.experiment.add_video(tag, video, step, fps=10)\n vs = video.detach().cpu().numpy()\n for i, v in enumerate(vs):\n for j, image in enumerate(v):\n image = image.transpose(1,2,0)\n if size is not None:\n image = resize(image, size[i])\n mlflow.log_image(image, tag+f'/{i}_{j}.png')\n\n def log_layer_weights(self, tag, layer_names, step=None):\n step = step or self.global_step\n from arnet.modeling.models import MODEL_REGISTRY\n if (isinstance(self.model, MODEL_REGISTRY.get('CNN_Li2020')) or\n isinstance(self.model, MODEL_REGISTRY.get('SimpleC3D'))):\n for layer_name in layer_names:\n layer = utils.get_layer(self.model, layer_name)\n if isinstance(layer, torch.nn.Conv3d):\n # Unscaled\n fig = utils.draw_conv2d_weight(layer.weight)\n image_tensor = utils.fig2rgb(fig)\n save_name = tag + f'/unscaled/{layer_name}'\n self.logger.experiment.add_image(save_name, image_tensor, step)\n save_name += f'/{step}.png'\n mlflow.log_figure(fig, save_name)\n\n # Set vmin vmax\n fig = utils.draw_conv2d_weight(layer.weight, vmin=-0.3, vmax=0.3) # -1/+1 for lr 1e-2\n image_tensor = utils.fig2rgb(fig)\n save_name = tag + f'/uniform_scaled/{layer_name}'\n self.logger.experiment.add_image(save_name, image_tensor, step)\n save_name += f'/{step}.png'\n mlflow.log_figure(fig, save_name)\n\n def log_layer_activations(self, tag, x, layer_names, step=None):\n step = step or self.global_step\n import copy\n model = copy.copy(self.model) # shallow copy, the original model keeps training mode and no activation hook attached\n activations = utils.register_activations(model, layer_names)\n model.eval()\n _ = self.model(x)\n for layer_name in activations:\n features = activations[layer_name].detach().cpu()\n if features.shape[0] > 8:\n features = features[:8]\n for c in range(features.shape[1]):\n features_c = features[:,[c],:,:,:].permute(0,2,1,3,4)\n features_c = utils.array_to_float_video(features_c, 0.1, 99.9)\n self.logger.experiment.add_video(\n '{}/{}/ch{}'.format(tag, layer_name, c),\n features_c,\n step)\n\n def log_scores(self, tag, scores: dict, step=None):\n step = step or self.global_step\n for k, v in scores.items():\n 
#self.logger.experiment.add_scalar(tag + '/' + k, v, step)\n self.log(tag + '/' + k, v) #wield problem\n mlflow.log_metrics({tag + '/' + k: v.item() for k, v in scores.items()},\n step=step)\n\n def log_cm(self, tag, cm, labels=None, step=None):\n step = step or self.global_step\n fig = utils.draw_confusion_matrix(cm.cpu())\n image_tensor = utils.fig2rgb(fig)\n self.logger.experiment.add_image(tag, image_tensor, step)\n mlflow.log_figure(fig, tag + f'/{step}.png')\n\n def log_eval_plots(self, tag, y_true, y_prob, step=None):\n y_true = y_true.detach().cpu()\n y_prob = y_prob.detach().cpu()\n step = step or self.global_step\n\n reliability = utils.draw_reliability_plot(y_true, y_prob, n_bins=10)\n mlflow.log_figure(reliability, tag + f'/reliability/{step}.png')\n reliability = utils.fig2rgb(reliability)\n self.logger.experiment.add_image(tag + '/reliability', reliability, step)\n\n roc = utils.draw_roc(y_true, y_prob)\n mlflow.log_figure(roc, tag + f'/roc/{step}.png')\n roc = utils.fig2rgb(roc)\n self.logger.experiment.add_image(tag + '/roc', roc, step)\n\n ssp = utils.draw_ssp(y_true, y_prob)\n mlflow.log_figure(ssp, tag + f'/ssp/{step}.png')\n ssp = utils.fig2rgb(ssp)\n self.logger.experiment.add_image(tag + '/ssp', ssp, step)\n", "from pathlib import Path\nimport numpy as np\nimport pandas as pd\n\n\ndef load_csv_dataset(csv_path):\n df = pd.read_csv(csv_path, low_memory=False)\n df.loc[:, 'flares'] = df['flares'].fillna('')\n df.loc[:, 'bad_img_idx'] = df['bad_img_idx'].apply(\n lambda s: [int(x) for x in s.strip('[]').split()])\n return df\n\n\ndef load_fusion_dataset(auxdata):\n d = np.load(auxdata, allow_pickle=True).item()\n return d\n\n\ndef fuse_sharp_to_smarp(df, fuse_dict):\n for k, v in fuse_dict.items():\n if k in df.columns:\n df[k] = df[k] * v['coef'] + v['intercept']\n return df\n\n\ndef group_split_data(df, seed=None):\n from sklearn.model_selection import GroupShuffleSplit\n splitter = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=seed)\n train_idx, test_idx = next(splitter.split(df, groups=df['arpnum']))\n return df.iloc[train_idx], df.iloc[test_idx]\n\n\ndef group_split_data_cv(df, cv=5, split=0):\n \"\"\"\n Args:\n cv: number of cv folds\n split: index of the cv fold to return\n Note that GroupKFold is not random\n \"\"\"\n from sklearn.model_selection import GroupKFold\n splitter = GroupKFold(n_splits=cv)\n split_generator = splitter.split(df, groups=df['arpnum'])\n for k, (train_idx, test_idx) in enumerate(split_generator):\n if k == split:\n return df.iloc[train_idx], df.iloc[test_idx]\n\n\ndef rus(df, balanced=False, seed=False):\n \"\"\"Random Undersampling\n\n Args:\n balanced (bool or dict): False means no rus. True means rus to balance.\n Dict specifies the desired samples sizes for the two classes.\n seed: default is False, do not reset seed. 
Pass int/None to reset a\n seed particularly/randomly.\n \"\"\"\n import numpy as np\n if seed is None or isinstance(seed, int):\n np.random.seed(seed)\n\n if balanced == False:\n return df\n neg = np.where(~df['label'])[0]\n pos = np.where(df['label'])[0]\n if balanced == True:\n balanced = {0: len(pos), 1:len(pos)}\n idx = np.concatenate((\n np.random.choice(neg, size=balanced[0], replace=False),\n np.random.choice(pos, size=balanced[1], replace=False),\n ))\n idx = np.sort(idx)\n df = df.iloc[idx].reset_index(drop=True)\n return df\n\n\ndef get_datasets(database, dataset, auxdata,\n balanced=True, validation=False, shuffle=False, seed=None,\n val_split=0, test_split=0,\n balanced_test=False,\n ):\n \"\"\"\n Args:\n sizes: Dict of desired class sizes. None: no rus. 'balanced': balanced rus.\n \"\"\"\n df_smarp = load_csv_dataset(Path(database) / 'smarp.csv')\n df_sharp = load_csv_dataset(Path(database) / 'sharp.csv')\n fuse_dict = load_fusion_dataset(Path(auxdata))\n # Two keys are outdated. fuse_dict =\n #{'MEANGBZ': {'coef': 1.9920261748674042, 'intercept': 8.342889969768606},\n # 'USFLUX': {'coef': 1.216160520290385, 'intercept': -3.8777994451166115e+20},\n # 'R_VALUE': {'coef': 0.8327836641793915, 'intercept': -0.0945601961295528}}\n df_sharp = fuse_sharp_to_smarp(df_sharp, fuse_dict)\n\n # Cross validation split. No randomness.\n if val_split is None and test_split is None:\n # This is how I split before cv was implemented\n if dataset in ['sharp', 'fused_sharp']:\n df_train, df_test = group_split_data(df_sharp, seed=seed)\n if validation:\n df_train, df_val = group_split_data(df_train, seed=seed)\n if dataset == 'fused_sharp':\n df_train = pd.concat((df_train, df_smarp)).reset_index(drop=True)\n elif dataset in ['smarp', 'fused_smarp']:\n df_train, df_test = group_split_data(df_smarp, seed=seed)\n if validation:\n df_train, df_val = group_split_data(df_train, seed=seed)\n if dataset == 'fused_smarp':\n df_train = pd.concat((df_train, df_sharp)).reset_index(drop=True)\n else:\n # If either of them is not None, then this is after cv was implemented\n # We initialize None with 0\n val_split = val_split or 0\n test_split = test_split or 0\n if dataset in ['sharp', 'fused_sharp']:\n df_train, df_test = group_split_data_cv(df_sharp, cv=5, split=test_split)\n if validation:\n df_train, df_val = group_split_data_cv(df_train, cv=5, split=val_split)\n if dataset == 'fused_sharp':\n df_train = pd.concat((df_train, df_smarp)).reset_index(drop=True)\n elif dataset in ['smarp', 'fused_smarp']:\n df_train, df_test = group_split_data_cv(df_smarp, cv=5, split=test_split)\n if validation:\n df_train, df_val = group_split_data_cv(df_train, cv=5, split=val_split)\n if dataset == 'fused_smarp':\n df_train = pd.concat((df_train, df_sharp)).reset_index(drop=True)\n\n # Why rus after split? Strict ratio; Option to rus only train/test differently\n df_train = rus(df_train, balanced=balanced, seed=seed)\n if validation:\n df_val = rus(df_val, balanced=balanced_test, seed=seed)\n df_test = rus(df_test, balanced=balanced_test, seed=seed)\n\n if shuffle:\n df_train = df_train.sample(frac=1, random_state=seed)\n\n if validation:\n return df_train, df_val, df_test\n else:\n return df_train, df_test\n" ]
[ [ "matplotlib.use", "torch.cat", "torch.stack", "torch.isnan", "torch.tensor" ], [ "numpy.random.choice", "sklearn.model_selection.GroupShuffleSplit", "numpy.random.seed", "sklearn.model_selection.GroupKFold", "numpy.load", "numpy.where", "numpy.sort", "pandas.concat", "pandas.read_csv" ] ]
DESIR-CodeSprint/trackA-kickoff
[ "929e23b41a278236caf59db7b9a8f3295b3a09e8" ]
[ "TrackA_python/codesprintapp/views.py" ]
[ "from flask import Flask\nfrom flask import request, session, render_template, json, Response, jsonify, make_response, send_file, redirect, url_for\nimport requests\nimport xml.etree.ElementTree as ET\nimport lxml\nimport pandas as pd\nimport re\n\napp = Flask(__name__)\n\n@app.route('/')\n\ndef index():\n\treturn render_template('process_fulltext.html')\n\n\n@app.route('/process_fulltext', methods = ['GET', 'POST'])\n\ndef process_fulltext():\n\tupload = request.files.get('file', '').read() #puts the uploaded file (in the request)\n\turl = 'http://localhost:8070/api/processFulltextDocument'\n\tfiles = dict(input=upload, teiCoordinates=\"biblStruct\")\n\tr = requests.post(url, files=files)\n\treturn render_template('process_fulltext.html', r=r.text)\n\n# takes a string and removes xml element inside\ndef clean(text):\n\ttext = re.sub(\"<[^>]+>\",\"\", text)\n\ttext = re.sub(\"^\\s+|\\s+$\",\"\", text)\n\treturn text\n\n\n#parses the tei document and creates list of dictionaries out of tei elements\ndef parse_tei(xml):\n\t#data = open(xml)\n\tdata = xml.split('\\n')\n\trefs = []\n\tref = []\n\tstart = False\n\ttitle = \"\"\n\tname = \"\"\n\tdate = \"\"\n\tnames = []\n\tyear = \"\"\n\t#art_name = re.sub(\".*\\/\",\"\")\n\told_ref = {\"title\": \"\", \"name\": \"\", \"date\": \"\", \"year_pub\": \"\"}\n\tfor line in data:\n\t\tif re.match(\".*<date\",line) and start == False:\n\t\t\t\n\t\t\tyear = re.sub(\".*when\\=\\\"\",\"\",line)\n\t\t\tyear = re.sub(\"\\\".*\",\"\",year)[0:4]\n\n\t\tif start == False and re.match(\".*<back\",line):\n\t\t\tstart = True\n\t\tif start == False:\n\t\t\tcontinue\n\t\t\t\n\t\tif re.match(\".*<biblStruct\",line):\n\t\t\t\n\t\t\tif title == \"\":\n\t\t\t\tcontinue\n\t\t\tref = {\"title\": title, \"name\": names, \"date\": date, \"year_pub\": year} \n\t\t\t\n\t\t\tif ref[\"title\"] == old_ref[\"title\"]:\n\t\t\t\n\t\t\t\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\trefs.append(ref)\n\t\t\t\tolf_ref = ref\n\t\t\t\tnames = []\n\t\tif re.match(\".*<title.*type=\\\"main\\\"\",line):\n\t\t\ttitle = clean(line)\n\t\tif re.match(\".*<persName\",line):\n\t\t\tforename = re.sub(\"<\\/forename.*\",\"\",line)\n\t\t\tforename = clean(forename)\n\t\t\t\n\t\t\tsurname = re.sub(\".*<surname\",\"\",line)\n\t\t\tsurname = clean(surname)\n\t\t\tsurname = re.sub(\">\",\". \",surname)\n\t\t\tname = forename+surname\n\t\t\tnames.append(name)\n\t\t\t\n\t\tif re.match(\".*<date\",line):\n\t\t\tdate = re.sub(\".*when\\=\\\"\",\"\",line)\n\t\t\tdate = re.sub(\"\\\".*\",\"\",date)\n\t\t\tdate = date[0:4]\n\t\n\treturn refs\n\n\n# sends request to grobid api to process the pdf and returns data in dataframe to template view\n@app.route('/process_references', methods = ['GET', 'POST'])\n\ndef process_references():\n\tupload = request.files.get('file', '').read() #puts the uploaded file (in the request)\n\turl = 'http://localhost:8070/api/processFulltextDocument'\n\tfiles = dict(input=upload, teiCoordinates=\"biblStruct\")\n\tr = requests.post(url, files=files)\n\ttei_list = parse_tei(r.text)\n\t# increase the column width of pd (standard is only 50px)\n\tpd.set_option('display.max_colwidth', -1)\n\tdf1 = pd.DataFrame(tei_list)\n\t# removing year_pub column\n\tdf1 = df1.drop('year_pub', axis=1)\n\tdf2 = df1.to_json()\n\tdf1 = df1.to_html()\n\t# changing css class in html for dataframe output\n\tdf1 = re.sub(\"dataframe\", \"myTable\", df1)\n\treturn render_template('process_fulltext.html', df1=df1, df2=df2)\n\t\n\nif __name__ == '__main__':\n\tapp.run(debug=True)" ]
[ [ "pandas.DataFrame", "pandas.set_option" ] ]
dayunliu/SMALF
[ "34dc80d338a1f5990a41f4d5041d1a2edcbeeef5" ]
[ "code/get_data.py" ]
[ "# -*- coding: utf-8 -*-\r\nimport xlrd\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nmirna_sim_path = '../data/miRNA_sim.xlsx'\r\ndisease_sim_path = '../data/disease_sim.xlsx'\r\nmirna_disease_ass_path = '../data/miRNA_disease.csv'\r\nmirna_data_dict_path = \"../data/mirna_data_dict.npy\"\r\ndisease_data_dict_path = \"../data/disease_data_dict.npy\"\r\nmirna_data_dict = np.load(mirna_data_dict_path, allow_pickle=True).item()\r\ndisease_data_dict = np.load(disease_data_dict_path, allow_pickle=True).item()\r\n\r\n\r\ndef get_mirna_sim():\r\n data = xlrd.open_workbook(mirna_sim_path)\r\n table = data.sheets()[0]\r\n nrows = table.nrows\r\n mirna_sim_dict = {}\r\n for cnt in range(nrows):\r\n value = table.row_values(cnt)\r\n mirna_sim_dict[cnt+1] = value\r\n return mirna_sim_dict\r\n\r\n\r\ndef get_disease_sim():\r\n data = xlrd.open_workbook(disease_sim_path)\r\n table = data.sheets()[0]\r\n nrows = table.nrows\r\n disease_sim_dict = {}\r\n for cnt in range(nrows):\r\n value = table.row_values(cnt)\r\n\r\n disease_sim_dict[cnt+1] = value\r\n return disease_sim_dict\r\n\r\n\r\ndef get_data(data_path):\r\n mm_sim_dict = get_mirna_sim()\r\n dd_sim_dict = get_disease_sim()\r\n total_sample = []\r\n Label = []\r\n with open(data_path) as f:\r\n for line in f:\r\n item = line.strip().split('\\t')\r\n mirna = int(item[0])\r\n disease = int(item[1])\r\n label = int(item[2])\r\n Label.append(label)\r\n mirna_ver = mm_sim_dict[mirna] + mirna_data_dict[mirna].tolist()\r\n disease_ver = dd_sim_dict[disease] + disease_data_dict[disease].tolist()\r\n ver = mirna_ver + disease_ver\r\n total_sample.append(ver)\r\n total_sample.reverse()\r\n Label.reverse()\r\n return total_sample, Label\r\n\r\n\r\ndef get_train_data():\r\n data_path = '../data/train_data.txt'\r\n total_sample, label = get_data(data_path)\r\n return total_sample, label\r\n\r\n\r\n" ]
[ [ "numpy.load" ] ]
Suyi32/George-private
[ "1a7b3a20da309318dc0b5415547fa6a5f6c331d3" ]
[ "testbed/SubScheduler.py" ]
[ "import numpy as np\nfrom testbed.cluster_env import LraClusterEnv\nfrom testbed.PolicyGradient_CPO import PolicyGradient\n\n\nparams = {\n # 'path': \"Dynamic_large_100\",\n # 'path': \"Dynamic_large_100_limit10\",\n # 'number of containers': 81,\n 'learning rate': 0.015,\n 'nodes per group': 3,\n 'number of nodes in the cluster': 27,\n 'container_limitation per node':8\n }\n\ndef handle_constraint(observation, NUM_NODES):\n\n observation_original = observation.copy()\n mapping_index = []\n # TODO: we could add more constraints here\n list_check = observation[:, :].sum(1) > params['container_limitation per node'] - 1 # >8\n\n if sum(list_check) == NUM_NODES:\n return [],[]\n\n good_index = np.where(list_check == False)[0]\n length = len(good_index)\n index_replace = 0\n for node in range(NUM_NODES):\n if list_check[node]: # bad node\n # index_this_replace = good_index[np.random.randint(length)]\n index_this_replace = good_index[index_replace % length]\n index_replace += 1\n observation[node] = observation_original[index_this_replace]\n mapping_index.append(index_this_replace)\n else:\n mapping_index.append(node)\n observation[node] = observation_original[node]\n\n return observation, mapping_index\n\nclass NineNodeAPI():\n\n def __init__(self, path_name, surffix, path_surffix):\n \"\"\"\n parameters set\n \"\"\"\n self.NUM_NODES = params['number of nodes in the cluster']\n # self.NUM_CONTAINERS = params['number of containers']\n\n # self.sim = Simulator()\n self.env = LraClusterEnv(num_nodes=self.NUM_NODES)\n\n ckpt_path_1 = path_surffix + path_name + \"1\" + \"/model.ckpt\"\n ckpt_path_2 = path_surffix + path_name + \"2\" + \"/model.ckpt\"\n ckpt_path_3 = path_surffix + path_name + \"3\" + \"/model.ckpt\"\n self.nodes_per_group = int(params['nodes per group'])\n # self.number_of_node_groups = int(self.NUM_NODES / self.nodes_per_group)\n \"\"\"\n Build Network\n \"\"\"\n self.n_actions = self.nodes_per_group #: 3 nodes per group\n self.n_features = int(self.n_actions * (self.env.NUM_APPS + 1 + self.env.NUM_APPS) + 1 + self.env.NUM_APPS)\n #: 29\n\n self.RL_1 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '1a')\n\n self.RL_2 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '2a')\n\n self.RL_3 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '3a')\n\n self.RL_1.restore_session(ckpt_path_1)\n self.RL_2.restore_session(ckpt_path_2)\n self.RL_3.restore_session(ckpt_path_3)\n\n self.observation_episode_1, self.action_episode_1, self.reward_episode_1, self.safety_episode_1 = [], [], [], []\n self.observation_optimal_1, self.action_optimal_1, self.reward_optimal_1, self.safety_optimal_1 = [], [], [], []\n\n self.observation_episode_2, self.action_episode_2, self.reward_episode_2, self.safety_episode_2 = [], [], [], []\n self.observation_optimal_2, self.action_optimal_2, self.reward_optimal_2, self.safety_optimal_2 = [], [], [], []\n\n self.observation_episode_3, self.action_episode_3, self.reward_episode_3, self.safety_episode_3 = [], [], [], []\n self.observation_optimal_3, self.action_optimal_3, self.reward_optimal_3, self.safety_optimal_3 = [], [], [], []\n\n def batch_data(self, rnd_array):\n index_data = []\n for i in range(7):\n index_data.extend([i] * rnd_array[i])\n return rnd_array, index_data\n\n def batch_data_sub(self, rnd_array):\n\n rnd_array = 
rnd_array.copy()\n index_data = []\n for i in range(7):\n index_data.extend([i] * int(rnd_array[i]))\n\n return rnd_array, index_data\n\n def store_episode_1(self, observations, actions):\n self.observation_episode_1.append(observations)\n self.action_episode_1.append(actions)\n\n def store_episode_2(self, observations, actions):\n self.observation_episode_2.append(observations)\n self.action_episode_2.append(actions)\n\n def store_episode_3(self, observations, actions):\n self.observation_episode_3.append(observations)\n self.action_episode_3.append(actions)\n\n def get_total_tput(self, rnd_array):\n\n # assert sum(rnd_array) == 81\n source_batch_, index_data = self.batch_data(rnd_array.astype(int)) # index_data = [0,1,2,0,1,2]\n env = LraClusterEnv(num_nodes=self.NUM_NODES)\n observation = env.reset().copy() # (9,9)\n source_batch = source_batch_.copy()\n nodes_per_group = int(params['nodes per group'])\n NUM_CONTAINERS = int(sum(rnd_array))\n\n \"\"\"\n Episode\n \"\"\"\n \"\"\"\n first layer\n \"\"\"\n source_batch_first = source_batch_.copy()\n observation_first_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)\n for inter_episode_index in range(NUM_CONTAINERS):\n appid = index_data[inter_episode_index]\n source_batch_first[appid] -= 1\n observation_first_layer_copy = observation_first_layer.copy()\n observation_first_layer_copy[:, appid] += 1\n\n observation_first_layer_copy = np.append(observation_first_layer_copy, observation_first_layer_copy > 9 * 2, axis=1)\n observation_first_layer_copy = np.append(observation_first_layer_copy, observation_first_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)\n # observation_first_layer_copy = np.append(observation_first_layer_copy, ((observation_first_layer_copy[:, 2] > 0) * (observation_first_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)\n observation_first_layer_copy = np.array(observation_first_layer_copy).reshape(1, -1)\n observation_first_layer_copy = np.append(observation_first_layer_copy, appid).reshape(1, -1)\n observation_first_layer_copy = np.append(observation_first_layer_copy, np.array(source_batch_first)).reshape(1, -1)\n\n action_1, prob_weights = self.RL_1.choose_action_determine(observation_first_layer_copy.copy())\n\n observation_first_layer[action_1, appid] += 1\n\n # self.store_episode_1(observation_first_layer_copy, action_1)\n\n \"\"\"\n second layer\n \"\"\"\n observation_second_layer_aggregation = np.empty([0, env.NUM_APPS], int) # 9*20\n\n number_cont_second_layer = []\n\n for second_layer_index in range(nodes_per_group):\n\n rnd_array = observation_first_layer[second_layer_index].copy()\n source_batch_second, index_data = self.batch_data_sub(rnd_array)\n\n observation_second_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)\n\n NUM_CONTAINERS_second = sum(source_batch_second)\n\n number_cont_second_layer.append(NUM_CONTAINERS_second)\n\n for inter_episode_index in range(NUM_CONTAINERS_second):\n appid = index_data[inter_episode_index]\n source_batch_second[appid] -= 1\n observation_second_layer_copy = observation_second_layer.copy()\n observation_second_layer_copy[:, appid] += 1\n\n observation_second_layer_copy = np.append(observation_second_layer_copy, observation_second_layer_copy > 3 * 2, axis=1)\n observation_second_layer_copy = np.append(observation_second_layer_copy, observation_second_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)\n # observation_second_layer_copy = np.append(observation_second_layer_copy, ((observation_second_layer_copy[:, 2] > 0) * 
(observation_second_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)\n observation_second_layer_copy = np.array(observation_second_layer_copy).reshape(1, -1)\n observation_second_layer_copy = np.append(observation_second_layer_copy, appid).reshape(1, -1)\n observation_second_layer_copy = np.append(observation_second_layer_copy, np.array(source_batch_second)).reshape(1, -1)\n\n action_2, prob_weights = self.RL_2.choose_action_determine(observation_second_layer_copy.copy())\n\n observation_second_layer[action_2, appid] += 1\n\n # self.store_episode_2(observation_second_layer_copy, action_2)\n\n observation_second_layer_aggregation = np.append(observation_second_layer_aggregation, observation_second_layer, 0)\n\n \"\"\"\n third layer\n \"\"\"\n observation_third_layer_aggregation = np.empty([0, env.NUM_APPS], int) # 9*20\n number_cont_third_layer = []\n\n for third_layer_index in range(nodes_per_group * nodes_per_group):\n\n rnd_array = observation_second_layer_aggregation[third_layer_index].copy()\n source_batch_third, index_data = self.batch_data_sub(rnd_array)\n\n observation_third_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)\n\n NUM_CONTAINERS_third = sum(source_batch_third)\n number_cont_third_layer.append(NUM_CONTAINERS_third)\n\n for inter_episode_index in range(NUM_CONTAINERS_third):\n appid = index_data[inter_episode_index]\n source_batch_third[appid] -= 1\n observation_third_layer_copy = observation_third_layer.copy()\n observation_third_layer_copy[:, appid] += 1\n\n observation_third_layer_copy = np.append(observation_third_layer_copy, observation_third_layer_copy > 1 * 2, axis=1)\n observation_third_layer_copy = np.append(observation_third_layer_copy, observation_third_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)\n # observation_third_layer_copy = np.append(observation_third_layer_copy, ((observation_third_layer_copy[:, 2] > 0) * (observation_third_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)\n observation_third_layer_copy = np.array(observation_third_layer_copy).reshape(1, -1)\n observation_third_layer_copy = np.append(observation_third_layer_copy, appid).reshape(1, -1)\n observation_third_layer_copy = np.append(observation_third_layer_copy, np.array(source_batch_third)).reshape(1, -1)\n\n action_3, prob_weights = self.RL_3.choose_action_determine(observation_third_layer_copy.copy())\n\n observation_third_layer[action_3, appid] += 1\n\n # self.store_episode_3(observation_third_layer_copy, action_3)\n\n observation_third_layer_aggregation = np.append(observation_third_layer_aggregation, observation_third_layer, 0)\n\n \"\"\"\n After an entire allocation, calculate total throughput, reward\n \"\"\"\n env.state = observation_third_layer_aggregation.copy()\n assert sum(sum(env.state)) == NUM_CONTAINERS\n assert (env.state.sum(0) == source_batch_).all()\n \"\"\"\n After an entire allocation, calculate total throughput, reward\n \"\"\"\n # state = env.state\n # assert sum(sum(self.env.state)) == 81\n\n return env.state\n\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.zeros", "numpy.where", "numpy.append" ] ]
drdavis/humba
[ "20131ed9bc4bd0ab0af8e0da2940992221c64575" ]
[ "humba/core.py" ]
[ "\"\"\"\nmodule housing core library functionality\n\"\"\"\n\nimport numpy as np\nfrom typing import Optional, Tuple\n\nimport humba.jits as jits\n\n\ndef histogram(\n x: np.ndarray,\n bins: int = 10,\n range: Tuple[float, float] = (0, 10),\n weights: Optional[np.ndarray] = None,\n flow: bool = False,\n) -> Tuple[np.ndarray, Optional[np.ndarray], np.ndarray]:\n \"\"\"Calculate the histogram for the data ``x``.\n\n Parameters\n ----------\n x : :obj:`numpy.ndarray`\n data to histogram\n bins : int\n number of bins\n range : (float, float)\n axis range\n weights : :obj:`numpy.ndarray`, optional\n array of weights for ``x``\n flow : bool\n include over and underflow content in first and last bins\n\n Returns\n -------\n count : :obj:`numpy.ndarray`\n The values of the histogram\n error : :obj:`numpy.ndarray`, optional\n The poission uncertainty on the bin heights\n edges : :obj:`numpy.ndarray`\n The bin edges\n\n Notes\n -----\n If the dtype of the ``weights`` is not the same as ``x``, then it\n is converted to the dtype of ``x``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from humba import histogram\n >>> x = np.random.randn(100000)\n >>> w = np.random.uniform(0.4, 0.5, x.shape[0])\n >>> hist1, _, edges = humba.histogram(x, bins=50, range=(-5, 5))\n >>> hist2, _, edges = humba.histogram(x, bins=50, range=(-5, 5), flow=True)\n >>> hist3, error, edges = histogram(x, bins=50, range=(-5, 5), weights=w)\n >>> hist4, error, edges = histogram(x, bins=50, range=(-3, 3), weights=w, flow=True)\n\n \"\"\"\n edges = np.linspace(range[0], range[1], bins + 1)\n if weights is not None:\n assert x.shape == weights.shape, \"x and weights must have identical shape\"\n if x.dtype == np.float64:\n hfunc = jits._hfloat64_weighted\n elif x.dtype == np.float32:\n hfunc = jits._hfloat32_weighted\n else:\n raise TypeError(\"dtype of input must be float32 or float64\")\n res, err = hfunc(x, weights.astype(x.dtype), bins, range[0], range[1], flow)\n return (res, err, edges)\n else:\n if x.dtype == np.float64:\n hfunc = jits._hfloat64\n elif x.dtype == np.float32:\n hfunc = jits._hfloat32\n else:\n raise TypeError(\"dtype of input must be float32 or float64\")\n res = hfunc(x, bins, range[0], range[1], flow)\n return (res, None, edges)\n\n\ndef mwv_histogram(\n x: np.ndarray,\n weights: np.ndarray,\n bins: int = 10,\n range: Tuple[float, float] = (0, 10),\n flow: bool = False,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Histogram the same data but with multiple weight variations.\n\n Parameters\n ----------\n x : :obj:`numpy.ndarray`\n data to histogram\n weights : :obj:`numpy.ndarray`, optional\n multidimensional array of weights for ``x`` the first element\n of the ``shape`` attribute must be equal to the length of ``x``.\n bins : int\n number of bins\n range : (float, float)\n axis range\n flow : bool\n include over and underflow content in first and last bins\n\n Returns\n -------\n count : :obj:`numpy.ndarray`\n The values of the histograms calculated from the weights\n Shape will be (bins, ``weights.shape[0]``)\n error : :obj:`numpy.ndarray`\n The poission uncertainty on the bin heights (shape will be\n the same as ``count``.\n edges : :obj:`numpy.ndarray`\n The bin edges\n\n Notes\n -----\n If ``x`` is not the same dtype as ``weights``, then it is converted\n to the dtype of ``weights`` (for multi weight histograms we expect\n the weights array to be larger than the data array so we prefer to\n cast the smaller chunk of data).\n\n \"\"\"\n edges = np.linspace(range[0], range[1], bins + 
1)\n assert x.shape[0] == weights.shape[0], \"weights shape is not compatible with x\"\n if weights.dtype == np.float64:\n hfunc = jits._hfloat64_multiweights\n elif weights.dtype == np.float32:\n hfunc = jits._hfloat32_multiweights\n else:\n raise TypeError(\"dtype of input must be float32 or float64\")\n res, err = hfunc(x.astype(weights.dtype), weights, bins, range[0], range[1], flow)\n return (res, err, edges)\n" ]
[ [ "numpy.linspace" ] ]
jacobhepkema/scover
[ "de3c81b61ab7fcf7bed74571ce5ac1246e6b58b7" ]
[ "bin/scover_utils.py" ]
[ "def onehot_encode_seq(sequence, m=0, padding=False):\n \"\"\"Converts a given IUPAC DNA sequence to a one-hot \n encoded DNA sequence. \n \"\"\"\n import numpy as np\n import torch\n \n valid_keys = ['a','c','g','t','u','n','r','y','s','w','k','m']\n \n nucs = {'a':0,'c':1,'g':2,'t':3,'u':3}\n \n if padding:\n assert m != 0, \"If using padding, m should be bigger than 0\"\n padding_mat = np.tile(0.25,(m-1,4))\n onehot = np.tile(.0,(len(sequence),4))\n \n for i,char in enumerate(sequence.lower()):\n if char not in valid_keys:\n sys.exit(\"invalid char in sequence (choose from acgt and nryswkm)\")\n elif char == 'n':\n onehot[i,:] = 0.25\n elif char == 'r':\n onehot[i,(0,2)] = 0.5\n elif char == 'y':\n onehot[i,(1,3)] = 0.5\n elif char == 's':\n onehot[i,(1,2)] = 0.5\n elif char == 'w':\n onehot[i,(0,3)] = 0.5\n elif char == 'k':\n onehot[i,(2,3)] = 0.5\n elif char == 'm':\n onehot[i,(0,1)] = 0.5\n else:\n onehot[i,nucs[char]] = 1\n \n if padding:\n onehot = np.concatenate((padding_mat, onehot, padding_mat))\n\n return onehot\n\ndef save_meme(motifs_ppm_dict, output_file=\"found_motifs.meme\"):\n \"\"\"Saves the found PPMs (given as dictionary) to a file that's\n compatible with MEME suite applications.\n \"\"\"\n import pandas as pd\n\n meme_string = [\"MEME version 4\", \"\", \"ALPHABET= ACGT\", \"\", \"strands: + -\", \"\"]\n for idx,key in enumerate(motifs_ppm_dict.keys()):\n curr_motif = pd.DataFrame(motifs_ppm_dict[key])\n s1 = \"MOTIF \" + str(key)\n s2 = \"letter-probability matrix: alength= \" + str(curr_motif.shape[1]) + \" w= \" + str(curr_motif.shape[0])\n s3 = curr_motif.to_csv(sep=\"\\t\", index=False, header=False)\n meme_string = meme_string + [s1, s2, s3]\n \n meme_string = \"\\n\".join(meme_string)\n \n with open(output_file, 'w') as the_file:\n the_file.write(meme_string)\n \n print(\"wrote meme list\")\n\ndef align_conv_filters(model, input_seqs, m, train_ind):\n \"\"\"Aligns the convolutional filters of a given scover model back\n to the given input sequences at the given indices. \n \"\"\"\n # Motif analysis\n import numpy as np\n import torch\n from tqdm import trange\n\n activation_seqs = input_seqs[train_ind]\n \n with torch.no_grad():\n model.eval()\n activations = model.conv_1(activation_seqs).cpu().detach().numpy().squeeze()\n \n n_seq = activation_seqs.shape[0] \n activation_seqs = activation_seqs.squeeze()\n seq_len = activation_seqs.shape[1]\n d = activations.shape[1]\n \n motifs_pfm_dict = dict() # store pfms in this dict\n motifs_ppm_dict = dict() # store pwms in this dict\n \n # cycle through convolutional filters\n for filter_num in trange(d):\n \n # select activations for filter. 
new array = nseq x length seq\n curr_activation = activations[:,filter_num,:] \n \n # get those sequences that have positive values\n seq_has_pos_vals = np.argwhere(np.amax(curr_activation, axis=1) > 0)[:,0]\n \n # in the case that there is a minmum of 10 sequences that activate the filter\n if seq_has_pos_vals.shape[0] > 10: \n \n # per sequence, get position of maximum activation\n per_seq_where_max_pos = np.argmax(curr_activation[seq_has_pos_vals], axis=1)\n curr_activation_seqs = activation_seqs[seq_has_pos_vals]\n curr_str_list = []\n # go through sequences and save to curr_str_list\n for i in range(seq_has_pos_vals.shape[0]): \n \n # maximum activation\n curr_max = per_seq_where_max_pos[i] \n # get subsequence that activated filter (max 1 per seq)\n curr_str_list.append(curr_activation_seqs[i][curr_max:(curr_max+m)]) \n \n # put them together in a numpy array\n sequence_array = np.stack(curr_str_list)\n # get sum per position\n sequence_array_summed = np.sum(sequence_array,axis=0) \n # save pfm\n motifs_pfm_dict[str(filter_num)] = sequence_array_summed \n \n # get counts per row\n row_sums = np.sum(sequence_array_summed, axis=1)\n # convert pfm to ppm\n sequence_array_summed = np.nan_to_num(sequence_array_summed / row_sums[:, np.newaxis])\n motifs_ppm_dict[str(filter_num)] = sequence_array_summed\n \n return motifs_pfm_dict, motifs_ppm_dict\n\ndef randomize_sequences(sequences):\n \"\"\"Randomly permutes a set of DNA sequences.\n \"\"\"\n import random\n shuffled_seqs = []\n for seq in sequences:\n shuffled_seqs.append(''.join(random.sample(seq, len(seq))))\n return shuffled_seqs\n" ]
[ [ "numpy.concatenate", "numpy.nan_to_num", "pandas.DataFrame", "torch.no_grad", "numpy.sum", "numpy.tile", "numpy.stack", "numpy.argmax", "numpy.amax" ] ]
Kappers/oscirhythm
[ "3114d5fb7a65c256959d594abd48b7574f82f01f" ]
[ "model/sin_example_gfnn.py" ]
[ "'''\nTest script for GrFNN, plotting the entrainment for a sin wave of changing frequency.\n\n@author T. Kaplan\n'''\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport time\n\nfrom gfnn import FrequencyType, FrequencyDist, ZParams, GrFNN\nfrom plot import spectro_plot\n\n# Construct our model by instantiating the class defined above\ndim_in = 300\nfreq_dist = FrequencyDist(0.25, 6.0, dim_in, FrequencyType.LINEAR)\nzparams = ZParams()\nmodel = GrFNN(freq_dist, zparams, fs=160)\n\n# Stimulus - 50 seconds of FHz sin, at a changing frequency (4->2)\nF = 4\nt1 = np.arange(0, 25, model.dt)\nx1 = np.sin(2 * np.pi * F * t1) * 0.25\nt2 = np.arange(25, 50, model.dt)\nx2 = np.sin(2 * np.pi * F/2 * t2) * 0.25\n\n# Prepare an initial plot\nt = np.concatenate([t1, t2])\nx = np.concatenate([x1, x2])\npx = freq_dist.dist\npy = np.zeros(px.shape)\nplt.plot(px, py)\n\nzs = np.empty((len(t), dim_in), dtype=np.complex64)\nt0 = time.time()\n\nfor i in range(len(t)):\n out = model(x[i])\n zs[i] = out\n # Update plot:\n if i % 10 == 0:\n py = np.abs(out)\n plt.gca().lines[0].set_ydata(py)\n plt.gca().relim()\n plt.gca().autoscale_view()\n plt.pause(0.01)\n\nt1 = time.time()\nprint('Took', round(t1-t0, 2))\nplt.show()\n" ]
[ [ "matplotlib.use", "numpy.concatenate", "numpy.sin", "numpy.zeros", "matplotlib.pyplot.plot", "numpy.arange", "matplotlib.pyplot.pause", "numpy.abs", "matplotlib.pyplot.show", "matplotlib.pyplot.gca" ] ]
flarco/n1slutil
[ "2c9f23c2240b4d94507ae799b417060688293979" ]
[ "xutil/database/base.py" ]
[ "# Database Lib\n\"\"\"\nOracle\nPostGresSQL\nSQLite\nSQLServer\nHive\nSpark\n\"\"\"\nimport os, datetime, pandas, time, re\nfrom collections import namedtuple, OrderedDict\n\nimport jmespath\nimport sqlalchemy\nfrom multiprocessing import Queue, Process\n\nfrom xutil.helpers import (\n log,\n elog,\n slog,\n get_exception_message,\n struct,\n now,\n get_databases,\n get_dir_path,\n get_profile,\n get_variables,\n file_exists,\n str_rmv_indent,\n ptable,\n make_rec,\n get_error_str,\n)\nfrom xutil.diskio import read_yaml, write_csvs\n\nconns = {}\n\n_fwklike = lambda k, v: \"lower({}) like lower('{}')\".format(k, v)\n_fwkeq = lambda k, v: \"{} = '{}'\".format(k, v)\n_fw = lambda sep, _fwkop, **kws: sep.join([_fwkop(k, v) for k, v in kws.items()]) # Format WHERE\nfwa = lambda _fwkop=_fwkeq, **kws: _fw(' and ', _fwkop, **kws) # Format WHERE AND\nfwo = lambda _fwkop=_fwkeq, **kws: _fw(' or ', _fwkop, **kws) # Format WHERE OR\nrows_to_dicts = lambda rows: [row._asdict() for row in rows]\n\n\nclass DBConn(object):\n \"\"\"Base class for database connections\"\"\"\n\n _fix_f_name = lambda self, f: f\n _to_text = lambda self, t: t\n\n def __init__(self, conn_dict, profile=None, echo=False):\n \"Inititate connection\"\n self._cred = struct(conn_dict)\n self._cred.kwargs = conn_dict.get('kwargs', {})\n self.name = self._cred.get('name', None)\n self.username = self._cred.get('username', None)\n self.type = self._cred.type\n self.engine = None\n self._cursor_description = None\n self.profile = profile\n self.batch_size = 10000\n self.fetch_size = 20000\n self.echo = echo\n self.connect()\n self.last_connect = now()\n\n # Base Template\n template_base_path = '{}/database/templates/base.yaml'.format(\n get_dir_path())\n self.template_dict = read_yaml(template_base_path)\n\n # Specific Type Template\n template_path = '{}/database/templates/{}.yaml'.format(\n get_dir_path(), self.type)\n temp_dict = read_yaml(template_path)\n\n for key1 in temp_dict:\n # Level 1\n if isinstance(temp_dict[key1], dict):\n if key1 not in self.template_dict:\n self.template_dict[key1] = temp_dict[key1]\n\n # Level 2\n for key2 in temp_dict[key1]:\n # Always Overwrite\n self.template_dict[key1][key2] = temp_dict[key1][key2]\n else:\n # Level 1 Non-Dict Overwrite\n self.template_dict[key1] = temp_dict[key1]\n\n self.variables = self._template('variables')\n\n if os.getenv('PROFILE_YAML'):\n other_vars = get_variables()\n for key in other_vars:\n self.variables[key] = other_vars[key]\n\n self.tmp_folder = self.variables['tmp_folder']\n self.set_variables()\n\n if echo:\n log(\"Connected to {} as {}\".format(self._cred.name, self._cred.user))\n\n def connect(self):\n \"\"\"Connect to Database\"\"\"\n self.engine = self.get_engine()\n self.connection = self.engine.connect()\n \n def close(self):\n \"\"\"Close database connection\"\"\"\n self.conn.connection.close()\n\n def reconnect(self, min_tresh=0):\n \"\"\"Re-Connect to Database if minute threshold reached\"\"\"\n if (now() - self.last_connect).total_seconds() > min_tresh * 60:\n log('Reconnecting to {}...'.format(self.name))\n self.connect()\n self.last_connect = now()\n\n def set_variables(self):\n \"\"\"Set custom variables\"\"\"\n raise Exception(\"Method 'set_variables' is not implemented!\")\n\n def get_dialect(self, echo=False):\n \"\"\"SQLAlchemy dialect\"\"\"\n raise Exception(\"Method 'get_dialect' is not implemented!\")\n\n def get_engine(self, echo=False):\n import sqlalchemy\n if not self.engine:\n self.create_engine(echo=self.echo)\n self.engine_inspect = 
sqlalchemy.inspect(self.engine)\n return self.engine\n\n def check_pk(self, table, fields):\n \"Check Primary key to ensure there are not duplicates\"\n if 'where' in fields.lower():\n fields, where_clause = fields.lower().split('where')\n where_clause = 'where ' + where_clause\n else:\n where_clause = ''\n sql = '''\n select\n '{table}' as table,\n case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result\n from {table}\n {where_clause}\n '''.format(\n table=table,\n fields=fields,\n where_clause=where_clause,\n )\n data = self.query(sql, echo=False)\n headers = self._fields\n print(ptable(headers, data))\n if data[0].pk_result == 'FAIL':\n raise (Exception('PK Text failed for table \"{}\" with fields \"{}\"'.format(\n table, fields)))\n\n def _do_execute(self, sql):\n try:\n self._cursor_description = None\n self.fields = None\n self.result = self.connection.execute(sql)\n self._cursor_description = self.result._cursor_description()\n self._fields = self._get_cursor_fields()\n except Exception as E:\n if 'not open' in get_error_str(E):\n pass # error when Oracle doesn't have a cursor open\n else:\n log(Exception('Error for SQL:\\n' + sql))\n raise E\n\n def execute_multi(self,\n sql,\n dtype='namedtuple',\n limit=None,\n echo=True,\n query_name='Record',\n log=log):\n \"\"\"\n Execute multiple SQL statements separtated by ';'. Returns a generator.\n Example:\n for fields, rows in conn.execute(sql):\n print(fields)\n print(len(rows))\n \"\"\"\n\n self.reconnect(min_tresh=10)\n\n data = None\n fields = None\n rows = []\n message_mapping = {\n 'drop ': 'Dropping {}.',\n 'truncate ': 'Truncating {}.',\n 'select ': 'Selecting {}.',\n 'create ': 'Creating {}.',\n 'insert ': 'Inserting {}.',\n 'alter ': 'Altering {}.',\n 'update ': 'Updating {}.',\n 'delete ': 'Deleting {}.',\n 'exec ': 'Calling Procedure {}.',\n 'grant ': 'Granting {}.',\n }\n\n sqls = sql.split(';')\n\n for sql in sqls:\n if not sql.strip(): continue\n\n sql_ = sql.strip().lower()\n\n for word, message in message_mapping.items():\n if sql_.startswith(word):\n if echo:\n log(\n message.format(' '.join(\n sql_.splitlines()[0].split()[1:3]).upper()))\n break\n\n # Call procedure with callproc\n if sql_.startswith('exec '):\n procedure = sql_[5:].split('(')[0]\n args = sql_[5:].split('(')[1][:-1].replace(\"'\", '').split(',')\n args = [a.strip() for a in args]\n cursor.callproc(procedure, args)\n continue\n\n try:\n self._fields = []\n rows = self.query(\n sql,\n rec_name=query_name,\n dtype=dtype,\n limit=limit,\n echo=echo,\n log=log)\n fields = self._fields\n\n if '-- pk_test:' in sql.lower() and sql_.startswith('create'):\n sql_lines = sql_.splitlines()\n regexp = r'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'\n table = re.findall(regexp, sql_lines[0])[0]\n line = [\n l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')\n ][0]\n fields = line.split(':')[-1]\n self.check_pk(table, fields)\n\n except Exception as E:\n message = get_exception_message().lower()\n\n if sql_.startswith(\n 'drop ') and self.error_msg['table_not_exist'] in message:\n log(\"WARNING: Table already dropped.\")\n else:\n raise E\n\n if not fields: fields = []\n\n yield fields, rows\n\n def execute(self,\n sql,\n dtype='tuple',\n limit=None,\n echo=True,\n query_name='Record',\n log=log):\n \"\"\"Execute SQL, return last result\"\"\"\n self.reconnect(min_tresh=10)\n\n data = None\n fields = None\n rows = []\n message_mapping = {\n 'drop ': 'Dropping {}.',\n 'truncate ': 'Truncating {}.',\n 'select ': 'Selecting {}.',\n 
'create ': 'Creating {}.',\n 'insert ': 'Inserting {}.',\n 'alter ': 'Altering {}.',\n 'update ': 'Updating {}.',\n 'delete ': 'Deleting {}.',\n 'exec ': 'Calling Procedure {}.',\n 'grant ': 'Granting {}.',\n }\n\n sql_ = sql.strip().lower()\n\n for word, message in message_mapping.items():\n if sql_.startswith(word):\n if echo:\n log(\n message.format(' '.join(\n sql_.splitlines()[0].split()[1:3]).upper()))\n break\n\n # Call procedure with callproc\n if sql_.startswith('exec '):\n procedure = sql_[5:].split('(')[0]\n args = sql_[5:].split('(')[1][:-1].replace(\"'\", '').split(',')\n args = [a.strip() for a in args]\n connection = self.engine.raw_connection()\n try:\n cursor = connection.cursor()\n cursor.callproc(procedure, args)\n self._fields = self._get_cursor_fields(cursor_desc=cursor.description)\n rows = list(cursor.fetchall())\n cursor.close()\n connection.commit()\n return fields, rows\n finally:\n connection.close()\n\n try:\n self._fields = []\n rows = self.query(\n sql,\n rec_name=query_name,\n dtype=dtype,\n limit=limit,\n echo=echo,\n log=log)\n fields = self._fields\n\n if '-- pk_test:' in sql.lower() and sql_.startswith('create'):\n sql_lines = sql_.splitlines()\n regexp = r'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'\n table = re.findall(regexp, sql_lines[0])[0]\n line = [\n l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')\n ][0]\n fields = line.split(':')[-1]\n self.check_pk(table, fields)\n\n except Exception as E:\n message = get_exception_message().lower()\n\n if sql_.startswith(\n 'drop ') and self.error_msg['table_not_exist'] in message:\n log(\"WARNING: Table already dropped.\")\n else:\n raise E\n\n if not fields: fields = []\n\n return fields, rows\n\n def insert(self, table, data, echo=False):\n \"\"\"Insert records of namedtuple or dicts\"\"\"\n raise Exception('insert not implemented')\n\n def drop_table(self, table, log=log):\n \"Drop table\"\n\n try:\n sql = self._template('core.drop_table').format(table)\n self._do_execute(sql)\n except Exception as E:\n message = get_exception_message().lower()\n if self._template('error_filter.table_not_exist') in message:\n if self.echo:\n log('Table \"{}\" already dropped.'.format(table))\n else:\n raise E\n\n def create_table(self, table, field_types, drop=False, log=log):\n \"Create table\"\n\n if drop:\n self.drop_table(table, log=log)\n\n new_ftypes = OrderedDict()\n for f in field_types:\n ftype, max_len, dec_len = field_types[f]\n if dec_len:\n suff = '({},{})'.format(max_len, dec_len)\n elif max_len:\n suff = '({})'.format(max_len)\n else:\n suff = ''\n\n new_ftypes[f] = self._template('general_type_map')[ftype].replace(\n '()', suff)\n\n field_types_str = ', \\n'.join([\n self._fix_f_name(field) + ' ' + new_ftypes[field] for field in new_ftypes\n ])\n\n sql = self._template('core.create_table').format(\n table=table,\n col_types=field_types_str,\n )\n\n # log('Creating table: \\n' + sql))\n try:\n self._do_execute(sql)\n except Exception as e:\n raise e\n\n log('Created table \"{}\"'.format(table))\n\n def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):\n \"Get fields of active Select cursor\"\n fields = OrderedDict()\n cursor_desc = cursor_desc if cursor_desc else self._cursor_description\n if cursor_desc == None:\n return []\n\n for f in cursor_desc:\n f_name = f[0].lower()\n if as_dict:\n if native_type:\n f_type = f[1]\n else:\n f_type = self.reverse_data_map[f[1]]\n\n # assign floa/double as needed\n if 'cx_Oracle.NUMBER' in str(f[1]):\n if f[4] and f[4] > 
11: f_type = 'long'\n if f[5] and f[5] > 0: f_type = 'double'\n\n fields[f_name] = f_type\n else:\n fields[f_name] = None\n\n if as_dict:\n return fields\n else:\n return list(fields.keys())\n\n def stream(self,\n sql,\n rec_name='Record',\n dtype='namedtuple',\n yield_chuncks=False,\n chunk_size=None,\n limit=None,\n echo=True):\n \"Stream Select from SQL, yield records as they come in\"\n self.reconnect(min_tresh=10)\n if echo: log(\"Streaming SQL for '{}'.\".format(rec_name))\n\n fetch_size = limit if limit else self.fetch_size\n fetch_size = chunk_size if chunk_size else fetch_size\n\n try:\n self._do_execute(sql)\n except Exception as e:\n raise e\n\n if dtype == 'tuple':\n make_rec = lambda row: row\n make_batch = lambda rows: rows\n elif dtype == 'dataframe':\n yield_chuncks=True\n make_batch = lambda rows: pandas.DataFrame(rows, columns=self._fields)\n else:\n Record = namedtuple(\n rec_name.replace(' ', '_').replace('.', '_'), self._fields)\n make_rec = lambda row: Record(*row)\n make_batch = lambda rows: [make_rec(r) for r in rows]\n\n self._stream_counter = 0\n\n while True:\n if not self._fields:\n break\n rows = self.result.fetchmany(fetch_size)\n if rows:\n if yield_chuncks:\n batch = make_batch(rows)\n self._stream_counter += len(batch)\n if len(batch):\n yield batch\n else:\n for row in rows:\n self._stream_counter += 1\n yield make_rec(row)\n else:\n break\n if limit:\n break\n \n \n\n # log('Stream finished at {} records.'.format(self._stream_counter))\n\n def query(self,\n sql,\n rec_name='Record',\n dtype='namedtuple',\n limit=None,\n echo=True,\n retrying=False,\n log=log):\n \"Select from SQL, return list of namedtuples\"\n # if echo: log(\"Running SQL for '{}'.\".format(rec_name))\n\n self.reconnect(min_tresh=10)\n s_t = datetime.datetime.now()\n\n _data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))\n if not self.result.closed:\n self.result.close()\n\n fields = self._fields\n if not fields: return []\n\n if dtype == 'namedtuple':\n Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)\n if limit:\n data = [Record(*row) for row in _data]\n else:\n data = [Record(*row) for row in _data]\n\n elif dtype == 'tuple':\n if limit:\n data = [tuple(row) for row in _data]\n else:\n data = [tuple(row) for row in _data]\n\n elif dtype == 'dataframe':\n if limit:\n data = pandas.DataFrame([row for row in _data], columns=fields)\n else:\n data = pandas.DataFrame([row for row in _data], columns=fields)\n else:\n raise (Exception('{} is not recongnized.'.format(dtype)))\n\n secs = (datetime.datetime.now() - s_t).total_seconds()\n rate = round(len(data) / secs, 1)\n if echo:\n log(\" >>> Got {} rows in {} secs [{} r/s].\".format(\n len(data), secs, rate))\n return data\n\n def _split_schema_table(self, table_name):\n schema, table = table_name.split('.') if '.' 
in table_name else (\n self.username, table_name)\n return schema, table\n\n def _concat_fields(self, fields, as_text=False):\n return ' || '.join(fields)\n\n def _template(self, template_key_str):\n val = jmespath.search(template_key_str, self.template_dict)\n if isinstance(val, str):\n val = str_rmv_indent(val)\n return val\n\n def get_schemas(self, echo=True):\n \"Get list of schemas.\"\n Rec = namedtuple('Schemas', 'schema')\n self._fields = Rec._fields\n\n sql_tmpl = self._template('metadata.schemas')\n if sql_tmpl:\n schemas = [r[0] for r in self.query(sql_tmpl)]\n else:\n # http://docs.sqlalchemy.org/en/rel_0_9/core/reflection.html#sqlalchemy.engine.reflection.Inspector.get_schemas\n self.get_engine(echo=echo)\n schemas = self.engine_inspect.get_schema_names()\n\n rows = [Rec(s) for s in schemas]\n return rows\n\n def get_objects(self, schema, object_type='all', echo=True):\n \"Get metadata for objects. object_type in 'all', 'table', 'view'\"\n Rec = namedtuple('Table', 'schema object_name object_type')\n self._fields = Rec._fields\n\n def get_rec(object_name, object_type):\n r_dict = dict(\n schema=schema, object_name=object_name, object_type=object_type)\n return Rec(**r_dict)\n\n if object_type == 'all':\n table_rows = self.get_tables(schema)\n rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]\n view_rows = self.get_views(schema)\n rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]\n elif object_type == 'table':\n table_rows = self.get_tables(schema)\n rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]\n elif object_type == 'view':\n view_rows = self.get_views(schema)\n rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]\n else:\n raise Exception('Object type \"{}\" not supported!'.format(object_type))\n\n return rows\n\n def get_tables(self, schema, echo=True):\n \"Get metadata for tables.\"\n schemas = schema if isinstance(schema, list) else [schema]\n\n def get_tables_for(schema):\n def get_rec(table):\n self._fields = ['schema', 'table']\n return tuple([schema, table])\n\n # Getting pickle.PicklingError: Can't pickle <class 'xutil.database.base.Table'>\n Rec = namedtuple('Table', 'schema table')\n self._fields = Rec._fields\n r_dict = dict(schema=schema, table=table)\n return Rec(**r_dict)\n\n sql_tmpl = self._template('metadata.tables')\n if sql_tmpl:\n tables = self.query(sql_tmpl.format(schema=schema))\n if hasattr(self, '_std_get_tables'):\n tables = self._std_get_tables(schema, tables)\n else:\n self.get_engine(echo=echo)\n tables = self.engine_inspect.get_table_names(schema)\n \n return [get_rec(v) for v in sorted(tables)]\n \n rows = []\n for schema in schemas:\n for row in get_tables_for(schema):\n rows.append(row)\n\n return rows\n\n def get_views(self, schema, echo=True):\n \"Get metadata for views.\"\n schemas = schema if isinstance(schema, list) else [schema]\n\n def get_views_for(schema):\n def get_rec(view):\n self._fields = ['schema', 'view']\n return tuple([schema, view])\n\n # pickle.PicklingError: Can't pickle <class 'xutil.database.base.View'>\n Rec = namedtuple('View', 'schema view')\n self._fields = Rec._fields\n r_dict = dict(schema=schema, view=view)\n return Rec(**r_dict)\n\n sql_tmpl = self._template('metadata.views')\n if sql_tmpl:\n views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]\n else:\n self.get_engine(echo=echo)\n views = self.engine_inspect.get_view_names(schema)\n \n return [get_rec(v) for v in sorted(views)]\n\n rows = []\n for schema in schemas:\n for row in 
get_views_for(schema):\n rows.append(row)\n \n return rows\n\n def get_columns(self,\n table_name,\n object_type=None,\n echo=False,\n include_schema_table=True,\n native_type=True):\n \"Get column metadata for table\"\n if include_schema_table:\n headers = 'schema table id column_name type nullable default autoincrement'\n else:\n headers = 'id column_name type nullable default autoincrement'\n\n Rec = namedtuple('Columns', headers)\n self._fields = Rec._fields\n all_rows = []\n\n table_names = table_name if isinstance(table_name, list) else [table_name]\n\n for table_name in table_names:\n schema, table = self._split_schema_table(table_name)\n\n def get_rec(r_dict, column_order):\n if include_schema_table:\n r_dict['schema'] = schema\n r_dict['table'] = table\n r_dict['column_name'] = r_dict['name']\n r_dict['type'] = str(r_dict['type'])\n if not native_type:\n r_dict['type']= r_dict['type'].lower()\n r_dict['type'] = r_dict['type'].split('(')[0] if '(' in r_dict[\n 'type'] else r_dict['type']\n native_type_map = self._template('native_type_map')\n if not r_dict['type'] in native_type_map:\n raise Exception('Field type \"{}\" not in native_type_map for {}'.format(r_dict['type'], self.type))\n r_dict['type'] = native_type_map[r_dict['type']]\n r_dict['id'] = column_order\n\n for k in list(r_dict):\n if k not in headers.split():\n del r_dict[k]\n\n if '(' in r_dict['type']:\n r_dict['type'] = r_dict['type'].split('(')[0]\n\n return Rec(**r_dict)\n\n sql_tmpl = self._template('metadata.columns')\n if sql_tmpl:\n rows = self.query(sql_tmpl.format(table=table, schema=schema))\n if hasattr(self, '_std_get_columns'):\n rows = self._std_get_columns(schema, table, rows)\n else:\n self.get_engine(echo=echo)\n rows = self.engine_inspect.get_columns(table, schema=schema)\n\n all_rows += [get_rec(r_dict, i + 1) for i, r_dict in enumerate(rows)]\n\n self._fields = Rec._fields\n return all_rows\n\n def get_primary_keys(self, table_name, echo=False):\n \"Get PK metadata for table\"\n Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')\n self._fields = Rec._fields\n schema, table = self._split_schema_table(table_name)\n\n def get_rec(col, pk_name, column_order):\n r_dict = {}\n r_dict['schema'] = schema\n r_dict['table'] = table\n r_dict['pk_name'] = pk_name\n r_dict['column_name'] = col\n r_dict['column_order'] = column_order\n return Rec(**r_dict)\n\n sql_tmpl = self._template('metadata.primary_keys')\n if sql_tmpl:\n rows = self.query(sql_tmpl.format(table=table, schema=schema))\n else:\n self.get_engine(echo=echo)\n r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)\n rows = [\n get_rec(col, r_dict['name'], i + 1)\n for i, col in enumerate(r_dict['constrained_columns'])\n ]\n\n return rows\n\n def get_indexes(self, table_name, echo=False):\n \"Get indexes metadata for table\"\n Rec = namedtuple(\n 'Indexes', 'schema table index_name column_name column_order unique')\n self._fields = Rec._fields\n schema, table = self._split_schema_table(table_name)\n\n def get_rec(r_dict):\n r_dict['schema'] = schema\n r_dict['table'] = table\n r_dict['index_name'] = r_dict['name']\n r_dict['unique'] = str(r_dict['unique'])\n del r_dict['name']\n for i, col in enumerate(r_dict['column_names']):\n r_dict['column_name'] = col\n r_dict['column_order'] = i + 1\n yield Rec(**r_dict)\n\n sql_tmpl = self._template('metadata.indexes')\n if sql_tmpl:\n rows = self.query(sql_tmpl.format(table=table, schema=schema))\n else:\n self.get_engine(echo=echo)\n rows = 
self.engine_inspect.get_indexes(table, schema=schema)\n rows = [get_rec(r_dict) for r_dict in rows]\n\n return rows\n\n def get_ddl(self, table_name, object_type=None, echo=True):\n \"Get ddl for table\"\n Rec = namedtuple('DDL', 'ddl')\n self._fields = Rec._fields\n schema, table = self._split_schema_table(table_name)\n\n sql_tmpl = self._template('metadata.ddl')\n if sql_tmpl:\n rows = self.query(\n sql_tmpl.format(\n schema=schema,\n table=table,\n obj_type=object_type,\n ))\n else:\n self.get_engine(echo=echo)\n ddl = self.engine_inspect.get_view_definition(table, schema=schema)\n rows = [Rec(ddl)] if ddl else []\n\n self._fields = Rec._fields\n return rows\n\n def get_all_columns(self):\n \"Get all columns for all tables / views\"\n sql_tmpl = self._template('metadata.all_columns')\n if not sql_tmpl:\n raise Exception('get_all_columns not implemented for {}'.format(\n self.type))\n\n rows = self.query(sql_tmpl)\n return rows\n\n def get_all_tables(self, filter, as_sql=False):\n \"Get all tables / views\"\n sql_tmpl = self._template('metadata.all_tables')\n if not sql_tmpl:\n raise Exception('get_all_tables not implemented for {}'.format(self.type))\n\n sql = sql_tmpl.format(filter=filter)\n return sql if as_sql else self.query(sql, echo=False)\n\n def analyze_fields(self,\n analysis,\n table_name,\n fields=[],\n as_sql=False,\n union=True,\n expr_func_map={},\n **kwargs):\n \"\"\"Base function for field level analysis\n expr_func_map: contains mapping for expression to SQL function to all fields\n \"\"\"\n if '.' not in table_name:\n raise Exception(\"table_name must have schema and name in it with a '.'\")\n if analysis not in self.template_dict['analysis']:\n raise Exception(\"'{}' not found in template for '{}'.\".format(\n analysis, self.type))\n\n schema, table = self._split_schema_table(table_name)\n\n # get field type\n field_rows = self.get_columns(table_name)\n field_type = {r.column_name.lower(): r.type for r in field_rows}\n\n if not fields:\n fields = [r.column_name for r in field_rows]\n\n for expr in list(expr_func_map):\n tmpl_path = 'function.' + expr_func_map[expr]\n expr_func_map[expr] = ',\\n'.join([\n self._template(tmpl_path).format(field=field)\n for field in [r.column_name for r in field_rows]\n ])\n\n sep = ' \\nunion all\\n' if union else ' \\n ;\\n'\n sql = sep.join([\n self._template('analysis.' + analysis).format(\n schema=schema,\n field=field,\n table=table,\n type=field_type[field.lower()] if field else '',\n **expr_func_map,\n **kwargs) for field in fields\n ])\n return sql if as_sql else self.query(sql, analysis, echo=False)\n\n def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):\n \"\"\"Base function for table level analysis\"\"\"\n if analysis not in self.template_dict['analysis']:\n raise Exception(\"'{}' not found in template for '{}'.\".format(\n analysis, self.type))\n\n if not tables and 'schema' in kwargs:\n # get all tables\n rows = self.get_schemas(kwargs['schema'])\n crt_obj = lambda r: struct(dict(schema=r.schema, table=r.object_name))\n objs = [crt_obj(r) for r in rows]\n else:\n crt_obj = lambda schema, table: struct(dict(schema=schema, table=table))\n objs = [crt_obj(*self._split_schema_table(t)) for t in tables]\n\n sql = ' \\nunion all\\n'.join([\n self._template('analysis.' 
+ analysis).format(\n schema=obj.schema, table=obj.table, **kwargs) for obj in objs\n ])\n\n return sql if as_sql else self.query(sql, analysis, echo=False)\n\n\n def analyze_join_match(self,\n t1,\n t2,\n t1_field,\n t2_field,\n t1_filter='1=1',\n t2_filter='1=1',\n as_sql=False,\n as_text=True,\n lowercase=True):\n def get_kwargs(t1, t2, t1_field, t2_field, t1_filter, t2_filter):\n t1_field_arr = ['t1.' + f for f in t1_field.split(',')]\n t2_field_arr = ['t2.' + f for f in t2_field.split(',')]\n t1_field_concat = self._concat_fields(t1_field_arr, as_text=as_text)\n t2_field_concat = self._concat_fields(t2_field_arr, as_text=as_text)\n to_text = self._to_text\n\n if lowercase:\n conds = ' and '.join([\n 'lower({}) = lower({})'.format(to_text(f), to_text(t2_field_arr[i]))\n for i, f in enumerate(t1_field_arr)\n ])\n else:\n conds = ' and '.join([\n '{} = {}'.format(to_text(f), to_text(t2_field_arr[i]))\n for i, f in enumerate(t1_field_arr)\n ])\n t1_fields1 = t1_field\n t2_fields1 = t2_field\n t1_field = ', '.join(['t1.' + f for f in t1_field_arr])\n t2_field = ', '.join(['t2.' + f for f in t2_field_arr])\n\n return dict(\n t1=t1,\n t1_field=t1_field_concat,\n t1_fields1=t1_fields1,\n t1_filter=t1_filter,\n t2=t2,\n t2_field=t2_field_concat,\n t2_fields1=t2_fields1,\n t2_filter=t2_filter,\n conds=conds,\n )\n\n kwargs = get_kwargs(\n t1=t1,\n t2=t2,\n t1_field=t1_field,\n t2_field=t2_field,\n t1_filter=t1_filter,\n t2_filter=t2_filter,\n )\n sql = self.analyze_fields(\n 'table_join_match', t1, [''], as_sql=True, **kwargs)\n\n return sql if as_sql else self.query(sql, 'table_join_match', echo=False)\n\n\ndef get_conn(db,\n dbs=None,\n echo=True,\n reconnect=False,\n use_jdbc=False,\n conn_expire_min=10,\n spark_hive=False) -> DBConn:\n global conns\n\n dbs = dbs if dbs else get_databases()\n profile = get_profile()\n db_dict = struct(dbs[db])\n\n if db_dict.type.lower() == 'hive' and spark_hive:\n db_dict.type = 'spark'\n\n use_jdbc = True if (use_jdbc or ('use_jdbc' in db_dict\n and db_dict['use_jdbc'])) else use_jdbc\n\n if db in conns and not reconnect:\n if (now() - conns[db].last_connect).total_seconds() / 60 < conn_expire_min:\n return conns[db]\n\n if use_jdbc:\n log('*USING JDBC for ' + db)\n from .jdbc import JdbcConn\n conn = JdbcConn(db_dict, profile=profile)\n\n elif db_dict.type.lower() == 'oracle':\n from .oracle import OracleConn\n conn = OracleConn(db_dict, echo=echo)\n\n elif db_dict.type.lower() == 'spark':\n from .spark import SparkConn\n conn = SparkConn(db_dict, echo=echo)\n\n elif db_dict.type.lower() == 'hive':\n from .hive import HiveConn, Beeline\n if 'use_beeline' in db_dict and db_dict.use_beeline:\n conn = Beeline(db_dict, echo=echo)\n else:\n conn = HiveConn(db_dict, echo=echo)\n\n elif db_dict.type.lower() in ('postgresql', 'redshift'):\n from .postgresql import PostgreSQLConn\n conn = PostgreSQLConn(db_dict, echo=echo)\n\n elif db_dict.type.lower() == 'sqlserver':\n from .sqlserver import SQLServerConn\n conn = SQLServerConn(db_dict, echo=echo)\n\n elif db_dict.type.lower() == 'sqlite':\n from .sqlite import SQLiteConn\n conn = SQLiteConn(db_dict, echo=echo)\n else:\n raise Exception(f'Type {db_dict.type} not handled!')\n\n conns[db] = conn\n return conn\n\n\nclass SqlX:\n \"\"\"\n SQL Express functions. 
Supports CRUD transactional operations.\n\n Suppose there is a table named 'cache', sqlx allows:\n \n sqlx.x('cache').insert(rows)\n sqlx.x('cache').insert_one(row)\n sqlx.x('cache').add(**kws)\n sqlx.x('cache').delete(where)\n sqlx.x('cache').update(rows, pk_fields)\n sqlx.x('cache').update_one(row, pk_cols)\n sqlx.x('cache').replace(rows, pk_fields)\n sqlx.x('cache').query(where)\n sqlx.x('cache').select_one(where)\n \"\"\"\n\n def __init__(self, conn: DBConn, table, schema, ntRec: namedtuple):\n self.conn = conn\n self.table = table\n self.schema = schema\n self.ntRec = ntRec\n self.pk_fields = None\n self.table_obj = schema + '.' + table if schema else table\n\n self.insert_one = lambda row: self.insert([row])\n self.add = lambda **kws: self.insert([self.ntRec(**kws)])\n self.update_one = lambda row, pk_cols=None: self.update([row], pk_cols)\n self.update_rec=lambda pk_cols=None, **kws: self.update([make_rec(**kws)], pk_cols)\n self.replace_one = lambda row, pk_cols=None: self.replace([row], pk_cols)\n self.replace_rec=lambda pk_cols=None, **kws: self.replace([make_rec(**kws)], pk_cols)\n # self.select_one = lambda where: self.select_one(where, one=True)\n\n def _get_pk(self):\n if not self.pk_fields:\n pk_rows = self.conn.get_primary_keys(self.table_obj)\n self.pk_fields = [r.column_name for r in pk_rows]\n return self.pk_fields\n\n def insert(self, data):\n return self.conn.insert(self.table_obj, data)\n\n def update(self, data, pk_fields=None):\n if not pk_fields:\n pk_fields = self._get_pk()\n if not pk_fields:\n raise Exception(\"Need Keys to perform UPDATE!\")\n t_fields = [x.lower() for x in data[0]._fields]\n for f in pk_fields:\n if not f.lower() in t_fields:\n # if keys not provided, need to make sure PK values are provided in data records\n raise Exception(\n \"Value of PK field '{}' must be provided to perform UPDATE!\".\n format(f))\n\n self.conn.update(self.table_obj, data, pk_fields, echo=False)\n\n def update_one(self, row, pk_cols=None):\n self.update([row], pk_cols)\n\n def update_rec(self, pk_cols=None, **kws):\n self.update([make_rec(**kws)], pk_cols)\n\n def replace(self, data, pk_fields=None):\n if not pk_fields:\n pk_fields = self._get_pk()\n self.conn.replace(self.table_obj, data, pk_fields, echo=False)\n\n # def replace_rec(self, pk_cols=None, **kws):\n # # add default None?\n # for field in self.ntRec._fields:\n # kws[field] = kws.get(field, None)\n\n # self.replace([self.ntRec(**kws)], pk_cols)\n\n def query(self, where='1=1', one=False, limit=None, as_dict=False):\n rows = self.conn.query(\n \"select * from {} where {}\".format(self.table_obj, where),\n echo=False,\n limit=limit)\n rows = rows_to_dicts(rows) if as_dict else rows\n if one: return rows[0] if rows else None\n else: return rows\n\n def select_one(self, where, field=None, as_dict=False):\n row = self.query(where, one=True, as_dict=as_dict)\n if field and row:\n return row[field] if as_dict else row.__getattribute__(field)\n return row\n\n def delete(self, where):\n self.conn.execute(\"delete from {} where {}\".format(self.table_obj, where))\n\n\ndef make_sqlx(conn, schema, tables):\n \"Make sqlx lookup function for given tables\"\n\n table_func_map = {}\n\n for table in tables:\n ntRec = namedtuple(table, tables[table].columns.keys())\n table_func_map[table] = SqlX(conn, table, schema, ntRec)\n\n # return table_func_map\n\n def sqlx(expr) -> SqlX:\n obj = jmespath.search(expr, table_func_map)\n if not obj:\n raise Exception('sqlx: Cannot find \"{}\"'.format(expr))\n return obj\n\n return 
sqlx\n\n\ndef get_sql_sources(sql_text, echo=False):\n \"\"\"Obtain the source tables of a query\n \"\"\"\n\n import sqlparse\n\n # replace \"as(\" to \"as (\" # this trips up the sql parser in CTEs\n sql_text = re.sub(r\"as\\(\", \"as (\", sql_text, 0, re.MULTILINE | re.IGNORECASE)\n\n statements = sqlparse.parse(sql_text)\n cte_aliases = set()\n sql_sources = {}\n\n def get_sources(statement):\n sources_dict = {}\n last_kw_from = False\n last_kw_join = False\n cte_mode = False\n last_tok = None\n done = False\n\n while not done:\n for tok in statement.tokens:\n\n if tok.is_group:\n if cte_mode and isinstance(tok, sqlparse.sql.IdentifierList):\n for tok2 in tok.tokens:\n if isinstance(tok2, sqlparse.sql.Identifier):\n for tok3 in tok2.tokens:\n if isinstance(tok3, sqlparse.sql.Parenthesis):\n cte_aliases.add(tok3.parent.normalized.lower())\n sources_dict2 = get_sources(tok3)\n sources_dict = {**sources_dict, **sources_dict2}\n elif isinstance(tok, sqlparse.sql.Parenthesis):\n sources_dict2 = get_sources(tok)\n sources_dict = {**sources_dict, **sources_dict2}\n else:\n for tok2 in tok.tokens:\n if isinstance(tok2, sqlparse.sql.Parenthesis):\n cte_aliases.add(tok2.parent.normalized.lower())\n sources_dict2 = get_sources(tok2)\n sources_dict = {**sources_dict, **sources_dict2}\n\n\n if (last_kw_from or last_kw_join) and last_tok.is_whitespace:\n if isinstance(tok, sqlparse.sql.IdentifierList):\n for tok2 in tok.tokens:\n if isinstance(tok2, sqlparse.sql.Identifier) and '(' in tok2.value:\n sources_dict2 = get_sources(tok2)\n sources_dict = {**sources_dict, **sources_dict2}\n elif isinstance(tok2, sqlparse.sql.Identifier) and tok2.normalized.lower() not in cte_aliases:\n if echo: log('+Table = ' + tok2.normalized.lower())\n sources_dict[tok2.normalized.lower()] = tok.parent\n\n elif isinstance(tok, sqlparse.sql.Identifier) and tok.normalized.lower() not in cte_aliases:\n if echo: log('+Table = ' + tok.normalized.lower())\n sources_dict[tok.normalized.lower()] = tok.parent\n\n\n last_kw_join = False\n\n if tok.is_keyword and tok.normalized == 'WITH':\n cte_mode = True\n last_kw_from = False\n elif tok.is_keyword and tok.normalized == 'GROUP':\n last_kw_join = False\n last_kw_from = False\n elif tok.is_keyword and tok.normalized == 'WHERE':\n last_kw_join = False\n last_kw_from = False\n elif tok.is_keyword and tok.normalized == 'ORDER':\n last_kw_join = False\n last_kw_from = False\n elif tok.is_keyword and tok.normalized == 'CREATE':\n cte_mode = True\n last_kw_from = False\n elif tok.is_keyword and tok.normalized == 'SELECT':\n cte_mode = False\n last_kw_from = False\n elif tok.is_keyword and tok.normalized == 'FROM':\n last_kw_from = True\n elif tok.is_keyword and 'JOIN' in tok.normalized:\n last_kw_join = True\n\n last_tok = tok\n done = True\n return sources_dict\n\n for s, statement in enumerate(statements):\n has_from = False\n last_kw_create = False\n last_kw_create_table = False\n create_table = None\n\n for tok in statement.tokens:\n if isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table:\n create_table = tok.normalized\n last_kw_create_table = False\n last_kw_create = False\n if echo: log('-CREATE TABLE ' + create_table)\n if tok.is_keyword and tok.normalized == 'TABLE' and last_kw_create:\n last_kw_create_table = True\n if tok.is_keyword and tok.normalized == 'CREATE':\n last_kw_create = True\n if tok.is_keyword and tok.normalized == 'FROM':\n has_from = True\n last_tok = tok\n\n if has_from:\n sources_dict = get_sources(statement)\n if create_table:\n 
sql_sources[create_table] = sorted(sources_dict)\n else:\n sql_sources[s] = sorted(sources_dict)\n\n return sql_sources\n" ]
[ [ "pandas.DataFrame" ] ]
galal-hussein/opni
[ "019cd5f1a5d43bf28bfedc9d7dc4b04dca0a4c82" ]
[ "src/payload-receiver-service/app/main.py" ]
[ "# Standard Library\nimport asyncio\nimport logging\nimport math\n\n# Third Party\nimport numpy as np\nimport pandas as pd\nfrom fastapi import FastAPI, HTTPException, Request\nfrom nats.aio.client import Client as NATS\nfrom nats_wrapper import NatsWrapper\n\napp = FastAPI()\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s - %(name)s - %(message)s\")\nnw = None\n\n\n@app.on_event(\"startup\")\nasync def startup_event():\n global nw\n nw = NatsWrapper()\n loop = asyncio.get_event_loop()\n await nw.connect(loop)\n\n\nasync def get_nats() -> NATS:\n if not nw.nc.is_connected:\n loop = asyncio.get_event_loop()\n await nw.connect(loop)\n return nw.nc\n\n\nasync def push_to_nats(nats: NATS, payload):\n try:\n df = pd.json_normalize(payload)\n if \"time\" in df.columns:\n df[\"dt\"] = pd.to_datetime(df.time, errors=\"coerce\")\n df = df[df[\"dt\"].notnull()]\n df[\"time_nanoseconds\"] = df[\"dt\"].astype(np.int64)\n # compute window\n df[\"window_dt\"] = df[\"dt\"].dt.floor(\"30s\")\n df[\"window_start_time_ns\"] = df[\"window_dt\"].astype(np.int64)\n df.drop(columns=[\"dt\"], inplace=True)\n df[\"_id\"] = df[\"time_nanoseconds\"].map(str) + df.groupby(\n \"time_nanoseconds\"\n ).cumcount().map(\"{:016b}\".format)\n df = df.fillna(\"\")\n for window_start_time_ns, data_df in df.groupby([\"window_start_time_ns\"]):\n window_payload_size_bytes = data_df.memory_usage(deep=True).sum()\n num_chunked_dfs = max(\n 1, math.ceil(window_payload_size_bytes / nats.max_payload)\n )\n if num_chunked_dfs > 1:\n logging.info(\n \"payload_df size = {} bytes. NATS max payload = {} bytes. Chunking into {} DataFrames\".format(\n window_payload_size_bytes, nats.max_payload, num_chunked_dfs\n )\n )\n # process every chunk\n for chunked_payload_df in np.array_split(data_df, num_chunked_dfs):\n await nats.publish(\n \"raw_logs\", chunked_payload_df.to_json().encode()\n )\n else:\n # TODO logs without timestamp (e.g. control plane logs)\n logging.info(\"Ignoring payload without time field\")\n except Exception as e:\n logging.error(\"Error: {}\".format(str(e)))\n\n\n@app.post(\"/\")\nasync def index(request: Request):\n logging.info(\"Received request: {}\".format(str(request)))\n try:\n logs_payload = await request.json()\n asyncio.create_task(push_to_nats(await get_nats(), logs_payload))\n except:\n # Bad Request\n raise HTTPException(\n status_code=404, detail=\"Something wrong with request {request}\"\n )\n" ]
[ [ "pandas.to_datetime", "numpy.array_split", "pandas.json_normalize" ] ]
TonyBagnall/sktime
[ "837a77026be3e53511c3d6139ddad14a39351bf5" ]
[ "sktime/utils/load_data.py" ]
[ "import pandas as pd\nimport numpy as np\n\n\ndef load_from_tsfile_to_dataframe(full_file_path_and_name, replace_missing_vals_with='NaN'):\n data_started = False\n instance_list = []\n class_val_list = []\n\n has_time_stamps = False\n has_class_labels = False\n\n uses_tuples = False\n\n is_first_case = True\n with open(full_file_path_and_name, 'r') as f:\n for line in f:\n\n if line.strip():\n if \"@timestamps\" in line.lower():\n if \"true\" in line.lower():\n has_time_stamps = True\n raise Exception(\"Not suppoorted yet\") # we don't have any data formatted to test with yet\n elif \"false\" in line.lower():\n has_time_stamps = False\n else:\n raise Exception(\"invalid timestamp argument\")\n\n if \"@classlabel\" in line.lower():\n if \"true\" in line:\n has_class_labels = True\n elif \"false\" in line:\n has_class_labels = False\n else:\n raise Exception(\"invalid classLabel argument\")\n\n if \"@data\" in line.lower():\n data_started = True\n continue\n\n # if the 'data tag has been found, the header information has been cleared and now data can be loaded\n if data_started:\n line = line.replace(\"?\", replace_missing_vals_with)\n dimensions = line.split(\":\")\n\n # perhaps not the best way to do this, but on the first row, initialise stored depending on the\n # number of dimensions that are present and determine whether data is stored in a list or tuples\n if is_first_case:\n num_dimensions = len(dimensions)\n if has_class_labels:\n num_dimensions -= 1\n is_first_case = False\n for dim in range(0, num_dimensions):\n instance_list.append([])\n if dimensions[0].startswith(\"(\"):\n uses_tuples = True\n\n this_num_dimensions = len(dimensions)\n if has_class_labels:\n this_num_dimensions -= 1\n\n # assuming all dimensions are included for all series, even if they are empty. If this is not true\n # it could lead to confusing dimension indices (e.g. if a case only has dimensions 0 and 2 in the\n # file, dimension 1 should be represented, even if empty, to make sure 2 doesn't get labelled as 1)\n if this_num_dimensions != num_dimensions:\n raise Exception(\"inconsistent number of dimensions\")\n\n # go through each dimension that is represented in the file\n for dim in range(0, num_dimensions):\n\n # handle whether tuples or list here\n if uses_tuples:\n without_brackets = dimensions[dim].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n without_brackets = [float(i) for i in without_brackets]\n\n indices = []\n data = []\n i = 0\n while i < len(without_brackets):\n indices.append(int(without_brackets[i]))\n data.append(without_brackets[i + 1])\n i += 2\n\n instance_list[dim].append(pd.Series(data, indices))\n else:\n # if the data is expressed in list form, just read into a pandas.Series\n data_series = dimensions[dim].split(\",\")\n data_series = [float(i) for i in data_series]\n instance_list[dim].append(pd.Series(data_series))\n\n if has_class_labels:\n class_val_list.append(dimensions[num_dimensions].strip())\n\n # note: creating a pandas.DataFrame here, NOT an xpandas.xdataframe\n x_data = pd.DataFrame(dtype=np.float32)\n for dim in range(0, num_dimensions):\n x_data['dim_' + str(dim)] = instance_list[dim]\n\n if has_class_labels:\n return x_data, np.asarray(class_val_list)\n #\n # # otherwise just return an XDataFrame\n return x_data" ]
[ [ "pandas.DataFrame", "numpy.asarray", "pandas.Series" ] ]
wgova/automations
[ "2406d7bb7cc805ca4b48043995a457f5d65633f6" ]
[ "clust_indices.py" ]
[ "# coding: utf-8\n\nimport warnings\nimport numpy as np\nimport pandas as pd\nfrom packaging import version\nfrom sklearn.metrics import pairwise_distances_chunked\nfrom sklearn.utils import check_X_y,check_random_state\nfrom sklearn.preprocessing import LabelEncoder\nimport functools\nfrom pyclustering.cluster.clarans import clarans\nfrom pyclustering.utils import timedcall\nfrom pyclustering.utils import (draw_clusters,\naverage_inter_cluster_distance,\naverage_intra_cluster_distance,\naverage_neighbor_distance)\nimport sklearn\nfrom sklearn.metrics import (davies_bouldin_score,\nsilhouette_score, \npairwise_distances,\ncalinski_harabasz_score\n)\n\n# They changed the name of calinski_harabaz_score in later version of sklearn:\n# https://github.com/scikit-learn/scikit-learn/blob/c4733f4895c1becdf587b38970f6f7066656e3f9/doc/whats_new/v0.20.rst#id2012\nsklearn_version = version.parse(sklearn.__version__)\nnm_chg_ver = version.parse(\"0.23\")\nif sklearn_version >= nm_chg_ver:\n from sklearn.metrics import calinski_harabasz_score as _cal_score\nelse:\n from sklearn.metrics import calinski_harabaz_score as _cal_score\n\n\ndef _get_clust_pairs(clusters):\n return [(i, j) for i in clusters for j in clusters if i > j]\n\n\ndef _dunn(data=None, dist=None, labels=None):\n clusters = set(labels)\n inter_dists = [\n dist[np.ix_(labels == i, labels == j)].min()\n for i, j in _get_clust_pairs(clusters)\n ]\n intra_dists = [\n dist[np.ix_(labels == i, labels == i)].max()\n for i in clusters\n ]\n return min(inter_dists) / max(intra_dists)\n\ndef dunn(dist, labels):\n return _dunn(data=None, dist=dist, labels=labels)\n\n\ndef cop(data, dist, labels):\n clusters = set(labels)\n cpairs = _get_clust_pairs(clusters)\n prox_lst = [\n dist[np.ix_(labels == i[0], labels == i[1])].max()\n for i in cpairs\n ]\n\n out_l = []\n for c in clusters:\n c_data = data[labels == c]\n c_center = c_data.mean(axis=0, keepdims=True)\n c_intra = pairwise_distances(c_data, c_center).mean()\n\n c_prox = [prox for pair, prox in zip(cpairs, prox_lst) if c in pair]\n c_inter = min(c_prox)\n\n to_add = len(c_data) * c_intra / c_inter\n out_l.append(to_add)\n\n return sum(out_l) / len(labels)\n\n\ndef _silhouette_score2(data=None, dist=None, labels=None):\n return silhouette_score(dist, labels, metric='precomputed')\n\n\ndef _davies_bouldin_score2(data=None, dist=None, labels=None):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'divide by zero')\n return davies_bouldin_score(data, labels)\n\n\ndef _calinski_harabaz_score2(data=None, dist=None, labels=None):\n return _cal_score(data, labels)\n\ndef check_number_of_labels(n_labels, n_samples):\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. 
Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)\n\ndef cluster_dist_reduce(D_chunk, start, labels, label_freqs):\n # accumulate distances from each sample to each cluster\n clust_dists = np.zeros((len(D_chunk), len(label_freqs)),\n dtype=D_chunk.dtype)\n for i in range(len(D_chunk)):\n clust_dists[i] += np.bincount(labels, weights=D_chunk[i],\n minlength=len(label_freqs))\n\n # intra_index selects intra-cluster distances within clust_dists\n intra_index = (np.arange(len(D_chunk)), labels[start:start + len(D_chunk)])\n # intra_clust_dists are averaged over cluster size outside this function\n intra_clust_dists = clust_dists[intra_index]\n # of the remaining distances we normalise and extract the minimum\n clust_dists[intra_index] = np.inf\n clust_dists /= label_freqs\n inter_clust_dists = clust_dists.min(axis=1)\n return intra_clust_dists, inter_clust_dists\n\ndef inter_cluster_dist(data=None, dist=None, labels=None):\n _, inter_dist = cluster_distances(dist, labels, metric='precomputed')\n return inter_dist\n\ndef intra_cluster_dist(data=None, dist=None, labels=None):\n intra_dist, _ = cluster_distances(dist, labels, metric='precomputed')\n return intra_dist\n\ndef cluster_distances(X, labels, *, metric='precomputed', random_state=None, **kwds):\n return intra_inter_distances(X, labels, metric=metric, **kwds)\n\ndef intra_inter_distances(X, labels, metric='precomputed'):\n # Check for non-zero diagonal entries in precomputed distance matrix\n atol = np.finfo(X.dtype).eps * 100\n if np.any(np.abs(np.diagonal(X)) > atol):\n raise ValueError(\n 'The precomputed distance matrix contains non-zero '\n 'elements on the diagonal. Use np.fill_diagonal(X, 0).'\n )\n\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n n_samples = len(labels)\n label_freqs = np.bincount(labels)\n check_number_of_labels(len(le.classes_), n_samples)\n reduce_func = functools.partial(cluster_dist_reduce,\n labels=labels, label_freqs=label_freqs)\n results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func))\n intra_clust_dists, inter_clust_dists = results\n intra_clust_dists = np.concatenate(intra_clust_dists)\n inter_clust_dists = np.concatenate(inter_clust_dists)\n return np.mean(intra_clust_dists),np.mean(inter_clust_dists)\n\n\ndef clarans_labels(clarans_object):\n labels_clarans = clarans_object.get_clusters()\n labels=pd.DataFrame(labels_clarans).T.melt(var_name='clusters')\\\n .dropna()\n labels['value']=labels.value.astype(int)\n labels=labels.sort_values(['value'])\\\n .set_index('value')\\\n .values\\\n .flatten()\n return labels\n\ndef calculate_clarans_cvi(data,initial_cluster,dist=None):\n cvi_df = pd.DataFrame(columns=['avg_inter_dist','silhouette','calinski',\n 'avg_intra_dist','davies','dunn'])\n df_list = data.values.tolist()\n dist=pairwise_distances(data)\n np.fill_diagonal(dist, 0)\n for k in range(initial_cluster,10):\n print(k)\n clarans_model = clarans(df_list,k,3,5)\n (_, result) =timedcall(clarans_model.process)\n labels = clarans_labels(result)\n # avg_inter_dist = inter_cluster_dist(dist=dist,labels=labels)\n sihlouette = silhouette_score(dist=dist, labels=labels)\n davies = davies_bouldin_score(data, labels)\n calinski = calinski_harabasz_score(data, labels)\n # avg_intra_dist = intra_cluster_dist(dist=dist,labels=labels)\n dunn_ = dunn(dist,labels)\n cvi_df.loc[k] = [avg_inter_dist,sihlouette,\n davies,calinski,avg_intra_dist,dunn_]\n print(cvi_df)\n del clarans_model\n return cvi_df" ]
[ [ "numpy.bincount", "numpy.concatenate", "sklearn.preprocessing.LabelEncoder", "numpy.fill_diagonal", "pandas.DataFrame", "numpy.mean", "numpy.diagonal", "sklearn.metrics.pairwise_distances", "sklearn.metrics.silhouette_score", "sklearn.metrics.calinski_harabasz_score", "numpy.finfo", "numpy.ix_", "sklearn.metrics.davies_bouldin_score", "sklearn.metrics.calinski_harabaz_score", "sklearn.metrics.pairwise_distances_chunked" ] ]
gucci-j/pytorch-imdb-cv
[ "a397716b02eafdba892907fe8800656cbb2afaca" ]
[ "src/metrics.py" ]
[ "import torch\n\ndef binary_accuracy(preds, y):\n\n rounded_preds = torch.round(torch.sigmoid(preds))\n correct = (rounded_preds == y).float()\n acc = correct.sum() / len(correct)\n\n return acc" ]
[ [ "torch.sigmoid" ] ]
WuShaogui/DeepLabV3Plus-Pytorch
[ "8d75215041937269f4e13a80bf4b87fb7b29d570" ]
[ "test.py" ]
[ "import network\nimport torch\n\nif __name__ == '__main__':\n net = network.modeling.__dict__['deeplabv3plus_resnet50']()\n print(net)\n \n input=torch.FloatTensor(2,3,512,512)\n output=net(input)\n print(output.shape)" ]
[ [ "torch.FloatTensor" ] ]
aguirrejuan/ConvRFF
[ "fc3afecc655ddad46fb4f81dd81a76fd5f36a122" ]
[ "convRFF/utils/utils.py" ]
[ "import tensorflow as tf \nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\nimport os \n\n\nclass TensorBoardFix(tf.keras.callbacks.TensorBoard):\n \"\"\"\n This fixes incorrect step values when using the TensorBoard callback with custom summary ops\n https://stackoverflow.com/questions/64642944/steps-of-tf-summary-operations-in-tensorboard-are-always-0\n \"\"\"\n def on_train_begin(self, *args, **kwargs):\n super(TensorBoardFix, self).on_train_begin(*args, **kwargs)\n tf.summary.experimental.set_step(self._train_step)\n \n def on_test_begin(self, *args, **kwargs):\n super(TensorBoardFix, self).on_test_begin(*args, **kwargs)\n tf.summary.experimental.set_step(self._val_step)\n\n\n\ndef get_callbacks(model_name='model',root_dir='logs/fit/',\n monitor='val_categorical_accuracy',mode='max',\n save_freq='epoch',save_best_only=True,\n ):\n log_dir = os.path.join(root_dir,model_name)\n\n tensorboard = TensorBoardFix(log_dir=log_dir,\n histogram_freq=1,\n update_freq=50,\n )\n \n save_model = ModelCheckpoint(filepath=os.path.join(log_dir,'model.h5'),\n save_weights_only=False,\n monitor=monitor,\n mode=mode,\n save_best_only=save_best_only,\n save_freq=save_freq)\n \n return [tensorboard,save_model]" ]
[ [ "tensorflow.summary.experimental.set_step" ] ]
LisanneWiengarten/Punctuation
[ "a633258305a84ca4cffa165e185d8f9920eab6e2" ]
[ "utilities.py" ]
[ "# coding: utf-8\nimport numpy as np\nimport csv\nimport codecs\nimport os\nimport glob\nfrom collections import defaultdict\n\nSPACE = \" \"\nEMPTY = \" \"\nINV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '!':4, '-':5, ';':6, ':':7, '...':8, '':0}\nPUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?', 4:'!', 5:'-', 6:';', 7:':', 8:'...'}\nREDUCED_PUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?'}\nREDUCED_INV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '':0}\nEOS_PUNCTUATION_CODES = [2,3,4,5,6,7,8]\n\nEND = \"<END>\"\nUNK = \"<UNK>\"\nEMP = \"<EMP>\"\nNA = \"NA\"\n\n#PAUSE_FEATURE_NAME = 'pause_before'\n#ALL_POSSIBLE_INPUT_FEATURES = {'word', 'pos', 'pause_before', 'speech_rate_norm', 'f0_mean', 'f0_range', 'i0_mean', 'i0_range'}\n\ndef pad(l, size, padding):\n\tif size >= len(l):\n\t\treturn l + [padding] * abs((len(l)-size))\n\telse:\n\t\treturn l[0:size]\n\ndef read_proscript(filename, add_end=False):\n\tcolumns = defaultdict(list) # each value in each column is appended to a list\n\n\tskip_columns = []\n\twith open(filename) as f:\n\t\treader = csv.DictReader(f, delimiter='|') # read rows into a dictionary format\n\t\tfor row in reader: # read a row as {column1: value1, column2: value2,...}\n\t\t\tfor (k,v) in row.items(): # go over each column name and value \n\t\t\t\tif not k in skip_columns:\n\t\t\t\t\tif \"word\" in k or \"punctuation\" in k or \"pos\" in k:\n\t\t\t\t\t\tcolumns[k].append(v) # append the value into the appropriate list\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcolumns[k].append(float(v)) # real value\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tskip_columns.append(k)\n\t\tif add_end and not columns['word'][-1] == END:\n\t\t\tfor k in columns.keys():\n\t\t\t\tif \"word\" in k or \"pos\" in k:\n\t\t\t\t\tcolumns[k].append(END)\n\t\t\t\telif \"punctuation\" in k:\n\t\t\t\t\tcolumns[k].append(\"\")\n\t\t\t\telse:\n\t\t\t\t\tcolumns[k].append(0.0)\n\treturn columns\n\ndef checkArgument(argname, isFile=False, isDir=False, createDir=False):\n\tif not argname:\n\t\treturn False\n\telse:\n\t\tif isFile and not os.path.isfile(argname):\n\t\t\treturn False\n\t\tif isDir:\n\t\t\tif not os.path.isdir(argname):\n\t\t\t\tif createDir:\n\t\t\t\t\tprint(\"Creating directory %s\"%(argname))\n\t\t\t\t\tos.makedirs(argname)\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\treturn True\n\ndef iterable_to_dict(arr):\n\treturn dict((x.strip(), i) for (i, x) in enumerate(arr))\n\ndef read_vocabulary(file_name):\n\twith codecs.open(file_name, 'r', 'utf-8') as f:\n\t\treturn iterable_to_dict(f.readlines())\n\ndef to_array(arr, dtype=np.int32):\n\t# minibatch of 1 sequence as column\n\treturn np.array([arr], dtype=dtype).T\n\ndef create_pause_bins():\n\tbins = np.arange(0, 1, 0.05)\n\tbins = np.concatenate((bins, np.arange(1, 2, 0.1)))\n\tbins = np.concatenate((bins, np.arange(2, 5, 0.2)))\n\tbins = np.concatenate((bins, np.arange(5, 10, 0.5)))\n\tbins = np.concatenate((bins, np.arange(10, 20, 1)))\n\treturn bins\n\ndef create_pause_bins9():\n\tbins = np.array([ 0. , 0.25, 0.5 , 0.75, 1. , 2. , 3. , 4. , 5. 
])\n\treturn bins\n\ndef create_pause_bins2():\n\treturn [0.0, 1.14]\n\ndef create_pause_bins3():\n\treturn [0.0, 0.2, 1.0]\n\ndef create_semitone_bins():\n\tbins = np.arange(-20, -10, 1)\n\tbins = np.concatenate((bins, np.arange(-10, -5, 0.5)))\n\tbins = np.concatenate((bins, np.arange(-5, 0, 0.25)))\n\tbins = np.concatenate((bins, np.arange(0, 5, 0.25)))\n\tbins = np.concatenate((bins, np.arange(5, 10, 0.5)))\n\tbins = np.concatenate((bins, np.arange(10, 20, 1)))\n\treturn bins\n\ndef levels_from_file(filename):\n\twith open(filename) as f:\n\t\tlst = [float(line.rstrip()) for line in f]\n\treturn lst\n\ndef get_level_maker(levels_file):\n\tlevels_list = levels_from_file(levels_file)\n\tdef get_level(value):\n\t\tlevel = 0\n\t\tfor level_bin in levels_list:\n\t\t\tif value > level_bin:\n\t\t\t\tlevel +=1\n\t\t\telse:\n\t\t\t\treturn level\n\t\treturn level\n\n\tno_of_levels = len(levels_list) + 1\n\treturn get_level, no_of_levels\n\n#OBSOLETE\ndef convert_value_to_level_sequence(value_sequence, bins):\n\tlevels = []\n\tfor value in value_sequence:\n\t\tlevel = 0\n\t\tfor bin_no, bin_upper_limit in enumerate(bins):\n\t\t\tif value > bin_upper_limit:\n\t\t\t\tlevel += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t\tlevels.append(level)\n\treturn levels\n\ndef reducePuncCode(puncCode):\n\tif puncCode in [4, 5, 6, 7, 8]: #period\n\t\treturn 2\n\telse:\n\t\treturn puncCode\n\ndef reducePunc(punc):\n\tif punc and not punc.isspace():\n\t\tpuncCode = INV_PUNCTUATION_CODES[punc]\n\t\treducedPuncCode = reducePuncCode(puncCode)\n\t\treturn PUNCTUATION_VOCABULARY[reducedPuncCode]\n\telse:\n\t\treturn punc\n" ]
[ [ "numpy.array", "numpy.arange" ] ]
TobiasKovats/rainbowm4
[ "22e8eb5bc8470b3b49cb570621b2a15e750fb8e3" ]
[ "run_all_stack.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport subprocess\nimport sys\nimport serial\nimport numpy as np\nimport datetime\n\niterations = 1\n\ndef run(scheme, precomp_bitslicing, use_hardware_crypto):\n os.system(\"make clean\")\n path = f\"crypto_sign/{scheme}/m4\"\n binary = f\"crypto_sign_{scheme}_m4_stack.bin\"\n if precomp_bitslicing:\n precomp_bitslicing = 1\n else:\n precomp_bitslicing = 0\n\n if use_hardware_crypto:\n use_hardware_crypto = 1\n else:\n use_hardware_crypto = 0\n\n\n subprocess.check_call(f\"make PRECOMPUTE_BITSLICING={precomp_bitslicing} USE_HARDWARE_CRYPTO={use_hardware_crypto} IMPLEMENTATION_PATH={path} CRYPTO_ITERATIONS={iterations} bin/{binary}\", shell=True)\n os.system(f\"./flash.sh bin/{binary}\")\n\n # get serial output and wait for '#'\n with serial.Serial(\"/dev/ttyUSB0\", 115200, timeout=20) as dev:\n logs = []\n iteration = 0\n log = b\"\"\n while iteration < iterations:\n device_output = dev.read()\n sys.stdout.buffer.write(device_output)\n sys.stdout.flush()\n log += device_output\n if device_output == b'#':\n logs.append(log)\n log = b\"\"\n iteration += 1\n return logs\n\ndef parseLog(log, v):\n log = log.decode(errors=\"ignore\")\n lines = str(log).splitlines()\n v = int(lines[1+lines.index(v)])\n return v\n\ndef printMacro(name, value):\n value = f\"{round(value):,}\"\n value = value.replace(\",\", \"\\\\,\")\n return f\"\\\\newcommand{{\\\\{name}}}{{{value}}}\"\n\ndef e(logs, f, texname, v):\n print(\"##########\")\n print(v)\n logs = np.array([parseLog(log, v) for log in logs])\n print(logs)\n avgs = logs.mean()\n print(\"avg=\", avgs)\n print(\"median=\", np.median(logs))\n print(\"max=\", logs.max())\n print(\"min=\", logs.min())\n print(\"var=\", np.var(logs))\n print(\"std=\", np.std(logs))\n\n print(printMacro(f\"{texname}stack\", int(avgs)), file=f)\n f.flush()\n\n\ndef do_it(scheme, texname, precomp_bitslicing, use_hardware_crypto, f):\n print(f\"% {scheme}\", file=f)\n logs = run(scheme, precomp_bitslicing, use_hardware_crypto)\n e([logs[0]], f, f\"{texname}Keygen\", \"keypair stack usage:\")\n e(logs, f, f\"{texname}Sign\", \"sign stack usage:\")\n e(logs, f, f\"{texname}Verify\", \"verify stack usage:\")\n\n\nwith open(\"stackbenchmarks.tex\", \"a\") as f:\n now = datetime.datetime.now()\n print(f\"% Benchmarks started at {now} (iterations={iterations})\", file=f)\n\n schemes = {\n \"rainbowI-classic\" : \"rainbowIclassic\",\n \"rainbowI-classic-tweaked\" : \"rainbowIclassictweaked\",\n \"rainbowI-circumzenithal\" : \"rainbowIcircumzenithal\",\n \"rainbowI-circumzenithal-tweaked\" : \"rainbowIcircumzenithaltweaked\",\n \"rainbowI-compressed\" : \"rainbowIcompressed\",\n \"rainbowI-compressed-tweaked\" : \"rainbowIcompressedtweaked\"\n }\n\n for scheme, texName in schemes.items():\n for precomp in [True, False]:\n\n if (scheme == \"rainbowI-compressed\" or scheme == \"rainbowI-compressed-tweaked\") and precomp:\n continue\n for hardware_crypto in [True, False]:\n name = texName\n if precomp:\n name += \"Precomp\"\n if hardware_crypto:\n name += \"HWCrypto\"\n do_it(scheme, name, precomp, hardware_crypto, f)\n\n now = datetime.datetime.now()\n print(f\"% Benchmarks finished at {now} (iterations={iterations})\", file=f)\n" ]
[ [ "numpy.median", "numpy.std", "numpy.var" ] ]
pfnet/gym-env-mujoco150
[ "32f9115279a507143aefa5786578a7a2325e7993" ]
[ "gym_env_mujoco150/walker2d.py" ]
[ "import numpy as np\nfrom gym import utils\nfrom gym_env_mujoco150 import mujoco_env\nimport mujoco_py\n\nclass Walker2dEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self):\n mujoco_env.MujocoEnv.__init__(self, \"walker2d_150.xml\", 4)\n utils.EzPickle.__init__(self)\n\n def _step(self, a):\n posbefore = self.sim.data.qpos[0]\n self.do_simulation(a, self.frame_skip)\n posafter, height, ang = self.sim.data.qpos[0:3]\n alive_bonus = 1.0\n reward = ((posafter - posbefore) / self.dt)\n reward += alive_bonus\n reward -= 1e-3 * np.square(a).sum()\n done = not (height > 0.8 and height < 2.0 and\n ang > -1.0 and ang < 1.0)\n ob = self._get_obs()\n return ob, reward, done, {}\n\n def _get_obs(self):\n qpos = self.sim.data.qpos\n qvel = self.sim.data.qvel\n return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()\n\n def reset_model(self):\n self.set_state(\n self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),\n self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n )\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.type = mujoco_py.const.CAMERA_TRACKING\n self.viewer.cam.trackbodyid = 2\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n self.viewer.cam.lookat[2] = .8\n self.viewer.cam.elevation = -20\n" ]
[ [ "numpy.square", "numpy.clip" ] ]
gbrammer/dust_attenuation
[ "15144c55d594ea06c4790215f7e0e66b03fd8255" ]
[ "dust_attenuation/radiative_transfer.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport astropy.units as u\nimport pkg_resources\n\nfrom astropy.io import ascii\nfrom astropy.modeling.tabular import tabular_model\n\nfrom .baseclasses import BaseAtttauVModel\nfrom .helpers import _test_valid_x_range\n\n\n__all__ = [\"WG00\"]\n\nx_range_WG00 = [0.1, 3.0001]\n\n\nclass WG00(BaseAtttauVModel):\n r\"\"\"\n Attenuation curve of Witt & Gordon (2000)\n\n Parameters\n ----------\n tau_v: float\n optical depth in V band\n\n Raises\n ------\n InputParameterError\n Input Av values outside of defined range\n\n Notes\n -----\n From Witt & Gordon (2000, ApJ, Volume 528, pp. 799-816)\n\n Example:\n\n .. plot::\n :include-source:\n\n import numpy as np\n import matplotlib.pyplot as plt\n import astropy.units as u\n\n from dust_attenuation.radiative_transfer import WG00\n\n fig, ax = plt.subplots(1,2, figsize=(10,6))\n\n # generate the curves and plot them\n # Use 1/microns for a better sampling\n x = np.arange(0.35,10.0,0.1)/u.micron\n\n x_Vband = 0.55 # microns\n\n tau_Vs = [0.25,0.4,1.1,17.0,46.0]\n for tau_V in tau_Vs[::-1]:\n att_model = WG00(tau_V = tau_V, geometry = 'cloudy',\n dust_type = 'mw', dust_distribution = 'clumpy')\n ax[0].plot(x,att_model(1/x),label=r'$\\tau_V$ = %.2f mag' % (tau_V))\n ax[1].plot(x,att_model(1/x)/att_model(x_Vband),\n label=r'$\\tau_V$ = %.2f mag' % (tau_V))\n\n ax[0].set_xlabel(r'$x$ [$\\mu m^{-1}$]')\n ax[0].set_ylabel(r'$Att(x)$ [mag]')\n ax[1].set_xlabel(r'$x$ [$\\mu m^{-1}$]')\n ax[1].set_ylabel(r'$Att(x)/Att_V$')\n\n\n ax[0].legend(loc='best')\n ax[1].legend(loc='best')\n fig.suptitle(r'CLOUDY / MW / clumpy model',size=15)\n plt.tight_layout()\n fig.subplots_adjust(top=0.88)\n\n plt.show()\n\n \"\"\"\n\n tau_V_range = [0.25, 50.0]\n x_range = x_range_WG00\n\n def __init__(\n self, tau_V, geometry=\"dusty\", dust_type=\"mw\", dust_distribution=\"clumpy\"\n ):\n \"\"\"\n Load the attenuation curves for a given geometry, dust type and\n dust distribution.\n\n Parameters\n ----------\n tau_V: float\n optical depth in V band\n\n geometry: string\n 'shell', 'cloudy' or 'dusty'\n\n dust_type: string\n 'mw' or 'smc'\n\n dust_distribution: string\n 'homogeneous' or 'clumpy'\n\n Returns\n -------\n Attx: np array (float)\n Att(x) attenuation curve [mag]\n\n \"\"\"\n # Ensure strings are lower cases\n self.geometry = geometry.lower()\n self.dust_type = dust_type.lower()\n self.dust_distribution = dust_distribution.lower()\n\n data_path = pkg_resources.resource_filename(\"dust_attenuation\", \"data/WG00/\")\n\n data = ascii.read(data_path + self.geometry + \".txt\", header_start=0)\n\n if self.dust_type == \"mw\":\n start = 0\n elif self.dust_type == \"smc\":\n start = 25\n\n # Column names\n tau_colname = \"tau\"\n tau_att_colname = \"tau_att\"\n fsca_colname = \"f(sca)\"\n fdir_colname = \"f(dir)\"\n fesc_colname = \"f(esc)\"\n\n if self.dust_distribution == \"clumpy\":\n tau_att_colname += \"_c\"\n fsca_colname += \"_c\"\n fdir_colname += \"_c\"\n fesc_colname += \"_c\"\n\n elif self.dust_distribution == \"homogeneous\":\n tau_att_colname += \"_h\"\n fsca_colname += \"_h\"\n fdir_colname += \"_h\"\n fesc_colname += \"_h\"\n\n tau_att_list = []\n tau_list = []\n fsca_list = []\n fdir_list = []\n fesc_list = []\n\n len_data = len(data[\"lambda\"])\n # number of lines between 2 models\n steps = 25\n\n counter = start\n while counter < len_data:\n tau_att_list.append(\n np.array(data[tau_att_colname][counter : counter + steps])\n )\n tau_list.append(np.array(data[tau_colname][counter : counter + steps]))\n 
fsca_list.append(np.array(data[fsca_colname][counter : counter + steps]))\n fdir_list.append(np.array(data[fdir_colname][counter : counter + steps]))\n fesc_list.append(np.array(data[fesc_colname][counter : counter + steps]))\n\n counter += int(2 * steps)\n\n # Convert to np.array and take transpose to have (wvl, tau_V)\n tau_att_table = np.array(tau_att_list).T\n tau_table = np.array(tau_list).T\n fsca_table = np.array(fsca_list).T\n fdir_table = np.array(fdir_list).T\n fesc_table = np.array(fesc_list).T\n\n # wavelength grid. It is the same for all the models\n wvl = np.array(data[\"lambda\"][0:25])\n self.wvl_grid = wvl\n\n # Grid for the optical depth\n tau_V_grid = np.array(\n [\n 0.25,\n 0.5,\n 0.75,\n 1.0,\n 1.5,\n 2.0,\n 2.5,\n 3.0,\n 3.5,\n 4.0,\n 4.5,\n 5.0,\n 5.5,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 15.0,\n 20.0,\n 25.0,\n 30.0,\n 35.0,\n 40.0,\n 45.0,\n 50.0,\n ]\n )\n\n # Create a 2D tabular model for tau_att and all flux fraction\n tab = tabular_model(2, name=\"2D_table\")\n\n # Values corresponding to the x and y grid points\n gridpoints = (wvl, tau_V_grid)\n\n self.model = tab(\n gridpoints,\n lookup_table=tau_att_table,\n name=\"tau_att_WG00\",\n bounds_error=False,\n fill_value=None,\n method=\"linear\",\n )\n\n self.tau = tab(\n gridpoints,\n lookup_table=tau_table,\n name=\"tau_WG00\",\n bounds_error=False,\n fill_value=None,\n method=\"linear\",\n )\n\n self.fsca = tab(\n gridpoints,\n lookup_table=fsca_table,\n name=\"fsca_WG00\",\n bounds_error=False,\n fill_value=None,\n method=\"linear\",\n )\n\n self.fdir = tab(\n gridpoints,\n lookup_table=fdir_table,\n name=\"fdir_WG00\",\n bounds_error=False,\n fill_value=None,\n method=\"linear\",\n )\n\n self.fesc = tab(\n gridpoints,\n lookup_table=fesc_table,\n name=\"fesc_WG00\",\n bounds_error=False,\n fill_value=None,\n method=\"linear\",\n )\n\n # In Python 2: super(WG00, self)\n # In Python 3: super() but super(WG00, self) still works\n super(WG00, self).__init__(tau_V=tau_V)\n\n def evaluate(self, x, tau_V):\n \"\"\"\n WG00 function\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n Attx: np array (float)\n Att(x) attenuation curve [mag]\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n \"\"\"\n # convert to wavenumbers (1/micron) if x input in units\n # otherwise, assume x in appropriate wavenumber units\n with u.add_enabled_equivalencies(u.spectral()):\n x_quant = u.Quantity(x, u.micron, dtype=np.float64)\n\n # strip the quantity to avoid needing to add units to all the\n # polynomical coefficients\n x = x_quant.value\n\n # check that the wavenumbers are within the defined range\n _test_valid_x_range(x, self.x_range, \"WG00\")\n\n # setup the ax vectors\n n_x = len(x)\n\n xinterp = 1e4 * x\n yinterp = tau_V * np.ones(n_x)\n\n taux = self.model(xinterp, yinterp)\n\n # Convert optical depth to attenuation\n Attx = 1.086 * taux\n\n return Attx\n\n def get_extinction(self, x, tau_V):\n \"\"\"\n Return the extinction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n ext: np array (float)\n ext(x) extinction curve [mag]\n\n Raises\n ------\n ValueError\n Input x values outside of defined 
range\n \"\"\"\n # convert to wavenumbers (1/micron) if x input in units\n # otherwise, assume x in appropriate wavenumber units\n with u.add_enabled_equivalencies(u.spectral()):\n x_quant = u.Quantity(x, u.micron, dtype=np.float64)\n\n # strip the quantity to avoid needing to add units to all the\n # polynomical coefficients\n x = x_quant.value\n\n # check that the wavenumbers are within the defined range\n _test_valid_x_range(x, self.x_range, \"WG00\")\n\n # setup the ax vectors\n x = np.atleast_1d(x)\n n_x = len(x)\n\n xinterp = 1e4 * x\n yinterp = tau_V * np.ones(n_x)\n\n return self.tau(xinterp, yinterp) * 1.086\n\n def get_fsca(self, x, tau_V):\n \"\"\"\n Return the scattered flux fraction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n \"\"\"\n # convert to wavenumbers (1/micron) if x input in units\n # otherwise, assume x in appropriate wavenumber units\n with u.add_enabled_equivalencies(u.spectral()):\n x_quant = u.Quantity(x, u.micron, dtype=np.float64)\n\n # strip the quantity to avoid needing to add units to all the\n # polynomical coefficients\n x = x_quant.value\n\n # check that the wavenumbers are within the defined range\n _test_valid_x_range(x, self.x_range, \"WG00\")\n\n # setup the ax vectors\n x = np.atleast_1d(x)\n n_x = len(x)\n\n xinterp = 1e4 * x\n yinterp = tau_V * np.ones(n_x)\n\n return self.fsca(xinterp, yinterp)\n\n def get_fdir(self, x, tau_V):\n \"\"\"\n Return the direct attenuated stellar flux fraction at a given\n wavelength and V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n \"\"\"\n # convert to wavenumbers (1/micron) if x input in units\n # otherwise, assume x in appropriate wavenumber units\n with u.add_enabled_equivalencies(u.spectral()):\n x_quant = u.Quantity(x, u.micron, dtype=np.float64)\n\n # strip the quantity to avoid needing to add units to all the\n # polynomical coefficients\n x = x_quant.value\n\n # check that the wavenumbers are within the defined range\n _test_valid_x_range(x, self.x_range, \"WG00\")\n\n # setup the ax vectors\n x = np.atleast_1d(x)\n n_x = len(x)\n\n xinterp = 1e4 * x\n yinterp = tau_V * np.ones(n_x)\n\n return self.fdir(xinterp, yinterp)\n\n def get_fesc(self, x, tau_V):\n \"\"\"\n Return the total escaping flux fraction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n \"\"\"\n # convert to wavenumbers (1/micron) if x input in units\n # otherwise, assume x in appropriate wavenumber units\n with u.add_enabled_equivalencies(u.spectral()):\n x_quant = u.Quantity(x, 
u.micron, dtype=np.float64)\n\n # strip the quantity to avoid needing to add units to all the\n # polynomical coefficients\n x = x_quant.value\n\n # check that the wavenumbers are within the defined range\n _test_valid_x_range(x, self.x_range, \"WG00\")\n\n # setup the ax vectors\n x = np.atleast_1d(x)\n n_x = len(x)\n\n xinterp = 1e4 * x\n yinterp = tau_V * np.ones(n_x)\n\n return self.fesc(xinterp, yinterp)\n\n def get_albedo(self, x):\n \"\"\"\n Return the albedo in function of wavelength for the corresponding\n dust type (SMC or MW). The albedo gives the probability a photon\n is scattered from a dust grain.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n Returns\n -------\n albedo: np array (float)\n alb(x) albedo\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n \"\"\"\n # convert to wavenumbers (1/micron) if x input in units\n # otherwise, assume x in appropriate wavenumber units\n with u.add_enabled_equivalencies(u.spectral()):\n x_quant = u.Quantity(x, u.micron, dtype=np.float64)\n\n # strip the quantity to avoid needing to add units to all the\n # polynomical coefficients\n x = x_quant.value\n\n # check that the wavenumbers are within the defined range\n _test_valid_x_range(x, self.x_range, \"WG00\")\n\n # setup the ax vectors\n x = np.atleast_1d(x)\n\n alb_MW = np.array(\n [\n 0.320,\n 0.409,\n 0.481,\n 0.526,\n 0.542,\n 0.536,\n 0.503,\n 0.432,\n 0.371,\n 0.389,\n 0.437,\n 0.470,\n 0.486,\n 0.499,\n 0.506,\n 0.498,\n 0.502,\n 0.491,\n 0.481,\n 0.500,\n 0.473,\n 0.457,\n 0.448,\n 0.424,\n 0.400,\n ]\n )\n\n alb_SMC = np.array(\n [\n 0.400,\n 0.449,\n 0.473,\n 0.494,\n 0.508,\n 0.524,\n 0.529,\n 0.528,\n 0.523,\n 0.520,\n 0.516,\n 0.511,\n 0.505,\n 0.513,\n 0.515,\n 0.498,\n 0.494,\n 0.489,\n 0.484,\n 0.493,\n 0.475,\n 0.465,\n 0.439,\n 0.417,\n 0.400,\n ]\n )\n\n if self.dust_type == \"smc\":\n albedo = alb_SMC\n elif self.dust_type == \"mw\":\n albedo = alb_MW\n\n tab = tabular_model(1, name=\"Tabular1D\")\n alb_fit = tab(\n self.wvl_grid,\n lookup_table=albedo,\n name=\"albedo\",\n bounds_error=False,\n fill_value=None,\n method=\"linear\",\n )\n\n xinterp = 1e4 * x\n\n return alb_fit(xinterp)\n\n def get_scattering_phase_function(self, x):\n \"\"\"\n Return the scattering phase function in function of wavelength for the\n corresponding dust type (SMC or MW). 
The scattering phase\n function gives the angle at which the photon scatters.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n Returns\n -------\n g: np array (float)\n g(x) scattering phase function\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n \"\"\"\n # convert to wavenumbers (1/micron) if x input in units\n # otherwise, assume x in appropriate wavenumber units\n with u.add_enabled_equivalencies(u.spectral()):\n x_quant = u.Quantity(x, u.micron, dtype=np.float64)\n\n # strip the quantity to avoid needing to add units to all the\n # polynomical coefficients\n x = x_quant.value\n\n # check that the wavenumbers are within the defined range\n _test_valid_x_range(x, self.x_range, \"WG00\")\n\n # setup the ax vectors\n x = np.atleast_1d(x)\n\n g_MW = np.array(\n [\n 0.800,\n 0.783,\n 0.767,\n 0.756,\n 0.745,\n 0.736,\n 0.727,\n 0.720,\n 0.712,\n 0.707,\n 0.702,\n 0.697,\n 0.691,\n 0.685,\n 0.678,\n 0.646,\n 0.624,\n 0.597,\n 0.563,\n 0.545,\n 0.533,\n 0.511,\n 0.480,\n 0.445,\n 0.420,\n ]\n )\n\n g_SMC = np.array(\n [\n 0.800,\n 0.783,\n 0.767,\n 0.756,\n 0.745,\n 0.736,\n 0.727,\n 0.720,\n 0.712,\n 0.707,\n 0.702,\n 0.697,\n 0.691,\n 0.685,\n 0.678,\n 0.646,\n 0.624,\n 0.597,\n 0.563,\n 0.545,\n 0.533,\n 0.511,\n 0.480,\n 0.445,\n 0.420,\n ]\n )\n\n if self.dust_type == \"smc\":\n g = g_SMC\n elif self.dust_type == \"mw\":\n g = g_MW\n\n tab = tabular_model(1, name=\"Tabular1D\")\n g_fit = tab(\n self.wvl_grid,\n lookup_table=g,\n name=\"albedo\",\n bounds_error=False,\n fill_value=None,\n method=\"linear\",\n )\n\n xinterp = 1e4 * x\n\n return g_fit(xinterp)\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.atleast_1d" ] ]
ioanaif/awkward-1.0
[ "22501ba218646dc24dc515c4394eb22f126d340d" ]
[ "src/awkward/_connect/_numpy.py" ]
[ "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport sys\n\nimport numpy\n\nimport awkward as ak\n\n\ndef convert_to_array(layout, args, kwargs):\n out = ak.operations.convert.to_numpy(layout, allow_missing=False)\n if args == () and kwargs == {}:\n return out\n else:\n return numpy.array(out, *args, **kwargs)\n\n\nimplemented = {}\n\n\ndef array_function(func, types, args, kwargs):\n function = implemented.get(func)\n if function is None:\n return NotImplemented\n else:\n return function(*args, **kwargs)\n\n\ndef implements(numpy_function):\n def decorator(function):\n implemented[getattr(numpy, numpy_function)] = function\n return function\n\n return decorator\n\n\ndef array_ufunc(ufunc, method, inputs, kwargs):\n if method != \"__call__\" or len(inputs) == 0 or \"out\" in kwargs:\n return NotImplemented\n\n behavior = ak._util.behaviorof(*inputs)\n\n nextinputs = []\n for x in inputs:\n cast_fcn = ak._util.custom_cast(x, behavior)\n if cast_fcn is not None:\n x = cast_fcn(x)\n nextinputs.append(\n ak.operations.convert.to_layout(x, allow_record=True, allow_other=True)\n )\n inputs = nextinputs\n\n def adjust(custom, inputs, kwargs):\n args = [\n ak._util.wrap(x, behavior)\n if isinstance(x, (ak.layout.Content, ak.layout.Record))\n else x\n for x in inputs\n ]\n out = custom(*args, **kwargs)\n if not isinstance(out, tuple):\n out = (out,)\n\n return tuple(\n x.layout if isinstance(x, (ak.highlevel.Array, ak.highlevel.Record)) else x\n for x in out\n )\n\n def adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs):\n nextinputs = [\n ak._util.wrap(x, behavior)\n if isinstance(x, (ak.layout.Content, ak.layout.Record))\n else x\n for x in inputs\n ]\n\n out = apply_ufunc(ufunc, method, nextinputs, kwargs)\n\n if out is NotImplemented:\n return None\n else:\n if not isinstance(out, tuple):\n out = (out,)\n out = tuple(\n x.layout\n if isinstance(x, (ak.highlevel.Array, ak.highlevel.Record))\n else x\n for x in out\n )\n return lambda: out\n\n def is_fully_regular(layout):\n if (\n isinstance(layout, ak.layout.RegularArray)\n and layout.parameter(\"__record__\") is None\n and layout.parameter(\"__array__\") is None\n ):\n if isinstance(layout.content, ak.layout.NumpyArray):\n return True\n elif isinstance(layout.content, ak.layout.RegularArray):\n return is_fully_regular(layout.content)\n else:\n return False\n else:\n return False\n\n def deregulate(layout):\n if not is_fully_regular(layout):\n return layout\n else:\n shape = [len(layout)]\n node = layout\n while isinstance(node, ak.layout.RegularArray):\n shape.append(node.size)\n node = node.content\n nparray = ak.nplike.of(node).asarray(node)\n nparray = nparray.reshape(tuple(shape) + nparray.shape[1:])\n return ak.layout.NumpyArray(\n nparray,\n node.identities,\n node.parameters,\n )\n\n def getfunction(inputs):\n signature = [ufunc]\n for x in inputs:\n if isinstance(x, ak.layout.Content):\n record = x.parameter(\"__record__\")\n array = x.parameter(\"__array__\")\n if record is not None:\n signature.append(record)\n elif array is not None:\n signature.append(array)\n elif isinstance(x, ak.layout.NumpyArray):\n signature.append(ak.nplike.of(x).asarray(x).dtype.type)\n else:\n signature.append(None)\n else:\n signature.append(type(x))\n\n custom = ak._util.overload(behavior, signature)\n if custom is not None:\n return lambda: adjust(custom, inputs, kwargs)\n\n if ufunc is numpy.matmul:\n custom_matmul = getfunction_matmul(inputs)\n if 
custom_matmul is not None:\n return custom_matmul\n\n inputs = [deregulate(x) for x in inputs]\n\n if all(\n isinstance(x, ak.layout.NumpyArray)\n or not isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray))\n for x in inputs\n ):\n nplike = ak.nplike.of(*inputs)\n result = getattr(ufunc, method)(\n *[nplike.asarray(x) for x in inputs], **kwargs\n )\n return lambda: (ak.operations.convert.from_numpy(result, highlevel=False),)\n\n for x in inputs:\n if isinstance(x, ak.layout.Content):\n chained_behavior = ak._util.Behavior(ak.behavior, behavior)\n apply_ufunc = chained_behavior[numpy.ufunc, x.parameter(\"__array__\")]\n if apply_ufunc is not None:\n out = adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs)\n if out is not None:\n return out\n apply_ufunc = chained_behavior[numpy.ufunc, x.parameter(\"__record__\")]\n if apply_ufunc is not None:\n out = adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs)\n if out is not None:\n return out\n\n if all(\n x.parameter(\"__array__\") is not None\n or x.parameter(\"__record__\") is not None\n for x in inputs\n if isinstance(x, ak.layout.Content)\n ):\n custom_types = []\n for x in inputs:\n if isinstance(x, ak.layout.Content):\n if x.parameter(\"__array__\") is not None:\n custom_types.append(x.parameter(\"__array__\"))\n elif x.parameter(\"__record__\") is not None:\n custom_types.append(x.parameter(\"__record__\"))\n else:\n custom_types.append(type(x).__name__)\n else:\n custom_types.append(type(x).__name__)\n raise ValueError(\n \"no overloads for custom types: {0}({1})\".format(\n ufunc.__name__,\n \", \".join(custom_types),\n )\n + ak._util.exception_suffix(__file__)\n )\n\n return None\n\n out = ak._util.broadcast_and_apply(\n inputs, getfunction, behavior, allow_records=False, pass_depth=False\n )\n assert isinstance(out, tuple) and len(out) == 1\n return ak._util.wrap(out[0], behavior)\n\n\ndef matmul_for_numba(lefts, rights, dtype):\n total_outer = 0\n total_inner = 0\n total_content = 0\n\n for A, B in zip(lefts, rights):\n first = -1\n for Ai in A:\n if first == -1:\n first = len(Ai)\n elif first != len(Ai):\n raise ValueError(\n \"one of the left matrices in np.matmul is not rectangular\"\n )\n if first == -1:\n first = 0\n rowsA = len(A)\n colsA = first\n\n first = -1\n for Bi in B:\n if first == -1:\n first = len(Bi)\n elif first != len(Bi):\n raise ValueError(\n \"one of the right matrices in np.matmul is not rectangular\"\n )\n if first == -1:\n first = 0\n rowsB = len(B)\n colsB = first\n\n if colsA != rowsB:\n raise ValueError(\n u\"one of the pairs of matrices in np.matmul do not match shape: \"\n u\"(n \\u00d7 k) @ (k \\u00d7 m)\"\n )\n\n total_outer += 1\n total_inner += rowsA\n total_content += rowsA * colsB\n\n outer = numpy.empty(total_outer + 1, numpy.int64)\n inner = numpy.empty(total_inner + 1, numpy.int64)\n content = numpy.zeros(total_content, dtype)\n\n outer[0] = 0\n inner[0] = 0\n outer_i = 1\n inner_i = 1\n content_i = 0\n for A, B in zip(lefts, rights):\n rows = len(A)\n cols = 0\n if len(B) > 0:\n cols = len(B[0])\n mids = 0\n if len(A) > 0:\n mids = len(A[0])\n\n for i in range(rows):\n for j in range(cols):\n for v in range(mids):\n pos = content_i + i * cols + j\n content[pos] += A[i][v] * B[v][j]\n\n outer[outer_i] = outer[outer_i - 1] + rows\n outer_i += 1\n for _ in range(rows):\n inner[inner_i] = inner[inner_i - 1] + cols\n inner_i += 1\n content_i += rows * cols\n\n return outer, inner, content\n\n\nmatmul_for_numba.numbafied = None\n\n\ndef getfunction_matmul(inputs):\n 
inputs = [\n ak._util.recursively_apply(\n x, (lambda _: _), pass_depth=False, numpy_to_regular=True\n )\n for x in inputs\n ]\n\n if len(inputs) == 2 and all(\n isinstance(x, ak._util.listtypes)\n and isinstance(x.content, ak._util.listtypes)\n and isinstance(x.content.content, ak.layout.NumpyArray)\n for x in inputs\n ):\n ak._connect._numba.register_and_check()\n import numba\n\n if matmul_for_numba.numbafied is None:\n matmul_for_numba.numbafied = numba.njit(matmul_for_numba)\n\n lefts = ak.highlevel.Array(inputs[0])\n rights = ak.highlevel.Array(inputs[1])\n dtype = numpy.asarray(lefts[0:0, 0:0, 0:0] + rights[0:0, 0:0, 0:0]).dtype\n\n outer, inner, content = matmul_for_numba.numbafied(lefts, rights, dtype)\n\n return lambda: (\n ak.layout.ListOffsetArray64(\n ak.layout.Index64(outer),\n ak.layout.ListOffsetArray64(\n ak.layout.Index64(inner),\n ak.layout.NumpyArray(content),\n ),\n ),\n )\n\n else:\n return None\n\n\ntry:\n NDArrayOperatorsMixin = numpy.lib.mixins.NDArrayOperatorsMixin\n\nexcept AttributeError:\n from numpy.core import umath as um\n\n def _disables_array_ufunc(obj):\n try:\n return obj.__array_ufunc__ is None\n except AttributeError:\n return False\n\n def _binary_method(ufunc, name):\n def func(self, other):\n if _disables_array_ufunc(other):\n return NotImplemented\n return ufunc(self, other)\n\n func.__name__ = \"__{}__\".format(name)\n return func\n\n def _reflected_binary_method(ufunc, name):\n def func(self, other):\n if _disables_array_ufunc(other):\n return NotImplemented\n return ufunc(other, self)\n\n func.__name__ = \"__r{}__\".format(name)\n return func\n\n def _inplace_binary_method(ufunc, name):\n def func(self, other):\n return ufunc(self, other, out=(self,))\n\n func.__name__ = \"__i{}__\".format(name)\n return func\n\n def _numeric_methods(ufunc, name):\n return (\n _binary_method(ufunc, name),\n _reflected_binary_method(ufunc, name),\n _inplace_binary_method(ufunc, name),\n )\n\n def _unary_method(ufunc, name):\n def func(self):\n return ufunc(self)\n\n func.__name__ = \"__{}__\".format(name)\n return func\n\n class NDArrayOperatorsMixin(object):\n __lt__ = _binary_method(um.less, \"lt\")\n __le__ = _binary_method(um.less_equal, \"le\")\n __eq__ = _binary_method(um.equal, \"eq\")\n __ne__ = _binary_method(um.not_equal, \"ne\")\n __gt__ = _binary_method(um.greater, \"gt\")\n __ge__ = _binary_method(um.greater_equal, \"ge\")\n\n __add__, __radd__, __iadd__ = _numeric_methods(um.add, \"add\")\n __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, \"sub\")\n __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, \"mul\")\n __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(um.matmul, \"matmul\")\n if sys.version_info.major < 3:\n __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, \"div\")\n __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(\n um.true_divide, \"truediv\"\n )\n __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(\n um.floor_divide, \"floordiv\"\n )\n __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, \"mod\")\n if hasattr(um, \"divmod\"):\n __divmod__ = _binary_method(um.divmod, \"divmod\")\n __rdivmod__ = _reflected_binary_method(um.divmod, \"divmod\")\n __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, \"pow\")\n __lshift__, __rlshift__, __ilshift__ = _numeric_methods(um.left_shift, \"lshift\")\n __rshift__, __rrshift__, __irshift__ = _numeric_methods(\n um.right_shift, \"rshift\"\n )\n __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, \"and\")\n __xor__, __rxor__, 
__ixor__ = _numeric_methods(um.bitwise_xor, \"xor\")\n __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, \"or\")\n\n __neg__ = _unary_method(um.negative, \"neg\")\n if hasattr(um, \"positive\"):\n __pos__ = _unary_method(um.positive, \"pos\")\n __abs__ = _unary_method(um.absolute, \"abs\")\n __invert__ = _unary_method(um.invert, \"invert\")\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.asarray", "numpy.zeros" ] ]
hbutsuak95/iv_rl
[ "0f72a8f077a238237027ea96b7d1160c35ac9959", "0f72a8f077a238237027ea96b7d1160c35ac9959", "0f72a8f077a238237027ea96b7d1160c35ac9959", "0f72a8f077a238237027ea96b7d1160c35ac9959" ]
[ "mbbl_envs/mbbl/env/gym_env/box2d/walker.py", "mbbl_envs/mbbl/util/il/camera_pose_ID_solver.py", "bsuite/models/qnet_EP.py", "bsuite/bsuite/experiments/bandit_scale/bandit_scale_test.py" ]
[ "import sys, math\nimport numpy as np\n\nimport Box2D\nfrom Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import colorize, seeding\n\n# This is simple 4-joints walker robot environment.\n#\n# There are two versions:\n#\n# - Normal, with slightly uneven terrain.\n#\n# - Hardcore with ladders, stumps, pitfalls.\n#\n# Reward is given for moving forward, total 300+ points up to the far end. If the robot falls,\n# it gets -100. Applying motor torque costs a small amount of points, more optimal agent\n# will get better score.\n#\n# Heuristic is provided for testing, it's also useful to get demonstrations to\n# learn from. To run heuristic:\n#\n# python gym/envs/box2d/bipedal_walker.py\n#\n# State consists of hull angle speed, angular velocity, horizontal speed, vertical speed,\n# position of joints and joints angular speed, legs contact with ground, and 10 lidar\n# rangefinder measurements to help to deal with the hardcore version. There's no coordinates\n# in the state vector. Lidar is less useful in normal version, but it works.\n#\n# To solve the game you need to get 300 points in 1600 time steps.\n#\n# To solve hardcore version you need 300 points in 2000 time steps.\n#\n# Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.\n\nFPS = 50\nSCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well\n\nMOTORS_TORQUE = 80\nSPEED_HIP = 4\nSPEED_KNEE = 6\nLIDAR_RANGE = 160/SCALE\n\nINITIAL_RANDOM = 5\n\nHULL_POLY =[\n (-30,+9), (+6,+9), (+34,+1),\n (+34,-8), (-30,-8)\n ]\nLEG_DOWN = -8/SCALE\nLEG_W, LEG_H = 8/SCALE, 34/SCALE\n\nVIEWPORT_W = 600\nVIEWPORT_H = 400\n\nTERRAIN_STEP = 14/SCALE\nTERRAIN_LENGTH = 200 # in steps\nTERRAIN_HEIGHT = VIEWPORT_H/SCALE/4\nTERRAIN_GRASS = 10 # low long are grass spots, in steps\nTERRAIN_STARTPAD = 20 # in steps\nFRICTION = 2.5\n\nHULL_FD = fixtureDef(\n shape=polygonShape(vertices=[ (x/SCALE,y/SCALE) for x,y in HULL_POLY ]),\n density=5.0,\n friction=0.1,\n categoryBits=0x0020,\n maskBits=0x001, # collide only with ground\n restitution=0.0) # 0.99 bouncy\n\nLEG_FD = fixtureDef(\n shape=polygonShape(box=(LEG_W/2, LEG_H/2)),\n density=1.0,\n restitution=0.0,\n categoryBits=0x0020,\n maskBits=0x001)\n\nLOWER_FD = fixtureDef(\n shape=polygonShape(box=(0.8*LEG_W/2, LEG_H/2)),\n density=1.0,\n restitution=0.0,\n categoryBits=0x0020,\n maskBits=0x001)\n\nclass ContactDetector(contactListener):\n def __init__(self, env):\n contactListener.__init__(self)\n self.env = env\n def BeginContact(self, contact):\n if self.env.hull==contact.fixtureA.body or self.env.hull==contact.fixtureB.body:\n self.env.game_over = True\n for leg in [self.env.legs[1], self.env.legs[3]]:\n if leg in [contact.fixtureA.body, contact.fixtureB.body]:\n leg.ground_contact = True\n def EndContact(self, contact):\n for leg in [self.env.legs[1], self.env.legs[3]]:\n if leg in [contact.fixtureA.body, contact.fixtureB.body]:\n leg.ground_contact = False\n\nclass BipedalWalker(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : FPS\n }\n\n hardcore = False\n\n def __init__(self):\n self.seed()\n self.viewer = None\n\n self.world = Box2D.b2World()\n self.terrain = None\n self.hull = None\n\n self.prev_shaping = None\n\n self.fd_polygon = fixtureDef(\n shape = polygonShape(vertices=\n [(0, 0),\n (1, 0),\n (1, -1),\n (0, -1)]),\n friction = FRICTION)\n\n self.fd_edge = fixtureDef(\n shape = 
edgeShape(vertices=\n [(0, 0),\n (1, 1)]),\n friction = FRICTION,\n categoryBits=0x0001,\n )\n\n self.reset()\n\n high = np.array([np.inf]*24)\n self.action_space = spaces.Box(np.array([-1,-1,-1,-1]), np.array([+1,+1,+1,+1]))\n self.observation_space = spaces.Box(-high, high)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _destroy(self):\n if not self.terrain: return\n self.world.contactListener = None\n for t in self.terrain:\n self.world.DestroyBody(t)\n self.terrain = []\n self.world.DestroyBody(self.hull)\n self.hull = None\n for leg in self.legs:\n self.world.DestroyBody(leg)\n self.legs = []\n self.joints = []\n\n def _generate_terrain(self, hardcore):\n GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)\n state = GRASS\n velocity = 0.0\n y = TERRAIN_HEIGHT\n counter = TERRAIN_STARTPAD\n oneshot = False\n self.terrain = []\n self.terrain_x = []\n self.terrain_y = []\n for i in range(TERRAIN_LENGTH):\n x = i*TERRAIN_STEP\n self.terrain_x.append(x)\n\n if state==GRASS and not oneshot:\n velocity = 0.8*velocity + 0.01*np.sign(TERRAIN_HEIGHT - y)\n if i > TERRAIN_STARTPAD: velocity += self.np_random.uniform(-1, 1)/SCALE #1\n y += velocity\n\n elif state==PIT and oneshot:\n counter = self.np_random.randint(3, 5)\n poly = [\n (x, y),\n (x+TERRAIN_STEP, y),\n (x+TERRAIN_STEP, y-4*TERRAIN_STEP),\n (x, y-4*TERRAIN_STEP),\n ]\n self.fd_polygon.shape.vertices=poly\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n\n self.fd_polygon.shape.vertices=[(p[0]+TERRAIN_STEP*counter,p[1]) for p in poly]\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n counter += 2\n original_y = y\n\n elif state==PIT and not oneshot:\n y = original_y\n if counter > 1:\n y -= 4*TERRAIN_STEP\n\n elif state==STUMP and oneshot:\n counter = self.np_random.randint(1, 3)\n poly = [\n (x, y),\n (x+counter*TERRAIN_STEP, y),\n (x+counter*TERRAIN_STEP, y+counter*TERRAIN_STEP),\n (x, y+counter*TERRAIN_STEP),\n ]\n self.fd_polygon.shape.vertices=poly\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n\n elif state==STAIRS and oneshot:\n stair_height = +1 if self.np_random.rand() > 0.5 else -1\n stair_width = self.np_random.randint(4, 5)\n stair_steps = self.np_random.randint(3, 5)\n original_y = y\n for s in range(stair_steps):\n poly = [\n (x+( s*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),\n (x+((1+s)*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),\n (x+((1+s)*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),\n (x+( s*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),\n ]\n self.fd_polygon.shape.vertices=poly\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n counter = stair_steps*stair_width\n\n elif state==STAIRS and not oneshot:\n s = stair_steps*stair_width - counter - stair_height\n n = s/stair_width\n y = original_y + (n*stair_height)*TERRAIN_STEP\n\n oneshot = False\n self.terrain_y.append(y)\n counter -= 1\n if counter==0:\n counter = self.np_random.randint(TERRAIN_GRASS/2, TERRAIN_GRASS)\n if state==GRASS and hardcore:\n state = self.np_random.randint(1, _STATES_)\n oneshot = True\n else:\n state = GRASS\n oneshot = True\n\n self.terrain_poly = []\n for i in 
range(TERRAIN_LENGTH-1):\n poly = [\n (self.terrain_x[i], self.terrain_y[i]),\n (self.terrain_x[i+1], self.terrain_y[i+1])\n ]\n self.fd_edge.shape.vertices=poly\n t = self.world.CreateStaticBody(\n fixtures = self.fd_edge)\n color = (0.3, 1.0 if i%2==0 else 0.8, 0.3)\n t.color1 = color\n t.color2 = color\n self.terrain.append(t)\n color = (0.4, 0.6, 0.3)\n poly += [ (poly[1][0], 0), (poly[0][0], 0) ]\n self.terrain_poly.append( (poly, color) )\n self.terrain.reverse()\n\n def _generate_clouds(self):\n # Sorry for the clouds, couldn't resist\n self.cloud_poly = []\n for i in range(TERRAIN_LENGTH//20):\n x = self.np_random.uniform(0, TERRAIN_LENGTH)*TERRAIN_STEP\n y = VIEWPORT_H/SCALE*3/4\n poly = [\n (x+15*TERRAIN_STEP*math.sin(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP),\n y+ 5*TERRAIN_STEP*math.cos(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP) )\n for a in range(5) ]\n x1 = min( [p[0] for p in poly] )\n x2 = max( [p[0] for p in poly] )\n self.cloud_poly.append( (poly,x1,x2) )\n\n def reset(self):\n self._destroy()\n self.world.contactListener_bug_workaround = ContactDetector(self)\n self.world.contactListener = self.world.contactListener_bug_workaround\n self.game_over = False\n self.prev_shaping = None\n self.scroll = 0.0\n self.lidar_render = 0\n\n W = VIEWPORT_W/SCALE\n H = VIEWPORT_H/SCALE\n\n self._generate_terrain(self.hardcore)\n self._generate_clouds()\n\n init_x = TERRAIN_STEP*TERRAIN_STARTPAD/2\n init_y = TERRAIN_HEIGHT+2*LEG_H\n self.hull = self.world.CreateDynamicBody(\n position = (init_x, init_y),\n fixtures = HULL_FD\n )\n self.hull.color1 = (0.5,0.4,0.9)\n self.hull.color2 = (0.3,0.3,0.5)\n self.hull.ApplyForceToCenter((self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True)\n\n self.legs = []\n self.joints = []\n for i in [-1,+1]:\n leg = self.world.CreateDynamicBody(\n position = (init_x, init_y - LEG_H/2 - LEG_DOWN),\n angle = (i*0.05),\n fixtures = LEG_FD\n )\n leg.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)\n leg.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)\n rjd = revoluteJointDef(\n bodyA=self.hull,\n bodyB=leg,\n localAnchorA=(0, LEG_DOWN),\n localAnchorB=(0, LEG_H/2),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=MOTORS_TORQUE,\n motorSpeed = i,\n lowerAngle = -0.8,\n upperAngle = 1.1,\n )\n self.legs.append(leg)\n self.joints.append(self.world.CreateJoint(rjd))\n\n lower = self.world.CreateDynamicBody(\n position = (init_x, init_y - LEG_H*3/2 - LEG_DOWN),\n angle = (i*0.05),\n fixtures = LOWER_FD\n )\n lower.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)\n lower.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)\n rjd = revoluteJointDef(\n bodyA=leg,\n bodyB=lower,\n localAnchorA=(0, -LEG_H/2),\n localAnchorB=(0, LEG_H/2),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=MOTORS_TORQUE,\n motorSpeed = 1,\n lowerAngle = -1.6,\n upperAngle = -0.1,\n )\n lower.ground_contact = False\n self.legs.append(lower)\n self.joints.append(self.world.CreateJoint(rjd))\n\n self.drawlist = self.terrain + self.legs + [self.hull]\n\n class LidarCallback(Box2D.b2.rayCastCallback):\n def ReportFixture(self, fixture, point, normal, fraction):\n if (fixture.filterData.categoryBits & 1) == 0:\n return 1\n self.p2 = point\n self.fraction = fraction\n return 0\n self.lidar = [LidarCallback() for _ in range(10)]\n\n return self.step(np.array([0,0,0,0]))[0]\n\n def step(self, action):\n #self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help\n control_speed = False # Should be easier as well\n if control_speed:\n 
self.joints[0].motorSpeed = float(SPEED_HIP * np.clip(action[0], -1, 1))\n self.joints[1].motorSpeed = float(SPEED_KNEE * np.clip(action[1], -1, 1))\n self.joints[2].motorSpeed = float(SPEED_HIP * np.clip(action[2], -1, 1))\n self.joints[3].motorSpeed = float(SPEED_KNEE * np.clip(action[3], -1, 1))\n else:\n self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))\n self.joints[0].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1))\n self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))\n self.joints[1].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1))\n self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))\n self.joints[2].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1))\n self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))\n self.joints[3].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1))\n\n self.world.Step(1.0/FPS, 6*30, 2*30)\n\n pos = self.hull.position\n vel = self.hull.linearVelocity\n\n for i in range(10):\n self.lidar[i].fraction = 1.0\n self.lidar[i].p1 = pos\n self.lidar[i].p2 = (\n pos[0] + math.sin(1.5*i/10.0)*LIDAR_RANGE,\n pos[1] - math.cos(1.5*i/10.0)*LIDAR_RANGE)\n self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)\n\n state = [\n self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible.\n 2.0*self.hull.angularVelocity/FPS,\n 0.3*vel.x*(VIEWPORT_W/SCALE)/FPS, # Normalized to get -1..1 range\n 0.3*vel.y*(VIEWPORT_H/SCALE)/FPS,\n self.joints[0].angle, # This will give 1.1 on high up, but it's still OK (and there should be spikes on hiting the ground, that's normal too)\n self.joints[0].speed / SPEED_HIP,\n self.joints[1].angle + 1.0,\n self.joints[1].speed / SPEED_KNEE,\n 1.0 if self.legs[1].ground_contact else 0.0,\n self.joints[2].angle,\n self.joints[2].speed / SPEED_HIP,\n self.joints[3].angle + 1.0,\n self.joints[3].speed / SPEED_KNEE,\n 1.0 if self.legs[3].ground_contact else 0.0\n ]\n state += [l.fraction for l in self.lidar]\n assert len(state)==24\n\n self.scroll = pos.x - VIEWPORT_W/SCALE/5\n\n shaping = 130*pos[0]/SCALE # moving forward is a way to receive reward (normalized to get 300 on completion)\n shaping -= 5.0*abs(state[0]) # keep head straight, other than that and falling, any behavior is unpunished\n\n reward = 0\n if self.prev_shaping is not None:\n reward = shaping - self.prev_shaping\n self.prev_shaping = shaping\n\n for a in action:\n reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)\n # normalized to about -50.0 using heuristic, more optimal agent should spend less\n\n done = False\n if self.game_over or pos[0] < 0:\n reward = -100\n done = True\n if pos[0] > (TERRAIN_LENGTH-TERRAIN_GRASS)*TERRAIN_STEP:\n done = True\n return np.array(state), reward, done, {}\n\n def render(self, mode='human'):\n from gym.envs.classic_control import rendering\n if self.viewer is None:\n self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)\n self.viewer.set_bounds(self.scroll, VIEWPORT_W/SCALE + self.scroll, 0, VIEWPORT_H/SCALE)\n\n self.viewer.draw_polygon( [\n (self.scroll, 0),\n (self.scroll+VIEWPORT_W/SCALE, 0),\n (self.scroll+VIEWPORT_W/SCALE, VIEWPORT_H/SCALE),\n (self.scroll, VIEWPORT_H/SCALE),\n ], color=(0.9, 0.9, 1.0) )\n for poly,x1,x2 in self.cloud_poly:\n if x2 < self.scroll/2: continue\n if x1 > self.scroll/2 + VIEWPORT_W/SCALE: continue\n self.viewer.draw_polygon( [(p[0]+self.scroll/2, p[1]) for p in poly], color=(1,1,1))\n for poly, color in self.terrain_poly:\n if 
poly[1][0] < self.scroll: continue\n if poly[0][0] > self.scroll + VIEWPORT_W/SCALE: continue\n self.viewer.draw_polygon(poly, color=color)\n\n self.lidar_render = (self.lidar_render+1) % 100\n i = self.lidar_render\n if i < 2*len(self.lidar):\n l = self.lidar[i] if i < len(self.lidar) else self.lidar[len(self.lidar)-i-1]\n self.viewer.draw_polyline( [l.p1, l.p2], color=(1,0,0), linewidth=1 )\n\n for obj in self.drawlist:\n for f in obj.fixtures:\n trans = f.body.transform\n if type(f.shape) is circleShape:\n t = rendering.Transform(translation=trans*f.shape.pos)\n self.viewer.draw_circle(f.shape.radius, 30, color=obj.color1).add_attr(t)\n self.viewer.draw_circle(f.shape.radius, 30, color=obj.color2, filled=False, linewidth=2).add_attr(t)\n else:\n path = [trans*v for v in f.shape.vertices]\n self.viewer.draw_polygon(path, color=obj.color1)\n path.append(path[0])\n self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)\n\n flagy1 = TERRAIN_HEIGHT\n flagy2 = flagy1 + 50/SCALE\n x = TERRAIN_STEP*3\n self.viewer.draw_polyline( [(x, flagy1), (x, flagy2)], color=(0,0,0), linewidth=2 )\n f = [(x, flagy2), (x, flagy2-10/SCALE), (x+25/SCALE, flagy2-5/SCALE)]\n self.viewer.draw_polygon(f, color=(0.9,0.2,0) )\n self.viewer.draw_polyline(f + [f[0]], color=(0,0,0), linewidth=2 )\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def close(self):\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n\nclass BipedalWalkerHardcore(BipedalWalker):\n hardcore = True\n\nif __name__==\"__main__\":\n # Heurisic: suboptimal, have no notion of balance.\n env = BipedalWalker()\n env.reset()\n steps = 0\n total_reward = 0\n a = np.array([0.0, 0.0, 0.0, 0.0])\n STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1,2,3\n SPEED = 0.29 # Will fall forward on higher speed\n state = STAY_ON_ONE_LEG\n moving_leg = 0\n supporting_leg = 1 - moving_leg\n SUPPORT_KNEE_ANGLE = +0.1\n supporting_knee_angle = SUPPORT_KNEE_ANGLE\n while True:\n s, r, done, info = env.step(a)\n total_reward += r\n if steps % 20 == 0 or done:\n print(\"\\naction \" + str([\"{:+0.2f}\".format(x) for x in a]))\n print(\"step {} total_reward {:+0.2f}\".format(steps, total_reward))\n print(\"hull \" + str([\"{:+0.2f}\".format(x) for x in s[0:4] ]))\n print(\"leg0 \" + str([\"{:+0.2f}\".format(x) for x in s[4:9] ]))\n print(\"leg1 \" + str([\"{:+0.2f}\".format(x) for x in s[9:14]]))\n steps += 1\n\n contact0 = s[8]\n contact1 = s[13]\n moving_s_base = 4 + 5*moving_leg\n supporting_s_base = 4 + 5*supporting_leg\n\n hip_targ = [None,None] # -0.8 .. +1.1\n knee_targ = [None,None] # -0.6 .. 
+0.9\n hip_todo = [0.0, 0.0]\n knee_todo = [0.0, 0.0]\n\n if state==STAY_ON_ONE_LEG:\n hip_targ[moving_leg] = 1.1\n knee_targ[moving_leg] = -0.6\n supporting_knee_angle += 0.03\n if s[2] > SPEED: supporting_knee_angle += 0.03\n supporting_knee_angle = min( supporting_knee_angle, SUPPORT_KNEE_ANGLE )\n knee_targ[supporting_leg] = supporting_knee_angle\n if s[supporting_s_base+0] < 0.10: # supporting leg is behind\n state = PUT_OTHER_DOWN\n if state==PUT_OTHER_DOWN:\n hip_targ[moving_leg] = +0.1\n knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE\n knee_targ[supporting_leg] = supporting_knee_angle\n if s[moving_s_base+4]:\n state = PUSH_OFF\n supporting_knee_angle = min( s[moving_s_base+2], SUPPORT_KNEE_ANGLE )\n if state==PUSH_OFF:\n knee_targ[moving_leg] = supporting_knee_angle\n knee_targ[supporting_leg] = +1.0\n if s[supporting_s_base+2] > 0.88 or s[2] > 1.2*SPEED:\n state = STAY_ON_ONE_LEG\n moving_leg = 1 - moving_leg\n supporting_leg = 1 - moving_leg\n\n if hip_targ[0]: hip_todo[0] = 0.9*(hip_targ[0] - s[4]) - 0.25*s[5]\n if hip_targ[1]: hip_todo[1] = 0.9*(hip_targ[1] - s[9]) - 0.25*s[10]\n if knee_targ[0]: knee_todo[0] = 4.0*(knee_targ[0] - s[6]) - 0.25*s[7]\n if knee_targ[1]: knee_todo[1] = 4.0*(knee_targ[1] - s[11]) - 0.25*s[12]\n\n hip_todo[0] -= 0.9*(0-s[0]) - 1.5*s[1] # PID to keep head strait\n hip_todo[1] -= 0.9*(0-s[0]) - 1.5*s[1]\n knee_todo[0] -= 15.0*s[3] # vertical speed, to damp oscillations\n knee_todo[1] -= 15.0*s[3]\n\n a[0] = hip_todo[0]\n a[1] = knee_todo[0]\n a[2] = hip_todo[1]\n a[3] = knee_todo[1]\n a = np.clip(0.5*a, -1.0, 1.0)\n\n env.render()\n if done: break\n\n", "\"\"\"\n @brief: Solve the joints (states) via inverse dynmaics from a list of 2d\n frames\n @input:\n expert_2d_poses\n dynamics_model (inverse_dynamics, forward_dynamics)\n @output:\n qpos, camera_state\n @author: Tingwu Wang\n\n @Date: Jan 15, 2019\n\"\"\"\n# from pyquaternion import Quaternion\nimport numpy as np\nfrom mbbl.util.il.expert_data_util import load_pose_data\nfrom mbbl.env import env_register\nfrom mbbl.util.il.camera_model import camera_matrix_from_state\nfrom mbbl.util.il.camera_model import camera_state_from_info\nfrom mbbl.util.il.camera_model import get_projected_2dpose\nfrom mbbl.util.il.pose_visualization import visualize_sol_pose\nfrom mbbl.util.il.il_util import interpolate_qvel_qacc\n# from mbbl.util.il.camera_model import xyaxis2quaternion\nfrom mbbl.util.common import logger\nfrom scipy import optimize\n\n\nclass solver(object):\n\n def __init__(self, data_path, sol_qpos_freq, opt_var_list,\n camera_info, camera_id, optimize_args,\n imitation_length=1000, gt_camera_info={}, log_path=''):\n \"\"\" @brief:\n @data_path: the path to the expert data npy file\n @opt_var_list: the variable to optimize\n # OLD: ['qpos', 'xyz_pos', 'quaternion', 'fov', 'image_size']\n # NEW: ['qpos', 'xyz_pos', 'cam_view', 'fov', 'image_size']\n\n cam_view is the two angels, that are used to describe the\n direction of the camera\n\n If the var is not in @opt_var_list, then the value is\n assumed to be given and will be read from @camera_info\n\n @camera_info: the dict to record the camera info. 
A typical\n camera_info looks like this.\n {'mode': 'trackcom',\n 'tracker_type': 'com',\n 'tracker': 'torso',\n 'xyz_pos': np.array([0, -3, 0]),\n 'xyaxis': np.array([1, 0, 0, 0, 0, -1]),\n 'image_size': 400,\n 'fov': 45}\n \"\"\"\n\n self._optimize_args = optimize_args\n self._log_path = log_path\n self._gt_camera_info = gt_camera_info[camera_id]\n self._camera_info = camera_info[camera_id]\n self._sol_qpos_freq = sol_qpos_freq\n self._imitation_length = imitation_length\n self._iteration = 0\n\n # parse the optimization vars\n self._all_var_list = \\\n ['qpos', 'xyz_pos', 'cam_view', 'fov', 'image_size']\n self._opt_var_list = opt_var_list\n self._known_var_list = [key for key in self._all_var_list\n if key not in self._opt_var_list]\n\n self._load_target_data(data_path, camera_id)\n\n self._init_env_engine()\n self._init_data()\n\n def _init_env_engine(self):\n self._env_physics_engine = env_physics_engine(self._env_name)\n self._len_qpos = self._env_physics_engine._env.get_qpos_size()\n\n def _load_target_data(self, data_path, camera_id):\n self._data_path = data_path\n\n # self._target_data should be kept from being accessed!\n self._target_data, self._target_2d_poses, self._env_name, \\\n self._frame_dt = load_pose_data(self._data_path, camera_id,\n self._imitation_length)\n\n self._num_target_frames = len(self._target_2d_poses)\n\n def _init_data(self):\n \"\"\" @brief:\n The optimizer needs to optimize the following variables:\n @qpos: the states (qpos, aka joint angles) of all the frames\n [self._num_frames // self._sol_qpos_freq, len(env.qpos)]\n\n @camera_state: the parameters needed for the\n\n @self._sol: qpos + camera_state\n\n 1. self._sol_qpos_frameid: record the frameid of each qpos in\n the self._sol\n The actual qpos is interpolated from the sol_qpos using\n catmul-rom algorithm\n\n see mbbl/util/il/camera_model.py\n \"\"\"\n assert self._camera_info['mode'] in ['trackcom', 'static', 'free']\n assert (self._num_target_frames - 1) % self._sol_qpos_freq == 0\n\n # qpos of the frames in between two sol_qpos will be interpolated\n self._sol_qpos_frameid = \\\n np.arange(0, self._num_target_frames, self._sol_qpos_freq)\n self._num_sol_qpos = len(self._sol_qpos_frameid)\n\n # pos(3), cam_view(2), fov(1), image_size(1)\n if self._camera_info['mode'] in ['static', 'trackcom']:\n self._camera_state_size = 7\n qpos_sol_size = self._num_sol_qpos * self._len_qpos\n # self._sol = np.zeros([qpos_sol_size + self._camera_state_size])\n self._bounds, self._sol = \\\n self._env_physics_engine.get_bounds_and_init_sol(\n self._camera_info['mode'], self._num_sol_qpos,\n self._len_qpos, self._camera_state_size\n )\n\n self._var_to_sol_id = {\n 'qpos': np.array(range(0, qpos_sol_size)),\n 'camera_state': np.array(range(qpos_sol_size,\n qpos_sol_size + 7)),\n # details\n 'xyz_pos': np.array(range(qpos_sol_size, qpos_sol_size + 3)),\n 'cam_view': np.array(range(qpos_sol_size + 3,\n qpos_sol_size + 5)),\n 'fov': np.array(range(qpos_sol_size + 5,\n qpos_sol_size + 6)),\n 'image_size': np.array(range(qpos_sol_size + 6,\n qpos_sol_size + 7)),\n }\n\n # assign the known variables\n for key in self._known_var_list:\n if key == 'qpos':\n res = (self._imitation_length - 1) % self._sol_qpos_freq\n assert res == 0\n num_sol_qpos = \\\n (self._imitation_length - 1) // self._sol_qpos_freq + 1\n sol_qpos_id = [i_pos * self._sol_qpos_freq\n for i_pos in range(num_sol_qpos)]\n self._sol[self._var_to_sol_id['qpos']] = np.reshape(\n self._target_data['qpos'][np.array(sol_qpos_id)], [-1]\n )\n 
print(self._sol[self._var_to_sol_id['qpos']])\n else:\n self._sol[self._var_to_sol_id[key]] = self._camera_info[key]\n print(self._sol[self._var_to_sol_id[key]])\n # if key == 'quaternion':\n # #### xyaxis is equivalent to quaternion\n # #### self._sol[self._var_to_sol_id['quaternion']] = \\\n # #### xyaxis2quaternion(self._camera_info['xyaxis'])\n # else:\n\n elif self._camera_state_type == 'free':\n self._sol = np.zeros(\n [self._num_sol_qpos * self._len_qpos +\n self._num_sol_qpos * self._camera_state_size]\n )\n\n qpos_sol_size = self._num_sol_qpos * self._len_qpos\n self._var_to_sol_id = {\n 'qpos': np.array(range(0, qpos_sol_size)),\n 'xyz_pos': np.array(range(qpos_sol_size, qpos_sol_size + 3)),\n 'cam_view': np.array(range(qpos_sol_size + 3,\n qpos_sol_size + 5)),\n 'fov': np.array(range(qpos_sol_size + 5, qpos_sol_size + 6)),\n 'image_size': np.array(range(qpos_sol_size + 6,\n qpos_sol_size + 7)),\n }\n\n # TODO\n raise NotImplementedError\n\n else:\n raise NotImplementedError\n\n def _get_fd_gradient(self, sol):\n \"\"\" @brief: use finite_difference to calculate the gradient. Due to the\n locality of the solution space, we can use some small trick to speed\n up the gradient process\n \"\"\"\n\n gradient = np.zeros([1, len(sol)])\n epsilon = 1e-3 # used for finite difference\n\n # get the base values, and the base interpolation values:\n center_data_dict = {'physics_loss': None, 'projection_loss': None}\n center_loss = self._loss_function(sol, fetch_data_dict=center_data_dict)\n sol_qpos = np.reshape(sol[self._var_to_sol_id['qpos']],\n [-1, self._len_qpos])\n camera_state = sol[self._var_to_sol_id['camera_state']]\n\n if 'qpos' in self._opt_var_list:\n\n logger.info('Calculating the gradient of qpos')\n # utilize the local connectivity of the qpos\n # locate the id of the qpos\n for i_derivative in range(self._num_sol_qpos * self._len_qpos):\n sol_id = i_derivative + self._var_to_sol_id['qpos'][0]\n center_sol_qpos_id = i_derivative // self._len_qpos\n start_sol_qpos_id = max(center_sol_qpos_id - 3, 0)\n end_sol_qpos_id = min(center_sol_qpos_id + 3,\n self._num_sol_qpos - 1)\n\n # get everything within the range of [start_sol_qpos_id,\n # end_sol_qpos_id], take the forward finite difference step\n forward_sol_qpos = np.array(\n sol_qpos[start_sol_qpos_id: end_sol_qpos_id + 1], copy=True\n )\n forward_sol_qpos[center_sol_qpos_id - start_sol_qpos_id,\n i_derivative % self._len_qpos] += epsilon\n\n forward_loss, forward_data_dict = \\\n self._loss_from_sol_qpos_camera_state(\n forward_sol_qpos, camera_state,\n center_sol_qpos_id=center_sol_qpos_id\n )\n\n center_physics_loss = center_data_dict['physics_loss'][\n start_sol_qpos_id * self._sol_qpos_freq:\n end_sol_qpos_id * self._sol_qpos_freq\n ]\n center_projection_loss = center_data_dict['projection_loss'][\n start_sol_qpos_id * self._sol_qpos_freq:\n end_sol_qpos_id * self._sol_qpos_freq + 1\n ]\n\n # make sure the ids are matched\n assert len(forward_data_dict['physics_loss']) == \\\n len(center_physics_loss) and \\\n len(forward_data_dict['projection_loss']) == \\\n len(center_projection_loss)\n\n difference_of_loss = forward_loss - \\\n np.mean(center_physics_loss) - \\\n np.mean(center_projection_loss)\n\n gradient[0, sol_id] = difference_of_loss\n\n for opt_var in ['xyz_pos', 'cam_view', 'fov', 'image_size']:\n if opt_var not in self._opt_var_list:\n continue\n logger.info('Calculating the gradient of {}'.format(opt_var))\n\n # TODO: for xyz_pos / fov / image_size, there is speed-up available\n for i_derivative in 
range(len(self._var_to_sol_id[opt_var])):\n\n sol_id = i_derivative + self._var_to_sol_id[opt_var][0]\n camera_state_id = sol_id - len(self._var_to_sol_id['qpos'])\n forward_camera_state = np.array(camera_state, copy=True)\n if opt_var == 'cam_view':\n # for quaternion, take care of the length invariance\n quat_id = self._var_to_sol_id['quaternion']\n raise NotImplementedError\n forward_camera_state[camera_state_id] += \\\n epsilon * np.linalg.norm(sol[quat_id])\n else:\n forward_camera_state[camera_state_id] += epsilon\n\n forward_loss, _ = self._loss_from_sol_qpos_camera_state(\n sol_qpos, forward_camera_state\n )\n gradient[0, sol_id] = forward_loss - center_loss\n\n if len(self._opt_var_list) == 0:\n raise ValueError('At least one of the var needs to be optimzied')\n logger.info('Gradient calculated')\n\n return gradient\n\n def _loss_function(self, sol, fetch_data_dict={}):\n \"\"\" @brief: the loss function to be used by the LBFGS optimizer\n\n @fetch_data_dict:\n We can fetch some intermediate variables (interpolated qpos /\n qvel / qacc)\n \"\"\"\n\n if self._camera_info['mode'] in ['static', 'trackcom']:\n # only the qposes\n sol_qpos = sol[self._var_to_sol_id['qpos']]\n sol_qpos = sol_qpos.reshape([-1, self._len_qpos])\n camera_state = sol[self._var_to_sol_id['camera_state']]\n total_loss, _fetch_data_dict = \\\n self._loss_from_sol_qpos_camera_state(sol_qpos, camera_state)\n\n else:\n raise NotImplementedError # TODO for free\n\n # gather the data that can be reused\n for key in fetch_data_dict:\n fetch_data_dict[key] = _fetch_data_dict[key]\n logger.info(\"Current loss: {}\".format(total_loss))\n logger.info(\"\\tphysics loss: {}\".format(\n np.mean(_fetch_data_dict['physics_loss']))\n )\n logger.info(\"\\tproject loss: {}\".format(\n np.mean(_fetch_data_dict['projection_loss']))\n )\n return total_loss\n\n def generate_pose_3d(self, sub_iter=0):\n data_dict = {'gt': {}, 'sol': {}}\n # the optimized qpos and matrix\n sol_qpos = np.reshape(self._sol[self._var_to_sol_id['qpos']],\n [-1, self._len_qpos])\n\n data_dict['sol']['camera_state'] = \\\n self._sol[self._var_to_sol_id['camera_state']]\n data_dict['sol']['mode'] = self._camera_info['mode']\n _, _, data_dict['sol']['qpos'] = interpolate_qvel_qacc(\n sol_qpos, self._len_qpos, self._sol_qpos_freq, self._frame_dt\n )\n\n # the groundtruth qpos and camera_matrix\n data_dict['gt']['qpos'] = \\\n self._target_data['qpos'][:self._imitation_length]\n data_dict['gt']['mode'] = self._gt_camera_info['mode']\n\n data_dict['gt']['camera_state'] = \\\n camera_state_from_info(self._gt_camera_info,\n consider_trackcom=False,\n use_quaternion=False)\n # visualize and save the results\n visualize_sol_pose(self._env_physics_engine, self._log_path,\n data_dict, self._env_name, self._iteration, sub_iter)\n\n def solve(self):\n self._iteration += 1\n assert self._camera_info['mode'] in ['static', 'trackcom']\n bounds = np.transpose(np.array(self._bounds, copy=True), [1, 0])\n\n \"\"\"\n optimized_results = optimize.minimize(\n self._loss_function, self._sol, method='L-BFGS-B',\n jac=self._get_fd_gradient, bounds=bounds,\n )\n \"\"\"\n self.generate_pose_3d() # visualize results\n for sub_iter in range(self._optimize_args['lbfgs_opt_iteration']):\n optimized_results = optimize.minimize(\n self._loss_function, self._sol, method='L-BFGS-B',\n bounds=bounds\n )\n self._sol = optimized_results['x']\n\n self.generate_pose_3d(sub_iter + 1) # visualize results\n\n return self._sol\n\n def _loss_from_sol_qpos_camera_state(self, sol_qpos, camera_state,\n 
center_sol_qpos_id=-1):\n\n # get the qvel and qpos\n qvel, qacc, qpos = interpolate_qvel_qacc(\n sol_qpos, self._len_qpos, self._sol_qpos_freq, self._frame_dt\n )\n\n # calculate the physics loss (action loss)\n qfrc_inverse = \\\n self._env_physics_engine.get_inverse_action(qpos, qvel, qacc)\n physics_loss = \\\n self._env_physics_engine.get_physics_loss(qfrc_inverse)\n\n # the projection loss\n pose_3d, center_of_mass = self._env_physics_engine.get_pose3d(\n qpos,\n get_center_of_mass=(self._camera_info['mode'] == 'trackcom')\n )\n matrix = self._env_physics_engine.camera_matrix_from_state(\n camera_state, center_of_mass\n )\n projected_pose = \\\n self._env_physics_engine.get_projected_2dpose(pose_3d, matrix)\n if center_sol_qpos_id < 0:\n target_2d_pose = self._target_2d_poses\n else:\n start_frame_id = (center_sol_qpos_id - 3) * self._sol_qpos_freq\n start_frame_id = max(start_frame_id, 0)\n target_2d_pose = self._target_2d_poses[\n start_frame_id: start_frame_id + len(projected_pose)\n ]\n projection_loss = np.square(projected_pose - target_2d_pose)\n var = np.var(target_2d_pose, axis=0)\n projection_loss /= (var[None, :, :] * 0.0 + 1.0) # NOTE: TODO\n # projection_loss /= var[None, :, :]\n\n # TODO: how to normalize the loss?\n total_loss = np.mean(projection_loss) + \\\n self._optimize_args['physics_loss_lambda'] * np.mean(physics_loss)\n fetch_data_dict = {\n 'qvel': qvel, 'qacc': qacc, 'qpos': qpos,\n 'qfrc_inverse': qfrc_inverse,\n 'pose_2d': projected_pose, 'pose_3d': pose_3d,\n 'physics_loss': physics_loss, 'projection_loss': projection_loss\n }\n\n return total_loss, fetch_data_dict\n\n\nclass env_physics_engine(object):\n\n def __init__(self, env_name):\n self._env_name = env_name\n\n self._env, self._env_info = env_register.make_env(self._env_name, 1234)\n self._joint_range = \\\n np.array(self._env._controller_info['range']) / 180.0 * np.pi\n self._len_qpos = self._env.get_qpos_size()\n self._control_info = self._env.get_controller_info()\n\n def get_pose3d(self, qpos, get_center_of_mass=False):\n # loop over each qpos candidates\n assert qpos.shape[1] == self._len_qpos\n\n num_data = qpos.shape[0]\n pos_shape = self._env.get_pos_size() # [num of pose, 3]\n poses_3d = np.zeros([num_data, pos_shape[0], pos_shape[1]])\n if get_center_of_mass:\n center_of_mass = np.zeros([num_data, 3])\n else:\n center_of_mass = None\n\n for i_data in range(num_data):\n\n # set the qpos\n with self._env._env.physics.reset_context():\n self._env._env.physics.data.qpos[:] = qpos[i_data]\n\n # get the poses3d\n poses_3d[i_data, :, :] = self._env.get_pos()\n\n if get_center_of_mass:\n center_of_mass[i_data, :] = np.array(\n self._env._env.physics.named.data.subtree_com['torso'],\n copy=True\n )\n\n return poses_3d, center_of_mass\n\n def get_inverse_action(self, qpos, qvel, qacc):\n num_data = qpos.shape[0]\n assert num_data == qvel.shape[0] and num_data == qacc.shape[0]\n # TODO: not necessary true\n assert qpos.shape[1] == qvel.shape[1] and qpos.shape[1] == qacc.shape[1]\n\n # qfrc_inverse = np.zeros([num_data, self._env_info['action_size']])\n qfrc_inverse = np.zeros([num_data - 1, self._control_info['dof']])\n\n for i_data in range(num_data - 1):\n\n # set the qpos TODO: TEST THIS FUNCTION\n inverse_output = self._env._env.physics.get_inverse_output(\n qpos[i_data], qvel[i_data], qacc[i_data]\n )\n\n # get the poses3d\n qfrc_inverse[i_data, :] = inverse_output\n\n return qfrc_inverse\n\n def get_physics_loss(self, qfrc_inverse):\n # loss = 0.0\n # the forces on the free joints, TODO: contact 
model?\n # loss += np.square(actions[:, self._control_info['unactuated_id']])\n # actions = qfrc_inverse[]\n\n # the forces applied on the joints\n action = np.abs(\n qfrc_inverse[:, self._control_info['actuated_id']] /\n self._control_info['gear'][None, :]\n )\n gear_violation = np.maximum(action - 1.0, 0.0)\n # gear_violation = -self._control_info['gear'][None, :] + \\\n # np.abs(actions[:, self._control_info['actuated_id']])\n losses = np.square(gear_violation)\n\n if len(losses) == 0:\n losses = np.array([0])\n\n return losses\n\n def get_center_of_mass(self, qpos):\n # loop over each qpos candidates\n assert qpos.shape[1] == self._len_qpos\n\n num_data = qpos.shape[0]\n pos_shape = self._env.get_pos_size() # [num of pose, 3]\n poses_3d = np.zeros([num_data, pos_shape[0], pos_shape[1]])\n\n for i_data in range(num_data):\n\n # set the qpos\n with self._env._env.physics.reset_context():\n self._env._env.physics.data.qpos[:] = qpos[i_data]\n\n # get the poses3d\n poses_3d[i_data, :, :] = self._env.get_pos()\n\n return poses_3d\n\n def camera_matrix_from_state(self, camera_state, center_of_mass=None):\n \"\"\" @brief: it is different from\n mbbl.util.il.camera_model.camera_matrix_from_state\n in that it operates on batched data\n \"\"\"\n if center_of_mass is None:\n num_data = 1\n camera_state = camera_state.reshape([1, -1])\n center_of_mass = 0.0 * camera_state[:3]\n else:\n num_data = center_of_mass.shape[0]\n camera_state = np.tile(camera_state.reshape([1, -1]), [num_data, 1])\n\n batched_camera_matrix = np.zeros([num_data, 4, 4])\n com_offset = np.zeros(camera_state.shape[1])\n for i_data in range(num_data):\n # for static camera, this term is 0\n com_offset[:3] = center_of_mass[i_data]\n # import pdb; pdb.set_trace()\n i_state = camera_state[i_data] + com_offset\n batched_camera_matrix[i_data] = \\\n camera_matrix_from_state(i_state, use_quaternion=False)\n\n return batched_camera_matrix\n\n def get_projected_2dpose(self, poses_3d, matrix):\n num_data = poses_3d.shape[0]\n assert matrix.shape[0] == num_data\n\n poses_2d = np.zeros([num_data, poses_3d.shape[1], 2])\n\n for i_data in range(num_data):\n i_matrix = matrix[i_data] if len(matrix) == num_data else matrix[0]\n poses_2d[i_data] = \\\n get_projected_2dpose(poses_3d[i_data], i_matrix)\n return poses_2d\n\n def get_bounds_and_init_sol(self, camera_mode, num_sol_qpos, len_qpos,\n camera_state_size):\n if camera_mode in ['static', 'trackcom']:\n qpos_sol_size = num_sol_qpos * len_qpos\n self._bounds = np.zeros([2, qpos_sol_size + camera_state_size])\n self._init_sol = np.zeros([qpos_sol_size + camera_state_size])\n self._env.reset()\n init_qpos = self._env._env.physics.data.qpos\n\n # the bounds and init value for the qpos\n self._bounds[:, :qpos_sol_size] = np.tile(\n np.transpose(self._joint_range, [1, 0]), [1, num_sol_qpos]\n )\n self._init_sol[:qpos_sol_size] = np.tile(init_qpos, [num_sol_qpos])\n\n # the bounds for camera_state\n assert camera_state_size == 7\n self._bounds[:, qpos_sol_size:] = np.transpose(\n np.array([[-5, 5], [-5, 5], [-5, 5], # xyz_pos\n [-31.4, 31.4], [-31.4, 31.4], # is it ok?\n # fov and image_size\n [30, 60], [300, 500]])\n )\n self._init_sol[qpos_sol_size:] = \\\n np.array([0, -2, 0, 1.57, 1.57, 45, 400])\n else:\n raise NotImplementedError\n\n return self._bounds, np.array(self._init_sol, copy=True)\n", "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nfrom .noisy_linear import NoisyLinear\n\n\nclass Enet(nn.Module):\n def __init__(self) -> None:\n 
super(Enet, self).__init__()\n return\n\n def get_max_action(self, observation: torch.Tensor) -> int:\n \"\"\"\n Get the action with the maximum q-value for an observation.\n Args:\n observation(torch.Tensor): an observation\n Returns:\n int: action with the maximum q-value for the current state\n \"\"\"\n qvals = self.forward(observation)\n return int(torch.argmax(qvals, dim=-1).cpu().detach().numpy())\n\n\nclass Epn(Enet):\n def __init__(self, states_size: np.ndarray, action_size: np.ndarray, settings: dict) -> None:\n \"\"\"\n Initializes the neural network.\n Args:\n states_size: Size of the input space.\n action_size:Size of the action space.\n settings: dictionary with settings\n \"\"\"\n super(Epn, self).__init__()\n self.batch_size = settings[\"batch_size\"]\n self.noisy_net = settings['noisy_nets']\n layers_size = settings[\"layers_sizes\"][0] * 2\n if not self.noisy_net:\n self.FC1 = nn.Linear(int(states_size), layers_size)\n self.FC2 = nn.Linear(layers_size, layers_size)\n self.FC3 = nn.Linear(layers_size, int(action_size))\n else:\n self.FC1 = NoisyLinear(int(states_size), layers_size )\n self.FC2 = NoisyLinear(layers_size, layers_size)\n self.FC3 = NoisyLinear(layers_size, int(action_size))\n self.reset()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward step of the neural network\n Args:\n x(torch.Tensor): observation or a batch of observations\n\n Returns:\n torch.Tensor: q-values for all observations and actions, size: batch_size x actions_size\n \"\"\"\n x = functional.relu(self.FC1(x))\n x = functional.relu(self.FC2(x))\n return functional.softplus(self.FC3(x))\n\n def reset(self) -> None:\n \"\"\"\n Resets the weights of the neural network layers.\n Returns:\n None\n \"\"\"\n torch.nn.init.xavier_uniform_(self.FC1.weight.data)\n torch.nn.init.xavier_uniform_(self.FC2.weight.data)\n torch.nn.init.xavier_uniform_(self.FC3.weight.data)\n if self.noisy_net:\n self.reset_noise()\n\n def reset_noise(self) -> None:\n \"\"\"\n Resets the noise of the noisy layers.\n \"\"\"\n self.FC1.reset_noise()\n self.FC2.reset_noise()\n self.FC3.reset_noise()\n", "# python3\n# pylint: disable=g-bad-file-header\n# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for bsuite.experiments.bandit_scale.\"\"\"\n\nfrom absl.testing import absltest\nfrom bsuite.experiments.bandit_scale import bandit_scale\nfrom dm_env import test_utils\nimport numpy as np\n\n\nclass InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):\n\n def make_object_under_test(self):\n return bandit_scale.load(10, 42, 42)\n\n def make_action_sequence(self):\n valid_actions = range(11)\n rng = np.random.RandomState(42)\n\n for _ in range(100):\n yield rng.choice(valid_actions)\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.sign", "numpy.array", "numpy.abs", "numpy.clip" ], [ "numpy.square", "numpy.array", "numpy.linalg.norm", "numpy.reshape", "numpy.zeros", "numpy.tile", "numpy.mean", "numpy.arange", "numpy.transpose", "numpy.abs", "scipy.optimize.minimize", "numpy.var", "numpy.maximum" ], [ "torch.nn.Linear", "torch.nn.init.xavier_uniform_", "torch.argmax" ], [ "numpy.random.RandomState" ] ]
gioelelm/scanpy
[ "97391a0e7908b9644b2d6640c8e26d37bdc7811e" ]
[ "scanpy/tools/rank_genes_groups.py" ]
[ "# Author: F. Alex Wolf (http://falexwolf.de)\n\"\"\"Differential Gene Expression Analysis\n\nThis is a Beta Version of a tool for differential gene expression testing\nbetween sets detected in previous tools. Tools such as dpt, cluster,...\n\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom .. import utils\nfrom .. import logging as logg\nfrom ..preprocessing import simple\n\ndef rank_genes_groups(\n adata,\n groupby,\n groups='all',\n group_reference=None,\n n_genes=100,\n compute_distribution=False,\n only_positive=True,\n copy=False):\n \"\"\"Rank genes according to differential expression [Wolf17]_.\n\n Rank genes by differential expression. By default, a t-test-like ranking is\n used, in which means are normalized with variances. Soon, a Wilcoxon-rank\n test and other alternatives will be provided.\n\n Parameters\n ----------\n adata : `AnnData`\n Annotated data matrix.\n groupby : `str`\n The key of the sample grouping to consider.\n groups : `str`, `list`, optional (default: `'all'`)\n Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall\n be restricted. If not passed, a ranking will be generated for all\n groups.\n group_reference : `str` or `None`, optional (default: `None`)\n If `None`, compare each group to the union of the rest of the group. If\n a group identifier, the comparison will be with respect to this group.\n n_genes : `int` (default: 100)\n How many genes to rank by default.\n compute_distribution : `bool`\n If `True`, also computes the distribution for top-ranked genes, which\n can be visualized using `sc.pl.rank_genes_groups_violin(adata)`.\n\n Returns\n -------\n rank_genes_groups_gene_zscores : np.ndarray of dtype float (adata.add)\n Array of shape (number of comparisons) × (number of genes) storing the\n zscore of the each gene for each test.\n rank_genes_groups_gene_names : np.ndarray of dtype str (adata.add)\n Array of shape (number of comparisons). Stores the labels for each comparison,\n for example \"C1 vs. C2\" when comparing category 'C1' with 'C2'.\n \"\"\"\n logg.info('find differentially expressed genes', r=True)\n adata = adata.copy() if copy else adata\n n_genes_user = n_genes\n utils.check_adata(adata)\n # for clarity, rename variable\n groups_order = groups\n if isinstance(groups_order, list) and isinstance(groups_order[0], int):\n groups_order = [str(n) for n in groups_order]\n if group_reference is not None and group_reference not in set(groups_order):\n groups_order += [group_reference]\n if (group_reference is not None\n and group_reference not in set(adata.add[groupby + '_order'])):\n raise ValueError('group_reference = {} needs to be one of groupby = {}.'\n .format(group_reference, groupby))\n groups_order, groups_masks = utils.select_groups(\n adata, groups_order, groupby)\n adata.add['rank_genes_groups'] = groupby\n adata.add['rank_genes_groups_order'] = groups_order\n X = adata.X\n\n # loop over all masks and compute means, variances and sample numbers\n n_groups = groups_masks.shape[0]\n n_genes = X.shape[1]\n means = np.zeros((n_groups, n_genes))\n vars = np.zeros((n_groups, n_genes))\n ns = np.zeros(n_groups, dtype=int)\n for imask, mask in enumerate(groups_masks):\n means[imask], vars[imask] = simple._get_mean_var(X[mask])\n ns[imask] = np.where(mask)[0].size\n logg.info('... 
consider \"{}\":'.format(groupby), groups_order,\n 'with sample numbers', ns)\n\n if group_reference is not None:\n ireference = np.where(groups_order == group_reference)[0][0]\n \n # test each either against the union of all other groups\n # or against a specific group\n rankings_gene_zscores = []\n rankings_gene_names = []\n reference_indices = np.arange(adata.n_vars, dtype=int)\n for igroup in range(n_groups):\n if group_reference is None:\n mask_rest = ~groups_masks[igroup]\n else:\n if igroup == ireference: continue\n else: mask_rest = groups_masks[ireference]\n mean_rest, var_rest = simple._get_mean_var(X[mask_rest])\n # Make a more conservative assumption on the variance reduction\n # in the reference. Instead of this\n ns_rest = np.where(mask_rest)[0].size\n # use this\n # ns_rest = ns[igroup]\n denominator = np.sqrt(vars[igroup]/ns[igroup] + var_rest/ns_rest)\n denominator[np.flatnonzero(denominator == 0)] = np.nan\n zscores = (means[igroup] - mean_rest) / denominator\n zscores[np.isnan(zscores)] = 0\n zscores = zscores if only_positive else np.abs(zscores)\n partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]\n partial_indices = np.argsort(zscores[partition])[::-1]\n global_indices = reference_indices[partition][partial_indices]\n rankings_gene_zscores.append(zscores[global_indices])\n rankings_gene_names.append(adata.var_names[global_indices])\n if compute_distribution:\n mask = groups_masks[igroup]\n for gene_counter in range(n_genes_user):\n gene_idx = global_indices[gene_counter]\n X_col = X[mask, gene_idx]\n if issparse(X): X_col = X_col.toarray()[:, 0]\n identifier = _build_identifier(groupby, groups_order[igroup],\n gene_counter, adata.var_names[gene_idx])\n full_col = np.empty(adata.n_smps)\n full_col[:] = np.nan\n full_col[mask] = (X_col - mean_rest[gene_idx])/denominator[gene_idx]\n adata.smp[identifier] = full_col\n\n groups_order_save = groups_order\n if group_reference is not None:\n groups_order_save = [g for g in groups_order if g != group_reference]\n \n adata.add['rank_genes_groups_gene_scores'] = np.rec.fromarrays(\n [n for n in rankings_gene_zscores],\n dtype=[(rn, 'float32') for rn in groups_order_save])\n adata.add['rank_genes_groups_gene_names'] = np.rec.fromarrays(\n [n for n in rankings_gene_names],\n dtype=[(rn, 'U50') for rn in groups_order_save])\n logg.m(' finished', t=True, end=' ')\n logg.m('and added\\n'\n ' \"rank_genes_groups_gene_names\", np.recarray to be indexed by the `groups` (adata.add)\\n'\n ' \"rank_genes_groups_gene_zscores\", the scores (adata.add)\\n'\n ' \"rank_genes_...\", distributions of top-ranked genes (adata.smp)')\n return adata if copy else None\n\n\ndef _build_identifier(groupby, name, gene_counter, gene_name):\n return 'rank_genes_{}_{}_{}_{}'.format(\n groupby, name, gene_counter, gene_name)\n" ]
[ [ "scipy.sparse.issparse", "numpy.isnan", "numpy.empty", "numpy.zeros", "numpy.where", "numpy.arange", "numpy.abs", "numpy.sqrt", "numpy.argpartition", "numpy.argsort", "numpy.rec.fromarrays", "numpy.flatnonzero" ] ]
HealthML/FaST-LMM
[ "3c502ce1c693a934b5f2ff7b63a9577e892cb716", "3c502ce1c693a934b5f2ff7b63a9577e892cb716" ]
[ "fastlmm/inference/linear_regression.py", "fastlmm/association/LeaveOneChromosomeOut.py" ]
[ "import numpy as np\r\nimport logging\r\nimport unittest\r\nimport os\r\nimport scipy.linalg as LA\r\nimport time\r\nfrom sklearn.utils import safe_sqr, check_array\r\nfrom scipy import stats\r\n\r\nfrom pysnptools.snpreader import Bed,Pheno\r\nfrom pysnptools.snpreader import SnpData,SnpReader\r\nfrom pysnptools.kernelreader import KernelNpz\r\nfrom pysnptools.kernelreader import SnpKernel\r\nfrom pysnptools.kernelreader import KernelReader\r\nfrom pysnptools.kernelreader import Identity as KernelIdentity\r\nimport pysnptools.util as pstutil\r\nfrom pysnptools.standardizer import DiagKtoN,UnitTrained\r\nfrom pysnptools.standardizer import Unit\r\nfrom pysnptools.util import intersect_apply\r\nfrom pysnptools.standardizer import Standardizer\r\nfrom fastlmm.inference.lmm import LMM\r\nfrom fastlmm.inference.fastlmm_predictor import _pheno_fixup\r\nfrom fastlmm.inference import FastLMM\r\nfrom pysnptools.standardizer import Identity as StandardizerIdentity\r\nfrom scipy.stats import multivariate_normal\r\nfrom fastlmm.util.pickle_io import load, save\r\n\r\n# make FastLmm use this when there are no SNPs or K is Identity?\r\nclass LinearRegression(object):\r\n '''\r\n A linear regression predictor, that works like the FastLMM in fastlmm_predictor.py, but that expects all similarity matrices to be identity. \r\n\r\n **Constructor:**\r\n :Parameters: * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. Fills missing with zero) and :class:`Standardizer.Identity` (do nothing)\r\n\r\n :Example:\r\n\r\n >>> import numpy as np\r\n >>> import logging\r\n >>> from pysnptools.snpreader import Pheno\r\n >>> from fastlmm.inference import LinearRegression\r\n >>> logging.basicConfig(level=logging.INFO)\r\n >>> cov = Pheno(\"../feature_selection/examples/toydata.cov\")\r\n >>> pheno_fn = \"../feature_selection/examples/toydata.phe\"\r\n >>> train_idx = np.r_[10:cov.iid_count] # iids 10 and on\r\n >>> test_idx = np.r_[0:10] # the first 10 iids\r\n >>> linreg = LinearRegression()\r\n >>> #We give it phenotype information for extra examples, but it reorders and intersects the examples, so only training examples are used. \r\n >>> _ = linreg.fit(X=cov[train_idx,:],y=pheno_fn) \r\n >>> mean, covariance = linreg.predict(X=cov[test_idx,:])\r\n >>> print mean.iid[0], round(mean.val[0],7), round(covariance.val[0,0],7)\r\n ['per0' 'per0'] 0.1518764 0.9043703\r\n >>> nll = linreg.score(X=cov[test_idx,:],y=pheno_fn)\r\n >>> print round(nll,7)\r\n 13.6688448\r\n\r\n\r\n '''\r\n def __init__(self,covariate_standardizer=Unit()):\r\n self.covariate_standardizer = covariate_standardizer\r\n self.is_fitted = False\r\n\r\n def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2=None, mixing=None,count_A1=None):\r\n \"\"\"\r\n Method for training a :class:`FastLMM` predictor. 
If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected.\r\n\r\n :param X: training covariate information, optional: \r\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\r\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\r\n\r\n :param y: training phenotype:\r\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\r\n :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\r\n\r\n :param K0_train: Must be None. Represents the identity similarity matrix.\r\n :type K0_train: None\r\n\r\n :param K1_train: Must be None. Represents the identity similarity matrix.\r\n :type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\r\n\r\n :param h2: Ignored. Optional.\r\n :type h2: number\r\n\r\n :param mixing: Ignored. Optional.\r\n :type mixing: number\r\n\r\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\r\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\r\n :type count_A1: bool\r\n\r\n\r\n :rtype: self, the fitted Linear Regression predictor\r\n \"\"\"\r\n self.is_fitted = True\r\n assert K0_train is None # could also accept that ID or no snps\r\n assert K1_train is None # could also accept that ID or no snps\r\n\r\n assert y is not None, \"y must be given\"\r\n\r\n y = _pheno_fixup(y,count_A1=count_A1)\r\n assert y.sid_count == 1, \"Expect y to be just one variable\"\r\n X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1)\r\n\r\n X, y = intersect_apply([X, y])\r\n y = y.read()\r\n X, covar_unit_trained = X.read().standardize(self.covariate_standardizer,return_trained=True)\r\n\r\n # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset\r\n X = SnpData(iid=X.iid,\r\n sid=FastLMM._new_snp_name(X),\r\n val=np.c_[X.val,np.ones((X.iid_count,1))])\r\n\r\n\r\n lsqSol = np.linalg.lstsq(X.val, y.val[:,0],rcond=-1)\r\n bs=lsqSol[0] #weights\r\n r2=lsqSol[1] #squared residuals\r\n D=lsqSol[2] #rank of design matrix\r\n N=y.iid_count\r\n\r\n self.beta = bs\r\n self.ssres = float(r2)\r\n self.sstot = ((y.val-y.val.mean())**2).sum()\r\n self.covar_unit_trained = covar_unit_trained\r\n self.iid_count = X.iid_count\r\n self.covar_sid = X.sid\r\n self.pheno_sid = y.sid\r\n return self\r\n \r\n\r\n def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None,count_A1=None):\r\n \"\"\"\r\n Method for predicting from a fitted :class:`FastLMM` predictor.\r\n If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.\r\n\r\n :param X: testing covariate information, optional: \r\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\r\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno 
<http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\r\n\r\n :param K0_whole_test: Must be None. Represents the identity similarity matrix.\r\n :type K0_whole_test: None\r\n\r\n :param K1_whole_test: Must be None. Represents the identity similarity matrix.\r\n :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\r\n\r\n :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.\r\n :type iid_if_none: an ndarray of two strings\r\n\r\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\r\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\r\n :type count_A1: bool\r\n\r\n :rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance\r\n \"\"\"\r\n\r\n assert self.is_fitted, \"Can only predict after predictor has been fitted\"\r\n assert K0_whole_test is None or isinstance(K0_whole_test,KernelIdentity) # could also accept no snps\r\n assert K1_whole_test is None or isinstance(K1_whole_test,KernelIdentity) # could also accept no snps\r\n\r\n X = _pheno_fixup(X,iid_if_none=iid_if_none,count_A1=count_A1)\r\n X = X.read().standardize(self.covar_unit_trained)\r\n\r\n # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset\r\n X = SnpData(iid=X.iid,\r\n sid=FastLMM._new_snp_name(X),\r\n val=np.c_[X.read().val,np.ones((X.iid_count,1))])\r\n assert np.array_equal(X.sid,self.covar_sid), \"Expect covar sids to be the same in train and test.\"\r\n\r\n pheno_predicted = X.val.dot(self.beta).reshape(-1,1)\r\n ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name=\"linear regression Prediction\") #!!!replace 'parent_string' with 'name'\r\n\r\n from pysnptools.kernelreader import KernelData\r\n ret1 = KernelData(iid=X.iid,val=np.eye(X.iid_count)* self.ssres / self.iid_count)\r\n return ret0, ret1\r\n\r\n def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, count_A1=None):\r\n \"\"\"\r\n Method for calculating the negative log likelihood of testing examples.\r\n If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.\r\n\r\n :param X: testing covariate information, optional: \r\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\r\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\r\n\r\n :param y: testing phenotype:\r\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\r\n :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\r\n\r\n :param K0_whole_test: Must be None. 
Represents the identity similarity matrix.\r\n :type K0_whole_test: None\r\n\r\n :param K1_whole_test: Must be None. Represents the identity similarity matrix.\r\n :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\r\n\r\n :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.\r\n :type iid_if_none: an ndarray of two strings\r\n\r\n :param return_mse_too: If true, will also return the mean squared error.\r\n :type return_mse_too: bool\r\n\r\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\r\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\r\n :type count_A1: bool\r\n\r\n :rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error.\r\n \"\"\"\r\n mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1)\r\n y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1)\r\n mean, covar, y = intersect_apply([mean0, covar0, y])\r\n var = multivariate_normal(mean=mean.read(order='A',view_ok=True).val.reshape(-1), cov=covar.read(order='A',view_ok=True).val)\r\n y_actual = y.read().val\r\n nll = -np.log(var.pdf(y_actual.reshape(-1)))\r\n if not return_mse_too:\r\n return nll\r\n else:\r\n mse = ((y_actual-mean)**2).sum()\r\n return nll, mse\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nCreated on 2013-08-02\r\n@author: Christian Widmer <chris@shogun-toolbox.org>\r\n@summary: Module for univariate feature selection in the presence of covariates\r\n\r\n\r\nMotivated by sklearn's linear regression method for feature\r\nselection, we've come up with an extended version that takes\r\ncare of covariates\r\n\r\nbased on sklearn code (f_regression):\r\nhttps://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/univariate_selection.py\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n#def get_example_data():\r\n# \"\"\"\r\n# load plink files\r\n# \"\"\"\r\n\r\n# import fastlmm.pyplink.plink as plink\r\n# import pysnptools.snpreader.bed as Bed\r\n# import fastlmm.util.util as util\r\n\r\n\r\n# ipheno = 0\r\n# foldIter = 0\r\n\r\n\r\n# \"\"\"\r\n# import dataset\r\n# dat = dataset.importDataset(\"pheno4\")\r\n\r\n# fn_bed = dat[\"bedFile\"]\r\n# fn_pheno = dat[\"phenoFile\"]\r\n# \"\"\"\r\n\r\n# fn_bed = \"../featureSelection/examples/toydata\"\r\n# fn_pheno = \"../feature_selection/examples/toydata.phe\"\r\n\r\n# import pysnptools.util.pheno as pstpheno\r\n# pheno = pstpheno.loadPhen(fn_pheno)\r\n\r\n# # load data\r\n# bed = plink.Bed(fn_bed)\r\n\r\n# indarr = util.intersect_ids([pheno['iid'],bed.iid])\r\n\r\n# pheno['iid'] = pheno['iid'][indarr[:,0]]\r\n# pheno['vals'] = pheno['vals'][indarr[:,0]]\r\n# bed = bed[indarr[:,1],:]\r\n\r\n# N = pheno['vals'].shape[0]\r\n# y = pheno['vals'][:,ipheno]\r\n# iid = pheno['iid']\r\n\r\n# snps = bed.read().standardize()\r\n\r\n# return snps, y\r\n\r\n\r\ndef f_regression_block(fun,X,y,blocksize=None,**args):\r\n \"\"\"\r\n runs f_regression for each block separately (saves memory).\r\n\r\n -------------------------\r\n fun : method that returns statistics,pval\r\n X : {array-like, sparse matrix} shape = (n_samples, n_features)\r\n The set of regressors that will tested sequentially.\r\n y : array of shape(n_samples).\r\n The data 
matrix\r\n blocksize : number of SNPs per block\r\n \"\"\"\r\n if blocksize==None:\r\n return fun(X,y,**args)\r\n\r\n idx_start = 0\r\n idx_stop = int(blocksize)\r\n\r\n pval = np.zeros(X.shape[1])\r\n stats = np.zeros(X.shape[1])\r\n\r\n while idx_start<X.shape[1]:\r\n stats[idx_start:idx_stop], pval[idx_start:idx_stop] = fun(X[:,idx_start:idx_stop],y,**args)\r\n\r\n idx_start = idx_stop\r\n idx_stop += blocksize\r\n if idx_stop>X.shape[1]:\r\n idx_stop = X.shape[1]\r\n\r\n return stats,pval\r\n\r\n\r\ndef f_regression_cov_alt(X, y, C):\r\n \"\"\"\r\n Implementation as derived in tex document\r\n\r\n See pg 12 of following document for definition of F-statistic\r\n http://www-stat.stanford.edu/~jtaylo/courses/stats191/notes/simple_diagnostics.pdf\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} shape = (n_samples, n_features)\r\n The set of regressors that will tested sequentially.\r\n\r\n y : array of shape(n_samples).\r\n The data matrix\r\n\r\n c : {array-like, sparse matrix} shape = (n_samples, n_covariates)\r\n The set of covariates.\r\n\r\n\r\n Returns\r\n -------\r\n F : array, shape=(n_features,)\r\n F values of features.\r\n\r\n pval : array, shape=(n_features,)\r\n p-values of F-scores.\r\n \"\"\"\r\n # make sure we don't overwrite input data\r\n old_flag_X = X.flags.writeable\r\n old_flag_C = C.flags.writeable\r\n old_flag_y = y.flags.writeable\r\n X.flags.writeable = False\r\n C.flags.writeable = False\r\n y.flags.writeable = False\r\n\r\n\r\n #X, C, y = check_array(X, C, y, dtype=np.float)\r\n y = y.ravel()\r\n\r\n # make copy of input data\r\n X = X.copy(order=\"F\")\r\n y = y.copy()\r\n\r\n assert C.shape[1] < C.shape[0]\r\n cpinv = np.linalg.pinv(C)\r\n X -= np.dot(C,(np.dot(cpinv, X))) #most expensive line (runtime)\r\n y -= np.dot(C,(np.dot(cpinv, y)))\r\n\r\n yS = safe_sqr(y.T.dot(X)) # will create a copy\r\n\r\n # Note: (X*X).sum(0) = X.T.dot(X).diagonal(), computed efficiently\r\n # see e.g.: http://stackoverflow.com/questions/14758283/is-there-a-numpy-scipy-dot-product-calculating-only-the-diagonal-entries-of-the\r\n # TODO: make this smarter using either stride tricks or cython\r\n X *= X\r\n denom = X.sum(0) * y.T.dot(y) - yS\r\n F = yS / denom\r\n\r\n # degrees of freedom\r\n dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm))\r\n F *= dof\r\n\r\n # convert to p-values\r\n pv = stats.f.sf(F, 1, dof)\r\n\r\n # restore old state\r\n X.flags.writeable = old_flag_X\r\n C.flags.writeable = old_flag_C\r\n y.flags.writeable = old_flag_y\r\n\r\n return F, pv\r\n\r\n\r\ndef f_regression_cov(X, y, C):\r\n \"\"\"Univariate linear regression tests\r\n\r\n Quick linear model for testing the effect of a single regressor,\r\n sequentially for many regressors.\r\n\r\n This is done in 3 steps:\r\n 1. the regressor of interest and the data are orthogonalized\r\n wrt constant regressors\r\n 2. the cross correlation between data and regressors is computed\r\n 3. 
it is converted to an F score then to a p-value\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} shape = (n_samples, n_features)\r\n The set of regressors that will tested sequentially.\r\n\r\n y : array of shape(n_samples).\r\n The data matrix\r\n\r\n c : {array-like, sparse matrix} shape = (n_samples, n_covariates)\r\n The set of covariates.\r\n\r\n\r\n Returns\r\n -------\r\n F : array, shape=(n_features,)\r\n F values of features.\r\n\r\n pval : array, shape=(n_features,)\r\n p-values of F-scores.\r\n \"\"\"\r\n\r\n X = check_array(X, dtype=np.float)\r\n C = check_array(C, dtype=np.float)\r\n y = check_array(y, dtype=np.float) \r\n y = y.ravel()\r\n\r\n assert C.shape[1] < C.shape[0]\r\n cpinv = np.linalg.pinv(C)\r\n X -= np.dot(C,(np.dot(cpinv, X)))\r\n y -= np.dot(C,(np.dot(cpinv, y)))\r\n\r\n # compute the correlation\r\n corr = np.dot(y, X)\r\n corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()\r\n corr /= np.asarray(np.sqrt(safe_sqr(y).sum())).ravel()\r\n\r\n # convert to p-value\r\n dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm))\r\n F = corr ** 2 / (1 - corr ** 2) * dof\r\n pv = stats.f.sf(F, 1, dof)\r\n return F, pv\r\n\r\n\r\ndef test_bias():\r\n \"\"\"\r\n make sure we get the same result for setting C=unitvec\r\n \"\"\"\r\n\r\n S, y = get_example_data()\r\n C = np.ones((len(y),1))\r\n\r\n from sklearn.feature_selection import f_regression\r\n\r\n F1, pval1 = f_regression(S, y, center=True)\r\n F2, pval2 = f_regression_cov(S, C, y)\r\n F3, pval3 = f_regression_cov_alt(S, C, y)\r\n\r\n # make sure values are the same\r\n np.testing.assert_array_almost_equal(F1, F2)\r\n np.testing.assert_array_almost_equal(F2, F3)\r\n np.testing.assert_array_almost_equal(pval1, pval2)\r\n np.testing.assert_array_almost_equal(pval2, pval3)\r\n\r\n\r\ndef test_cov():\r\n \"\"\"\r\n compare different implementations, make sure results are the same\r\n \"\"\"\r\n\r\n S, y = get_example_data()\r\n C = S[:,0:10]\r\n S = S[:,10:]\r\n\r\n F1, pval1 = f_regression_cov(S, C, y)\r\n F2, pval2 = f_regression_cov_alt(S, C, y)\r\n\r\n np.testing.assert_array_almost_equal(F1, F2)\r\n np.testing.assert_array_almost_equal(pval1, pval2)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n logging.basicConfig(level=logging.INFO)\r\n\r\n import doctest\r\n doctest.testmod()\r\n #test_cov()\r\n #test_bias()\r\n", "#!/usr/bin/env python2.7\n#\n# Written (W) 2014 Christian Widmer\n# Copyright (C) 2014 Microsoft Research\n\n\"\"\"\nCreated on 2014-03-11\n@author: Christian Widmer\n@summary: Module for performing GWAS\n\"\"\"\n\nimport logging\n\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\nfrom scipy import stats\nimport pylab\n\nimport time\n\nimport fastlmm.inference as fastlmm\n\nimport fastlmm.util.util as util \nfrom fastlmm.pyplink.snpreader.Bed import Bed\nfrom fastlmm.util.pickle_io import load, save\nfrom fastlmm.util.util import argintersect_left\n\n\nclass LeaveOneChromosomeOut(object):\n \"\"\"LeaveOneChromosomeOut cross validation iterator (based on sklearn).\n\n Provides train/test indices to split data in train test sets. Split\n dataset into k consecutive folds according to which chromosome they belong to.\n\n Each fold is then used a validation set once while the k - 1 remaining\n fold form the training set.\n\n Parameters\n ----------\n chr : list\n List of chromosome identifiers\n\n indices : boolean, optional (default True)\n Return train/test split as arrays of indices, rather than a boolean\n mask array. 
Integer indices are required when dealing with sparse\n matrices, since those cannot be indexed by boolean masks.\n\n random_state : int or RandomState\n Pseudo number generator state used for random sampling.\n\n \"\"\"\n\n def __init__(self, chr_names, indices=True, random_state=None):\n\n #random_state = check_random_state(random_state)\n\n self.chr_names = np.array(chr_names)\n self.unique_chr_names = list(set(chr_names))\n self.unique_chr_names.sort()\n\n assert len(self.unique_chr_names) > 1\n self.n = len(self.chr_names)\n self.n_folds = len(self.unique_chr_names)\n self.indices = indices\n self.idxs = np.arange(self.n)\n\n\n def __iter__(self):\n if self.indices:\n ind = np.arange(self.n)\n \n for chr_name in self.unique_chr_names:\n \n test_index = self.chr_names == chr_name\n train_index = np.logical_not(test_index)\n \n if self.indices:\n train_index = ind[train_index]\n test_index = ind[test_index]\n \n yield train_index, test_index\n\n def __repr__(self):\n return '%s.%s(n=%i, n_folds=%i)' % (\n self.__class__.__module__,\n self.__class__.__name__,\n self.n,\n self.n_folds,\n )\n\n def __len__(self):\n return self.n_folds\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.array_equal", "numpy.zeros", "numpy.linalg.pinv", "numpy.ones", "numpy.eye", "numpy.testing.assert_array_almost_equal", "numpy.linalg.lstsq", "sklearn.feature_selection.f_regression", "sklearn.utils.safe_sqr", "sklearn.utils.check_array" ], [ "numpy.logical_not", "numpy.array", "numpy.arange" ] ]
wofeicaoge/manim
[ "50b2a6ff31e3c467d086145bcb3684b75c24137d" ]
[ "from_3b1b/old/eoc/chapter8.py" ]
[ "import scipy\nfrom manimlib.imports import *\nfrom from_3b1b.old.eoc.chapter1 import Thumbnail as Chapter1Thumbnail\nfrom from_3b1b.old.eoc.chapter2 import Car, MoveCar, ShowSpeedometer, \\\n IncrementNumber, GraphCarTrajectory, SecantLineToTangentLine, \\\n VELOCITY_COLOR, TIME_COLOR, DISTANCE_COLOR\n\ndef v_rate_func(t):\n return 4*t - 4*(t**2)\n\ndef s_rate_func(t):\n return 3*(t**2) - 2*(t**3)\n\ndef v_func(t):\n return t*(8-t)\n\ndef s_func(t):\n return 4*t**2 - (t**3)/3.\n\n\nclass Chapter8OpeningQuote(OpeningQuote, PiCreatureScene):\n CONFIG = {\n \"quote\" : [\n \" One should never try to prove anything that \\\\\\\\ is not \",\n \"almost obvious\", \". \"\n ],\n \"quote_arg_separator\" : \"\",\n \"highlighted_quote_terms\" : {\n \"almost obvious\" : BLUE,\n },\n \"author\" : \"Alexander Grothendieck\"\n }\n def construct(self):\n self.remove(self.pi_creature)\n OpeningQuote.construct(self)\n\n words_copy = self.quote.get_part_by_tex(\"obvious\").copy()\n author = self.author\n author.save_state()\n formula = self.get_formula()\n formula.next_to(author, DOWN, MED_LARGE_BUFF)\n formula.to_edge(LEFT)\n\n self.revert_to_original_skipping_status()\n self.play(FadeIn(self.pi_creature))\n self.play(\n author.next_to, self.pi_creature.get_corner(UP+LEFT), UP,\n self.pi_creature.change_mode, \"raise_right_hand\"\n )\n self.wait(3)\n self.play(\n author.restore,\n self.pi_creature.change_mode, \"plain\"\n )\n self.play(\n words_copy.next_to, self.pi_creature, \n LEFT, MED_SMALL_BUFF, UP,\n self.pi_creature.change_mode, \"thinking\"\n )\n self.wait(2)\n self.play(\n Write(formula),\n self.pi_creature.change_mode, \"confused\"\n )\n self.wait()\n\n def get_formula(self):\n result = TexMobject(\n \"{d(\\\\sin(\\\\theta)) \\\\over \\\\,\", \"d\\\\theta}\", \"=\",\n \"\\\\lim_{\", \"h\", \" \\\\to 0}\", \n \"{\\\\sin(\\\\theta+\", \"h\", \") - \\\\sin(\\\\theta) \\\\over\", \" h}\", \"=\",\n \"\\\\lim_{\", \"h\", \" \\\\to 0}\", \n \"{\\\\big[ \\\\sin(\\\\theta)\\\\cos(\", \"h\", \") + \",\n \"\\\\sin(\", \"h\", \")\\\\cos(\\\\theta)\\\\big] - \\\\sin(\\\\theta) \\\\over\", \"h}\",\n \"= \\\\dots\"\n )\n result.set_color_by_tex(\"h\", GREEN, substring = False)\n result.set_color_by_tex(\"d\\\\theta\", GREEN)\n\n result.set_width(FRAME_WIDTH - 2*MED_SMALL_BUFF)\n return result\n\nclass ThisVideo(TeacherStudentsScene):\n def construct(self):\n series = VideoSeries()\n series.to_edge(UP)\n this_video = series[7]\n this_video.save_state()\n next_video = series[8]\n\n deriv, integral, v_t, dt, equals, v_T = formula = TexMobject(\n \"\\\\frac{d}{dT}\", \n \"\\\\int_0^T\", \"v(t)\", \"\\\\,dt\", \n \"=\", \"v(T)\"\n )\n formula.set_color_by_tex(\"v\", VELOCITY_COLOR)\n formula.next_to(self.teacher.get_corner(UP+LEFT), UP, MED_LARGE_BUFF)\n\n self.play(FadeIn(series, lag_ratio = 0.5))\n self.play(\n this_video.shift, this_video.get_height()*DOWN/2,\n this_video.set_color, YELLOW,\n self.teacher.change_mode, \"raise_right_hand\",\n )\n self.play(Write(VGroup(integral, v_t, dt)))\n self.change_student_modes(*[\"erm\"]*3)\n self.wait()\n self.play(Write(VGroup(deriv, equals, v_T)), )\n self.change_student_modes(*[\"confused\"]*3)\n self.wait(3)\n self.play(\n this_video.restore,\n next_video.shift, next_video.get_height()*DOWN/2,\n next_video.set_color, YELLOW,\n integral[0].copy().next_to, next_video, DOWN, MED_LARGE_BUFF,\n FadeOut(formula),\n *it.chain(*[\n [pi.change_mode, \"plain\", pi.look_at, next_video]\n for pi in self.pi_creatures\n ])\n )\n self.wait(2)\n\nclass 
InCarRestrictedView(ShowSpeedometer):\n CONFIG = {\n \"speedometer_title_text\" : \"Your view\",\n }\n def construct(self):\n car = Car()\n car.move_to(self.point_A)\n self.car = car\n car.randy.save_state()\n Transform(car.randy, Randolph()).update(1)\n car.randy.next_to(car, RIGHT, MED_LARGE_BUFF)\n car.randy.look_at(car)\n\n window = car[1][6].copy()\n window.is_subpath = False\n window.set_fill(BLACK, opacity = 0.75)\n window.set_stroke(width = 0)\n\n square = Square(stroke_color = WHITE)\n square.replace(VGroup(self.speedometer, self.speedometer_title))\n square.scale_in_place(1.5)\n square.pointwise_become_partial(square, 0.25, 0.75)\n\n time_label = TextMobject(\"Time (in seconds):\", \"0\")\n time_label.shift(2*UP)\n\n dots = VGroup(*list(map(Dot, [self.point_A, self.point_B])))\n line = Line(*dots, buff = 0)\n line.set_color(DISTANCE_COLOR)\n brace = Brace(line, DOWN)\n brace_text = brace.get_text(\"Distance traveled?\")\n\n\n #Sit in car\n self.add(car)\n self.play(Blink(car.randy))\n self.play(car.randy.restore, Animation(car))\n self.play(ShowCreation(window, run_time = 2))\n self.wait()\n\n #Show speedometer\n self.introduce_added_mobjects()\n self.play(ShowCreation(square))\n self.wait()\n\n #Travel\n self.play(FadeIn(time_label))\n self.play(\n MoveCar(car, self.point_B, rate_func = s_rate_func),\n IncrementNumber(time_label[1], run_time = 8),\n MaintainPositionRelativeTo(window, car),\n *self.get_added_movement_anims(\n rate_func = v_rate_func,\n radians = -(16.0/70)*4*np.pi/3\n ),\n run_time = 8\n )\n eight = TexMobject(\"8\").move_to(time_label[1])\n self.play(Transform(\n time_label[1], eight,\n rate_func = squish_rate_func(smooth, 0, 0.5)\n ))\n self.wait()\n\n #Ask about distance\n self.play(*list(map(ShowCreation, dots)))\n self.play(ShowCreation(line))\n self.play(\n GrowFromCenter(brace),\n Write(brace_text)\n )\n self.wait(2)\n\nclass GraphDistanceVsTime(GraphCarTrajectory):\n CONFIG = {\n \"y_min\" : 0,\n \"y_max\" : 100,\n \"y_axis_height\" : 6,\n \"y_tick_frequency\" : 10,\n \"y_labeled_nums\" : list(range(10, 100, 10)),\n \"y_axis_label\" : \"Distance (in meters)\",\n \"x_min\" : -1,\n \"x_max\" : 9,\n \"x_axis_width\" : 9,\n \"x_tick_frequency\" : 1,\n \"x_leftmost_tick\" : None, #Change if different from x_min\n \"x_labeled_nums\" : list(range(1, 9)),\n \"x_axis_label\" : \"$t$\",\n \"time_of_journey\" : 8,\n \"care_movement_rate_func\" : s_rate_func,\n \"num_graph_anchor_points\" : 100\n }\n def construct(self):\n self.setup_axes()\n graph = self.get_graph(\n s_func, \n color = DISTANCE_COLOR,\n x_min = 0,\n x_max = 8,\n )\n origin = self.coords_to_point(0, 0)\n graph_label = self.get_graph_label(\n graph, \"s(t)\", color = DISTANCE_COLOR\n )\n self.introduce_graph(graph, origin)\n\nclass PlotVelocity(GraphScene):\n CONFIG = {\n \"x_min\" : -1,\n \"x_max\" : 9,\n \"x_axis_width\" : 9,\n \"x_tick_frequency\" : 1,\n \"x_labeled_nums\" : list(range(1, 9)),\n \"x_axis_label\" : \"$t$\",\n \"y_min\" : 0,\n \"y_max\" : 25,\n \"y_axis_height\" : 6,\n \"y_tick_frequency\" : 5,\n \"y_labeled_nums\" : list(range(5, 30, 5)),\n \"y_axis_label\" : \"Velocity in $\\\\frac{\\\\text{meters}}{\\\\text{second}}$\",\n \"num_graph_anchor_points\" : 50,\n }\n def construct(self):\n self.setup_axes()\n self.add_speedometer()\n self.plot_points()\n self.draw_curve()\n\n def add_speedometer(self):\n speedometer = Speedometer()\n speedometer.next_to(self.y_axis_label_mob, RIGHT, LARGE_BUFF)\n speedometer.to_edge(UP)\n\n self.play(DrawBorderThenFill(\n speedometer, \n lag_ratio = 
0.5,\n rate_func=linear,\n ))\n\n self.speedometer = speedometer\n\n def plot_points(self):\n times = list(range(0, 9))\n points = [\n self.coords_to_point(t, v_func(t))\n for t in times\n ]\n dots = VGroup(*[Dot(p, radius = 0.07) for p in points])\n dots.set_color(VELOCITY_COLOR)\n\n pre_dots = VGroup()\n dot_intro_anims = []\n\n for time, dot in zip(times, dots):\n pre_dot = dot.copy()\n self.speedometer.move_needle_to_velocity(v_func(time))\n pre_dot.move_to(self.speedometer.get_needle_tip())\n pre_dot.set_fill(opacity = 0)\n pre_dots.add(pre_dot)\n dot_intro_anims += [\n ApplyMethod(\n pre_dot.set_fill, YELLOW, 1,\n run_time = 0.1,\n ),\n ReplacementTransform(\n pre_dot, dot,\n run_time = 0.9,\n )\n ]\n self.speedometer.move_needle_to_velocity(0)\n\n self.play(\n Succession(\n *dot_intro_anims, rate_func=linear\n ),\n ApplyMethod(\n self.speedometer.move_needle_to_velocity,\n v_func(4),\n rate_func = squish_rate_func(\n lambda t : 1-v_rate_func(t),\n 0, 0.95,\n )\n ),\n run_time = 5\n )\n self.wait()\n\n def draw_curve(self):\n graph, label = self.get_v_graph_and_label()\n\n self.revert_to_original_skipping_status()\n self.play(ShowCreation(graph, run_time = 3))\n self.play(Write(graph_label))\n self.wait()\n\n ##\n\n def get_v_graph_and_label(self):\n graph = self.get_graph(\n v_func, \n x_min = 0,\n x_max = 8,\n color = VELOCITY_COLOR\n )\n graph_label = TexMobject(\"v(t)\", \"=t(8-t)\")\n graph_label.set_color_by_tex(\"v(t)\", VELOCITY_COLOR)\n graph_label.next_to(\n graph.point_from_proportion(7./8.),\n UP+RIGHT\n )\n self.v_graph = graph\n self.v_graph_label = graph_label\n return graph, graph_label\n\nclass Chapter2Wrapper(Scene):\n CONFIG = {\n \"title\" : \"Chapter 2: The paradox of the derivative\",\n }\n def construct(self):\n title = TextMobject(self.title)\n title.to_edge(UP)\n rect = Rectangle(width = 16, height = 9, color = WHITE)\n rect.set_height(1.5*FRAME_Y_RADIUS)\n rect.next_to(title, DOWN)\n\n self.add(title)\n self.play(ShowCreation(rect))\n self.wait(3)\n\nclass GivenDistanceWhatIsVelocity(GraphCarTrajectory):\n def construct(self):\n self.force_skipping()\n self.setup_axes()\n graph = self.graph_sigmoid_trajectory_function()\n origin = self.coords_to_point(0, 0)\n\n self.introduce_graph(graph, origin)\n self.comment_on_slope(graph, origin)\n self.revert_to_original_skipping_status()\n self.show_velocity_graph()\n\nclass DerivativeOfDistance(SecantLineToTangentLine):\n def construct(self):\n self.setup_axes()\n self.remove(self.y_axis_label_mob, self.x_axis_label_mob)\n self.add_derivative_definition(self.y_axis_label_mob)\n self.add_graph()\n self.draw_axes()\n self.show_tangent_line()\n\nclass AskAboutAntiderivative(PlotVelocity):\n def construct(self):\n self.setup_axes()\n self.add_v_graph()\n self.write_s_formula()\n self.write_antiderivative()\n\n\n def add_v_graph(self):\n graph, label = self.get_v_graph_and_label()\n self.play(ShowCreation(graph))\n self.play(Write(label))\n\n self.graph = graph\n self.graph_label = label\n\n def write_s_formula(self):\n ds_dt = TexMobject(\"ds\", \"\\\\over\\\\,\", \"dt\")\n ds_dt.set_color_by_tex(\"ds\", DISTANCE_COLOR)\n ds_dt.set_color_by_tex(\"dt\", TIME_COLOR)\n ds_dt.next_to(self.graph_label, UP, LARGE_BUFF)\n\n v_t = self.graph_label.get_part_by_tex(\"v(t)\")\n arrow = Arrow(\n ds_dt.get_bottom(), v_t.get_top(),\n color = WHITE,\n )\n\n self.play(\n Write(ds_dt, run_time = 2),\n ShowCreation(arrow)\n )\n self.wait()\n\n def write_antiderivative(self):\n randy = Randolph()\n randy.to_corner(DOWN+LEFT)\n 
randy.shift(2*RIGHT)\n words = TexMobject(\n \"{d(\", \"???\", \") \\\\over \\\\,\", \"dt}\", \"=\", \"t(8-t)\"\n )\n words.set_color_by_tex(\"t(8-t)\", VELOCITY_COLOR)\n words.set_color_by_tex(\"???\", DISTANCE_COLOR)\n words.set_color_by_tex(\"dt\", TIME_COLOR)\n words.scale(0.7)\n\n self.play(FadeIn(randy))\n self.play(PiCreatureSays(\n randy, words, \n target_mode = \"confused\",\n bubble_kwargs = {\"height\" : 3, \"width\" : 4},\n ))\n self.play(Blink(randy))\n self.wait()\n\nclass Antiderivative(PiCreatureScene):\n def construct(self):\n functions = self.get_functions(\"t^2\", \"2t\")\n alt_functions = self.get_functions(\"???\", \"t(8-t)\")\n top_arc, bottom_arc = arcs = self.get_arcs(functions)\n derivative, antiderivative = self.get_arc_labels(arcs)\n group = VGroup(functions, arcs, derivative, antiderivative)\n\n self.add(functions, top_arc, derivative)\n self.wait()\n self.play(\n ShowCreation(bottom_arc),\n Write(antiderivative),\n self.pi_creature.change_mode, \"raise_right_hand\"\n )\n self.wait(2)\n for pair in reversed(list(zip(functions, alt_functions))):\n self.play(\n Transform(*pair),\n self.pi_creature.change_mode, \"pondering\"\n )\n self.wait(2)\n\n self.pi_creature_says(\n \"But first!\", \n target_mode = \"surprised\",\n look_at_arg = 50*OUT,\n added_anims = [group.to_edge, LEFT],\n run_time = 1,\n )\n self.wait()\n\n def get_functions(self, left_tex, right_tex):\n left = TexMobject(left_tex)\n left.shift(2*LEFT)\n left.set_color(DISTANCE_COLOR)\n right = TexMobject(right_tex)\n right.shift(2*RIGHT)\n right.set_color(VELOCITY_COLOR)\n result = VGroup(left, right)\n result.shift(UP)\n return result\n\n def get_arcs(self, functions):\n f1, f2 = functions\n top_line = Line(f1.get_corner(UP+RIGHT), f2.get_corner(UP+LEFT))\n bottom_line = Line(f1.get_corner(DOWN+RIGHT), f2.get_corner(DOWN+LEFT))\n top_arc = Arc(start_angle = 5*np.pi/6, angle = -2*np.pi/3)\n bottom_arc = top_arc.copy()\n bottom_arc.rotate(np.pi)\n arcs = VGroup(top_arc, bottom_arc)\n arcs.set_width(top_line.get_width())\n for arc in arcs:\n arc.add_tip()\n top_arc.next_to(top_line, UP)\n bottom_arc.next_to(bottom_line, DOWN)\n bottom_arc.set_color(MAROON_B)\n\n return arcs\n\n def get_arc_labels(self, arcs):\n top_arc, bottom_arc = arcs\n derivative = TextMobject(\"Derivative\")\n derivative.next_to(top_arc, UP)\n antiderivative = TextMobject(\"``Antiderivative''\")\n antiderivative.next_to(bottom_arc, DOWN)\n antiderivative.set_color(bottom_arc.get_color())\n\n return VGroup(derivative, antiderivative)\n\nclass AreaUnderVGraph(PlotVelocity):\n def construct(self):\n self.setup_axes()\n self.add(*self.get_v_graph_and_label())\n self.show_rects()\n\n def show_rects(self):\n rect_list = self.get_riemann_rectangles_list(\n self.v_graph, 7, \n max_dx = 1.0,\n x_min = 0,\n x_max = 8,\n )\n flat_graph = self.get_graph(lambda t : 0)\n rects = self.get_riemann_rectangles(\n flat_graph, x_min = 0, x_max = 8, dx = 1.0\n )\n\n for new_rects in rect_list:\n new_rects.set_fill(opacity = 0.8)\n rects.align_submobjects(new_rects)\n for alt_rect in rects[::2]:\n alt_rect.set_fill(opacity = 0)\n self.play(Transform(\n rects, new_rects,\n run_time = 2,\n lag_ratio = 0.5\n ))\n self.wait()\n\nclass ConstantVelocityCar(Scene):\n def construct(self):\n car = Car()\n car.move_to(5*LEFT + 3*DOWN)\n\n self.add(car)\n self.wait()\n self.play(MoveCar(\n car, 7*RIGHT+3*DOWN,\n run_time = 5,\n rate_func=linear,\n ))\n self.wait()\n\nclass ConstantVelocityPlot(PlotVelocity):\n CONFIG = {\n \"x_axis_label\" : \"Time\",\n 
\"units_of_area_color\" : BLUE_E,\n }\n def construct(self):\n self.setup_axes()\n self.x_axis_label_mob.shift(DOWN)\n self.draw_graph()\n self.show_product()\n self.comment_on_area_wierdness()\n self.note_units()\n\n def draw_graph(self):\n graph = self.get_graph(\n lambda t : 10,\n x_min = 0, \n x_max = 8,\n color = VELOCITY_COLOR\n )\n\n self.play(ShowCreation(graph, rate_func=linear, run_time = 3))\n self.wait()\n\n self.graph = graph\n\n def show_product(self):\n rect = Rectangle(\n stroke_width = 0,\n fill_color = DISTANCE_COLOR,\n fill_opacity = 0.5\n )\n rect.replace(\n VGroup(self.graph, VectorizedPoint(self.graph_origin)),\n stretch = True\n )\n\n right_brace = Brace(rect, RIGHT)\n top_brace = Brace(rect, UP)\n v_label = right_brace.get_text(\n \"$10 \\\\frac{\\\\text{meters}}{\\\\text{second}}$\",\n )\n v_label.set_color(VELOCITY_COLOR)\n t_label = top_brace.get_text(\n \"8 seconds\"\n )\n t_label.set_color(TIME_COLOR)\n\n s_label = TexMobject(\"10\", \"\\\\times\", \"8\", \"\\\\text{ meters}\")\n s_label.set_color_by_tex(\"10\", VELOCITY_COLOR)\n s_label.set_color_by_tex(\"8\", TIME_COLOR)\n s_label.move_to(rect)\n\n self.play(\n GrowFromCenter(right_brace),\n Write(v_label),\n )\n self.play(\n GrowFromCenter(top_brace),\n Write(t_label),\n )\n self.play(\n FadeIn(rect),\n Write(s_label),\n Animation(self.graph)\n )\n self.wait(2)\n\n self.area_rect = rect\n self.s_label = s_label\n\n def comment_on_area_wierdness(self):\n randy = Randolph()\n randy.to_corner(DOWN+LEFT)\n bubble = randy.get_bubble(\n \"Distance \\\\\\\\ is area?\",\n bubble_class = ThoughtBubble,\n height = 3,\n width = 4,\n fill_opacity = 1,\n )\n bubble.content.scale_in_place(0.8)\n bubble.content.shift(SMALL_BUFF*UP)\n VGroup(bubble[-1], bubble.content).shift(1.5*LEFT)\n\n self.play(FadeIn(randy))\n self.play(randy.change_mode, \"pondering\")\n self.play(\n self.area_rect.set_color, YELLOW,\n *list(map(Animation, self.get_mobjects())),\n rate_func = there_and_back\n )\n self.play(Blink(randy))\n self.play(\n randy.change_mode, \"confused\",\n randy.look_at, randy.bubble,\n ShowCreation(bubble), \n Write(bubble.content),\n )\n self.wait()\n self.play(Blink(randy))\n self.wait()\n self.play(\n randy.change_mode, \"pondering\",\n FadeOut(bubble),\n FadeOut(bubble.content),\n )\n\n self.randy = randy\n\n def note_units(self):\n x_line, y_line = lines = VGroup(*[\n axis.copy()\n for axis in (self.x_axis, self.y_axis)\n ])\n lines.set_color(TIME_COLOR)\n square = Square(\n stroke_color = BLACK,\n stroke_width = 1,\n fill_color = self.units_of_area_color,\n fill_opacity = 1,\n )\n square.replace(\n VGroup(*[\n VectorizedPoint(self.coords_to_point(i, i))\n for i in (0, 1)\n ]),\n stretch = True\n )\n units_of_area = VGroup(*[\n square.copy().move_to(\n self.coords_to_point(x, y),\n DOWN+LEFT\n )\n for x in range(8)\n for y in range(10)\n ])\n\n self.play(ShowCreation(x_line))\n self.play(Indicate(self.x_axis_label_mob))\n self.play(FadeOut(x_line))\n self.play(\n ShowCreation(y_line),\n self.randy.look_at, self.y_axis_label_mob\n )\n self.play(Indicate(self.y_axis_label_mob))\n self.play(FadeOut(y_line))\n\n for FadeClass in FadeIn, FadeOut:\n self.play(\n FadeClass(\n units_of_area, \n lag_ratio = 0.5,\n run_time = 3\n ),\n Animation(self.s_label),\n self.randy.look_at, self.area_rect\n )\n self.play(Blink(self.randy))\n self.wait()\n\nclass PiecewiseConstantCar(Scene):\n def construct(self):\n car = Car()\n start_point = 5*LEFT\n car.move_to(start_point)\n\n self.add(car)\n self.wait()\n for shift in 2, 6, 12:\n 
car.randy.rotate_in_place(np.pi/8)\n anim = MoveCar(\n car, start_point+shift*RIGHT,\n rate_func=linear\n )\n\n anim.target_mobject[0].rotate_in_place(-np.pi/8)\n # for mob in anim.starting_mobject, anim.mobject:\n # mob.randy.rotate_in_place(np.pi/6)\n self.play(anim)\n self.wait()\n\nclass PiecewiseConstantPlot(PlotVelocity):\n CONFIG = {\n \"y_axis_label\" : \"\",\n \"min_graph_proportion\" : 0.1,\n \"max_graph_proportion\" : 0.8,\n \"num_riemann_approximations\" : 7,\n \"riemann_rect_fill_opacity\" : 0.75,\n \"tick_size\" : 0.2,\n }\n def construct(self):\n self.setup_graph()\n self.always_changing()\n self.show_piecewise_constant_graph()\n self.compute_distance_on_each_interval()\n self.approximate_original_curve()\n self.revert_to_specific_approximation()\n self.show_specific_rectangle()\n self.show_v_dt_for_all_rectangles()\n self.write_integral_symbol()\n self.roles_of_dt()\n self.what_does_sum_approach()\n self.label_integral()\n\n def setup_graph(self):\n self.setup_axes()\n self.add(*self.get_v_graph_and_label())\n\n def always_changing(self):\n dot = Dot()\n arrow = Arrow(LEFT, RIGHT)\n words = TextMobject(\"Always changing\")\n group = VGroup(dot, arrow, words)\n def update_group(group, alpha):\n dot, arrow, words = group\n prop = interpolate(\n self.min_graph_proportion,\n self.max_graph_proportion,\n alpha\n )\n graph_point = self.v_graph.point_from_proportion(prop)\n dot.move_to(graph_point)\n x_val = self.x_axis.point_to_number(graph_point)\n angle = self.angle_of_tangent(x_val, self.v_graph)\n angle += np.pi/2\n vect = rotate_vector(RIGHT, angle)\n arrow.rotate(angle - arrow.get_angle() + np.pi)\n arrow.shift(\n graph_point + MED_SMALL_BUFF*vect - arrow.get_end()\n )\n words.next_to(arrow.get_start(), UP)\n return group\n update_group(group, 0)\n\n self.play(\n Write(words),\n ShowCreation(arrow),\n DrawBorderThenFill(dot),\n run_time = 1\n )\n self.play(UpdateFromAlphaFunc(\n group, update_group,\n rate_func = there_and_back,\n run_time = 5\n ))\n self.wait()\n self.play(FadeOut(group))\n\n def show_piecewise_constant_graph(self):\n pw_constant_graph = self.get_pw_constant_graph()\n alt_lines = [\n line.copy().set_color(YELLOW)\n for line in pw_constant_graph[:4]\n ]\n for line in alt_lines:\n line.start_dot = Dot(line.get_start())\n line.end_dot = Dot(line.get_end())\n VGroup(line.start_dot, line.end_dot).set_color(line.get_color())\n line = alt_lines[0]\n\n faders = [self.v_graph, self.v_graph_label]\n for mob in faders:\n mob.save_state()\n mob.generate_target()\n mob.target.fade(0.7)\n\n self.play(*list(map(MoveToTarget, faders)))\n self.play(ShowCreation(pw_constant_graph, run_time = 2))\n self.wait()\n self.play(ShowCreation(line))\n self.wait()\n for new_line in alt_lines[1:]:\n for mob in line.end_dot, new_line.start_dot, new_line:\n self.play(Transform(\n line, mob,\n run_time = 1./3\n ))\n self.remove(line)\n self.add(new_line)\n self.wait(2)\n line = new_line\n self.play(FadeOut(line))\n\n self.pw_constant_graph = pw_constant_graph\n\n def compute_distance_on_each_interval(self):\n rect_list = self.get_riemann_rectangles_list(\n self.v_graph, self.num_riemann_approximations, \n max_dx = 1,\n x_min = 0,\n x_max = 8,\n )\n for rects in rect_list:\n rects.set_fill(opacity = self.riemann_rect_fill_opacity)\n flat_rects = self.get_riemann_rectangles(\n self.get_graph(lambda t : 0),\n x_min = 0, x_max = 8, dx = 1\n )\n rects = rect_list[0]\n rect = rects[1]\n flat_rects.submobjects[1] = rect.copy()\n\n right_brace = Brace(rect, RIGHT)\n top_brace = Brace(rect, UP)\n 
right_brace.label = right_brace.get_text(\"$7\\\\frac{\\\\text{m}}{\\\\text{s}}$\")\n top_brace.label = top_brace.get_text(\"$1$s\")\n\n self.play(FadeIn(rect))\n for brace in right_brace, top_brace:\n self.play(\n GrowFromCenter(brace),\n Write(brace.label, run_time = 1),\n )\n brace.add(brace.label)\n self.wait()\n self.play(\n ReplacementTransform(\n flat_rects, rects,\n run_time = 2,\n lag_ratio = 0.5,\n ),\n Animation(right_brace)\n )\n self.play(*list(map(FadeOut, [top_brace, right_brace])))\n self.wait()\n\n self.rects = rects\n self.rect_list = rect_list\n\n def approximate_original_curve(self):\n rects = self.rects\n self.play(\n FadeOut(self.pw_constant_graph),\n *[\n m.restore \n for m in (self.v_graph, self.v_graph_label)\n ]+[Animation(self.rects)]\n )\n for new_rects in self.rect_list[1:]:\n self.transform_between_riemann_rects(rects, new_rects)\n self.wait()\n\n def revert_to_specific_approximation(self):\n rects = self.rects\n rects.save_state()\n target_rects = self.rect_list[2]\n target_rects.set_fill(opacity = 1)\n\n ticks = self.get_ticks(target_rects)\n tick_pair = VGroup(*ticks[4:6])\n brace = Brace(tick_pair, DOWN, buff = 0)\n dt_label = brace.get_text(\"$dt$\", buff = SMALL_BUFF)\n\n example_text = TextMobject(\n \"For example, \\\\\\\\\",\n \"$dt$\", \"$=0.25$\"\n )\n example_text.to_corner(UP+RIGHT)\n example_text.set_color_by_tex(\"dt\", YELLOW)\n\n self.play(ReplacementTransform(\n rects, target_rects,\n run_time = 2,\n lag_ratio = 0.5\n ))\n rects.restore()\n self.wait()\n self.play(\n ShowCreation(ticks),\n FadeOut(self.x_axis.numbers)\n )\n self.play(\n GrowFromCenter(brace),\n Write(dt_label)\n )\n self.wait()\n self.play(\n FadeIn(\n example_text, \n run_time = 2,\n lag_ratio = 0.5,\n ),\n ReplacementTransform(\n dt_label.copy(),\n example_text.get_part_by_tex(\"dt\")\n )\n )\n self.wait()\n\n self.rects = rects = target_rects\n self.ticks = ticks\n self.dt_brace = brace\n self.dt_label = dt_label\n self.dt_example_text = example_text\n\n def show_specific_rectangle(self):\n rects = self.rects\n rect = rects[4].copy()\n rect_top = Line(\n rect.get_corner(UP+LEFT),\n rect.get_corner(UP+RIGHT),\n color = self.v_graph.get_color()\n )\n\n t_vals = [1, 1.25]\n t_labels = VGroup(*[\n TexMobject(\"t=%s\"%str(t))\n for t in t_vals\n ])\n t_labels.scale(0.7)\n t_labels.next_to(rect, DOWN)\n for vect, label in zip([LEFT, RIGHT], t_labels):\n label.shift(1.5*vect)\n label.add(Arrow(\n label.get_edge_center(-vect),\n rect.get_corner(DOWN+vect),\n buff = SMALL_BUFF,\n tip_length = 0.15,\n color = WHITE\n ))\n\n v_lines = VGroup()\n h_lines = VGroup()\n height_labels = VGroup()\n for t in t_vals:\n v_line = self.get_vertical_line_to_graph(\n t, self.v_graph,\n color = YELLOW\n )\n y_axis_point = self.graph_origin[0]*RIGHT\n y_axis_point += v_line.get_end()[1]*UP\n h_line = DashedLine(v_line.get_end(), y_axis_point)\n label = TexMobject(\"%.1f\"%v_func(t))\n label.scale(0.5)\n label.next_to(h_line, LEFT, SMALL_BUFF)\n v_lines.add(v_line)\n h_lines.add(h_line)\n height_labels.add(label)\n\n circle = Circle(radius = 0.25, color = WHITE)\n circle.move_to(rect.get_top())\n\n self.play(\n rects.set_fill, None, 0.25,\n Animation(rect)\n )\n self.wait()\n for label in t_labels:\n self.play(FadeIn(label))\n self.wait()\n for v_line, h_line, label in zip(v_lines, h_lines, height_labels):\n self.play(ShowCreation(v_line))\n self.play(ShowCreation(h_line))\n self.play(Write(label, run_time = 1))\n self.wait()\n self.wait()\n t_label_copy = t_labels[0].copy()\n self.play(\n 
t_label_copy.scale, 1./0.7,\n t_label_copy.next_to, self.v_graph_label, DOWN+LEFT, 0\n )\n self.wait()\n self.play(FadeOut(t_label_copy))\n self.wait()\n\n self.play(ShowCreation(circle))\n self.play(ShowCreation(rect_top))\n self.play(FadeOut(circle))\n rect.add(rect_top)\n self.wait()\n for x in range(2):\n self.play(\n rect.stretch_to_fit_height, v_lines[1].get_height(),\n rect.move_to, rect.get_bottom(), DOWN,\n Animation(v_lines),\n run_time = 4,\n rate_func = there_and_back\n )\n\n self.play(*list(map(FadeOut, [\n group[1]\n for group in (v_lines, h_lines, height_labels)\n ])))\n self.play(\n v_lines[0].set_color, RED,\n rate_func = there_and_back,\n )\n self.wait()\n\n area = TextMobject(\n \"7$\\\\frac{\\\\text{m}}{\\\\text{s}}$\",\n \"$\\\\times$\",\n \"0.25s\",\n \"=\",\n \"1.75m\"\n )\n area.next_to(rect, RIGHT, LARGE_BUFF)\n arrow = Arrow(\n area.get_left(), rect.get_center(), \n buff = 0,\n color = WHITE\n )\n area.shift(SMALL_BUFF*RIGHT)\n\n self.play(\n Write(area),\n ShowCreation(arrow)\n )\n self.wait(2)\n self.play(*list(map(FadeOut, [\n area, arrow, \n v_lines[0], h_lines[0], height_labels[0],\n rect, t_labels\n ])))\n\n def show_v_dt_for_all_rectangles(self):\n dt_brace_group = VGroup(self.dt_brace, self.dt_label)\n rects_subset = self.rects[10:20]\n\n last_rect = None\n for rect in rects_subset:\n brace = Brace(rect, LEFT, buff = 0)\n v_t = TexMobject(\"v(t)\")\n v_t.next_to(brace, LEFT, SMALL_BUFF)\n anims = [\n rect.set_fill, None, 1,\n dt_brace_group.next_to, rect, DOWN, SMALL_BUFF\n ]\n if last_rect is not None:\n anims += [\n last_rect.set_fill, None, 0.25,\n ReplacementTransform(last_brace, brace),\n ReplacementTransform(last_v_t, v_t),\n ]\n else:\n anims += [\n GrowFromCenter(brace),\n Write(v_t)\n ]\n self.play(*anims)\n self.wait()\n\n last_rect = rect\n last_brace = brace\n last_v_t = v_t\n\n self.v_t = last_v_t\n self.v_t_brace = last_brace\n\n def write_integral_symbol(self):\n integral = TexMobject(\n \"\\\\int\", \"^8\", \"_0\", \"v(t)\", \"\\\\,dt\"\n )\n integral.to_corner(UP+RIGHT)\n int_copy = integral.get_part_by_tex(\"int\").copy()\n bounds = list(map(integral.get_part_by_tex, [\"0\", \"8\"]))\n\n sum_word = TextMobject(\"``Sum''\")\n sum_word.next_to(integral, DOWN, MED_LARGE_BUFF, LEFT)\n alt_sum_word = sum_word.copy()\n int_symbol = TexMobject(\"\\\\int\")\n int_symbol.replace(alt_sum_word[1], dim_to_match = 1)\n alt_sum_word.submobjects[1] = int_symbol\n\n self.play(FadeOut(self.dt_example_text))\n self.play(Write(integral.get_part_by_tex(\"int\")))\n self.wait()\n self.play(Transform(int_copy, int_symbol))\n self.play(Write(alt_sum_word), Animation(int_copy))\n self.remove(int_copy)\n self.play(ReplacementTransform(alt_sum_word, sum_word))\n self.wait()\n\n for bound in bounds:\n self.play(Write(bound))\n self.wait()\n for bound, num in zip(bounds, [0, 8]):\n bound_copy = bound.copy()\n point = self.coords_to_point(num, 0)\n self.play(\n bound_copy.scale, 1.5,\n bound_copy.next_to, point, DOWN, MED_LARGE_BUFF\n )\n self.play(ApplyWave(self.ticks, direction = UP))\n self.wait()\n\n for mob, tex in (self.v_t, \"v(t)\"), (self.dt_label, \"dt\"):\n self.play(ReplacementTransform(\n mob.copy().set_color(YELLOW), \n integral.get_part_by_tex(tex),\n run_time = 2\n ))\n self.wait()\n\n self.integral = integral\n self.sum_word = sum_word\n\n def roles_of_dt(self):\n rects = self.rects\n next_rects = self.rect_list[3]\n\n morty = Mortimer().flip()\n morty.to_corner(DOWN+LEFT)\n int_dt = self.integral.get_part_by_tex(\"dt\")\n dt_copy = int_dt.copy()\n\n 
self.play(FadeIn(morty))\n self.play(\n morty.change_mode, \"raise_right_hand\",\n morty.look, UP+RIGHT,\n dt_copy.next_to, morty.get_corner(UP+RIGHT), UP,\n dt_copy.set_color, YELLOW\n )\n self.play(Blink(morty))\n self.play(\n ReplacementTransform(\n dt_copy.copy(), int_dt,\n run_time = 2\n ),\n morty.look_at, int_dt\n )\n self.wait(2)\n self.play(\n ReplacementTransform(dt_copy.copy(), self.dt_label),\n morty.look_at, self.dt_label\n )\n self.play(*[\n ApplyMethod(\n tick.shift, tick.get_height()*UP/2,\n run_time = 2,\n rate_func = squish_rate_func(\n there_and_back,\n alpha, alpha+0.2,\n )\n )\n for tick, alpha in zip(\n self.ticks, \n np.linspace(0, 0.8, len(self.ticks))\n )\n ])\n self.wait()\n\n #Shrink dt just a bit\n self.play(\n morty.change_mode, \"pondering\",\n rects.set_fill, None, 0.75,\n *list(map(FadeOut, [\n dt_copy, self.v_t, self.v_t_brace\n ]))\n )\n rects.align_submobjects(next_rects)\n for every_other_rect in rects[::2]:\n every_other_rect.set_fill(opacity = 0)\n self.play(\n self.dt_brace.stretch, 0.5, 0,\n self.dt_brace.move_to, self.dt_brace, LEFT,\n ReplacementTransform(\n rects, next_rects,\n run_time = 2,\n lag_ratio = 0.5\n ),\n Transform(\n self.ticks, self.get_ticks(next_rects),\n run_time = 2,\n lag_ratio = 0.5,\n ),\n )\n self.rects = rects = next_rects\n self.wait()\n self.play(Blink(morty))\n self.play(*[\n ApplyFunction(\n lambda r : r.shift(0.2*UP).set_fill(None, 1),\n rect,\n run_time = 2,\n rate_func = squish_rate_func(\n there_and_back,\n alpha, alpha+0.2,\n )\n )\n for rect, alpha in zip(\n rects, \n np.linspace(0, 0.8, len(rects))\n )\n ]+[\n morty.change_mode, \"thinking\",\n ])\n self.wait()\n\n self.morty = morty\n\n def what_does_sum_approach(self):\n morty = self.morty\n rects = self.rects\n\n cross = TexMobject(\"\\\\times\")\n cross.replace(self.sum_word, stretch = True)\n cross.set_color(RED)\n brace = Brace(self.integral, DOWN)\n dt_to_0 = brace.get_text(\"$dt \\\\to 0$\")\n\n distance_words = TextMobject(\n \"Area\", \"= Distance traveled\"\n )\n distance_words.next_to(rects, UP)\n arrow = Arrow(\n distance_words[0].get_bottom(),\n rects.get_center(),\n color = WHITE\n )\n\n self.play(PiCreatureSays(\n morty, \"Why not $\\\\Sigma$?\",\n target_mode = \"sassy\"\n ))\n self.play(Blink(morty))\n self.wait()\n self.play(Write(cross))\n self.wait()\n self.play(\n RemovePiCreatureBubble(morty, target_mode = \"plain\"),\n *list(map(FadeOut, [\n cross, self.sum_word, self.ticks,\n self.dt_brace, self.dt_label,\n ]))\n )\n self.play(FadeIn(brace), FadeIn(dt_to_0))\n for new_rects in self.rect_list[4:]:\n rects.align_submobjects(new_rects)\n for every_other_rect in rects[::2]:\n every_other_rect.set_fill(opacity = 0)\n self.play(\n Transform(\n rects, new_rects, \n run_time = 2,\n lag_ratio = 0.5\n ),\n morty.look_at, rects,\n )\n self.wait()\n\n self.play(\n Write(distance_words),\n ShowCreation(arrow),\n morty.change_mode, \"pondering\",\n morty.look_at, distance_words,\n )\n self.wait()\n self.play(Blink(morty))\n self.wait()\n\n self.area_arrow = arrow\n\n def label_integral(self):\n words = TextMobject(\"``Integral of $v(t)$''\")\n words.to_edge(UP)\n arrow = Arrow(\n words.get_right(),\n self.integral.get_left()\n )\n\n self.play(Indicate(self.integral))\n self.play(Write(words, run_time = 2))\n self.play(ShowCreation(arrow))\n self.wait()\n self.play(*[\n ApplyFunction(\n lambda r : r.shift(0.2*UP).set_fill(None, 1),\n rect,\n run_time = 3,\n rate_func = squish_rate_func(\n there_and_back,\n alpha, alpha+0.2,\n )\n )\n for rect, alpha in 
zip(\n self.rects, \n np.linspace(0, 0.8, len(self.rects))\n )\n ]+[\n Animation(self.area_arrow),\n self.morty.change_mode, \"happy\",\n self.morty.look_at, self.rects,\n ])\n self.wait()\n\n #####\n\n def get_pw_constant_graph(self):\n result = VGroup()\n for left_x in range(8):\n xs = [left_x, left_x+1]\n y = self.v_graph.underlying_function(left_x)\n line = Line(*[\n self.coords_to_point(x, y)\n for x in xs\n ])\n line.set_color(self.v_graph.get_color())\n result.add(line)\n return result\n\n def get_ticks(self, rects):\n ticks = VGroup(*[\n Line(\n point+self.tick_size*UP/2, \n point+self.tick_size*DOWN/2\n )\n for t in np.linspace(0, 8, len(rects)+1)\n for point in [self.coords_to_point(t, 0)]\n ])\n ticks.set_color(YELLOW)\n return ticks\n\nclass DontKnowHowToHandleNonConstant(TeacherStudentsScene):\n def construct(self):\n self.play(*[\n ApplyMethod(pi.change, \"maybe\", UP)\n for pi in self.get_pi_creatures()\n ])\n self.wait(3)\n\nclass CarJourneyApproximation(Scene):\n CONFIG = {\n \"n_jumps\" : 5,\n \"bottom_words\" : \"Approximated motion (5 jumps)\",\n }\n def construct(self):\n points = [5*LEFT + v for v in (UP, 2*DOWN)]\n cars = [Car().move_to(point) for point in points]\n h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)\n words = [\n TextMobject(\"Real motion (smooth)\").shift(3*UP),\n TextMobject(self.bottom_words).shift(0.5*DOWN),\n ]\n words[1].set_color(GREEN)\n\n\n self.add(h_line, *cars + words)\n self.wait()\n self.play(*[\n MoveCar(\n car, point+10*RIGHT,\n run_time = 5,\n rate_func = rf\n )\n for car, point, rf in zip(cars, points, [\n s_rate_func,\n self.get_approximated_rate_func(self.n_jumps)\n ])\n ])\n self.wait()\n\n def get_approximated_rate_func(self, n):\n new_v_rate_func = lambda t : v_rate_func(np.floor(t*n)/n)\n max_integral, err = scipy.integrate.quad(\n v_rate_func, 0, 1\n )\n def result(t):\n integral, err = scipy.integrate.quad(new_v_rate_func, 0, t)\n return integral/max_integral\n return result\n\nclass LessWrongCarJourneyApproximation(CarJourneyApproximation):\n CONFIG = {\n \"n_jumps\" : 20,\n \"bottom_words\" : \"Better approximation (20 jumps)\",\n }\n\nclass TellMeThatsNotSurprising(TeacherStudentsScene):\n def construct(self):\n self.teacher_says(\n \"Tell me that's \\\\\\\\ not surprising!\",\n target_mode = \"hooray\",\n run_time = 1\n )\n self.wait(3)\n\nclass HowDoesThisHelp(TeacherStudentsScene):\n def construct(self):\n self.student_says(\n \"How does this help\\\\textinterrobang\",\n target_mode = \"angry\",\n run_time = 1\n )\n self.change_student_modes(\n \"confused\", \"angry\", \"confused\",\n )\n self.wait(2)\n self.teacher_says(\n \"You're right.\",\n target_mode = \"shruggie\",\n run_time = 1\n )\n self.change_student_modes(*[\"sassy\"]*3)\n self.wait(2)\n\nclass AreaUnderACurve(GraphScene):\n CONFIG = {\n \"y_max\" : 4,\n \"y_min\" : 0,\n \"num_iterations\" : 7\n }\n def construct(self):\n self.setup_axes()\n graph = self.get_graph(self.func)\n rect_list = self.get_riemann_rectangles_list(\n graph, self.num_iterations\n )\n VGroup(*rect_list).set_fill(opacity = 0.8)\n rects = rect_list[0]\n\n self.play(ShowCreation(graph))\n self.play(Write(rects))\n for new_rects in rect_list[1:]:\n rects.align_submobjects(new_rects)\n for every_other_rect in rects[::2]:\n every_other_rect.set_fill(opacity = 0)\n self.play(Transform(\n rects, new_rects,\n run_time = 2,\n lag_ratio = 0.5\n ))\n self.wait()\n\n\n def func(self, x):\n return np.sin(x) + 1\n\nclass AltAreaUnderCurve(AreaUnderACurve):\n CONFIG = {\n \"graph_origin\" : 2*DOWN,\n 
\"x_min\" : -3,\n \"x_max\" : 3,\n \"x_axis_width\" : 12,\n \"y_max\" : 2,\n \"y_axis_height\" : 4,\n }\n def func(self, x):\n return np.exp(-x**2)\n\nclass Chapter1Wrapper(Chapter2Wrapper):\n CONFIG = {\n \"title\" : \"Essence of calculus, chapter 1\",\n }\n\nclass AreaIsDerivative(PlotVelocity, ReconfigurableScene):\n CONFIG = {\n \"y_axis_label\" : \"\",\n \"num_rects\" : 400,\n \"dT\" : 0.25,\n \"variable_point_label\" : \"T\",\n \"area_opacity\" : 0.8,\n }\n def setup(self):\n PlotVelocity.setup(self)\n ReconfigurableScene.setup(self)\n self.setup_axes()\n self.add(*self.get_v_graph_and_label())\n self.x_axis_label_mob.shift(MED_LARGE_BUFF*DOWN)\n self.v_graph_label.shift(MED_LARGE_BUFF*DOWN)\n self.submobjects = []\n\n def construct(self):\n self.introduce_variable_area()\n self.write_integral()\n self.nudge_input()\n self.show_rectangle_approximation()\n\n def introduce_variable_area(self):\n area = self.area = self.get_area(0, 6)\n x_nums = self.x_axis.numbers\n\n self.play(Write(area, run_time = 2))\n self.play(FadeOut(self.x_axis.numbers))\n self.add_T_label(6)\n self.change_area_bounds(\n new_t_max = 4,\n rate_func = there_and_back,\n run_time = 2\n )\n self.wait()\n\n def write_integral(self):\n integral = TexMobject(\"\\\\int\", \"^T\", \"_0\", \"v(t)\", \"\\\\,dt\")\n integral.to_corner(UP+RIGHT)\n integral.shift(2*LEFT)\n top_T = integral.get_part_by_tex(\"T\")\n moving_T = self.T_label_group[0]\n\n s_T = TexMobject(\"s(T)\", \"= \")\n s_T.set_color_by_tex(\"s\", DISTANCE_COLOR)\n s_T.next_to(integral, LEFT)\n\n int_arrow, s_arrow = [\n Arrow(\n mob.get_left(), self.area.get_center(),\n color = WHITE\n )\n for mob in (integral, s_T)\n ]\n\n distance_word = TextMobject(\"Distance\")\n distance_word.move_to(self.area)\n\n self.play(Write(integral))\n self.play(ShowCreation(int_arrow))\n self.submobjects.append(int_arrow)\n self.wait()\n self.change_area_bounds(\n new_t_max = 8,\n rate_func = there_and_back,\n run_time = 3,\n )\n self.play(Indicate(top_T))\n self.play(ReplacementTransform(\n top_T.copy(), moving_T\n ))\n self.change_area_bounds(\n new_t_max = 3,\n rate_func = there_and_back,\n run_time = 3\n )\n self.wait()\n self.play(Write(distance_word, run_time = 2))\n self.play(\n ReplacementTransform(int_arrow, s_arrow),\n FadeIn(s_T)\n )\n self.wait()\n self.play(FadeOut(distance_word))\n self.change_area_bounds(new_t_max = 0, run_time = 2)\n self.change_area_bounds(\n new_t_max = 8, \n rate_func=linear,\n run_time = 7.9,\n )\n self.wait()\n self.change_area_bounds(new_t_max = 5)\n self.wait()\n\n def nudge_input(self):\n dark_area = self.area.copy()\n dark_area.set_fill(BLACK, opacity = 0.5)\n curr_T = self.x_axis.point_to_number(self.area.get_right())\n new_T = curr_T + self.dT\n\n rect = Rectangle(\n stroke_width = 0,\n fill_color = YELLOW,\n fill_opacity = 0.75\n )\n rect.replace(\n VGroup(\n VectorizedPoint(self.coords_to_point(new_T, 0)),\n self.right_v_line,\n ),\n stretch = True\n )\n\n dT_brace = Brace(rect, DOWN, buff = 0)\n dT_label = dT_brace.get_text(\"$dT$\", buff = SMALL_BUFF)\n dT_label_group = VGroup(dT_label, dT_brace)\n\n ds_label = TexMobject(\"ds\")\n ds_label.next_to(rect, RIGHT, LARGE_BUFF, UP)\n ds_label.set_color(DISTANCE_COLOR)\n ds_arrow = Arrow(ds_label.get_left(), rect.get_left())\n ds_arrow.set_color(WHITE)\n\n v_brace = Brace(rect, LEFT, buff = SMALL_BUFF)\n v_T_label = v_brace.get_text(\"$v(T)$\", buff = SMALL_BUFF)\n\n self.change_area_bounds(new_t_max = new_T)\n self.play(\n FadeIn(dark_area),\n *list(map(Animation, self.submobjects))\n 
)\n self.play(\n FadeOut(self.T_label_group),\n FadeIn(dT_label_group)\n )\n self.wait()\n self.play(Write(ds_label))\n self.play(ShowCreation(ds_arrow))\n self.wait(2)\n self.play(GrowFromCenter(v_brace))\n self.play(ReplacementTransform(\n self.v_graph_label.get_part_by_tex(\"v\").copy(),\n v_T_label,\n run_time = 2\n ))\n self.wait()\n self.play(Indicate(dT_label))\n self.wait()\n\n self.rect = rect\n self.dT_label_group = dT_label_group\n self.v_T_label_group = VGroup(v_T_label, v_brace)\n self.dark_area = dark_area\n self.ds_label = ds_label\n self.ds_arrow = ds_arrow\n\n def show_rectangle_approximation(self):\n formula1 = TexMobject(\"ds\", \"=\", \"v(T)\", \"dT\")\n formula2 = TexMobject(\"{ds\", \"\\\\over\\\\,\", \"dT}\", \"=\", \"v(T)\")\n for formula in formula1, formula2:\n formula.next_to(self.v_graph_label, UP, LARGE_BUFF)\n formula.set_color_by_tex(\"ds\", DISTANCE_COLOR)\n\n self.play(\n DrawBorderThenFill(self.rect),\n Animation(self.ds_arrow)\n )\n self.wait()\n self.play(*[\n ReplacementTransform(\n mob, formula1.get_part_by_tex(tex),\n run_time = 2\n )\n for mob, tex in [\n (self.ds_label, \"ds\"),\n (self.ds_arrow, \"=\"),\n (self.v_T_label_group[0].copy(), \"v(T)\"),\n (self.dT_label_group[0].copy(), \"dT\"),\n ]\n ])\n self.wait()\n self.transition_to_alt_config(\n dT = self.dT/5.0,\n transformation_kwargs = {\"run_time\" : 2},\n )\n self.wait()\n self.play(*[\n ReplacementTransform(\n formula1.get_part_by_tex(tex),\n formula2.get_part_by_tex(tex),\n )\n for tex in (\"ds\", \"=\", \"v(T)\", \"dT\")\n ] + [\n Write(formula2.get_part_by_tex(\"over\"))\n ])\n self.wait()\n\n\n ####\n\n def add_T_label(self, x_val, **kwargs):\n triangle = RegularPolygon(n=3, start_angle = np.pi/2)\n triangle.set_height(MED_SMALL_BUFF)\n triangle.move_to(self.coords_to_point(x_val, 0), UP)\n triangle.set_fill(WHITE, 1)\n triangle.set_stroke(width = 0)\n T_label = TexMobject(self.variable_point_label)\n T_label.next_to(triangle, DOWN)\n v_line = self.get_vertical_line_to_graph(\n x_val, self.v_graph,\n color = YELLOW\n )\n\n self.play(\n DrawBorderThenFill(triangle),\n ShowCreation(v_line),\n Write(T_label, run_time = 1),\n **kwargs\n )\n\n self.T_label_group = VGroup(T_label, triangle)\n self.right_v_line = v_line\n\n def get_area(self, t_min, t_max):\n numerator = max(t_max - t_min, 0.01)\n dx = float(numerator) / self.num_rects\n return self.get_riemann_rectangles(\n self.v_graph,\n x_min = t_min,\n x_max = t_max,\n dx = dx,\n stroke_width = 0,\n ).set_fill(opacity = self.area_opacity)\n\n def change_area_bounds(self, new_t_min = None, new_t_max = None, **kwargs):\n curr_t_min = self.x_axis.point_to_number(self.area.get_left())\n curr_t_max = self.x_axis.point_to_number(self.area.get_right())\n if new_t_min is None:\n new_t_min = curr_t_min\n if new_t_max is None:\n new_t_max = curr_t_max\n\n group = VGroup(self.area, self.right_v_line, self.T_label_group)\n def update_group(group, alpha):\n area, v_line, T_label = group\n t_min = interpolate(curr_t_min, new_t_min, alpha)\n t_max = interpolate(curr_t_max, new_t_max, alpha)\n new_area = self.get_area(t_min, t_max)\n new_v_line = self.get_vertical_line_to_graph(\n t_max, self.v_graph\n )\n new_v_line.set_color(v_line.get_color())\n T_label.move_to(new_v_line.get_bottom(), UP)\n\n #Fade close to 0\n T_label[0].set_fill(opacity = min(1, t_max)) \n\n Transform(area, new_area).update(1)\n Transform(v_line, new_v_line).update(1)\n return group\n\n self.play(\n UpdateFromAlphaFunc(group, update_group),\n *list(map(Animation, 
self.submobjects)),\n **kwargs\n )\n\nclass DirectInterpretationOfDsDt(TeacherStudentsScene):\n def construct(self):\n equation = TexMobject(\"{ds\", \"\\\\over\\\\,\", \"dT}\", \"(T)\", \"=\", \"v(T)\")\n ds, over, dt, of_T, equals, v = equation\n equation.next_to(self.get_pi_creatures(), UP, LARGE_BUFF)\n equation.shift(RIGHT)\n v.set_color(VELOCITY_COLOR)\n\n s_words = TextMobject(\"Tiny change in\", \"distance\")\n s_words.next_to(ds, UP+LEFT, LARGE_BUFF)\n s_words.shift_onto_screen()\n s_arrow = Arrow(s_words[1].get_bottom(), ds.get_left())\n s_words.add(s_arrow)\n s_words.set_color(DISTANCE_COLOR)\n\n t_words = TextMobject(\"Tiny change in\", \"time\")\n t_words.next_to(dt, DOWN+LEFT)\n t_words.to_edge(LEFT)\n t_arrow = Arrow(t_words[1].get_top(), dt.get_left())\n t_words.add(t_arrow)\n t_words.set_color(TIME_COLOR)\n\n self.add(ds, over, dt, of_T)\n for words, part in (s_words, ds), (t_words, dt):\n self.play(\n FadeIn(\n words, \n run_time = 2,\n lag_ratio = 0.5,\n ),\n self.students[1].change_mode, \"raise_right_hand\"\n )\n self.play(part.set_color, words.get_color())\n self.wait()\n self.play(Write(VGroup(equals, v)))\n self.change_student_modes(*[\"pondering\"]*3)\n self.wait(3)\n\nclass FindAntiderivative(Antiderivative):\n def construct(self):\n self.introduce()\n self.first_part()\n self.second_part()\n self.combine()\n self.add_plus_C()\n\n def introduce(self):\n q_marks, rhs = functions = self.get_functions(\"???\", \"t(8-t)\")\n expanded_rhs = TexMobject(\"8t - t^2\")\n expanded_rhs.move_to(rhs, LEFT)\n expanded_rhs.set_color(rhs.get_color())\n self.v_part1 = VGroup(*expanded_rhs[:2])\n self.v_part2 = VGroup(*expanded_rhs[2:])\n for part in self.v_part1, self.v_part2:\n part.save_state()\n\n top_arc, bottom_arc = arcs = self.get_arcs(functions)\n derivative, antiderivative = words = self.get_arc_labels(arcs)\n\n self.add(functions)\n self.play(*list(map(ShowCreation, arcs)))\n for word in words:\n self.play(FadeIn(word, lag_ratio = 0.5))\n self.wait()\n self.change_mode(\"confused\")\n self.wait(2)\n self.play(*[\n ReplacementTransform(\n rhs[i], expanded_rhs[j],\n run_time = 2,\n path_arc = np.pi\n )\n for i, j in enumerate([1, 4, 0, 2, 3, 4])\n ]+[\n self.pi_creature.change_mode, \"hesitant\"\n ])\n self.wait()\n\n self.q_marks = q_marks\n self.arcs = arcs\n self.words = words\n\n def first_part(self):\n four_t_squared, two_t = self.get_functions(\"4t^2\", \"2t\")\n four = four_t_squared[0]\n four.shift(UP)\n four.set_fill(opacity = 0)\n t_squared = VGroup(*four_t_squared[1:])\n two_t.move_to(self.v_part1, LEFT)\n\n self.play(self.v_part2.to_corner, UP+RIGHT)\n self.play(\n self.pi_creature.change, \"plain\", self.v_part1\n )\n self.play(ApplyWave(\n self.q_marks, \n direction = UP, \n amplitude = SMALL_BUFF\n ))\n self.wait(2)\n self.play(\n FadeOut(self.q_marks),\n FadeIn(t_squared),\n self.v_part1.shift, DOWN+RIGHT,\n )\n self.play(*[\n ReplacementTransform(\n t_squared[i].copy(), two_t[1-i],\n run_time = 2,\n path_arc = -np.pi/6.\n )\n for i in (0, 1)\n ])\n self.change_mode(\"thinking\")\n self.wait()\n self.play(four.set_fill, YELLOW, 1)\n self.play(four.shift, DOWN)\n self.play(FadeOut(two_t))\n self.play(self.v_part1.restore)\n self.play(four.set_color, DISTANCE_COLOR)\n self.wait(2)\n\n self.s_part1 = four_t_squared\n\n def second_part(self):\n self.arcs_copy = self.arcs.copy()\n self.words_copy = self.words.copy()\n part1_group = VGroup(\n self.s_part1, self.v_part1, \n self.arcs_copy, self.words_copy\n )\n\n neg_third_t_cubed, three_t_squared = 
self.get_functions(\n \"- \\\\frac{1}{3} t^3\", \"3t^2\"\n )\n three_t_squared.move_to(self.v_part1, LEFT)\n neg = neg_third_t_cubed[0]\n third = VGroup(*neg_third_t_cubed[1:4])\n t_cubed = VGroup(*neg_third_t_cubed[4:])\n three = three_t_squared[0]\n t_squared = VGroup(*three_t_squared[1:])\n\n self.play(\n part1_group.scale, 0.5,\n part1_group.to_corner, UP+LEFT,\n self.pi_creature.change_mode, \"plain\"\n )\n self.play(\n self.v_part2.restore,\n self.v_part2.shift, LEFT\n )\n self.play(FadeIn(self.q_marks))\n self.wait()\n\n self.play(\n FadeOut(self.q_marks),\n FadeIn(t_cubed),\n self.v_part2.shift, DOWN+RIGHT\n )\n self.play(*[\n ReplacementTransform(\n t_cubed[i].copy(), three_t_squared[j],\n path_arc = -np.pi/6,\n run_time = 2,\n )\n for i, j in [(0, 1), (1, 0), (1, 2)]\n ])\n self.wait()\n self.play(FadeIn(third))\n self.play(FadeOut(three))\n self.wait(2)\n self.play(Write(neg))\n self.play(\n FadeOut(t_squared),\n self.v_part2.shift, UP+LEFT\n )\n self.wait(2)\n\n self.s_part2 = neg_third_t_cubed\n\n def combine(self):\n self.play(\n self.v_part1.restore,\n self.v_part2.restore,\n self.s_part1.scale, 2,\n self.s_part1.next_to, self.s_part2, LEFT,\n FadeOut(self.arcs_copy),\n FadeOut(self.words_copy),\n run_time = 2,\n )\n self.change_mode(\"happy\")\n self.wait(2)\n\n def add_plus_C(self):\n s_group = VGroup(self.s_part1, self.s_part2)\n plus_Cs = [\n TexMobject(\"+%d\"%d)\n for d in range(1, 8)\n ]\n for plus_C in plus_Cs:\n plus_C.set_color(YELLOW)\n plus_C.move_to(s_group, RIGHT)\n plus_C = plus_Cs[0]\n\n self.change_mode(\"sassy\")\n self.wait()\n self.play(\n s_group.next_to, plus_C.copy(), LEFT,\n GrowFromCenter(plus_C),\n )\n self.wait()\n for new_plus_C in plus_Cs[1:]:\n self.play(Transform(plus_C, new_plus_C))\n self.wait()\n\nclass GraphSPlusC(GraphDistanceVsTime):\n CONFIG = {\n \"y_axis_label\" : \"Distance\"\n }\n def construct(self):\n self.setup_axes()\n graph = self.get_graph(\n s_func, \n color = DISTANCE_COLOR,\n x_min = 0,\n x_max = 8,\n )\n tangent = self.get_secant_slope_group(\n 6, graph, dx = 0.01\n ).secant_line\n v_line = self.get_vertical_line_to_graph(\n 6, graph, line_class = DashedLine\n )\n v_line.scale_in_place(2)\n v_line.set_color(WHITE)\n graph_label, plus_C = full_label = TexMobject(\n \"s(t) = 4t^2 - \\\\frac{1}{3}t^3\", \"+C\"\n )\n plus_C.set_color(YELLOW)\n full_label.next_to(graph.points[-1], DOWN)\n full_label.to_edge(RIGHT)\n\n self.play(ShowCreation(graph))\n self.play(FadeIn(graph_label))\n self.wait()\n self.play(\n graph.shift, UP,\n run_time = 2,\n rate_func = there_and_back\n )\n self.play(ShowCreation(tangent))\n graph.add(tangent)\n self.play(ShowCreation(v_line))\n self.play(\n graph.shift, 2*DOWN, \n run_time = 4,\n rate_func = there_and_back,\n )\n self.play(Write(plus_C))\n self.play(\n graph.shift, 2*UP,\n rate_func = there_and_back,\n run_time = 4,\n )\n self.wait()\n\nclass LowerBound(AreaIsDerivative):\n CONFIG = {\n \"graph_origin\" : 2.5*DOWN + 6*LEFT\n }\n\n def construct(self):\n self.add_integral_and_area()\n self.mention_lower_bound()\n self.drag_right_endpoint_to_zero()\n self.write_antiderivative_difference()\n self.show_alternate_antiderivative_difference()\n self.add_constant_to_antiderivative()\n\n def add_integral_and_area(self):\n self.area = self.get_area(0, 6)\n self.integral = self.get_integral(\"0\", \"T\")\n self.remove(self.x_axis.numbers)\n self.add(self.area, self.integral)\n self.add_T_label(6, run_time = 0)\n\n def mention_lower_bound(self):\n lower_bound = self.integral.get_part_by_tex(\"0\")\n 
circle = Circle(color = YELLOW)\n circle.replace(lower_bound)\n circle.scale_in_place(3)\n zero_label = lower_bound.copy()\n\n self.play(ShowCreation(circle))\n self.play(Indicate(lower_bound))\n self.play(\n zero_label.scale, 1.5,\n zero_label.next_to, self.graph_origin, DOWN, MED_LARGE_BUFF,\n FadeOut(circle)\n )\n self.wait()\n\n self.zero_label = zero_label\n\n def drag_right_endpoint_to_zero(self):\n zero_integral = self.get_integral(\"0\", \"0\")\n zero_integral[1].set_color(YELLOW)\n zero_int_bounds = list(reversed(\n zero_integral.get_parts_by_tex(\"0\")\n ))\n for bound in zero_int_bounds:\n circle = Circle(color = YELLOW)\n circle.replace(bound)\n circle.scale_in_place(3)\n bound.circle = circle\n self.integral.save_state()\n equals_zero = TexMobject(\"=0\")\n equals_zero.next_to(zero_integral, RIGHT)\n equals_zero.set_color(GREEN)\n\n self.change_area_bounds(0, 0, run_time = 3)\n self.play(ReplacementTransform(\n self.zero_label.copy(), equals_zero\n ))\n self.play(Transform(self.integral, zero_integral))\n self.wait(2)\n for bound in zero_int_bounds:\n self.play(ShowCreation(bound.circle))\n self.play(FadeOut(bound.circle))\n self.play(*[\n ReplacementTransform(\n bound.copy(), VGroup(equals_zero[1])\n )\n for bound in zero_int_bounds\n ])\n self.wait(2)\n self.change_area_bounds(0, 5)\n self.play(\n self.integral.restore,\n FadeOut(equals_zero)\n )\n\n self.zero_integral = zero_integral\n\n def write_antiderivative_difference(self):\n antideriv_diff = self.get_antiderivative_difference(\"0\", \"T\")\n equals, at_T, minus, at_zero = antideriv_diff\n antideriv_diff_at_eight = self.get_antiderivative_difference(\"0\", \"8\")\n at_eight = antideriv_diff_at_eight.left_part\n integral_at_eight = self.get_integral(\"0\", \"8\")\n\n for part in at_T, at_zero, at_eight:\n part.brace = Brace(part, DOWN, buff = SMALL_BUFF)\n part.brace.save_state()\n\n antideriv_text = at_T.brace.get_text(\"Antiderivative\", buff = SMALL_BUFF)\n antideriv_text.set_color(MAROON_B)\n value_at_eight = at_eight.brace.get_text(\n \"%.2f\"%s_func(8)\n )\n happens_to_be_zero = at_zero.brace.get_text(\"\"\"\n Happens to\n equal 0\n \"\"\")\n\n big_brace = Brace(VGroup(at_T, at_zero))\n cancel_text = big_brace.get_text(\"Cancels when $T=0$\")\n\n self.play(*list(map(Write, [equals, at_T])))\n self.play(\n GrowFromCenter(at_T.brace),\n Write(antideriv_text, run_time = 2)\n )\n self.change_area_bounds(0, 5.5, rate_func = there_and_back)\n self.wait()\n self.play(\n ReplacementTransform(at_T.copy(), at_zero),\n Write(minus)\n )\n self.wait()\n self.play(\n ReplacementTransform(at_T.brace, big_brace),\n ReplacementTransform(antideriv_text, cancel_text)\n )\n self.change_area_bounds(0, 0, run_time = 4)\n self.wait()\n self.play(\n ReplacementTransform(big_brace, at_zero.brace),\n ReplacementTransform(cancel_text, happens_to_be_zero),\n )\n self.wait(2)\n self.change_area_bounds(0, 8, run_time = 2)\n self.play(\n Transform(self.integral, integral_at_eight),\n Transform(antideriv_diff, antideriv_diff_at_eight),\n MaintainPositionRelativeTo(at_zero.brace, at_zero),\n MaintainPositionRelativeTo(happens_to_be_zero, at_zero.brace),\n )\n self.play(\n GrowFromCenter(at_eight.brace),\n Write(value_at_eight)\n )\n self.wait(2)\n self.play(*list(map(FadeOut, [\n at_eight.brace, value_at_eight,\n at_zero.brace, happens_to_be_zero,\n ])))\n\n self.antideriv_diff = antideriv_diff\n\n def show_alternate_antiderivative_difference(self):\n new_integral = self.get_integral(\"1\", \"7\")\n new_antideriv_diff = 
self.get_antiderivative_difference(\"1\", \"7\")\n numbers = [\n TexMobject(\"%d\"%d).next_to(\n self.coords_to_point(d, 0), \n DOWN, MED_LARGE_BUFF\n )\n for d in (1, 7)\n ]\n tex_mobs = [new_integral]+new_antideriv_diff[1::2]+numbers\n for tex_mob in tex_mobs:\n tex_mob.set_color_by_tex(\"1\", RED)\n tex_mob.set_color_by_tex(\"7\", GREEN)\n tex_mob.set_color_by_tex(\"\\\\frac{1}{3}\", WHITE)\n\n self.change_area_bounds(1, 7, run_time = 2)\n self.play(\n self.T_label_group[0].set_fill, None, 0,\n *list(map(FadeIn, numbers))\n )\n self.play(\n Transform(self.integral, new_integral),\n Transform(self.antideriv_diff, new_antideriv_diff),\n )\n self.wait(3)\n for part in self.antideriv_diff[1::2]:\n self.play(Indicate(part, scale_factor = 1.1))\n self.wait()\n\n def add_constant_to_antiderivative(self):\n antideriv_diff = self.antideriv_diff\n plus_fives = VGroup(*[TexMobject(\"+5\") for i in range(2)])\n plus_fives.set_color(YELLOW)\n for five, part in zip(plus_fives, antideriv_diff[1::2]):\n five.next_to(part, DOWN)\n group = VGroup(\n plus_fives[0],\n antideriv_diff[2].copy(),\n plus_fives[1]\n )\n\n self.play(Write(plus_fives, run_time = 2))\n self.wait(2)\n self.play(\n group.arrange,\n group.next_to, antideriv_diff, DOWN, MED_LARGE_BUFF\n )\n self.wait()\n self.play(FadeOut(group, run_time = 2))\n self.wait()\n\n #####\n\n def get_integral(self, lower_bound, upper_bound):\n result = TexMobject(\n \"\\\\int\", \"^\"+upper_bound, \"_\"+lower_bound, \n \"t(8-t)\", \"\\\\,dt\"\n )\n result.next_to(self.graph_origin, RIGHT, MED_LARGE_BUFF)\n result.to_edge(UP)\n return result\n\n def get_antiderivative_difference(self, lower_bound, upper_bound):\n strings = []\n for bound in upper_bound, lower_bound:\n try:\n d = int(bound)\n strings.append(\"(%d)\"%d)\n except:\n strings.append(bound)\n parts = []\n for s in strings:\n part = TexMobject(\n \"\\\\left(\",\n \"4\", s, \"^2\", \"-\", \"\\\\frac{1}{3}\", s, \"^3\"\n \"\\\\right))\"\n )\n part.set_color_by_tex(s, YELLOW, substring = False)\n parts.append(part)\n result = VGroup(\n TexMobject(\"=\"), parts[0], \n TexMobject(\"-\"), parts[1],\n )\n result.left_part, result.right_part = parts\n result.arrange(RIGHT)\n result.scale(0.9)\n result.next_to(self.integral, RIGHT)\n return result\n\nclass FundamentalTheorem(GraphScene):\n CONFIG = {\n \"lower_bound\" : 1,\n \"upper_bound\" : 7,\n \"lower_bound_color\" : RED,\n \"upper_bound_color\" : GREEN,\n \"n_riemann_iterations\" : 6,\n }\n\n def construct(self):\n self.add_graph_and_integral()\n self.show_f_dx_sum()\n self.show_rects_approaching_area()\n self.write_antiderivative()\n self.write_fundamental_theorem_of_calculus()\n self.show_integral_considering_continuum()\n self.show_antiderivative_considering_bounds()\n\n def add_graph_and_integral(self):\n self.setup_axes()\n integral = TexMobject(\"\\\\int\", \"^b\", \"_a\", \"f(x)\", \"\\\\,dx\")\n integral.next_to(ORIGIN, LEFT)\n integral.to_edge(UP)\n integral.set_color_by_tex(\"a\", self.lower_bound_color)\n integral.set_color_by_tex(\"b\", self.upper_bound_color)\n graph = self.get_graph(\n lambda x : -0.01*x*(x-3)*(x-6)*(x-12) + 3,\n )\n self.add(integral, graph)\n self.graph = graph\n self.integral = integral\n\n self.bound_labels = VGroup()\n self.v_lines = VGroup()\n for bound, tex in (self.lower_bound, \"a\"), (self.upper_bound, \"b\"):\n label = integral.get_part_by_tex(tex).copy()\n label.scale(1.5)\n label.next_to(self.coords_to_point(bound, 0), DOWN)\n v_line = self.get_vertical_line_to_graph(\n bound, graph, color = 
label.get_color()\n )\n\n self.bound_labels.add(label)\n self.v_lines.add(v_line)\n self.add(label, v_line)\n\n def show_f_dx_sum(self):\n kwargs = {\n \"x_min\" : self.lower_bound,\n \"x_max\" : self.upper_bound,\n \"fill_opacity\" : 0.75,\n \"stroke_width\" : 0.25,\n }\n low_opacity = 0.25\n start_rect_index = 3\n num_shown_sum_steps = 5\n last_rect_index = start_rect_index + num_shown_sum_steps + 1\n\n self.rect_list = self.get_riemann_rectangles_list(\n self.graph, self.n_riemann_iterations, **kwargs\n )\n rects = self.rects = self.rect_list[0]\n rects.save_state()\n\n start_rect = rects[start_rect_index]\n f_brace = Brace(start_rect, LEFT, buff = 0)\n dx_brace = Brace(start_rect, DOWN, buff = 0)\n f_brace.label = f_brace.get_text(\"$f(x)$\")\n dx_brace.label = dx_brace.get_text(\"$dx$\")\n\n flat_rects = self.get_riemann_rectangles(\n self.get_graph(lambda x : 0), dx = 0.5, **kwargs\n )\n\n self.transform_between_riemann_rects(\n flat_rects, rects, \n replace_mobject_with_target_in_scene = True,\n )\n self.play(*[\n ApplyMethod(\n rect.set_fill, None, \n 1 if rect is start_rect else low_opacity\n )\n for rect in rects\n ])\n self.play(*it.chain(\n list(map(GrowFromCenter, [f_brace, dx_brace])),\n list(map(Write, [f_brace.label, dx_brace.label])),\n ))\n self.wait()\n for i in range(start_rect_index+1, last_rect_index):\n self.play(\n rects[i-1].set_fill, None, low_opacity,\n rects[i].set_fill, None, 1,\n f_brace.set_height, rects[i].get_height(),\n f_brace.next_to, rects[i], LEFT, 0,\n dx_brace.next_to, rects[i], DOWN, 0,\n *[\n MaintainPositionRelativeTo(brace.label, brace)\n for brace in (f_brace, dx_brace)\n ]\n )\n self.wait()\n self.play(*it.chain(\n list(map(FadeOut, [\n f_brace, dx_brace, \n f_brace.label, dx_brace.label\n ])),\n [rects.set_fill, None, kwargs[\"fill_opacity\"]]\n ))\n\n def show_rects_approaching_area(self):\n for new_rects in self.rect_list:\n self.transform_between_riemann_rects(\n self.rects, new_rects\n )\n\n def write_antiderivative(self):\n deriv = TexMobject(\n \"{d\", \"F\", \"\\\\over\\\\,\", \"dx}\", \"(x)\", \"=\", \"f(x)\"\n )\n deriv_F = deriv.get_part_by_tex(\"F\")\n deriv.next_to(self.integral, DOWN, MED_LARGE_BUFF)\n rhs = TexMobject(*\"=F(b)-F(a)\")\n rhs.set_color_by_tex(\"a\", self.lower_bound_color)\n rhs.set_color_by_tex(\"b\", self.upper_bound_color)\n rhs.next_to(self.integral, RIGHT)\n\n self.play(Write(deriv))\n self.wait(2)\n self.play(*it.chain(\n [\n ReplacementTransform(deriv_F.copy(), part)\n for part in rhs.get_parts_by_tex(\"F\")\n ],\n [\n Write(VGroup(*rhs.get_parts_by_tex(tex)))\n for tex in \"=()-\"\n ]\n ))\n for tex in \"b\", \"a\":\n self.play(ReplacementTransform(\n self.integral.get_part_by_tex(tex).copy(),\n rhs.get_part_by_tex(tex)\n ))\n self.wait()\n self.wait(2)\n\n self.deriv = deriv\n self.rhs = rhs\n\n def write_fundamental_theorem_of_calculus(self):\n words = TextMobject(\"\"\"\n Fundamental \n theorem of \n calculus\n \"\"\")\n words.to_edge(RIGHT)\n\n self.play(Write(words))\n self.wait()\n\n def show_integral_considering_continuum(self):\n self.play(*[\n ApplyMethod(mob.set_fill, None, 0.2)\n for mob in (self.deriv, self.rhs)\n ])\n self.play(\n self.rects.restore,\n run_time = 3,\n rate_func = there_and_back\n )\n self.wait()\n for x in range(2):\n self.play(*[\n ApplyFunction(\n lambda m : m.shift(MED_SMALL_BUFF*UP).set_fill(opacity = 1),\n rect,\n run_time = 3,\n rate_func = squish_rate_func(\n there_and_back,\n alpha, alpha+0.2\n )\n )\n for rect, alpha in zip(\n self.rects, \n np.linspace(0, 0.8, 
len(self.rects))\n )\n ])\n self.wait()\n\n def show_antiderivative_considering_bounds(self):\n self.play(\n self.integral.set_fill, None, 0.5,\n self.deriv.set_fill, None, 1,\n self.rhs.set_fill, None, 1,\n )\n for label, line in reversed(list(zip(self.bound_labels, self.v_lines))):\n new_line = line.copy().set_color(YELLOW)\n label.save_state()\n self.play(label.set_color, YELLOW)\n self.play(ShowCreation(new_line))\n self.play(ShowCreation(line))\n self.remove(new_line)\n self.play(label.restore)\n self.wait()\n self.play(self.integral.set_fill, None, 1)\n self.wait(3)\n\nclass LetsRecap(TeacherStudentsScene):\n def construct(self):\n self.teacher_says(\n \"Let's recap\",\n target_mode = \"hesitant\",\n )\n self.change_student_modes(*[\"happy\"]*3)\n self.wait(3)\n\nclass NegativeArea(GraphScene):\n CONFIG = {\n \"x_axis_label\" : \"Time\",\n \"y_axis_label\" : \"Velocity\",\n \"graph_origin\" : 1.5*DOWN + 5*LEFT,\n \"y_min\" : -3,\n \"y_max\" : 7,\n \"small_dx\" : 0.01,\n \"sample_input\" : 5,\n }\n def construct(self):\n self.setup_axes()\n self.add_graph_and_area()\n self.write_negative_area()\n self.show_negative_point()\n self.show_car_going_backwards()\n self.write_v_dt()\n self.show_rectangle()\n self.write_signed_area()\n\n def add_graph_and_area(self):\n graph = self.get_graph(\n lambda x : -0.02*(x+1)*(x-3)*(x-7)*(x-10),\n x_min = 0,\n x_max = 8,\n color = VELOCITY_COLOR\n )\n area = self.get_riemann_rectangles(\n graph, \n x_min = 0,\n x_max = 8,\n dx = self.small_dx,\n start_color = BLUE_D,\n end_color = BLUE_D,\n fill_opacity = 0.75,\n stroke_width = 0,\n )\n\n self .play(\n ShowCreation(graph),\n FadeIn(\n area, \n run_time = 2,\n lag_ratio = 0.5,\n )\n )\n\n self.graph = graph\n self.area = area\n\n def write_negative_area(self):\n words = TextMobject(\"Negative area\")\n words.set_color(RED)\n words.next_to(\n self.coords_to_point(7, -2),\n RIGHT,\n )\n arrow = Arrow(words, self.coords_to_point(\n self.sample_input, -1,\n ))\n\n self.play(\n Write(words, run_time = 2),\n ShowCreation(arrow)\n )\n self.wait(2)\n self.play(*list(map(FadeOut, [self.area, arrow])))\n\n self.negative_area_words = words\n\n def show_negative_point(self):\n v_line = self.get_vertical_line_to_graph(\n self.sample_input, self.graph,\n color = RED\n )\n self.play(ShowCreation(v_line))\n self.wait()\n self.v_line = v_line\n\n def show_car_going_backwards(self):\n car = Car()\n start_point = 3*RIGHT + 2*UP\n end_point = start_point + LEFT\n nudged_end_point = end_point + MED_SMALL_BUFF*LEFT\n car.move_to(start_point)\n arrow = Arrow(RIGHT, LEFT, color = RED)\n arrow.next_to(car, UP+LEFT)\n arrow.shift(MED_LARGE_BUFF*RIGHT)\n\n self.play(FadeIn(car))\n self.play(ShowCreation(arrow))\n self.play(MoveCar(\n car, end_point, \n moving_forward = False,\n run_time = 3\n ))\n self.wait()\n ghost_car = car.copy().fade()\n right_nose_line = self.get_car_nose_line(car)\n self.play(ShowCreation(right_nose_line))\n self.add(ghost_car)\n self.play(MoveCar(\n car, nudged_end_point,\n moving_forward = False\n ))\n left_nose_line = self.get_car_nose_line(car)\n self.play(ShowCreation(left_nose_line))\n\n self.nose_lines = VGroup(left_nose_line, right_nose_line)\n self.car = car\n self.ghost_car = ghost_car\n\n def write_v_dt(self):\n brace = Brace(self.nose_lines, DOWN, buff = 0)\n equation = TexMobject(\"ds\", \"=\", \"v(t)\", \"dt\")\n equation.next_to(brace, DOWN, SMALL_BUFF, LEFT)\n equation.set_color_by_tex(\"ds\", DISTANCE_COLOR)\n equation.set_color_by_tex(\"dt\", TIME_COLOR)\n\n negative = 
TextMobject(\"Negative\")\n negative.set_color(RED)\n negative.next_to(equation.get_corner(UP+RIGHT), UP, LARGE_BUFF)\n ds_arrow, v_arrow = arrows = VGroup(*[\n Arrow(\n negative.get_bottom(),\n equation.get_part_by_tex(tex).get_top(),\n color = RED,\n )\n for tex in (\"ds\", \"v(t)\")\n ])\n\n self.play(\n GrowFromCenter(brace),\n Write(equation)\n )\n self.wait()\n self.play(FadeIn(negative))\n self.play(ShowCreation(v_arrow))\n self.wait(2)\n self.play(ReplacementTransform(\n v_arrow.copy(),\n ds_arrow\n ))\n self.wait(2)\n\n self.ds_equation = equation\n self.negative_word = negative\n self.negative_word_arrows = arrows\n\n def show_rectangle(self):\n rect_list = self.get_riemann_rectangles_list(\n self.graph, x_min = 0, x_max = 8,\n n_iterations = 6,\n start_color = BLUE_D,\n end_color = BLUE_D,\n fill_opacity = 0.75,\n )\n rects = rect_list[0]\n rect = rects[len(rects)*self.sample_input//8]\n\n dt_brace = Brace(rect, UP, buff = 0)\n v_brace = Brace(rect, LEFT, buff = 0)\n dt_label = dt_brace.get_text(\"$dt$\", buff = SMALL_BUFF)\n dt_label.set_color(YELLOW)\n v_label = v_brace.get_text(\"$v(t)$\", buff = SMALL_BUFF)\n v_label.add_background_rectangle()\n\n self.play(FadeOut(self.v_line), FadeIn(rect))\n self.play(\n GrowFromCenter(dt_brace), \n GrowFromCenter(v_brace), \n Write(dt_label),\n Write(v_label),\n )\n self.wait(2)\n self.play(*it.chain(\n [FadeIn(r) for r in rects if r is not rect],\n list(map(FadeOut, [\n dt_brace, v_brace, dt_label, v_label\n ]))\n ))\n self.wait()\n for new_rects in rect_list[1:]:\n self.transform_between_riemann_rects(rects, new_rects)\n self.wait()\n\n def write_signed_area(self):\n words = TextMobject(\"``Signed area''\")\n words.next_to(self.coords_to_point(self.sample_input, 0), UP)\n symbols = VGroup(*[\n TexMobject(sym).move_to(self.coords_to_point(*coords))\n for sym, coords in [\n (\"+\", (1, 2)),\n (\"-\", (5, -1)),\n (\"+\", (7.6, 0.5)),\n ]\n ])\n self.play(Write(words))\n self.play(Write(symbols))\n self.wait()\n\n ####\n\n def get_car_nose_line(self, car):\n line = DashedLine(car.get_top(), car.get_bottom())\n line.move_to(car.get_right())\n return line\n\nclass NextVideo(TeacherStudentsScene):\n def construct(self):\n series = VideoSeries()\n series.to_edge(UP)\n next_video = series[8]\n integral = TexMobject(\"\\\\int\")\n integral.next_to(next_video, DOWN, LARGE_BUFF)\n\n self.play(FadeIn(series, lag_ratio = 0.5))\n self.play(\n next_video.set_color, YELLOW,\n next_video.shift, next_video.get_height()*DOWN/2,\n self.teacher.change_mode, \"raise_right_hand\"\n )\n self.play(Write(integral))\n self.wait(5)\n\nclass Chapter8PatreonThanks(PatreonThanks):\n CONFIG = {\n \"specific_patrons\" : [\n \"Ali Yahya\",\n \"CrypticSwarm\",\n \"Kaustuv DeBiswas\",\n \"Kathryn Schmiedicke\",\n \"Karan Bhargava\",\n \"Ankit Agarwal\",\n \"Yu Jun\",\n \"Dave Nicponski\",\n \"Damion Kistler\",\n \"Juan Benet\",\n \"Othman Alikhan\",\n \"Markus Persson\",\n \"Dan Buchoff\",\n \"Derek Dai\",\n \"Joseph John Cox\",\n \"Luc Ritchie\",\n \"Robert Teed\",\n \"Jason Hise\",\n \"Meshal Alshammari\",\n \"Bernd Sing\",\n \"Nils Schneider\",\n \"James Thornton\",\n \"Mustafa Mahdi\",\n \"Jonathan Eppele\",\n \"Mathew Bramson\",\n \"Jerry Ling\",\n \"Mark Govea\",\n \"Vecht\",\n \"Shimin Kuang\",\n \"Rish Kundalia\",\n \"Achille Brighton\",\n \"Ripta Pasay\",\n ]\n }\n\nclass Thumbnail(Chapter1Thumbnail):\n CONFIG = {\n \"x_axis_label\" : \"\",\n \"y_axis_label\" : \"\",\n \"graph_origin\" : 1.5*DOWN + 4*LEFT,\n \"y_axis_height\" : 5,\n \"x_max\" : 5,\n 
\"x_axis_width\" : 11,\n }\n def construct(self):\n self.setup_axes()\n self.remove(*self.x_axis.numbers)\n self.remove(*self.y_axis.numbers)\n graph = self.get_graph(self.func)\n rects = self.get_riemann_rectangles(\n graph,\n x_min = 0,\n x_max = 4,\n dx = 0.25,\n )\n words = TextMobject(\"Integrals\")\n words.set_width(8)\n words.to_edge(UP)\n\n self.add(graph, rects, words)\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "scipy.integrate.quad" ] ]
RoseauTechnologies/pandas
[ "e727a561aee529e9e6ce9494c05685f157ed1073" ]
[ "pandas/core/window/ewm.py" ]
[ "from __future__ import annotations\n\nimport datetime\nfrom functools import partial\nfrom textwrap import dedent\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import Timedelta\nimport pandas._libs.window.aggregations as window_aggregations\nfrom pandas._typing import (\n Axis,\n TimedeltaConvertibleTypes,\n)\n\nif TYPE_CHECKING:\n from pandas import DataFrame, Series\n from pandas.core.generic import NDFrame\n\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import is_datetime64_ns_dtype\nfrom pandas.core.dtypes.missing import isna\n\nimport pandas.core.common as common # noqa: PDF018\nfrom pandas.core.indexers.objects import (\n BaseIndexer,\n ExponentialMovingWindowIndexer,\n GroupbyIndexer,\n)\nfrom pandas.core.util.numba_ import maybe_use_numba\nfrom pandas.core.window.common import zsqrt\nfrom pandas.core.window.doc import (\n _shared_docs,\n args_compat,\n create_section_header,\n kwargs_compat,\n numba_notes,\n template_header,\n template_returns,\n template_see_also,\n window_agg_numba_parameters,\n)\nfrom pandas.core.window.numba_ import (\n generate_ewma_numba_table_func,\n generate_numba_ewma_func,\n)\nfrom pandas.core.window.online import (\n EWMMeanState,\n generate_online_numba_ewma_func,\n)\nfrom pandas.core.window.rolling import (\n BaseWindow,\n BaseWindowGroupby,\n)\n\n\ndef get_center_of_mass(\n comass: float | None,\n span: float | None,\n halflife: float | None,\n alpha: float | None,\n) -> float:\n valid_count = common.count_not_none(comass, span, halflife, alpha)\n if valid_count > 1:\n raise ValueError(\"comass, span, halflife, and alpha are mutually exclusive\")\n\n # Convert to center of mass; domain checks ensure 0 < alpha <= 1\n if comass is not None:\n if comass < 0:\n raise ValueError(\"comass must satisfy: comass >= 0\")\n elif span is not None:\n if span < 1:\n raise ValueError(\"span must satisfy: span >= 1\")\n comass = (span - 1) / 2\n elif halflife is not None:\n if halflife <= 0:\n raise ValueError(\"halflife must satisfy: halflife > 0\")\n decay = 1 - np.exp(np.log(0.5) / halflife)\n comass = 1 / decay - 1\n elif alpha is not None:\n if alpha <= 0 or alpha > 1:\n raise ValueError(\"alpha must satisfy: 0 < alpha <= 1\")\n comass = (1 - alpha) / alpha\n else:\n raise ValueError(\"Must pass one of comass, span, halflife, or alpha\")\n\n return float(comass)\n\n\ndef _calculate_deltas(\n times: str | np.ndarray | NDFrame | None,\n halflife: float | TimedeltaConvertibleTypes | None,\n) -> np.ndarray:\n \"\"\"\n Return the diff of the times divided by the half-life. These values are used in\n the calculation of the ewm mean.\n\n Parameters\n ----------\n times : str, np.ndarray, Series, default None\n Times corresponding to the observations. 
Must be monotonically increasing\n and ``datetime64[ns]`` dtype.\n halflife : float, str, timedelta, optional\n Half-life specifying the decay\n\n Returns\n -------\n np.ndarray\n Diff of the times divided by the half-life\n \"\"\"\n # error: Item \"str\" of \"Union[str, ndarray, NDFrameT, None]\" has no\n # attribute \"view\"\n # error: Item \"None\" of \"Union[str, ndarray, NDFrameT, None]\" has no\n # attribute \"view\"\n _times = np.asarray(\n times.view(np.int64), dtype=np.float64 # type: ignore[union-attr]\n )\n _halflife = float(Timedelta(halflife).value)\n return np.diff(_times) / _halflife\n\n\nclass ExponentialMovingWindow(BaseWindow):\n r\"\"\"\n Provide exponential weighted (EW) functions.\n\n Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``.\n\n Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be\n provided.\n\n Parameters\n ----------\n com : float, optional\n Specify decay in terms of center of mass,\n :math:`\\alpha = 1 / (1 + com)`, for :math:`com \\geq 0`.\n span : float, optional\n Specify decay in terms of span,\n :math:`\\alpha = 2 / (span + 1)`, for :math:`span \\geq 1`.\n halflife : float, str, timedelta, optional\n Specify decay in terms of half-life,\n :math:`\\alpha = 1 - \\exp\\left(-\\ln(2) / halflife\\right)`, for\n :math:`halflife > 0`.\n\n If ``times`` is specified, the time unit (str or timedelta) over which an\n observation decays to half its value. Only applicable to ``mean()``\n and halflife value will not apply to the other functions.\n\n .. versionadded:: 1.1.0\n\n alpha : float, optional\n Specify smoothing factor :math:`\\alpha` directly,\n :math:`0 < \\alpha \\leq 1`.\n min_periods : int, default 0\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n adjust : bool, default True\n Divide by decaying adjustment factor in beginning periods to account\n for imbalance in relative weightings (viewing EWMA as a moving average).\n\n - When ``adjust=True`` (default), the EW function is calculated using weights\n :math:`w_i = (1 - \\alpha)^i`. For example, the EW moving average of the series\n [:math:`x_0, x_1, ..., x_t`] would be:\n\n .. math::\n y_t = \\frac{x_t + (1 - \\alpha)x_{t-1} + (1 - \\alpha)^2 x_{t-2} + ... + (1 -\n \\alpha)^t x_0}{1 + (1 - \\alpha) + (1 - \\alpha)^2 + ... + (1 - \\alpha)^t}\n\n - When ``adjust=False``, the exponentially weighted function is calculated\n recursively:\n\n .. math::\n \\begin{split}\n y_0 &= x_0\\\\\n y_t &= (1 - \\alpha) y_{t-1} + \\alpha x_t,\n \\end{split}\n ignore_na : bool, default False\n Ignore missing values when calculating weights; specify ``True`` to reproduce\n pre-0.15.0 behavior.\n\n - When ``ignore_na=False`` (default), weights are based on absolute positions.\n For example, the weights of :math:`x_0` and :math:`x_2` used in calculating\n the final weighted average of [:math:`x_0`, None, :math:`x_2`] are\n :math:`(1-\\alpha)^2` and :math:`1` if ``adjust=True``, and\n :math:`(1-\\alpha)^2` and :math:`\\alpha` if ``adjust=False``.\n\n - When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based\n on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`\n used in calculating the final weighted average of\n [:math:`x_0`, None, :math:`x_2`] are :math:`1-\\alpha` and :math:`1` if\n ``adjust=True``, and :math:`1-\\alpha` and :math:`\\alpha` if ``adjust=False``.\n axis : {0, 1}, default 0\n The axis to use. 
The value 0 identifies the rows, and 1\n identifies the columns.\n times : str, np.ndarray, Series, default None\n\n .. versionadded:: 1.1.0\n\n Times corresponding to the observations. Must be monotonically increasing and\n ``datetime64[ns]`` dtype.\n\n If str, the name of the column in the DataFrame representing the times.\n\n If 1-D array like, a sequence with the same shape as the observations.\n\n Only applicable to ``mean()``.\n method : str {'single', 'table'}, default 'single'\n Execute the rolling operation per single column or row (``'single'``)\n or over the entire object (``'table'``).\n\n This argument is only implemented when specifying ``engine='numba'``\n in the method call.\n\n Only applicable to ``mean()``\n\n .. versionadded:: 1.4.0\n\n Returns\n -------\n DataFrame\n A Window sub-classed for the particular operation.\n\n See Also\n --------\n rolling : Provides rolling window calculations.\n expanding : Provides expanding transformations.\n\n Notes\n -----\n\n More details can be found at:\n :ref:`Exponentially weighted windows <window.exponentially_weighted>`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n >>> df.ewm(com=0.5).mean()\n B\n 0 0.000000\n 1 0.750000\n 2 1.615385\n 3 1.615385\n 4 3.670213\n\n Specifying ``times`` with a timedelta ``halflife`` when computing mean.\n\n >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']\n >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()\n B\n 0 0.000000\n 1 0.585786\n 2 1.523889\n 3 1.523889\n 4 3.233686\n \"\"\"\n\n _attributes = [\n \"com\",\n \"span\",\n \"halflife\",\n \"alpha\",\n \"min_periods\",\n \"adjust\",\n \"ignore_na\",\n \"axis\",\n \"times\",\n \"method\",\n ]\n\n def __init__(\n self,\n obj: NDFrame,\n com: float | None = None,\n span: float | None = None,\n halflife: float | TimedeltaConvertibleTypes | None = None,\n alpha: float | None = None,\n min_periods: int | None = 0,\n adjust: bool = True,\n ignore_na: bool = False,\n axis: Axis = 0,\n times: str | np.ndarray | NDFrame | None = None,\n method: str = \"single\",\n *,\n selection=None,\n ):\n super().__init__(\n obj=obj,\n min_periods=1 if min_periods is None else max(int(min_periods), 1),\n on=None,\n center=False,\n closed=None,\n method=method,\n axis=axis,\n selection=selection,\n )\n self.com = com\n self.span = span\n self.halflife = halflife\n self.alpha = alpha\n self.adjust = adjust\n self.ignore_na = ignore_na\n self.times = times\n if self.times is not None:\n if not self.adjust:\n raise NotImplementedError(\"times is not supported with adjust=False.\")\n if isinstance(self.times, str):\n warnings.warn(\n (\n \"Specifying times as a string column label is deprecated \"\n \"and will be removed in a future version. 
Pass the column \"\n \"into times instead.\"\n ),\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n self.times = self._selected_obj[self.times]\n if not is_datetime64_ns_dtype(self.times):\n raise ValueError(\"times must be datetime64[ns] dtype.\")\n # error: Argument 1 to \"len\" has incompatible type \"Union[str, ndarray,\n # NDFrameT, None]\"; expected \"Sized\"\n if len(self.times) != len(obj): # type: ignore[arg-type]\n raise ValueError(\"times must be the same length as the object.\")\n if not isinstance(self.halflife, (str, datetime.timedelta)):\n raise ValueError(\n \"halflife must be a string or datetime.timedelta object\"\n )\n if isna(self.times).any():\n raise ValueError(\"Cannot convert NaT values to integer\")\n self._deltas = _calculate_deltas(self.times, self.halflife)\n # Halflife is no longer applicable when calculating COM\n # But allow COM to still be calculated if the user passes other decay args\n if common.count_not_none(self.com, self.span, self.alpha) > 0:\n self._com = get_center_of_mass(self.com, self.span, None, self.alpha)\n else:\n self._com = 1.0\n else:\n if self.halflife is not None and isinstance(\n self.halflife, (str, datetime.timedelta)\n ):\n raise ValueError(\n \"halflife can only be a timedelta convertible argument if \"\n \"times is not None.\"\n )\n # Without times, points are equally spaced\n self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64)\n self._com = get_center_of_mass(\n # error: Argument 3 to \"get_center_of_mass\" has incompatible type\n # \"Union[float, Any, None, timedelta64, signedinteger[_64Bit]]\";\n # expected \"Optional[float]\"\n self.com,\n self.span,\n self.halflife, # type: ignore[arg-type]\n self.alpha,\n )\n\n def _get_window_indexer(self) -> BaseIndexer:\n \"\"\"\n Return an indexer class that will compute the window start and end bounds\n \"\"\"\n return ExponentialMovingWindowIndexer()\n\n def online(self, engine=\"numba\", engine_kwargs=None):\n \"\"\"\n Return an ``OnlineExponentialMovingWindow`` object to calculate\n exponentially moving window aggregations in an online method.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n engine: str, default ``'numba'``\n Execution engine to calculate online aggregations.\n Applies to all supported aggregation methods.\n\n engine_kwargs : dict, default None\n Applies to all supported aggregation methods.\n\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. 
The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be\n applied to the function\n\n Returns\n -------\n OnlineExponentialMovingWindow\n \"\"\"\n return OnlineExponentialMovingWindow(\n obj=self.obj,\n com=self.com,\n span=self.span,\n halflife=self.halflife,\n alpha=self.alpha,\n min_periods=self.min_periods,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n axis=self.axis,\n times=self.times,\n engine=engine,\n engine_kwargs=engine_kwargs,\n selection=self._selection,\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n see_also=dedent(\n \"\"\"\n See Also\n --------\n pandas.DataFrame.rolling.aggregate\n \"\"\"\n ),\n examples=dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.ewm(alpha=0.5).mean()\n A B C\n 0 1.000000 4.000000 7.000000\n 1 1.666667 4.666667 7.666667\n 2 2.428571 5.428571 8.428571\n \"\"\"\n ),\n klass=\"Series/Dataframe\",\n axis=\"\",\n )\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n args_compat,\n window_agg_numba_parameters,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Notes\"),\n numba_notes.replace(\"\\n\", \"\", 1),\n window_method=\"ewm\",\n aggregation_description=\"(exponential weighted moment) mean\",\n agg_method=\"mean\",\n )\n def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):\n if maybe_use_numba(engine):\n if self.method == \"single\":\n ewma_func = generate_numba_ewma_func(\n engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas\n )\n numba_cache_key = (lambda x: x, \"ewma\")\n else:\n ewma_func = generate_ewma_numba_table_func(\n engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas\n )\n numba_cache_key = (lambda x: x, \"ewma_table\")\n return self._apply(\n ewma_func,\n numba_cache_key=numba_cache_key,\n )\n elif engine in (\"cython\", None):\n if engine_kwargs is not None:\n raise ValueError(\"cython engine does not accept engine_kwargs\")\n nv.validate_window_func(\"mean\", args, kwargs)\n\n deltas = None if self.times is None else self._deltas\n window_func = partial(\n window_aggregations.ewma,\n com=self._com,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n deltas=deltas,\n )\n return self._apply(window_func)\n else:\n raise ValueError(\"engine must be either 'numba' or 'cython'\")\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n bias : bool, default False\n Use a standard estimation bias correction.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n args_compat,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"ewm\",\n aggregation_description=\"(exponential weighted moment) standard deviation\",\n agg_method=\"std\",\n )\n def std(self, bias: bool = False, *args, **kwargs):\n nv.validate_window_func(\"std\", args, kwargs)\n return zsqrt(self.var(bias=bias, **kwargs))\n\n def vol(self, bias: bool = False, *args, **kwargs):\n warnings.warn(\n (\n \"vol is deprecated will be removed in a future version. 
\"\n \"Use std instead.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n return self.std(bias, *args, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n bias : bool, default False\n Use a standard estimation bias correction.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n args_compat,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"ewm\",\n aggregation_description=\"(exponential weighted moment) variance\",\n agg_method=\"var\",\n )\n def var(self, bias: bool = False, *args, **kwargs):\n nv.validate_window_func(\"var\", args, kwargs)\n window_func = window_aggregations.ewmcov\n wfunc = partial(\n window_func,\n com=self._com,\n adjust=self.adjust,\n ignore_na=self.ignore_na,\n bias=bias,\n )\n\n def var_func(values, begin, end, min_periods):\n return wfunc(values, begin, end, min_periods, values)\n\n return self._apply(var_func)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n other : Series or DataFrame , optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndex DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n bias : bool, default False\n Use a standard estimation bias correction.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"ewm\",\n aggregation_description=\"(exponential weighted moment) sample covariance\",\n agg_method=\"cov\",\n )\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n bias: bool = False,\n **kwargs,\n ):\n from pandas import Series\n\n def cov_func(x, y):\n x_array = self._prep_values(x)\n y_array = self._prep_values(y)\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x_array),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n )\n result = window_aggregations.ewmcov(\n x_array,\n start,\n end,\n # error: Argument 4 to \"ewmcov\" has incompatible type\n # \"Optional[int]\"; expected \"int\"\n self.min_periods, # type: ignore[arg-type]\n y_array,\n self._com,\n self.adjust,\n self.ignore_na,\n bias,\n )\n return Series(result, index=x.index, name=x.name)\n\n return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndex DataFrame in the case of DataFrame\n inputs. 
In the case of missing elements, only complete pairwise\n observations will be used.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"ewm\",\n aggregation_description=\"(exponential weighted moment) sample correlation\",\n agg_method=\"corr\",\n )\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n **kwargs,\n ):\n from pandas import Series\n\n def cov_func(x, y):\n x_array = self._prep_values(x)\n y_array = self._prep_values(y)\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x_array),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n )\n\n def _cov(X, Y):\n return window_aggregations.ewmcov(\n X,\n start,\n end,\n min_periods,\n Y,\n self._com,\n self.adjust,\n self.ignore_na,\n True,\n )\n\n with np.errstate(all=\"ignore\"):\n cov = _cov(x_array, y_array)\n x_var = _cov(x_array, x_array)\n y_var = _cov(y_array, y_array)\n result = cov / zsqrt(x_var * y_var)\n return Series(result, index=x.index, name=x.name)\n\n return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)\n\n\nclass ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):\n \"\"\"\n Provide an exponential moving window groupby implementation.\n \"\"\"\n\n _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes\n\n def __init__(self, obj, *args, _grouper=None, **kwargs):\n super().__init__(obj, *args, _grouper=_grouper, **kwargs)\n\n if not obj.empty and self.times is not None:\n # sort the times and recalculate the deltas according to the groups\n groupby_order = np.concatenate(list(self._grouper.indices.values()))\n self._deltas = _calculate_deltas(\n self.times.take(groupby_order), # type: ignore[union-attr]\n self.halflife,\n )\n\n def _get_window_indexer(self) -> GroupbyIndexer:\n \"\"\"\n Return an indexer class that will compute the window start and end bounds\n\n Returns\n -------\n GroupbyIndexer\n \"\"\"\n window_indexer = GroupbyIndexer(\n groupby_indicies=self._grouper.indices,\n window_indexer=ExponentialMovingWindowIndexer,\n )\n return window_indexer\n\n\nclass OnlineExponentialMovingWindow(ExponentialMovingWindow):\n def __init__(\n self,\n obj: NDFrame,\n com: float | None = None,\n span: float | None = None,\n halflife: float | TimedeltaConvertibleTypes | None = None,\n alpha: float | None = None,\n min_periods: int | None = 0,\n adjust: bool = True,\n ignore_na: bool = False,\n axis: Axis = 0,\n times: str | np.ndarray | NDFrame | None = None,\n engine: str = \"numba\",\n engine_kwargs: dict[str, bool] | None = None,\n *,\n selection=None,\n ):\n if times is not None:\n raise NotImplementedError(\n \"times is not implemented with online operations.\"\n )\n super().__init__(\n obj=obj,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na,\n axis=axis,\n times=times,\n selection=selection,\n )\n self._mean = EWMMeanState(\n self._com, self.adjust, self.ignore_na, self.axis, obj.shape\n )\n if maybe_use_numba(engine):\n self.engine = engine\n self.engine_kwargs = engine_kwargs\n else:\n raise ValueError(\"'numba' is the only supported engine\")\n\n def reset(self):\n \"\"\"\n Reset the state 
captured by `update` calls.\n \"\"\"\n self._mean.reset()\n\n def aggregate(self, func, *args, **kwargs):\n return NotImplementedError\n\n def std(self, bias: bool = False, *args, **kwargs):\n return NotImplementedError\n\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n **kwargs,\n ):\n return NotImplementedError\n\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n bias: bool = False,\n **kwargs,\n ):\n return NotImplementedError\n\n def var(self, bias: bool = False, *args, **kwargs):\n return NotImplementedError\n\n def mean(self, *args, update=None, update_times=None, **kwargs):\n \"\"\"\n Calculate an online exponentially weighted mean.\n\n Parameters\n ----------\n update: DataFrame or Series, default None\n New values to continue calculating the\n exponentially weighted mean from the last values and weights.\n Values should be float64 dtype.\n\n ``update`` needs to be ``None`` the first time the\n exponentially weighted mean is calculated.\n\n update_times: Series or 1-D np.ndarray, default None\n New times to continue calculating the\n exponentially weighted mean from the last values and weights.\n If ``None``, values are assumed to be evenly spaced\n in time.\n This feature is currently unsupported.\n\n Returns\n -------\n DataFrame or Series\n\n Examples\n --------\n >>> df = pd.DataFrame({\"a\": range(5), \"b\": range(5, 10)})\n >>> online_ewm = df.head(2).ewm(0.5).online()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n >>> online_ewm.mean(update=df.tail(3))\n a b\n 2 1.615385 6.615385\n 3 2.550000 7.550000\n 4 3.520661 8.520661\n >>> online_ewm.reset()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n \"\"\"\n result_kwargs = {}\n is_frame = True if self._selected_obj.ndim == 2 else False\n if update_times is not None:\n raise NotImplementedError(\"update_times is not implemented.\")\n else:\n update_deltas = np.ones(\n max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64\n )\n if update is not None:\n if self._mean.last_ewm is None:\n raise ValueError(\n \"Must call mean with update=None first before passing update\"\n )\n result_from = 1\n result_kwargs[\"index\"] = update.index\n if is_frame:\n last_value = self._mean.last_ewm[np.newaxis, :]\n result_kwargs[\"columns\"] = update.columns\n else:\n last_value = self._mean.last_ewm\n result_kwargs[\"name\"] = update.name\n np_array = np.concatenate((last_value, update.to_numpy()))\n else:\n result_from = 0\n result_kwargs[\"index\"] = self._selected_obj.index\n if is_frame:\n result_kwargs[\"columns\"] = self._selected_obj.columns\n else:\n result_kwargs[\"name\"] = self._selected_obj.name\n np_array = self._selected_obj.astype(np.float64).to_numpy()\n ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)\n result = self._mean.run_ewm(\n np_array if is_frame else np_array[:, np.newaxis],\n update_deltas,\n self.min_periods,\n ewma_func,\n )\n if not is_frame:\n result = result.squeeze()\n result = result[result_from:]\n result = self._selected_obj._constructor(result, **result_kwargs)\n return result\n" ]
[ [ "pandas._libs.window.aggregations.ewmcov", "pandas.core.window.numba_.generate_ewma_numba_table_func", "pandas.util._exceptions.find_stack_level", "pandas.core.window.online.generate_online_numba_ewma_func", "pandas._libs.tslibs.Timedelta", "pandas.core.dtypes.common.is_datetime64_ns_dtype", "pandas.core.indexers.objects.GroupbyIndexer", "pandas.core.window.doc.numba_notes.replace", "pandas.core.dtypes.missing.isna", "numpy.log", "pandas.core.indexers.objects.ExponentialMovingWindowIndexer", "pandas.core.window.common.zsqrt", "numpy.diff", "pandas.core.common.count_not_none", "pandas.compat.numpy.function.validate_window_func", "pandas.core.util.numba_.maybe_use_numba", "pandas.core.window.doc.create_section_header", "pandas.core.window.online.EWMMeanState", "pandas.core.window.numba_.generate_numba_ewma_func", "numpy.errstate", "pandas.Series" ] ]