repo_name
stringlengths
6
130
hexsha
list
file_path
list
code
list
apis
list
iml1111/air_delay_project
[ "4d0b9eee7f84eb79db73d07b4c2c5508593a66c4" ]
[ "src/main.py" ]
[ "from preprocess import *\nfrom lgb import *\nimport pandas as pd\n\ndf = p_proc()\ndf2 = p_proc2()\ndf3 = label(pd.concat([df, df2], ignore_index=True))\n#Y_pred = l_proc2(df3)\nY_pred, result = load_model(df3)\nprint(len([i for i in result if i > 0.4]))" ]
[ [ "pandas.concat" ] ]
ruza-net/Korbit
[ "b33bedbf9169296f33331bfc27d03d3891459ebe" ]
[ "korbit.py" ]
[ "import numpy as np\nfrom turtle import *\n\n\n# Gravitational constant\nG = 6.67428e-11\n\n# Distance scale\nSCALE = 1e-9\n\n# A step\ndphi = 0.05 * 1 / np.pi\n\n\nclass Simulation(Turtle):\n '''\n Draws the orbit based on the parameters\n - mechanical energy, masses, and angular momentum.\n '''\n\n\n def __init__(self, m, M, E, L, color):\n super(Simulation, self).__init__(visible=False)\n\n self.pencolor(color)\n\n print(1 + 2 * E * L**2 / (G**2 * M**2 * m**3))\n\n self.e = np.sqrt(1 + 2 * E * L**2 / (G**2 * M**2 * m**3))\n self.r0 = L**2 / (G * M * m**2)\n\n self.r = 0.0\n self.phi = 0.0\n\n def update_position(self):\n self.setpos(self.r * np.cos(self.phi), self.r * np.sin(self.phi))\n\n def update_parameters(self):\n self.phi += dphi\n\n self.r = self.r0 / (1 + self.e * np.cos(self.phi)) * SCALE\n\n def run(self):\n self.penup()\n self.radians()\n\n while 1:\n self.update_parameters()\n\n self.update_position()\n\n self.dot(2, (0, 255, 186))\n\n\ndef run(planets):\n while 1:\n for p in planets:\n p.update_parameters()\n p.update_position()\n\n p.dot(2)\n\ndef main():\n radians()\n colormode(255)\n\n # screen = Screen()\n # screen.bgcolor('black')\n #\n # m = screen.numinput('Korbit', 'Enter the planet mass(kg): ')\n # M = screen.numinput('Korbit', 'Enter the central body mass(kg): ')\n # E = screen.numinput('Korbit', 'Enter the mechanical energy(J): ')\n # L = screen.numinput('Korbit', 'Enter the angular momentum(Js): ')\n #\n # sim = Simulation(m, M, E, L, (0, 255, 186))\n # sim.run()\n\n earth = Simulation(6e24, 2e30, -2.6e33, 2.6e40, (0, 0, 255))\n venus = Simulation(4.9e24, 2e30, -3.05e33, 7.4e39, (255, 0, 0))\n\n run([earth, venus])\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.cos", "numpy.sqrt", "numpy.sin" ] ]
volpatto/dit
[ "a8b0ffd72e0829c4b239419e85b9e0cfd20085dd" ]
[ "dit/pid/pid.py" ]
[ "\"\"\"\nClasses implementing the partial information decomposition.\n\"\"\"\n\nfrom __future__ import division\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import with_metaclass\n\nfrom sys import version_info\n\nfrom itertools import product\n\nimport networkx as nx\nimport numpy as np\n\nimport prettytable\n\nfrom .lattice import ascendants, descendants, least_upper_bound, pid_lattice, sort_key\nfrom .. import ditParams\nfrom ..multivariate import coinformation\nfrom ..utils import flatten, powerset\n\n\nclass BasePID(with_metaclass(ABCMeta, object)):\n \"\"\"\n This implements the basic Williams & Beer Partial Information Decomposition.\n \"\"\"\n\n _red_string = \"I_r\"\n _pi_string = \"pi\"\n\n def __init__(self, dist, inputs=None, output=None, reds=None, pis=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n dist : Distribution\n The distribution to compute the decomposition on.\n inputs : iter of iters, None\n The set of input variables. If None, `dist.rvs` less indices\n in `output` is used.\n output : iter, None\n The output variable. 
If None, `dist.rvs[-1]` is used.\n reds : dict, None\n Redundancy values pre-assessed.\n pis : dict, None\n Partial information values pre-assessed.\n \"\"\"\n self._dist = dist\n\n if output is None:\n output = dist.rvs[-1]\n if inputs is None:\n inputs = [var for var in dist.rvs if var[0] not in output]\n\n self._inputs = tuple(map(tuple, inputs))\n self._output = tuple(output)\n self._kwargs = kwargs\n self._lattice = pid_lattice(self._inputs)\n self._total = coinformation(self._dist, [list(flatten(self._inputs)), self._output])\n self._compute(reds, pis)\n\n @abstractmethod\n def _measure(self, node, output):\n \"\"\"\n Compute a redundancy value for `node`.\n\n Parameters\n ----------\n node : tuple(tuples)\n The lattice node to compute the redundancy of.\n output : iterable\n The indices to consider the target/output of the PID.\n\n Returns\n -------\n red : float\n The redundancy value.\n \"\"\"\n pass\n\n @property\n @classmethod\n @abstractmethod\n def _name(self):\n \"\"\"\n The name of the PID.\n\n Returns\n -------\n name : str\n The name.\n \"\"\"\n pass\n\n def __eq__(self, other):\n \"\"\"\n Test if this and `other` are equal partial information decompositions.\n\n Parameters\n ----------\n other : BasePID\n\n Returns\n -------\n eq : bool\n If `self` and `other` are the same partial information decomposition.\n \"\"\"\n return all(np.isclose(self[node], other[node], atol=1e-5, rtol=1e-5) for node in self._lattice)\n\n def __ne__(self, other):\n \"\"\"\n Test if this and `other` are not equal.\n\n Parameters\n ----------\n other : BasePID\n\n Returns\n -------\n eq : bool\n If `self` and `other` are different partial information decomposition.\n \"\"\"\n return not (self == other)\n\n def __getitem__(self, key):\n \"\"\"\n Get the partial information value associated with `key`.\n\n Parameters\n ----------\n key : iterable of iterables\n The node to get the partial information of.\n\n Returns\n -------\n pi : float\n The partial information 
associated with `key`.\n \"\"\"\n return float(self.get_partial(key))\n\n def __repr__(self): # pragma: no cover\n \"\"\"\n Returns a representation of the PID.\n\n Returns\n -------\n repr : str\n A representation of this object.\n \"\"\"\n if ditParams['repr.print']:\n return self.to_string()\n else:\n return super(BasePID, self).__repr__()\n\n def __str__(self):\n \"\"\"\n Return a string representation of the PID.\n\n Returns\n -------\n pid : str\n The PID as a string.\n \"\"\"\n return self.to_string()\n\n def _compute(self, reds=None, pis=None):\n \"\"\"\n Use the redundancy measure to populate the lattice.\n \"\"\"\n if reds is None: # pragma: no cover\n reds = {}\n if pis is None: # pragma: no cover\n pis = {}\n\n for node in self._lattice:\n if node not in reds: # pragma: no branch\n reds[node] = self._measure(self._dist, node, self._output, **self._kwargs)\n\n reds, pis = self._compute_mobius_inversion(reds=reds, pis=pis)\n\n nx.set_node_attributes(self._lattice, name='red', values=reds)\n nx.set_node_attributes(self._lattice, name='pi', values=pis)\n\n def _compute_mobius_inversion(self, reds=None, pis=None):\n \"\"\"\n Perform as much of a Mobius inversion as possible.\n\n Parameters\n ----------\n reds : dict\n Currently known redundancy values.\n pis : dict\n Currently known partial information values.\n\n Returns\n -------\n reds : dict\n Updated redundancy values.\n pis : dict\n Updated partial information values.\n \"\"\"\n if reds is None: # pragma: no cover\n reds = {}\n if pis is None: # pragma: no cover\n pis = {}\n\n for node in reversed(list(nx.topological_sort(self._lattice))):\n if node not in pis:\n try:\n pis[node] = reds[node] - sum(pis[n] for n in descendants(self._lattice, node))\n except KeyError:\n pass\n\n return reds, pis\n\n def get_redundancy(self, node):\n \"\"\"\n Return the redundancy associated with `node`.\n\n Parameters\n ----------\n node : tuple of tuples\n The node to get the redundancy for.\n\n Returns\n -------\n red 
: float\n The redundancy associated with `node`.\n \"\"\"\n return self._lattice.node[node]['red']\n\n def get_partial(self, node):\n \"\"\"\n Return the partial information associated with `node`.\n\n Parameters\n ----------\n node : tuple of tuples\n The node to get the partial information for.\n\n Returns\n -------\n pi : float\n The partial information associated with `node`.\n \"\"\"\n return self._lattice.node[node]['pi']\n\n def to_string(self, digits=4):\n \"\"\"\n Create a table representing the redundancy and PI lattices.\n\n Parameters\n ----------\n digits : int\n The number of digits of precision to display.\n\n Returns\n -------\n table : str\n The table of values.\n \"\"\"\n red_string = self._red_string\n pi_string = self._pi_string\n\n table = prettytable.PrettyTable([self.name, red_string, pi_string], title=getattr(self._dist, 'name', ''))\n\n if ditParams['text.font'] == 'linechar': # pragma: no cover\n try:\n table.set_style(prettytable.BOX_CHARS)\n except AttributeError:\n pass\n\n table.float_format[red_string] = '{}.{}'.format(digits + 2, digits)\n table.float_format[pi_string] = '{}.{}'.format(digits + 2, digits)\n\n for node in sorted(self._lattice, key=sort_key(self._lattice)):\n node_label = ''.join('{{{}}}'.format(':'.join(map(str, n))) for n in node)\n red_value = self.get_redundancy(node)\n pi_value = self.get_partial(node)\n if np.isclose(0, red_value, atol=10 ** -(digits - 1), rtol=10 ** -(digits - 1)): # pragma: no cover\n red_value = 0.0\n if np.isclose(0, pi_value, atol=10 ** -(digits - 1), rtol=10 ** -(digits - 1)): # pragma: no cover\n pi_value = 0.0\n table.add_row([node_label, red_value, pi_value])\n\n return table.get_string()\n\n @property\n def name(self): # pragma: no cover\n \"\"\"\n Get the name of the decomposition. 
If colorama is available, the name will be styled\n according to its properties.\n\n Returns\n -------\n name : str\n The name of the decomposition.\n \"\"\"\n try:\n from colorama import Fore, Style\n inconsistent_style = lambda x: Fore.RED + x + Style.RESET_ALL\n negative_style = lambda x: Fore.GREEN + x + Style.RESET_ALL\n incomplete_style = lambda x: Fore.BLUE + x + Style.RESET_ALL\n except:\n inconsistent_style = lambda x: x\n negative_style = lambda x: x\n incomplete_style = lambda x: x\n\n if not self.consistent:\n return inconsistent_style(self._name)\n elif not self.nonnegative:\n return negative_style(self._name)\n elif not self.complete:\n return incomplete_style(self._name)\n else:\n return self._name\n\n @property\n def consistent(self):\n \"\"\"\n Determine if the assignment of values to the lattice is self-consistent.\n\n Returns\n -------\n valid : bool\n True if the lattice is self-consistent, False otherwise.\n \"\"\"\n return True\n\n @property\n def nonnegative(self):\n \"\"\"\n Determine if the partial information values are all non-negative.\n\n Returns\n -------\n nonnegative : bool\n True if all pi values are non-negative, False otherwise.\n \"\"\"\n pis = nx.get_node_attributes(self._lattice, 'pi')\n nonnegative = all(pi >= -1e-6 for pi in pis.values() if not np.isnan(pi))\n return nonnegative\n\n @property\n def complete(self):\n \"\"\"\n Determine if all partial information values are assigned.\n\n Returns\n -------\n valid : bool\n True if the lattice is self-consistant, False otherwise.\n \"\"\"\n return True\n\n\nclass BaseIncompletePID(BasePID):\n \"\"\"\n A special PID class for measures which do not compute the redundancy of an arbitrary antichain.\n\n Properties\n ----------\n LATTICE_MONOTONICITY : bool\n REDUCED_PID : bool\n SELF_REDUNDANCY : bool\n \"\"\"\n LATTICE_MONOTONICITY = True\n REDUCED_PID = True\n SELF_REDUNDANCY = True\n\n def __eq__(self, other):\n \"\"\"\n Test if this and `other` are equal partial information 
decompositions.\n\n Parameters\n ----------\n other : BasePID\n\n Returns\n -------\n eq : bool\n If `self` and `other` are the same partial information decomposition.\n \"\"\"\n equal_pi = super(BaseIncompletePID, self).__eq__(other)\n equal_red = (np.isclose(self.get_redundancy(node), other.get_redundancy(node), atol=1e-5, rtol=1e-5) for node in self._lattice)\n return equal_pi and all(equal_red)\n\n def _compute_lattice_monotonicity(self, reds, pis):\n \"\"\"\n Infer the redundancy and partial information of lattice elements via lattice monotonicity.\n\n Parameters\n ----------\n reds : dict\n Currently known redundancy values.\n pis : dict\n Currently known partial information values.\n\n Returns\n -------\n reds : dict\n Updated redundancy values.\n pis : dict\n Updated partial information values.\n \"\"\"\n # everything below a redundancy of 0 is a redundancy of 0\n nodes = list(nx.topological_sort(self._lattice))\n while nodes:\n node = nodes.pop(0)\n if node in reds and np.isclose(0, reds[node]):\n for n in descendants(self._lattice, node):\n if n not in reds:\n reds[n] = 0\n nodes.remove(n)\n\n # everything above a redundancy of I(inputs, output) is I(inputs, output)\n nodes = list(reversed(list(nx.topological_sort(self._lattice))))\n while nodes:\n node = nodes.pop(0)\n if node in reds and np.isclose(reds[node], self._total):\n for n in ascendants(self._lattice, node):\n if n not in reds:\n reds[n] = self._total\n nodes.remove(n)\n\n # if redundancy of A == redundancy of B, then for all A -> C -> B, redundancy of C = redundancy of A, B\n tops = [node for node in self._lattice if node in reds and any((n not in reds) for n in self._lattice[node])]\n bottoms = [node for node in self._lattice if\n node in reds and any((n not in reds) for n in self._lattice.reverse()[node])]\n for top, bottom in product(tops, bottoms):\n if np.isclose(reds[top], reds[bottom], atol=1e-5, rtol=1e-5):\n for path in nx.all_simple_paths(self._lattice, top, bottom):\n for node in 
path[1:-1]:\n if node not in reds:\n reds[node] = reds[top]\n\n # if redundancy of A is equal to the redundancy any of A's children, then pi(A) = 0\n for node in self._lattice:\n if node not in pis:\n if node in reds and all(n in reds for n in self._lattice[node]) and self._lattice[node]:\n if any(np.isclose(reds[n], reds[node], atol=1e-5, rtol=1e-5) for n in self._lattice[node]):\n pis[node] = 0\n\n return reds, pis\n\n def _compute_attempt_linsolve(self, reds, pis):\n \"\"\"\n Infer a linear constraint matrix from missing PI values and the mobius inversion.\n\n Parameters\n ----------\n reds : dict\n Currently known redundancy values.\n pis : dict\n Currently known partial information values.\n\n Returns\n -------\n reds : dict\n Updated redundancy values.\n pis : dict\n Updated partial information values.\n \"\"\"\n missing_vars = [node for node in self._lattice if node not in pis]\n if not missing_vars:\n return reds, pis\n\n def predicate(node, nodes):\n a = node in reds\n b = all((n in pis or n in nodes) for n in descendants(self._lattice, node, self=True))\n return a and b\n\n for vars in reversed(list(powerset(missing_vars))[1:]):\n\n lub = least_upper_bound(self._lattice, vars, predicate)\n\n if lub is None:\n continue\n\n row = lambda node: [1 if (c in descendants(self._lattice, node, self=True)) else 0 for c in vars]\n\n A = np.array([row(node) for node in vars if node in reds] + [[1] * len(vars)])\n # if version_info >= (3, 0, 0): # not sure why this is needed...\n # A = A.T\n b = np.array([reds[node] for node in vars if node in reds] + [reds[lub] - sum(pis[node] for node in descendants(self._lattice, lub, self=True) if node in pis)])\n try:\n new_pis = np.linalg.solve(A, b)\n if np.all(new_pis > -1e-6):\n for node, pi in zip(vars, new_pis):\n pis[node] = pi\n\n for node in self._lattice:\n if node not in reds:\n try:\n reds[node] = sum(pis[n] for n in descendants(self._lattice, node, self=True))\n except KeyError: # pragma: no cover\n pass\n\n 
break\n\n except:\n pass\n\n return reds, pis\n\n def _compute_single_child(self, reds, pis):\n \"\"\"\n If a node has a single child, and both redundancies are known, then the PI of the node\n is the difference in the redundancies.\n\n Parameters\n ----------\n reds : dict\n Currently known redundancy values.\n pis : dict\n Currently known partial information values.\n\n Returns\n -------\n reds : dict\n Updated redundancy values.\n pis : dict\n Updated partial information values.\n \"\"\"\n # if a node has only a single child, and you know both its redundancy and its partial\n # then you know the redundancy of the child\n for node in self._lattice:\n if node in reds and node in pis and len(self._lattice[node]) == 1:\n n = next(iter(self._lattice[node]))\n if n not in reds:\n reds[n] = reds[node] - pis[node]\n\n return reds, pis\n\n def _compute(self, reds=None, pis=None):\n \"\"\"\n Use a variety of methods to fill out as much of the lattice as possible.\n\n Parameters\n ----------\n reds : dict, None\n Currently known redundancy values.\n pis : dict, None\n Currently known partial information values.\n \"\"\"\n if reds is None:\n reds = {}\n if pis is None:\n pis = {}\n\n # set redundancies of single input sets to I(input, output) and\n # plug in computed unique values\n if self.SELF_REDUNDANCY: # pragma: no branch\n for node in self._lattice:\n if len(node) == 1:\n reds[node] = coinformation(self._dist, [node[0], self._output])\n\n if self.LATTICE_MONOTONICITY: # pragma: no branch\n reds, pis = self._compute_lattice_monotonicity(reds, pis)\n\n # if a node exists in a smaller PID, use that to compute redundancy (if possible)\n if self.REDUCED_PID: # pragma: no branch\n for node in self._lattice:\n if node not in reds and len(node) < len(self._inputs):\n sub_pid = self.__class__(self._dist.copy(), node, self._output)\n reds[node] = sub_pid.get_redundancy(node)\n\n while True:\n num_reds = len(reds)\n num_pis = len(pis)\n\n # if a node has a single child, their 
redundancies determine the node's partial information\n reds, pis = self._compute_single_child(reds=reds, pis=pis)\n\n # if the lattice is monotonic, then everything below a zero is zero, and everything above a max is max\n if self.LATTICE_MONOTONICITY: # pragma: no branch\n reds, pis = self._compute_lattice_monotonicity(reds=reds, pis=pis)\n\n # do as much of the mobius inversion as possible\n reds, pis = self._compute_mobius_inversion(reds=reds, pis=pis)\n\n # see if the remaining pis can be solved with linear constraints\n reds, pis = self._compute_attempt_linsolve(reds=reds, pis=pis)\n\n if len(reds) == num_reds and len(pis) == num_pis:\n break\n\n # if we know all but one partial, we know the last\n # note: this might be subsumed by _compute_attempt_linsolve\n diff = set(self._lattice) - set(pis)\n if len(diff) == 1: # pragma: no cover\n pis[diff.pop()] = self._total - sum(pis.values())\n\n # if the sum of known PIs is I(inputs, output), all other PIs are zero\n # note: this might be subsumed by _compute_attempt_linsolve\n if np.isclose(sum(pis.values()), self._total):\n for node in self._lattice:\n if node not in pis or np.isnan(pis[node]): # pragma: no cover\n pis[node] = 0\n\n # plug in nan for all unknown values\n for node in self._lattice:\n if node not in reds:\n reds[node] = np.nan\n if node not in pis:\n pis[node] = np.nan\n\n nx.set_node_attributes(self._lattice, name='red', values=reds)\n nx.set_node_attributes(self._lattice, name='pi', values=pis)\n\n @BasePID.consistent.getter\n def consistent(self):\n \"\"\"\n Determine if the assignment of values to the lattice is self-consistant.\n\n Returns\n -------\n valid : bool\n True if the lattice is self-consistent, False otherwise.\n \"\"\"\n reds = nx.get_node_attributes(self._lattice, 'red')\n pis = nx.get_node_attributes(self._lattice, 'pi')\n\n if self.SELF_REDUNDANCY: # pragma: no branch\n for node in self._lattice:\n if len(node) == 1:\n red = reds[node]\n mi = coinformation(self._dist, [node[0], 
self._output])\n if not np.isclose(red, mi, atol=1e-5, rtol=1e-5): # pragma: no cover\n return False\n\n # ensure that the mobius inversion holds\n for node in self._lattice:\n red = reds[node]\n parts = sum(pis[n] for n in descendants(self._lattice, node, self=True))\n if not np.isnan(red) and not np.isnan(parts):\n if not np.isclose(red, parts, atol=1e-5, rtol=1e-5):\n return False\n\n return True\n\n @BasePID.complete.getter\n def complete(self):\n \"\"\"\n Determine if all partial information values are assigned.\n\n Returns\n -------\n valid : bool\n True if the lattice is self-consistant, False otherwise.\n \"\"\"\n pis = nx.get_node_attributes(self._lattice, 'pi')\n return not any(np.isnan(pi) for pi in pis.values())\n\n\nclass BaseUniquePID(BaseIncompletePID):\n \"\"\"\n PID class for measures which define only unique informations.\n \"\"\"\n\n def _compute(self, reds=None, pis=None):\n \"\"\"\n \"\"\"\n uniques = self._measure(self._dist, self._inputs, self._output, **self._kwargs)\n if pis is None: # pragma: no branch\n pis = {}\n\n for node in self._lattice:\n if len(node) == 1 and node[0] in uniques and node not in pis:\n pis[node] = uniques[node[0]]\n\n super(BaseUniquePID, self)._compute(reds=reds, pis=pis)\n\n\nclass BaseBivariatePID(BaseIncompletePID):\n \"\"\"\n PID class for measures which define only a bivariate measure of redundancy.\n \"\"\"\n\n def _compute(self, reds=None, pis=None):\n \"\"\"\n \"\"\"\n if reds is None: # pragma: no branch\n reds = {}\n for node in self._lattice:\n if len(node) == 2 and node not in reds:\n reds[node] = self._measure(self._dist, node, self._output, **self._kwargs)\n\n super(BaseBivariatePID, self)._compute(reds=reds, pis=pis)\n" ]
[ [ "numpy.all", "numpy.isnan", "numpy.linalg.solve", "numpy.isclose" ] ]
KailinLi/pyrender
[ "cd943dac32ea943b464b0e37262367c593bbd1c9" ]
[ "pyrender/viewer.py" ]
[ "\"\"\"A pyglet-based interactive 3D scene viewer.\n\"\"\"\nimport copy\nimport os\nimport sys\nimport time\nfrom threading import RLock, Thread\n\nimport imageio\nimport numpy as np\nimport OpenGL\nimport trimesh\n\ntry:\n from Tkinter import Tk\n from Tkinter import tkFileDialog as filedialog\nexcept Exception:\n try:\n from tkinter import Tk\n from tkinter import filedialog as filedialog\n except Exception:\n pass\n\nimport pyglet\nfrom pyglet import clock\n\nfrom .camera import IntrinsicsCamera, OrthographicCamera, PerspectiveCamera\nfrom .constants import (\n DEFAULT_SCENE_SCALE,\n DEFAULT_Z_FAR,\n DEFAULT_Z_NEAR,\n OPEN_GL_MAJOR,\n OPEN_GL_MINOR,\n TEXT_PADDING,\n RenderFlags,\n TextAlign,\n)\nfrom .light import DirectionalLight\nfrom .mesh import Mesh\nfrom .node import Node\nfrom .renderer import Renderer\nfrom .trackball import Trackball\n\npyglet.options[\"shadow_window\"] = False\n\n\nclass Viewer(pyglet.window.Window):\n \"\"\"An interactive viewer for 3D scenes.\n\n The viewer's camera is separate from the scene's, but will take on\n the parameters of the scene's main view camera and start in the same pose.\n If the scene does not have a camera, a suitable default will be provided.\n\n Parameters\n ----------\n scene : :class:`Scene`\n The scene to visualize.\n viewport_size : (2,) int\n The width and height of the initial viewing window.\n render_flags : dict\n A set of flags for rendering the scene. 
Described in the note below.\n viewer_flags : dict\n A set of flags for controlling the viewer's behavior.\n Described in the note below.\n registered_keys : dict\n A map from ASCII key characters to tuples containing:\n\n - A function to be called whenever the key is pressed,\n whose first argument will be the viewer itself.\n - (Optionally) A list of additional positional arguments\n to be passed to the function.\n - (Optionally) A dict of keyword arguments to be passed\n to the function.\n\n kwargs : dict\n Any keyword arguments left over will be interpreted as belonging to\n either the :attr:`.Viewer.render_flags` or :attr:`.Viewer.viewer_flags`\n dictionaries. Those flag sets will be updated appropriately.\n\n Note\n ----\n The basic commands for moving about the scene are given as follows:\n\n - **Rotating about the scene**: Hold the left mouse button and\n drag the cursor.\n - **Rotating about the view axis**: Hold ``CTRL`` and the left mouse\n button and drag the cursor.\n - **Panning**:\n\n - Hold SHIFT, then hold the left mouse button and drag the cursor, or\n - Hold the middle mouse button and drag the cursor.\n\n - **Zooming**:\n\n - Scroll the mouse wheel, or\n - Hold the right mouse button and drag the cursor.\n\n Other keyboard commands are as follows:\n\n - ``a``: Toggles rotational animation mode.\n - ``c``: Toggles backface culling.\n - ``f``: Toggles fullscreen mode.\n - ``h``: Toggles shadow rendering.\n - ``i``: Toggles axis display mode\n (no axes, world axis, mesh axes, all axes).\n - ``l``: Toggles lighting mode\n (scene lighting, Raymond lighting, or direct lighting).\n - ``m``: Toggles face normal visualization.\n - ``n``: Toggles vertex normal visualization.\n - ``o``: Toggles orthographic mode.\n - ``q``: Quits the viewer.\n - ``r``: Starts recording a GIF, and pressing again stops recording\n and opens a file dialog.\n - ``s``: Opens a file dialog to save the current view as an image.\n - ``w``: Toggles wireframe mode\n (scene default, 
flip wireframes, all wireframe, or all solid).\n - ``z``: Resets the camera to the initial view.\n\n Note\n ----\n The valid keys for ``render_flags`` are as follows:\n\n - ``flip_wireframe``: `bool`, If `True`, all objects will have their\n wireframe modes flipped from what their material indicates.\n Defaults to `False`.\n - ``all_wireframe``: `bool`, If `True`, all objects will be rendered\n in wireframe mode. Defaults to `False`.\n - ``all_solid``: `bool`, If `True`, all objects will be rendered in\n solid mode. Defaults to `False`.\n - ``shadows``: `bool`, If `True`, shadows will be rendered.\n Defaults to `False`.\n - ``vertex_normals``: `bool`, If `True`, vertex normals will be\n rendered as blue lines. Defaults to `False`.\n - ``face_normals``: `bool`, If `True`, face normals will be rendered as\n blue lines. Defaults to `False`.\n - ``cull_faces``: `bool`, If `True`, backfaces will be culled.\n Defaults to `True`.\n - ``point_size`` : float, The point size in pixels. Defaults to 1px.\n\n Note\n ----\n The valid keys for ``viewer_flags`` are as follows:\n\n - ``rotate``: `bool`, If `True`, the scene's camera will rotate\n about an axis. Defaults to `False`.\n - ``rotate_rate``: `float`, The rate of rotation in radians per second.\n Defaults to `PI / 3.0`.\n - ``rotate_axis``: `(3,) float`, The axis in world coordinates to rotate\n about. Defaults to ``[0,0,1]``.\n - ``view_center``: `(3,) float`, The position to rotate the scene about.\n Defaults to the scene's centroid.\n - ``use_raymond_lighting``: `bool`, If `True`, an additional set of three\n directional lights that move with the camera will be added to the scene.\n Defaults to `False`.\n - ``use_direct_lighting``: `bool`, If `True`, an additional directional\n light that moves with the camera and points out of it will be added to\n the scene. Defaults to `False`.\n - ``lighting_intensity``: `float`, The overall intensity of the\n viewer's additional lights (when they're in use). 
Defaults to 3.0.\n - ``use_perspective_cam``: `bool`, If `True`, a perspective camera will\n be used. Otherwise, an orthographic camera is used. Defaults to `True`.\n - ``save_directory``: `str`, A directory to open the file dialogs in.\n Defaults to `None`.\n - ``window_title``: `str`, A title for the viewer's application window.\n Defaults to `\"Scene Viewer\"`.\n - ``refresh_rate``: `float`, A refresh rate for rendering, in Hertz.\n Defaults to `30.0`.\n - ``fullscreen``: `bool`, Whether to make viewer fullscreen.\n Defaults to `False`.\n - ``show_world_axis``: `bool`, Whether to show the world axis.\n Defaults to `False`.\n - ``show_mesh_axes``: `bool`, Whether to show the individual mesh axes.\n Defaults to `False`.\n - ``caption``: `list of dict`, Text caption(s) to display on the viewer.\n Defaults to `None`.\n\n Note\n ----\n Animation can be accomplished by running the viewer with ``run_in_thread``\n enabled. Then, just run a loop in your main thread, updating the scene as\n needed. 
Before updating the scene, be sure to acquire the\n :attr:`.Viewer.render_lock`, and release it when your update is done.\n \"\"\"\n\n def __init__(\n self,\n scene,\n viewport_size=None,\n render_flags=None,\n viewer_flags=None,\n registered_keys=None,\n run_in_thread=False,\n **kwargs\n ):\n\n #######################################################################\n # Save attributes and flags\n #######################################################################\n if viewport_size is None:\n viewport_size = (640, 480)\n self._scene = scene\n self._viewport_size = viewport_size\n self._render_lock = RLock()\n self._is_active = False\n self._should_close = False\n self._run_in_thread = run_in_thread\n\n self._default_render_flags = {\n \"flip_wireframe\": False,\n \"all_wireframe\": False,\n \"all_solid\": False,\n \"shadows\": False,\n \"vertex_normals\": False,\n \"face_normals\": False,\n \"cull_faces\": True,\n \"point_size\": 1.0,\n }\n self._default_viewer_flags = {\n \"mouse_pressed\": False,\n \"rotate\": False,\n \"rotate_rate\": np.pi / 3.0,\n \"rotate_axis\": np.array([0.0, 0.0, 1.0]),\n \"view_center\": None,\n \"record\": False,\n \"use_raymond_lighting\": False,\n \"use_direct_lighting\": False,\n \"lighting_intensity\": 3.0,\n \"use_perspective_cam\": True,\n \"save_directory\": None,\n \"window_title\": \"Scene Viewer\",\n \"refresh_rate\": 30.0,\n \"fullscreen\": False,\n \"show_world_axis\": False,\n \"show_mesh_axes\": False,\n \"caption\": None,\n }\n self._render_flags = self._default_render_flags.copy()\n self._viewer_flags = self._default_viewer_flags.copy()\n self._viewer_flags[\"rotate_axis\"] = self._default_viewer_flags[\"rotate_axis\"].copy()\n\n if render_flags is not None:\n self._render_flags.update(render_flags)\n if viewer_flags is not None:\n self._viewer_flags.update(viewer_flags)\n\n for key in kwargs:\n if key in self.render_flags:\n self._render_flags[key] = kwargs[key]\n elif key in self.viewer_flags:\n 
self._viewer_flags[key] = kwargs[key]\n\n # TODO MAC OS BUG FOR SHADOWS\n if sys.platform == \"darwin\":\n self._render_flags[\"shadows\"] = False\n\n self._registered_keys = {}\n if registered_keys is not None:\n self._registered_keys = {ord(k.lower()): registered_keys[k] for k in registered_keys}\n\n #######################################################################\n # Save internal settings\n #######################################################################\n\n # Set up caption stuff\n self._message_text = None\n self._ticks_till_fade = 2.0 / 3.0 * self.viewer_flags[\"refresh_rate\"]\n self._message_opac = 1.0 + self._ticks_till_fade\n\n # Set up raymond lights and direct lights\n self._raymond_lights = self._create_raymond_lights()\n self._direct_light = self._create_direct_light()\n\n # Set up axes\n self._axes = {}\n self._axis_mesh = Mesh.from_trimesh(\n trimesh.creation.axis(origin_size=0.1, axis_radius=0.05, axis_length=1.0), smooth=False\n )\n if self.viewer_flags[\"show_world_axis\"]:\n self._set_axes(world=self.viewer_flags[\"show_world_axis\"], mesh=self.viewer_flags[\"show_mesh_axes\"])\n\n #######################################################################\n # Set up camera node\n #######################################################################\n self._camera_node = None\n self._prior_main_camera_node = None\n self._default_camera_pose = None\n self._default_persp_cam = None\n self._default_orth_cam = None\n self._trackball = None\n self._saved_frames = []\n\n # Extract main camera from scene and set up our mirrored copy\n znear = None\n zfar = None\n if scene.main_camera_node is not None:\n n = scene.main_camera_node\n camera = copy.copy(n.camera)\n if isinstance(camera, (PerspectiveCamera, IntrinsicsCamera)):\n self._default_persp_cam = camera\n znear = camera.znear\n zfar = camera.zfar\n elif isinstance(camera, OrthographicCamera):\n self._default_orth_cam = camera\n znear = camera.znear\n zfar = camera.zfar\n 
self._default_camera_pose = scene.get_pose(scene.main_camera_node)\n self._prior_main_camera_node = n\n\n # Set defaults as needed\n if zfar is None:\n zfar = max(scene.scale * 10.0, DEFAULT_Z_FAR)\n if znear is None or znear == 0:\n if scene.scale == 0:\n znear = DEFAULT_Z_NEAR\n else:\n znear = min(scene.scale / 10.0, DEFAULT_Z_NEAR)\n\n if self._default_persp_cam is None:\n self._default_persp_cam = PerspectiveCamera(yfov=np.pi / 3.0, znear=znear, zfar=zfar)\n if self._default_orth_cam is None:\n xmag = ymag = scene.scale\n if scene.scale == 0:\n xmag = ymag = 1.0\n self._default_orth_cam = OrthographicCamera(xmag=xmag, ymag=ymag, znear=znear, zfar=zfar)\n if self._default_camera_pose is None:\n self._default_camera_pose = self._compute_initial_camera_pose()\n\n # Pick camera\n if self.viewer_flags[\"use_perspective_cam\"]:\n camera = self._default_persp_cam\n else:\n camera = self._default_orth_cam\n\n self._camera_node = Node(matrix=self._default_camera_pose, camera=camera)\n scene.add_node(self._camera_node)\n scene.main_camera_node = self._camera_node\n self._reset_view()\n\n #######################################################################\n # Initialize OpenGL context and renderer\n #######################################################################\n self._renderer = Renderer(self._viewport_size[0], self._viewport_size[1], self.render_flags[\"point_size\"])\n self._is_active = True\n\n if self.run_in_thread:\n self._thread = Thread(target=self._init_and_start_app)\n self._thread.start()\n else:\n self._init_and_start_app()\n\n @property\n def scene(self):\n \"\"\":class:`.Scene` : The scene being visualized.\n \"\"\"\n return self._scene\n\n @property\n def viewport_size(self):\n \"\"\"(2,) int : The width and height of the viewing window.\n \"\"\"\n return self._viewport_size\n\n @property\n def render_lock(self):\n \"\"\":class:`threading.RLock` : If acquired, prevents the viewer from\n rendering until released.\n\n Run 
:meth:`.Viewer.render_lock.acquire` before making updates to\n the scene in a different thread, and run\n :meth:`.Viewer.render_lock.release` once you're done to let the viewer\n continue.\n \"\"\"\n return self._render_lock\n\n @property\n def is_active(self):\n \"\"\"bool : `True` if the viewer is active, or `False` if it has\n been closed.\n \"\"\"\n return self._is_active\n\n @property\n def run_in_thread(self):\n \"\"\"bool : Whether the viewer was run in a separate thread.\n \"\"\"\n return self._run_in_thread\n\n @property\n def render_flags(self):\n \"\"\"dict : Flags for controlling the renderer's behavior.\n\n - ``flip_wireframe``: `bool`, If `True`, all objects will have their\n wireframe modes flipped from what their material indicates.\n Defaults to `False`.\n - ``all_wireframe``: `bool`, If `True`, all objects will be rendered\n in wireframe mode. Defaults to `False`.\n - ``all_solid``: `bool`, If `True`, all objects will be rendered in\n solid mode. Defaults to `False`.\n - ``shadows``: `bool`, If `True`, shadows will be rendered.\n Defaults to `False`.\n - ``vertex_normals``: `bool`, If `True`, vertex normals will be\n rendered as blue lines. Defaults to `False`.\n - ``face_normals``: `bool`, If `True`, face normals will be rendered as\n blue lines. Defaults to `False`.\n - ``cull_faces``: `bool`, If `True`, backfaces will be culled.\n Defaults to `True`.\n - ``point_size`` : float, The point size in pixels. Defaults to 1px.\n\n \"\"\"\n return self._render_flags\n\n @render_flags.setter\n def render_flags(self, value):\n self._render_flags = value\n\n @property\n def viewer_flags(self):\n \"\"\"dict : Flags for controlling the viewer's behavior.\n\n The valid keys for ``viewer_flags`` are as follows:\n\n - ``rotate``: `bool`, If `True`, the scene's camera will rotate\n about an axis. 
Defaults to `False`.\n - ``rotate_rate``: `float`, The rate of rotation in radians per second.\n Defaults to `PI / 3.0`.\n - ``rotate_axis``: `(3,) float`, The axis in world coordinates to\n rotate about. Defaults to ``[0,0,1]``.\n - ``view_center``: `(3,) float`, The position to rotate the scene\n about. Defaults to the scene's centroid.\n - ``use_raymond_lighting``: `bool`, If `True`, an additional set of\n three directional lights that move with the camera will be added to\n the scene. Defaults to `False`.\n - ``use_direct_lighting``: `bool`, If `True`, an additional directional\n light that moves with the camera and points out of it will be\n added to the scene. Defaults to `False`.\n - ``lighting_intensity``: `float`, The overall intensity of the\n viewer's additional lights (when they're in use). Defaults to 3.0.\n - ``use_perspective_cam``: `bool`, If `True`, a perspective camera will\n be used. Otherwise, an orthographic camera is used. Defaults to\n `True`.\n - ``save_directory``: `str`, A directory to open the file dialogs in.\n Defaults to `None`.\n - ``window_title``: `str`, A title for the viewer's application window.\n Defaults to `\"Scene Viewer\"`.\n - ``refresh_rate``: `float`, A refresh rate for rendering, in Hertz.\n Defaults to `30.0`.\n - ``fullscreen``: `bool`, Whether to make viewer fullscreen.\n Defaults to `False`.\n - ``show_world_axis``: `bool`, Whether to show the world axis.\n Defaults to `False`.\n - ``show_mesh_axes``: `bool`, Whether to show the individual mesh axes.\n Defaults to `False`.\n - ``caption``: `list of dict`, Text caption(s) to display on\n the viewer. 
Defaults to `None`.\n\n \"\"\"\n return self._viewer_flags\n\n @viewer_flags.setter\n def viewer_flags(self, value):\n self._viewer_flags = value\n\n @property\n def registered_keys(self):\n \"\"\"dict : Map from ASCII key character to a handler function.\n\n This is a map from ASCII key characters to tuples containing:\n\n - A function to be called whenever the key is pressed,\n whose first argument will be the viewer itself.\n - (Optionally) A list of additional positional arguments\n to be passed to the function.\n - (Optionally) A dict of keyword arguments to be passed\n to the function.\n\n \"\"\"\n return self._registered_keys\n\n @registered_keys.setter\n def registered_keys(self, value):\n self._registered_keys = value\n\n def close_external(self):\n \"\"\"Close the viewer from another thread.\n\n This function will wait for the actual close, so you immediately\n manipulate the scene afterwards.\n \"\"\"\n self._should_close = True\n while self.is_active:\n time.sleep(1.0 / self.viewer_flags[\"refresh_rate\"])\n\n def save_gif(self, filename=None):\n \"\"\"Save the stored GIF frames to a file.\n\n To use this asynchronously, run the viewer with the ``record``\n flag and the ``run_in_thread`` flags set.\n Kill the viewer after your desired time with\n :meth:`.Viewer.close_external`, and then call :meth:`.Viewer.save_gif`.\n\n Parameters\n ----------\n filename : str\n The file to save the GIF to. 
If not specified,\n a file dialog will be opened to ask the user where\n to save the GIF file.\n \"\"\"\n if filename is None:\n filename = self._get_save_filename([\"gif\", \"all\"])\n if filename is not None:\n self.viewer_flags[\"save_directory\"] = os.path.dirname(filename)\n imageio.mimwrite(\n filename, self._saved_frames, fps=self.viewer_flags[\"refresh_rate\"], palettesize=128, subrectangles=True\n )\n self._saved_frames = []\n\n def on_close(self):\n \"\"\"Exit the event loop when the window is closed.\n \"\"\"\n # Remove our camera and restore the prior one\n if self._camera_node is not None:\n self.scene.remove_node(self._camera_node)\n if self._prior_main_camera_node is not None:\n self.scene.main_camera_node = self._prior_main_camera_node\n\n # Delete any lighting nodes that we've attached\n if self.viewer_flags[\"use_raymond_lighting\"]:\n for n in self._raymond_lights:\n if self.scene.has_node(n):\n self.scene.remove_node(n)\n if self.viewer_flags[\"use_direct_lighting\"]:\n if self.scene.has_node(self._direct_light):\n self.scene.remove_node(self._direct_light)\n\n # Delete any axis nodes that we've attached\n self._remove_axes()\n\n # Delete renderer\n if self._renderer is not None:\n self._renderer.delete()\n self._renderer = None\n\n # Force clean-up of OpenGL context data\n try:\n OpenGL.contextdata.cleanupContext()\n self.close()\n except Exception:\n pass\n finally:\n self._is_active = False\n super(Viewer, self).on_close()\n pyglet.app.exit()\n\n def on_draw(self):\n \"\"\"Redraw the scene into the viewing window.\n \"\"\"\n if self._renderer is None:\n return\n\n if self.run_in_thread:\n self.render_lock.acquire()\n\n # Make OpenGL context current\n self.switch_to()\n\n # Render the scene\n self.clear()\n self._render()\n\n if self._message_text is not None:\n self._renderer.render_text(\n self._message_text,\n self.viewport_size[0] - TEXT_PADDING,\n TEXT_PADDING,\n font_pt=20,\n color=np.array([0.1, 0.7, 0.2, np.clip(self._message_opac, 
0.0, 1.0)]),\n align=TextAlign.BOTTOM_RIGHT,\n )\n\n if self.viewer_flags[\"caption\"] is not None:\n for caption in self.viewer_flags[\"caption\"]:\n xpos, ypos = self._location_to_x_y(caption[\"location\"])\n self._renderer.render_text(\n caption[\"text\"],\n xpos,\n ypos,\n font_name=caption[\"font_name\"],\n font_pt=caption[\"font_pt\"],\n color=caption[\"color\"],\n scale=caption[\"scale\"],\n align=caption[\"location\"],\n )\n\n if self.run_in_thread:\n self.render_lock.release()\n\n def on_resize(self, width, height):\n \"\"\"Resize the camera and trackball when the window is resized.\n \"\"\"\n if self._renderer is None:\n return\n\n self._viewport_size = (width, height)\n self._trackball.resize(self._viewport_size)\n self._renderer.viewport_width = self._viewport_size[0]\n self._renderer.viewport_height = self._viewport_size[1]\n self.on_draw()\n\n def on_mouse_press(self, x, y, buttons, modifiers):\n \"\"\"Record an initial mouse press.\n \"\"\"\n self._trackball.set_state(Trackball.STATE_ROTATE)\n if buttons == pyglet.window.mouse.LEFT:\n ctrl = modifiers & pyglet.window.key.MOD_CTRL\n shift = modifiers & pyglet.window.key.MOD_SHIFT\n if ctrl and shift:\n self._trackball.set_state(Trackball.STATE_ZOOM)\n elif ctrl:\n self._trackball.set_state(Trackball.STATE_ROLL)\n elif shift:\n self._trackball.set_state(Trackball.STATE_PAN)\n elif buttons == pyglet.window.mouse.MIDDLE:\n self._trackball.set_state(Trackball.STATE_PAN)\n elif buttons == pyglet.window.mouse.RIGHT:\n self._trackball.set_state(Trackball.STATE_ZOOM)\n\n self._trackball.down(np.array([x, y]))\n\n # Stop animating while using the mouse\n self.viewer_flags[\"mouse_pressed\"] = True\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n \"\"\"Record a mouse drag.\n \"\"\"\n self._trackball.drag(np.array([x, y]))\n\n def on_mouse_release(self, x, y, button, modifiers):\n \"\"\"Record a mouse release.\n \"\"\"\n self.viewer_flags[\"mouse_pressed\"] = False\n\n def on_mouse_scroll(self, 
x, y, dx, dy):\n \"\"\"Record a mouse scroll.\n \"\"\"\n if self.viewer_flags[\"use_perspective_cam\"]:\n self._trackball.scroll(dy)\n else:\n spfc = 0.95\n spbc = 1.0 / 0.95\n sf = 1.0\n if dy > 0:\n sf = spfc * dy\n elif dy < 0:\n sf = -spbc * dy\n\n c = self._camera_node.camera\n xmag = max(c.xmag * sf, 1e-8)\n ymag = max(c.ymag * sf, 1e-8 * c.ymag / c.xmag)\n c.xmag = xmag\n c.ymag = ymag\n\n def on_key_press(self, symbol, modifiers):\n \"\"\"Record a key press.\n \"\"\"\n # First, check for registered key callbacks\n if symbol in self.registered_keys:\n tup = self.registered_keys[symbol]\n callback = None\n args = []\n kwargs = {}\n if not isinstance(tup, (list, tuple, np.ndarray)):\n callback = tup\n else:\n callback = tup[0]\n if len(tup) == 2:\n args = tup[1]\n if len(tup) == 3:\n kwargs = tup[2]\n callback(self, *args, **kwargs)\n return\n\n # Otherwise, use default key functions\n\n # A causes the frame to rotate\n self._message_text = None\n if symbol == pyglet.window.key.A:\n self.viewer_flags[\"rotate\"] = not self.viewer_flags[\"rotate\"]\n if self.viewer_flags[\"rotate\"]:\n self._message_text = \"Rotation On\"\n else:\n self._message_text = \"Rotation Off\"\n\n # C toggles backface culling\n elif symbol == pyglet.window.key.C:\n self.render_flags[\"cull_faces\"] = not self.render_flags[\"cull_faces\"]\n if self.render_flags[\"cull_faces\"]:\n self._message_text = \"Cull Faces On\"\n else:\n self._message_text = \"Cull Faces Off\"\n\n # F toggles face normals\n elif symbol == pyglet.window.key.F:\n self.viewer_flags[\"fullscreen\"] = not self.viewer_flags[\"fullscreen\"]\n self.set_fullscreen(self.viewer_flags[\"fullscreen\"])\n self.activate()\n if self.viewer_flags[\"fullscreen\"]:\n self._message_text = \"Fullscreen On\"\n else:\n self._message_text = \"Fullscreen Off\"\n\n # S toggles shadows\n elif symbol == pyglet.window.key.H and sys.platform != \"darwin\":\n self.render_flags[\"shadows\"] = not self.render_flags[\"shadows\"]\n if 
self.render_flags[\"shadows\"]:\n self._message_text = \"Shadows On\"\n else:\n self._message_text = \"Shadows Off\"\n\n elif symbol == pyglet.window.key.I:\n if self.viewer_flags[\"show_world_axis\"] and not self.viewer_flags[\"show_mesh_axes\"]:\n self.viewer_flags[\"show_world_axis\"] = False\n self.viewer_flags[\"show_mesh_axes\"] = True\n self._set_axes(False, True)\n self._message_text = \"Mesh Axes On\"\n elif not self.viewer_flags[\"show_world_axis\"] and self.viewer_flags[\"show_mesh_axes\"]:\n self.viewer_flags[\"show_world_axis\"] = True\n self.viewer_flags[\"show_mesh_axes\"] = True\n self._set_axes(True, True)\n self._message_text = \"All Axes On\"\n elif self.viewer_flags[\"show_world_axis\"] and self.viewer_flags[\"show_mesh_axes\"]:\n self.viewer_flags[\"show_world_axis\"] = False\n self.viewer_flags[\"show_mesh_axes\"] = False\n self._set_axes(False, False)\n self._message_text = \"All Axes Off\"\n else:\n self.viewer_flags[\"show_world_axis\"] = True\n self.viewer_flags[\"show_mesh_axes\"] = False\n self._set_axes(True, False)\n self._message_text = \"World Axis On\"\n\n # L toggles the lighting mode\n elif symbol == pyglet.window.key.L:\n if self.viewer_flags[\"use_raymond_lighting\"]:\n self.viewer_flags[\"use_raymond_lighting\"] = False\n self.viewer_flags[\"use_direct_lighting\"] = True\n self._message_text = \"Direct Lighting\"\n elif self.viewer_flags[\"use_direct_lighting\"]:\n self.viewer_flags[\"use_raymond_lighting\"] = False\n self.viewer_flags[\"use_direct_lighting\"] = False\n self._message_text = \"Default Lighting\"\n else:\n self.viewer_flags[\"use_raymond_lighting\"] = True\n self.viewer_flags[\"use_direct_lighting\"] = False\n self._message_text = \"Raymond Lighting\"\n\n # M toggles face normals\n elif symbol == pyglet.window.key.M:\n self.render_flags[\"face_normals\"] = not self.render_flags[\"face_normals\"]\n if self.render_flags[\"face_normals\"]:\n self._message_text = \"Face Normals On\"\n else:\n self._message_text = 
\"Face Normals Off\"\n\n # N toggles vertex normals\n elif symbol == pyglet.window.key.N:\n self.render_flags[\"vertex_normals\"] = not self.render_flags[\"vertex_normals\"]\n if self.render_flags[\"vertex_normals\"]:\n self._message_text = \"Vert Normals On\"\n else:\n self._message_text = \"Vert Normals Off\"\n\n # O toggles orthographic camera mode\n elif symbol == pyglet.window.key.O:\n self.viewer_flags[\"use_perspective_cam\"] = not self.viewer_flags[\"use_perspective_cam\"]\n if self.viewer_flags[\"use_perspective_cam\"]:\n camera = self._default_persp_cam\n self._message_text = \"Perspective View\"\n else:\n camera = self._default_orth_cam\n self._message_text = \"Orthographic View\"\n\n cam_pose = self._camera_node.matrix.copy()\n cam_node = Node(matrix=cam_pose, camera=camera)\n self.scene.remove_node(self._camera_node)\n self.scene.add_node(cam_node)\n self.scene.main_camera_node = cam_node\n self._camera_node = cam_node\n\n # Q quits the viewer\n elif symbol == pyglet.window.key.Q:\n self.on_close()\n\n # R starts recording frames\n elif symbol == pyglet.window.key.R:\n if self.viewer_flags[\"record\"]:\n self.save_gif()\n self.set_caption(self.viewer_flags[\"window_title\"])\n else:\n self.set_caption(\"{} (RECORDING)\".format(self.viewer_flags[\"window_title\"]))\n self.viewer_flags[\"record\"] = not self.viewer_flags[\"record\"]\n\n # S saves the current frame as an image\n elif symbol == pyglet.window.key.S:\n self._save_image()\n\n # W toggles through wireframe modes\n elif symbol == pyglet.window.key.W:\n if self.render_flags[\"flip_wireframe\"]:\n self.render_flags[\"flip_wireframe\"] = False\n self.render_flags[\"all_wireframe\"] = True\n self.render_flags[\"all_solid\"] = False\n self._message_text = \"All Wireframe\"\n elif self.render_flags[\"all_wireframe\"]:\n self.render_flags[\"flip_wireframe\"] = False\n self.render_flags[\"all_wireframe\"] = False\n self.render_flags[\"all_solid\"] = True\n self._message_text = \"All Solid\"\n elif 
self.render_flags[\"all_solid\"]:\n self.render_flags[\"flip_wireframe\"] = False\n self.render_flags[\"all_wireframe\"] = False\n self.render_flags[\"all_solid\"] = False\n self._message_text = \"Default Wireframe\"\n else:\n self.render_flags[\"flip_wireframe\"] = True\n self.render_flags[\"all_wireframe\"] = False\n self.render_flags[\"all_solid\"] = False\n self._message_text = \"Flip Wireframe\"\n\n # Z resets the camera viewpoint\n elif symbol == pyglet.window.key.Z:\n self._reset_view()\n\n if self._message_text is not None:\n self._message_opac = 1.0 + self._ticks_till_fade\n\n @staticmethod\n def _time_event(dt, self):\n \"\"\"The timer callback.\n \"\"\"\n # Don't run old dead events after we've already closed\n if not self._is_active:\n return\n\n if self.viewer_flags[\"record\"]:\n self._record()\n if self.viewer_flags[\"rotate\"] and not self.viewer_flags[\"mouse_pressed\"]:\n self._rotate()\n\n # Manage message opacity\n if self._message_text is not None:\n if self._message_opac > 1.0:\n self._message_opac -= 1.0\n else:\n self._message_opac *= 0.90\n if self._message_opac < 0.05:\n self._message_opac = 1.0 + self._ticks_till_fade\n self._message_text = None\n\n if self._should_close:\n self.on_close()\n else:\n self.on_draw()\n\n def _reset_view(self):\n \"\"\"Reset the view to a good initial state.\n\n The view is initially along the positive x-axis at a\n sufficient distance from the scene.\n \"\"\"\n scale = self.scene.scale\n if scale == 0.0:\n scale = DEFAULT_SCENE_SCALE\n centroid = self.scene.centroid\n\n if self.viewer_flags[\"view_center\"] is not None:\n centroid = self.viewer_flags[\"view_center\"]\n\n self._camera_node.matrix = self._default_camera_pose\n self._trackball = Trackball(self._default_camera_pose, self.viewport_size, scale, centroid)\n\n def _get_save_filename(self, file_exts):\n file_types = {\n \"png\": (\"png files\", \"*.png\"),\n \"jpg\": (\"jpeg files\", \"*.jpg\"),\n \"gif\": (\"gif files\", \"*.gif\"),\n \"all\": 
(\"all files\", \"*\"),\n }\n filetypes = [file_types[x] for x in file_exts]\n try:\n root = Tk()\n save_dir = self.viewer_flags[\"save_directory\"]\n if save_dir is None:\n save_dir = os.getcwd()\n filename = filedialog.asksaveasfilename(\n initialdir=save_dir, title=\"Select file save location\", filetypes=filetypes\n )\n except Exception:\n return None\n\n root.destroy()\n if filename == ():\n return None\n return filename\n\n def _save_image(self):\n filename = self._get_save_filename([\"png\", \"jpg\", \"gif\", \"all\"])\n if filename is not None:\n self.viewer_flags[\"save_directory\"] = os.path.dirname(filename)\n imageio.imwrite(filename, self._renderer.read_color_buf())\n\n def _record(self):\n \"\"\"Save another frame for the GIF.\n \"\"\"\n data = self._renderer.read_color_buf()\n if not np.all(data == 0.0):\n self._saved_frames.append(data)\n\n def _rotate(self):\n \"\"\"Animate the scene by rotating the camera.\n \"\"\"\n az = self.viewer_flags[\"rotate_rate\"] / self.viewer_flags[\"refresh_rate\"]\n self._trackball.rotate(az, self.viewer_flags[\"rotate_axis\"])\n\n def _render(self):\n \"\"\"Render the scene into the framebuffer and flip.\n \"\"\"\n scene = self.scene\n self._camera_node.matrix = self._trackball.pose.copy()\n\n # Set lighting\n vli = self.viewer_flags[\"lighting_intensity\"]\n if self.viewer_flags[\"use_raymond_lighting\"]:\n for n in self._raymond_lights:\n n.light.intensity = vli / 3.0\n if not self.scene.has_node(n):\n scene.add_node(n, parent_node=self._camera_node)\n else:\n self._direct_light.light.intensity = vli\n for n in self._raymond_lights:\n if self.scene.has_node(n):\n self.scene.remove_node(n)\n\n if self.viewer_flags[\"use_direct_lighting\"]:\n if not self.scene.has_node(self._direct_light):\n scene.add_node(self._direct_light, parent_node=self._camera_node)\n elif self.scene.has_node(self._direct_light):\n self.scene.remove_node(self._direct_light)\n\n flags = RenderFlags.NONE\n if 
self.render_flags[\"flip_wireframe\"]:\n flags |= RenderFlags.FLIP_WIREFRAME\n elif self.render_flags[\"all_wireframe\"]:\n flags |= RenderFlags.ALL_WIREFRAME\n elif self.render_flags[\"all_solid\"]:\n flags |= RenderFlags.ALL_SOLID\n\n if self.render_flags[\"shadows\"]:\n flags |= RenderFlags.SHADOWS_DIRECTIONAL | RenderFlags.SHADOWS_SPOT\n if self.render_flags[\"vertex_normals\"]:\n flags |= RenderFlags.VERTEX_NORMALS\n if self.render_flags[\"face_normals\"]:\n flags |= RenderFlags.FACE_NORMALS\n if not self.render_flags[\"cull_faces\"]:\n flags |= RenderFlags.SKIP_CULL_FACES\n\n self._renderer.render(self.scene, flags)\n\n def _init_and_start_app(self):\n from pyglet.gl import Config\n\n conf = Config(\n sample_buffers=1,\n samples=4,\n depth_size=24,\n double_buffer=True,\n major_version=OPEN_GL_MAJOR,\n minor_version=OPEN_GL_MINOR,\n )\n super(Viewer, self).__init__(\n config=conf, resizable=True, width=self._viewport_size[0], height=self._viewport_size[1]\n )\n if self.context.config.major_version < 3:\n raise ValueError(\"Unable to initialize an OpenGL 3+ context\")\n clock.schedule_interval(Viewer._time_event, 1.0 / self.viewer_flags[\"refresh_rate\"], self)\n self.switch_to()\n self.set_caption(self.viewer_flags[\"window_title\"])\n pyglet.app.run()\n\n def _compute_initial_camera_pose(self):\n centroid = self.scene.centroid\n if self.viewer_flags[\"view_center\"] is not None:\n centroid = self.viewer_flags[\"view_center\"]\n scale = self.scene.scale\n if scale == 0.0:\n scale = DEFAULT_SCENE_SCALE\n\n s2 = 1.0 / np.sqrt(2.0)\n cp = np.eye(4)\n cp[:3, :3] = np.array([[0.0, -s2, s2], [1.0, 0.0, 0.0], [0.0, s2, s2]])\n hfov = np.pi / 6.0\n dist = scale / (2.0 * np.tan(hfov))\n cp[:3, 3] = dist * np.array([1.0, 0.0, 1.0]) + centroid\n\n return cp\n\n def _create_raymond_lights(self):\n thetas = np.pi * np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])\n phis = np.pi * np.array([0.0, 2.0 / 3.0, 4.0 / 3.0])\n\n nodes = []\n\n for phi, theta in zip(phis, thetas):\n xp 
= np.sin(theta) * np.cos(phi)\n yp = np.sin(theta) * np.sin(phi)\n zp = np.cos(theta)\n\n z = np.array([xp, yp, zp])\n z = z / np.linalg.norm(z)\n x = np.array([-z[1], z[0], 0.0])\n if np.linalg.norm(x) == 0:\n x = np.array([1.0, 0.0, 0.0])\n x = x / np.linalg.norm(x)\n y = np.cross(z, x)\n\n matrix = np.eye(4)\n matrix[:3, :3] = np.c_[x, y, z]\n nodes.append(Node(light=DirectionalLight(color=np.ones(3), intensity=1.0), matrix=matrix))\n\n return nodes\n\n def _create_direct_light(self):\n light = DirectionalLight(color=np.ones(3), intensity=1.0)\n n = Node(light=light, matrix=np.eye(4))\n return n\n\n def _set_axes(self, world, mesh):\n scale = self.scene.scale\n if world:\n if \"scene\" not in self._axes:\n n = Node(mesh=self._axis_mesh, scale=np.ones(3) * scale * 0.3)\n self.scene.add_node(n)\n self._axes[\"scene\"] = n\n else:\n if \"scene\" in self._axes:\n self.scene.remove_node(self._axes[\"scene\"])\n self._axes.pop(\"scene\")\n\n if mesh:\n old_nodes = []\n existing_axes = set([self._axes[k] for k in self._axes])\n for node in self.scene.mesh_nodes:\n if node not in existing_axes:\n old_nodes.append(node)\n\n for node in old_nodes:\n if node in self._axes:\n continue\n n = Node(mesh=self._axis_mesh, scale=np.ones(3) * node.mesh.scale * 0.5)\n self.scene.add_node(n, parent_node=node)\n self._axes[node] = n\n else:\n to_remove = set()\n for main_node in self._axes:\n if main_node in self.scene.mesh_nodes:\n self.scene.remove_node(self._axes[main_node])\n to_remove.add(main_node)\n for main_node in to_remove:\n self._axes.pop(main_node)\n\n def _remove_axes(self):\n for main_node in self._axes:\n axis_node = self._axes[main_node]\n self.scene.remove_node(axis_node)\n self._axes = {}\n\n def _location_to_x_y(self, location):\n if location == TextAlign.CENTER:\n return (self.viewport_size[0] / 2.0, self.viewport_size[1] / 2.0)\n elif location == TextAlign.CENTER_LEFT:\n return (TEXT_PADDING, self.viewport_size[1] / 2.0)\n elif location == 
TextAlign.CENTER_RIGHT:\n return (self.viewport_size[0] - TEXT_PADDING, self.viewport_size[1] / 2.0)\n elif location == TextAlign.BOTTOM_LEFT:\n return (TEXT_PADDING, TEXT_PADDING)\n elif location == TextAlign.BOTTOM_RIGHT:\n return (self.viewport_size[0] - TEXT_PADDING, TEXT_PADDING)\n elif location == TextAlign.BOTTOM_CENTER:\n return (self.viewport_size[0] / 2.0, TEXT_PADDING)\n elif location == TextAlign.TOP_LEFT:\n return (TEXT_PADDING, self.viewport_size[1] - TEXT_PADDING)\n elif location == TextAlign.TOP_RIGHT:\n return (self.viewport_size[0] - TEXT_PADDING, self.viewport_size[1] - TEXT_PADDING)\n elif location == TextAlign.TOP_CENTER:\n return (self.viewport_size[0] / 2.0, self.viewport_size[1] - TEXT_PADDING)\n\n\n__all__ = [\"Viewer\"]\n\n" ]
[ [ "numpy.sqrt", "numpy.clip", "numpy.eye", "numpy.cos", "numpy.linalg.norm", "numpy.sin", "numpy.all", "numpy.tan", "numpy.ones", "numpy.cross", "numpy.array" ] ]
saiskee/python3-geoplotlib
[ "8e108c7c4b90ebe056fd18c48cc064120321973e" ]
[ "geoplotlib/utils.py" ]
[ "from collections import defaultdict\nimport csv\nfrom datetime import datetime\nimport json\nfrom math import radians, cos, sin, asin, sqrt\nimport urllib.request, urllib.error, urllib.parse\nimport numpy as np\n\n\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Distance between geodesic coordinates http://www.movable-type.co.uk/scripts/latlong.html\n\n :param lon1: point 1 latitude\n :param lat1: point 1 longitude\n :param lon2: point 1 latitude\n :param lat2: point 2 longitude\n :return: distance in meters between points 1 and 2\n \"\"\"\n lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n m = 6367000 * c\n return m\n\n\nclass DataAccessObject():\n \"\"\"\n This class wraps data into a dict-like object\n \"\"\"\n\n def __init__(self, dict_or_df):\n \"\"\"\n Create a DataAccessObject either from a dictionary or a pandas.DataFrame\n \"\"\"\n if type(dict_or_df) == dict:\n self.dict = dict_or_df\n else:\n from pandas import DataFrame\n if type(dict_or_df) == DataFrame:\n self.dict = {col: dict_or_df[col].values for col in dict_or_df.columns}\n else:\n raise Exception('dict_or_df must be either a dictionary or a pandas.DataFrame')\n\n\n @staticmethod\n def from_dataframe(df):\n \"\"\"\n Loads data from a pandas DataFrame\n\n :param df: dataframe\n :return: a DataAccessObject\n \"\"\"\n import warnings\n warnings.warn('use ctor directly instead', DeprecationWarning)\n return DataAccessObject(df)\n\n\n def __getitem__(self, key):\n return self.dict[key]\n\n\n def __setitem__(self, key, value):\n assert type(value) == np.ndarray\n self.dict[key] = value\n\n\n def __delitem__(self, key):\n del self.dict[key]\n\n\n def rename(self, mapping):\n \"\"\"\n Rename fields\n\n :param mapping: a dict in the format {'oldkey1': 'newkey1', ...}\n \"\"\"\n for old_key, new_key in mapping:\n self.dict[new_key] = self.dict[old_key]\n 
del self.dict[old_key]\n\n\n def where(self, mask):\n \"\"\"\n :param mask: boolean mask\n :return: a DataAccessObject with a subset of rows matching mask\n \"\"\"\n assert len(mask) == len(self)\n return DataAccessObject({k: self.dict[k][mask] for k in self.dict})\n\n\n def groupby(self, field1, field2=None):\n if field2 is None:\n uniquevalues = list(set(self.dict[field1]))\n return [(v, self.where(self.dict[field1] == v)) for v in uniquevalues]\n else:\n uniquevalues = set([tuple(row) for row in np.vstack([self.dict[field1],self.dict[field2]]).T])\n return [((v1,v2), self.where((self.dict[field1] == v1) & (self.dict[field2] == v2))) \\\n for v1,v2 in uniquevalues]\n\n\n def head(self, n):\n \"\"\"\n Return a DataAccessObject containing the first n rows\n\n :param n: number of rows\n :return: DataAccessObject\n \"\"\"\n return DataAccessObject({k: self.dict[k][:n] for k in self.dict})\n\n\n def keys(self):\n \"\"\"\n :return: the keys (field names)\n \"\"\"\n return list(self.dict.keys())\n\n\n def values(self):\n \"\"\"\n :return: the values (field values)\n \"\"\"\n return list(self.dict.values())\n\n\n def __str__(self):\n return 'DataAccessObject(%s x %d)' % (str(list(self.dict.keys())), len(self))\n\n\n def __repr__(self):\n return self.__str__()\n\n\n def __len__(self):\n return len(list(self.dict.values())[0])\n\n\n\ndef read_csv(fname):\n \"\"\"\n Read a csv file into a DataAccessObject\n\n :param fname: filename\n \"\"\"\n values = defaultdict(list)\n with open(fname) as f:\n reader = csv.DictReader(f)\n for row in reader:\n for (k,v) in list(row.items()):\n values[k].append(v)\n npvalues = {k: np.array(values[k]) for k in list(values.keys())}\n for k in list(npvalues.keys()):\n for datatype in [np.int, np.float]:\n try:\n npvalues[k][:1].astype(datatype)\n npvalues[k] = npvalues[k].astype(datatype)\n break\n except:\n pass\n dao = DataAccessObject(npvalues)\n return dao\n\n\ndef epoch_to_str(epoch, fmt='%Y-%m-%d %H:%M:%S'):\n \"\"\"\n Convert a unix 
timestamp into date string\n\n :param epoch: unix timestamp\n :param fmt: date format\n :return: formatted date from timestamp\n \"\"\"\n return datetime.fromtimestamp(epoch).strftime(fmt)\n\n\ndef parse_raw_str(v):\n try:\n v = v.decode('utf-8')\n except:\n try:\n v = v.decode('latin1')\n except:\n pass\n return v\n\n\nclass BoundingBox():\n\n def __init__(self, north, west, south, east):\n \"\"\"\n Represent a map boundingbox\n\n :param north: northmost latitude\n :param west: westmost longitude\n :param south: southmost latitude\n :param east: eastmost longitude\n :return:\n \"\"\"\n self.north = north\n self.west = west\n self.south = south\n self.east = east\n\n\n @staticmethod\n def from_points(lons, lats):\n \"\"\"\n Compute the BoundingBox from a set of latitudes and longitudes\n\n :param lons: longitudes\n :param lats: latitudes\n :return: BoundingBox\n \"\"\"\n north, west = max(lats), min(lons)\n south, east = min(lats), max(lons)\n return BoundingBox(north=north, west=west, south=south, east=east)\n\n\n @staticmethod\n def from_bboxes(bboxes):\n \"\"\"\n Compute a BoundingBox enclosing all specified bboxes\n\n :param bboxes: a list of BoundingBoxes\n :return: BoundingBox\n \"\"\"\n north = max([b.north for b in bboxes])\n south = min([b.south for b in bboxes])\n west = min([b.west for b in bboxes])\n east = max([b.east for b in bboxes])\n return BoundingBox(north=north, west=west, south=south, east=east)\n\n\n def __str__(self):\n return 'BoundingBox(north=%.6f, west=%.6f, south=%.6f, east=%.6f)' % (self.north, self.west, self.south, self.east)\n\n\n @staticmethod\n def from_nominatim(query):\n url = urllib.request.urlopen('http://nominatim.openstreetmap.org/search.php?q=%s&format=json' % query)\n jo = json.load(url)\n\n if len(jo) == 0:\n raise Exception('No results found')\n\n south, north, west, east = list(map(float, jo[0]['boundingbox']))\n print(('bbox from Nominatim:', south, north, west, east))\n return BoundingBox(north=north, west=west, 
south=south, east=east)\n\n\nBoundingBox.WORLD = BoundingBox(north=85, west=-170, south=-85, east=190)\nBoundingBox.DK = BoundingBox(north=57.769, west=7.932, south=54.444, east=13.282)\nBoundingBox.DTU = BoundingBox(north=55.7925, west=12.5092, south=55.7784, east=12.5309)\nBoundingBox.KBH = BoundingBox(north=55.8190, west=12.0369, south=55.5582, east=12.7002)\nBoundingBox.DOWNTOWN = BoundingBox(north=55.728229, west=12.420230, south=55.629118, east=12.683902)\nBoundingBox.USA = BoundingBox(north=51.338994, west=-124.349040, south=14.851581, east=-56.849040)" ]
[ [ "numpy.array", "numpy.vstack" ] ]
brianchiang-tw/HackerRank
[ "02a30a0033b881206fa15b8d6b4ef99b2dc420c8", "02a30a0033b881206fa15b8d6b4ef99b2dc420c8" ]
[ "Python/Numpy/Dot and Cross/dot_and_corss.py", "Python/Numpy/Transpose and Flatten/tranpose_and_flatten.py" ]
[ "import numpy as np\n\nif __name__ == '__main__':\n\n dim = int(input())\n\n arr_a = []\n arr_b = []\n\n for i in range(dim):\n\n arr_a += list( map(int, input().split() ) ) \n\n for i in range(dim):\n\n arr_b += list( map(int, input().split() ) ) \n\n\n np_arr_a = np.array( arr_a)\n np_arr_a = np.reshape( np_arr_a, (dim, dim) )\n\n np_arr_b = np.array( arr_b)\n np_arr_b = np.reshape( np_arr_b, (dim, dim) )\n\n print( np.matmul(np_arr_a, np_arr_b) )\n \n\n\n\n", "import numpy as np\n\n\n\nif __name__ == '__main__':\n\n h, w = map( int, input().split() )\n\n # create a zero matrix of shape h x w\n matrix = np.zeros( shape = (h, w), dtype = np.int )\n\n for i in range(h):\n\n row_elements = list( map(int, input().split() ) )\n\n for j in range(w):\n matrix[i][j] = row_elements[j]\n \n\n # Task_#1: Matrix Transpose\n matrix_transpose = np.transpose( matrix )\n print( matrix_transpose )\n\n\n\n # Task_#2: Matrix Flatten\n matrix_flatten = matrix.flatten( )\n print( matrix_flatten )\n\n" ]
[ [ "numpy.reshape", "numpy.array", "numpy.matmul" ], [ "numpy.zeros", "numpy.transpose" ] ]
fochoao/cpython
[ "3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9", "3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9" ]
[ "Lib/site-packages/ginga/util/plots.py", "Lib/site-packages/ginga/AstroImage.py" ]
[ "#\n# plots.py -- Utility functions for plotting.\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nimport numpy\nimport matplotlib as mpl\nfrom matplotlib.figure import Figure\n# fix issue of negative numbers rendering incorrectly with default font\nmpl.rcParams['axes.unicode_minus'] = False\n\nfrom ginga.util import iqcalc\nfrom ginga.misc import Callback\n\nclass Plot(Callback.Callbacks):\n\n def __init__(self, figure=None, logger=None, width=500, height=500):\n Callback.Callbacks.__init__(self)\n\n if figure is None:\n dpi = 100\n wd_in, ht_in = float(width)/dpi, float(height)/dpi\n figure = Figure(figsize=(wd_in, ht_in), dpi=dpi)\n self.fig = figure\n if hasattr(self.fig, 'set_tight_layout'):\n self.fig.set_tight_layout(True)\n self.logger = logger\n self.fontsize = 10\n self.ax = None\n\n self.logx = False\n self.logy = False\n\n self.xdata = []\n self.ydata = []\n\n # For callbacks\n for name in ('draw-canvas', ):\n self.enable_callback(name)\n\n def get_figure(self):\n return self.fig\n\n def get_widget(self):\n return self.fig.canvas\n\n def add_axis(self, **kwdargs):\n self.ax = self.fig.add_subplot(111, **kwdargs)\n return self.ax\n\n def get_axis(self):\n return self.ax\n\n def set_axis(self, ax):\n self.ax = ax\n\n def set_titles(self, xtitle=None, ytitle=None, title=None,\n rtitle=None):\n if xtitle is not None:\n self.ax.set_xlabel(xtitle)\n if ytitle is not None:\n self.ax.set_ylabel(ytitle)\n if title is not None:\n self.ax.set_title(title)\n if rtitle is not None:\n pass\n ax = self.ax\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(self.fontsize)\n\n def clear(self):\n self.logger.debug('clearing canvas...')\n self.ax.cla()\n self.xdata = []\n self.ydata = []\n\n def draw(self):\n self.fig.canvas.draw()\n\n self.make_callback('draw-canvas')\n\n def plot(self, xarr, yarr, xtitle=None, ytitle=None, 
title=None,\n rtitle=None, **kwdargs):\n\n if self.ax is None:\n self.add_axis()\n\n if self.logx:\n self.ax.set_xscale('log')\n if self.logy:\n self.ax.set_yscale('log')\n\n self.xdata = xarr\n self.ydata = yarr\n\n self.set_titles(xtitle=xtitle, ytitle=ytitle, title=title,\n rtitle=rtitle)\n self.ax.grid(True)\n self.ax.plot(xarr, yarr, **kwdargs)\n\n for item in self.ax.get_xticklabels() + self.ax.get_yticklabels():\n item.set_fontsize(self.fontsize)\n\n # Make x axis labels a little more readable\n lbls = self.ax.xaxis.get_ticklabels()\n for lbl in lbls:\n lbl.set(rotation=45, horizontalalignment='right')\n\n #self.fig.tight_layout()\n\n self.draw()\n\n def get_data(self):\n return self.fig, self.xdata, self.ydata\n\nclass HistogramPlot(Plot):\n\n def histogram(self, data, numbins=2048,\n xtitle=None, ytitle=None, title=None, rtitle=None):\n minval = numpy.nanmin(data)\n maxval = numpy.nanmax(data)\n\n substval = (minval + maxval)/2.0\n data[numpy.isnan(data)] = substval\n\n dist, bins = numpy.histogram(data, bins=numbins, density=False)\n\n # used with 'steps-post' drawstyle, this gives correct histogram-steps\n x = bins\n y = numpy.append(dist, dist[-1])\n\n self.clear()\n self.plot(x, y, alpha=1.0, linewidth=1.0, linestyle='-',\n xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,\n drawstyle='steps-post')\n\n\nclass CutsPlot(Plot):\n\n def cuts(self, data,\n xtitle=None, ytitle=None, title=None, rtitle=None,\n color=None):\n \"\"\"data: pixel values along a line.\n \"\"\"\n y = data\n x = numpy.arange(len(data))\n\n self.plot(x, y, color=color, drawstyle='steps-mid',\n xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,\n alpha=1.0, linewidth=1.0, linestyle='-')\n\n\nclass ContourPlot(Plot):\n\n def __init__(self, *args, **kwargs):\n super(ContourPlot, self).__init__(*args, **kwargs)\n\n self.num_contours = 8\n self.plot_panx = 0\n self.plot_pany = 0\n self.plot_zoomlevel = 1.0\n\n def connect_zoom_callbacks(self):\n canvas = self.fig.canvas\n 
connect = canvas.mpl_connect\n # These are not ready for prime time...\n # connect(\"motion_notify_event\", self.plot_motion_notify)\n # connect(\"button_press_event\", self.plot_button_press)\n connect(\"scroll_event\", self.plot_scroll)\n\n def plot_contours_data(self, x, y, data, num_contours=None):\n # Make a contour plot\n if num_contours is None:\n num_contours = self.num_contours\n\n if self.ax is None:\n self.add_axis()\n\n ht, wd = data.shape\n\n self.ax.set_aspect('equal', adjustable='box')\n self.set_titles(title='Contours')\n #self.fig.tight_layout()\n #self.ax.grid(True)\n\n # Set pan position in contour plot\n self.plot_panx = float(x) / wd\n self.plot_pany = float(y) / ht\n\n self.ax.cla()\n self.ax.set_axis_bgcolor('black')\n\n try:\n # Create a contour plot\n self.xdata = numpy.arange(wd)\n self.ydata = numpy.arange(ht)\n colors = [ 'lightgreen' ] * num_contours\n cs = self.ax.contour(self.xdata, self.ydata, data, num_contours,\n colors=colors)\n # Mark the center of the object\n self.ax.plot([x], [y], marker='x', ms=20.0,\n color='cyan')\n\n # Set the pan and zoom position & redraw\n self.plot_panzoom()\n\n except Exception as e:\n self.logger.error(\"Error making contour plot: %s\" % (\n str(e)))\n\n def plot_contours(self, x, y, radius, image, num_contours=None):\n img_data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)\n\n self.plot_contours_data(x, y, img_data, num_contours=num_contours)\n\n def plot_panzoom(self):\n ht, wd = len(self.ydata), len(self.xdata)\n x = int(self.plot_panx * wd)\n y = int(self.plot_pany * ht)\n\n if self.plot_zoomlevel >= 1.0:\n scalefactor = 1.0 / self.plot_zoomlevel\n elif self.plot_zoomlevel < -1.0:\n scalefactor = - self.plot_zoomlevel\n else:\n # wierd condition?--reset to 1:1\n scalefactor = 1.0\n self.plot_zoomlevel = 1.0\n\n xdelta = int(scalefactor * (wd/2.0))\n ydelta = int(scalefactor * (ht/2.0))\n xlo, xhi = x-xdelta, x+xdelta\n # distribute remaining x space from plot\n if xlo < 0:\n xsh = 
abs(xlo)\n xlo, xhi = 0, min(wd-1, xhi+xsh)\n elif xhi >= wd:\n xsh = xhi - wd\n xlo, xhi = max(0, xlo-xsh), wd-1\n self.ax.set_xlim(xlo, xhi)\n\n ylo, yhi = y-ydelta, y+ydelta\n # distribute remaining y space from plot\n if ylo < 0:\n ysh = abs(ylo)\n ylo, yhi = 0, min(ht-1, yhi+ysh)\n elif yhi >= ht:\n ysh = yhi - ht\n ylo, yhi = max(0, ylo-ysh), ht-1\n self.ax.set_ylim(ylo, yhi)\n\n self.draw()\n\n def plot_zoom(self, val):\n self.plot_zoomlevel = val\n self.plot_panzoom()\n\n def plot_scroll(self, event):\n # Matplotlib only gives us the number of steps of the scroll,\n # positive for up and negative for down.\n direction = None\n if event.step > 0:\n #delta = 0.9\n self.plot_zoomlevel += 1.0\n elif event.step < 0:\n #delta = 1.1\n self.plot_zoomlevel -= 1.0\n\n self.plot_panzoom()\n\n # x1, x2 = self.ax.get_xlim()\n # y1, y2 = self.ax.get_ylim()\n # self.ax.set_xlim(x1*delta, x2*delta)\n # self.ax.set_ylim(y1*delta, y2*delta)\n # self.draw()\n return True\n\n def plot_button_press(self, event):\n if event.button == 1:\n self.plot_x, self.plot_y = event.x, event.y\n return True\n\n def plot_motion_notify(self, event):\n if event.button == 1:\n xdelta = event.x - self.plot_x\n #ydelta = event.y - self.plot_y\n ydelta = self.plot_y - event.y\n self.pan_plot(xdelta, ydelta)\n\n def pan_plot(self, xdelta, ydelta):\n x1, x2 = self.ax.get_xlim()\n y1, y2 = self.ax.get_ylim()\n\n self.ax.set_xlim(x1+xdelta, x2+xdelta)\n self.ax.set_ylim(y1+ydelta, y2+ydelta)\n\n self.draw()\n\n\nclass RadialPlot(Plot):\n\n def plot_radial(self, x, y, radius, image):\n\n img_data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)\n\n self.ax.cla()\n\n # Make a radial plot\n self.ax.set_xlim(-0.1, radius)\n\n self.set_titles(title=\"Radial plot\", xtitle='Radius [pixels]',\n ytitle='Pixel Value (ADU)')\n self.ax.grid(True)\n\n try:\n ht, wd = img_data.shape\n off_x, off_y = x1, y1\n maxval = numpy.nanmax(img_data)\n\n # create arrays of radius and value\n r = []\n v = []\n for i in 
range(0, wd):\n for j in range(0, ht):\n r.append( numpy.sqrt( (off_x + i - x)**2 + (off_y + j - y)**2 ) )\n v.append(img_data[j, i])\n r, v = numpy.array(r), numpy.array(v)\n\n # compute and plot radial fitting\n # note: you might wanna change `deg` here.\n coefficients = numpy.polyfit(x=r, y=v, deg=10)\n polynomial = numpy.poly1d(coefficients)\n\n x_curve = numpy.linspace(numpy.min(r), numpy.max(r), len(r))\n y_curve = polynomial(x_curve)\n\n yerror = 0 # for now, no error bars\n self.ax.errorbar(r, v, yerr=yerror, marker='x', ls='none',\n color='blue')\n self.ax.plot(x_curve, y_curve, '-', color='green', lw=2)\n\n #self.fig.tight_layout()\n\n self.draw()\n\n except Exception as e:\n self.logger.error(\"Error making radial plot: %s\" % (\n str(e)))\n\nclass FWHMPlot(Plot):\n\n def __init__(self, *args, **kwargs):\n super(FWHMPlot, self).__init__(*args, **kwargs)\n\n self.iqcalc = iqcalc.IQCalc(self.logger)\n\n def _plot_fwhm_axis(self, arr, iqcalc, skybg, color1, color2, color3):\n N = len(arr)\n X = numpy.array(list(range(N)))\n Y = arr\n # subtract sky background\n Y = Y - skybg\n maxv = Y.max()\n # clamp to 0..max\n Y = Y.clip(0, maxv)\n self.logger.debug(\"Y=%s\" % (str(Y)))\n self.ax.plot(X, Y, color=color1, marker='.')\n\n fwhm, mu, sdev, maxv = iqcalc.calc_fwhm(arr)\n # Make a little smoother gaussian curve by plotting intermediate\n # points\n XN = numpy.linspace(0.0, float(N), N*10)\n Z = numpy.array([iqcalc.gaussian(x, (mu, sdev, maxv))\n for x in XN])\n self.ax.plot(XN, Z, color=color1, linestyle=':')\n self.ax.axvspan(mu-fwhm/2.0, mu+fwhm/2.0,\n facecolor=color3, alpha=0.25)\n return (fwhm, mu, sdev, maxv)\n\n def plot_fwhm(self, x, y, radius, image, cutout_data=None, iqcalc=None):\n\n x0, y0, xarr, yarr = image.cutout_cross(x, y, radius)\n\n if iqcalc is None:\n iqcalc = self.iqcalc\n\n self.ax.cla()\n\n #self.ax.set_aspect('equal', adjustable='box')\n self.set_titles(ytitle='Brightness', xtitle='Pixels',\n title='FWHM')\n self.ax.grid(True)\n\n # 
Make a FWHM plot\n try:\n # get median value from the cutout area\n if cutout_data is None:\n cutout_data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)\n\n skybg = numpy.median(cutout_data)\n self.logger.debug(\"cutting x=%d y=%d r=%d med=%f\" % (\n x, y, radius, skybg))\n\n self.logger.debug(\"xarr=%s\" % (str(xarr)))\n fwhm_x, mu, sdev, maxv = self._plot_fwhm_axis(xarr, iqcalc, skybg,\n 'blue', 'blue', 'skyblue')\n\n self.logger.debug(\"yarr=%s\" % (str(yarr)))\n fwhm_y, mu, sdev, maxv = self._plot_fwhm_axis(yarr, iqcalc, skybg,\n 'green', 'green', 'seagreen')\n\n self.ax.legend(('data x', 'gauss x', 'data y', 'gauss y'),\n loc='upper right', shadow=False, fancybox=False,\n prop={'size': 8}, labelspacing=0.2)\n self.set_titles(title=\"FWHM X: %.2f Y: %.2f\" % (fwhm_x, fwhm_y))\n\n #self.fig.tight_layout()\n\n self.draw()\n\n except Exception as e:\n self.logger.error(\"Error making fwhm plot: %s\" % (\n str(e)))\n\n\n#END\n", "#\n# AstroImage.py -- Abstraction of an astronomical data image.\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nimport sys\nimport math\nimport traceback\n\nimport numpy\n\nfrom ginga.util import wcsmod, io_fits\nfrom ginga.util import wcs, iqcalc\nfrom ginga.BaseImage import BaseImage, ImageError, Header\nfrom ginga.misc import Bunch\nfrom ginga import trcalc\nimport ginga.util.six as six\nfrom ginga.util.six.moves import map\n\n\nclass AstroHeader(Header):\n pass\n\n\nclass AstroImage(BaseImage):\n \"\"\"\n Abstraction of an astronomical data (image).\n\n NOTE: this module is NOT thread-safe!\n \"\"\"\n # class variables for WCS and IO can be set\n wcsClass = None\n ioClass = None\n\n @classmethod\n def set_wcsClass(cls, klass):\n cls.wcsClass = klass\n\n @classmethod\n def set_ioClass(cls, klass):\n cls.ioClass = klass\n\n def __init__(self, data_np=None, metadata=None, logger=None,\n name=None, wcsclass=wcsClass, ioclass=ioClass,\n 
inherit_primary_header=False):\n\n BaseImage.__init__(self, data_np=data_np, metadata=metadata,\n logger=logger, name=name)\n\n # wcsclass specifies a pluggable WCS module\n if wcsclass is None:\n wcsclass = wcsmod.WCS\n self.wcs = wcsclass(self.logger)\n\n # ioclass specifies a pluggable IO module\n if ioclass is None:\n ioclass = io_fits.fitsLoaderClass\n self.io = ioclass(self.logger)\n self.io.register_type('image', self.__class__)\n\n self.inherit_primary_header = inherit_primary_header\n if self.inherit_primary_header:\n # User wants to inherit from primary header--this will hold it\n self._primary_hdr = AstroHeader()\n else:\n self._primary_hdr = None\n\n if metadata is not None:\n header = self.get_header()\n self.wcs.load_header(header)\n\n # For navigating multidimensional data\n self.naxispath = []\n self.revnaxis = []\n self._md_data = None\n\n def load_hdu(self, hdu, fobj=None, naxispath=None,\n inherit_primary_header=None):\n\n if self.io is None:\n # need image loader for the fromHDU() call below\n raise ImageError(\"No IO loader defined\")\n\n self.clear_metadata()\n\n # collect HDU header\n ahdr = self.get_header()\n self.io.fromHDU(hdu, ahdr)\n\n # Set PRIMARY header\n if inherit_primary_header is None:\n inherit_primary_header = self.inherit_primary_header\n\n if inherit_primary_header and (fobj is not None):\n if self._primary_hdr is None:\n self._primary_hdr = AstroHeader()\n\n self.io.fromHDU(fobj[0], self._primary_hdr)\n\n data = hdu.data\n if data is None:\n data = numpy.zeros((0, 0))\n elif not isinstance(data, numpy.ndarray):\n data = numpy.zeros((0, 0))\n elif 0 in data.shape:\n data = numpy.zeros((0, 0))\n elif len(data.shape) < 2:\n # Expand 1D arrays into 1xN array\n data = data.reshape((1, data.shape[0]))\n\n # this is a handle to the full data array\n self._md_data = data\n # this will get reset in set_naxispath() if array is\n # multidimensional\n self._data = data\n\n if naxispath is None:\n naxispath = []\n\n # Set naxispath to 
drill down to 2D data slice\n if len(naxispath) == 0:\n naxispath = ([0] * (len(data.shape) - 2))\n\n self.set_naxispath(naxispath)\n\n # Try to make a wcs object on the header\n self.wcs.load_header(hdu.header, fobj=fobj)\n\n def load_file(self, filespec, **kwargs):\n\n if self.io is None:\n raise ImageError(\"No IO loader defined\")\n\n self.io.load_file(filespec, dstobj=self, **kwargs)\n\n def load_buffer(self, data, dims, dtype, byteswap=False,\n metadata=None):\n data = numpy.fromstring(data, dtype=dtype)\n if byteswap:\n data.byteswap(True)\n data = data.reshape(dims)\n self.set_data(data, metadata=metadata)\n\n def get_mddata(self):\n return self._md_data\n\n def set_naxispath(self, naxispath):\n \"\"\"Choose a slice out of multidimensional data.\n \"\"\"\n revnaxis = list(naxispath)\n revnaxis.reverse()\n\n # construct slice view and extract it\n view = revnaxis + [slice(None), slice(None)]\n data = self.get_mddata()[view]\n\n if len(data.shape) != 2:\n raise ImageError(\n \"naxispath does not lead to a 2D slice: {}\".format(naxispath))\n\n self.naxispath = naxispath\n self.revnaxis = revnaxis\n\n self.set_data(data)\n\n def set_wcs(self, wcs):\n self.wcs = wcs\n\n def set_io(self, io):\n self.io = io\n\n def get_data_size(self):\n return self.get_size()\n\n def get_header(self, create=True):\n try:\n # By convention, the fits header is stored in a dictionary\n # under the metadata keyword 'header'\n hdr = self.metadata['header']\n\n if self.inherit_primary_header and self._primary_hdr is not None:\n # Inherit PRIMARY header for display but keep metadata intact\n displayhdr = AstroHeader()\n for key in hdr.keyorder:\n card = hdr.get_card(key)\n bnch = displayhdr.__setitem__(card.key, card.value)\n bnch.comment = card.comment\n for key in self._primary_hdr.keyorder:\n if key not in hdr:\n card = self._primary_hdr.get_card(key)\n bnch = displayhdr.__setitem__(card.key, card.value)\n bnch.comment = card.comment\n else:\n # Normal, separate header\n displayhdr 
= hdr\n\n except KeyError as e:\n if not create:\n raise e\n hdr = AstroHeader()\n self.metadata['header'] = hdr\n displayhdr = hdr\n\n return displayhdr\n\n def get_keyword(self, kwd, *args):\n \"\"\"Get an item from the fits header, if any.\"\"\"\n try:\n kwds = self.get_header()\n return kwds[kwd]\n except KeyError:\n # return a default if there is one\n if len(args) > 0:\n return args[0]\n raise KeyError(kwd)\n\n def get_keywords_list(self, *args):\n return list(map(self.get_keyword, args))\n\n def set_keyword(self, kwd, value, create=True):\n kwds = self.get_header(create=create)\n kwd = kwd.upper()\n if not create:\n prev = kwds[kwd] # noqa, this raises KeyError\n kwds[kwd] = value\n\n def update_keywords(self, keyDict):\n hdr = self.get_header()\n # Upcase all keywords\n for kwd, val in keyDict.items():\n hdr[kwd.upper()] = val\n\n # Try to make a wcs object on the header\n if hasattr(self, 'wcs'):\n self.wcs.load_header(hdr)\n\n def set_keywords(self, **kwds):\n \"\"\"Set an item in the fits header, if any.\"\"\"\n return self.update_keywords(kwds)\n\n def update_data(self, data_np, metadata=None, astype=None):\n \"\"\"DO NOT USE: this method will be deprecated!\n \"\"\"\n self.set_data(data_np.copy(), metadata=metadata,\n astype=astype)\n\n def update_metadata(self, keyDict):\n for key, val in keyDict.items():\n self.metadata[key] = val\n\n # refresh the WCS\n if hasattr(self, 'wcs'):\n header = self.get_header()\n self.wcs.load_header(header)\n\n def clear_all(self):\n # clear metadata and data\n super(AstroImage, self).clear_all()\n\n # unreference full data array\n self._md_data = self._data\n\n def transfer(self, other, astype=None):\n data = self._get_data()\n other.update_data(data, astype=astype)\n other.update_metadata(self.metadata)\n\n def copy(self, astype=None):\n data = self._get_data()\n other = AstroImage(data, logger=self.logger)\n self.transfer(other, astype=astype)\n return other\n\n def save_as_file(self, filepath, **kwdargs):\n data = 
self._get_data()\n header = self.get_header()\n self.io.save_as_file(filepath, data, header, **kwdargs)\n\n def pixtocoords(self, x, y, system=None, coords='data'):\n args = [x, y] + self.revnaxis\n return self.wcs.pixtocoords(args, system=system, coords=coords)\n\n def spectral_coord(self, coords='data'):\n args = [0, 0] + self.revnaxis\n return self.wcs.spectral_coord(args, coords=coords)\n\n def pixtoradec(self, x, y, format='deg', coords='data'):\n args = [x, y] + self.revnaxis\n ra_deg, dec_deg = self.wcs.pixtoradec(args, coords=coords)\n\n if format == 'deg':\n return ra_deg, dec_deg\n return wcs.deg2fmt(ra_deg, dec_deg, format)\n\n def radectopix(self, ra_deg, dec_deg, format='deg', coords='data'):\n if format != 'deg':\n # convert coordinates to degrees\n ra_deg = wcs.lon_to_deg(ra_deg)\n dec_deg = wcs.lat_to_deg(dec_deg)\n return self.wcs.radectopix(ra_deg, dec_deg, coords=coords,\n naxispath=self.revnaxis)\n\n # -----> TODO: merge into wcs.py ?\n #\n def get_starsep_XY(self, x1, y1, x2, y2):\n # source point\n ra_org, dec_org = self.pixtoradec(x1, y1)\n\n # destination point\n ra_dst, dec_dst = self.pixtoradec(x2, y2)\n\n return wcs.get_starsep_RaDecDeg(ra_org, dec_org, ra_dst, dec_dst)\n\n def calc_radius_xy(self, x, y, radius_deg):\n \"\"\"Calculate a radius (in pixels) from the point (x, y) to a circle\n defined by radius in degrees.\n \"\"\"\n # calculate ra/dec of x,y pixel\n ra_deg, dec_deg = self.pixtoradec(x, y)\n\n # Calculate position 1 degree from the given one\n # NOTE: this needs to add in DEC, not RA\n ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,\n 0.0, 1.0)\n\n # Calculate the length of this segment--it is pixels/deg\n x2, y2 = self.radectopix(ra2_deg, dec2_deg)\n px_per_deg_e = math.sqrt(math.fabs(x2-x)**2 + math.fabs(y2-y)**2)\n\n # calculate radius based on desired radius_deg\n radius_px = px_per_deg_e * radius_deg\n return radius_px\n\n def calc_radius_deg2pix(self, ra_deg, dec_deg, delta_deg,\n equinox=None):\n x, y = 
self.radectopix(ra_deg, dec_deg, equinox=equinox)\n return self.calc_radius_xy(x, y, delta_deg)\n\n def add_offset_xy(self, x, y, delta_deg_x, delta_deg_y):\n # calculate ra/dec of x,y pixel\n ra_deg, dec_deg = self.pixtoradec(x, y)\n\n # add offsets\n ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,\n delta_deg_x, delta_deg_y)\n\n # then back to new pixel coords\n x2, y2 = self.radectopix(ra2_deg, dec2_deg)\n\n return (x2, y2)\n\n def calc_radius_center(self, delta_deg):\n return self.calc_radius_xy(float(self.width / 2.0),\n float(self.height / 2.0),\n delta_deg)\n\n def calc_compass(self, x, y, len_deg_e, len_deg_n):\n\n # Get east and north coordinates\n xe, ye = self.add_offset_xy(x, y, len_deg_e, 0.0)\n xe = int(round(xe))\n ye = int(round(ye))\n xn, yn = self.add_offset_xy(x, y, 0.0, len_deg_n)\n xn = int(round(xn))\n yn = int(round(yn))\n\n return (x, y, xn, yn, xe, ye)\n\n def calc_compass_radius(self, x, y, radius_px):\n xe, ye = self.add_offset_xy(x, y, 1.0, 0.0)\n xn, yn = self.add_offset_xy(x, y, 0.0, 1.0)\n\n # now calculate the length in pixels of those arcs\n # (planar geometry is good enough here)\n px_per_deg_e = math.sqrt(math.fabs(ye - y)**2 + math.fabs(xe - x)**2)\n px_per_deg_n = math.sqrt(math.fabs(yn - y)**2 + math.fabs(xn - x)**2)\n\n # now calculate the arm length in degrees for each arm\n # (this produces same-length arms)\n len_deg_e = radius_px / px_per_deg_e\n len_deg_n = radius_px / px_per_deg_n\n\n return self.calc_compass(x, y, len_deg_e, len_deg_n)\n\n def calc_compass_center(self):\n # calculate center of data\n x = float(self.width) / 2.0\n y = float(self.height) / 2.0\n\n # radius we want the arms to be (approx 1/4 the smallest dimension)\n radius_px = float(min(self.width, self.height)) / 4.0\n\n return self.calc_compass_radius(x, y, radius_px)\n #\n # <----- TODO: merge this into wcs.py ?\n\n def get_wcs_rotation_deg(self):\n header = self.get_header()\n (rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)\n 
return rot\n\n def rotate(self, deg, update_wcs=False):\n #old_deg = self.get_wcs_rotation_deg()\n\n super(AstroImage, self).rotate(deg)\n\n # TODO: currently this is not working!\n ## if update_wcs:\n ## self.wcs.rotate(deg)\n\n def mosaic_inline(self, imagelist, bg_ref=None, trim_px=None,\n merge=False, allow_expand=True, expand_pad_deg=0.01,\n max_expand_pct=None,\n update_minmax=True, suppress_callback=False):\n \"\"\"Drops new images into the current image (if there is room),\n relocating them according the WCS between the two images.\n \"\"\"\n # Get our own (mosaic) rotation and scale\n header = self.get_header()\n ((xrot_ref, yrot_ref),\n (cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)\n\n scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)\n\n # drop each image in the right place in the new data array\n mydata = self._get_data()\n\n count = 1\n res = []\n for image in imagelist:\n name = image.get('name', 'image%d' % (count))\n count += 1\n\n data_np = image._get_data()\n\n # Calculate sky position at the center of the piece\n ctr_x, ctr_y = trcalc.get_center(data_np)\n ra, dec = image.pixtoradec(ctr_x, ctr_y)\n\n # User specified a trim? 
If so, trim edge pixels from each\n # side of the array\n ht, wd = data_np.shape[:2]\n if trim_px:\n xlo, xhi = trim_px, wd - trim_px\n ylo, yhi = trim_px, ht - trim_px\n data_np = data_np[ylo:yhi, xlo:xhi, ...]\n ht, wd = data_np.shape[:2]\n\n # If caller asked us to match background of pieces then\n # get the median of this piece\n if bg_ref is not None:\n bg = iqcalc.get_median(data_np)\n bg_inc = bg_ref - bg\n data_np = data_np + bg_inc\n\n # Determine max/min to update our values\n if update_minmax:\n maxval = numpy.nanmax(data_np)\n minval = numpy.nanmin(data_np)\n self.maxval = max(self.maxval, maxval)\n self.minval = min(self.minval, minval)\n\n # Get rotation and scale of piece\n header = image.get_header()\n ((xrot, yrot),\n (cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)\n self.logger.debug(\"image(%s) xrot=%f yrot=%f cdelt1=%f \"\n \"cdelt2=%f\" % (name, xrot, yrot, cdelt1, cdelt2))\n\n # scale if necessary\n # TODO: combine with rotation?\n if (not numpy.isclose(math.fabs(cdelt1), scale_x) or\n not numpy.isclose(math.fabs(cdelt2), scale_y)):\n nscale_x = math.fabs(cdelt1) / scale_x\n nscale_y = math.fabs(cdelt2) / scale_y\n self.logger.debug(\"scaling piece by x(%f), y(%f)\" % (\n nscale_x, nscale_y))\n data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(\n data_np, 0, 0, wd-1, ht-1, nscale_x, nscale_y,\n logger=self.logger)\n\n # Rotate piece into our orientation, according to wcs\n rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref\n\n flip_x = False\n flip_y = False\n\n # Optomization for 180 rotations\n if (numpy.isclose(math.fabs(rot_dx), 180.0) or\n numpy.isclose(math.fabs(rot_dy), 180.0)):\n rotdata = trcalc.transform(data_np,\n flip_x=True, flip_y=True)\n rot_dx = 0.0\n rot_dy = 0.0\n else:\n rotdata = data_np\n\n # Finish with any necessary rotation of piece\n if not numpy.isclose(rot_dy, 0.0):\n rot_deg = rot_dy\n self.logger.debug(\"rotating %s by %f deg\" % (name, rot_deg))\n rotdata = trcalc.rotate(rotdata, rot_deg,\n 
#rotctr_x=ctr_x, rotctr_y=ctr_y\n logger=self.logger)\n\n # Flip X due to negative CDELT1\n if numpy.sign(cdelt1) != numpy.sign(cdelt1_ref):\n flip_x = True\n\n # Flip Y due to negative CDELT2\n if numpy.sign(cdelt2) != numpy.sign(cdelt2_ref):\n flip_y = True\n\n if flip_x or flip_y:\n rotdata = trcalc.transform(rotdata,\n flip_x=flip_x, flip_y=flip_y)\n\n # Get size and data of new image\n ht, wd = rotdata.shape[:2]\n ctr_x, ctr_y = trcalc.get_center(rotdata)\n\n # Find location of image piece (center) in our array\n x0, y0 = self.radectopix(ra, dec)\n\n # Merge piece as closely as possible into our array\n # Unfortunately we lose a little precision rounding to the\n # nearest pixel--can't be helped with this approach\n x0, y0 = int(round(x0)), int(round(y0))\n self.logger.debug(\"Fitting image '%s' into mosaic at %d,%d\" % (\n name, x0, y0))\n\n # This is for useful debugging info only\n my_ctr_x, my_ctr_y = trcalc.get_center(mydata)\n off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y\n self.logger.debug(\"centering offsets: %d,%d\" % (off_x, off_y))\n\n # Sanity check piece placement\n xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x\n ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y\n assert (xhi - xlo == wd), \\\n Exception(\"Width differential %d != %d\" % (xhi - xlo, wd))\n assert (yhi - ylo == ht), \\\n Exception(\"Height differential %d != %d\" % (yhi - ylo, ht))\n\n mywd, myht = self.get_size()\n if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:\n if not allow_expand:\n raise Exception(\"New piece doesn't fit on image and \"\n \"allow_expand=False\")\n\n # <-- Resize our data array to allow the new image\n\n # determine amount to pad expansion by\n expand_x = max(int(expand_pad_deg / scale_x), 0)\n expand_y = max(int(expand_pad_deg / scale_y), 0)\n\n nx1_off, nx2_off = 0, 0\n if xlo < 0:\n nx1_off = abs(xlo) + expand_x\n if xhi > mywd:\n nx2_off = (xhi - mywd) + expand_x\n xlo, xhi = xlo + nx1_off, xhi + nx1_off\n\n ny1_off, ny2_off = 0, 0\n if ylo < 0:\n ny1_off = abs(ylo) + 
expand_y\n if yhi > myht:\n ny2_off = (yhi - myht) + expand_y\n ylo, yhi = ylo + ny1_off, yhi + ny1_off\n\n new_wd = mywd + nx1_off + nx2_off\n new_ht = myht + ny1_off + ny2_off\n\n # sanity check on new mosaic size\n old_area = mywd * myht\n new_area = new_wd * new_ht\n expand_pct = new_area / old_area\n if ((max_expand_pct is not None) and\n (expand_pct > max_expand_pct)):\n raise Exception(\"New area exceeds current one by %.2f %%;\"\n \"increase max_expand_pct (%.2f) to allow\" %\n (expand_pct*100, max_expand_pct))\n\n # go for it!\n new_data = numpy.zeros((new_ht, new_wd))\n # place current data into new data\n new_data[ny1_off:ny1_off+myht, nx1_off:nx1_off+mywd] = \\\n mydata\n self._data = new_data\n mydata = new_data\n\n if (nx1_off > 0) or (ny1_off > 0):\n # Adjust our WCS for relocation of the reference pixel\n crpix1, crpix2 = self.get_keywords_list('CRPIX1', 'CRPIX2')\n kwds = dict(CRPIX1=crpix1 + nx1_off,\n CRPIX2=crpix2 + ny1_off)\n self.update_keywords(kwds)\n\n # fit image piece into our array\n try:\n if merge:\n mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]\n else:\n idx = (mydata[ylo:yhi, xlo:xhi, ...] 
== 0.0)\n mydata[ylo:yhi, xlo:xhi, ...][idx] = \\\n rotdata[0:ht, 0:wd, ...][idx]\n\n except Exception as e:\n self.logger.error(\"Error fitting tile: %s\" % (str(e)))\n raise\n\n res.append((xlo, ylo, xhi, yhi))\n\n # TODO: recalculate min and max values\n # Can't use usual techniques because it adds too much time to the\n # mosacing\n #self._set_minmax()\n\n # Notify watchers that our data has changed\n if not suppress_callback:\n self.make_callback('modified')\n\n return res\n\n def info_xy(self, data_x, data_y, settings):\n # Get the value under the data coordinates\n try:\n # We report the value across the pixel, even though the coords\n # change halfway across the pixel\n value = self.get_data_xy(int(data_x+0.5), int(data_y+0.5))\n\n except Exception as e:\n value = None\n\n system = settings.get('wcs_coords', None)\n format = settings.get('wcs_display', 'sexagesimal')\n ra_lbl, dec_lbl = six.unichr(945), six.unichr(948)\n\n # Calculate WCS coords, if available\n try:\n if self.wcs is None:\n self.logger.debug(\"No WCS for this image\")\n ra_txt = dec_txt = 'NO WCS'\n\n elif self.wcs.coordsys == 'raw':\n self.logger.debug(\"No coordinate system determined\")\n ra_txt = dec_txt = 'NO WCS'\n\n elif self.wcs.coordsys == 'pixel':\n args = [data_x, data_y] + self.revnaxis\n x, y = self.wcs.pixtosystem(args, system=system, coords='data')\n ra_txt = \"%+.3f\" % (x)\n dec_txt = \"%+.3f\" % (y)\n ra_lbl, dec_lbl = \"X\", \"Y\"\n\n else:\n args = [data_x, data_y] + self.revnaxis\n\n lon_deg, lat_deg = self.wcs.pixtosystem(\n args, system=system, coords='data')\n\n if format == 'sexagesimal':\n if system in ('galactic', 'ecliptic'):\n sign, deg, min, sec = wcs.degToDms(lon_deg,\n isLatitude=False)\n ra_txt = '+%03d:%02d:%06.3f' % (deg, min, sec)\n else:\n deg, min, sec = wcs.degToHms(lon_deg)\n ra_txt = '%02d:%02d:%06.3f' % (deg, min, sec)\n\n sign, deg, min, sec = wcs.degToDms(lat_deg)\n if sign < 0:\n sign = '-'\n else:\n sign = '+'\n dec_txt = '%s%02d:%02d:%06.3f' % 
(sign, deg, min, sec)\n\n else:\n ra_txt = '%+10.7f' % (lon_deg)\n dec_txt = '%+10.7f' % (lat_deg)\n\n if system == 'galactic':\n ra_lbl, dec_lbl = \"l\", \"b\"\n elif system == 'ecliptic':\n ra_lbl, dec_lbl = six.unichr(0x03BB), six.unichr(0x03B2)\n elif system == 'helioprojective':\n ra_txt = \"%+5.3f\" % (lon_deg*3600)\n dec_txt = \"%+5.3f\" % (lat_deg*3600)\n ra_lbl, dec_lbl = \"x-Solar\", \"y-Solar\"\n\n except Exception as e:\n self.logger.warning(\"Bad coordinate conversion: %s\" % (\n str(e)))\n ra_txt = dec_txt = 'BAD WCS'\n try:\n # log traceback, if possible\n (type_, value_, tb) = sys.exc_info()\n tb_str = \"\".join(traceback.format_tb(tb))\n self.logger.error(\"Traceback:\\n%s\" % (tb_str))\n except Exception:\n tb_str = \"Traceback information unavailable.\"\n self.logger.error(tb_str)\n\n info = Bunch.Bunch(itype='astro', data_x=data_x, data_y=data_y,\n x=data_x, y=data_y,\n ra_txt=ra_txt, dec_txt=dec_txt,\n ra_lbl=ra_lbl, dec_lbl=dec_lbl,\n value=value)\n return info\n\n# END\n" ]
[ [ "numpy.nanmax", "numpy.polyfit", "numpy.poly1d", "numpy.sqrt", "matplotlib.figure.Figure", "numpy.isnan", "numpy.arange", "numpy.median", "numpy.nanmin", "numpy.min", "numpy.max", "numpy.append", "numpy.array", "numpy.histogram" ], [ "numpy.nanmax", "numpy.nanmin", "numpy.sign", "numpy.fromstring", "numpy.zeros", "numpy.isclose" ] ]
newby-jay/AshbyaTracking
[ "9e80513d52281cf51d35bfef1e164148a9d3439e" ]
[ "Python/csv2DT.py" ]
[ "#!/opt/local/bin/python2.7\nfrom __future__ import division\nfrom __future__ import print_function\nfrom pylab import *\nimport pandas as pd\nimport os\nimport subprocess\nimport time\nimport sys\nfrom scipy.io import savemat, loadmat\n# tdpath = '/Users/jaynewby/Dropbox/CF/PT/Net-Tracker/TrackingTools/'\n# sys.path.append(tdpath)\n# from TrackingData import *\n\nfilename = ' '.join(sys.argv[1:])\n\n# print('')\n# print(filename)\n# print('')\n\nData = pd.read_csv(filename, index_col=0)\n\nN = Data.frame.size\nM = Data.particle.max()\npaths = zeros((3, N + M))*nan\nNa = 0\nfor p, g in Data.groupby('particle'):\n Np = g.frame.size\n path = array((g[['x', 'y', 'frame']]))\n paths[:, Na:Na+Np] = path.T\n Na += Np + 1\n\noutdict = {'Seq_paths': array([u'Array'], dtype='<U5'),\n 'paths': paths}\nsavemat('csv2DToutput.mat', outdict, format='4')\n" ]
[ [ "pandas.read_csv", "scipy.io.savemat" ] ]
deepmind/distrax
[ "c3f04c64325024bd5a93a4dfe4690249717f753f" ]
[ "distrax/_src/distributions/mvn_diag_test.py" ]
[ "# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `mvn_diag.py`.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport chex\nfrom distrax._src.distributions import mvn_diag\nfrom distrax._src.distributions import normal\nfrom distrax._src.utils import equivalence\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\nRTOL = 1e-3\n\n\nclass MultivariateNormalDiagTest(\n equivalence.EquivalenceTest, parameterized.TestCase):\n\n def setUp(self):\n # pylint: disable=too-many-function-args\n super().setUp(mvn_diag.MultivariateNormalDiag)\n self.assertion_fn = lambda x, y: np.testing.assert_allclose(x, y, rtol=RTOL)\n\n @parameterized.named_parameters(\n ('1d std normal', {'scale_diag': np.ones((1,))}),\n ('2d std normal', {'loc': [0., 0.], 'scale_diag': [1., 1.]}),\n ('2d std normal, None loc', {'scale_diag': [1., 1.]}),\n ('2d std normal, None scale_diag', {'loc': [0., 0.]}),\n ('rank-2 parameters',\n {'loc': np.zeros((3, 2)), 'scale_diag': np.ones((3, 2))}),\n ('broadcasted scale_diag',\n {'loc': np.zeros((3, 2)), 'scale_diag': np.ones((2,))}),\n ('broadcasted loc',\n {'loc': np.zeros((2)), 'scale_diag': np.ones((3, 2,))}),\n )\n def test_event_shape(self, distr_params):\n distr_params = {\n k: np.asarray(v, dtype=np.float32) for k, v in 
distr_params.items()}\n super()._test_event_shape((), distr_params)\n\n @parameterized.named_parameters(\n ('1d std normal', {'scale_diag': np.ones((1,))}),\n ('2d std normal', {'loc': [0., 0.], 'scale_diag': [1., 1.]}),\n ('2d std normal, None loc', {'scale_diag': [1., 1.]}),\n ('2d std normal, None scale_diag', {'loc': [0., 0.]}),\n ('rank-2 parameters',\n {'loc': np.zeros((3, 2)), 'scale_diag': np.ones((3, 2))}),\n ('broadcasted scale_diag',\n {'loc': np.zeros((3, 2)), 'scale_diag': np.ones((2,))}),\n ('broadcasted loc',\n {'loc': np.zeros((2)), 'scale_diag': np.ones((3, 2,))}),\n )\n def test_batch_shape(self, distr_params):\n distr_params = {\n k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}\n super()._test_batch_shape((), distr_params)\n\n def test_invalid_parameters(self):\n self._test_raises_error(dist_kwargs={'loc': None, 'scale_diag': None})\n self._test_raises_error(\n dist_kwargs={'loc': None, 'scale_diag': np.array(1.)})\n self._test_raises_error(\n dist_kwargs={'loc': np.array(1.), 'scale_diag': None})\n self._test_raises_error(\n dist_kwargs={'loc': np.zeros((3, 5)), 'scale_diag': np.ones((3, 4))})\n\n @chex.all_variants\n @parameterized.named_parameters(\n ('1d std normal, no shape',\n {'scale_diag': np.ones((1,))},\n ()),\n ('2d std normal, no shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n ()),\n ('2d std normal, None loc, no shape',\n {'scale_diag': [1., 1.]},\n ()),\n ('2d std normal, None scale_diag, no shape',\n {'loc': [0., 0.]},\n ()),\n ('2d std normal, int shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n 3),\n ('2d std normal, None loc, int shape',\n {'scale_diag': [1., 1.]},\n 3),\n ('2d std normal, None scale_diag, int shape',\n {'loc': [0., 0.]},\n 3),\n ('2d std normal, 1-tuple shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n (3,)),\n ('2d std normal, None loc, 1-tuple shape',\n {'scale_diag': [1., 1.]},\n (3,)),\n ('2d std normal, None scale_diag, 1-tuple shape',\n {'loc': [0., 0.]},\n 
(3,)),\n ('2d std normal, 2-tuple shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n (3, 4)),\n ('2d std normal, None loc, 2-tuple shape',\n {'scale_diag': [1., 1.]},\n (3, 4)),\n ('2d std normal, None scale_diag, 2-tuple shape',\n {'loc': [0., 0.]},\n (3, 4)),\n ('rank-2 parameters, 2-tuple shape',\n {'loc': np.zeros((3, 2)),\n 'scale_diag': np.ones((3, 2))},\n (5, 4)),\n ('broadcasted scale_diag',\n {'loc': np.zeros((3, 2)),\n 'scale_diag': np.ones((2,))},\n 5),\n ('broadcasted loc',\n {'loc': np.zeros((2)),\n 'scale_diag': np.ones((3, 2,))},\n 5),\n )\n def test_sample_shape(self, distr_params, sample_shape):\n distr_params = {\n k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}\n super()._test_sample_shape(\n dist_args=(),\n dist_kwargs=distr_params,\n sample_shape=sample_shape)\n\n @chex.all_variants\n @jax.numpy_rank_promotion('raise')\n @parameterized.named_parameters(\n ('1d std normal, no shape',\n {'scale_diag': np.ones((1,))},\n ()),\n ('2d std normal, no shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n ()),\n ('2d std normal, None loc, no shape',\n {'scale_diag': [1., 1.]},\n ()),\n ('2d std normal, None scale_diag, no shape',\n {'loc': [0., 0.]},\n ()),\n ('2d std normal, int shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n 3),\n ('2d std normal, None loc, int shape',\n {'scale_diag': [1., 1.]},\n 3),\n ('2d std normal, None scale_diag, int shape',\n {'loc': [0., 0.]},\n 3),\n ('2d std normal, 1-tuple shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n (3,)),\n ('2d std normal, None loc, 1-tuple shape',\n {'scale_diag': [1., 1.]},\n (3,)),\n ('2d std normal, None scale_diag, 1-tuple shape',\n {'loc': [0., 0.]},\n (3,)),\n ('2d std normal, 2-tuple shape',\n {'loc': [0., 0.],\n 'scale_diag': [1., 1.]},\n (3, 4)),\n ('2d std normal, None loc, 2-tuple shape',\n {'scale_diag': [1., 1.]},\n (3, 4)),\n ('2d std normal, None scale_diag, 2-tuple shape',\n {'loc': [0., 0.]},\n (3, 4)),\n ('rank-2 parameters, 2-tuple 
shape',\n {'loc': np.zeros((3, 2)),\n 'scale_diag': np.ones((3, 2))},\n (5, 4)),\n ('broadcasted scale_diag',\n {'loc': np.zeros((3, 2)),\n 'scale_diag': np.ones((2,))},\n 5),\n ('broadcasted loc',\n {'loc': np.zeros((2)),\n 'scale_diag': np.ones((3, 2,))},\n 5),\n )\n def test_sample_and_log_prob(self, distr_params, sample_shape):\n distr_params = {\n k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}\n super()._test_sample_and_log_prob(\n dist_args=(),\n dist_kwargs=distr_params,\n sample_shape=sample_shape,\n assertion_fn=self.assertion_fn)\n\n @chex.all_variants\n @parameterized.named_parameters(\n ('float32', jnp.float32),\n ('float64', jnp.float64))\n def test_sample_dtype(self, dtype):\n dist_params = {\n 'loc': np.array([0., 0.], dtype),\n 'scale_diag': np.array([1., 1.], dtype)}\n dist = self.distrax_cls(**dist_params)\n samples = self.variant(dist.sample)(seed=self.key)\n self.assertEqual(samples.dtype, dist.dtype)\n chex.assert_type(samples, dtype)\n\n @chex.all_variants\n @parameterized.named_parameters(\n ('log_prob; 2d dist, 2d value', 'log_prob',\n {'scale_diag': [1., 1.]},\n [0., -0.5]),\n ('log_prob; 3d dist, broadcasted params, 3d value', 'log_prob',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.3 * np.ones((3,))},\n [-0.1, 0., -0.5]),\n ('log_prob; 3d dist, broadcasted scale_diag, rank-2 value', 'log_prob',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((4, 3))),\n ('log_prob; 3d dist, broadcasted scale_diag, rank-3 value', 'log_prob',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((5, 4, 3))),\n ('log_prob; 2d dist, 2d value, edge case', 'log_prob',\n {'scale_diag': [1., 1.]},\n [200., -200.]),\n ('prob; 2d dist, 2d value', 'prob',\n {'scale_diag': [1., 1.]},\n [0., -0.5]),\n ('prob; 3d dist, broadcasted params, 3d value', 'prob',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.3 * np.ones((3,))},\n [-0.1, 0., -0.5]),\n ('prob; 3d dist, broadcasted 
scale_diag, rank-2 value', 'prob',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((4, 3))),\n ('prob; 3d dist, broadcasted scale_diag, rank-3 value', 'prob',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((5, 4, 3))),\n ('prob; 2d dist, 2d value, edge case', 'prob',\n {'scale_diag': [1., 1.]},\n [200., -200.]),\n )\n def test_pdf(self, function_string, distr_params, value):\n distr_params = {\n k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}\n value = np.asarray(value)\n super()._test_attribute(\n attribute_string=function_string,\n dist_kwargs=distr_params,\n call_args=(value,),\n assertion_fn=self.assertion_fn)\n\n @chex.all_variants\n @parameterized.named_parameters(\n ('log_cdf; 2d dist, 2d value', 'log_cdf',\n {'scale_diag': [1., 1.]},\n [0., -0.5]),\n ('log_cdf; 3d dist, broadcasted params, 3d value', 'log_cdf',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.3 * np.ones((3,))},\n [-0.1, 0., -0.5]),\n ('log_cdf; 3d dist, broadcasted scale_diag, rank-2 value', 'log_cdf',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((4, 3))),\n ('log_cdf; 3d dist, broadcasted scale_diag, rank-3 value', 'log_cdf',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((5, 4, 3))),\n ('log_cdf; 2d dist, 2d value, edge case', 'log_cdf',\n {'scale_diag': [1., 1.]},\n [200., -200.]),\n ('cdf; 2d dist, 2d value', 'cdf',\n {'scale_diag': [1., 1.]},\n [0., -0.5]),\n ('cdf; 3d dist, broadcasted params, 3d value', 'cdf',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.3 * np.ones((3,))},\n [-0.1, 0., -0.5]),\n ('cdf; 3d dist, broadcasted scale_diag, rank-2 value', 'cdf',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((4, 3))),\n ('cdf; 3d dist, broadcasted scale_diag, rank-3 value', 'cdf',\n {'loc': np.zeros((4, 3)),\n 'scale_diag': 0.1 * np.ones((3,))},\n -0.1 * np.ones((5, 4, 3))),\n ('cdf; 2d 
dist, 2d value, edge case', 'cdf',\n {'scale_diag': [1., 1.]},\n [200., -200.]),\n )\n def test_cdf(self, function_string, distr_params, value):\n distr_params = {\n k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}\n value = np.asarray(value)\n dist = self.distrax_cls(**distr_params)\n result = self.variant(getattr(dist, function_string))(value)\n # The `cdf` is not implemented in TFP, so we test against a `Normal`.\n loc = 0. if 'loc' not in distr_params else distr_params['loc']\n univariate_normal = normal.Normal(loc, distr_params['scale_diag'])\n expected_result = getattr(univariate_normal, function_string)(value)\n if function_string == 'cdf':\n reduce_fn = lambda x: jnp.prod(x, axis=-1)\n elif function_string == 'log_cdf':\n reduce_fn = lambda x: jnp.sum(x, axis=-1)\n expected_result = reduce_fn(expected_result)\n self.assertion_fn(result, expected_result)\n\n @chex.all_variants(with_pmap=False)\n @parameterized.named_parameters(\n ('entropy; one distribution', 'entropy',\n {'loc': [0.1, -0.1],\n 'scale_diag': [0.8, 0.5]}),\n ('entropy; broadcasted loc', 'entropy',\n {'loc': [0., 0.1, -0.1],\n 'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),\n ('entropy; broadcasted scale_diag', 'entropy',\n {'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],\n 'scale_diag': [1.5, 0.8, 0.5]}),\n ('entropy; None loc', 'entropy',\n {'scale_diag': [0.8, 0.5]}),\n ('entropy; None scale_diag', 'entropy',\n {'loc': [0.1, -0.1]}),\n ('mean; one distribution', 'mean',\n {'loc': [0.1, -0.1],\n 'scale_diag': [0.8, 0.5]}),\n ('mean; broadcasted loc', 'mean',\n {'loc': [0., 0.1, -0.1],\n 'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),\n ('mean; broadcasted scale_diag', 'mean',\n {'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],\n 'scale_diag': [1.5, 0.8, 0.5]}),\n ('mean; None loc', 'mean',\n {'scale_diag': [0.8, 0.5]}),\n ('mean; None scale_diag', 'mean',\n {'loc': [0.1, -0.1]}),\n ('stddev; one distribution', 'stddev',\n {'loc': [0.1, -0.1],\n 'scale_diag': [0.8, 0.5]}),\n 
('stddev; broadcasted loc', 'stddev',\n {'loc': [0., 0.1, -0.1],\n 'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),\n ('stddev; broadcasted scale_diag', 'stddev',\n {'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],\n 'scale_diag': [1.5, 0.8, 0.5]}),\n ('stddev; None loc', 'stddev',\n {'scale_diag': [0.8, 0.5]}),\n ('stddev; None scale_diag', 'stddev',\n {'loc': [0.1, -0.1]}),\n ('variance; one distribution', 'variance',\n {'loc': [0.1, -0.1],\n 'scale_diag': [0.8, 0.5]}),\n ('variance; broadcasted loc', 'variance',\n {'loc': [0., 0.1, -0.1],\n 'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),\n ('variance; broadcasted scale_diag', 'variance',\n {'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],\n 'scale_diag': [1.5, 0.8, 0.5]}),\n ('variance; None loc', 'variance',\n {'scale_diag': [0.8, 0.5]}),\n ('variance; None scale_diag', 'variance',\n {'loc': [0.1, -0.1]}),\n ('covariance; one distribution', 'covariance',\n {'loc': [0.1, -0.1],\n 'scale_diag': [0.8, 0.5]}),\n ('covariance; broadcasted loc', 'covariance',\n {'loc': [0., 0.1, -0.1],\n 'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),\n ('covariance; None loc', 'covariance',\n {'scale_diag': [0.8, 0.5]}),\n ('covariance; None scale_diag', 'covariance',\n {'loc': [0.1, -0.1]}),\n ('mode; broadcasted scale_diag', 'mode',\n {'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],\n 'scale_diag': [1.5, 0.8, 0.5]}),\n )\n def test_method(self, function_string, distr_params):\n distr_params = {\n k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}\n super()._test_attribute(\n function_string,\n dist_kwargs=distr_params,\n assertion_fn=self.assertion_fn)\n\n @chex.all_variants(with_pmap=False)\n def test_median(self):\n dist_params = {'loc': np.array([0.3, -0.1, 0.0]),\n 'scale_diag': np.array([0.1, 1.4, 0.5])}\n dist = self.distrax_cls(**dist_params)\n self.assertion_fn(self.variant(dist.median)(), dist.mean())\n\n @chex.all_variants(with_pmap=False)\n @parameterized.named_parameters(\n ('kl distrax_to_distrax', 
'kl_divergence', 'distrax_to_distrax'),\n ('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),\n ('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),\n ('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),\n ('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),\n ('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))\n def test_with_two_distributions(self, function_string, mode_string):\n super()._test_with_two_distributions(\n attribute_string=function_string,\n mode_string=mode_string,\n dist1_kwargs={\n 'loc': np.random.randn(4, 1, 5).astype(np.float32),\n 'scale_diag': 0.1 + np.random.rand(3, 5).astype(np.float32),\n },\n dist2_kwargs={\n 'loc': np.asarray([-2.4, -1., 0., 1.2, 6.5]).astype(np.float32),\n 'scale_diag': None,\n },\n assertion_fn=self.assertion_fn)\n\n def test_jittable(self):\n super()._test_jittable(\n (np.zeros((2, 3,)), np.ones((2, 3,))), assertion_fn=self.assertion_fn)\n\n @parameterized.named_parameters(\n ('single element', 2),\n ('range', slice(-1)),\n ('range_2', (slice(None), slice(-1))),\n )\n def test_slice(self, slice_):\n loc = jnp.array(np.random.randn(3, 4, 5))\n scale_diag = jnp.array(np.random.randn(3, 4, 5))\n dist = self.distrax_cls(loc=loc, scale_diag=scale_diag)\n self.assertion_fn(dist[slice_].mean(), loc[slice_])\n\n def test_slice_different_parameterization(self):\n loc = jnp.array(np.random.randn(4))\n scale_diag = jnp.array(np.random.randn(3, 4))\n dist = self.distrax_cls(loc=loc, scale_diag=scale_diag)\n self.assertion_fn(dist[0].mean(), loc) # Not slicing loc.\n self.assertion_fn(dist[0].stddev(), scale_diag[0])\n\n def test_slice_ellipsis(self):\n loc = jnp.array(np.random.randn(3, 4, 5))\n scale_diag = jnp.array(np.random.randn(3, 4, 5))\n dist = self.distrax_cls(loc=loc, scale_diag=scale_diag)\n self.assertion_fn(dist[..., -1].mean(), loc[:, -1])\n self.assertion_fn(dist[..., -1].stddev(), scale_diag[:, -1])\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.asarray", "numpy.ones", "numpy.random.randn", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros" ] ]
enneract/colour
[ "27470c16f7a5bf388d0f0798884e8b7abdceafa4" ]
[ "colour/models/rgb/transfer_functions/tests/test_log.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.rgb.transfer_functions.log`\nmodule.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\n\nfrom colour.models.rgb.transfer_functions import (log_encoding_Log2,\n log_decoding_Log2)\nfrom colour.utilities import domain_range_scale, ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-developers@colour-science.org'\n__status__ = 'Production'\n\n__all__ = ['TestLogEncoding_Log2', 'TestLogDecoding_Log2']\n\n\nclass TestLogEncoding_Log2(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.log.log_encoding_Log2`\n definition unit tests methods.\n \"\"\"\n\n def test_log_encoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_encoding_Log2` definition.\n \"\"\"\n\n self.assertAlmostEqual(\n log_encoding_Log2(18), 0.407732889704347, places=7)\n\n self.assertAlmostEqual(\n log_encoding_Log2(18, 0.12), 0.443642737727224, places=7)\n\n self.assertAlmostEqual(\n log_encoding_Log2(18, 0.12, 0.0045), 0.443556955303088, places=7)\n\n self.assertAlmostEqual(\n log_encoding_Log2(18, 0.12, 0.0045, 15.0),\n 0.481765775765788,\n places=7)\n\n def test_n_dimensional_log_encoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_encoding_Log2` definition n-dimensional arrays support.\n \"\"\"\n\n x = 18\n y = log_encoding_Log2(x)\n\n x = np.tile(x, 6)\n y = np.tile(y, 6)\n np.testing.assert_almost_equal(log_encoding_Log2(x), y, decimal=7)\n\n x = np.reshape(x, (2, 3))\n y = np.reshape(y, (2, 3))\n np.testing.assert_almost_equal(log_encoding_Log2(x), y, decimal=7)\n\n x = np.reshape(x, (2, 3, 1))\n y = np.reshape(y, (2, 3, 1))\n 
np.testing.assert_almost_equal(log_encoding_Log2(x), y, decimal=7)\n\n def test_domain_range_scale_log_encoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_encoding_Log2` definition domain and range scale support.\n \"\"\"\n\n x = 18\n y = log_encoding_Log2(x)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n log_encoding_Log2(x * factor), y * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_log_encoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_encoding_Log2` definition nan support.\n \"\"\"\n\n log_encoding_Log2(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestLogDecoding_Log2(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.log.\\\nlog_decoding_Log2` definition unit tests methods.\n \"\"\"\n\n def test_log_decoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_decoding_Log2` definition.\n \"\"\"\n\n self.assertAlmostEqual(\n log_decoding_Log2(0.407732889704347), 18.000000000000075, places=7)\n\n self.assertAlmostEqual(\n log_decoding_Log2(0.443642737727224, 0.12),\n 17.999999999999904,\n places=7)\n\n self.assertAlmostEqual(\n log_decoding_Log2(0.443556955303088, 0.12, 0.0045),\n 17.999999999999982,\n places=7)\n\n self.assertAlmostEqual(\n log_decoding_Log2(0.481765775765788, 0.12, 0.0045, 15.0),\n 17.999999999999918,\n places=7)\n\n def test_n_dimensional_log_decoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_decoding_Log2` definition n-dimensional arrays support.\n \"\"\"\n\n y = 0.407732889704347\n x = log_decoding_Log2(y)\n\n y = np.tile(y, 6)\n x = np.tile(x, 6)\n np.testing.assert_almost_equal(log_decoding_Log2(y), x, decimal=7)\n\n y = np.reshape(y, (2, 3))\n x = np.reshape(x, (2, 3))\n 
np.testing.assert_almost_equal(log_decoding_Log2(y), x, decimal=7)\n\n y = np.reshape(y, (2, 3, 1))\n x = np.reshape(x, (2, 3, 1))\n np.testing.assert_almost_equal(log_decoding_Log2(y), x, decimal=7)\n\n def test_domain_range_scale_log_decoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_decoding_Log2` definition domain and range scale support.\n \"\"\"\n\n y = 0.407732889704347\n x = log_decoding_Log2(y)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n log_decoding_Log2(y * factor), x * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_log_decoding_Log2(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.log.\\\nlog_decoding_Log2` definition nan support.\n \"\"\"\n\n log_decoding_Log2(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.reshape", "numpy.array", "numpy.tile" ] ]
mrirecon/spreco
[ "e9c720fb0d8a9c59a0e83696d2b7efcdc90b2cc3" ]
[ "spreco/model/nn.py" ]
[ "from spreco.model.utils import *\n\nimport tensorflow.compat.v1 as tf\ntf.disable_eager_execution()\nfrom tf_slim import add_arg_scope\nimport numpy as np\n\n\n@add_arg_scope\ndef dense(x_, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, use_bias=True, ema=None, **kwargs):\n ''' fully connected layer '''\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('dense', counters)\n\n if 'debug' in kwargs.keys():\n if kwargs['debug']:\n print(name)\n stop_grad = False \n if 'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n with tf.variable_scope(name):\n V = get_variable('V', stop_grad, shape=[int(x_.get_shape()[1]),num_units], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05), trainable=True)\n g = get_variable('g', stop_grad, shape=[num_units], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n if use_bias:\n b = get_variable('b', stop_grad, shape=[num_units], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n\n # use weight normalization (Salimans & Kingma, 2016)\n # https://arxiv.org/pdf/1602.07868.pdf\n x = tf.matmul(x_, V)\n\n scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))\n\n if use_bias:\n x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])\n\n if init: # normalize x\n m_init, v_init = tf.nn.moments(x, [0])\n scale_init = init_scale/tf.sqrt(v_init + 1e-10)\n if use_bias:\n with tf.control_dependencies([g.assign(g*scale_init), b.assign_add(-m_init*scale_init)]):\n x = tf.matmul(x_, V)\n scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))\n x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])\n else:\n with tf.control_dependencies([g.assign(g*scale_init)]):\n x = tf.matmul(x_, V)\n scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))\n x = tf.reshape(scaler, [1, num_units]) * x\n\n # apply nonlinearity\n if nonlinearity is not None:\n x = 
nonlinearity(x)\n\n return x\n\n@add_arg_scope\ndef dense_plus(x_, h, num_units, nr_classes, nonlinearity=None, init_scale=1., counters={}, init=False, use_bias=True, ema=None, **kwargs):\n ''' fully connected layer '''\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('dense', counters)\n\n if 'debug' in kwargs.keys():\n if kwargs['debug']:\n print(name)\n stop_grad = False \n if 'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n with tf.variable_scope(name):\n V = get_variable('V', stop_grad, shape=[nr_classes, int(x_.get_shape()[1]),num_units], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05), trainable=True)\n g = get_variable('g', stop_grad, shape=[nr_classes, num_units], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n if use_bias:\n b = get_variable('b', stop_grad, shape=[nr_classes, num_units], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n\n V_h = tf.gather(V, h, axis=0)\n g_h = tf.gather(g, h, axis=0)\n if use_bias:\n b_h =tf.gather(b, h, axis=0)\n\n # use weight normalization (Salimans & Kingma, 2016)\n x = tf.einsum('ij,ijk->ik',x_, V_h)\n\n scaler = g_h / tf.sqrt(tf.reduce_sum(tf.square(V_h), [1]))\n\n if use_bias:\n x = scaler * x + b_h\n\n # apply nonlinearity\n if nonlinearity is not None:\n x = nonlinearity(x)\n\n return x\n\n\n@add_arg_scope\ndef conv2d_plus(x_, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, bias=True, weight_norm=False, **kwargs):\n ''' convolutional layer '''\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters) # this is the scope defined by args\n else:\n name = get_name('conv2d', counters) # this is default scope named with conv2d\n \n if 'debug' in kwargs.keys():\n if kwargs['debug']:\n print(name)\n stop_grad = False \n if 'stop_grad' in kwargs.keys():\n stop_grad = 
kwargs['stop_grad']\n\n dilation = 1\n if 'dilation' in kwargs.keys():\n dilation = kwargs['dilation']\n\n with tf.variable_scope(name):\n V = get_variable('V', stop_grad, shape=filter_size+[int(x_.get_shape()[-1]),num_filters], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05), trainable=True)\n\n if weight_norm:\n g = get_variable('g', stop_grad, shape=[num_filters], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n # use weight normalization (Salimans & Kingma, 2016)\n W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])\n else:\n W = V\n \n # calculate convolutional layer output\n x = tf.nn.conv2d(x_, W, [1] + stride + [1], pad, dilations=dilation)\n\n if bias:\n b = get_variable('b', stop_grad, shape=[num_filters], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n x = tf.nn.bias_add(x, b)\n\n if init and weight_norm: # normalize x\n m_init, v_init = tf.nn.moments(x, [0,1,2])\n scale_init = init_scale / tf.sqrt(v_init + 1e-10)\n with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):\n # x = tf.identity(x)\n W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])\n x = tf.nn.conv2d(x_, W, [1] + stride + [1], pad, dilations=dilation)\n if bias:\n x = tf.nn.bias_add(x, b)\n\n # apply nonlinearity\n if nonlinearity is not None:\n x = nonlinearity(x)\n\n return x\n\n\n@add_arg_scope\ndef deconv2d(x_, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, bias = True, **kwargs):\n ''' transposed convolutional layer '''\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('deconv2d', counters)\n\n if 'debug' in kwargs.keys():\n if kwargs['debug']:\n print(name)\n\n stop_grad = False \n if 'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n xs = int_shape(x_)\n if 
pad=='SAME':\n target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]\n else:\n target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]\n with tf.variable_scope(name):\n V = get_variable('V', stop_grad, shape=filter_size+[num_filters,int(x_.get_shape()[-1])], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05), trainable=True)\n g = get_variable('g', stop_grad, shape=[num_filters], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n b = get_variable('b', stop_grad, shape=[num_filters], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n\n # use weight normalization (Salimans & Kingma, 2016)\n W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(V, [0, 1, 3])\n\n # calculate convolutional layer output\n x = tf.nn.conv2d_transpose(x_, W, target_shape, [1] + stride + [1], padding=pad)\n if bias:\n x = tf.nn.bias_add(x, b)\n\n if init: # normalize x\n m_init, v_init = tf.nn.moments(x, [0,1,2])\n scale_init = init_scale / tf.sqrt(v_init + 1e-10)\n with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):\n # x = tf.identity(x)\n W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(V, [0, 1, 3])\n x = tf.nn.conv2d_transpose(x_, W, target_shape, [1] + stride + [1], padding=pad)\n if bias:\n x = tf.nn.bias_add(x, b)\n\n # apply nonlinearity\n if nonlinearity is not None:\n x = nonlinearity(x)\n\n return x\n\n\n@add_arg_scope\ndef nin(x, num_units, use_bias=True, nonlinearity=None, **kwargs):\n \"\"\" a network in network layer (1x1 CONV) \"\"\"\n s = int_shape(x)\n if None in s:\n x = tf.reshape(x, [-1,s[-1]]) # 03.09 11:18\n else:\n x = tf.reshape(x, [np.prod(s[:-1]),s[-1]])\n\n x = dense(x, num_units, use_bias=use_bias, **kwargs)\n\n if nonlinearity is not None:\n x = nonlinearity(x)\n\n if None in s:\n if len(s) == 2:\n shape = [-1] + [num_units]\n else: \n shape = [-1] + 
list(map(int, s[1:-1])) + [num_units]\n out = tf.reshape(x, shape)\n else:\n out = tf.reshape(x, s[:-1]+[num_units])\n return out\n\n@add_arg_scope\ndef emb(t, embedding_size=256, scale=1.0, counters={}, **kwargs):\n \"\"\"\n gaussian fourier embedding for t\n # https://www.cs.cmu.edu/~schneide/DougalRandomFeatures_UAI2015.pdf\n # https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf\n \"\"\"\n\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('emb', counters)\n\n if 'debug' in kwargs.keys():\n if kwargs['debug']:\n print(name)\n\n with tf.variable_scope(name):\n\n W = get_variable('W', stop_grad=True, shape=[embedding_size], dtype=tf.float32,\n initializer=tf.random_normal_initializer(stddev=scale), trainable=True)\n t_proj = t[:, None]*W[None, :] * 2 * 3.1415926\n \n return tf.concat([tf.math.sin(t_proj), tf.math.cos(t_proj)], axis=-1)\n\n@add_arg_scope\ndef embed_t(t, embedding_size=256, scale=1.0, counters={}, **kwargs):\n \"\"\"\n gaussian fourier embedding for t\n # https://www.cs.cmu.edu/~schneide/DougalRandomFeatures_UAI2015.pdf\n # https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf\n \"\"\"\n name = get_name('embed_t', counters)\n W = get_variable(name, stop_grad=True, shape=[embedding_size], dtype=tf.float32,\n initializer=tf.random_normal_initializer(stddev=scale), trainable=True)\n t_proj = t[:, None]*W[None, :] * 2 * 3.1415926\n return tf.concat([tf.math.sin(t_proj), tf.math.cos(t_proj)], axis=-1)\n\n@add_arg_scope\ndef self_attention(x, qk_chns, v_chns, **kwargs):\n \"\"\"\n Non local neural networks\n https://arxiv.org/pdf/1711.07971.pdf\n \"\"\"\n shape = int_shape(x)\n if None in shape:\n flatten_shape = (-1, shape[1]*shape[2], shape[3])\n out_shape = (-1, shape[1], shape[2], v_chns)\n else:\n flatten_shape = (shape[0], shape[1]*shape[2], shape[3])\n out_shape = (shape[0], shape[1], shape[2], v_chns)\n\n query_conv = tf.reshape(nin(x, qk_chns, nonlinearity=None, 
scope='global_attention'), flatten_shape)\n key_conv = tf.reshape(nin(x, qk_chns, nonlinearity=None, scope='global_attention'), flatten_shape)\n value_conv = tf.reshape(nin(x, v_chns, nonlinearity=None, scope='global_attention'), flatten_shape)\n\n correlation = tf.einsum(\"bnf,bjf->bnj\", query_conv, key_conv)\n attention_map = tf.nn.softmax(correlation, axis=-1)\n out = tf.einsum(\"bnf,bnj->bjf\", value_conv, attention_map)\n out = nin(out, v_chns, scope='global_attention')\n\n if shape[-1] != v_chns:\n x = nin(x, v_chns, nonlinearity=None, scope='global_attention')\n \n return tf.reshape(out, out_shape) + x\n\n\n## normalization modules\n@add_arg_scope\ndef batch_normalization(x, is_training=True, counters={}, **kwargs):\n \n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('batch_norm', counters)\n\n if 'debug' in kwargs.keys():\n if kwargs['debug']:\n print(name)\n\n with tf.variable_scope(name):\n return tf.layers.batch_normalization(x, is_training)\n\n@add_arg_scope\ndef cond_instance_norm_plus(x, h, nr_classes, counters={}, **kwargs):\n \"\"\"\n Adjusted conditional instance normalization arXiv:1907.05600\n\n y is the index of classes\n \"\"\"\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('cond_instance_norm_plus', counters)\n\n stop_grad = False\n if 'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n in_shape = int_shape(x)\n\n with tf.variable_scope(name):\n gamma = get_variable(name+'_gamma', stop_grad, shape=[nr_classes, in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n beta = get_variable(name+'_beta', stop_grad, shape=[nr_classes, in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n alpha = get_variable(name+'_alpha', stop_grad, shape=[nr_classes, in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n mean, 
variance = tf.nn.moments(x, [1, 2], keepdims=True)\n nx = tf.nn.batch_normalization(x, mean, variance, offset=None, scale=None, variance_epsilon=1e-12, name='x_norm')\n cm, cvar = tf.nn.moments(mean, [-1], keepdims=True)\n adjusted_mean = tf.nn.batch_normalization(mean, cm, cvar, offset=None, scale=None, variance_epsilon=1e-12, name='adjust_norm')\n offset = tf.expand_dims(tf.expand_dims(tf.gather(beta, h, axis=0),1),1)\n scale = tf.expand_dims(tf.expand_dims(tf.gather(gamma, h, axis=0),1),1)\n out = nx + adjusted_mean*tf.expand_dims(tf.expand_dims(tf.gather(alpha, h, axis=0),1),1)\n #out = out*scale + offset\n out = tf.nn.batch_normalization(out, tf.zeros_like(mean), tf.ones_like(variance), offset=offset, scale=scale, variance_epsilon=1e-12, name='instancenorm') \n \n return out\n\n@add_arg_scope\ndef group_norm(x, groups=32, counters={}, **kwargs):\n\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('group_norm', counters)\n \n in_shape = int_shape(x)\n\n stop_grad = False\n if 'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n channels = in_shape[-1]\n\n if groups > channels:\n raise ValueError('Invalid groups %d for %d channels' % (groups, channels))\n\n new_shape = in_shape[:-1] + [groups, channels//groups]\n x = tf.reshape(x, new_shape)\n\n params_shape = [1, 1, 1, groups, channels//groups]\n\n with tf.variable_scope(name):\n\n gamma = get_variable(name+'_gamma', stop_grad, shape=params_shape, dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n beta = get_variable(name+'_beta', stop_grad, shape=params_shape, dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n mean, variance = tf.nn.moments(x, [1,2,4], keepdims=True)\n\n x = tf.nn.batch_normalization(x, mean, variance, offset = beta, scale=gamma, variance_epsilon=1e-12, name='group_norm')\n \n out = tf.reshape(x, in_shape)\n\n return out\n\n@add_arg_scope\ndef instance_norm_plus(x, 
counters={}, **kwargs):\n \"\"\"\n Adjusted conditional instance normalization arXiv:1907.05600\n\n y is the index of classes\n \"\"\"\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('instance_norm_plus', counters)\n\n stop_grad = False\n if 'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n in_shape = int_shape(x)\n\n with tf.variable_scope(name):\n gamma = get_variable(name+'_gamma', stop_grad, shape=[in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n beta = get_variable(name+'_beta', stop_grad, shape=[in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n alpha = get_variable(name+'_alpha', stop_grad, shape=[in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n mean, variance = tf.nn.moments(x, [1, 2], keepdims=True)\n nx = tf.nn.batch_normalization(x, mean, variance, offset=None, scale=None, variance_epsilon=1e-12, name='x_norm')\n cm, cvar = tf.nn.moments(mean, [-1], keepdims=True)\n adjusted_mean = tf.nn.batch_normalization(mean, cm, cvar, offset=None, scale=None, variance_epsilon=1e-12, name='adjust_norm')\n offset = tf.expand_dims(tf.expand_dims(tf.expand_dims(beta, 0),0),0)\n scale = tf.expand_dims(tf.expand_dims(tf.expand_dims(gamma, 0),0),0)\n out = nx + adjusted_mean*tf.expand_dims(tf.expand_dims(tf.expand_dims(alpha, 0), 0), 0)\n #out = out*scale + offset\n out = tf.nn.batch_normalization(out, tf.zeros_like(mean), tf.ones_like(variance), offset=offset, scale=scale, variance_epsilon=1e-12, name='instancenorm') \n \n return out\n\n@add_arg_scope\ndef instance_norm(x, counters={}, **kwargs):\n \"\"\"\n Adjusted conditional instance normalization arXiv:1907.05600\n\n y is the index of classes\n \"\"\"\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('instance_norm_plus', counters)\n\n stop_grad = False\n if 
'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n in_shape = int_shape(x)\n\n with tf.variable_scope(name):\n gamma = get_variable(name+'_gamma', stop_grad, shape=[in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n beta = get_variable(name+'_beta', stop_grad, shape=[in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n mean, variance = tf.nn.moments(x, [1, 2], keepdims=True)\n offset = tf.expand_dims(tf.expand_dims(tf.expand_dims(beta, 0),0),0)\n scale = tf.expand_dims(tf.expand_dims(tf.expand_dims(gamma, 0),0),0)\n out = tf.nn.batch_normalization(x, mean, variance, offset=offset, scale=scale, variance_epsilon=1e-12, name='instance_norm') \n \n return out\n\n@add_arg_scope\ndef layer_norm(x, counters={}, **kwargs):\n\n if 'scope' in kwargs.keys():\n name = get_name(kwargs['scope'], counters)\n else:\n name = get_name('layer_norm', counters)\n\n stop_grad = False\n if 'stop_grad' in kwargs.keys():\n stop_grad = kwargs['stop_grad']\n\n in_shape = int_shape(x)\n with tf.variable_scope(name):\n gamma = get_variable(name+'_gamma', stop_grad, shape=[in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n beta = get_variable(name+'_beta', stop_grad, shape=[in_shape[-1]], dtype=tf.float32,\n initializer=tf.constant_initializer(0.), trainable=True)\n mean, variance = tf.nn.moments(x, [1,2,3], keepdims=True)\n out = tf.nn.batch_normalization(x, mean, variance, offset=beta, scale=gamma, variance_epsilon=1e-12, name='layer_norm')\n return out\n" ]
[ [ "tensorflow.compat.v1.sqrt", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.einsum", "tensorflow.compat.v1.constant_initializer", "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.layers.batch_normalization", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.math.cos", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.square", "tensorflow.compat.v1.math.sin", "tensorflow.compat.v1.nn.softmax", "tensorflow.compat.v1.nn.batch_normalization", "tensorflow.compat.v1.nn.moments", "tensorflow.compat.v1.nn.conv2d", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.compat.v1.nn.l2_normalize", "tensorflow.compat.v1.random_normal_initializer", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.gather", "tensorflow.compat.v1.matmul", "numpy.prod", "tensorflow.compat.v1.nn.bias_add", "tensorflow.compat.v1.nn.conv2d_transpose" ] ]
SK-tklab/MES
[ "d1422f7d709e185ffc63f1b8dc1c6ddb91da5eba" ]
[ "src/models/RBFFB.py" ]
[ "import numpy as np\n\n\nclass RBFFourierBasis:\n def __init__(self, n_features: int, n_dim: int, rbf_ls: float = 1.,\n rng: np.random.Generator = None):\n if rng is None:\n rng = np.random.default_rng()\n self.__n_features = n_features\n self.__n_dim = n_dim\n self.__rbf_ls = rbf_ls\n self.__rng = rng\n self.__weight = rng.normal(size=(n_dim, n_features)) / rbf_ls\n self.__offset = rng.uniform(low=0, high=2 * np.pi, size=n_features)\n return\n\n @property\n def n_features(self):\n return self.__n_features\n\n @property\n def n_dims(self):\n return self.__n_dim\n\n @property\n def rbf_ls(self):\n return self.__rbf_ls\n\n @property\n def weight(self):\n return self.__weight\n\n @property\n def offset(self):\n return self.__offset\n\n def transform(self, x):\n assert x.ndim == 2 and x.shape[1] == self.n_dims, 'x should be 2 dim'\n rff = np.sqrt(2 / self.n_features) * np.cos(x @ self.weight + self.offset)\n return rff\n" ]
[ [ "numpy.cos", "numpy.sqrt", "numpy.random.default_rng" ] ]
dangz90/Deep-Learning-for-Multi-Modal-Hidden-Emotion-Analysis
[ "66c50292cac9a66eace32a040c49267b06c45de5" ]
[ "Models/CNN+GRU/preprocessing/image.py" ]
[ "\"\"\"Fairly basic set of tools for real-time data augmentation on image data.\nCan easily be extended to include new transformations,\nnew preprocessing methods, etc...\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport numpy as np\nimport re\nimport h5py\nfrom scipy import linalg\nimport scipy.ndimage as ndi\nfrom six.moves import range\nimport os\nimport threading\nimport warnings\nimport multiprocessing.pool\nfrom functools import partial\nfrom pathlib import Path\n\nfrom keras import backend\nfrom keras.utils.data_utils import Sequence\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\ntry:\n from PIL import Image as pil_image\nexcept ImportError:\n pil_image = None\n\n\nif pil_image is not None:\n _PIL_INTERPOLATION_METHODS = {\n 'nearest': pil_image.NEAREST,\n 'bilinear': pil_image.BILINEAR,\n 'bicubic': pil_image.BICUBIC,\n }\n # These methods were only introduced in version 3.4.0 (2016).\n if hasattr(pil_image, 'HAMMING'):\n _PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING\n if hasattr(pil_image, 'BOX'):\n _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX\n # This method is new in version 1.1.3 (2013).\n if hasattr(pil_image, 'LANCZOS'):\n _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS\n\n\ndef random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n \"\"\"Performs a random rotation of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. 
Must be 3D.\n rg: Rotation range, in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Rotated Numpy image tensor.\n \"\"\"\n theta = np.pi / 180 * np.random.uniform(-rg, rg)\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x\n\n\ndef random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n \"\"\"Performs a random spatial shift of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. 
Must be 3D.\n wrg: Width shift range, as a float fraction of the width.\n hrg: Height shift range, as a float fraction of the height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Shifted Numpy image tensor.\n \"\"\"\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = np.random.uniform(-hrg, hrg) * h\n ty = np.random.uniform(-wrg, wrg) * w\n translation_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n\n transform_matrix = translation_matrix # no need to do offset\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x\n\n\ndef random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n \"\"\"Performs a random spatial shear of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. 
Must be 3D.\n intensity: Transformation intensity.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Sheared Numpy image tensor.\n \"\"\"\n shear = np.random.uniform(-intensity, intensity)\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x\n\n\ndef random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n \"\"\"Performs a random spatial zoom of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n zoom_range: Tuple of floats; zoom range for width and height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Zoomed Numpy image tensor.\n\n # Raises\n ValueError: if `zoom_range` isn't a tuple.\n \"\"\"\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two floats. 
'\n 'Received arg: ', zoom_range)\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x\n\n\ndef random_channel_shift(x, intensity, channel_axis=0):\n x = np.rollaxis(x, channel_axis, 0)\n min_x, max_x = np.min(x), np.max(x)\n channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)\n for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n\n\ndef transform_matrix_offset_center(matrix, x, y):\n o_x = float(x) / 2 + 0.5\n o_y = float(y) / 2 + 0.5\n offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n return transform_matrix\n\n\ndef apply_transform(x,\n transform_matrix,\n channel_axis=0,\n fill_mode='nearest',\n cval=0.):\n \"\"\"Apply the image transformation specified by a matrix.\n\n # Arguments\n x: 2D numpy array, single image.\n transform_matrix: Numpy array specifying the geometric transformation.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n The transformed version of the input.\n \"\"\"\n x = np.rollaxis(x, channel_axis, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n channel_images = [ndi.interpolation.affine_transform(\n x_channel,\n 
final_affine_matrix,\n final_offset,\n order=0,\n mode=fill_mode,\n cval=cval) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n\n\ndef flip_axis(x, axis):\n x = np.asarray(x).swapaxes(axis, 0)\n x = x[::-1, ...]\n x = x.swapaxes(0, axis)\n return x\n\n\ndef array_to_img(x, data_format=None, scale=True):\n \"\"\"Converts a 3D Numpy array to a PIL Image instance.\n\n # Arguments\n x: Input Numpy array.\n data_format: Image data format.\n scale: Whether to rescale image values\n to be within [0, 255].\n\n # Returns\n A PIL Image instance.\n\n # Raises\n ImportError: if PIL is not available.\n ValueError: if invalid `x` or `data_format` is passed.\n \"\"\"\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. '\n 'The use of `array_to_img` requires PIL.')\n x = np.asarray(x, dtype=backend.floatx())\n if x.ndim != 3:\n raise ValueError('Expected image array to have rank 3 (single image). '\n 'Got array with shape:', x.shape)\n\n if data_format is None:\n data_format = backend.image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Invalid data_format:', data_format)\n\n # Original Numpy array x has format (height, width, channel)\n # or (channel, height, width)\n # but target PIL image has format (width, height, channel)\n if data_format == 'channels_first':\n x = x.transpose(1, 2, 0)\n if scale:\n x = x + max(-np.min(x), 0)\n x_max = np.max(x)\n if x_max != 0:\n x /= x_max\n x *= 255\n if x.shape[2] == 3:\n # RGB\n return pil_image.fromarray(x.astype('uint8'), 'RGB')\n elif x.shape[2] == 1:\n # grayscale\n return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')\n else:\n raise ValueError('Unsupported channel number: ', x.shape[2])\n\n\ndef img_to_array(img, data_format=None):\n \"\"\"Converts a PIL Image instance to a Numpy array.\n\n # Arguments\n img: PIL Image instance.\n data_format: Image data format.\n\n # Returns\n A 3D Numpy 
array.\n\n # Raises\n ValueError: if invalid `img` or `data_format` is passed.\n \"\"\"\n if data_format is None:\n data_format = backend.image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ', data_format)\n # Numpy array x has format (height, width, channel)\n # or (channel, height, width)\n # but original PIL image has format (width, height, channel)\n x = np.asarray(img, dtype=backend.floatx())\n if len(x.shape) == 3:\n if data_format == 'channels_first':\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if data_format == 'channels_first':\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise ValueError('Unsupported image shape: ', x.shape)\n return x\n\n\ndef load_img(path, grayscale=False, target_size=None,\n interpolation='bilinear'):\n \"\"\"Loads an image into PIL format.\n\n # Arguments\n path: Path to image file\n grayscale: Boolean, whether to load the image as grayscale.\n target_size: Either `None` (default to original size)\n or tuple of ints `(img_height, img_width)`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image.\n Supported methods are \"nearest\", \"bilinear\", and \"bicubic\".\n If PIL version 1.1.3 or newer is installed, \"lanczos\" is also\n supported. If PIL version 3.4.0 or newer is installed, \"box\" and\n \"hamming\" are also supported. By default, \"bilinear\" is used.\n\n # Returns\n A PIL Image instance.\n\n # Raises\n ImportError: if PIL is not available.\n ValueError: if interpolation method is not supported.\n \"\"\"\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. 
'\n 'The use of `array_to_img` requires PIL.')\n img = pil_image.open(path)\n if grayscale:\n if img.mode != 'L':\n img = img.convert('L')\n else:\n if img.mode != 'RGB':\n img = img.convert('RGB')\n if target_size is not None:\n width_height_tuple = (target_size[1], target_size[0])\n if img.size != width_height_tuple:\n if interpolation not in _PIL_INTERPOLATION_METHODS:\n raise ValueError(\n 'Invalid interpolation method {} specified. Supported '\n 'methods are {}'.format(\n interpolation,\n \", \".join(_PIL_INTERPOLATION_METHODS.keys())))\n resample = _PIL_INTERPOLATION_METHODS[interpolation]\n img = img.resize(width_height_tuple, resample)\n return img\n\n\ndef list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):\n return [os.path.join(root, f)\n for root, _, files in os.walk(directory) for f in files\n if re.match(r'([\\w]+\\.(?:' + ext + '))', f)]\n\n\nclass ImageDataGenerator(object):\n \"\"\"Generate minibatches of image data with real-time data augmentation.\n\n # Arguments\n featurewise_center: set input mean to 0 over the dataset.\n samplewise_center: set each sample mean to 0.\n featurewise_std_normalization: divide inputs by std of the dataset.\n samplewise_std_normalization: divide each input by its std.\n zca_whitening: apply ZCA whitening.\n zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.\n rotation_range: degrees (0 to 180).\n width_shift_range: fraction of total width.\n height_shift_range: fraction of total height.\n shear_range: shear intensity (shear angle in radians).\n zoom_range: amount of zoom. if scalar z, zoom will be randomly picked\n in the range [1-z, 1+z]. A sequence of two can be passed instead\n to select this range.\n channel_shift_range: shift range for each channel.\n fill_mode: points outside the boundaries are filled according to the\n given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default\n is 'nearest'.\n cval: value used for points outside the boundaries when fill_mode is\n 'constant'. 
Default is 0.\n horizontal_flip: whether to randomly flip images horizontally.\n vertical_flip: whether to randomly flip images vertically.\n rescale: rescaling factor. If None or 0, no rescaling is applied,\n otherwise we multiply the data by the value provided. This is\n applied after the `preprocessing_function` (if any provided)\n but before any other transformation.\n preprocessing_function: function that will be implied on each input.\n The function will run before any other modification on it.\n The function should take one argument:\n one image (Numpy tensor with rank 3),\n and should output a Numpy tensor with the same shape.\n data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension\n (the depth) is at index 1, in 'channels_last' mode it is at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n \"\"\"\n\n def __init__(self,\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n zca_epsilon=1e-6,\n rotation_range=0.,\n width_shift_range=0.,\n height_shift_range=0.,\n shear_range=0.,\n zoom_range=0.,\n channel_shift_range=0.,\n fill_mode='nearest',\n cval=0.,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=None,\n preprocessing_function=None,\n data_format=None):\n if data_format is None:\n data_format = backend.image_data_format()\n self.featurewise_center = featurewise_center\n self.samplewise_center = samplewise_center\n self.featurewise_std_normalization = featurewise_std_normalization\n self.samplewise_std_normalization = samplewise_std_normalization\n self.zca_whitening = zca_whitening\n self.zca_epsilon = zca_epsilon\n self.rotation_range = rotation_range\n self.width_shift_range = width_shift_range\n self.height_shift_range = height_shift_range\n self.shear_range = 
shear_range\n self.zoom_range = zoom_range\n self.channel_shift_range = channel_shift_range\n self.fill_mode = fill_mode\n self.cval = cval\n self.horizontal_flip = horizontal_flip\n self.vertical_flip = vertical_flip\n self.rescale = rescale\n self.preprocessing_function = preprocessing_function\n\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` should be `\"channels_last\"` (channel after row and '\n 'column) or `\"channels_first\"` (channel before row and column). '\n 'Received arg: ', data_format)\n self.data_format = data_format\n if data_format == 'channels_first':\n self.channel_axis = 1\n self.row_axis = 2\n self.col_axis = 3\n if data_format == 'channels_last':\n self.channel_axis = 3\n self.row_axis = 1\n self.col_axis = 2\n\n self.mean = None\n self.std = None\n self.principal_components = None\n\n if np.isscalar(zoom_range):\n self.zoom_range = [1 - zoom_range, 1 + zoom_range]\n elif len(zoom_range) == 2:\n self.zoom_range = [zoom_range[0], zoom_range[1]]\n else:\n raise ValueError('`zoom_range` should be a float or '\n 'a tuple or list of two floats. 
'\n 'Received arg: ', zoom_range)\n\n def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,\n save_to_dir=None, save_prefix='', save_format='png'):\n return NumpyArrayIterator(\n x, y, self,\n batch_size=batch_size,\n shuffle=shuffle,\n seed=seed,\n data_format=self.data_format,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format)\n\n def flow_from_directory(self, directory,\n target_size=(256, 256), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n h5=False):\n return DirectoryIterator(\n directory, self,\n target_size=target_size, color_mode=color_mode,\n classes=classes, class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size, shuffle=shuffle, seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n follow_links=follow_links,\n h5=h5)\n\n def standardize(self, x):\n \"\"\"Apply the normalization configuration to a batch of inputs.\n\n # Arguments\n x: batch of inputs to be normalized.\n\n # Returns\n The inputs, normalized.\n \"\"\"\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.rescale:\n x *= self.rescale\n # x is a single image, so it doesn't have image number at index 0\n img_channel_axis = self.channel_axis - 1\n if self.samplewise_center:\n x -= np.mean(x, axis=img_channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)\n\n if self.featurewise_center:\n if self.mean is not None:\n x -= self.mean\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_center`, but it hasn\\'t'\n 'been fit on any training data. 
Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.featurewise_std_normalization:\n if self.std is not None:\n x /= (self.std + 1e-7)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_std_normalization`, but it hasn\\'t'\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.zca_whitening:\n if self.principal_components is not None:\n flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))\n whitex = np.dot(flatx, self.principal_components)\n x = np.reshape(whitex, x.shape)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening`, but it hasn\\'t'\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x\n\n def random_transform(self, x, seed=None):\n \"\"\"Randomly augment a single image tensor.\n\n # Arguments\n x: 3D tensor, single image.\n seed: random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n \"\"\"\n # x is a single image, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n # use composition of homographies\n # to generate final transform that needs to be applied\n if self.rotation_range:\n theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(self.zoom_range[0], 
self.zoom_range[1], 2)\n\n transform_matrix = None\n if theta != 0:\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n transform_matrix = rotation_matrix\n\n if tx != 0 or ty != 0:\n shift_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)\n\n if shear != 0:\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)\n\n if zx != 1 or zy != 1:\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)\n\n if transform_matrix is not None:\n h, w = x.shape[img_row_axis], x.shape[img_col_axis]\n transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)\n x = apply_transform(x, transform_matrix, img_channel_axis,\n fill_mode=self.fill_mode, cval=self.cval)\n\n if self.channel_shift_range != 0:\n x = random_channel_shift(x,\n self.channel_shift_range,\n img_channel_axis)\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_col_axis)\n\n if self.vertical_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_row_axis)\n\n return x\n\n def fit(self, x,\n augment=False,\n rounds=1,\n seed=None):\n \"\"\"Fits internal statistics to some sample data.\n\n Required for featurewise_center, featurewise_std_normalization\n and zca_whitening.\n\n # Arguments\n x: Numpy array, the data to fit on. 
Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, and in case\n of RGB data, it should have value 3.\n augment: Whether to fit on randomly augmented samples\n rounds: If `augment`,\n how many augmentation passes to do over the data\n seed: random seed.\n\n # Raises\n ValueError: in case of invalid input `x`.\n \"\"\"\n x = np.asarray(x, dtype=backend.floatx())\n if x.ndim != 4:\n raise ValueError('Input to `.fit()` should have rank 4. '\n 'Got array with shape: ' + str(x.shape))\n if x.shape[self.channel_axis] not in {1, 3, 4}:\n warnings.warn(\n 'Expected input to be images (as Numpy array) '\n 'following the data format convention \"' + self.data_format + '\" '\n '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '\n 'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '\n 'However, it was passed an array with shape ' + str(x.shape) +\n ' (' + str(x.shape[self.channel_axis]) + ' channels).')\n\n if seed is not None:\n np.random.seed(seed)\n\n x = np.copy(x)\n if augment:\n ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=backend.floatx())\n for r in range(rounds):\n for i in range(x.shape[0]):\n ax[i + r * x.shape[0]] = self.random_transform(x[i])\n x = ax\n\n if self.featurewise_center:\n self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.mean = np.reshape(self.mean, broadcast_shape)\n x -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.std = np.reshape(self.std, broadcast_shape)\n x /= (self.std + backend.epsilon())\n\n if self.zca_whitening:\n flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))\n sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]\n u, s, _ = 
linalg.svd(sigma)\n self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T)\n\n\nclass Iterator(Sequence):\n \"\"\"Base class for image data iterators.\n\n Every `Iterator` must implement the `_get_batches_of_transformed_samples`\n method.\n\n # Arguments\n n: Integer, total number of samples in the dataset to loop over.\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seeding for data shuffling.\n \"\"\"\n\n def __init__(self, n, batch_size, shuffle, seed):\n self.n = n\n self.batch_size = batch_size\n self.seed = seed\n self.shuffle = shuffle\n self.batch_index = 0\n self.total_batches_seen = 0\n self.lock = threading.Lock()\n self.index_array = None\n self.index_generator = self._flow_index()\n\n def _set_index_array(self):\n self.index_array = np.arange(self.n)\n if self.shuffle:\n self.index_array = np.random.permutation(self.n)\n\n def __getitem__(self, idx):\n if idx >= len(self):\n raise ValueError('Asked to retrieve element {idx}, '\n 'but the Sequence '\n 'has length {length}'.format(idx=idx,\n length=len(self)))\n if self.seed is not None:\n np.random.seed(self.seed + self.total_batches_seen)\n self.total_batches_seen += 1\n if self.index_array is None:\n self._set_index_array()\n index_array = self.index_array[self.batch_size * idx:\n self.batch_size * (idx + 1)]\n return self._get_batches_of_transformed_samples(index_array)\n\n def __len__(self):\n return int(np.ceil(self.n / float(self.batch_size)))\n\n def on_epoch_end(self):\n self._set_index_array()\n\n def reset(self):\n self.batch_index = 0\n\n def _flow_index(self):\n # Ensure self.batch_index is 0.\n self.reset()\n while 1:\n if self.seed is not None:\n np.random.seed(self.seed + self.total_batches_seen)\n if self.batch_index == 0:\n self._set_index_array()\n\n current_index = (self.batch_index * self.batch_size) % self.n\n if self.n > current_index + self.batch_size:\n self.batch_index 
+= 1\n else:\n self.batch_index = 0\n self.total_batches_seen += 1\n yield self.index_array[current_index:\n current_index + self.batch_size]\n\n def __iter__(self):\n # Needed if we want to do something like:\n # for x, y in data_gen.flow(...):\n return self\n\n def __next__(self, *args, **kwargs):\n return self.next(*args, **kwargs)\n\n def _get_batches_of_transformed_samples(self, index_array):\n \"\"\"Gets a batch of transformed samples.\n\n # Arguments\n index_array: array of sample indices to include in batch.\n\n # Returns\n A batch of transformed samples.\n \"\"\"\n raise NotImplementedError\n\n\nclass NumpyArrayIterator(Iterator):\n \"\"\"Iterator yielding data from a Numpy array.\n\n # Arguments\n x: Numpy array of input data.\n y: Numpy array of targets data.\n image_data_generator: Instance of `ImageDataGenerator`\n to use for random transformations and normalization.\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures\n being yielded, in a viewable format. This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n \"\"\"\n\n def __init__(self, x, y, image_data_generator,\n batch_size=32, shuffle=False, seed=None,\n data_format=None,\n save_to_dir=None, save_prefix='', save_format='png'):\n if y is not None and len(x) != len(y):\n raise ValueError('X (images tensor) and y (labels) '\n 'should have the same length. 
'\n 'Found: X.shape = %s, y.shape = %s' %\n (np.asarray(x).shape, np.asarray(y).shape))\n\n if data_format is None:\n data_format = backend.image_data_format()\n self.x = np.asarray(x, dtype=backend.floatx())\n\n if self.x.ndim != 4:\n raise ValueError('Input data in `NumpyArrayIterator` '\n 'should have rank 4. You passed an array '\n 'with shape', self.x.shape)\n channels_axis = 3 if data_format == 'channels_last' else 1\n if self.x.shape[channels_axis] not in {1, 3, 4}:\n warnings.warn('NumpyArrayIterator is set to use the '\n 'data format convention \"' + data_format + '\" '\n '(channels on axis ' + str(channels_axis) + '), i.e. expected '\n 'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '\n 'However, it was passed an array with shape ' + str(self.x.shape) +\n ' (' + str(self.x.shape[channels_axis]) + ' channels).')\n if y is not None:\n self.y = np.asarray(y)\n else:\n self.y = None\n self.image_data_generator = image_data_generator\n self.data_format = data_format\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)\n\n def _get_batches_of_transformed_samples(self, index_array):\n batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),\n dtype=backend.floatx())\n for i, j in enumerate(index_array):\n x = self.x[j]\n x = self.image_data_generator.random_transform(x.astype(backend.floatx()))\n x = self.image_data_generator.standardize(x)\n batch_x[i] = x\n if self.save_to_dir:\n for i, j in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n if self.y is None:\n return batch_x\n batch_y = self.y[index_array]\n return batch_x, batch_y\n\n def next(self):\n \"\"\"For python 
2.x.\n\n # Returns\n The next batch.\n \"\"\"\n # Keeps under lock only the mechanism which advances\n # the indexing of each batch.\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)\n\n\ndef _count_valid_files_in_directory(directory, white_list_formats, follow_links):\n \"\"\"Count files with extension in `white_list_formats` contained in a directory.\n\n # Arguments\n directory: absolute path to the directory containing files to be counted\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n\n # Returns\n the count of files with extension in `white_list_formats` contained in\n the directory.\n \"\"\"\n def _recursive_list(subpath):\n return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])\n\n samples = 0\n for root, _, files in _recursive_list(directory):\n for fname in files:\n is_valid = False\n for extension in white_list_formats:\n if fname.lower().endswith('.' 
+ extension):\n is_valid = True\n break\n if is_valid:\n samples += 1\n return samples\n\n\ndef _list_valid_filenames_in_directory(directory, white_list_formats,\n class_indices, follow_links):\n \"\"\"List paths of files in `subdir` relative from `directory` whose extensions are in `white_list_formats`.\n\n # Arguments\n directory: absolute path to a directory containing the files to list.\n The directory name is used as class label and must be a key of `class_indices`.\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n class_indices: dictionary mapping a class name to its index.\n\n # Returns\n classes: a list of class indices\n filenames: the path of valid files in `directory`, relative from\n `directory`'s parent (e.g., if `directory` is \"dataset/class1\",\n the filenames will be [\"class1/file1.jpg\", \"class1/file2.jpg\", ...]).\n \"\"\"\n def _recursive_list(subpath):\n return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])\n\n classes = []\n filenames = []\n subdir = os.path.basename(directory)\n basedir = os.path.dirname(directory)\n for root, _, files in _recursive_list(directory):\n for fname in sorted(files):\n is_valid = False\n for extension in white_list_formats:\n if fname.lower().endswith('.' 
+ extension):\n is_valid = True\n break\n if is_valid:\n classes.append(class_indices[subdir])\n # add filename relative to directory\n absolute_path = os.path.join(root, fname)\n filenames.append(os.path.relpath(absolute_path, basedir))\n return classes, filenames\n\n\nclass DirectoryIterator(Iterator):\n \"\"\"Iterator capable of reading images from a directory on disbackend.\n\n # Arguments\n directory: Path to the directory to read images from.\n Each subdirectory in this directory will be\n considered to contain images from one class,\n or alternatively you could specify class subdirectories\n via the `classes` argument.\n image_data_generator: Instance of `ImageDataGenerator`\n to use for random transformations and normalization.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"grayscale\"`. Color mode to read images.\n classes: Optional list of strings, names of subdirectories\n containing images from each class (e.g. `[\"dogs\", \"cats\"]`).\n It will be computed automatically if not set.\n class_mode: Mode for yielding the targets:\n `\"binary\"`: binary targets (if there are only two classes),\n `\"categorical\"`: categorical targets,\n `\"sparse\"`: integer targets,\n `\"input\"`: targets are images identical to input images (mainly\n used to work with autoencoders),\n `None`: no targets get yielded (only input images are yielded).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures\n being yielded, in a viewable format. 
This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n \"\"\"\n\n def __init__(self, directory, image_data_generator,\n target_size=(256, 256), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=None,\n data_format=None,\n save_to_dir=None, save_prefix='', save_format='png',\n follow_links=False, h5=False):\n if data_format is None:\n data_format = backend.image_data_format()\n self.directory = directory\n self.image_data_generator = image_data_generator\n self.target_size = tuple(target_size)\n if color_mode not in {'rgb', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\" or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n if self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n self.classes = classes\n if class_mode not in {'categorical', 'binary', 'sparse',\n 'input', None}:\n raise ValueError('Invalid class_mode:', class_mode,\n '; expected one of \"categorical\", '\n '\"binary\", \"sparse\", \"input\"'\n ' or None.')\n self.class_mode = class_mode\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n self.h5 = h5\n\n white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'ppm', 'npy'}\n\n # first, count the number of samples and classes\n self.samples = 0\n\n if not classes:\n classes = []\n for subdir in sorted(os.listdir(directory)):\n if os.path.isdir(os.path.join(directory, subdir)):\n classes.append(subdir)\n 
self.num_classes = len(classes)\n self.class_indices = dict(zip(classes, range(len(classes))))\n\n pool = multiprocessing.pool.ThreadPool()\n function_partial = partial(_count_valid_files_in_directory,\n white_list_formats=white_list_formats,\n follow_links=follow_links)\n self.samples = sum(pool.map(function_partial,\n (os.path.join(directory, subdir)\n for subdir in classes)))\n\n print('Found %d images belonging to %d classes.' % (self.samples, self.num_classes))\n\n # second, build an index of the images in the different class subfolders\n results = []\n\n self.filenames = []\n self.classes = np.zeros((self.samples,), dtype='int32')\n i = 0\n for dirpath in (os.path.join(directory, subdir) for subdir in classes):\n results.append(pool.apply_async(_list_valid_filenames_in_directory,\n (dirpath, white_list_formats,\n self.class_indices, follow_links)))\n for res in results:\n classes, filenames = res.get()\n self.classes[i:i + len(classes)] = classes\n self.filenames += filenames\n i += len(classes)\n pool.close()\n pool.join()\n super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed)\n\n def _get_batches_of_transformed_samples(self, index_array):\n\n def applyPCA(X):\n pca = PCA(n_components=100)\n # x_std = StandardScaler().fit_transform(X)\n pca.fit(X)\n\n eigenvalues = pca.explained_variance_\n\n def file_to_array(image_file, fclass):\n new_directory = self.directory.replace('5frames','feature_vector')\n\n file_path = os.path.join(new_directory, 'NoSymmetry', fclass, image_file)\n\n if file_path.split('/')[5] == 'training':\n file_path = file_path.replace('.MP4', '_').replace('.jpg.jpg', '.h5')\n else:\n # file_path = file_path.replace('.jpg.jpg', '.jpg')\n check = Path(file_path.replace('.mp4', '_').replace('.jpg.jpg', '.h5'))\n\n if check.exists():\n file_path = file_path.replace('.mp4', '_').replace('.jpg.jpg', '.h5')\n else:\n file_path = file_path.replace('.jpg.jpg', '.h5')\n \n f = h5py.File(file_path, 'r') \n d = 
np.array(f.get('data'))\n d = d.reshape((4096,))\n\n return d\n\n # batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=backend.floatx())\n grayscale = self.color_mode == 'grayscale'\n if self.h5 == True:\n batch_x = np.zeros((len(index_array),) + (5, 4096), dtype=backend.floatx())\n\n\n # build batch of h5 files\n for i, j in enumerate(index_array):\n fname = self.filenames[j]\n\n # Read h5 files\n f = np.load(os.path.join(self.directory, fname)) \n file_list = [fi for fi in f]\n\n x = np.array([np.array(file_to_array(file, fname.split('/')[0])) for file in file_list])\n\n applyPCA(x)\n\n # Normalize\n batch_x[i] = x\n\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i, j in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n # build batch of labels\n if self.class_mode == 'input':\n batch_y = batch_x.copy()\n elif self.class_mode == 'sparse':\n batch_y = self.classes[index_array]\n elif self.class_mode == 'binary':\n batch_y = self.classes[index_array].astype(backend.floatx())\n elif self.class_mode == 'categorical':\n batch_y = np.zeros((len(batch_x), self.num_classes), dtype=backend.floatx())\n for i, label in enumerate(self.classes[index_array]):\n batch_y[i, label] = 1.\n else:\n return batch_x\n return batch_x, batch_y\n\n def next(self):\n \"\"\"For python 2.x.\n\n # Returns\n The next batch.\n \"\"\"\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)\n" ]
[ [ "numpy.rollaxis", "numpy.dot", "scipy.linalg.svd", "numpy.sqrt", "numpy.asarray", "numpy.max", "numpy.mean", "numpy.random.randint", "numpy.reshape", "numpy.arange", "numpy.stack", "numpy.sin", "numpy.copy", "numpy.std", "numpy.zeros", "scipy.ndimage.interpolation.affine_transform", "numpy.min", "numpy.array", "sklearn.decomposition.PCA", "numpy.random.random", "numpy.random.seed", "numpy.cos", "numpy.random.permutation", "numpy.isscalar", "numpy.prod", "numpy.random.uniform" ] ]
hyeonjames/ray
[ "4ab80eafb9d79ffa2f1ba149fc48d1fdd037c14a" ]
[ "python/ray/tests/test_reference_counting.py" ]
[ "# coding: utf-8\nimport asyncio\nimport copy\nimport json\nimport logging\nimport gc\nimport time\nimport weakref\n\nimport numpy as np\n\nimport pytest\n\nimport ray\nimport ray.cluster_utils\nfrom ray.test_utils import SignalActor, put_object, wait_for_condition\nfrom ray.internal.internal_api import global_gc\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture\ndef one_worker_100MiB(request):\n config = json.dumps({\n \"distributed_ref_counting_enabled\": 1,\n \"object_store_full_max_retries\": 1,\n })\n yield ray.init(\n num_cpus=1,\n object_store_memory=100 * 1024 * 1024,\n _internal_config=config)\n ray.shutdown()\n\n\ndef _fill_object_store_and_get(oid, succeed=True, object_MiB=40,\n num_objects=5):\n for _ in range(num_objects):\n ray.put(np.zeros(object_MiB * 1024 * 1024, dtype=np.uint8))\n\n if type(oid) is bytes:\n oid = ray.ObjectID(oid)\n\n if succeed:\n ray.get(oid)\n else:\n if oid.is_direct_call_type():\n with pytest.raises(ray.exceptions.RayTimeoutError):\n ray.get(oid, timeout=0.1)\n else:\n with pytest.raises(ray.exceptions.UnreconstructableError):\n ray.get(oid)\n\n\ndef _check_refcounts(expected):\n actual = ray.worker.global_worker.core_worker.get_all_reference_counts()\n assert len(expected) == len(actual)\n for object_id, (local, submitted) in expected.items():\n hex_id = object_id.hex().encode(\"ascii\")\n assert hex_id in actual\n assert local == actual[hex_id][\"local\"]\n assert submitted == actual[hex_id][\"submitted\"]\n\n\ndef check_refcounts(expected, timeout=10):\n start = time.time()\n while True:\n try:\n _check_refcounts(expected)\n break\n except AssertionError as e:\n if time.time() - start > timeout:\n raise e\n else:\n time.sleep(0.1)\n\n\ndef test_global_gc(shutdown_only):\n cluster = ray.cluster_utils.Cluster()\n for _ in range(2):\n cluster.add_node(num_cpus=1, num_gpus=0)\n ray.init(address=cluster.address)\n\n class ObjectWithCyclicRef:\n def __init__(self):\n self.loop = self\n\n @ray.remote(num_cpus=1)\n 
class GarbageHolder:\n def __init__(self):\n gc.disable()\n x = ObjectWithCyclicRef()\n self.garbage = weakref.ref(x)\n\n def has_garbage(self):\n return self.garbage() is not None\n\n try:\n gc.disable()\n\n # Local driver.\n local_ref = weakref.ref(ObjectWithCyclicRef())\n\n # Remote workers.\n actors = [GarbageHolder.remote() for _ in range(2)]\n assert local_ref() is not None\n assert all(ray.get([a.has_garbage.remote() for a in actors]))\n\n # GC should be triggered for all workers, including the local driver.\n global_gc()\n\n def check_refs_gced():\n return (local_ref() is None and\n not any(ray.get([a.has_garbage.remote() for a in actors])))\n\n wait_for_condition(check_refs_gced, timeout_ms=10000)\n finally:\n gc.enable()\n\n\ndef test_global_gc_when_full(shutdown_only):\n cluster = ray.cluster_utils.Cluster()\n for _ in range(2):\n cluster.add_node(\n num_cpus=1, num_gpus=0, object_store_memory=100 * 1024 * 1024)\n ray.init(address=cluster.address)\n\n class LargeObjectWithCyclicRef:\n def __init__(self):\n self.loop = self\n self.large_object = ray.put(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8))\n\n @ray.remote(num_cpus=1)\n class GarbageHolder:\n def __init__(self):\n gc.disable()\n x = LargeObjectWithCyclicRef()\n self.garbage = weakref.ref(x)\n\n def has_garbage(self):\n return self.garbage() is not None\n\n def return_large_array(self):\n return np.zeros(80 * 1024 * 1024, dtype=np.uint8)\n\n try:\n gc.disable()\n\n # Local driver.\n local_ref = weakref.ref(LargeObjectWithCyclicRef())\n\n # Remote workers.\n actors = [GarbageHolder.remote() for _ in range(2)]\n assert local_ref() is not None\n assert all(ray.get([a.has_garbage.remote() for a in actors]))\n\n # GC should be triggered for all workers, including the local driver,\n # when the driver tries to ray.put a value that doesn't fit in the\n # object store. 
This should cause the captured ObjectIDs' numpy arrays\n # to be evicted.\n ray.put(np.zeros(80 * 1024 * 1024, dtype=np.uint8))\n\n def check_refs_gced():\n return (local_ref() is None and\n not any(ray.get([a.has_garbage.remote() for a in actors])))\n\n wait_for_condition(check_refs_gced, timeout_ms=10000)\n\n # Local driver.\n local_ref = weakref.ref(LargeObjectWithCyclicRef())\n\n # Remote workers.\n actors = [GarbageHolder.remote() for _ in range(2)]\n\n def check_refs_gced():\n return (local_ref() is None and\n not any(ray.get([a.has_garbage.remote() for a in actors])))\n\n wait_for_condition(check_refs_gced, timeout_ms=10000)\n\n # GC should be triggered for all workers, including the local driver,\n # when a remote task tries to put a return value that doesn't fit in\n # the object store. This should cause the captured ObjectIDs' numpy\n # arrays to be evicted.\n ray.get(actors[0].return_large_array.remote())\n assert local_ref() is None\n assert not any(ray.get([a.has_garbage.remote() for a in actors]))\n finally:\n gc.enable()\n\n\ndef test_local_refcounts(ray_start_regular):\n oid1 = ray.put(None)\n check_refcounts({oid1: (1, 0)})\n oid1_copy = copy.copy(oid1)\n check_refcounts({oid1: (2, 0)})\n del oid1\n check_refcounts({oid1_copy: (1, 0)})\n del oid1_copy\n check_refcounts({})\n\n\ndef test_dependency_refcounts(ray_start_regular):\n @ray.remote\n def one_dep(dep, signal=None, fail=False):\n if signal is not None:\n ray.get(signal.wait.remote())\n if fail:\n raise Exception(\"failed on purpose\")\n\n @ray.remote\n def one_dep_large(dep, signal=None):\n if signal is not None:\n ray.get(signal.wait.remote())\n # This will be spilled to plasma.\n return np.zeros(10 * 1024 * 1024, dtype=np.uint8)\n\n # Test that regular plasma dependency refcounts are decremented once the\n # task finishes.\n signal = SignalActor.remote()\n large_dep = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n result = one_dep.remote(large_dep, signal=signal)\n 
check_refcounts({large_dep: (1, 1), result: (1, 0)})\n ray.get(signal.send.remote())\n # Reference count should be removed once the task finishes.\n check_refcounts({large_dep: (1, 0), result: (1, 0)})\n del large_dep, result\n check_refcounts({})\n\n # Test that inlined dependency refcounts are decremented once they are\n # inlined.\n signal = SignalActor.remote()\n dep = one_dep.remote(None, signal=signal)\n check_refcounts({dep: (1, 0)})\n result = one_dep.remote(dep)\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal.send.remote())\n # Reference count should be removed as soon as the dependency is inlined.\n check_refcounts({dep: (1, 0), result: (1, 0)})\n del dep, result\n check_refcounts({})\n\n # Test that spilled plasma dependency refcounts are decremented once\n # the task finishes.\n signal1, signal2 = SignalActor.remote(), SignalActor.remote()\n dep = one_dep_large.remote(None, signal=signal1)\n check_refcounts({dep: (1, 0)})\n result = one_dep.remote(dep, signal=signal2)\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal1.send.remote())\n ray.get(dep, timeout=10)\n # Reference count should remain because the dependency is in plasma.\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal2.send.remote())\n # Reference count should be removed because the task finished.\n check_refcounts({dep: (1, 0), result: (1, 0)})\n del dep, result\n check_refcounts({})\n\n # Test that regular plasma dependency refcounts are decremented if a task\n # fails.\n signal = SignalActor.remote()\n large_dep = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n result = one_dep.remote(large_dep, signal=signal, fail=True)\n check_refcounts({large_dep: (1, 1), result: (1, 0)})\n ray.get(signal.send.remote())\n # Reference count should be removed once the task finishes.\n check_refcounts({large_dep: (1, 0), result: (1, 0)})\n del large_dep, result\n check_refcounts({})\n\n # Test that spilled plasma dependency refcounts are decremented if 
a task\n # fails.\n signal1, signal2 = SignalActor.remote(), SignalActor.remote()\n dep = one_dep_large.remote(None, signal=signal1)\n check_refcounts({dep: (1, 0)})\n result = one_dep.remote(dep, signal=signal2, fail=True)\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal1.send.remote())\n ray.get(dep, timeout=10)\n # Reference count should remain because the dependency is in plasma.\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal2.send.remote())\n # Reference count should be removed because the task finished.\n check_refcounts({dep: (1, 0), result: (1, 0)})\n del dep, result\n check_refcounts({})\n\n\ndef test_basic_pinning(one_worker_100MiB):\n @ray.remote\n def f(array):\n return np.sum(array)\n\n @ray.remote\n class Actor(object):\n def __init__(self):\n # Hold a long-lived reference to a ray.put object's ID. The object\n # should not be garbage collected while the actor is alive because\n # the object is pinned by the raylet.\n self.large_object = ray.put(\n np.zeros(25 * 1024 * 1024, dtype=np.uint8))\n\n def get_large_object(self):\n return ray.get(self.large_object)\n\n actor = Actor.remote()\n\n # Fill up the object store with short-lived objects. 
These should be\n # evicted before the long-lived object whose reference is held by\n # the actor.\n for batch in range(10):\n intermediate_result = f.remote(\n np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n ray.get(intermediate_result)\n\n # The ray.get below would fail with only LRU eviction, as the object\n # that was ray.put by the actor would have been evicted.\n ray.get(actor.get_large_object.remote())\n\n\ndef test_pending_task_dependency_pinning(one_worker_100MiB):\n @ray.remote\n def pending(input1, input2):\n return\n\n # The object that is ray.put here will go out of scope immediately, so if\n # pending task dependencies aren't considered, it will be evicted before\n # the ray.get below due to the subsequent ray.puts that fill up the object\n # store.\n np_array = np.zeros(40 * 1024 * 1024, dtype=np.uint8)\n signal = SignalActor.remote()\n oid = pending.remote(np_array, signal.wait.remote())\n\n for _ in range(2):\n ray.put(np.zeros(40 * 1024 * 1024, dtype=np.uint8))\n\n ray.get(signal.send.remote())\n ray.get(oid)\n\n\ndef test_feature_flag(shutdown_only):\n ray.init(\n object_store_memory=100 * 1024 * 1024,\n _internal_config=json.dumps({\n \"object_pinning_enabled\": 0\n }))\n\n @ray.remote\n def f(array):\n return np.sum(array)\n\n @ray.remote\n class Actor(object):\n def __init__(self):\n self.large_object = ray.put(\n np.zeros(25 * 1024 * 1024, dtype=np.uint8))\n\n def wait_for_actor_to_start(self):\n pass\n\n def get_large_object(self):\n return ray.get(self.large_object)\n\n actor = Actor.remote()\n ray.get(actor.wait_for_actor_to_start.remote())\n\n # The ray.get below fails with only LRU eviction, as the object\n # that was ray.put by the actor should have been evicted.\n _fill_object_store_and_get(actor.get_large_object.remote(), succeed=False)\n\n\n# Remote function takes serialized reference and doesn't hold onto it after\n# finishing. 
Referenced object shouldn't be evicted while the task is pending\n# and should be evicted after it returns.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_basic_serialized_reference(one_worker_100MiB, use_ray_put):\n @ray.remote\n def pending(ref, dep):\n ray.get(ref[0])\n\n array_oid = put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n signal = SignalActor.remote()\n oid = pending.remote([array_oid], signal.wait.remote())\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n # Check that the remote reference pins the object.\n _fill_object_store_and_get(array_oid_bytes)\n\n # Fulfill the dependency, causing the task to finish.\n ray.get(signal.send.remote())\n ray.get(oid)\n\n # Reference should be gone, check that array gets evicted.\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Call a recursive chain of tasks that pass a serialized reference to the end\n# of the chain. The reference should still exist while the final task in the\n# chain is running and should be removed once it finishes.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_recursive_serialized_reference(one_worker_100MiB, use_ray_put):\n @ray.remote(num_cpus=0)\n class Signal:\n def __init__(self):\n self.ready_event = asyncio.Event()\n\n def send(self):\n self.ready_event.set()\n\n async def wait(self):\n await self.ready_event.wait()\n\n @ray.remote\n def recursive(ref, signal, max_depth, depth=0):\n ray.get(ref[0])\n if depth == max_depth:\n return ray.get(signal.wait.remote())\n else:\n return recursive.remote(ref, signal, max_depth, depth + 1)\n\n signal = SignalActor.remote()\n\n max_depth = 5\n array_oid = put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n head_oid = recursive.remote([array_oid], signal, max_depth)\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n tail_oid = head_oid\n for _ in 
range(max_depth):\n tail_oid = ray.get(tail_oid)\n\n # Check that the remote reference pins the object.\n _fill_object_store_and_get(array_oid_bytes)\n\n # Fulfill the dependency, causing the tail task to finish.\n ray.get(signal.send.remote())\n assert ray.get(tail_oid) is None\n\n # Reference should be gone, check that array gets evicted.\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Test that a passed reference held by an actor after the method finishes\n# is kept until the reference is removed from the actor. Also tests giving\n# the actor a duplicate reference to the same object ID.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_actor_holding_serialized_reference(one_worker_100MiB, use_ray_put):\n @ray.remote\n class GreedyActor(object):\n def __init__(self):\n pass\n\n def set_ref1(self, ref):\n self.ref1 = ref\n\n def add_ref2(self, new_ref):\n self.ref2 = new_ref\n\n def delete_ref1(self):\n self.ref1 = None\n\n def delete_ref2(self):\n self.ref2 = None\n\n # Test that the reference held by the actor isn't evicted.\n array_oid = put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n actor = GreedyActor.remote()\n actor.set_ref1.remote([array_oid])\n\n # Test that giving the same actor a duplicate reference works.\n ray.get(actor.add_ref2.remote([array_oid]))\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n # Test that the remote references still pin the object.\n _fill_object_store_and_get(array_oid_bytes)\n\n # Test that removing only the first reference doesn't unpin the object.\n ray.get(actor.delete_ref1.remote())\n _fill_object_store_and_get(array_oid_bytes)\n\n # Test that deleting the second reference stops it from being pinned.\n ray.get(actor.delete_ref2.remote())\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Test that a passed reference held by an actor after a task finishes\n# is kept until the reference is removed from 
the worker. Also tests giving\n# the worker a duplicate reference to the same object ID.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_worker_holding_serialized_reference(one_worker_100MiB, use_ray_put):\n @ray.remote(num_cpus=0)\n class Signal:\n def __init__(self):\n self.ready_event = asyncio.Event()\n\n def send(self):\n self.ready_event.set()\n\n async def wait(self):\n await self.ready_event.wait()\n\n @ray.remote\n def child(dep1, dep2):\n return\n\n @ray.remote\n def launch_pending_task(ref, signal):\n return child.remote(ref[0], signal.wait.remote())\n\n signal = SignalActor.remote()\n\n # Test that the reference held by the actor isn't evicted.\n array_oid = put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n child_return_id = ray.get(launch_pending_task.remote([array_oid], signal))\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n # Test that the reference prevents the object from being evicted.\n _fill_object_store_and_get(array_oid_bytes)\n\n ray.get(signal.send.remote())\n ray.get(child_return_id)\n del child_return_id\n\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Test that an object containing object IDs within it pins the inner IDs.\ndef test_basic_nested_ids(one_worker_100MiB):\n inner_oid = ray.put(np.zeros(40 * 1024 * 1024, dtype=np.uint8))\n outer_oid = ray.put([inner_oid])\n\n # Remove the local reference to the inner object.\n inner_oid_bytes = inner_oid.binary()\n del inner_oid\n\n # Check that the outer reference pins the inner object.\n _fill_object_store_and_get(inner_oid_bytes)\n\n # Remove the outer reference and check that the inner object gets evicted.\n del outer_oid\n _fill_object_store_and_get(inner_oid_bytes, succeed=False)\n\n\n# Test that an object containing object IDs within it pins the inner IDs\n# recursively and for submitted tasks.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef 
test_recursively_nest_ids(one_worker_100MiB, use_ray_put):\n @ray.remote(num_cpus=0)\n class Signal:\n def __init__(self):\n self.ready_event = asyncio.Event()\n\n def send(self):\n self.ready_event.set()\n\n async def wait(self):\n await self.ready_event.wait()\n\n @ray.remote\n def recursive(ref, signal, max_depth, depth=0):\n unwrapped = ray.get(ref[0])\n if depth == max_depth:\n return ray.get(signal.wait.remote())\n else:\n return recursive.remote(unwrapped, signal, max_depth, depth + 1)\n\n signal = SignalActor.remote()\n\n max_depth = 5\n array_oid = put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n nested_oid = array_oid\n for _ in range(max_depth):\n nested_oid = ray.put([nested_oid])\n head_oid = recursive.remote([nested_oid], signal, max_depth)\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid, nested_oid\n\n tail_oid = head_oid\n for _ in range(max_depth):\n tail_oid = ray.get(tail_oid)\n\n # Check that the remote reference pins the object.\n _fill_object_store_and_get(array_oid_bytes)\n\n # Fulfill the dependency, causing the tail task to finish.\n ray.get(signal.send.remote())\n ray.get(tail_oid)\n\n # Reference should be gone, check that array gets evicted.\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Test that serialized objectIDs returned from remote tasks are pinned until\n# they go out of scope on the caller side.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_return_object_id(one_worker_100MiB, use_ray_put):\n @ray.remote\n def return_an_id():\n return [\n put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n ]\n\n outer_oid = return_an_id.remote()\n inner_oid_binary = ray.get(outer_oid)[0].binary()\n\n # Check that the inner ID is pinned by the outer ID.\n _fill_object_store_and_get(inner_oid_binary)\n\n # Check that taking a reference to the inner ID and removing the outer ID\n # doesn't unpin the object.\n inner_oid = 
ray.get(outer_oid)[0]\n del outer_oid\n _fill_object_store_and_get(inner_oid_binary)\n\n # Check that removing the inner ID unpins the object.\n del inner_oid\n _fill_object_store_and_get(inner_oid_binary, succeed=False)\n\n\n# Test that serialized objectIDs returned from remote tasks are pinned if\n# passed into another remote task by the caller.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_pass_returned_object_id(one_worker_100MiB, use_ray_put):\n @ray.remote(num_cpus=0)\n class Signal:\n def __init__(self):\n self.ready_event = asyncio.Event()\n\n def send(self):\n self.ready_event.set()\n\n async def wait(self):\n await self.ready_event.wait()\n\n @ray.remote\n def put():\n return\n\n @ray.remote\n def return_an_id():\n return [\n put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n ]\n\n @ray.remote\n def pending(ref):\n ray.get(ref[0])\n return ref[0]\n\n signal = SignalActor.remote()\n outer_oid = return_an_id.remote()\n pending_oid = pending.remote([outer_oid])\n\n # Remove the local reference to the returned ID.\n del outer_oid\n\n # Check that the inner ID is pinned by the remote task ID.\n _fill_object_store_and_get(pending_oid, succeed=False)\n ray.get(signal.send.remote())\n inner_oid = ray.get(pending_oid)\n inner_oid_binary = inner_oid.binary()\n _fill_object_store_and_get(inner_oid_binary)\n\n del pending_oid\n del inner_oid\n _fill_object_store_and_get(inner_oid_binary, succeed=False)\n\n\n# Call a recursive chain of tasks that pass a serialized reference that was\n# returned by another task to the end of the chain. 
The reference should still\n# exist while the final task in the chain is running and should be removed once\n# it finishes.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_recursively_pass_returned_object_id(one_worker_100MiB, use_ray_put):\n @ray.remote(num_cpus=0)\n class Signal:\n def __init__(self):\n self.ready_event = asyncio.Event()\n\n def send(self):\n self.ready_event.set()\n\n async def wait(self):\n await self.ready_event.wait()\n\n @ray.remote\n def return_an_id():\n return [\n put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n ]\n\n @ray.remote\n def recursive(ref, signal, max_depth, depth=0):\n ray.get(ref[0])\n if depth == max_depth:\n return ray.get(signal.wait.remote())\n else:\n return recursive.remote(ref, signal, max_depth, depth + 1)\n\n max_depth = 5\n outer_oid = return_an_id.remote()\n inner_oid_bytes = ray.get(outer_oid)[0].binary()\n signal = SignalActor.remote()\n head_oid = recursive.remote([outer_oid], signal, max_depth)\n\n # Remove the local reference.\n del outer_oid\n\n tail_oid = head_oid\n for _ in range(max_depth):\n tail_oid = ray.get(tail_oid)\n\n # Check that the remote reference pins the object.\n _fill_object_store_and_get(inner_oid_bytes)\n\n # Fulfill the dependency, causing the tail task to finish.\n ray.get(signal.send.remote())\n ray.get(tail_oid)\n\n # Reference should be gone, check that returned ID gets evicted.\n _fill_object_store_and_get(inner_oid_bytes, succeed=False)\n\n\n# Call a recursive chain of tasks. The final task in the chain returns an\n# ObjectID returned by a task that it submitted. Every other task in the chain\n# returns the same ObjectID by calling ray.get() on its submitted task and\n# returning the result. 
The reference should still exist while the driver has a\n# reference to the final task's ObjectID.\n@pytest.mark.parametrize(\"use_ray_put\", [False, True])\ndef test_recursively_return_borrowed_object_id(one_worker_100MiB, use_ray_put):\n @ray.remote\n def recursive(num_tasks_left):\n if num_tasks_left == 0:\n return put_object(\n np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n\n final_id = ray.get(recursive.remote(num_tasks_left - 1))\n ray.get(final_id)\n return final_id\n\n max_depth = 5\n head_oid = recursive.remote(max_depth)\n final_oid = ray.get(head_oid)\n final_oid_bytes = final_oid.binary()\n\n # Check that the driver's reference pins the object.\n _fill_object_store_and_get(final_oid_bytes)\n\n # Remove the local reference and try it again.\n final_oid = ray.get(head_oid)\n _fill_object_store_and_get(final_oid_bytes)\n\n # Remove all references.\n del head_oid\n del final_oid\n # Reference should be gone, check that returned ID gets evicted.\n _fill_object_store_and_get(final_oid_bytes, succeed=False)\n\n\ndef test_out_of_band_serialized_object_id(one_worker_100MiB):\n assert len(\n ray.worker.global_worker.core_worker.get_all_reference_counts()) == 0\n oid = ray.put(\"hello\")\n _check_refcounts({oid: (1, 0)})\n oid_str = ray.cloudpickle.dumps(oid)\n _check_refcounts({oid: (2, 0)})\n del oid\n assert len(\n ray.worker.global_worker.core_worker.get_all_reference_counts()) == 1\n assert ray.get(ray.cloudpickle.loads(oid_str)) == \"hello\"\n\n\ndef test_captured_object_id(one_worker_100MiB):\n captured_id = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n\n @ray.remote\n def f(signal):\n ray.get(signal.wait.remote())\n ray.get(captured_id) # noqa: F821\n\n signal = SignalActor.remote()\n oid = f.remote(signal)\n\n # Delete local references.\n del f\n del captured_id\n\n # Test that the captured object ID is pinned despite having no local\n # references.\n ray.get(signal.send.remote())\n _fill_object_store_and_get(oid)\n\n captured_id = 
ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n\n @ray.remote\n class Actor:\n def get(self, signal):\n ray.get(signal.wait.remote())\n ray.get(captured_id) # noqa: F821\n\n signal = SignalActor.remote()\n actor = Actor.remote()\n oid = actor.get.remote(signal)\n\n # Delete local references.\n del Actor\n del captured_id\n\n # Test that the captured object ID is pinned despite having no local\n # references.\n ray.get(signal.send.remote())\n _fill_object_store_and_get(oid)\n\n\nif __name__ == \"__main__\":\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n" ]
[ [ "numpy.zeros", "numpy.sum" ] ]
ducouloa/ml4ir
[ "75aeecaff11682a7bd71c5521e59c449c43c3f9f" ]
[ "python/ml4ir/base/model/architectures/dnn.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom typing import List\n\nfrom ml4ir.base.features.feature_config import FeatureConfig\nfrom ml4ir.base.features.feature_fns.categorical import get_vocabulary_info\nfrom ml4ir.base.io.file_io import FileIO\n\n\nOOV = 1\n\n\n\nclass DNNLayer:\n DENSE = \"dense\"\n BATCH_NORMALIZATION = \"batch_norm\"\n DROPOUT = \"dropout\"\n ACTIVATION = \"activation\"\n\n\nclass DNN:\n def __init__(self, model_config: dict, feature_config: FeatureConfig, file_io):\n self.file_io: FileIO = file_io\n self.layer_ops: List = self.define_architecture(model_config, feature_config)\n\n def define_architecture(self, model_config: dict, feature_config: FeatureConfig):\n \"\"\"\n Convert the model from model_config to a List of tensorflow.keras.layer\n\n :param model_config: dict corresponding to the model config\n :param feature_config: dict corresponding to the feature config, only used in case of classification if the last\n layer of the model_config doesn't have a units number defined (or set to -1). In which case we retrieve the\n label vocabulary defined in the feature_config to deduce the number of units.\n :return: List[layers]: list of keras layer corresponding to each of the layers defined in the model_config.\n \"\"\"\n def get_op(layer_type, layer_args):\n if layer_type == DNNLayer.DENSE:\n if not \"units\" in layer_args or layer_args[\"units\"] == -1:\n try:\n label_feature_info = feature_config.get_label()\n vocabulary_keys, vocabulary_ids = get_vocabulary_info(label_feature_info, self.file_io)\n layer_args[\"units\"] = len(vocabulary_keys) + OOV\n except:\n raise KeyError(\"We were not able to find information for the output layer of your DNN. 
\"\n \"Try specifying the number of output units either by passing \\\"units\\\" in the \"\n \"model configuration yaml file or units in the feature configuration file.\")\n return layers.Dense(**layer_args)\n elif layer_type == DNNLayer.BATCH_NORMALIZATION:\n return layers.BatchNormalization(**layer_args)\n elif layer_type == DNNLayer.DROPOUT:\n return layers.Dropout(**layer_args)\n elif layer_type == DNNLayer.ACTIVATION:\n return layers.Activation(**layer_args)\n else:\n raise KeyError(\"Layer type is not supported : {}\".format(layer_type))\n\n return [\n get_op(layer_args[\"type\"], {k: v for k, v in layer_args.items() if k not in \"type\"})\n for layer_args in model_config[\"layers\"]\n ]\n\n def get_architecture_op(self):\n def _architecture_op(ranking_features):\n layer_input = ranking_features\n\n # Pass ranking features through all the layers of the DNN\n for layer_op in self.layer_ops:\n layer_input = layer_op(layer_input)\n\n # Collapse extra dimensions\n if isinstance(self.layer_ops[-1], layers.Dense) and (self.layer_ops[-1].units == 1):\n scores = tf.squeeze(layer_input, axis=-1)\n else:\n scores = layer_input\n\n return scores\n\n return _architecture_op\n" ]
[ [ "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Dense", "tensorflow.squeeze", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout" ] ]
cemlyn007/mindboggle
[ "947d4b3f41fb7a24c079550c7255c4d16939d740" ]
[ "mindboggle/shapes/zernike/pipelines.py" ]
[ "from __future__ import division\n\nimport logging\n\nimport numpy as np\nimport scipy\nfrom scipy.special import (factorial,\n comb as nchoosek,\n )\n\nfrom mindboggle.shapes.zernike.helpers import nest, autocat\n\nLOG = logging.getLogger(__name__)\n\nIMAG_CONST = scipy.sqrt(-1)\nPI_CONST = np.pi\nNAN_CONST = np.NaN\n\n\nclass Pipeline(object):\n\n def geometric_moments_approx(self, points_array, faces_array, N):\n raise NotImplementedError()\n\n def geometric_moments_exact(self, points_array, faces_array, N):\n raise NotImplementedError()\n\n\nclass SerialPipeline(Pipeline):\n\n def geometric_moments_exact(self, points_array, faces_array, N):\n n_facets, n_vertices = faces_array.shape[:2]\n assert n_vertices == 3\n moments_array = np.zeros([N + 1, N + 1, N + 1])\n monomial_array = self.monomial_precalc(points_array, N)\n for face in faces_array:\n vertex_list = [points_array[_i, ...] for _i in face]\n Cf_list = [monomial_array[_i, ...] for _i in face]\n Vf = self.facet_volume(vertex_list)\n moments_array += Vf * self.term_Sijk(Cf_list, N)\n return self.factorial_scalar(N) * moments_array\n\n def factorial_scalar(self, N):\n i, j, k = np.mgrid[0:N + 1, 0:N + 1, 0:N + 1]\n return factorial(i) * factorial(j) * factorial(k) / (factorial(i + j + k + 2) * (i + j + k + 3))\n\n def monomial_precalc(self, points_array, N):\n n_points = points_array.shape[0]\n monomial_array = np.zeros([n_points, N + 1, N + 1, N + 1])\n tri_array = self.trinomial_precalc(N)\n for point_indx, point in enumerate(points_array):\n monomial_array[point_indx, ...] 
= self.mon_comb(\n point, tri_array, N)\n return monomial_array\n\n def mon_comb(self, vertex, tri_array, N, out=None):\n x, y, z = vertex\n c = np.zeros([N + 1, N + 1, N + 1])\n for i, j, k in nest(lambda: range(N + 1),\n lambda _i: range(N - _i + 1),\n lambda _i, _j: range(N - _i - _j + 1),\n ):\n c[i, j, k] = tri_array[i, j, k] * \\\n np.power(x, i) * np.power(y, j) * np.power(z, k)\n return c\n\n def term_Sijk(self, Cf_list, N):\n S = np.zeros([N + 1, N + 1, N + 1])\n C0, C1, C2 = Cf_list\n Dabc = self.term_Dabc(C1, C2, N)\n for i, j, k, ii, jj, kk in nest(lambda: range(N + 1),\n lambda _i: range(N - _i + 1),\n lambda _i, _j: range(N - _i - _j + 1),\n lambda _i, _j, _k: range(_i + 1),\n lambda _i, _j, _k, _ii: range(_j + 1),\n lambda _i, _j, _k, _ii, _jj: range(\n _k + 1),\n ):\n S[i, j, k] += C0[ii, jj, kk] * Dabc[i - ii, j - jj, k - kk]\n return S\n\n def trinomial_precalc(self, N):\n tri_array = np.zeros([N + 1, N + 1, N + 1])\n for i, j, k in nest(lambda: range(N + 1),\n lambda _i: range(N - _i + 1),\n lambda _i, _j: range(N - _i - _j + 1)\n ):\n tri_array[i, j, k] = self.trinomial(i, j, k)\n return tri_array\n\n def trinomial(self, i, j, k):\n return factorial(i + j + k) / (factorial(i) * factorial(j) * factorial(k))\n\n def facet_volume(self, vertex_list):\n return np.linalg.det(autocat(vertex_list, axis=1))\n\n def term_Dabc(self, C1, C2, N):\n D = np.zeros([N + 1, N + 1, N + 1])\n for i, j, k, ii, jj, kk in nest(lambda: range(N + 1),\n lambda _i: range(N + 1),\n lambda _i, _j: range(N + 1),\n lambda _i, _j, _k: range(_i + 1),\n lambda _i, _j, _k, _ii: range(_j + 1),\n lambda _i, _j, _k, _ii, _jj: range(\n _k + 1)\n ):\n D[i, j, k] += C1[ii, jj, kk] * C2[i - ii, j - jj, k - kk]\n return D\n\n def zernike(self, G, N):\n V = np.zeros([N + 1, N + 1, N + 1], dtype=complex)\n for a, b, c, alpha in nest(lambda: range(int(N / 2) + 1),\n lambda _a: range(N - 2 * _a + 1),\n lambda _a, _b: range(N - 2 * _a - _b + 1),\n lambda _a, _b, _c: range(_a + _c + 1),\n ):\n 
V[a, b, c] += np.power(IMAG_CONST, alpha) * \\\n nchoosek(a + c, alpha) * G[2 * a + c - alpha, alpha, b]\n\n W = np.zeros([N + 1, N + 1, N + 1], dtype=complex)\n for a, b, c, alpha in nest(lambda: range(int(N / 2) + 1),\n lambda _a: range(N - 2 * _a + 1),\n lambda _a, _b: range(N - 2 * _a - _b + 1),\n lambda _a, _b, _c: range(_a + 1),\n ):\n W[a, b, c] += np.power(-1, alpha) * np.power(2, a - alpha) * \\\n nchoosek(a, alpha) * V[a - alpha, b, c + 2 * alpha]\n\n X = np.zeros([N + 1, N + 1, N + 1], dtype=complex)\n for a, b, c, alpha in nest(lambda: range(int(N / 2) + 1),\n lambda _a: range(N - 2 * _a + 1),\n lambda _a, _b: range(N - 2 * _a - _b + 1),\n lambda _a, _b, _c: range(_a + 1),\n ):\n X[a, b, c] += nchoosek(a, alpha) * W[a - alpha, b + 2 * alpha, c]\n\n Y = np.zeros([N + 1, N + 1, N + 1], dtype=complex)\n for l, nu, m, j in nest(lambda: range(N + 1),\n lambda _l: range(int((N - _l) / 2) + 1),\n lambda _l, _nu: range(_l + 1),\n lambda _l, _nu, _m: range(int((_l - _m) / 2) + 1),\n ):\n Y[l, nu, m] += self.Yljm(l, j, m) * X[nu + j, l - m - 2 * j, m]\n\n Z = np.zeros([N + 1, N + 1, N + 1], dtype=complex)\n for n, l, m, nu, in nest(lambda: range(N + 1),\n lambda _n: range(_n + 1),\n # there's an if...mod missing in this but it\n # still works?\n lambda _n, _l: range(_l + 1),\n lambda _n, _l, _m: range(int((_n - _l) / 2) + 1),\n ):\n # integer required for k when used as power in Qklnu below:\n k = int((n - l) / 2)\n Z[n, l, m] += (3 / (4 * PI_CONST)) * \\\n self.Qklnu(k, l, nu) * np.conj(Y[l, nu, m])\n\n for n, l, m in nest(lambda: range(N + 1),\n lambda _n: range(n + 1),\n lambda _n, _l: range(l + 1),\n ):\n if np.mod(np.sum([n, l, m]), 2) == 0:\n Z[n, l, m] = np.real(\n Z[n, l, m]) - np.imag(Z[n, l, m]) * IMAG_CONST\n else:\n Z[n, l, m] = -np.real(Z[n, l, m]) + \\\n np.imag(Z[n, l, m]) * IMAG_CONST\n\n return Z\n\n def Yljm(self, l, j, m):\n aux_1 = np.power(-1, j) * (np.sqrt(2 * l + 1) / np.power(2, l))\n aux_2 = self.trinomial(\n m, j, l - m - 2 * j) * 
nchoosek(2 * (l - j), l - j)\n aux_3 = np.sqrt(self.trinomial(m, m, l - m))\n y = (aux_1 * aux_2) / aux_3\n return y\n\n def Qklnu(self, k, l, nu):\n aux_1 = np.power(-1, k + nu) / np.float(np.power(4, k))\n aux_2 = np.sqrt((2 * l + 4 * k + 3) / 3.0)\n aux_3 = self.trinomial(\n nu, k - nu, l + nu + 1) * nchoosek(2 * (l + nu + 1 + k), l + nu + 1 + k)\n aux_4 = nchoosek(2.0 * (l + nu + 1), l + nu + 1)\n return (aux_1 * aux_2 * aux_3) / aux_4\n\n def feature_extraction(self, Z, N):\n F = np.zeros([N + 1, N + 1]) - 1 # +NAN_CONST\n for n in range(N + 1):\n for l in range(n + 1):\n if np.mod(n - l, 2) != 0:\n continue\n aux_1 = Z[n, l, 0:(l + 1)]\n if l > 0:\n aux_2 = np.conj(aux_1[1:(l + 1)])\n for m in range(0, l):\n aux_2[m] = aux_2[m] * np.power(-1, m + 1)\n aux_2 = np.flipud(aux_2)\n aux_1 = np.concatenate([aux_2, aux_1])\n F[n, l] = np.linalg.norm(aux_1, ord=2)\n F = F.transpose()\n return F[F >= 0]\n\n\nimport multiprocessing as mp\n\n\ndef _mp_geometric_moments_exact_worker(pipeline, vertex_list, Cf_list, N):\n Vf = pipeline.facet_volume(vertex_list) # volume of the whole face\n return Vf * pipeline.term_Sijk(Cf_list, N)\n\n\ndef _mp_mon_comb_worker(pipeline, *args, **dargs):\n return pipeline.mon_comb(*args, **dargs)\n\n\nclass MultiprocPipeline(SerialPipeline):\n\n def geometric_moments_exact(self, points_array, faces_array, N, max_workers=None):\n n_facets, n_vertices = faces_array.shape[:2]\n assert n_vertices == 3\n moments_array = np.zeros([N + 1, N + 1, N + 1])\n monomial_array = self.monomial_precalc(points_array, N)\n process_pool = mp.Pool(processes=max_workers)\n for face in faces_array:\n vertex_list = [points_array[_i, ...] for _i in face]\n monomial_list = [monomial_array[_i, ...] 
for _i in face]\n process_pool.apply_async(_mp_geometric_moments_exact_worker,\n args=(self, vertex_list, monomial_list, N),\n callback=moments_array.__iadd__,\n )\n process_pool.close()\n process_pool.join()\n return self.factorial_scalar(N) * moments_array\n\n def monomial_precalc(self, points_array, N, max_workers=None):\n n_points = points_array.shape[0]\n monomial_array = np.zeros([n_points, N + 1, N + 1, N + 1])\n tri_array = self.trinomial_precalc(N)\n process_pool = mp.Pool(processes=max_workers)\n for point_indx, point in enumerate(points_array):\n def get_callback(_i):\n def __callback(result):\n monomial_array[_i, ...] = result\n\n return __callback\n\n process_pool.apply_async(_mp_mon_comb_worker,\n args=(self, point, tri_array, N),\n callback=get_callback(point_indx),\n )\n process_pool.close()\n process_pool.join()\n return monomial_array\n\n\nimport itertools as it\n\n\ndef threeD_reversed(C):\n return C[::-1, ::-1, ::-1]\n\n\nclass NumpyOptimizations(Pipeline):\n\n def term_Dabc(self, C1, C2, N):\n D = np.zeros_like(C1)\n for a, b, c in it.product(range(N + 1), repeat=3):\n c1 = C1[:a + 1, :b + 1, :c + 1]\n c2 = threeD_reversed(C2[:a + 1, :b + 1, :c + 1])\n D[a, b, c] = np.sum(c1 * c2)\n return D\n\n def term_Sijk(self, Cf_list, N):\n S = np.zeros([N + 1, N + 1, N + 1])\n C0, C1, C2 = Cf_list\n Dabc = self.term_Dabc(C1, C2, N)\n for i, j, k in nest(lambda: range(N + 1),\n lambda _i: range(N - _i + 1),\n lambda _i, _j: range(N - _i - _j + 1),\n ):\n C_ijk = C0[:i + 1, :j + 1, :k + 1]\n D_ijk = threeD_reversed(Dabc[:i + 1, :j + 1, :k + 1])\n S[i, j, k] += np.sum(C_ijk * D_ijk)\n return S\n\n def trinomial_precalc(self, N):\n i, k, j = np.mgrid[0:N + 1, 0:N + 1, 0:N + 1]\n return factorial(i + j + k) / (factorial(i) * factorial(j) * factorial(k))\n\n def mon_comb(self, vertex, tri_array, N):\n i, j, k = np.mgrid[0:N + 1, 0:N + 1, 0:N + 1]\n x, y, z = vertex\n return tri_array * (x ** i) * (y ** j) * (z ** k)\n\n\nclass KoehlOptimizations(Pipeline):\n\n 
def geometric_moments_exact(self, points_array, faces_array, N):\n n_facets, n_vertices = faces_array.shape[:2]\n assert n_vertices == 3\n moments_array = np.zeros([N + 1, N + 1, N + 1])\n for face in faces_array:\n vertex_list = [points_array[_i, ...] for _i in face]\n moments_array += self.facet_contribution(vertex_list, N)\n return self.factorial_scalar(N) * moments_array\n\n def facet_contribution(self, vertex_list, N):\n Vf = self.facet_volume(vertex_list)\n Cf = self.term_Cijk(vertex_list[2], N)\n Df = self.term_Dijk(vertex_list[1], N, Cf)\n return Vf * self.term_Sijk(vertex_list[0], N, Df)\n\n def term_Cijk(self, vertex, N):\n return self.work_loop(vertex, N)\n\n def term_Dijk(self, vertex, N, Cijk):\n return self.work_loop(vertex, N, Cijk)\n\n def term_Sijk(self, vertex, N, Dijk):\n return self.work_loop(vertex, N, Dijk)\n\n def work_loop(self, vertex, N, prev=None):\n R = prev\n if R is None:\n R = np.zeros([N + 1, N + 1, N + 1])\n Q = np.zeros([N + 1, N + 1, N + 1])\n Q[0, 0, 0] = 1.0\n\n recursion_term = lambda _X, x_y_z, mask: \\\n np.roll(_X, 1, axis=0)[mask] * x_y_z[0] + \\\n np.roll(_X, 1, axis=1)[mask] * x_y_z[1] + \\\n np.roll(_X, 1, axis=2)[mask] * x_y_z[2]\n i, j, k = np.mgrid[:N + 1, :N + 1, :N + 1]\n order = (i + j + k)\n for n in range(N):\n mask = (order == n + 1)\n _Q = recursion_term(Q, vertex, mask)\n Q[mask] = _Q + R[mask]\n return Q\n\n\ndef _kmp_geometric_moments_exact_worker(self, vertex_list, N):\n return self.facet_contribution(vertex_list, N)\n\n\nclass KoehlMultiproc(KoehlOptimizations):\n def geometric_moments_exact(self, points_array, faces_array, N, max_workers=None):\n n_facets, n_vertices = faces_array.shape[:2]\n assert n_vertices == 3\n moments_array = np.zeros([N + 1, N + 1, N + 1])\n process_pool = mp.Pool(processes=max_workers)\n for face in faces_array:\n vertex_list = [points_array[_i, ...] 
for _i in face]\n process_pool.apply_async(_kmp_geometric_moments_exact_worker,\n args=(self, vertex_list, N),\n callback=moments_array.__iadd__,\n )\n process_pool.close()\n process_pool.join()\n return self.factorial_scalar(N) * moments_array\n\n\n# DefaultPipeline = type('DefaultPipeline', (SerialPipeline,), {})\n# DefaultPipeline = type(\n# 'DefaultPipeline', (NumpyOptimizations, MultiprocPipeline,), {})\nDefaultPipeline = type(\n 'DefaultPipeline', (KoehlOptimizations, SerialPipeline), {}\n)\n# DefaultPipeline = type(\n# 'DefaultPipeline', (KoehlMultiproc, SerialPipeline), {}\n# )\n" ]
[ [ "numpy.imag", "numpy.sqrt", "numpy.conj", "numpy.power", "numpy.flipud", "numpy.linalg.norm", "scipy.special.comb", "scipy.sqrt", "numpy.real", "numpy.concatenate", "numpy.zeros_like", "scipy.special.factorial", "numpy.mod", "numpy.roll", "numpy.zeros", "numpy.sum" ] ]
shaymargolis/PCC-RL
[ "947a0016480db57a3bd1f96f3f892180d707496b" ]
[ "src/gym/test_vis_multiple.py" ]
[ "# Copyright 2019 Nathan Jay and Noga Rotman\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\nimport inspect\nimport random\nimport matplotlib.pyplot as plt\n\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\npparentdir = os.path.dirname(parentdir)\nsys.path.insert(0,pparentdir)\n\nfrom src.gym.parameter_readme import create_readmefile\nfrom src.gym.parameter_extractor import extract_parameters\n\nfrom src.common.simple_arg_parse import arg_or_default\n\nfrom src.gym.network_creator import get_env, get_ogd_worker, get_agent_reward_calculator\n\nfrom src.gym.worker.aurora_worker import AuroraWorker\nfrom src.gym.worker.ogd_worker import OGDWorker\nfrom src.gym.worker.two_point_ogd_worker import TwoPointOGDWorker\nfrom src.gym.worker.combining_worker import CombiningWorker\n\nfrom src.gym.visualizer.multiple_sender_visualizer import MultipleSenderVisualizer\nfrom src.gym.visualizer.multiple_sender_stats_visualizer import MultipleSenderStatsVisualizer\n\nNUMBER_OF_EPOCHES = 10\nTIMES = 15000\nbws = [400]\n\nparams = extract_parameters()\n\ncomb_kwargs = params[\"comb_kwargs\"]\ntwo_point_kwargs = params[\"two_point_kwargs\"]\nOUTPUT = params[\"output\"]\noffset = params[\"offset\"]\n\n# Fix race cond bug\nif params[\"concurrent\"] == 1:\n import matplotlib\n matplotlib.use('Agg')\n\ncreate_readmefile(params)\n\nfor i in range(NUMBER_OF_EPOCHES):\n env = 
get_env(bws, 2, params[\"reward_type\"])\n\n print(\"ENV\", env)\n\n model = CombiningWorker(\n (80, 450),\n env,\n [\n AuroraWorker(\"./\" + params[\"aurora_agent\"], env, (80, 450)),\n get_ogd_worker(params[\"ogd_worker\"], env, (80, 450), C=11 * 450, L=20, sender_id=0, **two_point_kwargs)\n ],\n reward_calculator=get_agent_reward_calculator(params[\"agent_reward\"]),\n sender_idx=0,\n **comb_kwargs\n )\n\n model2 = CombiningWorker(\n (80, 450),\n env,\n [\n AuroraWorker(\"./\" + params[\"aurora_agent\"], env, (80, 450)),\n get_ogd_worker(params[\"ogd_worker\"], env, (80, 450), C=11 * 400, L=20, sender_id=1, **two_point_kwargs)\n ],\n reward_calculator=get_agent_reward_calculator(params[\"agent_reward\"]),\n sender_idx=1,\n **comb_kwargs\n )\n\n start1 = random.uniform(40, 300)\n start2 = random.uniform(40, 300)\n\n # start1 = 120\n # start2 = 340\n #\n # # OGD\n # model.weights[0] = 5000\n # model.weights[1] = 0\n # model.calculate_proba()\n #\n # # Aurora\n # model2.weights[0] = 5000\n # model2.weights[1] = 0\n # model2.calculate_proba()\n #\n model.set_action(start1)\n model2.set_action(start2)\n\n vis = MultipleSenderStatsVisualizer(env, [model, model2])\n vis.steps(TIMES, TIMES, 100)\n\n fig = vis.parse_data()\n\n fig.suptitle('COMB=(%d, %r, %.2f), TWOP=(%d, %r, %.2f),\\n REW=%s START=(%.0f, %.0f)' % (\n comb_kwargs[\"lr\"],\n comb_kwargs[\"lower_lr\"],\n comb_kwargs[\"min_proba_thresh\"],\n two_point_kwargs[\"lr\"],\n two_point_kwargs[\"lower_lr\"],\n two_point_kwargs[\"delta\"],\n params[\"reward_type\"],\n start1, start2\n ), fontsize=16)\n\n plt.show()\n\n fig.savefig(OUTPUT + \"/%d.png\" % (i + offset))\n vis._save_data(OUTPUT + \"/%d.json\" % (i + offset))\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.show" ] ]
matthewzimmer/trajectory-contact-networks
[ "b70e12052447899cea2e21c1dda85aea2f62a469" ]
[ "app/lib/ops/tiles.py" ]
[ "from math import radians, cos, sin, asin, pi, sqrt\nimport itertools\nimport networkx as nx\nimport numpy as np\n\n\nfrom app.lib.datasets import GeolifeData\nfrom app.lib.pipeline_ops import PipelineOp\nfrom app.lib.points import TrajectoryPoint\n\n\nclass GenerateTilesOp(PipelineOp):\n EARTH_CIRCUMFERENCE_AT_EQUATOR_METERS = 40075160\n EARTH_CIRCUMFERENCE_THROUGH_POLES_METERS = 40008000\n\n \"\"\"\n Generates a dictionary of tiles where the key is a hash\n of lat/lon/time and the value is a set of unique user\n ids that have points within that encoded\n spaciotemporal tile (cube).\n \"\"\"\n def __init__(self, users, ds, dt, relative_null_point=(39.75872, 116.04142)):\n PipelineOp.__init__(self)\n self.tiles = {}\n self.data_op = GeolifeData()\n self.users = np.array(users)\n self.ds = ds\n self.dt = dt\n self.relative_null_lat = relative_null_point[0]\n self.relative_null_lon = relative_null_point[1]\n\n def perform(self):\n for uid in self.users:\n for pt, plot in self.data_op.trajectories(uid):\n traj_pt = TrajectoryPoint(pt, uid)\n\n lat, lon = self.meters_for_lat_lon(traj_pt.lat, traj_pt.lon)\n t = traj_pt.t\n\n local_lat_meters = int(lat / self.ds) * self.ds\n local_lon_meters = int(lon / self.ds) * self.ds\n\n local_lat, local_lon = self.get_lat_lng_from_meters(local_lat_meters, local_lon_meters)\n local_t = int(t / self.dt) * self.dt\n\n tile_hash = \"lat{}_lon{}_t{}\".format(local_lat, local_lon, local_t)\n tile = self.hash_tile(tile_hash)\n # extract first column (uid)\n users = [sub_list[0] for sub_list in tile]\n if traj_pt.uid not in users:\n tile.append([traj_pt.uid, traj_pt.lat, traj_pt.lon, t, self.ds, self.dt])\n return self._apply_output(self.tiles)\n\n def hash_tile(self, tile_hash):\n \"\"\"\n Returns an existing tile based on tile hash if already generated.\n Otherwise, generates and returns a new list for the given tile_hash.\n \"\"\"\n tile = self.tiles.get(tile_hash, None)\n if tile is None:\n tile = []\n self.tiles[tile_hash] = tile\n 
return tile\n\n def meters_for_lat_lon(self, lat, lon):\n \"\"\"\n Calculates X and Y distances in meters.\n\n https://stackoverflow.com/a/3024728\n \"\"\"\n delta_latitude = lat - self.relative_null_lat\n delta_longitude = lon - self.relative_null_lon\n latitude_circumference = self.EARTH_CIRCUMFERENCE_AT_EQUATOR_METERS * cos(self.deg_to_rad(self.relative_null_lat))\n result_x = delta_longitude * latitude_circumference / 360\n result_y = delta_latitude * self.EARTH_CIRCUMFERENCE_THROUGH_POLES_METERS / 360\n return result_x, result_y\n\n def get_lat_lng_from_meters(self, lat, lon):\n latitude_circumference = self.EARTH_CIRCUMFERENCE_AT_EQUATOR_METERS * cos(self.deg_to_rad(self.relative_null_lat))\n delta_latitude = lon * 360 / self.EARTH_CIRCUMFERENCE_THROUGH_POLES_METERS\n delta_longitude = lat * 360 / latitude_circumference\n\n result_lat = delta_latitude + self.relative_null_lat\n result_lng = delta_longitude + self.relative_null_lon\n\n return result_lat, result_lng\n\n @staticmethod\n def deg_to_rad(degrees):\n return degrees * pi / 180\n\n\nclass GraphContactPointsOp(PipelineOp):\n def __init__(self, hashed_tiles, weight):\n PipelineOp.__init__(self)\n self.hashed_tiles = hashed_tiles\n self.weight = weight\n assert(weight in ['dist_weight', 'count_weight'])\n\n def perform(self):\n contact_points = [['uid1', 'uid2', 'ds', 'dt', 'tile_hash', 'dist_apart', 'time_diff', 'lat1', 'lat2', 'lon1', 'lon2', 't1', 't2']]\n tiles = self.hashed_tiles.items()\n tile_count = len(tiles)\n op_count = 0\n graph = nx.Graph()\n delta = (None, None)\n for tile_hash, uids in tiles:\n if not tile_hash:\n graph_filepath = 'app/data/graphs/no_tiles_from_data.png'\n return self._apply_output({\"graph_filepath\": graph_filepath, \"graph_generated\": False})\n if not delta:\n delta = (uids[0][4], uids[0][5])\n if len(uids) > 1:\n contact_pairs = itertools.combinations(uids, 2)\n for user_pair in contact_pairs:\n user1, user2 = user_pair\n u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = 
user1\n u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2\n u1_lat_lon = (u1_lat, u1_lon)\n u2_lat_lon = (u2_lat, u2_lon)\n distance = dist_apart(u1_lat_lon, u2_lat_lon)\n time_difference = abs(u1_t - u2_t)\n contact_points.append([u1_uid, u2_uid, u1_ds, u1_dt, tile_hash, distance, time_difference, u1_lat, u2_lat, u1_lon, u2_lon, u1_t, u2_t])\n\n if self.weight == 'dist_weight':\n graph = weight_by_distance(graph, user1, user2)\n elif self.weight == 'count_weight':\n graph = weight_by_count(graph, user1, user1)\n\n op_count += 1\n print(\"Remaining Tiles: {}\".format(tile_count - op_count))\n\n # graph_filepath = 'app/data/graphs/{}.png'.format(str(delta[0]) + 'ds_' + str(delta[1]) + 'dt')\n # nx.draw_circular(graph, with_labels=True) # spectral circular random\n # plt.savefig(graph_filepath, bbox_inches='tight')\n ds, dt = delta\n gml_filepath = 'app/data/graphs/{}.gml'.format(str(ds) + 'ds_' + str(dt) + 'dt_' + str(self.weight))\n nx.write_gml(graph, gml_filepath)\n\n # largest_comp = find_largest_component(graph)\n # avg_degree = find_average_degree(graph)\n # graph_results(largest_comp, avg_degree, deltas)\n return self._apply_output({\"contact_points\": np.asarray(contact_points), \"graph_filepath\": gml_filepath, \"graph_generated\": True})\n\n\nclass GraphHottestPointsOp(PipelineOp):\n def __init__(self, hashed_tiles, weight):\n PipelineOp.__init__(self)\n self.hashed_tiles = hashed_tiles\n self.weight = weight\n\n def perform(self):\n contact_points = [['uid1', 'uid2', 'ds', 'dt', 'tile_hash', 'dist_apart', 'time_diff', 'lat1', 'lat2', 'lon1', 'lon2', 't1', 't2']]\n user_count_in_tiles = [len(uids) for tile_hash, uids in self.hashed_tiles.items()]\n hot_zone_count = max(user_count_in_tiles)\n graph = nx.Graph()\n delta = (None, None)\n for tile_hash, uids in self.hashed_tiles.items():\n if not delta:\n delta = (uids[0][4], uids[0][5])\n if len(uids) == hot_zone_count:\n contact_pairs = itertools.combinations(uids, 2)\n for user_pair in contact_pairs:\n 
user1, user2 = user_pair\n u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = user1\n u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2\n u1_lat_lon = (u1_lat, u1_lon)\n u2_lat_lon = (u2_lat, u2_lon)\n distance = dist_apart(u1_lat_lon, u2_lat_lon)\n time_difference = abs(u1_t - u2_t)\n contact_points.append([u1_uid, u2_uid, u1_ds, u1_dt, tile_hash, distance, time_difference, u1_lat, u2_lat, u1_lon, u2_lon, u1_t, u2_t])\n\n if self.weight == 'dist_weight':\n graph = weight_by_distance(graph, user_pair[0], user_pair[1])\n elif self.weight == 'count_weight':\n graph = weight_by_count(graph, user_pair[0], user_pair[1])\n\n ds, dt = delta\n gml_filepath = 'app/data/graphs/{}.gml'.format(str(ds) + 'ds_' + str(dt) + 'dt_hot_zones')\n nx.write_gml(graph, gml_filepath)\n\n return self._apply_output({\"contact_points\": np.asarray(contact_points), \"gml_filepath\": gml_filepath, \"graph_generated\": True})\n\n\ndef weight_by_count(graph, user1, user2):\n u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = user1\n u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2\n u1_lat_lon = (u1_lat, u1_lon)\n u2_lat_lon = (u2_lat, u2_lon)\n distance = dist_apart(u1_lat_lon, u2_lat_lon)\n time_difference = abs(u1_t - u2_t)\n\n if not graph.has_edge(u1_uid, u2_uid):\n graph.add_edge(u1_uid, u2_uid, weight=1, ds=time_difference, distance=distance)\n else:\n graph[u1_uid][u2_uid]['weight'] += 1\n graph[u1_uid][u2_uid]['ds'] = time_difference\n graph[u1_uid][u2_uid]['dt'] = distance\n return graph\n\n\ndef weight_by_distance(graph, user1, user2):\n u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = user1\n u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2\n u1_lat_lon = (u1_lat, u1_lon)\n u2_lat_lon = (u2_lat, u2_lon)\n distance = dist_apart(u1_lat_lon, u2_lat_lon)\n time_difference = abs(u1_t - u2_t)\n delta = (u1_ds, u1_dt)\n ds, dt = delta\n weight = dt - distance\n if not graph.has_edge(u1_uid, u2_uid):\n graph.add_edge(u1_uid, u2_uid, weight=weight, distance=distance, ds=time_difference, dt=distance)\n 
else:\n if graph[u1_uid][u2_uid]['weight'] > weight:\n graph[u1_uid][u2_uid]['weight'] = weight\n graph[u1_uid][u2_uid]['ds'] = time_difference\n graph[u1_uid][u2_uid]['dt'] = distance\n return graph\n\n\ndef dist_apart(p1, p2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [p1[1], p1[0], p2[1], p2[0]])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km * 1000\n\n# def find_largest_component(graph):\n# component_size = [len(c) for c in sorted(nx.connected_components(graph), key=len, reverse=True)]\n# return str(max(component_size))\n# # print(\"Largest Component Size: \" + str(max(component_size)))\n# # print(\"Component List: \" + str(max(nx.connected_components(Graph), key=len)))\n#\n#\n# def find_average_degree(graph):\n# degree_list = []\n# for n in graph.nodes():\n# degree_list.append(graph.degree(n))\n# return str(sum(degree_list) / graph.number_of_nodes())\n# # print(\"Average degree of Nodes \" + str(sum(listr)/Graph.number_of_nodes()))\n#\n#\n# def graph_results(largest_comps, avg_degrees, deltas):\n# if largest_comps or avg_degrees != 'NULL':\n# plt.plot(deltas, largest_comps, label=\"Largest Component\")\n# plt.title(\"Size of Largest Connected Component\")\n# plt.ylabel(\"Largest Component Size\")\n# plt.xlabel(\"Delta settings\")\n# plt.savefig('app/viz/Largest_Component_Results.png', bbox_inches='tight')\n#\n# plt.plot(deltas, avg_degrees, label=\"Average Degree\")\n# plt.title(\"Average Degree of Nodes\")\n# plt.ylabel(\"Mean Degree\")\n# plt.xlabel(\"Delta settings\")\n# plt.savefig('app/viz/Avg_Degree_Results.png', bbox_inches='tight')\n\n" ]
[ [ "numpy.asarray", "numpy.array" ] ]
qingyundou/tacotron_qdou
[ "aca014e8ea73bbab617029b81368cee235f47ce2", "aca014e8ea73bbab617029b81368cee235f47ce2" ]
[ "lib/sigproc/scripts/bndspec2spec.py", "datasets/ljspeech.py" ]
[ "#!/usr/bin/python\n\n'''\nCopyright(C) 2016 Engineering Department, University of Cambridge, UK.\n\nLicense\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\nAuthor\n Gilles Degottex <gad27@cam.ac.uk>\n'''\n\nimport sys\nimport argparse\nimport numpy as np\n\nsys.path.append('/home/degottex/Research/CUED/Code')\nfrom lib import sigproc as sp\n\nif __name__ == \"__main__\" :\n\n argpar = argparse.ArgumentParser()\n argpar.add_argument(\"bndspecfile\", default=None, help=\"Input spectrum file\")\n argpar.add_argument(\"--nbbands\", type=int, help=\"Number of bands in the warped spectral representation\")\n argpar.add_argument(\"--dftlen\", default=4096, type=int, help=\"DFT size for the output spectrum\")\n argpar.add_argument(\"--fs\", default=16000, type=int, help=\"Sampling frequency[Hz]\")\n argpar.add_argument(\"specfile\", default=None, help=\"Output warped spectrum file\")\n args, unknown = argpar.parse_known_args()\n\n BNDSPEC = np.fromfile(args.bndspecfile, dtype=np.float32)\n BNDSPEC = BNDSPEC.reshape((-1, args.nbbands))\n SPEC = np.exp(sp.fwbnd2linbnd(BNDSPEC, args.fs, args.dftlen))\n SPEC.astype('float32').tofile(args.specfile)\n", "from concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nimport glob\nimport numpy as np\nimport os\nfrom util import audio\nimport shutil\nfrom datasets import linear_dir, mel_dir, pml_dir, pml_data_dir, wav_dir\n\n\ndef build_from_path(in_dir, out_dir, hparams, num_workers=1, tqdm=lambda x: x):\n 
'''Preprocesses the LJ Speech dataset from a given input path into a given output directory.\n\n Args:\n in_dir: The directory where you have downloaded the LJ Speech dataset\n out_dir: The directory to write the output into\n num_workers: Optional number of worker processes to parallelize across\n tqdm: You can optionally pass tqdm to get a nice progress bar\n\n Returns:\n A list of tuples describing the training examples. This should be written to train.txt\n '''\n\n # We use ProcessPoolExecutor to parallelize across processes. This is just an optimization and you\n # can omit it and just call _process_utterance on each input if you want.\n executor = ProcessPoolExecutor(max_workers=num_workers)\n futures = []\n index = 1\n\n with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:\n for line in f:\n parts = line.strip().split('|')\n wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])\n text = parts[2]\n\n # get the pml features\n pml_path = os.path.join(in_dir, 'pml', '%s.cmp' % parts[0])\n pml_features = np.fromfile(pml_path, dtype=np.float32)\n\n futures.append(executor.submit(partial(_process_utterance, out_dir, index, wav_path, text, pml_features, hparams)))\n index += 1\n\n # copy the data files, if they exist\n files = glob.iglob(os.path.join(in_dir, 'pml', '*.dat'))\n os.makedirs(os.path.join(out_dir, pml_data_dir), exist_ok=True)\n\n for file in files:\n if os.path.isfile(file):\n shutil.copyfile(file, os.path.join(out_dir, pml_data_dir, os.path.basename(file)))\n\n return [future.result() for future in tqdm(futures)]\n\n# qd212\ndef _process_utterance(out_dir, index, wav_path, text, pml_cmp, hparams):\n # Create directories if they do not exist\n os.makedirs(os.path.join(out_dir, pml_dir), exist_ok=True)\n\n # Write the PML features to disk\n pml_filename = 'ljspeech-pml-%05d.npy' % index\n pml_dimension = hparams.pml_dimension\n pml_features = pml_cmp.reshape((-1, pml_dimension))\n pml_frames = pml_features.shape[0]\n 
np.save(os.path.join(out_dir, pml_dir, pml_filename), pml_features, allow_pickle=False)\n\n return None\n \n\n# def _process_utterance(out_dir, index, wav_path, text, pml_cmp, hparams):\n# '''Preprocesses a single utterance audio/text pair.\n\n# This writes the mel and linear scale spectrograms to disk and returns a tuple to write\n# to the train.txt file.\n\n# Args:\n# out_dir: The directory to write the spectrograms into\n# index: The numeric index to use in the spectrogram filenames.\n# wav_path: Path to the audio file containing the speech input\n# text: The text spoken in the input audio file\n# pml_cmp: One dimensional array containing vocoder features read from .cmp file\n\n# Returns:\n# A (spectrogram_filename, mel_filename, n_frames, text) tuple to write to train.txt\n# '''\n\n# # Load the audio to a numpy array:\n# wav = audio.load_wav(wav_path)\n\n# # Create directories if they do not exist\n# os.makedirs(os.path.join(out_dir, wav_dir), exist_ok=True)\n# os.makedirs(os.path.join(out_dir, pml_dir), exist_ok=True)\n# os.makedirs(os.path.join(out_dir, mel_dir), exist_ok=True)\n# os.makedirs(os.path.join(out_dir, linear_dir), exist_ok=True)\n\n# # Copy the wav into the training directory\n# shutil.copyfile(wav_path, os.path.join(out_dir, wav_dir, os.path.basename(wav_path)))\n\n# # Write the PML features to disk\n# pml_filename = 'ljspeech-pml-%05d.npy' % index\n# pml_dimension = hparams.pml_dimension\n# pml_features = pml_cmp.reshape((-1, pml_dimension))\n# pml_frames = pml_features.shape[0]\n# np.save(os.path.join(out_dir, pml_dir, pml_filename), pml_features, allow_pickle=False)\n\n# # Compute the linear-scale spectrogram from the wav:\n# spectrogram = audio.spectrogram(wav).astype(np.float32)\n# n_frames = spectrogram.shape[1]\n\n# # Compute a mel-scale spectrogram from the wav:\n# mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)\n# mel_frames = mel_spectrogram.shape[1]\n\n# # Ensure lengths of spectrograms and PML features are the same\n# 
if n_frames > pml_frames:\n# spectrogram = spectrogram[:, :pml_frames]\n\n# # Check the shape of the mel target\n# if mel_frames > pml_frames:\n# mel_spectrogram = mel_spectrogram[:, :pml_frames]\n\n# # Write the spectrograms to disk:\n# spectrogram_filename = 'ljspeech-spec-%05d.npy' % index\n# mel_filename = 'ljspeech-mel-%05d.npy' % index\n# np.save(os.path.join(out_dir, linear_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)\n# np.save(os.path.join(out_dir, mel_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)\n\n# # Return a tuple describing this training example:\n# return spectrogram_filename, mel_filename, n_frames, pml_filename, pml_frames, \\\n# text, os.path.basename(wav_path)\n" ]
[ [ "numpy.fromfile" ], [ "numpy.fromfile" ] ]
sufe-nlp/transformer-alignment
[ "0763129d8b1065eb671a516d0e14459cdec44271" ]
[ "fairseq/models/transformer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import namedtuple\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq import options, utils\nfrom fairseq.models import (\n FairseqEncoder,\n FairseqDecoder,\n FairseqIncrementalDecoder,\n FairseqEncoderDecoderModel,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.modules import (\n AdaptiveSoftmax,\n LayerNorm,\n PositionalEmbedding,\n SinusoidalPositionalEmbedding,\n TransformerDecoderLayer,\n TransformerEncoderLayer,\n PointerNet,\n)\nimport random\n\nDEFAULT_MAX_SOURCE_POSITIONS = 1024\nDEFAULT_MAX_TARGET_POSITIONS = 1024\n\n\n@register_model('transformer')\nclass TransformerModel(FairseqEncoderDecoderModel):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_.\n\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. 
argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n\n @classmethod\n def hub_models(cls):\n # fmt: off\n\n def moses_subword(path):\n return {\n 'path': path,\n 'tokenizer': 'moses',\n 'bpe': 'subword_nmt',\n }\n\n def moses_fastbpe(path):\n return {\n 'path': path,\n 'tokenizer': 'moses',\n 'bpe': 'fastbpe',\n }\n\n return {\n 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),\n 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',\n 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),\n 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),\n 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),\n 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),\n 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),\n 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),\n 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),\n 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),\n 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),\n }\n # fmt: on\n\n def __init__(self, args, encoder, decoder):\n super().__init__(encoder, decoder)\n self.args = args\n self.supports_align_args = True\n\n @staticmethod\n def 
add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout', type=float, metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',\n help='dropout probability after activation in FFN.')\n parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers', type=int, metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\n help='num encoder attention heads')\n parser.add_argument('--encoder-normalize-before', action='store_true',\n help='apply layernorm before each encoder block')\n parser.add_argument('--encoder-learned-pos', action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers', type=int, metavar='N',\n help='num decoder layers')\n parser.add_argument('--decoder-attention-heads', type=int, metavar='N',\n help='num decoder attention heads')\n parser.add_argument('--decoder-learned-pos', action='store_true',\n 
help='use learned positional embeddings in the decoder')\n parser.add_argument('--decoder-normalize-before', action='store_true',\n help='apply layernorm before each decoder block')\n parser.add_argument('--share-decoder-input-output-embed', action='store_true',\n help='share decoder input and output embeddings')\n parser.add_argument('--share-all-embeddings', action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',\n help='if set, disables positional embeddings (outside self attention)')\n parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. '\n 'Must be used with adaptive_loss criterion'),\n parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n # args for \"Cross+Self-Attention for Transformer Models\" (Peitz et al., 2019)\n parser.add_argument('--no-cross-attention', default=False, action='store_true',\n help='do not perform cross-attention')\n parser.add_argument('--cross-self-attention', default=False, action='store_true',\n help='perform cross+self-attention')\n parser.add_argument('--layer-wise-attention', default=False, action='store_true',\n help='perform layer-wise attention (cross-attention or cross+self-attention)')\n # args for \"Reducing Transformer Depth on Demand with Structured Dropout\" (Fan et al., 2019)\n parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,\n help='LayerDrop probability for encoder')\n parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,\n help='LayerDrop probability for decoder')\n parser.add_argument('--encoder-layers-to-keep', default=None,\n help='which layers to *keep* when pruning as a comma-separated list')\n 
parser.add_argument('--decoder-layers-to-keep', default=None,\n help='which layers to *keep* when pruning as a comma-separated list')\n parser.add_argument('--layernorm-embedding', action='store_true',\n help='add layernorm to embedding')\n parser.add_argument('--no-scale-embedding', action='store_true',\n help='if True, dont scale embeddings')\n \n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if args.encoder_layers_to_keep:\n args.encoder_layers = len(args.encoder_layers_to_keep.split(\",\"))\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, 'max_source_positions', None) is None:\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if getattr(args, 'max_target_positions', None) is None:\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError('--share-all-embeddings requires a joined dictionary')\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path):\n raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')\n encoder_embed_tokens = build_embedding(\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n 
decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = build_embedding(\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = build_embedding(\n tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens, src_dict=src_dict)\n return cls(args, encoder, decoder)\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return TransformerEncoder(args, src_dict, embed_tokens)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens, src_dict=None):\n return TransformerDecoder(\n args,\n tgt_dict,\n embed_tokens,\n no_encoder_attn=getattr(args, 'no_cross_attention', False),\n src_dict=src_dict,\n )\n\n\n@register_model('transformer_align')\nclass TransformerAlignModel(TransformerModel):\n \"\"\"\n See \"Jointly Learning to Align and Translate with Transformer\n Models\" (Garg et al., EMNLP 2019).\n \"\"\"\n\n def __init__(self, encoder, decoder, args):\n super().__init__(args, encoder, decoder)\n self.alignment_heads = args.alignment_heads\n self.alignment_layer = args.alignment_layer\n self.full_context_alignment = args.full_context_alignment\n\n @staticmethod\n def add_args(parser):\n # fmt: off\n super(TransformerAlignModel, TransformerAlignModel).add_args(parser)\n # parser.add_argument('--alignment-heads', type=int, metavar='D',\n # help='Number of cross attention heads per layer to supervised with alignments')\n # parser.add_argument('--alignment-layer', type=int, metavar='D',\n # help='Layer number which has to be supervised. 
0 corresponding to the bottommost layer.')\n parser.add_argument('--full-context-alignment', type=bool, metavar='D',\n help='Whether or not alignment is supervised conditioned on the full target context.')\n \n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n # set any default arguments\n transformer_align(args)\n\n transformer_model = TransformerModel.build_model(args, task)\n return TransformerAlignModel(transformer_model.encoder, transformer_model.decoder, args)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens):\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.forward_decoder(prev_output_tokens, encoder_out)\n\n def forward_decoder(\n self,\n prev_output_tokens,\n encoder_out=None,\n incremental_state=None,\n features_only=False,\n **extra_args,\n ):\n attn_args = {'alignment_layer': self.alignment_layer, 'alignment_heads': self.alignment_heads}\n decoder_out = self.decoder(\n prev_output_tokens,\n encoder_out,\n **attn_args,\n **extra_args,\n )\n\n if self.full_context_alignment:\n attn_args['full_context_alignment'] = self.full_context_alignment\n _, alignment_out = self.decoder(\n prev_output_tokens, encoder_out, features_only=True, **attn_args, **extra_args,\n )\n decoder_out[1]['attn'] = alignment_out['attn']\n\n return decoder_out\n\nEncoderOut = namedtuple('TransformerEncoderOut', [\n 'encoder_out', # T x B x C\n 'encoder_padding_mask', # B x T\n 'encoder_embedding', # B x T x C\n 'encoder_states', # List[T x B x C]\n 'src_tokens',\n])\n\n\nclass TransformerEncoder(FairseqEncoder):\n \"\"\"\n Transformer encoder consisting of *args.encoder_layers* layers. 
Each layer\n is a :class:`TransformerEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n self.encoder_layerdrop = args.encoder_layerdrop\n\n embed_dim = embed_tokens.embedding_dim\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = args.max_source_positions\n\n self.embed_tokens = embed_tokens\n\n self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)\n\n self.embed_positions = PositionalEmbedding(\n args.max_source_positions, embed_dim, self.padding_idx,\n learned=args.encoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n TransformerEncoderLayer(args)\n for i in range(args.encoder_layers)\n ])\n\n if args.encoder_normalize_before:\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n if getattr(args, 'layernorm_embedding', False):\n self.layernorm_embedding = LayerNorm(embed_dim)\n else:\n self.layernorm_embedding = None\n\n def forward_embedding(self, src_tokens):\n # embed tokens and positions\n x = embed = self.embed_scale * self.embed_tokens(src_tokens)\n\n if self.embed_positions is not None:\n x = embed + self.embed_positions(src_tokens)\n if self.layernorm_embedding:\n x = self.layernorm_embedding(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n return x, embed\n\n def forward(self, src_tokens, src_lengths, cls_input=None, return_all_hiddens=False, disable_pos=None, **unused):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths 
(torch.LongTensor): lengths of each source sentence of\n shape `(batch)`\n return_all_hiddens (bool, optional): also return all of the\n intermediate hidden states (default: False).\n\n Returns:\n namedtuple:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n - **encoder_embedding** (Tensor): the (scaled) embedding lookup\n of shape `(batch, src_len, embed_dim)`\n - **encoder_states** (List[Tensor]): all intermediate\n hidden states of shape `(src_len, batch, embed_dim)`.\n Only populated if *return_all_hiddens* is True.\n \"\"\"\n if self.layer_wise_attention:\n return_all_hiddens = True\n\n x, encoder_embedding = self.forward_embedding(src_tokens,disable_pos=disable_pos)\n # import pdb;pdb.set_trace()\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n \n # compute padding mask\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\n if not encoder_padding_mask.any():\n encoder_padding_mask = None\n\n encoder_states = [] if return_all_hiddens else None\n\n # encoder layers\n for layer in self.layers:\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if not self.training or (dropout_probability > self.encoder_layerdrop):\n x = layer(x, encoder_padding_mask)\n if return_all_hiddens:\n encoder_states.append(x)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n if return_all_hiddens:\n encoder_states[-1] = x\n\n return EncoderOut(\n encoder_out=x, # T x B x C\n encoder_padding_mask=encoder_padding_mask, # B x T\n encoder_embedding=encoder_embedding, # B x T x C\n encoder_states=encoder_states, # List[T x B x C]\n src_tokens=src_tokens,\n )\n\n def reorder_encoder_out(self, encoder_out, new_order):\n \"\"\"\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order 
(LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n if encoder_out.encoder_out is not None:\n encoder_out = encoder_out._replace(\n encoder_out=encoder_out.encoder_out.index_select(1, new_order)\n )\n if encoder_out.encoder_padding_mask is not None:\n encoder_out = encoder_out._replace(\n encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(0, new_order)\n )\n if encoder_out.encoder_embedding is not None:\n encoder_out = encoder_out._replace(\n encoder_embedding=encoder_out.encoder_embedding.index_select(0, new_order)\n ) \n if encoder_out.encoder_states is not None:\n for idx, state in enumerate(encoder_out.encoder_states):\n encoder_out.encoder_states[idx] = state.index_select(1, new_order)\n if encoder_out.src_tokens is not None:\n encoder_out = encoder_out._replace(\n src_tokens = encoder_out.src_tokens.index_select(0, new_order)\n )\n return encoder_out\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions, self.embed_positions.max_positions())\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._future_mask.size(0) < dim:\n self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n print('deleting {0}'.format(weights_key))\n del state_dict[weights_key]\n 
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\n for i in range(len(self.layers)):\n # update layer norms\n self.layers[i].upgrade_state_dict_named(state_dict, \"{}.layers.{}\".format(name, i))\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n return state_dict\n\n\nclass TransformerDecoder(FairseqIncrementalDecoder): #\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, src_dict=None):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n self.decoder_layerdrop = args.decoder_layerdrop\n self.share_input_output_embed = args.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = args.decoder_embed_dim\n self.output_embed_dim = args.decoder_output_dim\n self.src_dict = src_dict\n\n self.alignment_layer = getattr(args, 'alignment_layer', 2)\n if hasattr(args, \"alignment_task\") and args.alignment_task == 'supalign':\n self.add_sup_align_module = True\n else:\n self.add_sup_align_module = False\n \n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = args.max_target_positions\n self.embed_tokens = embed_tokens\n self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)\n self.project_in_dim = Linear(input_embed_dim, embed_dim, 
bias=False) if embed_dim != input_embed_dim else None\n self.embed_positions = PositionalEmbedding(\n args.max_target_positions, embed_dim, self.padding_idx,\n learned=args.decoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n self.cross_self_attention = getattr(args, 'cross_self_attention', False)\n self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n TransformerDecoderLayer(args, no_encoder_attn, add_suphead=(self.add_sup_align_module and idx==self.alignment_layer))\n for idx in range(args.decoder_layers)\n ])\n\n self.adaptive_softmax = None\n\n self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \\\n if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None\n\n if args.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\n dropout=args.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,\n factor=args.adaptive_softmax_factor,\n tie_proj=args.tie_adaptive_proj,\n )\n elif not self.share_input_output_embed:\n self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))\n nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)\n\n if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n if getattr(args, 'layernorm_embedding', False):\n self.layernorm_embedding = LayerNorm(embed_dim)\n else:\n self.layernorm_embedding = None\n \n def forward(\n self,\n prev_output_tokens,\n encoder_out=None,\n incremental_state=None,\n features_only=False,\n reorder_state=None,\n **extra_args\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, 
for teacher forcing\n encoder_out (optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n features_only (bool, optional): only return features without\n applying output layer (default: False).\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n x, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n incremental_state=incremental_state,\n **extra_args\n )\n\n if not features_only:\n x = self.output_layer(x)\n \n return x, extra\n\n def get_normalized_probs(self, net_output, log_probs, sample):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n logits = net_output[0].float() \n if log_probs:\n return F.log_softmax(logits, dim=-1)\n else:\n return F.softmax(logits, dim=-1)\n\n\n def extract_features(\n self,\n prev_output_tokens,\n encoder_out=None,\n incremental_state=None,\n full_context_alignment=False,\n alignment_layer=None,\n alignment_heads=None,\n **unused,\n ):\n \"\"\"\n Similar to *forward* but only return features.\n\n Includes several features from \"Jointly Learning to Align and\n Translate with Transformer Models\" (Garg et al., EMNLP 2019).\n\n Args:\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n alignment_layer (int, optional): return mean alignment over\n heads at this layer (default: last layer).\n alignment_heads (int, optional): only average alignment over\n this many heads (default: all heads).\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n if alignment_layer is None:\n alignment_layer = self.alignment_layer\n # embed positions\n positions = self.embed_positions(\n prev_output_tokens,\n 
incremental_state=incremental_state,\n ) if self.embed_positions is not None else None\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n\n if self.layernorm_embedding:\n x = self.layernorm_embedding(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n self_attn_padding_mask = None\n if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():\n self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)\n\n # decoder layers\n attn = None\n inner_states = [x]\n final_suphead_attn, dec_self_attn = None, None\n need_dec_self_attn = False\n \n for idx, layer in enumerate(self.layers):\n encoder_state = None\n if encoder_out is not None:\n if self.layer_wise_attention:\n encoder_state = encoder_out.encoder_states[idx]\n else:\n encoder_state = encoder_out.encoder_out\n\n if incremental_state is None and not full_context_alignment:\n self_attn_mask = self.buffered_future_mask(x) \n else:\n self_attn_mask = None\n\n dropout_probability = random.uniform(0, 1)\n if not self.training or (dropout_probability > self.decoder_layerdrop): \n x, layer_attn, dec_attn, suphead_attn = layer(\n x,\n encoder_state,\n encoder_out.encoder_padding_mask if encoder_out is not None else None,\n incremental_state,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask,\n need_attn=(idx == alignment_layer),\n need_head_weights=(idx == alignment_layer),\n need_self_attn= ((idx == alignment_layer) and need_dec_self_attn), \n )\n inner_states.append(x)\n if layer_attn is not None and idx == alignment_layer:\n attn = layer_attn.float()\n if dec_attn is not None and idx == 
alignment_layer:\n dec_self_attn = dec_attn.float()\n if suphead_attn is not None:\n final_suphead_attn = suphead_attn.float()\n if attn is not None:\n if alignment_heads is not None:\n attn = attn[:alignment_heads] \n # average probabilities over heads\n attn = attn.mean(dim=0)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n \n return x, {'attn': attn, 'inner_states': inner_states, 'dec_self_attn': dec_self_attn, 'suphead_attn': final_suphead_attn}\n\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n # project back to size of vocabulary\n if self.share_input_output_embed:\n return F.linear(features, self.embed_tokens.weight)\n else:\n return F.linear(features, self.embed_out)\n else:\n return features\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if (\n not hasattr(self, '_future_mask')\n or self._future_mask is None\n or self._future_mask.device != tensor.device\n or self._future_mask.size(0) < dim\n ):\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\n\n for i in range(len(self.layers)):\n # update layer norms\n 
layer_norm_map = {\n '0': 'self_attn_layer_norm',\n '1': 'encoder_attn_layer_norm',\n '2': 'final_layer_norm'\n }\n for old, new in layer_norm_map.items():\n for m in ('weight', 'bias'):\n k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)\n if k in state_dict:\n state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]\n del state_dict[k]\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n\n@register_model_architecture('transformer', 'transformer')\ndef base_architecture(args):\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)\n args.decoder_layers = 
getattr(args, 'decoder_layers', 6)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\n args.dropout = getattr(args, 'dropout', 0.1)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)\n args.adaptive_input = getattr(args, 'adaptive_input', False)\n args.no_cross_attention = getattr(args, 'no_cross_attention', False)\n args.cross_self_attention = getattr(args, 'cross_self_attention', False)\n args.layer_wise_attention = getattr(args, 'layer_wise_attention', False)\n\n args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)\n args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)\n\n args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)\n args.layernorm_embedding = getattr(args, 'layernorm_embedding', False)\n\n\n@register_model_architecture('transformer', 'transformer_iwslt_de_en')\ndef transformer_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 
512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n base_architecture(args)\n\n\n@register_model_architecture('transformer', 'transformer_wmt_en_de')\ndef transformer_wmt_en_de(args):\n base_architecture(args)\n\n\n# parameters used in the \"Attention Is All You Need\" paper (Vaswani et al., 2017)\n@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')\ndef transformer_vaswani_wmt_en_de_big(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\n args.dropout = getattr(args, 'dropout', 0.3)\n base_architecture(args)\n\n\n@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')\ndef transformer_vaswani_wmt_en_fr_big(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big')\ndef transformer_wmt_en_de_big(args):\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n# default parameters used in tensor2tensor implementation\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')\ndef transformer_wmt_en_de_big_t2t(args):\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)\n 
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n@register_model_architecture('transformer_align', 'transformer_align')\ndef transformer_align(args):\n args.alignment_heads = getattr(args, 'alignment_heads', 1)\n args.alignment_layer = getattr(args, 'alignment_layer', 4)\n args.full_context_alignment = getattr(args, 'full_context_alignment', False)\n base_architecture(args)\n\n@register_model_architecture('transformer_align', 'transformer_align_small')\ndef transformer_align_small(args):\n args.alignment_heads = getattr(args, 'alignment_heads', 1)\n args.alignment_layer = getattr(args, 'alignment_layer', 2)\n args.full_context_alignment = getattr(args, 'full_context_alignment', False)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n base_architecture(args)\n\n\n@register_model_architecture('transformer_align', 'transformer_wmt_en_de_big_align')\ndef transformer_wmt_en_de_big_align(args):\n args.alignment_heads = getattr(args, 'alignment_heads', 1)\n args.alignment_layer = getattr(args, 'alignment_layer', 4)\n transformer_wmt_en_de_big(args)\n" ]
[ [ "torch.nn.functional.softmax", "torch.Tensor", "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.nn.functional.log_softmax", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.init.normal_", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.nn.functional.linear" ] ]
zweifel/SyncMap
[ "7ad5a08e15cc49807a2c965a3b70e795948b256f" ]
[ "FixedChunkTest.py" ]
[ "from keras.utils import np_utils\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nclass FixedChunkTest:\n\t\n\tdef __init__(self, time_delay, filename=\"fixed_chunk2.txt\"):\n\t\t'''\n\t\tChunks are written in the filename in which every line is a sequence of outputs followed by the number of the respective chunk\n\t\tAll chunk numbers must be in ascending order and must have the same number of outputs\n\t\tChunks will be shuffled and presented repeatedly throughout\n\t\t'''\n\t\tdataset= np.loadtxt(filename, dtype=\"i\", delimiter=\",\")\n\t\tself.time_delay = time_delay\n\t\tself.time_counter = 0\n\t\tself.current_index= 0\n\n\t\tself.output_size= dataset.shape[1]-1\n\t\tself.data = dataset[:,:self.output_size]\n\t\tself.data_class= dataset[:,self.output_size]\n\n\t\tacc = np.zeros(len(self.data_class), dtype=int)\n\t\tfor i,sample in enumerate(self.data):\n\t\t\t#print(sample)\n\t\t\t#print(self.data_class)\n\t\t\ttmp= sample*self.data_class\n\t\t\tacc[i]= int(tmp.sum())\n\t\t\n\t\tacc-= 1\n\t\tself.true_labels= acc\n\n\t\tself.chunk= []\n\t\tnew_chunk= None\n\t\tnew_chunk_index= None\n\t\tfor i,sample in enumerate(self.data):\n\t\t\tif new_chunk is None:\n\t\t\t\tnew_chunk_index= self.data_class[i]\n\t\t\t\tnew_chunk= [sample]\n\t\t\telse:\n\t\t\t\tif new_chunk_index == self.data_class[i]:\n\t\t\t\t\tnew_chunk.append(sample)\n\t\t\t\telse:\n\t\t\t\t\tself.chunk.append(np.asarray(new_chunk))\n\t\t\t\t\tnew_chunk= [sample]\n\t\t\t\t\tnew_chunk_index= self.data_class[i]\n\n\t\tself.chunk.append(np.asarray(new_chunk))\n\n\t\tself.chunk= np.asarray(self.chunk)\n\t\tself.number_of_chunks= self.chunk.shape[0]\n\t\tself.chunk_index= np.random.randint(self.number_of_chunks)\n\t\t\n\t\t#print(self.chunk)\n#\t\tprint(self.chunk.shape)\n#\t\tfor i in range(10):\n#\t\t\trand= np.random.randint(self.number_of_chunks)\n#\t\t\tprint(self.chunk[rand])\n\n#\t\texit()\n\n\n\n#\t\tself.chunk= 0\n#\t\tself.output_size = output_size\n#\t\tself.counter = 
-1\n#\t\tself.output_class= data_class[current_index]\n\t\tself.previous_output_class= None\n\t\tself.previous_previous_output_class= None\n\t\t\t\n\t\t#print(self.data_class.shape[0])\n\t\t#exit()\n\n#\t\tself.sequenceA_length = 4\n#\t\tself.sequenceB_length = 4 #np.random.randint(2)+5\n\t\n\tdef getOutputSize(self):\n\t\treturn self.output_size\n\t\n\tdef trueLabel(self):\n\t\treturn self.true_labels\n\n\tdef updateTimeDelay(self):\n\t\tself.time_counter+= 1\n\t\tif self.time_counter > self.time_delay:\n\t\t\tself.time_counter = 0 \n\t\t\tself.previous_previous_output_class= self.previous_output_class\n\t\t\tself.previous_output_class= self.output_class\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t#create an input pattern for the system\n\tdef getInput(self, reset = False):\n\t\t\n\t\tif reset == True:\n\t\t\tself.current_index=0\n\t\t\tself.time_counter=0\n\n\t\tupdate = self.updateTimeDelay()\n\t\t\n\t\t#print(self.chunk[self.chunk_index].shape)\n\t\t#exit()\n\n\t\tif update == True:\n\t\t\t\n\t\t\tself.current_index+= 1\n\n\t\t\t#check if a new chunk should start\n\t\t\tif self.current_index >= self.chunk[self.chunk_index].shape[0]:\n\t\t\t\tself.chunk_index= np.random.randint(self.number_of_chunks)\n\t\t\t\tself.current_index= 0\n\t\t\t\n\t\t\n\t\t\t\t\t\n\t\t#chunk is the cluster it pertains\n\t\t#output class is the current output\n\t\t#self.chunk_index= \n\t\t#print(\"chunk\",self.chunk)\n\t\tself.output_class = self.chunk[self.chunk_index][self.current_index]\n\t\t\n\t\tnoise_intensity= 0\n\t\tif self.previous_output_class is None or np.array_equal(self.previous_output_class, self.output_class):\n\t\t\tinput_value = self.output_class*np.exp(-0.1*self.time_counter) + np.random.randn(self.output_size)*noise_intensity\n\t\telse:\n\t\t\tinput_value = self.output_class*np.exp(-0.1*self.time_counter) + np.random.randn(self.output_size)*noise_intensity + self.previous_output_class*np.exp(-0.1*(self.time_counter+self.time_delay))\n\n\n\n\t\treturn 
input_value\n\n\tdef getSequence(self, sequence_size):\n\t\n\t\t#print(self.data.shape[0])\n\t\t#print(input_sequence.shape)\n\t\t#exit()\n\t\tself.input_sequence = np.empty((sequence_size, self.data.shape[1]))\n\t\tself.input_class = np.empty(sequence_size)\n\t\t\n\t\tfor i in range(sequence_size):\n\t\t\t\n\t\t\tinput_value = self.getInput()\n\t\t\t\n\t\t\t#input_class.append(self.chunk)\n\t\t\t#input_sequence.append(input_value)\n\t\t\tself.input_class[i] = self.chunk_index\n\t\t\tself.input_sequence[i] = input_value\n\n\t\treturn self.input_sequence, self.input_class\n\n\t\n\tdef plot(self, input_class, input_sequence = None, save = False):\n\t\t\n\t\ta = np.asarray(input_class)\n\t\tt = [i for i,value in enumerate(a)]\n\n\t\tplt.plot(t, a)\n\t\t\n\t\tif input_sequence != None:\n\t\t\tsequence = [np.argmax(x) for x in input_sequence]\n\t\t\tplt.plot(t, sequence)\n\n\t\tif save == True:\n\t\t\tplt.savefig(\"plot.png\")\n\t\t\n\t\tplt.show()\n\t\tplt.close()\n\t\n\tdef plotSuperposed(self, input_class, input_sequence = None, save = False):\n\t\n\t\tinput_sequence= np.asarray(input_sequence)\n\t\t\n\t\tt = [i for i,value in enumerate(input_sequence)]\n\n\t\t#exit()\n\n\t\tfor i in range(input_sequence.shape[1]):\n\t\t\ta = input_sequence[:,i]\n\t\t\tplt.plot(t, a)\n\t\t\n\t\ta = np.asarray(input_class)\n\t\tplt.plot(t, a)\n\n\t\tif save == True:\n\t\t\tplt.savefig(\"plot.png\")\n\t\t\n\t\tplt.show()\n\t\tplt.close()\n\n" ]
[ [ "numpy.array_equal", "numpy.asarray", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.loadtxt", "numpy.argmax", "numpy.random.randn", "matplotlib.pyplot.close", "numpy.exp", "matplotlib.pyplot.show", "numpy.empty", "numpy.random.randint" ] ]
atulyakumar97/gender-classifier-using-voice
[ "58a5aa035d42f0e295b0210e0a5fd5336ced3a6f" ]
[ "ML_testing/knn.py" ]
[ "#k nearest neighbours\n\nimport pandas as pd\n\n#Importing the dataset\ndataset = pd.read_csv('C:\\\\Users\\\\Atulya\\\\Documents\\\\GitHub\\\\gender-classifier-using-voice\\\\Data Preprocessing\\\\feature extraction\\\\features.csv')\nX = dataset.iloc[:,1:-1].values\ny = dataset.iloc[:,-1:].values\n\n#Taking care of missing data\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values=0, strategy='mean')\nimputer = imputer.fit(X[:,:])\nX[:,:] = imputer.transform(X[:,:])\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_y = LabelEncoder()\ny = labelencoder_y.fit_transform(y.ravel())\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.02, random_state=None)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\n\n# Fitting classifier to the Training set\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors=5, weights='distance',metric='minkowski', p=2, algorithm='auto', n_jobs=-1)\nclassifier.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\n\n# And find the final test error\ncorrect_pred=sum(y_pred == y_test)\nprint(correct_pred)\nprint('accuracy = ', correct_pred*100/(y_pred.shape[0]))" ]
[ [ "pandas.read_csv", "sklearn.impute.SimpleImputer", "sklearn.model_selection.train_test_split", "sklearn.metrics.confusion_matrix", "sklearn.neighbors.KNeighborsClassifier", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.LabelEncoder" ] ]
KennCoder7/DNN-numpy
[ "4b31624d442acc7854734c3d51e6ddcd66271f82" ]
[ "model/ActivationBlock.py" ]
[ "\"\"\"\r\nauthor: Kun Wang (Kenn)\r\ne-mail: iskenn7@gmail.com\r\n\"\"\"\r\nimport numpy as np\r\nfrom utils import *\r\n\r\n\r\nclass Activation(object):\r\n def __init__(self, name, method):\r\n self.name = name\r\n self.__method = method\r\n self.__input_dim = None\r\n\r\n def initial(self, input_dim):\r\n self.__input_dim = input_dim\r\n return self.name, self.__input_dim\r\n\r\n def forward(self, _x_set):\r\n _a_set = activation_function(self.__method, _x_set)\r\n return _a_set\r\n\r\n def backward(self, _e_set, _z_down_set):\r\n _e_down_set = derivative_function(self.__method, _z_down_set) * _e_set\r\n return _e_down_set\r\n\r\n\r\nif __name__ == '__main__':\r\n x_set = np.random.randn(3, 5)\r\n print(sigmoid(x_set).shape)\r\n print(d_sigmoid(x_set).shape)\r\n print(relu(x_set).shape)\r\n print(d_relu(x_set).shape)\r\n print(softmax(x_set).shape)\r\n\r\n # for i in range(3):\r\n # print(np.sum(softmax(x_set)[i]))\r\n" ]
[ [ "numpy.random.randn" ] ]
carnivorouspeanut/isovar_comp
[ "74fcc12ef52d08eb4cfa85bdcda8903970babbda" ]
[ "isovar/dataframe_builder.py" ]
[ "# Copyright (c) 2016. Mount Sinai School of Medicine\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function, division, absolute_import\nfrom collections import OrderedDict\nfrom six import integer_types, text_type, binary_type\n\nfrom varcode import Variant\nimport pandas as pd\n\nVALID_ELEMENT_TYPES = integer_types + (text_type, binary_type, float, bool)\n\n# values of these types are automatically converted to their size or length\n# unless some other conversion function is provided\nCOLLECTION_TYPES = (tuple, list, set, frozenset)\n\n\nclass DataFrameBuilder(object):\n \"\"\"\n Helper class for constructing a DataFrame which always has fields\n of a variant (chr/pos/ref/alt) as well as some subset of the fields\n from a namedtuple or ValueObject.\n \"\"\"\n def __init__(\n self,\n element_class,\n field_names=None,\n exclude=set([]),\n converters={},\n rename_dict={},\n extra_column_fns={},\n variant_columns=True,\n convert_collections_to_size=True):\n \"\"\"\n Parameters\n ----------\n element_class : type\n Class of elements in this collection.\n\n field_names : list, optional\n If not given then we expect element_class to have a class member\n named '_fields' which is a list of field names.\n\n exclude : set\n Field names from element_class which should be used as columns for\n the DataFrame we're building\n\n converters : dict\n Dictionary of names mapping to functions. 
These functions will be\n applied to each element of a column before it's added to the\n DataFrame.\n\n rename_dict : dict\n Dictionary mapping element_class field names to desired column names\n in the produced DataFrame.\n\n extra_column_fns : dict\n Dictionary mapping column names to functions which take a variant\n and element (such as an AlleleRead instance) and return a single\n value for each row.\n\n variant_columns : bool\n If True, then add four columns for fields of a Variant: chr/pos/ref/alt\n along with a \"gene\" column indicating which gene name(s) the variant\n overlaps.\n\n convert_collections_to_size : bool\n If a value is a built-in collection (list, tuple, or set) then\n transform it to the size of that collection. If this option is False\n then collection values cause a runtime error.\n \"\"\"\n self.element_class = element_class\n self.rename_dict = rename_dict\n self.converters = converters\n self.variant_columns = variant_columns\n self.convert_collections_to_size = convert_collections_to_size\n\n if field_names is None:\n assert hasattr(element_class, \"_fields\"), (\n \"Expected %s to have member called `_fields`\" % element_class)\n field_names = element_class._fields\n\n # remove specified field names without changing the order of the others\n self.original_field_names = [\n x\n for x in field_names\n if x not in exclude\n ]\n\n for name in converters:\n if name not in self.original_field_names:\n raise ValueError(\"No field named '%s', valid names: %s\" % (\n name,\n self.original_field_names))\n\n self.renamed_field_names = [\n self.rename_dict.get(x, x)\n for x in self.original_field_names\n ]\n if self.variant_columns:\n columns_list = [\n # fields related to variant\n (\"chr\", []),\n (\"pos\", []),\n (\"ref\", []),\n (\"alt\", []),\n ]\n else:\n columns_list = []\n\n for name in self.renamed_field_names:\n columns_list.append((name, []))\n\n self.extra_column_fns = extra_column_fns\n for column_name in self.extra_column_fns:\n 
columns_list.append((column_name, []))\n\n self.columns_dict = OrderedDict(columns_list)\n\n def add(self, variant, element):\n if self.variant_columns:\n assert isinstance(variant, Variant)\n self.columns_dict[\"chr\"].append(variant.contig)\n self.columns_dict[\"pos\"].append(variant.original_start)\n self.columns_dict[\"ref\"].append(variant.original_ref)\n self.columns_dict[\"alt\"].append(variant.original_alt)\n else:\n assert variant is None\n\n assert isinstance(element, self.element_class)\n\n for name in self.original_field_names:\n value = getattr(element, name)\n\n if name in self.converters:\n fn = self.converters[name]\n value = fn(value)\n\n if isinstance(value, COLLECTION_TYPES) and self.convert_collections_to_size:\n value = len(value)\n elif not isinstance(value, VALID_ELEMENT_TYPES):\n raise ValueError(\n \"Please provider converter for field '%s' : %s to make a scalar or string\" % (\n name,\n type(value)))\n\n if name in self.rename_dict:\n name = self.rename_dict[name]\n self.columns_dict[name].append(value)\n\n for column_name, fn in self.extra_column_fns.items():\n self.columns_dict[column_name].append(fn(variant, element))\n\n def add_many(self, variant, elements):\n for element in elements:\n self.add(variant, element)\n\n def _check_column_lengths(self):\n \"\"\"\n Make sure columns are of the same length or else DataFrame construction\n will fail.\n \"\"\"\n column_lengths_dict = {\n name: len(xs)\n for (name, xs)\n in self.columns_dict.items()\n }\n unique_column_lengths = set(column_lengths_dict.values())\n if len(unique_column_lengths) != 1:\n raise ValueError(\n \"Mismatch between lengths of columns: %s\" % (column_lengths_dict,))\n\n def to_dataframe(self):\n self._check_column_lengths()\n return pd.DataFrame(self.columns_dict)\n\n\ndef dataframe_from_generator(\n element_class,\n variant_and_elements_generator,\n **kwargs):\n builder = DataFrameBuilder(element_class, **kwargs)\n for variant, elements in 
variant_and_elements_generator:\n builder.add_many(variant, elements)\n return builder.to_dataframe()\n" ]
[ [ "pandas.DataFrame" ] ]
biorover/MAGOT2
[ "d14a52626d9e036b55d8a7283fa983a3e38970f4" ]
[ "MAGOT2/lib.py" ]
[ "#!/usr/bin/env python\r\nimport sys, pysam, os, re, glob\r\nimport numpy as np\r\nfrom intervaltree import IntervalTree, Interval\r\nfrom collections import OrderedDict\r\nfrom typing import Union\r\nfrom pathlib import Path\r\nimport pandas as pd\r\n\r\ndef mode(a,prec = 2) -> float:\r\n vals,counts = np.unique(np.around(a,prec),return_counts=True)\r\n return vals[np.argmax(counts)]\r\n\r\ndef translate(x: str,padXs: bool = False,frames: Union[list,int] = 1) -> Union[list,str]:\r\n \"\"\"\r\n Translates DNA/RNA sequence into amino acid characters\r\n\r\n :params x: string dna sequence\r\n \"\"\"\r\n ttabs = []\r\n codons = {'TTT':'F','TTC':'F','TTA':'L','TTG':'L','CTT':'L','CTC':'L','CTA':'L','CTG':'L',\r\n 'ATT':'I','ATC':'I','ATA':'I','ATG':'M','GTT':'V','GTC':'V','GTA':'V','GTG':'V',\r\n 'TCT':'S','TCC':'S','TCA':'S','TCG':'S','CCT':'P','CCC':'P','CCA':'P','CCG':'P',\r\n 'ACT':'T','ACC':'T','ACA':'T','ACG':'T','GCT':'A','GCC':'A','GCA':'A','GCG':'A',\r\n 'TAT':'Y','TAC':'Y','TAA':'*','TAG':'*','CAT':'H','CAC':'H','CAA':'Q','CAG':'Q',\r\n 'AAT':'N','AAC':'N','AAA':'K','AAG':'K','GAT':'D','GAC':'D','GAA':'E','GAG':'E',\r\n 'TGT':'C','TGC':'C','TGA':'*','TGG':'W','CGT':'R','CGC':'R','CGA':'R','CGG':'R',\r\n 'AGT':'S','AGC':'S','AGA':'R','AGG':'R','GGT':'G','GGC':'G','GGA':'G','GGG':'G'}\r\n if 'u' in x or 'U' in x:\r\n x = x.replace('U','T').replace('u','t')\r\n rstring = False\r\n if type(frames) == int:\r\n frames = [frames]\r\n rstring = True\r\n elif frames in ['all']:\r\n frames = [1,2,3,-1,-2,-3]\r\n for frame in frames:\r\n ttab = []\r\n if frame >= 0:\r\n seq = x\r\n start = frame - 1\r\n else:\r\n seq = revcomp(x)\r\n start = -1 * frame - 1\r\n for i in range(start,len(seq),3):\r\n codon = x[i:i+3]\r\n if len(codon) == 3 or padXs:\r\n if codon.upper() in codons:\r\n ttab.append(codons[codon.upper()])\r\n else:\r\n ttab.append('X')\r\n ttabs.append(\"\".join(ttab))\r\n if rstring:\r\n return ttabs[0]\r\n else:\r\n return ttabs\r\n\r\ndef revcomp(x: str) 
-> str:\r\n comp = str.maketrans('ATCGMKRYVBHDatcgmkryvbhd','TAGCKMYRBVDHtagckmyrbvdh')\r\n seq = x[::-1].translate(comp)\r\n return seq\r\n\r\ndef orfs(x: str,best: bool = False) -> Union[list,str]: #todo: #1 add info table\r\n orfs = []\r\n for frames in translate(x,frames = 'all'):\r\n orfs += frames.split('*')\r\n if best:\r\n lens = [len(k) for k in orfs]\r\n return orfs[np.argmax(lens)]\r\n else:\r\n return orfs\r\n\r\ndef read_gff(gfffile: Path,version: Union[str,int] = 'auto') -> OrderedDict:\r\n \"\"\"\r\n Reads a reasonably sane gff3 or gtf file and fills and returns a nested dictionary containing annotation information\r\n (format feature_type_interval_tree -> dictionary_of_feature_types -> dictionary_of_transcripts -> dictionary_of_genes)\r\n attributes of features are stored as a dictionary within each interval\r\n\r\n :param gfffile: Path. Path to gff3 or gtf\r\n :param version: str or int. Valid options are 2, 3, or \"auto\" (in which case version will be determined from first line)\r\n \"\"\"\r\n annotdict = OrderedDict()\r\n t2g = {}\r\n with open(gfffile) as gff:\r\n for i,line in enumerate(gff):\r\n fields = line.strip().split('\\t')\r\n if version == 'auto' and i==0:\r\n if \"#gff-version\" in line:\r\n version = int(line.split()[1])\r\n elif \"ID=\" in line:\r\n version = 3\r\n elif \"gene_id \" in line:\r\n version = 2\r\n else:\r\n sys.stderr.write('Cannot determine gff format version of file from first line\\n')\r\n return None\r\n if line[0] != \"#\":\r\n feature = fields[2]\r\n if version == 3:\r\n attrs = {k:v for (k,v) in [i.split('=') for i in fields[8].split(';')] }\r\n elif version == 2:\r\n attrs = {k:v.replace('\"','') for (k,v) in [(i.split(' ')[0],' '.join(i.split(' ')[1:])) for i in fields[8].replace('; ',';').split(';')] }\r\n if version == 3:\r\n if feature == 'gene':\r\n gene_id = attrs['ID']\r\n transcript_id = 'none' # this is so I can access the coords of the whole gene under annotdict[gene_id][\"none\"]['gene']\r\n elif 
feature in ['mRNA','transcript']:\r\n transcript_id = attrs['ID']\r\n gene_id = attrs['Parent']\r\n t2g[transcript_id] = gene_id\r\n else:\r\n transcript_id = attrs['Parent']\r\n gene_id = t2g[transcript_id]\r\n elif version == 2:\r\n gene_id = attrs['gene_id']\r\n transcript_id = attrs['transcript_id']\r\n if not gene_id in annotdict:\r\n annotdict[gene_id] = OrderedDict()\r\n if not transcript_id in annotdict[gene_id]:\r\n annotdict[gene_id][transcript_id] = dict()\r\n if not feature in annotdict[gene_id][transcript_id]:\r\n annotdict[gene_id][transcript_id][feature] = IntervalTree()\r\n start = int(fields[3]) - 1\r\n stop = int(fields[4])\r\n attrs['seqid'] = fields[0]\r\n attrs['source'] = fields[1]\r\n attrs['score'] = fields[5]\r\n attrs['strand'] = fields[6]\r\n attrs['phase'] = fields[7]\r\n annotdict[gene_id][transcript_id][feature][start:stop] = attrs\r\n return annotdict\r\n\r\ndef annot2seqs(annotdict: dict, fasta_file: Path, which_transcript: str = 'all', \r\n seq_from: str = 'CDS', seq_level: str = 'transcript', seq_type: str = \"nucl\") -> OrderedDict:\r\n \"\"\"\r\n takes an annotdict (nested objects coordinate_IntervalTree -> feature_dictionary -> \\\r\ntranscript_dictionary -> gene_dictionary) ant returns a dictionary of all sequences\r\n\r\n :param annotdict: dict. All annotations\r\n :param fasta_file: Path. Path to fasta file\r\n :param which_transcript: str default \"all\". Options are \"all\", \"longest\", \"first\", and \"best_scoring\" \r\n :param seq_from: str default \"CDS\". Name of feature to extract sequence from (usually \"CDS\" or \"exon\")\r\n :param seq_level: str default \"transcript\". What to return the sequences of - usually \"transcript\" or \"gene\" \\\r\n(gene only valid if which_transcript = \"first\", \"longest\", or \"best_scoring\"). \\\r\nCould also be \"CDS\" or \"exon\" or other sub-feature (must then match \"seq_from\" argument)\r\n :param seq_type: str default \"nucl\". 
Whether to output \"nucl\" (nucleotide), \"aa\" (translated amino acid) \\\r\nor \"lorfaa\" (longest orf amino acid) sequence\r\n \"\"\"\r\n if not os.path.exists(str(fasta_file) + '.fai'):\r\n pysam.faidx(str(fasta_file))\r\n if not seq_level in ['transcript','gene']:\r\n if seq_level != seq_from:\r\n sys.stderr.write('error: invalid choice for \"seq_level\": ' + str(name_from) + \r\n '! Currently supported: \"transcript\", \"gene\", or else must match \"seq_from\" argument\\n')\r\n return None\r\n fasta = pysam.FastaFile(fasta_file)\r\n outseqs = OrderedDict()\r\n for gene_id in annotdict:\r\n tseqs = []\r\n tscores = []\r\n for transcript_id in annotdict[gene_id]:\r\n if seq_from in annotdict[gene_id][transcript_id]:\r\n seq = []\r\n scores = []\r\n for i,interval in enumerate(sorted(annotdict[gene_id][transcript_id][seq_from])):\r\n if i == 0:\r\n seqid = interval[2]['seqid']\r\n strand = interval[2]['strand']\r\n elif seqid != interval[2]['seqid']:\r\n sys.stderr.write('error: annotations contain transcript with features on different sequence! Not currently supported\\n')\r\n return None\r\n elif strand != interval[2]['strand']:\r\n sys.stderr.write('error: annotations contain transcript with features on different strands! 
Not currently supported, and doesn\\'t really make sense!\\n')\r\n return None\r\n iseq = fasta.fetch(seqid,interval[0],interval[1])\r\n if not seq_level in ['transcript','gene']:\r\n outseqs[transcript_id + \":\" + seq_from + str(i)] = iseq\r\n else:\r\n seq.append(iseq)\r\n if which_transcript == 'best_scoring':\r\n scores.append(interval[2]['score'])\r\n if seq_level in ['transcript','gene']:\r\n seq = \"\".join(seq)\r\n if strand == '-':\r\n seq = revcomp(seq)\r\n if which_transcript == 'all':\r\n outseqs[transcript_id] = seq\r\n else:\r\n tseqs.append(seq[:])\r\n if which_transcript == 'best_scoring':\r\n tscores.append(np.mean(scores))\r\n if which_transcript == 'first' and seq_level in ['transcript','gene']:\r\n outseqs[locals()[seq_level + '_id'] ] = tseqs[0]\r\n elif which_transcript == 'longest' and seq_level in ['transcript','gene']:\r\n seqlens = [len(k) for k in tseqs]\r\n outseqs[locals()[seq_level + '_id'] ] = tseqs[np.argmax(seqlens)]\r\n elif which_transcript == 'best_scoring' and seq_level in ['transcript','gene']:\r\n outseqs[locals()[seq_level + '_id'] ] = tseqs[np.argmax(tscores)]\r\n if seq_type == 'aa':\r\n outseqs = OrderedDict((k,translate(v)) for k,v in outseqs.items())\r\n elif seq_type == 'lorfaa':\r\n outseqs = OrderedDict((k,orfs(v,best = True)) for k,v in outseqs.items())\r\n return outseqs\r\n\r\ndef annot2gtf(annotdict: dict, features2write: Union[list,str] = 'CDS') -> str:\r\n \"\"\"\r\n takes a MAGOT2 format annotation dictionary and returns a string for specific annotations in gtf format\r\n\r\n :params annotdict: dict. MAGOT2 format annotation dictionary (nested objects coordinate_IntervalTree -> feature_dictionary -> \\\r\ntranscript_dictionary -> gene_dictionary)\r\n :params features2write: list or str default \"CDS\". 
What features to write out (usually \"CDS\", sometimes also \"exon\")\r\n \"\"\"\r\n if type(features2write) == str:\r\n f2w = [features2write]\r\n else:\r\n f2w = features2write\r\n linelist = []\r\n for gene_id in annotdict:\r\n for transcript_id in annotdict[gene_id]:\r\n for feature in f2w:\r\n if feature in annotdict[gene_id][transcript_id]:\r\n for ivl in annotdict[gene_id][transcript_id][feature]:\r\n attrs = ivl[2]\r\n linelist.append('\\t'.join([attrs['seqid'],attrs['source'],feature,str(ivl[0] + 1), str(ivl[1]), \r\n attrs['score'],attrs['strand'],attrs['phase'],\r\n 'gene_id ' + gene_id + ';transcript_id ' + transcript_id]))\r\n return \"\\n\".join(linelist)\r\n\r\ndef annot2gff3(annotdict: dict) -> str:\r\n \"\"\"\r\n takes a MAGOT2 format annotation dictionary and returns a string for gene, mRNA/transcript, exon, and CDS features in gff3 format\r\n\r\n :params annotdict: dict. MAGOT2 format annotation dictionary (nested objects coordinate_IntervalTree -> feature_dictionary -> \\\r\ntranscript_dictionary -> gene_dictionary)\r\n \"\"\"\r\n linelist = []\r\n for gene_id in annotdict:\r\n tlines = []\r\n seqid,gstart,gend,strand,gscore,source = '.',np.inf,0,'.',0,'.'\r\n for transcript_id in annotdict[gene_id]:\r\n flines = []\r\n tstart,tend,tscore = np.inf,0,0\r\n for feature in ['exon','CDS']:\r\n if feature in annotdict[gene_id][transcript_id]:\r\n for i,ivl in enumerate(annotdict[gene_id][transcript_id][feature]):\r\n attrs = ivl[2]\r\n if ivl[0] - 1 < tstart: tstart = ivl[0] -1\r\n if ivl[1] > tend: tend = ivl[1]\r\n if attrs['score'] > tscore: tscore = attrs['score']\r\n if seqid == '.': seqid = attrs['seqid']\r\n if strand == '.': strand = attrs['strand']\r\n if source == '.': source = attrs['source']\r\n flines.append('\\t'.join([attrs['seqid'],attrs['source'],feature,str(ivl[0] + 1), str(ivl[1]), \r\n attrs['score'],attrs['strand'],attrs['phase'],\r\n 'ID=' + transcript_id + ':' + feature + str(i) + ';Parent=' + transcript_id]))\r\n if 'CDS' in 
annotdict[gene_id][transcript_id]:\r\n ttype = 'mRNA'\r\n elif 'exon' in annotdict[gene_id][transcript_id]:\r\n ttype = 'transcript'\r\n else:\r\n continue\r\n tlines.append(\r\n '\\t'.join([seqid,source,ttype,str(tstart),str(tend), str(tscore),strand,'.',\r\n 'ID=' + transcript_id + ';Parent=' + gene_id])\r\n )\r\n tlines.extend(flines)\r\n if tstart < gstart: gstart = tstart\r\n if tend > gend: gend = tend\r\n if tscore > gscore: gscore = tscore\r\n if len(tlines) > 0:\r\n linelist.append(\r\n '\\t'.join([seqid,source,'gene',str(gstart),str(gend), str(gscore),strand,'.',\r\n 'ID=' + gene_id])\r\n )\r\n return \"\\n\".join(linelist)\r\n\r\ndef dipvcf2tripvcf(vcfin: Path,vcfout: Path,expand_on: str = \"SR\"):\r\n \"\"\"\r\n reads a diploid vcf file and writes a triploid vcf file, chosing which allele to duplicate based on the \"expand_on\" param.\r\n\r\n :param vcfin: Path. Path to diploid input vcf file\r\n :param vcfout: Path. Path to output triploid vcf file\r\n :param expand_on: str defualt \"SR\". What information to use to chose which allele to duplicate. 
Currently only valid option \\\r\nis \"SR\", which will use the \"SR\" attribute from the info collumn to add the allele with the highest depth to the alleles field\r\n \"\"\"\r\n with open(vcfin) as dip:\r\n with open(vcfout,'w') as trip:\r\n for line in dip:\r\n if line[0] == \"#\":\r\n trip.write(line)\r\n continue\r\n elif ';SR=' in line:\r\n srs = line.split(';SR=')[1].split(';')[0].split('\\t')[0]\r\n elif '\\tSR=' in line:\r\n srs = line.split('\\tSR=')[1].split(';')[0].split('\\t')[0]\r\n else:\r\n continue\r\n srs = [int(k) for k in srs.split(',')]\r\n ads = [srs[i] + srs[i+1] for i in range(0,len(srs),2)]\r\n allele = np.argmax(ads)\r\n fields = line.strip().split('\\t')\r\n subfields = fields[9].split(':')\r\n gtindex = fields[8].split(':').index('GT')\r\n alleles = [int(k) for k in subfields[gtindex].split('/')] + [allele]\r\n alleles.sort()\r\n subfields[gtindex] = '/'.join([str(k) for k in alleles])\r\n fields[9] = \":\".join(subfields)\r\n trip.write(\"\\t\".join(fields) + '\\n')\r\n\r\ndef merge_tables(table_path: str,*args,**kwargs):\r\n \"\"\"\r\n Takes a path w/ wildcards and merges tables with new columns for variable folders / filenames. Positional args are used for column names and, \\\r\n if iterable, regexes to match specific values. 
Keyword args are passed to pandas read_csv function.\r\n\r\n :params table_path: path to tables with wildcards for variables to expand in new columns\r\n \"\"\"\r\n ppl = table_path.split('*')\r\n globpaths = glob.glob(table_path)\r\n relist,colnames = [],[]\r\n for col in args:\r\n if type(col) == str:\r\n relist.append(re.compile('.*'))\r\n colnames.append(col)\r\n else:\r\n relist.append(re.compile(col[1]))\r\n colnames.append(col[0])\r\n #print(relist)\r\n #print(globpaths)\r\n for k,tp in enumerate(globpaths):\r\n wcs = []\r\n regexfail = False\r\n tpstump = tp\r\n for i,pp in enumerate(ppl[:-1]):\r\n tpstump = tpstump[len(pp):]\r\n wc = tpstump.split(ppl[i+1])[0]\r\n wcs.append(wc)\r\n tpstump = tpstump[len(wc):]\r\n if len(relist) > i:\r\n if not relist[i].search(wcs[-1]):\r\n regexfail = True\r\n continue\r\n if regexfail:\r\n continue\r\n tpdf = pd.read_csv(tp,**kwargs)\r\n for i,col in enumerate(colnames):\r\n tpdf[col] = wcs[i]\r\n for i,wc in enumerate(wcs[len(colnames):]):\r\n tpdf['newcol' + str(i)] = wc\r\n if k == 0:\r\n df = tpdf.copy()\r\n else:\r\n df = df.append(tpdf)\r\n return df\r\n" ]
[ [ "numpy.around", "pandas.read_csv", "numpy.mean", "numpy.argmax" ] ]
YasserAlBarbary/Real-time-face-tracking-and-recognition
[ "dbee9e28e167e2b380a7cf9809cfd40ca7ebe4ff" ]
[ "recognition/FaceRecognition.py" ]
[ "import os\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom recognition import facenet\r\n\r\nBASE_DIR = os.path.dirname(__file__) + '/'\r\n\r\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\r\nPATH_TO_CKPT = 'model/20170512-110547.pb'\r\ninput_image_size = 160\r\n\r\n\r\nclass FaceRecognition:\r\n def __init__(self):\r\n # Load models\r\n self.recognition_graph = tf.Graph()\r\n self.sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),\r\n log_device_placement=False),graph=self.recognition_graph)\r\n print('Loading feature extraction model')\r\n with self.sess.as_default():\r\n with self.recognition_graph.as_default():\r\n facenet.load_model(BASE_DIR + PATH_TO_CKPT)\r\n\r\n def __del__(self):\r\n self.sess.close()\r\n\r\n def recognize(self, image):\r\n\r\n images_placeholder = self.recognition_graph.get_tensor_by_name(\"input:0\")\r\n embeddings = self.recognition_graph.get_tensor_by_name(\"embeddings:0\")\r\n phase_train_placeholder = self.recognition_graph.get_tensor_by_name(\"phase_train:0\")\r\n embedding_size = embeddings.get_shape()[1]\r\n\r\n emb_array = np.zeros((1, embedding_size))\r\n image = facenet.prewhiten(image)\r\n image = cv2.resize(image, (input_image_size, input_image_size), interpolation=cv2.INTER_AREA)\r\n image = image.reshape(-1, input_image_size, input_image_size, 3)\r\n feed_dict = {images_placeholder: image, phase_train_placeholder: False}\r\n emb_array[0, :] = self.sess.run(embeddings, feed_dict=feed_dict)\r\n return emb_array\r\n" ]
[ [ "tensorflow.Graph", "numpy.zeros", "tensorflow.GPUOptions" ] ]
Yipeng-Sun/Paddle
[ "e08b6e70e948f1a4ad45675b866aa48621ff3b78" ]
[ "python/paddle/fluid/framework.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport collections\nfrom collections import defaultdict\nfrom collections import Iterable\nimport contextlib\nfrom .wrapped_decorator import signature_safe_contextmanager\nimport os\nimport re\nimport traceback\nimport six\n\nimport numpy as np\nimport subprocess\nimport multiprocessing\n\nfrom .. import compat as cpt\nfrom .proto import framework_pb2\ntry:\n if os.name == 'nt':\n import sys\n third_lib_path = os.path.abspath(os.path.dirname(\n __file__)) + os.sep + '..' + os.sep + 'libs'\n os.environ['path'] += ';' + third_lib_path\n sys.path.append(third_lib_path)\n\n from . import core\nexcept ImportError as e:\n if os.name == 'nt':\n executable_path = os.path.abspath(os.path.dirname(sys.executable))\n raise ImportError(\n \"\"\"NOTE: You may need to run \\\"set PATH=%s;%%PATH%%\\\"\n if you encounters \\\"DLL load failed\\\" errors. If you have python\n installed in other directory, replace \\\"%s\\\" with your own\n directory. The original error is: \\n %s\"\"\" %\n (executable_path, executable_path, cpt.get_exception_message(e)))\n else:\n raise ImportError(\n \"\"\"NOTE: You may need to run \\\"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\\\"\n if you encounters \\\"libmkldnn.so not found\\\" errors. 
If you have python\n installed in other directory, replace \\\"/usr/local/lib\\\" with your own\n directory. The original error is: \\n\"\"\" + cpt.get_exception_message(e))\nexcept Exception as e:\n raise e\nfrom . import unique_name\n\n__all__ = [\n 'Program',\n 'default_startup_program',\n 'default_main_program',\n 'program_guard',\n 'name_scope',\n 'cuda_places',\n 'cpu_places',\n 'cuda_pinned_places',\n 'in_dygraph_mode',\n]\n\nEMPTY_VAR_NAME = core.kEmptyVarName()\nTEMP_VAR_NAME = core.kTempVarName()\nGRAD_VAR_SUFFIX = core.kGradVarSuffix()\nZERO_VAR_SUFFIX = core.kZeroVarSuffix()\nCONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()\n\n_dygraph_tracer_ = None\n_dygraph_current_expected_place_ = None\n\n\ndef in_dygraph_mode():\n \"\"\"\n Check program status(tracer), Whether it runs in dygraph mode or not\n\n Returns:\n out (boolean): True if the program is running in dynamic graph mode\n\n Examples:\n .. code-block:: python\n\n if fluid.in_dygraph_mode():\n pass\n\n \"\"\"\n return _dygraph_tracer_ is not None\n\n\ndef _dygraph_tracer():\n return _dygraph_tracer_\n\n\ndef _current_expected_place():\n return _dygraph_current_expected_place_\n\n\ndef _cpu_num():\n return int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n\n\ndef cuda_places(device_ids=None):\n \"\"\"\n Create a list of :code:`fluid.CUDAPlace` objects.\n\n If :code:`device_ids` is None, environment variable of\n :code:`FLAGS_selected_gpus` would be checked first. If\n :code:`FLAGS_selected_gpus=0,1,2`, the returned list would\n be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].\n If :code:`FLAGS_selected_gpus` is not set, all visible\n gpu places would be returned. \n\n If :code:`device_ids` is not None, it should be the device\n ids of gpus. 
For example, if :code:`device_ids=[0,1,2]`, \n the returned list would be \n [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].\n \n Args: \n device_ids (None|list(int)|tuple(int)): gpu device id list.\n\n Returns:\n out (list(fluid.CUDAPlace)): gpu place list.\n\n Examples:\n .. code-block:: python\n\n cuda_places = fluid.cuda_places()\n\n \"\"\"\n assert core.is_compiled_with_cuda(), \\\n \"Not compiled with CUDA\"\n if device_ids is None:\n gpus_env = os.getenv(\"FLAGS_selected_gpus\")\n if gpus_env:\n device_ids = [int(s) for s in gpus_env.split(\",\")]\n else:\n device_ids = six.moves.range(core.get_cuda_device_count())\n elif not isinstance(device_ids, (list, tuple)):\n device_ids = [device_ids]\n return [core.CUDAPlace(dev_id) for dev_id in device_ids]\n\n\ndef cpu_places(device_count=None):\n \"\"\"\n Create a list of :code:`fluid.CPUPlace` objects.\n \n If :code:`device_count` is None, the device count would\n be determined by environment variable :code:`CPU_NUM`. \n If :code:`CPU_NUM` is not set, the device count would\n be determined by :code:`multiprocessing.cpu_count()`. \n\n Args:\n device_count (None|int): device number.\n\n Returns:\n out (list(fluid.CPUPlace)): cpu place list.\n\n Examples:\n .. code-block:: python\n\n cpu_places = fluid.cpu_places()\n \"\"\"\n\n if device_count is None:\n device_count = _cpu_num()\n return [core.CPUPlace()] * device_count\n\n\ndef cuda_pinned_places(device_count=None):\n \"\"\"\n Create a list of :code:`fluid.CUDAPinnedPlace` objects.\n\n If :code:`device_count` is None, the device count would\n be determined by environment variable :code:`CPU_NUM`. \n If :code:`CPU_NUM` is not set, the device count would\n be determined by :code:`multiprocessing.cpu_count()`. \n\n Args:\n device_count (None|int): device number.\n\n Returns:\n out (list(fluid.CUDAPinnedPlace)): cuda pinned place list.\n\n Examples:\n .. 
code-block:: python\n\n cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()\n # or\n cuda_pinned_places = fluid.cuda_pinned_places(1)\n\n \"\"\"\n assert core.is_compiled_with_cuda(), \\\n \"Not compiled with CUDA\"\n if device_count is None:\n device_count = _cpu_num()\n return [core.cuda_pinned_places()] * device_count\n\n\nclass NameScope(object):\n def __init__(self, name=\"\", parent=None):\n self._children = dict()\n self._name = name\n self._parent = parent\n\n def child(self, prefix):\n if prefix not in self._children:\n new_child = NameScope(prefix, self)\n self._children[prefix] = [new_child]\n else:\n new_child = NameScope(prefix + \"_%d\" % len(self._children[prefix]),\n self)\n self._children[prefix].append(new_child)\n return new_child\n\n def parent(self):\n return self._parent\n\n def name(self):\n return self._name\n\n\n_name_scope = NameScope()\n\n\n@signature_safe_contextmanager\ndef name_scope(prefix=None):\n \"\"\"\n Generate hierarchical name prefix for the operators.\n\n Note: This should only used for debugging and visualization purpose.\n Don't use it for serious analysis such as graph/program transformations.\n\n Args:\n prefix(str): prefix.\n\n Examples:\n .. 
code-block:: python\n\n with fluid.name_scope(\"s1\"):\n a = fluid.layers.data(name='data', shape=[1], dtype='int32')\n b = a + 1\n with fluid.name_scope(\"s2\"):\n c = b * 1\n with fluid.name_scope(\"s3\"):\n d = c / 1\n with fluid.name_scope(\"s1\"):\n f = fluid.layers.pow(d, 2.0)\n with fluid.name_scope(\"s4\"):\n g = f - 1\n \"\"\"\n # TODO(panyx0718): Only [0-9a-z].\n assert prefix, \"namescope prefix cannot be empty.\"\n global _name_scope\n _name_scope = _name_scope.child(prefix)\n yield\n _name_scope = _name_scope.parent()\n\n\ndef _full_name_scope():\n global _name_scope\n scope = _name_scope\n name = \"\"\n while scope:\n name = scope.name() + \"/\" + name\n scope = scope.parent()\n return name\n\n\ndef generate_control_dev_var_name():\n import random\n return CONTROL_DEP_VAR_PREFIX + \"@\" + str(random.random())\n\n\ndef grad_var_name(var_name):\n \"\"\"\n Returns:\n str: gradient name for a certain var name\n \"\"\"\n return var_name + GRAD_VAR_SUFFIX\n\n\ndef convert_np_dtype_to_dtype_(np_dtype):\n \"\"\"\n Convert the data type in numpy to the data type in Paddle\n\n Args:\n np_dtype(np.dtype): the data type in numpy.\n\n Returns:\n core.VarDesc.VarType: the data type in Paddle.\n\n \"\"\"\n dtype = np.dtype(np_dtype)\n if dtype == np.float32:\n return core.VarDesc.VarType.FP32\n elif dtype == np.float64:\n return core.VarDesc.VarType.FP64\n elif dtype == np.float16:\n return core.VarDesc.VarType.FP16\n elif dtype == np.int32:\n return core.VarDesc.VarType.INT32\n elif dtype == np.int16:\n return core.VarDesc.VarType.INT16\n elif dtype == np.int64:\n return core.VarDesc.VarType.INT64\n elif dtype == np.bool:\n return core.VarDesc.VarType.BOOL\n elif dtype == np.uint16:\n return core.VarDesc.VarType.INT16\n elif dtype == np.uint8:\n return core.VarDesc.VarType.UINT8\n elif dtype == np.int8:\n return core.VarDesc.VarType.INT8\n else:\n raise ValueError(\"Not supported numpy dtype %s\" % dtype)\n\n\ndef dtype_is_floating(dtype):\n \"\"\"\n Check the data 
type is floating or not.\n Args:\n dtype(np.dtype|core.VarDesc.VarType): data type.\n Could be numpy format or Paddle format\n\n Returns(bool): True if data type is a float value\n\n \"\"\"\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n return dtype in [\n core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.FP64\n ]\n\n\ndef _debug_string_(proto, throw_on_error=True):\n \"\"\"\n Get the debug string of a protobuf message. The message could be not\n initialized.\n Args:\n proto(google.protobuf.message.Message): The protobuf message\n throw_on_error(bool): True if raise an error when the protobuf message\n is not initialized.\n\n Returns(str): The debug string of the protobuf message\n\n \"\"\"\n error_fields = list()\n if not proto.IsInitialized(error_fields) and throw_on_error:\n raise ValueError(\"{0} are not initialized.\\nThe message is {1}:\\n\".\n format(error_fields, proto))\n return proto.__str__()\n\n\nclass Variable(object):\n \"\"\"\n In Fluid, every input and output of an operator is a variable. In most\n cases, variables are used for holding different kinds of data or training\n labels. A variable belongs to a block. All variable has its own name and\n two variables in different blocks could have the same name.\n\n There are many kinds of variables. Each kind of them has its own attributes\n and usages. Please reference the framework.proto for details.\n\n Most of a Variable's member variables can be setted to be None. It mean\n it is not available or will be specified later.\n\n Args:\n block(Block): The block that the variable belongs to.\n type(core.VarDesc.VarType): Variable type. Please reference the\n framework.proto for details.\n name(str|None): The name of the variable. If setted None, it will be\n generated automatically. Default: None\n shape(tuple|list|None): The shape of the variable. 
-1 means the batch size.\n Some kinds of variable do not contain shape, just set it to None.\n Default: None\n dtype(np.dtype|core.VarDesc.VarType|str|None): The data type of variable.\n Default: None\n lod_level (int|None): The level of lod tensor. 0 means it is not a time\n series data.\n Default: None\n capacity (int|None): The capacity of Channel variable. Ignored for other\n types. Default: None\n persistable (bool|None): True if the variable is persistable. A persistable\n variable will not be deleted after an iteration ending. Defaults: None.\n error_clip (BaseErrorClipAttr|None): The error clip attributes of the\n corresponding gradient variable. Default: None\n stop_gradient (bool): True if the variable will stop to calculate its\n gradients when backward. Default: False.\n is_data (bool): True if the variable is an input data. Default: False\n\n Notes:\n The constructor of Variable should not be invoked directly. Please\n use `Block.create_var` to create a variable.\n\n Examples:\n .. 
code-block:: python\n\n cur_program = Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n \"\"\"\n\n def __init__(self,\n block,\n type=core.VarDesc.VarType.LOD_TENSOR,\n name=None,\n shape=None,\n dtype=None,\n lod_level=None,\n capacity=None,\n persistable=None,\n error_clip=None,\n stop_gradient=False,\n is_data=False,\n **kwargs):\n self.block = block\n if name is None:\n name = unique_name.generate('_generated_var')\n\n if dtype is not None:\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n if in_dygraph_mode():\n # record vars in tracer rather than blocks\n self._ivar = kwargs.get(\"ivar\", None)\n if not self._ivar:\n self._ivar = core.VarBase(\n name, dtype if dtype else core.VarDesc.VarType.FP32,\n list(shape) if shape else [],\n _current_expected_place(), stop_gradient, True\n if persistable else False)\n if persistable:\n _dygraph_tracer().trace_var(name, self)\n self.op = None\n else:\n self.error_clip = error_clip\n\n is_new_var = False\n name = cpt.to_text(name)\n self.desc = self.block.desc.find_var(cpt.to_bytes(name))\n\n if self.desc is None:\n self.desc = self.block.desc.var(cpt.to_bytes(name))\n is_new_var = True\n\n if is_new_var:\n self.desc.set_type(type)\n elif self.desc.type() != type:\n raise ValueError(\n \"Variable {0} has been created before. The \"\n \"previous type is {1}; the new type is {2}. They\"\n \" are not matched\".format(self.name, self.desc.type(),\n type))\n\n if shape is not None:\n if is_new_var:\n self.desc.set_shape(shape)\n else:\n old_shape = self.shape\n shape = tuple(shape)\n if shape != old_shape:\n raise ValueError(\n \"Variable {0} has been created before. the previous \"\n \"shape is {1}; the new shape is {2}. 
They are not \"\n \"matched.\".format(self.name, old_shape, shape))\n if dtype is not None:\n if is_new_var:\n self.desc.set_dtype(dtype)\n else:\n old_dtype = self.dtype\n if dtype != old_dtype:\n raise ValueError(\n \"Variable {0} has been created before. \"\n \"The previous data type is {1}; the new \"\n \"data type is {2}. They are not \"\n \"matched.\".format(self.name, old_dtype, dtype))\n\n if lod_level is not None:\n if is_new_var:\n self.desc.set_lod_level(lod_level)\n else:\n if lod_level != self.lod_level:\n raise ValueError(\n \"Variable {0} has been created before. \"\n \"The previous lod_level is {1}; the new \"\n \"lod_level is {2}. They are not \"\n \"matched\".format(self.name, self.lod_level,\n lod_level))\n if persistable is not None:\n if is_new_var:\n self.desc.set_persistable(persistable)\n else:\n if persistable != self.persistable:\n raise ValueError(\n \"Variable {0} has been created before.\"\n \"The previous persistable is {1}; the new \"\n \"persistable is {2}. 
They are not matched\".format(\n self.name, self.persistable, persistable))\n\n if capacity is not None:\n if is_new_var:\n self.desc.set_capacity(capacity)\n else:\n # TODO(abhinavarora) : Compare with set capacity once,\n # get_capacity is implemented\n pass\n\n self.block.vars[name] = self\n self.op = None\n self._stop_gradient = stop_gradient\n self.is_data = is_data\n\n def numpy(self):\n new_ivar = self._ivar._copy_to(core.CPUPlace(), True)\n return np.array(new_ivar.value().get_tensor())\n\n def backward(self, backward_strategy=None):\n from .dygraph import BackwardStrategy\n if isinstance(backward_strategy, BackwardStrategy):\n self._ivar._run_backward(backward_strategy)\n elif backward_strategy is not None:\n raise TypeError(\n \"only BackwardStrategy type should be passed in backward\")\n else:\n backward_strategy = BackwardStrategy()\n backward_strategy.sort_sum_gradient = False\n self._ivar._run_backward(backward_strategy)\n\n def gradient(self):\n new_ivar = self._ivar._grad_ivar()._copy_to(core.CPUPlace(), True)\n return np.array(new_ivar.value().get_tensor())\n\n def clear_gradient(self):\n self._ivar._clear_gradient()\n\n def __str__(self):\n return self.to_string(True)\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n Get debug string.\n\n Args:\n throw_on_error(bool): True if raise an exception when self is\n not initialized.\n with_details(bool): more details about variables and parameters\n (e.g. trainable, optimize_attr, ...) will be printed when\n with_details is True. 
Default False;\n\n Returns:\n str: The debug string.\n \"\"\"\n if in_dygraph_mode():\n # TODO(panyx0718): add more dygraph debug info.\n return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,\n self.shape)\n\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n if with_details:\n additional_attr = (\"error_clip\", \"stop_gradient\")\n for attr_name in additional_attr:\n res_str += \"%s: %s\\n\" % (\n attr_name, six.binary_type(getattr(self, attr_name)))\n return res_str\n\n __repr__ = __str__\n\n def set_desc(self, input):\n \"\"\"\n Set the variable description.\n\n Args:\n input(core.VarDesc): The new VarDesc.\n\n Returns:\n None\n \"\"\"\n self.desc = input\n\n @property\n def stop_gradient(self):\n if in_dygraph_mode():\n return self._ivar.stop_gradient\n else:\n return self._stop_gradient\n\n @stop_gradient.setter\n def stop_gradient(self, s):\n if in_dygraph_mode():\n self._ivar.stop_gradient = s\n else:\n self._stop_gradient = s\n\n @property\n def persistable(self):\n if in_dygraph_mode():\n return self._ivar.persistable\n else:\n return self.desc.persistable()\n\n @persistable.setter\n def persistable(self, p):\n if in_dygraph_mode():\n return self._ivar.persistable\n else:\n self.desc.set_persistable(p)\n\n @property\n def name(self):\n if in_dygraph_mode():\n return self._ivar.name\n else:\n return cpt.to_text(self.desc.name())\n\n @name.setter\n def name(self, new_name):\n if in_dygraph_mode():\n self._ivar.name = new_name\n else:\n self.desc.set_name(new_name)\n\n @property\n def shape(self):\n # convert to tuple, make it as same as numpy API.\n if in_dygraph_mode():\n return self._ivar.shape\n else:\n return tuple(self.desc.shape())\n\n @property\n def dtype(self):\n if in_dygraph_mode():\n return self._ivar.dtype\n else:\n return self.desc.dtype()\n\n 
@property\n def lod_level(self):\n # TODO(minqiyang): Support lod_level in dygraph mode\n return self.desc.lod_level()\n\n @property\n def type(self):\n if in_dygraph_mode():\n return self._ivar.dtype\n else:\n return self.desc.type()\n\n def _set_error_clip(self, error_clip):\n \"\"\"\n Set the error_clip.\n\n Args:\n error_clip(BaseErrorClipAttr) : The new error_clip.\n\n Returns:\n None\n \"\"\"\n self.error_clip = error_clip\n\n def _slice_indices(self, slice, length):\n \"\"\"\n Reference implementation for the slice.indices method.\n \"\"\"\n # Compute step and length as integers.\n step = 1 if slice.step is None else slice.step\n\n # Raise ValueError for negative length or zero step.\n if length < 0:\n raise ValueError(\"length should not be negative\")\n if step == 0:\n raise ValueError(\"slice step cannot be zero\")\n\n # Find lower and upper bounds for start and stop.\n lower = -1 if step < 0 else 0\n upper = length - 1 if step < 0 else length\n\n # Compute start.\n if slice.start is None:\n start = upper if step < 0 else lower\n else:\n start = slice.start\n start = max(start + length, lower) if start < 0 else min(start,\n upper)\n\n # Compute stop.\n if slice.stop is None:\n stop = lower if step < 0 else upper\n else:\n stop = slice.stop\n stop = max(stop + length, lower) if stop < 0 else min(stop, upper)\n\n return start, stop, step\n\n def _detectEllipsis(self, item):\n has_ellipsis = False\n start = 0\n end = len(self.shape)\n for index, o in enumerate(item):\n if o is Ellipsis:\n if has_ellipsis:\n raise ValueError(\"Index can have one ellipsis only.\")\n has_ellipsis = True\n start = index\n else:\n if has_ellipsis:\n end = index\n return has_ellipsis, start, end\n\n def _reconstructSliceinfo(self, item):\n has_ellipsis, start, end = self._detectEllipsis(item)\n if has_ellipsis:\n newitem = []\n for i in range(start):\n newitem.append(item[i])\n for i in range(start, end):\n newitem.append(slice(None, None, None))\n for i in range(end, 
len(item)):\n newitem.append(item[i])\n return newitem\n else:\n return None\n\n def _detectContinuesSlice(self, item):\n starts = []\n ends = []\n for index, o in enumerate(item):\n if isinstance(o, int):\n start = int(o)\n if (index > 0 and index >= self.shape[index]) \\\n or (index < 0 and (index + self.shape[index]) < 0):\n raise IndexError(\"invalid index\")\n start = max(start + self.shape[index], 0) if start < 0 else min(\n start, self.shape[index])\n starts.append(start)\n ends.append(start + 1)\n elif isinstance(o, slice):\n start, stop, step = self._slice_indices(o, self.shape[index])\n if step == 1 or step == -1:\n starts.append(start)\n ends.append(stop)\n else:\n return False, None\n else:\n raise IndexError(\"Valid index accept int or slice or ellipsis\")\n return True, [starts, ends]\n\n def _cloneVar(self, copy=False):\n if not copy:\n return self.block.create_var(\n name=unique_name.generate(\".\".join(self.name)),\n dtype=self.dtype,\n persistable=self.persistable,\n stop_gradient=self.stop_gradient, )\n else:\n return self\n\n def _sliceVar(self, axes, starts, ends):\n new_var = self._cloneVar()\n self.block.append_op(\n type=\"slice\",\n inputs={'Input': [self]},\n outputs={'Out': [new_var]},\n attrs={'axes': axes,\n 'starts': starts,\n 'ends': ends})\n return new_var\n\n def _concatVar(self, inputs, axis):\n new_var = self._cloneVar()\n self.block.append_op(\n type=\"concat\",\n inputs={'X': inputs},\n outputs={'Out': [new_var]},\n attrs={'axis': axis, })\n return new_var\n\n def _sliceAndConcatVar(self, item, axis):\n if isinstance(item, slice):\n if self.shape[axis] < 0:\n return self._cloneVar(True)\n start, stop, step = self._slice_indices(item, self.shape[axis])\n if step == 1:\n return self._sliceVar([axis], [start], [stop])\n else:\n vars = []\n if step > 0:\n while start < stop:\n vars.append(\n self._sliceVar([axis], [start], [start + 1]))\n start += step\n else:\n while start > stop:\n vars.append(\n self._sliceVar([axis], [start], 
[start + 1]))\n start += step\n return self._concatVar(vars, axis)\n elif isinstance(item, int):\n if self.shape[axis] < 0:\n return self._cloneVar(True)\n index = int(item)\n if (index > 0 and index >= self.shape[axis])\\\n or (index < 0 and (index + self.shape[axis]) < 0):\n raise IndexError(\"invalid index\")\n return self._sliceVar([axis], [index], [index + 1])\n else:\n raise IndexError(\"Valid index accept int or slice or tuple\")\n\n def __getitem__(self, item):\n \"\"\"\n Slice the variable.\n\n Args:\n item(int/slice/tuple) : the index.\n\n Returns:\n Sliced variable\n \"\"\"\n new_var = None\n if isinstance(item, tuple):\n if len(item) > len(self.shape):\n raise IndexError(\"Too many indexes\")\n fixedSize = True\n for i in range(len(self.shape)):\n if self.shape[i] == -1:\n fixedSize = False\n break\n\n newitem = self._reconstructSliceinfo(item) or item\n if fixedSize:\n check, info = self._detectContinuesSlice(newitem)\n if check:\n starts = info[0]\n ends = info[1]\n axes = [i for i in range(len(starts))]\n return self._sliceVar(axes, starts, ends)\n else:\n new_var = self\n for index, o in enumerate(newitem):\n new_var = new_var._sliceAndConcatVar(o, index)\n else:\n new_var = self\n for index, o in enumerate(newitem):\n new_var = new_var._sliceAndConcatVar(o, index)\n else:\n new_var = self._sliceAndConcatVar(item, 0)\n return new_var\n\n\ndef get_all_op_protos():\n \"\"\"\n Get all registered op proto from PaddlePaddle C++ end.\n\n Returns:\n list: list of OpProto.\n \"\"\"\n protostrs = core.get_all_op_protos()\n ret_values = []\n for pbstr in protostrs:\n op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))\n ret_values.append(op_proto)\n return ret_values\n\n\nclass OpProtoHolder(object):\n \"\"\"\n A global variable to hold all OpProtos from C++ as a map\n \"\"\"\n\n @classmethod\n def instance(cls):\n if not hasattr(cls, '_instance'):\n cls._instance = cls()\n return cls._instance\n\n def __init__(self):\n assert not hasattr(\n 
self.__class__,\n '_instance'), 'Please use `instance()` to get OpProtoHolder object!'\n op_protos = get_all_op_protos()\n self.op_proto_map = {}\n for proto in op_protos:\n self.op_proto_map[proto.type] = proto\n\n def get_op_proto(self, type):\n \"\"\"\n Get OpProto by a type string.\n Args:\n type(str): The type that operator registered in C++ side.\n\n Returns(framework_pb2.OpProto): The OpProto\n\n \"\"\"\n if type not in self.op_proto_map:\n raise ValueError(\"Operator \\\"%s\\\" has not been registered.\" % type)\n return self.op_proto_map[type]\n\n @staticmethod\n def generated_op_attr_names():\n return {\n core.op_proto_and_checker_maker.kOpRoleAttrName(),\n core.op_proto_and_checker_maker.kOpRoleVarAttrName(),\n core.op_proto_and_checker_maker.kOpNameScopeAttrName(),\n core.op_proto_and_checker_maker.kOpCreationCallstackAttrName()\n }\n\n\nclass Operator(object):\n \"\"\"\n In Fluid, all the operation are represented by Operator, and Operator\n is regarded as a build in an instruction of a Block. Users can use the\n build in instructions to describe their neural network.\n\n Args:\n block(Block): The block has the current operator.\n desc(core.OpDesc): The protobuf description of Operator.\n type(str): The type of operator. Default None.\n inputs(dict): The input of this Operator. it is a dictionary, for every\n element, key is the input parameter name, and value is a list of\n variables. Default None.\n outputs(dict): The output of this Operator. it is a dictionary, for\n every element, key is the input parameter name, and value is a list\n of variables. Default None.\n attrs(dict): The attributes of this Operator. 
it is a dictionary, for\n every element, key is attribute name, and value is the attribute value.\n The attribute type should be as same as the type registered in C++ side.\n Default None.\n\n Returns:\n Operator: The initialized Operator.\n\n Raises:\n ValueError: If the passed input, output and attrs doesn't match the\n initializing Operator's that registered in C++ side.\n\n Notes:\n The constructor of operator should not be invoked directly. Use\n Block.append_op or Block._prepend_op instead.\n\n Examples:\n .. code-block:: python\n\n cur_program = Program()\n cur_block = cur_program.current_block()\n # var1 += var2 + var3\n cur_block.append_op(type=\"sum\",\n inputs={\"X\": [var1, var2, var3]},\n outputs={\"Out\": [var1]})\n \"\"\"\n OP_WITHOUT_KERNEL_SET = {\n 'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',\n 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',\n 'ncclInit', 'select', 'checkpoint_notify', 'gen_nccl_id'\n }\n\n def __init__(self,\n block,\n desc,\n type=None,\n inputs=None,\n outputs=None,\n attrs=None):\n if in_dygraph_mode():\n if type is None:\n raise ValueError(\n \"`type` to initialized an Operator can not be None.\")\n self.iop = core.OpBase(type)\n self.previous_ops = []\n\n self.attrs = attrs if attrs else {}\n else:\n self.block = block\n self.desc = desc\n # note: not add self.attrs here:\n # https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173\n op_attrs = attrs\n if op_attrs is None:\n op_attrs = dict()\n del attrs\n\n op_maker = core.op_proto_and_checker_maker\n\n if op_maker.kOpRoleAttrName() not in op_attrs:\n op_attrs[op_maker.kOpRoleAttrName(\n )] = self.block.program._op_role\n\n role_var_name = op_maker.kOpRoleVarAttrName()\n if len(self.block.program.\n _op_role_var) != 0 and role_var_name not in op_attrs:\n op_attrs[role_var_name] = self.block.program._op_role_var\n\n if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:\n del op_attrs[role_var_name]\n\n if 
len(self.desc.type()) != 0:\n return\n if type is None:\n raise ValueError(\n \"`type` to initilized an Operator can not be None.\")\n else:\n callstack_var_name = op_maker.kOpCreationCallstackAttrName()\n op_attrs[callstack_var_name] = list(\n reversed(traceback.format_stack()))[1:]\n\n self.desc.set_type(type)\n proto = OpProtoHolder.instance().get_op_proto(type)\n\n namescope_var_name = op_maker.kOpNameScopeAttrName()\n op_attrs[namescope_var_name] = _full_name_scope()\n\n def find_name(var_list, name):\n for var_name in var_list:\n if var_list[var_name] is not None and var_name == name:\n return True\n return False\n\n if inputs is not None:\n for in_proto in proto.inputs:\n found = find_name(inputs, in_proto.name)\n assert found or in_proto.dispensable, \"Input {} not found\".format(\n in_proto.name)\n\n if found:\n in_args = inputs[in_proto.name]\n if not isinstance(in_args, list):\n in_args = [in_args]\n if not in_proto.duplicable and len(in_args) > 1:\n raise ValueError(\n \"Input %s expects only one input, but %d are given.\"\n % (in_proto.name, len(in_args)))\n in_arg_names = []\n for arg in in_args:\n if isinstance(arg, six.string_types):\n in_arg_names.append(arg)\n elif isinstance(arg, six.binary_type):\n in_arg_names.append(arg.decode())\n else:\n in_arg_names.append(cpt.to_text(arg.name))\n self.desc.set_input(in_proto.name, in_arg_names)\n else:\n self.desc.set_input(in_proto.name, [])\n\n if outputs is not None:\n for m in proto.outputs:\n if (m.name not in outputs) and m.dispensable:\n continue\n if not ((m.name in outputs) or m.dispensable):\n raise ValueError((\"Incorrect setting for output(s) of \"\n \"operator \\\"%s\\\", should set: [%s].\")\n % (type, m.name))\n for out_proto in proto.outputs:\n if out_proto.name not in outputs:\n continue\n out_args = outputs[out_proto.name]\n if not isinstance(out_args, list):\n out_args = [out_args]\n if not out_proto.duplicable and len(out_args) > 1:\n raise ValueError(\n \"Output %s expects only one 
output, but %d are given.\"\n % (out_proto.name, len(out_args)))\n out_arg_names = []\n for arg in out_args:\n out_arg_names.append(cpt.to_text(arg.name))\n # TODO(minqiyang): could we remove variable's op in static mode?\n if not in_dygraph_mode():\n arg.op = self\n self.desc.set_output(out_proto.name, out_arg_names)\n\n if op_attrs is not None:\n if not isinstance(op_attrs, dict):\n raise TypeError(\"'attrs' should be a dict.\")\n for attr in proto.attrs:\n attr_name = attr.name\n if (attr_name not in op_attrs) or (\n op_attrs[attr_name] is None):\n continue\n attr_val = op_attrs[attr_name]\n self._update_desc_attr(attr_name, attr_val)\n\n self.desc.check_attrs()\n if self._has_kernel(type):\n self.desc.infer_var_type(self.block.desc)\n self.desc.infer_shape(self.block.desc)\n\n def _has_kernel(self, op_type):\n return op_type not in self.OP_WITHOUT_KERNEL_SET\n\n def to_string(self, throw_on_error):\n \"\"\"\n Get debug string.\n\n Args:\n throw_on_error(bool): Whether to raise exception if self is not\n initialized.\n\n Returns:\n str: The debug string.\n\n \"\"\"\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))\n return _debug_string_(proto, throw_on_error)\n\n def __str__(self):\n return self.to_string(True)\n\n __repr__ = __str__\n\n @property\n def type(self):\n if in_dygraph_mode():\n return self.iop.type\n else:\n return self.desc.type()\n\n def input(self, name):\n \"\"\"\n Get the input arguments according to the input parameter name.\n\n Args:\n name(str): The input parameter name.\n\n Returns:\n list: return the list of argument names that associated with \\\n the specific parameter name.\n \"\"\"\n return self.desc.input(name)\n\n def _rename_input(self, old_name, new_name):\n \"\"\"\n Rename the `old_name` to `new_name`.\n\n Args:\n old_name(str): The old name of the Operator's input.\n new_name(str): The new name of the Operator's input.\n\n Returns:\n None\n \"\"\"\n 
self.desc._rename_input(old_name, new_name)\n\n def _rename_output(self, old_name, new_name):\n \"\"\"\n Rename the `old_name` to `new_name`.\n\n Args:\n old_name(str): The old name of the Operator's output.\n new_name(str): The new name of the Operator's output.\n\n Returns:\n None\n \"\"\"\n self.desc._rename_output(old_name, new_name)\n\n @property\n def input_names(self):\n return self.desc.input_names()\n\n @property\n def input_arg_names(self):\n return self.desc.input_arg_names()\n\n @property\n def output_arg_names(self):\n return self.desc.output_arg_names()\n\n def output(self, name):\n \"\"\"\n Get output arguments by the output parameter name.\n\n Args:\n name(str): The output parameter name.\n\n Returns:\n list: return the list of argument names associated with \\\n the specific parameter name.\n \"\"\"\n return self.desc.output(name)\n\n @property\n def output_names(self):\n return self.desc.output_names()\n\n @property\n def idx(self):\n for i, op in enumerate(self.block.ops):\n if op == self:\n return i\n raise ValueError(\n \"Can't find op itself in it's block. 
It could be a bug of Paddle.\")\n\n def has_attr(self, name):\n \"\"\"\n Whether this Operator has the attribute with name or not.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n bool: True if has this attribute.\n\n \"\"\"\n return self.desc.has_attr(name)\n\n def attr_type(self, name):\n \"\"\"\n Get the type of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n core.AttrType: the attribute type.\n \"\"\"\n return self.desc.attr_type(name)\n\n def _set_attr(self, name, val):\n \"\"\"\n Set the value of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n\n Raises:\n ValueError: If the type of value doesn't match with desc.attr_type(name).\n \"\"\"\n self._update_desc_attr(name, val)\n\n def _remove_attr(self, name):\n self.desc.remove_attr(name)\n\n def _update_desc_attr(self, name, val):\n \"\"\"\n Update the value of desc's attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n\n Raises:\n ValueError: If the type of value doesn't match with desc.attr_type(name).\n \"\"\"\n if isinstance(val, Block):\n self.desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and all(\n isinstance(v, Block) for v in val):\n self.desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n self.desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n self.desc._set_attr(name, val)\n\n @property\n def attr_names(self):\n return self.desc.attr_names()\n\n def attr(self, name):\n \"\"\"\n Get the attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n bool|int|str|float|list: The attribute value. 
The return value\n can be any valid attribute type.\n \"\"\"\n return self.desc.attr(name)\n\n def _block_attr_id(self, name):\n \"\"\"\n Get the block attribute's id by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n int: the block index.\n \"\"\"\n return self.desc._block_attr_id(name)\n\n def _block_attr(self, name):\n \"\"\"\n Get the block attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n block: the block attribute.\n \"\"\"\n\n id = self._block_attr_id(name)\n assert (id >= 0 and id < len(self.block.program.blocks))\n return self.block.program.blocks[id]\n\n def _blocks_attr(self, name):\n \"\"\"\n Get the blocks attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n list: list of the blocks attribute.\n \"\"\"\n attrs = []\n for i in self._blocks_attr_ids(name):\n assert (i >= 0 and i < len(self.block.program.blocks))\n attrs.append(self.block.program.blocks[i])\n\n return attrs\n\n def _blocks_attr_ids(self, name):\n \"\"\"\n Get the blocks attribute's ids by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n list: list of the blocks ids.\n \"\"\"\n\n return self.desc._blocks_attr_ids(name)\n\n def all_attrs(self):\n \"\"\"\n Get the attribute dict.\n\n Returns:\n dict: The Operator's attribute dict, name->attr.\n \"\"\"\n attr_names = self.attr_names\n attr_map = {}\n for n in attr_names:\n attr_type = self.desc.attr_type(n)\n if attr_type == core.AttrType.BLOCK:\n attr_map[n] = self._block_attr(n)\n continue\n\n if attr_type == core.AttrType.BLOCKS:\n attr_map[n] = self._blocks_attr(n)\n continue\n\n attr_map[n] = self.attr(n)\n\n return attr_map\n\n\nclass Block(object):\n \"\"\"\n In Fluid, a Program is consistence of multi-Block, and Block stores\n VarDesc and OpDesc. 
In a specific Block, a VarDesc have a unique name.\n One block could have some child blocks, and child block's name scopes\n should inherit the parent's so that OpDesc in child block can reference\n a VarDesc that is stored in the parent block.\n Please reference the framework.proto for details.\n\n Args:\n program(Program): The Program that the Block belongs to.\n idx(int): The block's id in the Program.\n\n Notes:\n The constructor of Block should not be invoked directly. Please\n use `Program._create_block()` to create a block.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n var = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n cur_block.append_op(type=\"abs\",\n inputs={\"X\": [var]},\n outputs={\"Out\": [var]})\n \"\"\"\n\n def __init__(self, program, idx):\n self.desc = program.desc.block(idx)\n self.vars = collections.OrderedDict() # var_name --> var\n self.ops = list() # operator list\n self.program = program\n self.removed_vars = collections.OrderedDict()\n\n def __str__(self):\n return self.to_string(True)\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n Get debug string.\n\n Args:\n throw_on_error(bool): raise exception when self is not initialized\n when throw_on_error is True.\n with_details(bool): more details about variables and parameters\n (e.g. trainable, optimize_attr, ...) will be printed when\n with_details is True. 
Default False.\n\n Returns:\n str: The debug string.\n \"\"\"\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n if with_details:\n re_add_indent = re.compile(r\"\\n(.)\")\n res_str = \"blocks {\\n idx: %d\\n parent_idx: %d\" % (\n self.idx, self.parent_idx)\n for var in list(self.vars.values()):\n res_str += \"\\n vars {\\n %s }\" % re_add_indent.sub(\n r\"\\n \\1\", var.to_string(throw_on_error, with_details))\n for op in self.ops:\n res_str += \"\\n ops {\\n %s }\" % re_add_indent.sub(\n r\"\\n \\1\", op.to_string(throw_on_error))\n res_str += \"\\n}\"\n else:\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.BlockDesc.FromString(\n six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n return res_str\n\n __repr__ = __str__\n\n @property\n def parent_idx(self):\n return self.desc.parent\n\n @property\n def forward_block_idx(self):\n return self.desc.get_forward_block_idx()\n\n def _set_forward_block_idx(self, idx):\n \"\"\"\n Set the forward block Idx.\n\n Args:\n idx(int): the block index.\n\n Returns:\n None\n \"\"\"\n self.desc._set_forward_block_idx(idx)\n\n @property\n def idx(self):\n return self.desc.id\n\n def var(self, name):\n \"\"\"\n Get a Variable by name from this block.\n\n Args:\n name(str): the Variable's name.\n\n Raises:\n ValueError: The If input's type is not str, or this block\n doesn't have a Variable with the giving name.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n if not isinstance(name, six.string_types):\n raise TypeError(\n \"var require string as parameter, but get %s instead.\" %\n (type(name)))\n v = self.vars.get(name, None)\n if v is None:\n raise ValueError(\"var %s not in this block\" % name)\n return v\n\n def _find_var_recursive(self, name):\n \"\"\"\n Get a Variable by name from this block recursively.\n\n Args:\n name(str): the Variable's name.\n\n Returns:\n Variable: the Variable with the giving name. 
Or None if not found.\n \"\"\"\n frontier = list()\n visited = set()\n\n frontier.append(self)\n\n prog = self.program\n\n while len(frontier) != 0: # BFS\n cur = frontier[0]\n frontier = frontier[1:]\n\n if id(cur) in visited:\n continue\n\n if cur.has_var(name):\n return cur.var(name)\n\n if cur.parent_idx != -1:\n frontier.append(prog.block(cur.parent_idx))\n\n if cur.forward_block_idx != -1:\n frontier.append(prog.block(cur.forward_block_idx))\n\n visited.add(id(cur))\n return None\n\n def _var_recursive(self, name):\n \"\"\"\n Get a Variable by name from this block recursively.\n\n Args:\n name(str): the Variable's name.\n\n Raises:\n ValueError: this block and this parent block doesn't\n have a Variable with the giving name.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n var = self._find_var_recursive(name)\n if var:\n return var\n else:\n raise ValueError(\"Var {0} is not found recursively\".format(name))\n\n def all_parameters(self):\n return list(self.iter_parameters())\n\n def iter_parameters(self):\n return (item[1] for item in six.iteritems(self.vars)\n if isinstance(item[1], Parameter))\n\n def create_var(self, *args, **kwargs):\n var = Variable(block=self, *args, **kwargs)\n if 'initializer' in kwargs:\n kwargs['initializer'](var, self)\n return var\n\n def has_var(self, name):\n return name in self.vars\n\n def _rename_var(self, name, new_name):\n \"\"\"\n Rename variable in vars and ops' inputs and outputs\n\n Args:\n name(str): the name that need to be renamed.\n new_name(str): the name that need to rename to.\n\n Raises:\n ValueError: If this block doesn't have this the giving name,\n or the type of the var with the giving name is not Parameter\n or Variable.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n name = cpt.to_text(name)\n new_name = cpt.to_text(new_name)\n\n if not self.has_var(name):\n raise ValueError(\"var %s is not in current block\" % name)\n v = self.var(name)\n if type(v) == 
Parameter:\n var_type = \"Parameter\"\n stop_gradient = v.stop_gradient\n trainable = v.trainable\n optimize_attr = v.optimize_attr\n regularizer = v.regularizer\n gradient_clip_attr = v.gradient_clip_attr\n error_clip = v.error_clip\n elif type(v) == Variable:\n var_type = \"Variable\"\n error_clip = v.error_clip\n stop_gradient = v.stop_gradient\n else:\n raise ValueError(\"unsupported var type: %s\", type(v))\n orig_var_type = v.type\n self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))\n # NOTE: v is destroyed by C++ after calling _rename_var.\n d = self.desc.find_var(cpt.to_bytes(new_name))\n if var_type == \"Parameter\":\n var = Parameter(\n self,\n d.shape(),\n d.dtype(),\n type=orig_var_type,\n name=new_name,\n stop_gradient=stop_gradient,\n trainable=trainable,\n optimize_attr=optimize_attr,\n regularizer=regularizer,\n gradient_clip_attr=gradient_clip_attr,\n error_clip=error_clip)\n elif var_type == \"Variable\":\n var = Variable(\n self,\n type=orig_var_type,\n name=new_name,\n error_clip=error_clip,\n stop_gradient=stop_gradient)\n\n # rename the python side, _sync_with_cpp will only add\n # new vars/ops to python side.\n self.vars[new_name] = var\n del self.vars[name]\n self._sync_with_cpp()\n return var\n\n def _remove_var(self, name):\n self._sync_with_cpp()\n self.desc._remove_var(cpt.to_bytes(name))\n del self.vars[name]\n\n def create_parameter(self, *args, **kwargs):\n global_block = self.program.global_block()\n param = Parameter(global_block, *args, **kwargs)\n if 'initializer' in kwargs:\n\n def _is_inited_by(block, var):\n init_ops = []\n for op in block.ops:\n if var.name in op.output_arg_names:\n init_ops.append(op)\n return init_ops\n\n initializer = kwargs['initializer']\n init_ops = _is_inited_by(global_block, param)\n init_ops_len = len(init_ops)\n if init_ops_len > 1:\n raise RuntimeError(\"param \" + param.name +\n \" is inited by multiple init ops \" + str(\n init_ops))\n elif init_ops_len == 1:\n #TODO already inited, 
do nothing, should log a warning\n pass\n else:\n initializer(param, self)\n return param\n\n def append_op(self, *args, **kwargs):\n \"\"\"\n Appends a new Operator according to the giving arguments.\n\n Returns:\n Operator: the append Operator.\n \"\"\"\n if in_dygraph_mode():\n op = Operator(\n block=self,\n desc=None,\n type=kwargs.get(\"type\", None),\n inputs=None,\n outputs=None,\n attrs=kwargs.get(\"attrs\", {}))\n\n # record ops in tracer rather than blocks\n #\n # TODO(minqiyang): add op stop_gradient support in static mode too.\n # currently, we only support stop_gradient in dygraph mode.\n _dygraph_tracer().trace_op(op,\n kwargs.get(\"inputs\", {}),\n kwargs.get(\"outputs\", {}),\n kwargs.get(\"stop_gradient\", False))\n else:\n op_desc = self.desc.append_op()\n op = Operator(\n block=self,\n desc=op_desc,\n type=kwargs.get(\"type\", None),\n inputs=kwargs.get(\"inputs\", None),\n outputs=kwargs.get(\"outputs\", None),\n attrs=kwargs.get(\"attrs\", None))\n\n self.ops.append(op)\n\n return op\n\n def _insert_op(self, index, *args, **kwargs):\n \"\"\"\n Insert a Operator according to the giving arguments.\n\n Args:\n index(int): the place that the operator to insert.\n\n Returns:\n Operator: the insert Operator.\n \"\"\"\n self._sync_with_cpp()\n op_desc = self.desc._insert_op(index)\n op = Operator(block=self, desc=op_desc, *args, **kwargs)\n self.ops.insert(index, op)\n return op\n\n def _remove_op(self, index):\n \"\"\"\n Remove the specific position operator.\n\n Args:\n index(int): the position that the operator to insert.\n\n Returns:\n None\n \"\"\"\n self._sync_with_cpp()\n self.desc._remove_op(index, index + 1)\n del self.ops[index]\n\n def _slice_ops(self, start, end):\n \"\"\"\n Return the Operator between start and end.\n\n Args:\n start(int): the start position.\n end(int): the end position.\n\n Returns:\n list: the Operators between start and end.\n \"\"\"\n return self.ops[start:end]\n\n def _prepend_op(self, *args, **kwargs):\n if 
in_dygraph_mode():\n op = Operator(\n self,\n None,\n type=kwargs.get(\"type\", None),\n inputs=None,\n outputs=None,\n attrs=kwargs.get(\"attrs\", {}))\n\n _dygraph_tracer().trace_op(op,\n kwargs.get(\"inputs\", {}),\n kwargs.get(\"outputs\", {}),\n kwargs.get(\"stop_gradient\", False))\n else:\n op_desc = self.desc._prepend_op()\n op = Operator(\n self,\n op_desc,\n type=kwargs.get(\"type\", None),\n inputs=kwargs.get(\"inputs\", None),\n outputs=kwargs.get(\"outputs\", None),\n attrs=kwargs.get(\"attrs\", None))\n self.ops.insert(0, op)\n\n return op\n\n def _sync_with_cpp(self):\n \"\"\"\n Sync from the desc on the c++ end. This method is used to synchronize\n the c++ desc instance generated by backward.\n \"\"\"\n # sync variables from cpp\n for var in self.desc.all_vars():\n if not self.has_var(var.name()):\n self.create_var(name=var.name(), desc=var, type=var.type())\n\n # sync variables removed from c++ end\n for var in list(self.vars.keys()):\n if not self.desc.find_var(cpt.to_bytes(var)):\n self.vars.pop(var)\n\n # sync operators from cpp\n ops_in_cpp = []\n for op_idx in range(0, self.desc.op_size()):\n ops_in_cpp.append(self.desc.op(op_idx))\n\n if len(self.ops) != 0:\n first_op_in_python = self.ops[0].desc\n last_op_in_python = self.ops[len(self.ops) - 1].desc\n start_index = None\n end_index = None\n for index in range(len(ops_in_cpp)):\n if first_op_in_python == ops_in_cpp[index]:\n start_index = index\n if last_op_in_python == ops_in_cpp[index]:\n end_index = index\n assert start_index is not None\n assert end_index is not None\n assert start_index <= end_index\n else:\n start_index = 0\n end_index = -1\n\n # sync ops append to the head of cpp_ops\n for index in range((start_index - 1 - 1), -1, -1):\n op_desc = ops_in_cpp[index]\n op = Operator(self, op_desc)\n self.ops.insert(0, op)\n\n # sync ops append to the end of cpp_ops\n for index in range((end_index + 1), len(ops_in_cpp)):\n op_desc = ops_in_cpp[index]\n op = Operator(self, op_desc)\n 
self.ops.append(op)\n\n # sync ops removed from c++ end\n if end_index != -1 and end_index < len(self.ops):\n ops_in_cpp_index = 0\n ops_in_python_index = 0\n while ops_in_python_index < len(\n self.ops) and ops_in_cpp_index < len(ops_in_cpp):\n if self.ops[ops_in_python_index].desc != ops_in_cpp[\n ops_in_cpp_index]:\n del self.ops[ops_in_python_index]\n else:\n ops_in_cpp_index += 1\n ops_in_python_index += 1\n\n assert len(self.ops) == len(ops_in_cpp)\n for index in range(len(self.ops)):\n assert self.ops[index].desc == ops_in_cpp[index]\n\n def _copy_param_info_from(self, other):\n \"\"\"\n Copy the information of parameters from the other block.\n\n Args:\n other(Block): the other block.\n\n Raises:\n ValueError: If type of input is not Block, or the `other` and this\n block is not in the same topology.\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Block):\n raise TypeError(\n \"_copy_param_info_from should be invoked with Block\")\n for p in other.iter_parameters():\n assert isinstance(p, Parameter)\n v = self.vars.get(p.name, None)\n if v is None:\n raise ValueError(\"_copy_param_info_from should be invoked with \"\n \"same topology\")\n assert isinstance(v, Variable)\n new_p = Parameter(\n block=self,\n shape=v.shape,\n dtype=v.dtype,\n type=v.type,\n lod_level=v.lod_level,\n stop_gradient=p.stop_gradient,\n trainable=p.trainable,\n optimize_attr=p.optimize_attr,\n regularizer=p.regularizer,\n gradient_clip_attr=p.gradient_clip_attr,\n error_clip=p.error_clip,\n name=v.name)\n self.vars[new_p.name] = new_p\n\n def _clone_variable(self, var, force_persistable=True):\n \"\"\"\n Clone a variable into current block.\n\n Args:\n var: the variable to be cloned.\n force_persistable(bool): True means setting the result variable to being persistable.\n False means setting the persistable the same with that of input var.\n default: True.\n\n Returns:\n Variable: the new variable cloned from 'var' in current block.\n \"\"\"\n assert isinstance(var, 
Variable)\n ret_var = None\n # make STEP_SCOPES var can be safely cloned.\n if var.type == core.VarDesc.VarType.STEP_SCOPES:\n ret_var = self.create_var(\n name=var.name, persistable=var.persistable, type=var.type)\n elif var.type == core.VarDesc.VarType.RAW:\n ret_var = self.create_var(\n name=var.name, persistable=var.persistable, type=var.type)\n elif var.type == core.VarDesc.VarType.SELECTED_ROWS:\n ret_var = self.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n persistable=True if force_persistable else var.persistable,\n is_data=var.is_data)\n else:\n ret_var = self.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n lod_level=var.lod_level,\n persistable=True if force_persistable else var.persistable,\n is_data=var.is_data)\n return ret_var\n\n\nclass IrNode(object):\n \"\"\"\n Python IrNode. Beneath it is a core.Node, which is used for Ir Pass.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node,\n core.Node), 'node must be the instance of core.Node.'\n self.node = node\n\n def name(self):\n \"\"\"\n Return the node name.\n\n Returns:\n str: node name.\n \"\"\"\n return self.node.name()\n\n def node_type(self):\n \"\"\"\n Return the node type.\n\n Returns:\n core.Node.Type: node type(core.Node.Type.Operation or core.Node.Type.Variable).\n \"\"\"\n return self.node.node_type()\n\n def var(self):\n \"\"\"\n Return the node variable description.\n\n Returns:\n core.VarDesc: node variable description.\n \"\"\"\n return self.node.var()\n\n def op(self):\n \"\"\"\n Return the node operator description.\n\n Returns:\n core.OpDesc: node operator description.\n \"\"\"\n return self.node.op()\n\n def id(self):\n \"\"\"\n Return the node id.\n\n Returns:\n int: node id.\n \"\"\"\n return self.node.id()\n\n def is_op(self):\n \"\"\"\n If the node is an operator, then return true.\n\n 
Returns:\n bool: indicate whether the node is an operator.\n \"\"\"\n return self.node.is_op()\n\n def is_var(self):\n \"\"\"\n If the node is a variable, then return true.\n\n Returns:\n bool: indicate whether the node is a variable.\n \"\"\"\n return self.node.is_var()\n\n def is_ctrl_var(self):\n \"\"\"\n If the node is a control dependence variable, then return true.\n\n Returns:\n bool: indicate whether the node is a control dependence variable.\n \"\"\"\n return self.node.is_ctrl_var()\n\n def clear_inputs(self):\n \"\"\"\n Clear the node inputs. After executing the `clear_inputs` function,\n the node inputs will be empty.\n \"\"\"\n self.node.clear_inputs()\n\n def remove_input_by_id(self, node_id):\n \"\"\"\n Remove a node from inputs by the given node id.\n\n Args:\n node_id(int): the given node id.\n \"\"\"\n self.node.remove_input(node_id)\n\n def remove_input(self, node):\n \"\"\"\n Remove a node from inputs.\n\n Args:\n node(IrNode): the node being removed.\n \"\"\"\n self.node.remove_input(node.node)\n\n def append_input(self, node):\n \"\"\"\n Append a node in inputs.\n\n Args:\n node(IrNode): the node being appended.\n \"\"\"\n self.node.append_input(node.node)\n\n def clear_outputs(self):\n \"\"\"\n Clear the node outputs. 
After executing the `clear_outputs` function,\n the node outputs will be empty.\n \"\"\"\n self.node.clear_outputs()\n\n def remove_output_by_id(self, node_id):\n \"\"\"\n Remove a node from outputs by the given node id.\n\n Args:\n node_id(int): the given node id.\n \"\"\"\n self.node.remove_output(node_id)\n\n def remove_output(self, node):\n \"\"\"\n Remove a node from outputs.\n\n Args:\n node(IrNode): the node being removed.\n \"\"\"\n self.node.remove_output(node.node)\n\n def append_output(self, node):\n \"\"\"\n Append a node in outputs.\n\n Args:\n node(IrNode): the node being appended.\n \"\"\"\n self.node.append_output(node.node)\n\n @property\n def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrNode): node inputs wrapped by IrNode.\n \"\"\"\n return [IrNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrNode): node outputs wrapped by IrNode.\n \"\"\"\n return [IrNode(n) for n in self.node.outputs]\n\n\nclass IrVarNode(IrNode):\n \"\"\"\n Python IrVarNode. 
Beneath it is a core.Node, it inherits from IrNode.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrVarNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node, core.Node) and node.is_var(), \\\n 'node must be the instance of core.Node and it must be a variable node.'\n super(IrVarNode, self).__init__(node)\n self.node = node\n\n def set_shape(self, shape):\n \"\"\"\n Set the node variable shape.\n\n Args:\n shape(list): shape to be set.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description cannot be None.\"\n self.node.var().set_shape(shape)\n\n def persistable(self):\n \"\"\"\n If the variable node is a persistable variable, then return true.\n\n Returns:\n bool: indicate whether the variable is persistable.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description cannot be None.\"\n return self.node.var().persistable()\n\n def type(self):\n \"\"\"\n Return the variable type.\n\n Returns:\n core.VarDesc.VarType: the variable type.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description cannot be None.\"\n return self.node.var().type()\n\n def dtype(self):\n \"\"\"\n Return the variable data type.\n\n Returns:\n core.VarDesc.VarType: the variable data type.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description cannot be None.\"\n return self.node.var().dtype()\n\n def shape(self):\n \"\"\"\n Return the variable shape.\n\n Returns:\n list: the variable shape.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description cannot be None.\"\n return self.node.var().shape()\n\n @property\n def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrOpNode): node inputs wrapped by IrOpNode.\n \"\"\"\n return [IrOpNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrOpNode): node outputs 
wrapped by IrOpNode.\n \"\"\"\n return [IrOpNode(n) for n in self.node.outputs]\n\n\nclass IrOpNode(IrNode):\n \"\"\"\n Python IrOpNode. Beneath it is a core.Node, it inherits from IrNode.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrOpNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node, core.Node) and node.is_op(), \\\n 'node must be the instance of core.Node and it must be a operator node.'\n super(IrOpNode, self).__init__(node)\n self.node = node\n\n def rename_input(self, old_input_name, new_input_name):\n \"\"\"\n Rename the input of this node.\n\n Args:\n old_input_name(str): the old input name.\n new_input_name(str): the new input name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description cannot be None.\"\n self.node.op()._rename_input(old_input_name, new_input_name)\n\n def input(self, name):\n \"\"\"\n Get the argument name list by the parameter name for input.\n\n Args:\n name(str): the parameter name.\n\n Returns:\n list(str): the argument name list.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description cannot be None.\"\n return self.node.op().input(name)\n\n def output(self, name):\n \"\"\"\n Get the argument name list by the parameter name for output.\n\n Args:\n name(str): the parameter name.\n\n Returns:\n list(str): the argument name list.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description cannot be None.\"\n return self.node.op().output(name)\n\n def set_type(self, new_type):\n \"\"\"\n Change the operator type into new type.\n\n Args:\n new_type(str): new operator type to be set.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description cannot be None.\"\n return self.node.op().set_type(new_type)\n\n def set_attr(self, name, val):\n \"\"\"\n Set the value of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the 
value of the attribute.\n \"\"\"\n self._update_desc_attr(name, val)\n\n def _update_desc_attr(self, name, val):\n \"\"\"\n Update the value of the op desc's attribute by attribute's name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description cannot be None.\"\n desc = self.node.op()\n if isinstance(val, Block):\n desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and \\\n all(isinstance(v, Block) for v in val):\n desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n desc._set_attr(name, val)\n\n def input_arg_names(self):\n \"\"\"\n Return input arguments' names of this op node.\n\n Returns:\n list(str): input arguments' names of this op node.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description cannot be None.\"\n return self.node.op().input_arg_names()\n\n def output_arg_names(self):\n \"\"\"\n Return output arguments' names of this op node.\n\n Returns:\n list(str): output arguments' names of this op node.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description cannot be None.\"\n return self.node.op().output_arg_names()\n\n @property\n def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrVarNode): node inputs wrapped by IrVarNode.\n \"\"\"\n return [IrVarNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrVarNode): node outputs wrapped by IrVarNode.\n \"\"\"\n return [IrVarNode(n) for n in self.node.outputs]\n\n\nclass IrGraph(object):\n \"\"\"\n Python IrGraph. Beneath it is a core.Graph, which is used for\n creating a c++ Ir Pass Graph. An IrGraph is just a graph view of\n a Program. 
In an IrGraph, both Variables and Operators are graph\n nodes.\n \"\"\"\n\n def __init__(self, graph, for_test=False):\n \"\"\"\n Construct an IrGraph using core.Graph.\n\n Args:\n graph(core.Graph): C++ Graph.\n for_test(bool): True for the test graph and false for the train graph.\n \"\"\"\n assert isinstance(\n graph, core.Graph), 'graph must be the instance of core.Graph.'\n self.graph = graph\n self._for_test = for_test\n\n def clone(self):\n \"\"\"\n Create a new and duplicated IrGraph.\n\n Warns:\n The method only clones the graph structure, not its attributes.\n\n Returns:\n IrGraph: A new and duplicated graph.\n \"\"\"\n g = self.graph.clone()\n return IrGraph(g, self._for_test)\n\n def is_test(self):\n \"\"\"\n If the graph is used for testing, the function returns true. Otherwise, returns false.\n \"\"\"\n return self._for_test\n\n def all_nodes(self):\n \"\"\"\n Return all nodes included in the graph as a set.\n \"\"\"\n return {IrNode(node) for node in self.graph.nodes()}\n\n def all_var_nodes(self):\n \"\"\"\n Return all variable nodes included in the graph as a set.\n \"\"\"\n return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}\n\n def all_persistable_nodes(self):\n \"\"\"\n Return all persistable variable nodes included in the graph as a set.\n \"\"\"\n persistable_nodes = set()\n for node in self.graph.nodes():\n if node.is_var() and node.var() is not None and node.var(\n ).persistable():\n persistable_nodes.add(node)\n return {IrVarNode(p) for p in persistable_nodes}\n\n def all_op_nodes(self):\n \"\"\"\n Return all operator nodes included in the graph as a set.\n \"\"\"\n return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}\n\n def create_persistable_node(self, name, var_type, shape, var_dtype):\n \"\"\"\n Create a persistable variable node in the graph. 
In IrGraph,\n it can not distinguish between persistable variables and parameters.\n\n Args:\n name(str): the name of the persistable variable node.\n vart_type(core.VarDesc.VarType): the type of the persistable variable node.\n shape(list): the shape of the persistable variable node.\n var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.\n\n Returns:\n IrVarNode: the created persistable variable node.\n \"\"\"\n var_desc = core.VarDesc(name)\n var_desc.set_type(var_type)\n var_desc.set_shape(shape)\n var_desc.set_dtype(var_dtype)\n var_desc.set_persistable(True)\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_var_node(self, name, var_type, shape, var_dtype):\n \"\"\"\n Create a variable node in the graph. The created variable node is\n not persistable.\n\n Args:\n name(str): the name of the variable node.\n vart_type(core.VarDesc.VarType): the type of the variable node.\n shape(list): the shape of the variable node.\n var_dtype(core.VarDesc.VarType): the data type of the variable node.\n\n Returns:\n IrVarNode: the created variable node.\n \"\"\"\n\n var_desc = core.VarDesc(name)\n var_desc.set_type(var_type)\n var_desc.set_shape(shape)\n var_desc.set_dtype(var_dtype)\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_var_node_from_desc(self, var_desc):\n \"\"\"\n Create a variable node by using an existing VarDesc in the graph.\n Depend on the giving VarDesc, the created variable node may be persistable.\n\n Args:\n var_desc(core.VarDesc): the giving variable description.\n\n Returns:\n IrVarNode: the created variable node.\n \"\"\"\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_op_node(self, op_type, attrs, inputs, outputs):\n \"\"\"\n Create a operator node in the graph.\n\n Args:\n op_type(str): the type of the operator node.\n attrs(dict): the attributes of the operator node.\n inputs(dict): the inputs of the operator node.\n outputs(dict): the outpus of the 
operator node.\n\n Returns:\n IrOpNode: the created operator node.\n \"\"\"\n op_desc = core.OpDesc()\n op_desc.set_type(op_type)\n for attr, value in six.iteritems(attrs):\n self._update_desc_attr(op_desc, attr, value)\n for input_name, var_nodes in six.iteritems(inputs):\n if not isinstance(var_nodes, list):\n var_nodes = [var_nodes]\n op_desc.set_input(input_name,\n [var_node.name() for var_node in var_nodes])\n for output_name, var_nodes in six.iteritems(outputs):\n if not isinstance(var_nodes, list):\n var_nodes = [var_nodes]\n op_desc.set_output(output_name,\n [var_node.name() for var_node in var_nodes])\n return IrOpNode(self.graph.create_op_node(op_desc))\n\n def create_op_node_from_desc(self, op_desc):\n \"\"\"\n Create a operator node by using an existing OpDesc in the graph.\n\n Args:\n op_desc(core.VarDesc): the giving operator description.\n\n Returns:\n IrOpNode: the created operator node.\n \"\"\"\n return IrOpNode(self.graph.create_op_node(op_desc))\n\n def update_input_link(self, old_input_node, new_input_node, op_node):\n \"\"\"\n Update the input's link of a operator node.\n\n Args:\n old_input_node(IrNode): the old input node of the giving op_node.\n new_input_node(IrNode): the new input node of the giving op_node.\n op_node(IrOpNode): the operator node that is needed to update input's link.\n \"\"\"\n assert old_input_node.node in self.graph.nodes() and new_input_node.node in \\\n self.graph.nodes() and op_node.node in self.graph.nodes(), \\\n 'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'\n old_input_node.remove_output(op_node)\n op_node.remove_input(old_input_node)\n new_input_node.append_output(op_node)\n op_node.append_input(new_input_node)\n op_node.rename_input(old_input_node.name(), new_input_node.name())\n\n def link_to(self, node_in, node_out):\n \"\"\"\n Connect two nodes.\n\n Args:\n node_in(IrNode): the input node.\n node_out(IrNode): the output node.\n \"\"\"\n assert node_in.node in 
self.graph.nodes() and node_out.node in self.graph.nodes(), \\\n 'The two arguments(node_in&node_out) must be in the graph nodes.'\n node_in.append_output(node_out)\n node_out.append_input(node_in)\n\n def safe_remove_nodes(self, remove_nodes):\n \"\"\"\n Remove nodes safely since links connected to these removed nodes are\n also removed.\n\n Args:\n remove_nodes(set): the nodes prepared to be removed.\n \"\"\"\n if not isinstance(remove_nodes, set):\n if isinstance(remove_nodes, Iterable):\n remove_nodes = set(remove_nodes)\n else:\n remove_nodes = {remove_nodes}\n original_nodes = {n.node for n in remove_nodes}\n core.graph_safe_remove_nodes(self.graph, original_nodes)\n\n def resolve_hazard(self):\n ordered_nodes = core.topology_sort(self.graph)\n var_nodes = dict()\n for node in ordered_nodes:\n if node.is_op() and node.op() is not None:\n for each_var_name in node.op().input_arg_names():\n if each_var_name not in var_nodes:\n var_nodes[each_var_name] = [\n self._find_node_by_name(node.inputs, each_var_name)\n ]\n for each_var_name in node.op().output_arg_names():\n if each_var_name not in var_nodes:\n var_nodes[each_var_name] = [\n self._find_node_by_name(node.outputs, each_var_name)\n ]\n else:\n var_nodes[each_var_name].append(\n self._find_node_by_name(node.outputs,\n each_var_name))\n self.graph.resolve_hazard(var_nodes)\n\n def has_circle(self):\n \"\"\"\n Check if the graph has a circle.\n\n Returns:\n bool: True if the graph has a circle else False.\n \"\"\"\n return core.has_circle(self.graph)\n\n def graph_num(self):\n \"\"\"\n Count the number of unconnected graphs in this graph.\n\n Returns:\n int: the number of unconnected graphs.\n \"\"\"\n return core.graph_num(self.graph)\n\n def topology_sort(self):\n \"\"\"\n Perform the topology sort operation on the graph.\n\n Notes: the `graph` cannot contain a circle.\n\n Returns:\n list(IrNode): nodes in topology order.\n \"\"\"\n ordered_nodes = core.topology_sort(self.graph)\n return [IrNode(n) for n in 
ordered_nodes]\n\n def build_adjacency_list(self):\n \"\"\"\n Build an adjacency list of operations for the `graph`.\n\n Returns:\n dict{IrNode: set(IrNode)}: the adjacency list.\n \"\"\"\n adj_list = core.build_adjacency_list(self.graph)\n wrapped_adj_list = dict()\n for k, v in six.iteritems(adj_list):\n wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}\n return wrapped_adj_list\n\n def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):\n \"\"\"\n Draw the graph. If `dot` command is installed, the drawn graph\n will be saved as pdf file type, otherwise dot file type is used.\n\n Args:\n save_path(str): the save path of drawn graph.\n name(str): the name of drawn graph.\n marked_nodes(set(IrNode)): nodes that are needed to be marked.\n Default value is None.\n remove_ctr_var(bool): If it is set True, all control variable nodes\n in the graph will be removed. Default value is True.\n \"\"\"\n\n def _convert_to_pdf(dot_file_path):\n pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'\n exited_code = subprocess.call('dot -Tpdf ' + dot_file_path \\\n + ' -o ' + pdf_save_path, shell=True)\n if exited_code != 0:\n print('The dot command is needed for creating pdf files.')\n print('The {} is saved as the dot filetype.'.format(\n dot_file_path))\n\n remove_ctr_vars = set()\n if remove_ctr_var:\n for node in self.all_var_nodes():\n if node.is_ctrl_var():\n remove_ctr_vars.add(node)\n self.safe_remove_nodes(remove_ctr_vars)\n print('Total ops num = {}.'.format(len(self.all_op_nodes())))\n\n if marked_nodes is not None:\n if not isinstance(marked_nodes, set):\n if isinstance(marked_nodes, Iterable):\n marked_nodes = set(marked_nodes)\n else:\n marked_nodes = {marked_nodes}\n marked_nodes = {n.node for n in marked_nodes}\n remove_ctr_vars = {n.node for n in remove_ctr_vars}\n marked_nodes = marked_nodes - remove_ctr_vars\n if self.graph.has('__graphviz__marked_node__'):\n self.graph.erase('__graphviz__marked_node__')\n 
self.graph.set('__graphviz__marked_node__', marked_nodes)\n viz_dot_path = os.path.join(save_path, name) + '.dot'\n viz_pass = core.get_pass('graph_viz_pass')\n viz_pass.set('graph_viz_path', viz_dot_path)\n viz_pass.apply(self.graph)\n _convert_to_pdf(viz_dot_path)\n\n def to_program(self):\n \"\"\"\n Convert the graph into a Program.\n\n WARN: When the graph includes backward operator nodes, the\n conversion process may be failed. Usually, this function is\n only used to convert a test graph.\n\n Returns:\n Program: a program converted from the graph.\n \"\"\"\n convert_pass = core.get_pass('graph_to_program_pass')\n desc = core.ProgramDesc()\n convert_pass.set_not_owned('program', desc)\n convert_pass.apply(self.graph)\n program = Program._construct_from_desc(desc)\n return program\n\n def _find_node_by_name(self, nodes, node_name):\n \"\"\"\n Find a node in the giving nodes set by the name.\n \"\"\"\n target_node = None\n for n in nodes:\n if n.name() == node_name:\n target_node = n\n assert target_node is not None, \"Cannot find the target node in the giving set.\"\n return target_node\n\n def _update_desc_attr(self, desc, name, val):\n \"\"\"\n Update the value of desc's attribute by attribute's name.\n \"\"\"\n if isinstance(val, Block):\n desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and all(\n isinstance(v, Block) for v in val):\n desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n desc._set_attr(name, val)\n\n\nclass Program(object):\n \"\"\"\n Python Program. Beneath it is a ProgramDesc, which is used for\n create c++ Program. A program is a self-contained programing\n language like container. 
It has at least one Block, when the\n control flow op like conditional_block, while_op is included,\n it will contains nested block.\n Please reference the framework.proto for details.\n\n Notes: we have default_startup_program and default_main_program\n by default, a pair of them will shared the parameters.\n The default_startup_program only run once to initialize parameters,\n default_main_program run in every mini batch and adjust the weights.\n\n Returns:\n A empty program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(main_program=main_program, startup_program=startup_program):\n x = fluid.layers.data(name=\"x\", shape=[-1, 784], dtype='float32')\n y = fluid.layers.data(name=\"y\", shape=[-1, 1], dtype='int32')\n z = fluid.layers.fc(name=\"fc\", input=x, size=10, act=\"relu\")\n\n print(\"main program is: {}\".format(main_program))\n print(\"start up program is: {}\".format(startup_program))\n\n \"\"\"\n\n def __init__(self):\n self.desc = core.ProgramDesc()\n self.blocks = [Block(self, 0)]\n self.current_block_idx = 0\n self._seed = 0\n self._current_role = core.op_proto_and_checker_maker.OpRole.Forward\n self.__op_role_var = []\n\n # for distribute training\n # _is_distributed = True if under distributed training\n self._is_distributed = False\n # _is_chief = True if the trainer is the first one, usually No.0\n self._is_chief = False\n # _parameters_on_pservers records all the parameters distributed on parameter servers.\n self._parameters_on_pservers = None\n # _endpoints is a list about parameter servers ip:port, such as [\"ip:port\",\"ip:port\"]\n self._endpoints = []\n # if current role is parameter server, the _ps_endpoint is its \"ip:port\"\n self._ps_endpoint = None\n # trainers_endpoints, it is used for distribution.\n self._trainers_endpoints = []\n # the distributed lookup table names\n self._distributed_lookup_table = 
None\n\n # use Deep gradient comrepssion or not\n self._enable_dgc = False\n\n # @deprecated(the python memory optimize transpiler is deprecated)\n # whether the program is optimized by memory_optimize_transpiler\n self.__is_mem_optimized = False\n\n # if this program has been optimized by distributed optimizer\n # fleet_opt will be given a value\n self._fleet_opt = None\n self._program_config = None\n\n @property\n def _is_mem_optimized(self):\n # if the program is optimized, operator input/outputs\n # maybe same, which conflict with save_inference_model.\n return self.__is_mem_optimized\n\n @_is_mem_optimized.setter\n def _is_mem_optimized(self, target):\n self.__is_mem_optimized = target\n\n @property\n def _op_role(self):\n \"\"\"\n The operator role. In a enum {Forward, Backward, Optimize}.\n\n Notes: this is a low level API. It is used only for ParallelExecutor to\n duplicate or schedule operator to devices.\n\n For example, the forward operator should be executed on every device.\n The backward operator should be executed on every device and the\n parameter gradient of backward (use :code:`_op_role_var` to get this\n variable) operator should be merged to one device. The optimization\n operators should be executed on only one device and broadcast the\n optimization result, i.e., the new parameter, to every other device.\n \"\"\"\n return self._current_role\n\n @_op_role.setter\n def _op_role(self, role):\n self._current_role = role\n\n @property\n def _op_role_var(self):\n \"\"\"\n The auxiliary variables for :code:`_op_role` property.\n\n See Also: :code:`Program._op_role`'s documentation for details.\n\n Notes: This is a very low-level API. 
Users should not use it directly.\n \"\"\"\n return self.__op_role_var\n\n @contextlib.contextmanager\n def _backward_role_guard(self):\n tmp_role = self._current_role\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.Backward\n yield\n self._current_role = tmp_role\n\n @signature_safe_contextmanager\n def _optimized_guard(self, param_and_grads):\n \"\"\"\n A with guard to set :code:`Optimization` :code:`OpRole` and\n :code:`OpRoleVar` automatically.\n\n Notes: This is a very low level API. Users should not use it directly.\n\n Args:\n param_and_grads(list): The variables (names) to be optimized.\n\n Examples:\n\n >>> p, g = backward(...)\n >>> with program._optimized_guard([p,g]):\n >>> p = p - 0.001 * g\n \"\"\"\n tmp_role = self._current_role\n tmp_var = self.__op_role_var\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.Optimize\n self.__op_role_var = [\n var.name if isinstance(var, Variable) else var\n for var in param_and_grads\n ]\n yield\n self.__op_role_var = tmp_var\n self._current_role = tmp_role\n\n @signature_safe_contextmanager\n def _lr_schedule_guard(self, is_with_opt=False):\n \"\"\"\n A with guard to set :code:`LRSched` :code:`OpRole` and\n :code:`OpRoleVar` automatically. The :code:`OpRoleVar` is\n set to the target learning rate.\n\n Notes: This is a very low level API. Users should not use it directly.\n\n Args:\n is_with_opt: Only set to true if these ops a in the middle\n of a bunch of optimize ops so that it can be treated\n correctly. 
For example, sgd->lr_op->sgd->lr_op->sgd.\n\n Examples:\n\n >>> p, g = backward(...)\n >>> with program.lr_schedule_guard():\n >>> lr = lr * decay\n \"\"\"\n\n tmp_role = self._current_role\n tmp_var = self.__op_role_var\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.LRSched\n if is_with_opt:\n self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)\n # TODO(typhoonzero): how to set target learning rate var\n self.__op_role_var = []\n yield\n self.__op_role_var = tmp_var\n self._current_role = tmp_role\n\n def __str__(self):\n \"\"\"\n Get the protobuf debug string of this Program.\n\n Returns:\n (str): The protobuf debug string.\n\n Raises:\n ValueError: If any of required fields is not set.\n \"\"\"\n return self.to_string(True)\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n To debug string.\n\n Args:\n throw_on_error(bool): raise Value error when any of required fields\n is not set.\n\n with_details(bool): True if more details about variables and\n parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need\n to print.\n\n Returns:\n str : The debug string.\n\n Raises:\n ValueError: If any of required fields is not set and throw_on_error is\n True.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n prog_string = prog.to_string(throw_on_error=True, with_details=False)\n print(prog_string)\n\n \"\"\"\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n if with_details:\n res_str = \"\"\n for block in self.blocks:\n res_str += block.to_string(throw_on_error, with_details)\n else:\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.ProgramDesc.FromString(\n six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n return res_str\n\n def _get_desc(self):\n \"\"\"\n Get the C++ side of `ProgramDesc` object pointer. 
The C++ object is\n exposed by :code:`pybind`.\n\n Notes: This is a very low level API. Users should not use this API\n directly.\n \"\"\"\n return self.desc\n\n def _version(self):\n return self.desc._version()\n\n def clone(self, for_test=False):\n \"\"\"\n Create a new, duplicated program.\n\n\n Some operators, e.g., :code:`batch_norm`, behave differently between\n training and testing. They have an attribute, :code:`is_test`, to\n control this behaviour. This method will change the :code:`is_test`\n attribute of them to :code:`True` when :code:`for_test=True`.\n\n * Set for_test to False when we want to clone the program for training.\n * Set for_test to True when we want to clone the program for testing. We will not do any prune\n on program here, So if you just want an forward program for testing, please use :code:`clone`\n before using :code:`Opimizer.minimize`\n\n Notes: This API DOES NOT prune any operator. Use\n :code:`clone(for_test=True)` before backward and optimization please. e.g.\n\n .. code-block:: python\n\n test_program = fluid.default_main_program().clone(for_test=True)\n optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)\n optimizer.minimize()\n\n Args:\n for_test(bool): True if change the :code:`is_test` attribute of\n operators to :code:`True`.\n\n Returns:\n Program: The new, duplicated Program object.\n\n Examples:\n\n Notes: The Program Descs' order maybe different after :code:`clone` and this will not affect your training or testing progress. In the following example we give you an simple method :code:`print_prog(program)` to print Program Descs inorder to make sure you have same print result after :code:`clone`:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import six\n\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n\n\n 1. To clone a test program, the sample code is:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import six\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(train_program, startup_program):\n with fluid.unique_name.guard():\n img = fluid.layers.data(name='image', shape=[784])\n hidden = fluid.layers.fc(input=img, size=200, act='relu')\n hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)\n loss = fluid.layers.cross_entropy(\n input=fluid.layers.fc(hidden, size=10, act='softmax'),\n label=fluid.layers.data(name='label', shape=[1], dtype='int64'))\n avg_loss = fluid.layers.mean(loss)\n test_program = train_program.clone(for_test=False)\n print_prog(test_program)\n with fluid.program_guard(train_program, startup_program):\n with fluid.unique_name.guard():\n sgd = fluid.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(avg_loss)\n\n\n 2. The clone method can be avoid if you create program for training and program for testing individually.\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import six\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n def network(is_test):\n img = fluid.layers.data(name='image', shape=[784])\n hidden = fluid.layers.fc(input=img, size=200, act='relu')\n hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)\n loss = fluid.layers.cross_entropy(\n input=fluid.layers.fc(hidden, size=10, act='softmax'),\n label=fluid.layers.data(name='label', shape=[1], dtype='int64'))\n avg_loss = fluid.layers.mean(loss)\n return avg_loss\n\n\n train_program_2 = fluid.Program()\n startup_program_2 = fluid.Program()\n test_program_2 = fluid.Program()\n with fluid.program_guard(train_program_2, startup_program_2):\n with fluid.unique_name.guard():\n sgd = fluid.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(avg_loss)\n # the test startup program is not used.\n with fluid.program_guard(test_program_2, fluid.Program()):\n with fluid.unique_name.guard():\n loss = network(is_test=True)\n print(test_program_2)\n\n The two code snippets above will generate and print same programs.\n \"\"\"\n if for_test:\n p = self._inference_optimize(prune_read_op=False)\n else:\n p = Program()\n p.current_block_idx = self.current_block_idx\n p._seed = self._seed\n p.desc = core.ProgramDesc(self.desc)\n p.blocks = [\n Block(p, i) for i in six.moves.range(self.desc.num_blocks())\n ]\n\n p._current_role = self._current_role\n p.__op_role_var = self.__op_role_var\n\n p._sync_with_cpp()\n\n p._copy_param_info_from(self)\n p._copy_data_info_from(self)\n p._copy_dist_param_info_from(self)\n return p\n\n def 
_prune(self, targets):\n \"\"\"\n Prune operators and variables which are not needed to generate\n :code:`targets`.\n\n Notes: This is a very low level API. Users should not use this API\n directly. This API is in flux and not stable.\n\n Args:\n targets(list|Variable|Operator): A list of variables or operators\n need to be pruned\n\n Returns:\n Program: A new, pruned program.\n\n \"\"\"\n if not isinstance(targets, list):\n targets = [targets]\n targets_idx = []\n for t in targets:\n if not isinstance(t, Operator):\n if isinstance(t, Variable):\n # After transpiler processing, the op that output this\n # variable maybe has been changed, so t.op is not reliable\n # and we need to find the current op that generate this\n # variable here.\n t.op = None\n global_block = self.global_block()\n for idx, op in enumerate(global_block.ops):\n if t.name in op.output_arg_names:\n t.op = op\n break\n\n t = t.op\n if t is None:\n raise ValueError(\n \"The target variable must have an \"\n \"associated operator that generates it.\")\n else:\n raise ValueError(\"All targets of prune() can only be \"\n \"Variable or Operator.\")\n\n targets_idx.append([t.block.idx, t.idx])\n res = Program()\n res.desc = core.prune(self.desc, targets_idx)\n res.blocks = [\n Block(res, i) for i in six.moves.range(res.desc.num_blocks())\n ]\n res._sync_with_cpp()\n return res\n\n def _inference_optimize(self, prune_read_op=True):\n \"\"\"\n This method will create a new program and do following adjustments on it:\n 1. Remove all reader variables and their creator ops if exist.\n\n 2. Remove the :code:`read_op` if exists.\n\n 3. change the :code:`is_test`\n attribute of operators to :code:`True`. All the :code:`Parameter`\n information will be lost.\n\n Args:\n prune_read_op(bool): remove the read ops that are added by py_reader\n for cpp inference library\n\n Notes: This API is a very low level API. 
Use\n :code:`Program.clone(for_test=True)` instead.\n\n Returns:\n Program: The new program.\n \"\"\"\n res = Program()\n res.desc = core.ProgramDesc(self.desc)\n\n # remove all readers and the read_op if exist\n read_op_idx = 0\n root_block = res.desc.block(0)\n if prune_read_op:\n while True:\n if read_op_idx >= root_block.op_size() or root_block.op(\n read_op_idx).type() == 'read':\n break\n read_op_idx += 1\n if read_op_idx < root_block.op_size():\n root_block._remove_op(0, read_op_idx + 1)\n for var in root_block.all_vars():\n if var.type() == core.VarDesc.VarType.READER:\n root_block._remove_var(cpt.to_bytes(var.name()))\n\n # change all `is_test` attributes to True\n for i in six.moves.range(res.desc.num_blocks()):\n block = res.desc.block(i)\n for j in six.moves.range(block.op_size()):\n op = block.op(j)\n if op.has_attr('is_test'):\n op._set_attr('is_test', True)\n res.blocks = [\n Block(res, i) for i in six.moves.range(res.desc.num_blocks())\n ]\n res._sync_with_cpp()\n return res\n\n @staticmethod\n def parse_from_string(binary_str):\n \"\"\"\n Deserialize a program desc from protobuf binary string.\n\n Notes: All information about parameters will be lost after serialization\n and deserialization.\n\n Args:\n binary_str_type(str): The binary prootbuf string.\n\n Returns:\n Program: A deserialized program desc.\n \"\"\"\n p = Program()\n p.desc = core.ProgramDesc(binary_str)\n p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]\n p._sync_with_cpp()\n return p\n\n @staticmethod\n def _construct_from_desc(desc):\n \"\"\"\n Construct a program from program desc.\n\n Args:\n desc(core.ProgramDesc): The program desc for constructing.\n\n Returns:\n Program: A program.\n \"\"\"\n p = Program()\n p.desc = desc\n p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]\n p._sync_with_cpp()\n return p\n\n @property\n def random_seed(self):\n \"\"\"\n The default random seed for random operators in Program. 
Zero means get\n the random seed from random device.\n\n Notes: It must be set before the operators have been added.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n random_seed = prog.random_seed\n print(random_seed)\n prog.random_seed = 1\n print(prog.random_seed)\n \"\"\"\n return self._seed\n\n @property\n def num_blocks(self):\n \"\"\"\n The number of blocks in this program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n num_blocks = prog.num_blocks\n print(num_blocks)\n \"\"\"\n return self.desc.num_blocks()\n\n @random_seed.setter\n def random_seed(self, seed):\n if not isinstance(seed, int):\n raise ValueError(\"Seed must be a integer.\")\n self._seed = seed\n\n def __repr__(self):\n return self.__str__()\n\n def global_block(self):\n \"\"\"\n Get the first block of this program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n gb_block = prog.global_block()\n print(gb_block)\n \"\"\"\n return self.blocks[0]\n\n def block(self, index):\n \"\"\"\n Get the :code:`index` block of this program\n Args:\n index(int): The index of block to get\n\n Returns:\n Block: The :code:`index` block\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n block_0 = prog.block(0)\n print(block_0)\n \"\"\"\n return self.blocks[index]\n\n def current_block(self):\n \"\"\"\n Get the current block. The :code:`current` block is the block to append\n operators.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n current_blk = prog.current_block()\n print(current_blk)\n \"\"\"\n return self.blocks[self.current_block_idx]\n\n def _create_block(self, parent_idx=None):\n \"\"\"\n Create a new block with the :code:`parent_idx` and change the current block\n to new block.\n\n Args:\n parent_idx(int): The parent block index.\n\n Returns:\n Block: The new block.\n \"\"\"\n new_block_idx = len(self.blocks)\n parent = self.current_block() if parent_idx is None else self.block(\n parent_idx)\n self.desc.append_block(parent.desc)\n self.current_block_idx = new_block_idx\n self.blocks.append(Block(self, self.current_block_idx))\n return self.current_block()\n\n def _rollback(self):\n \"\"\"\n Exit a code block, i.e., roll back to the parent block.\n Returns:\n None\n \"\"\"\n self.current_block_idx = self.current_block().parent_idx\n\n def _sync_with_cpp(self):\n \"\"\"\n Synchronize Python instance to its binding C++ object instance.\n If the program is modified in C++ space, this method should be invoked.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Returns:\n None\n \"\"\"\n for block_idx in range(len(self.blocks), self.desc.num_blocks()):\n self.blocks.append(Block(self, block_idx))\n for block in self.blocks:\n block._sync_with_cpp()\n\n def _copy_param_info_from(self, other):\n \"\"\"\n Copy the information of parameters from other program.\n\n Notes: This is a very low level API. 
Users should not invoke it\n directly.\n\n Args:\n other(Program): Other program\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\"_copy_param_info_from should be invoked with \"\n \"Program\")\n\n if len(self.blocks) != len(other.blocks):\n raise ValueError(\"_copy_param_info_from should be invoked with two \"\n \"program, with represent the same topology\")\n self.global_block()._copy_param_info_from(other.global_block())\n\n def _copy_dist_param_info_from(self, other):\n \"\"\"\n Copy the information of distributed information from other program.\n\n Args:\n other(Program): Other program\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\"_copy_dist_param_info_from should be invoked with \"\n \"Program\")\n self._is_distributed = other._is_distributed\n self._is_chief = other._is_chief\n self._parameters_on_pservers = other._parameters_on_pservers\n self._endpoints = other._endpoints\n self._ps_endpoint = other._ps_endpoint\n self._distributed_lookup_table = other._distributed_lookup_table\n\n def _copy_data_info_from(self, other):\n \"\"\"\n Copy the information of data variables from other program.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Args:\n other(Program): Other program\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\"_copy_param_info_from should be invoked with \"\n \"Program\")\n\n if len(self.blocks) != len(other.blocks):\n raise ValueError(\"_copy_param_info_from should be invoked with two \"\n \"program, with represent the same topology\")\n for var in list(other.global_block().vars.values()):\n if var.is_data:\n self.global_block().var(var.name).is_data = True\n\n def list_vars(self):\n \"\"\"\n Get all variables from this Program. A iterable object is returned.\n\n Returns:\n iterable: The generator will yield every variable in this program.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32')\n label = fluid.layers.data(name='label', shape=[128,1], dtype='int64')\n for var in prog.list_vars():\n print(var)\n \"\"\"\n for each_block in self.blocks:\n for each_var in list(each_block.vars.values()):\n yield each_var\n\n\nclass Parameter(Variable):\n \"\"\"\n Parameter is derived from Variable. A parameter is a persistable\n Variable, and will be updated by optimizers after each iteration.\n The training of a neural network is essentially the updating of\n its parameters.\n\n Relative to a general Variable, a Parameter has several its own\n member variables:\n\n Args:\n trainable(bool): True if the parameter need to be updated after\n iterations.\n optimize_attr(map): Parameter attributes related with optimizing.\n Currently, it only contains 'learning_rate'.\n Default: {'learning_rate': 1.0}\n regularizer(WeightDecayRegularizer): The Regularizer which will\n be applied on the parameter. Default: None\n gradient_clip_attr(BaseGradientClipAttr): The gradint clip strategy\n which will be applied on the parameter. 
Default: None\n do_model_average(bool): True if the model average strategy will\n be applied on this parameter.\n \"\"\"\n\n def __init__(self, block, shape, dtype, **kwargs):\n if shape is None or dtype is None:\n raise ValueError(\"Parameter must set shape and dtype\")\n if len(shape) == 0:\n raise ValueError(\"Parameter shape cannot be empty\")\n\n for each in shape:\n if each < 0:\n raise ValueError(\"Parameter shape should not be related with \"\n \"batch-size\")\n\n Variable.__init__(\n self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)\n self.trainable = kwargs.get('trainable', True)\n\n self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})\n\n self.regularizer = kwargs.get('regularizer', None)\n\n self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None)\n\n self.do_model_average = kwargs.get('do_model_average', None)\n\n def __str__(self):\n return self.to_string(True)\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n To debug string.\n\n Args:\n throw_on_error(bool): raise exception when self is not initialized\n when throw_on_error is True\n with_details(bool): more details about variables and parameters\n (e.g. trainable, optimize_attr, ...) will be printed when with_details is True\n\n Returns(str): The debug string.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n rlt = fluid.layers.data(\"fake_data\", shape=[1,1], dtype='float32')\n debug_str = prog.to_string(throw_on_error=True, with_details=False)\n print(debug_str)\n \"\"\"\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n if with_details:\n res_str = Variable.to_string(self, throw_on_error, True)\n additional_attr = (\"trainable\", \"optimize_attr\", \"regularizer\",\n \"gradient_clip_attr\", \"do_model_average\")\n for attr_name in additional_attr:\n res_str += \"%s: %s\\n\" % (\n attr_name, six.binary_type(getattr(self, attr_name)))\n else:\n res_str = Variable.to_string(self, throw_on_error, False)\n return res_str\n\n __repr__ = __str__\n\n\n# program is a global instance.\n_main_program_ = Program()\n_startup_program_ = Program()\n\n\ndef default_startup_program():\n \"\"\"\n Get default/global startup program.\n\n The layer function in :code:`fluid.layers` will create parameters, readers,\n NCCL handles as global variables. The :code:`startup_program` will\n initialize them by the operators in startup program. The layer function will\n append these initialization operators into startup program.\n\n This method will return the :code:`default` or the :code:`current` startup\n program. Users can use :code:`fluid.program_guard` to switch program.\n\n Returns:\n Program: startup program\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(main_program=main_program, startup_program=startup_program):\n x = fluid.layers.data(name=\"x\", shape=[-1, 784], dtype='float32')\n y = fluid.layers.data(name=\"y\", shape=[-1, 1], dtype='int32')\n z = fluid.layers.fc(name=\"fc\", input=x, size=10, act=\"relu\")\n\n print(\"main program is: {}\".format(fluid.default_main_program()))\n print(\"start up program is: {}\".format(fluid.default_startup_program()))\n \"\"\"\n return _startup_program_\n\n\ndef default_main_program():\n \"\"\"\n Get default/global main program. The main program is used for training or\n testing.\n\n All layer function in :code:`fluid.layers` will append operators and\n variables to the :code:`default_main_program`.\n\n The :code:`default_main_program` is the default program in a lot of APIs.\n For example, the :code:`Executor.run()` will execute the\n :code:`default_main_program` when the program is not specified.\n\n Returns:\n Program: main program\n \"\"\"\n return _main_program_\n\n\ndef switch_main_program(program):\n \"\"\"\n Switch the main program to a new program.\n\n Args:\n program(Program): The new main program\n\n Returns:\n Program: The previous main program\n \"\"\"\n global _main_program_\n prev_program = _main_program_\n _main_program_ = program\n return prev_program\n\n\ndef switch_startup_program(program):\n \"\"\"\n Switch the startup program to a new program\n Args:\n program(Program): The new startup program\n\n Returns:\n Program: The previous startup program\n \"\"\"\n global _startup_program_\n prev_program = _startup_program_\n _startup_program_ = program\n return prev_program\n\n\n@signature_safe_contextmanager\ndef program_guard(main_program, startup_program=None):\n \"\"\"\n Change the global main program and startup program with `with` statement.\n Layer functions in the Python `with` block will append operators 
and\n variables to the new main programs.\n\n Examples:\n .. code-block:: python\n \n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(main_program, startup_program):\n data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10, act='relu')\n\n Notes: The temporary :code:`Program` can be used if the user does not need\n to construct either of startup program or main program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n # does not care about startup program. Just pass a temporary value.\n with fluid.program_guard(main_program, fluid.Program()):\n data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')\n\n Args:\n main_program(Program): New main program inside `with` statement.\n startup_program(Program): New startup program inside `with` statement.\n None means do not change startup program.\n \"\"\"\n if not isinstance(main_program, Program):\n raise TypeError(\"main_program should be Program\")\n main_program = switch_main_program(main_program)\n if startup_program is not None:\n if not isinstance(startup_program, Program):\n raise TypeError(\"startup_program should be Program\")\n startup_program = switch_startup_program(startup_program)\n yield\n switch_main_program(main_program)\n if startup_program is not None:\n switch_startup_program(startup_program)\n\n\ndef _get_var(name, program=None):\n \"\"\"\n Get a variable by name from the global block of a program.\n\n Args:\n name(str): name of the variable\n program(Program|None): program object.\n If None, default_global_program() will be used.\n\n Returns:\n Variable\n \"\"\"\n if program is None:\n program = default_main_program()\n assert isinstance(name, str)\n assert isinstance(program, Program)\n\n return program.global_block().var(name)\n\n\n@signature_safe_contextmanager\ndef 
_dygraph_guard(tracer):\n global _dygraph_tracer_\n tmp_trace = _dygraph_tracer_\n _dygraph_tracer_ = tracer\n\n yield\n\n _dygraph_tracer_ = tmp_trace\n\n\n@signature_safe_contextmanager\ndef _dygraph_place_guard(place):\n global _dygraph_current_expected_place_\n tmp_place = _dygraph_current_expected_place_\n _dygraph_current_expected_place_ = place\n\n yield\n\n _dygraph_current_expected_place_ = tmp_place\n" ]
[ [ "numpy.dtype" ] ]
mirekphd/Vertica-ML-Python
[ "1660471dee371808eb88cf38bbd59cb619741322" ]
[ "vertica_ml_python/rvd.py" ]
[ "# (c) Copyright [2018] Micro Focus or one of its affiliates. \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n############################################################################################################ \n# __ __ ___ ____ ______ ____ __ ____ ___ ___ _ ____ __ __ ______ __ __ ___ ____ #\n# | | | / _| \\| | | / ]/ | | | | | | \\| | | | | |/ \\| \\ #\n# | | |/ [_| D | || | / /| o | | _ _ | | | o | | | | | | | _ | #\n# | | | _| /|_| |_|| |/ / | | | \\_/ | |___ | _/| ~ |_| |_| _ | O | | | #\n# | : | [_| \\ | | | / \\_| _ | | | | | | | |___, | | | | | | | | | #\n# \\ /| | . \\ | | | \\ | | | | | | | | | | | | | | | | | | | #\n# \\_/ |_____|__|\\_| |__| |____\\____|__|__| |___|___|_____| |__| |____/ |__| |__|__|\\___/|__|__| #\n# #\n############################################################################################################\n# Vertica-ML-Python allows user to create RVD (Resilient Vertica Dataset). #\n# RVD simplifies data exploration, data cleaning and machine learning in Vertica. #\n# It is an object which keeps in it all the actions that the user wants to achieve # \n# and execute them when they are needed. 
#\n#####################################################################################\n# #\n# Author: Badr Ouali #\n# #\n######################\n\n# Libraries\nimport numpy as np\nimport os\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport shutil\nimport time\nimport matplotlib.colors as colors\nfrom random import shuffle\nfrom vertica_ml_python.rvc import RVC\nfrom vertica_ml_python.fun import print_table\nfrom vertica_ml_python.fun import isnotebook\nfrom vertica_ml_python.fun import run_query\nfrom vertica_ml_python.fun import column_matrix\n\nimport pandas as pd\n\n# Drop Table if it exists\ndef drop_table(input_relation,cursor,print_info=True):\n\tcursor.execute(\"select 1;\")\n\ttry:\n\t\tquery=\"drop table {};\".format(input_relation)\n\t\tcursor.execute(query)\n\t\tif (print_info):\n\t\t\tprint(\"The table {} was successfully dropped.\".format(input_relation))\n\texcept:\n\t\tprint(\"/!\\\\ Warning: The table {} doesn't exist !\".format(input_relation))\n# Drop View if it exists\ndef drop_view(view_name,cursor,print_info=True):\n\tcursor.execute(\"select 1;\")\n\ttry:\n\t\tquery=\"drop view {};\".format(view_name)\n\t\tcursor.execute(query)\n\t\tif (print_info):\n\t\t\tprint(\"The view {} was successfully dropped.\".format(view_name))\n\texcept:\n\t\tprint(\"/!\\\\ Warning: The view {} doesn't exist !\".format(view_name))\n# Create a RVD from a csv file (= Vertica CSV parser using Flex Tables)\ndef read_csv(path,cursor,local=True,input_relation=None,delimiter=',',columns=None,types=None,\n\t\t\tnull='',enclosed_by='\"',escape='\\\\',skip=1,temporary=False,skip_all=False,\n\t\t\tsplit=False,split_name='vpython_split'):\n\tif (not(isinstance(skip,int)) or (skip<0)):\n\t\traise TypeError(\"The parameter 'skip' must be a positive integer\")\n\tif (not(isinstance(temporary,bool))):\n\t\traise TypeError(\"The parameter 'temporary' must be a bool\")\n\tif (not(isinstance(skip_all,bool))):\n\t\traise TypeError(\"The 
parameter 'skip_all' must be a bool\")\n\tif (not(isinstance(local,bool))):\n\t\traise TypeError(\"The parameter 'local' must be a bool\")\n\tif (not(isinstance(split_name,str))):\n\t\traise TypeError(\"The parameter 'split_name' must be a varchar\")\n\tif (not(isinstance(escape,str))):\n\t\traise TypeError(\"The parameter 'escape' must be a varchar\")\n\tif (not(isinstance(enclosed_by,str))):\n\t\traise TypeError(\"The parameter 'enclosed_by' must be a varchar\")\n\tif (not(isinstance(null,str))):\n\t\traise TypeError(\"The parameter 'null' must be a varchar\")\n\tif (not(isinstance(delimiter,str))):\n\t\traise TypeError(\"The parameter 'delimiter' must be a varchar\")\n\tif (not(isinstance(path,str))):\n\t\traise TypeError(\"The parameter 'path' must be a varchar\")\n\tif (local):\n\t\tlocal=\" local \"\n\telse:\n\t\tlocal=\"\"\n\tif (type(input_relation)!=str):\n\t\t\tinput_relation=path.split(\"/\")[-1].split(\".csv\")[0]\n\tif (temporary):\n\t\ttemporary=\"temporary\"\n\telse:\n\t\ttemporary=\"\"\n\tschema_input_relation=input_relation.split(\".\")\n\tif (len(schema_input_relation)==1):\n\t\tschema=None\n\telse:\n\t\tinput_relation=schema_input_relation[1]\n\t\tschema=schema_input_relation[0]\n\tquery=\"select column_name from columns where table_name='{}'\".format(input_relation)\n\tif (schema!=None):\n\t\tquery+=\" and table_schema='{}'\".format(schema)\n\tcursor.execute(query)\n\tquery_result=cursor.fetchall()\n\tif (query_result!=[]):\n\t\tprint(\"/!\\\\ Warning: The table {} already exists !\".format(input_relation))\n\t\treturn\n\telse:\n\t\tif (columns==None):\n\t\t\tflex_name=\"_vpython\"+str(np.random.randint(10000000))+\"_flex_\"\n\t\t\tquery=\"drop table if exists \"+flex_name\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"create flex table if not exists \"+flex_name+\"()\"\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"copy \"+flex_name+\" from\"+local+\"'{}' parser fcsvparser(delimiter='{}',\"\n\t\t\tquery+=\"enclosed_by='{}',escape='{}') null 
'{}'\"\n\t\t\tquery=query.format(path,delimiter,enclosed_by,escape,null)\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"select compute_flextable_keys('\"+flex_name+\"');\"\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"select key_name,data_type_guess from \"+flex_name+\"_keys\"\n\t\t\tcursor.execute(query)\n\t\t\tquery_result=cursor.fetchall()\n\t\t\tcolumns=[]\n\t\t\tfor column in query_result:\n\t\t\t\tcolumns+=[[item for item in column]]\n\t\t\tprint(\"The parser guess the following columns and types:\")\n\t\t\tfor column,column_type in columns:\n\t\t\t\tprint(column+\": \"+column_type)\n\t\t\tprint(\"Illegal characters in the columns names will be erased.\")\n\t\t\tif not(skip_all):\n\t\t\t\tnext=False\n\t\t\t\twhile not(next):\n\t\t\t\t\tprint(\"Is any type wrong?\\nIf one of the types is not correct, it will be considered as Varchar(100).\")\n\t\t\t\t\tprint(\"0 - There is one type that I want to modify.\")\n\t\t\t\t\tprint(\"1 - I wish to continue.\")\n\t\t\t\t\tprint(\"2 - I wish to see the columns and their types again.\")\n\t\t\t\t\tnext=input()\n\t\t\t\t\tif (next==0 or next=='0'):\n\t\t\t\t\t\tprint(\"please write ['column_name','column_type'] to modify the type of the corresponding column.\")\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcolumn_name,column_type=eval(input())\n\t\t\t\t\t\t\tfor column in columns:\n\t\t\t\t\t\t\t\tif (column[0]==column_name):\n\t\t\t\t\t\t\t\t\tcolumn[1]=column_type\n\t\t\t\t\t\t\t\t\tprint(\"type of \"+column_name+\" has been successfully changed.\")\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint(\"Failed to change type. 
Try again.\")\n\t\t\t\t\t\tnext=False\n\t\t\t\t\telif (next==2 or next=='2'):\n\t\t\t\t\t\tfor column,column_type in columns:\n\t\t\t\t\t\t\tprint(column+\": \"+column_type)\n\t\t\t\t\t\tnext=False\n\t\t\t\t\telif (next!=1 and next!='1'):\n\t\t\t\t\t\tprint(\"Please enter a value between 0 and 2.\")\n\t\t\t\t\t\tnext=False\n\t\t\tfor column in columns:\n\t\t\t\ttry:\n\t\t\t\t\tif (column[1]==\"Interval\"):\n\t\t\t\t\t\tcolumn[1]=\"Varchar(100)\"\n\t\t\t\t\t\tprint(\"/!\\\\ Warning: Type of {} was changed to Varchar(100) [Interval type is not supported]\".format(column[0]))\n\t\t\t\t\telif (\"Varchar\" not in column[1]):\n\t\t\t\t\t\tquery='select (case when \"'+column[0]+'\"=\\''+null+'\\' then null else \"'+column[0]+'\" end)::'+column[1]+' as \"'+column[0]+'\"'\n\t\t\t\t\t\tquery+=\" from \"+flex_name+\" where \"+column[0]+\" is not null limit 1000\"\n\t\t\t\t\t\tcursor.execute(query)\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"/!\\\\ Warning: Type of {} was changed to Varchar(100)\".format(column[0]))\n\t\t\t\t\tcolumn[1]=\"Varchar(100)\"\n\t\t\tcolumns=['(case when \"'+item[0]+'\"=\\''+null+'\\' then null else \"'+item[0]+'\" end)::'+item[1]+' as \"'+\n\t\t\t\t\titem[0].replace('.','').replace('-','').replace('+','').replace('=','').replace('*','')\n\t\t\t\t\t+'\"' for item in columns]\n\t\t\tif (split):\n\t\t\t\tcolumns+=['random() as '+split_name]\n\t\t\tquery=(\"create {} table {} as select \".format(temporary,input_relation)+\",\".join(columns)+\n\t\t\t\t\t\" from \"+flex_name)\n\t\t\tquery=query.format(input_relation)\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"drop table \"+flex_name\n\t\t\tcursor.execute(query)\n\t\telse:\n\t\t\tif (type(columns)!=list) or (type(types)!=list) or (len(types)!=len(columns)):\n\t\t\t\traise TypeError(\"The parameters 'types' and 'columns' must be two lists having the same size\")\n\t\t\tquery=\"create table {}(\".format(input_relation)\n\t\t\ttry:\n\t\t\t\tfor i in range(len(columns)):\n\t\t\t\t\tquery+=columns[i]+\" 
\"+types[i]+\", \"\n\t\t\t\tif (split):\n\t\t\t\t\tquery+=\" \"+split_name+\" float default random()\"+\");\"\n\t\t\t\telse:\n\t\t\t\t\tquery=query[0:-2]\n\t\t\t\t\tquery+=\");\"\n\t\t\texcept:\n\t\t\t\traise TypeError(\"The parameters 'types' and 'columns' must be two lists containing only varchars\")\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"copy {}({}) from {} '{}' delimiter '{}' null '{}' enclosed by '{}' escape as '{}' skip {};\".format(\n\t\t\t\tinput_relation,\", \".join(columns),local,path,delimiter,null,enclosed_by,escape,skip)\n\t\t\tcursor.execute(query)\n\t\tprint(\"The table {} has been successfully created.\".format(input_relation))\n\t\treturn RVD(input_relation,cursor)\n#\n############################\n# _______ _______ #\n# | __ \\ \\ / / __ \\ #\n# | |__) \\ \\ / /| | | | #\n# | _ / \\ \\/ / | | | | #\n# | | \\ \\ \\ / | |__| | #\n# |_| \\_\\ \\/ |_____/ #\n# #\n#############################\n# #\n# Resilient Vertica Dataset #\n# #\n#############################\n#\n##\nclass RVD:\n\t###################\n\t# #\n\t# Special Methods #\n\t# #\n\t###################\n\t#\n\t# Initialization\n\t#\n\t# RVD has 7 main attributes: input_relation, cursor, dsn, columns, where, offset and limit\n\t# It has also 7 other attributes to simplify the code and to have easy interaction with\n\t# sql and matplotlib. 
\n\tdef __init__(self,input_relation,cursor=None,dsn=None,columns=None,with_quote=False):\n\t\tif ((isinstance(cursor,type(None))) and (isinstance(dsn,type(None)))):\n\t\t\traise Exception(\"At least one of the two parameters (dsn or cursor) must receive an input for the RVD creation\")\n\t\tif ((isinstance(cursor,type(None))) and not(isinstance(dsn,str))):\n\t\t\traise Exception(\"If the cursor is not informed, the dsn must be a varchar corresponding to a Vertica DSN\")\n\t\telif (isinstance(dsn,str)):\n\t\t\timport pyodbc\n\t\t\tcursor=pyodbc.connect(\"DSN=\"+dsn).cursor()\n\t\t\tself.dsn=dsn\n\t\tschema_input_relation=input_relation.split(\".\")\n\t\tif (len(schema_input_relation)==1):\n\t\t\t# Name of the concerned table\n\t\t\tself.schema=\"public\"\n\t\t\tself.input_relation=input_relation\n\t\telse:\n\t\t\tself.input_relation=schema_input_relation[1]\n\t\t\tself.schema=schema_input_relation[0]\n\t\t# Cursor to the Vertica Database\n\t\tself.cursor=cursor\n\t\t# All the columns of the RVD\n\t\tif (type(columns)!=list):\n\t\t\tquery=\"select column_name from columns where table_name='{}' and table_schema='{}'\".format(self.input_relation,self.schema)\n\t\t\tcursor.execute(query)\n\t\t\tcolumns=cursor.fetchall()\n\t\t\tcolumns=[str(item) for sublist in columns for item in sublist]\n\t\t\tfor column in columns:\n\t\t\t\tif (column.find(' ')!=-1 or column.find('<')!=-1 or column.find('>')!=-1 or column.find('-')!=-1 or column.find('#')!=-1 or column.find('+')!=-1 or column.find('*')!=-1 or column.find('/')!=-1):\n\t\t\t\t\tprint(\"/!\\\\ Warning: A special char has been detected in a column name. 
The RVD will work with quotes.\") \n\t\t\t\t\twith_quote=True\n\t\t\t\t\tbreak\n\t\t\tif (with_quote):\n\t\t\t\tcolumns=['\"' + item + '\"' for item in columns]\n\t\tif (columns!=[]):\n\t\t\tself.columns=columns\n\t\t\tview=False\n\t\telse:\n\t\t\tview=True\n\t\tif (view):\n\t\t\tquery=\"select * from views where table_name='{}' and table_schema='{}'\".format(self.input_relation,self.schema)\n\t\t\tcursor.execute(query)\n\t\t\tcolumns=cursor.fetchall()\n\t\t\tif (columns==[]):\n\t\t\t\tprint(\"/!\\\\ Warning: No table or views '{}' found.\\nNothing was created.\".format(self.input_relation))\n\t\t\t\tdel self\n\t\t\t\treturn None\n\t\t\tname=\"_vpython\"+str(np.random.randint(10000000))+\"_tt_\"\n\t\t\tquery=\"drop table if exists \"+name\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"create temporary table \"+name+\" as select * from \"+input_relation+\" limit 1000\"\n\t\t\tcursor.execute(query)\n\t\t\tquery=\"select column_name from columns where table_name='\"+name+\"'\"\n\t\t\tcursor.execute(query)\n\t\t\tcolumns=cursor.fetchall()\n\t\t\tself.columns=[str(item) for sublist in columns for item in sublist]\n\t\t\tself.input_relation=name\n\t\tfor column in self.columns:\n\t\t\tnew_rvc=RVC(column,parent=self)\n\t\t\tsetattr(self,column,new_rvc)\n\t\t\tif (with_quote):\n\t\t\t\tsetattr(self,column[1:-1],new_rvc)\n\t\t# Table Limitation\n\t\tself.limit=None \n\t\t# Table Offset\n\t\tself.offset=0\n\t\t# Rules for the cleaned data\n\t\tself.where=[]\n\t\t# Display the elapsed time during the query\n\t\tself.time_on=False\n\t\t# Display or not the sequal queries that are used during the RVD manipulation\n\t\tself.query_on=False\n\t\t# Use sqlparse to reindent the query\n\t\tself.reindent=False\n\t\t# Label Location and figure size\n\t\tself.legend_loc=(None,None,None)\n\t\tif (isnotebook()):\n\t\t\tself.figsize=(9,7)\n\t\telse:\n\t\t\tself.figsize=(7,5)\n\t\t# Figure 
color\n\t\trvd_colors=['dodgerblue','seagreen','indianred','gold','tan','pink','darksalmon','lightskyblue','lightgreen',\n\t\t\t\t\t'palevioletred','coral']\n\t\tall_colors=[item for item in colors.cnames]\n\t\tshuffle(all_colors)\n\t\tfor c in all_colors:\n\t\t\tif c not in rvd_colors:\n\t\t\t\trvd_colors+=[c]\n\t\tself.colors=rvd_colors\n\t\t# RVD history\n\t\tself.rvd_history=[]\n\t\tif (view):\n\t\t\tself.input_relation=input_relation\n\t\t\tquery=\"drop table if exists \"+name\n\t\t\tcursor.execute(query)\n\t# Get and Set item\n\tdef __getitem__(self,index):\n\t\treturn getattr(self,index)\n\tdef __setitem__(self,index,val):\n\t\tsetattr(self,index,val)\n\t# Object Representation\n\tdef __repr__(self,limit=30,table_info=True):\n\t\tif ((self.limit!=None) and (self.limit<limit)):\n\t\t\tis_finished=True\n\t\telse:\n\t\t\tis_finished=False\n\t\tquery=\"select * from {} limit {}\".format(self._table_transf_(),limit)\n\t\tself._display_query_(query)\n\t\tstart_time = time.time()\n\t\tself.cursor.execute(query)\n\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tquery_result=self.cursor.fetchall()\n\t\tdata=[item for item in query_result]\n\t\tformatted_text=\"\"\n\t\tif (data!=[]):\n\t\t\tdata_columns=[[item] for item in self.columns]\n\t\t\tfor row in data:\n\t\t\t\tfor idx,val in enumerate(row):\n\t\t\t\t\tdata_columns[idx]+=[val]\n\t\t\tformatted_text+=print_table(data_columns,is_finished=is_finished,offset=max(self.offset,0))\n\t\telse:\n\t\t\tfor column in self.columns:\n\t\t\t\tformatted_text+=column+\" \"\n\t\t\tformatted_text+=\"\\n\"\n\t\tif (table_info):\n\t\t\tformatted_text+=\"Name: {}, Number of rows: {}, Number of columns: {}\".format(\n\t\t\t\tself.input_relation,self.count(),len(self.columns))\n\t\tif isnotebook():\n\t\t\tformatted_text=\"Name: {}, Number of rows: {}, Number of columns: {}\".format(\n\t\t\t\tself.input_relation,self.count(),len(self.columns))\n\t\treturn formatted_text\n\t# Object attr affectation\n\tdef 
__setattr__(self,attr,val):\n\t\t# input_relation\n\t\tif (attr==\"input_relation\"):\n\t\t\tif not(isinstance(val,str)):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute 'input_relation' must be a string corresponding to a \"\n\t\t\t\t\t+\"table or view inside your Vertica DB.\\nYou are not allowed to manually change\"\n\t\t\t\t\t+ \"this attribute, it can destroy the RVD robustness.\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# schema\n\t\tif (attr==\"schema\"):\n\t\t\tif not(isinstance(val,(str,type(None)))):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute 'schema' must be a string corresponding to a \"\n\t\t\t\t\t+\"schema inside your Vertica DB.\\nYou are not allowed to manually change\"\n\t\t\t\t\t+ \"this attribute, it can destroy the RVD robustness.\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# cursor\n\t\telif (attr==\"cursor\"):\n\t\t\ttry:\n\t\t\t\tval.execute(\"select 1;\")\n\t\t\t\tresult=int(val.fetchone()[0])\n\t\t\t\tif (result==1):\n\t\t\t\t\tself.__dict__[attr]=val\n\t\t\t\telse:\n\t\t\t\t\tprint(\"/!\\\\ Warning: attribute 'cursor' must be a cursor to a Vertica DB having \"\n\t\t\t\t\t\t+\"the fetchone and fetchall methods.\\nNothing was changed.\")\n\t\t\texcept:\n\t\t\t\tprint(\"/!\\\\ Warning: attribute 'cursor' must be a cursor to a Vertica DB. 
Use pyodbc or jaydebeapi for \"\n\t\t\t\t\t+\"respectively ODBC and JDBC connection using Python.\\nNothing was changed.\")\n\t\t# columns\n\t\telif (attr==\"columns\"):\n\t\t\terror=False\n\t\t\tif not(isinstance(val,list)):\n\t\t\t\terror=True\n\t\t\telse:\n\t\t\t\tfor item in val:\n\t\t\t\t\tif not(isinstance(item,str)):\n\t\t\t\t\t\terror=True\n\t\t\tif (error):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute 'columns' must be the list of the different table/view columns.\"\n\t\t\t\t\t+\"\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# offset\n\t\telif (attr==\"offset\"):\n\t\t\tif (not(isinstance(val,int)) or (val<0)):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute '\"+attr+\"' must be a positive integer.\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# limit\n\t\telif (attr ==\"dsn\"):\n\t\t\tif not(isinstance(val,str)):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute '\"+attr+\"' must be a varchar corresponding to a Vertica DSN.\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# limit\n\t\telif (attr ==\"limit\"):\n\t\t\tif (not(isinstance(val,(int,type(None)))) or ((isinstance(val,int)) and (val<0))):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute '\"+attr+\"' must be a positive integer or null (no limit).\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# where\n\t\telif (attr==\"where\"):\n\t\t\terror=False\n\t\t\tif not(isinstance(val,list)):\n\t\t\t\terror=True\n\t\t\telse:\n\t\t\t\tfor item in val:\n\t\t\t\t\tif ((type(item)!=tuple) or (len(item)!=2) or (type(item[0])!=str) or (type(item[1])!=int or item[1]<0)):\n\t\t\t\t\t\terror=True\n\t\t\tif (error):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute 'where' must be a list of 2-tuple of the form\"\n\t\t\t\t\t+ \"(filter,filter_pos). 
Changing this attribute can destroy the RVD robustness.\"\n\t\t\t\t\t+ \"\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# time_on/query_on/reindent\n\t\telif (attr in [\"time_on\",\"query_on\",\"reindent\"]):\n\t\t\tif not(isinstance(val,bool)):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute '\"+attr+\"' must be a bool.\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# legend_loc\n\t\telif (attr==\"legend_loc\"):\n\t\t\tif (not(isinstance(val,tuple)) or (len(val)!=3)):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute '\"+attr+\"' must be a tuple of length 3.\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# rvd_history\n\t\telif (attr==\"rvd_history\"):\n\t\t\terror=False\n\t\t\tif not(isinstance(val,list)):\n\t\t\t\terror=True\n\t\t\telse:\n\t\t\t\tfor item in val:\n\t\t\t\t\tif (type(item)!=str):\n\t\t\t\t\t\terror=True\n\t\t\tif (error):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute 'rvd_history' must be a list of varchar. 
\"\n\t\t\t\t\t+ \"Changing this attribute manually can destroy the RVD robustness.\"\n\t\t\t\t\t+ \"\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tself.__dict__[attr]=val\n\t\t# colors\n\t\telif (attr==\"colors\"):\n\t\t\tif isinstance(val,str):\n\t\t\t\tval=[val]\n\t\t\tif not(isinstance(val,list)):\n\t\t\t\tprint(\"/!\\\\ Warning: attribute '\"+attr+\"' must be a list of colors.\\nNothing was changed.\")\n\t\t\telse:\n\t\t\t\tall_colors=[item for item in colors.cnames]\n\t\t\t\tcorrect_colors=[]\n\t\t\t\terror=False\n\t\t\t\tfor color in val:\n\t\t\t\t\tif color not in all_colors:\n\t\t\t\t\t\tif (color[0]!='#'):\n\t\t\t\t\t\t\tcorrect_elem=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','A','B','C','D','E','F']\n\t\t\t\t\t\t\tfor idx in range(1,len(color)):\n\t\t\t\t\t\t\t\tif (color[idx] not in correct_elem):\n\t\t\t\t\t\t\t\t\terror=True\n\t\t\t\t\tif not(error):\n\t\t\t\t\t\tcorrect_colors+=[color]\n\t\t\t\t\telse:\n\t\t\t\t\t\terror=False\n\t\t\t\t\t\tprint(\"/!\\\\ Warning: the color '\"+color+\"' doesn't exist.\\nIt was not added to the RVD colors attribute.\")\n\t\t\t\tshuffle(all_colors)\n\t\t\t\tfor c in all_colors:\n\t\t\t\t\tif c not in correct_colors:\n\t\t\t\t\t\tcorrect_colors+=[c]\n\t\t\t\tself.__dict__[attr]=correct_colors*5\n\t\t# other attributes\n\t\telse:\n\t\t\tself.__dict__[attr]=val\n\t#\n\t########################\n\t# #\n\t# Semi Special Methods #\n\t# #\n\t########################\n\t#\n\t# These methods are used to simplify the code\n\t#\n\t# \n\t# Display the query if the attribute query_on is True\n\tdef _display_query_(self,query,title=\"\"):\n\t\tif (self.query_on):\n\t\t\ttry:\n\t\t\t\tscreen_columns=shutil.get_terminal_size().columns\n\t\t\texcept:\n\t\t\t\tscreen_rows, screen_columns = os.popen('stty size', 'r').read().split()\n\t\t\ttry:\n\t\t\t\timport sqlparse\n\t\t\t\tquery_print=sqlparse.format(query,reindent=self.reindent)\n\t\t\texcept:\n\t\t\t\tquery_print=query\n\t\t\tif (isnotebook()):\n\t\t\t\tfrom 
	# Display the query if the attribute query_on is True
	def _display_query_(self,query,title=""):
		# Debug helper: when self.query_on is set, echo the SQL that is about
		# to run. In a notebook the query is rendered as HTML with SQL keywords
		# in bold; in a terminal it is printed with a separator line.
		if (self.query_on):
			try:
				screen_columns=shutil.get_terminal_size().columns
			except:
				# Fallback for environments without shutil.get_terminal_size
				screen_rows, screen_columns = os.popen('stty size', 'r').read().split()
			try:
				# sqlparse is optional: used only to reindent the query
				import sqlparse
				query_print=sqlparse.format(query,reindent=self.reindent)
			except:
				query_print=query
			if (isnotebook()):
				from IPython.core.display import HTML,display
				display(HTML("<h4 style='color:#444444;text-decoration:underline;'>"+title+"</h4>"))
				query_print=query_print.lower()
				# Keywords emboldened when followed by a space
				sql_syntax=["select","from","where","insert","update","delete","order",
							"offset","limit","group","not","union","null"]
				# Keywords emboldened when surrounded by spaces (or after '(')
				sql_syntax2=["by","top","fetch","next","rows","only","distinct","between",
							"in","like","is","null","having","as","join","left","right",
							"full","self","union","any","all","exists","into","over","partition",
							"and","or","else","when","end","xor"]
				# Function names emboldened when followed by '('
				sql_analytics_fun=["avg","count","cume_dist","dense_rank","exponential_moving_average",
									"first_value","lag","last_value","lead","max","median","min","ntile",
									"nth_value","percent_rank","percentile_cont","percentile_disc","rank",
									"row_number","stddev","stddev_pop","stddev_samp","sum","var_pop","var_samp",
									"variance","floor","round","corr","decode","coalesce"]
				# Preserve layout in HTML: newlines -> <br>, spaces -> &emsp;
				query_print=query_print.replace('\n',' <br>')
				query_print=query_print.replace(' ',' &emsp; ')
				for item in sql_syntax:
					query_print=query_print.replace(item+" ",' <b> '+item+' </b> ')
				for item in sql_syntax2:
					query_print=query_print.replace(" "+item+" ",' <b> '+item+' </b> ')
					query_print=query_print.replace("("+item+" ",'(<b> '+item+' </b> ')
				for item in sql_analytics_fun:
					query_print=query_print.replace(item+"(",'<b>'+item+'</b>(')
				display(HTML(query_print))
				display(HTML("<div style='border:1px dashed black;width:100%'></div>"))
			else:
				print("$ "+title+" $\n")
				print(query_print)
				print("-"*int(screen_columns)+"\n")
$\\n\")\n\t\t\t\tprint(query_print)\n\t\t\t\tprint(\"-\"*int(screen_columns)+\"\\n\")\n\t# Display the elapsed query time if the attribute time_on is True\n\tdef _display_time_(self,elapsed_time):\n\t\tif (self.time_on):\n\t\t\ttry:\n\t\t\t\tscreen_columns=shutil.get_terminal_size().columns\n\t\t\texcept:\n\t\t\t\tscreen_rows, screen_columns = os.popen('stty size', 'r').read().split()\n\t\t\tif (isnotebook()):\n\t\t\t\tfrom IPython.core.display import HTML,display\n\t\t\t\tdisplay(HTML(\"<div><b>Elapsed Time:</b> \"+str(elapsed_time)+\"</div>\"))\n\t\t\t\tdisplay(HTML(\"<div style='border:1px dashed black;width:100%'></div>\"))\n\t\t\telse:\n\t\t\t\tprint(\"Elapsed Time: \"+str(elapsed_time))\n\t\t\t\tprint(\"-\"*int(screen_columns)+\"\\n\")\n\t# Return the Label Loc Initialization\n\tdef _legend_loc_init_(self):\n\t\tif ((type(self.legend_loc)!=list) or (len(self.legend_loc)!=3)):\n\t\t\tself.legend_loc=(None,None,None)\n\t\tlegend_loc=[]\n\t\t# bbox_to_anchor\n\t\tif (type(self.legend_loc[0])!=tuple):\n\t\t\tif (isnotebook()):\n\t\t\t\tlegend_loc+=[(1.04,0.5)]\n\t\t\telse:\n\t\t\t\tlegend_loc+=[(1,1)]\n\t\telse:\n\t\t\tlegend_loc+=[self.legend_loc[0]]\n\t\t# ncol\n\t\tif ((type(self.legend_loc[1])!=int) or (self.legend_loc[1]<0)):\n\t\t\tif (isnotebook()):\n\t\t\t\tlegend_loc+=[3]\n\t\t\telse:\n\t\t\t\tlegend_loc+=[2]\n\t\telse:\n\t\t\tlegend_loc+=[self.legend_loc[1]]\n\t\t# loc\n\t\tif (type(self.legend_loc[2])!=str):\n\t\t\tif (isnotebook()):\n\t\t\t\tlegend_loc+=[\"center left\"]\n\t\t\telse:\n\t\t\t\tlegend_loc+=[\"upper right\"]\n\t\telse:\n\t\t\tlegend_loc+=[self.legend_loc[2]]\n\t\treturn (legend_loc[0],legend_loc[1],legend_loc[2])\n\t# Display the columnar matrix using appropriate colors\n\tdef _show_matrix_(self,matrix,columns_x,columns_y,n,m,vmax,vmin,cmap='PRGn',title=\"\",\n\t\t\t\t\t\tcolorbar=\"\",x_label=\"\",y_label=\"\",with_numbers=True,mround=3):\n\t\tmatrix_array=np.ndarray(shape=(n,m),dtype=float)\n\t\tfor i in range(n):\n\t\t\tfor j in 
	# Return the string corresponding to the new table used by all the method: [The most important method]
	def _table_transf_(self,tablesample=None):
		# Build the SQL text of the "virtual table" that every other method
		# queries: the base relation wrapped in one nested SELECT per
		# transformation "floor", with the recorded where-clauses, offset and
		# limit applied at their floor.
		if (tablesample==None):
			tablesample=""
		else:
			tablesample=" tablesample({})".format(tablesample)
		# We save all the imputation grammar in a single list in order to find the max floor
		all_imputations_grammar=[]
		for column in self.columns:
			all_imputations_grammar+=[[item[0] for item in self[column].transformations]]
		# The max floor is the one of the column having the biggest number of transformations
		max_len=len(max(all_imputations_grammar,key=len))
		# Complete the imputations of the columns having a len < max_len
		# ("{}" is the identity: it is replaced by the column name below)
		for imputations in all_imputations_grammar:
			diff=max_len-len(imputations)
			if diff>0:
				imputations+=["{}"]*diff
		# filtering positions: group the recorded filters by the floor at which
		# they must be evaluated, and join the filters of a floor with 'and'
		where_positions=[item[1] for item in self.where]
		max_where_pos=max(where_positions+[0])
		all_where=[[] for item in range(max_where_pos+1)]
		for i in range(0,len(self.where)):
			all_where[where_positions[i]]+=[self.where[i][0]]
		all_where=[" and ".join(item) for item in all_where]
		for i in range(len(all_where)):
			if (all_where[i]!=''):
				all_where[i]=" where "+all_where[i]
		# first floor: select straight from schema.input_relation
		first_values=[item[0] for item in all_imputations_grammar]
		for i in range(0,len(first_values)):
			first_values[i]=first_values[i]+" as "+self.columns[i]
		table="select "+", ".join(first_values)+" from "+self.schema+"."+self.input_relation+tablesample
		# all the other floors: wrap the previous floor in a subquery aliased
		# t<i>, substituting "{}" with the column name, then append that
		# floor's where-clause (if any)
		for i in range(1,max_len):
			values=[item[i] for item in all_imputations_grammar]
			for j in range(0,len(values)):
				values[j]=values[j].replace("{}",self.columns[j])+" as "+self.columns[j]
			table="select "+", ".join(values)+" from ("+table+") t"+str(i)
			try:
				table+=all_where[i-1]
			except:
				# no filter registered at this floor
				pass
		# add the limit and the offset in the end of the query
		if (type(self.offset)==int) and (self.offset>0):
			table+=" offset "+str(self.offset)
		if (type(self.limit)==int) and (self.limit>=0):
			table+=" limit "+str(self.limit)
		# finally attach the last floor's filter (if any) and alias the whole
		# expression 'new_table' so callers can use it as a FROM target
		try:
			if (all_where[max_len-1]==""):
				table="("+table+") new_table"
			else:
				table="("+table+") t"+str(max_len)
				table+=all_where[max_len-1]
				table="(select * from "+table+") new_table"
		except:
			table="("+table+") new_table"
		return table
{}\".format(self._table_transf_(),limit)\n\t\tself._display_query_(query)\n\t\tstart_time = time.time()\n\t\tself.cursor.execute(query)\n\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tcolumn_names=[column[0] for column in self.cursor.description]\n\t\tquery_result=self.cursor.fetchall()\n\t\t#print(\"The value of query_result is {}\".format(query_result))\n\t\t#print(\"----------------------------------------------------\")\n\t\tdata=[list(item) for item in query_result]\n\t\t#print(\"The value of data is {}\".format(data))\n\t\tdf=pd.DataFrame(data)\n\t\tdf.columns = column_names\n\t\treturn df\n\n\n\tdef add_feature(self,alias,imputation):\n\t\tif not(isinstance(alias,str)):\n\t\t\traise TypeError(\"The parameter 'alias' must be a varchar\")\n\t\tif not(isinstance(imputation,str)):\n\t\t\traise TypeError(\"The parameter 'imputation' must be a varchar\")\n\t\ttry:\n\t\t\tname=\"_vpython\"+str(np.random.randint(10000000))+\"_\"\n\t\t\tquery=\"drop table if exists \"+name\n\t\t\tself._display_query_(query,title=\"Drop the existing generated table\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tquery=\"create temporary table \"+name+\" as select {} as {} from {} limit 20\".format(\n\t\t\t\timputation,alias,self.input_relation)\n\t\t\tself._display_query_(query,title=\"Create a temporary table to test if the new feature is correct\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tquery=\"select data_type from columns where column_name='{}' and table_name='{}'\".format(\n\t\t\t\talias,name)\n\t\t\tself._display_query_(query,title=\"Catch the type of the new feature\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tctype=self.cursor.fetchone()\n\t\t\tquery=\"drop table if exists 
\"+name\n\t\t\tself._display_query_(query,title=\"Drop the temporary table\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tif not(ctype==[]):\n\t\t\t\tctype=ctype[0]\n\t\t\t\t# RVC category (date|int|float|text)\n\t\t\t\tif (ctype[0:4]==\"date\") or (ctype[0:4]==\"time\") or (ctype[0:8]==\"interval\"):\n\t\t\t\t\tcategory=\"date\"\n\t\t\t\telif ((ctype[0:3]==\"int\") or (ctype[0:4]==\"bool\")):\n\t\t\t\t\tcategory=\"int\"\n\t\t\t\telif ((ctype[0:7]==\"numeric\") or (ctype[0:5]==\"float\")):\n\t\t\t\t\tcategory=\"float\"\n\t\t\t\telse:\n\t\t\t\t\tcategory=\"text\"\n\t\t\telse:\n\t\t\t\tctype=\"undefined\"\n\t\t\t\tcategory=\"undefined\"\n\t\t\tnew_rvc=RVC(alias,parent=self,first_transformation=(imputation,ctype,category))\n\t\t\tsetattr(self,alias,new_rvc)\n\t\t\tself.columns+=[alias]\n\t\t\tprint(\"The new RVC '{}' was added to the RVD.\".format(\n\t\t\t\talias))\n\t\t\tself.rvd_history+=[\"{\"+time.strftime(\"%c\")+\"} \"+\"[Add Feature]: A new RVC '{}' was added to the RVD.\".format(\n\t\t\t\t\t\t\t\talias)]\n\t\texcept:\n\t\t\traise Exception(\"An error occurs during the creation of the new feature\")\n\t# Draw a 2D bar\n\tdef bar(self,columns,method=\"density\",of=None,max_cardinality=[6,6],h=[None,None],color=None,limit_distinct_elements=200,stacked=False):\n\t\tif (color==None):\n\t\t\tcolor=self.colors\n\t\tif not(isinstance(stacked,bool)):\n\t\t\traise TypeError(\"The parameter 'stacked' must be a bool\")\n\t\tbbox_to_anchor,ncol,loc=self._legend_loc_init_()\n\t\tif (type(columns)==str):\n\t\t\tself[columns].bar(method=method,of=of,max_cardinality=max_cardinality[0],h=h[0],\n\t\t\t\tcolor=color[0])\n\t\telif ((type(columns)==list) and (len(columns)==1)):\n\t\t\treturn 
	# Draw a 2D bar
	def bar(self,columns,method="density",of=None,max_cardinality=[6,6],h=[None,None],color=None,limit_distinct_elements=200,stacked=False):
		# Draw a horizontal bar chart. With one column, delegate to the RVC's
		# own bar(); with two columns, pivot columns[0] by columns[1] and draw
		# grouped (or stacked) bars. 'method' selects the aggregation
		# (density/count or avg|min|max|sum of the 'of' column).
		if (color==None):
			color=self.colors
		if not(isinstance(stacked,bool)):
			raise TypeError("The parameter 'stacked' must be a bool")
		bbox_to_anchor,ncol,loc=self._legend_loc_init_()
		if (type(columns)==str):
			self[columns].bar(method=method,of=of,max_cardinality=max_cardinality[0],h=h[0],
				color=color[0])
		elif ((type(columns)==list) and (len(columns)==1)):
			return self[columns[0]].bar(method=method,of=of,max_cardinality=max_cardinality[0],h=h[0],
				color=color[0])
		else:
			# data_columns: first list = categories of columns[0]; following
			# lists = one aggregated series per category of columns[1]
			all_columns=self.pivot_table(columns,method=method,of=of,h=h,max_cardinality=max_cardinality,show=False,
				limit_distinct_elements=limit_distinct_elements).data_columns
			plt.figure(figsize=self.figsize,facecolor='white')
			plt.rcParams['axes.facecolor']='#F5F5F5'
			n=len(all_columns)
			m=len(all_columns[0])
			n_groups=m-1
			index=np.arange(n_groups)
			bar_width=0.5
			for i in range(1,n):
				# Coerce the series to floats; non-numeric cells count as 0
				current_column=all_columns[i][1:m]
				for idx,item in enumerate(current_column):
					try:
						current_column[idx]=float(item)
					except:
						current_column[idx]=0
				current_label=str(all_columns[i][0])
				if (stacked):
					# 'last_column' accumulates widths already drawn (left offsets)
					if (i==1):
						last_column=[0 for item in all_columns[i][1:m]]
					else:
						for idx,item in enumerate(all_columns[i-1][1:m]):
							try:
								last_column[idx]+=float(item)
							except:
								last_column[idx]+=0
					plt.barh(index,current_column,bar_width,alpha=0.86,
						color=color[i-1],label=current_label,left=last_column)
				else:
					# Grouped bars: shift each series by a fraction of bar_width
					plt.barh(index+(i-1)*bar_width/(n-1),current_column,bar_width/(n-1),alpha=0.86,
						color=color[i-1],label=current_label)
			if (stacked):
				plt.yticks(index,all_columns[0][1:m])
			else:
				plt.yticks(index+bar_width/2-bar_width/2/(n-1),all_columns[0][1:m])
			# Widen the left margin to fit the longest category label
			plt.subplots_adjust(left=max(0.3,len(max([str(item) for item in all_columns[0][1:m]],key=len))/140.0))
			plt.ylabel(columns[0])
			if (method=="mean"):
				method="avg"
			if (method=="density"):
				plt.xlabel('Density')
				plt.title('Distribution of {} group by {}'.format(columns[0],columns[1]))
			elif ((method in ["avg","min","max","sum"]) and (of!=None)):
				plt.xlabel("{}({})".format(method,of))
				plt.title('{}({}) of {} group by {}'.format(method,of,columns[0],columns[1]))
			else:
				plt.xlabel('Frequency')
				plt.title('Count by {} group by {}'.format(columns[0],columns[1]))
			plt.legend(title=columns[1],loc=loc,ncol=ncol,bbox_to_anchor=bbox_to_anchor)
			plt.gca().set_axisbelow(True)
			plt.gca().xaxis.grid()
			plt.show()
(of!=None)):\n\t\t\t\tplt.xlabel(\"{}({})\".format(method,of))\n\t\t\t\tplt.title('{}({}) of {} group by {}'.format(method,of,columns[0],columns[1]))\n\t\t\telse:\n\t\t\t\tplt.xlabel('Frequency')\n\t\t\t\tplt.title('Count by {} group by {}'.format(columns[0],columns[1]))\n\t\t\tplt.legend(title=columns[1],loc=loc,ncol=ncol,bbox_to_anchor=bbox_to_anchor)\n\t\t\tplt.gca().set_axisbelow(True)\n\t\t\tplt.gca().xaxis.grid()\n\t\t\tplt.show()\n\t# case columns size=2: Return the correlation between two elements of the RVD\n\t# else: Return the correlation matrix \n\tdef corr(self,columns=[],cmap=\"PRGn\",show=True):\n\t\tif not(isinstance(show,bool)):\n\t\t\traise TypeError(\"The parameter 'show' must be a bool\")\n\t\tif not(isinstance(cmap,str)):\n\t\t\traise TypeError(\"The parameter 'cmap' must be a varchar\")\n\t\tif not(isinstance(columns,(list,str))):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of different RVD columns\")\n\t\telse:\n\t\t\tif (isinstance(columns,str)):\n\t\t\t\tcolumns=[columns]\n\t\t\tfor item in columns:\n\t\t\t\tif not(item in self.columns):\n\t\t\t\t\traise Exception(\"The parameter 'columns' must be a list of different RVD columns\")\n\t\tif (type(columns)==str) or (len(columns)==1):\n\t\t\treturn 1\n\t\telif (len(columns)==2):\n\t\t\tquery=\"select round(corr({},{}),3) from {}\".format(columns[0],columns[1],self._table_transf_())\n\t\t\tself._display_query_(query,title=\"Compute the Correlation between the two variables\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tresult=self.cursor.fetchone()[0]\n\t\t\treturn result\n\t\telif (len(columns)>=2):\n\t\t\tall_corr=[]\n\t\t\tn=len(columns)\n\t\t\tfor i in range(1,n):\n\t\t\t\tfor j in range(0,i):\n\t\t\t\t\tall_corr+=[\"round(corr(\"+columns[i]+\",\"+columns[j]+\"),3)\"]\n\t\t\tall_corr=\",\".join(all_corr)\n\t\t\tquery=\"select {} from 
{}\".format(all_corr,self._table_transf_())\n\t\t\tself._display_query_(query,title=\"Compute all the Correlations in a single query\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tresult=self.cursor.fetchone()\n\t\t\tmatrix=[[1 for i in range(0,n+1)] for i in range(0,n+1)]\n\t\t\tmatrix[0]=[\"\"]+columns\n\t\t\tfor i in range(0,n+1):\n\t\t\t\tmatrix[i][0]=columns[i-1]\n\t\t\tk=0\n\t\t\tfor i in range(1,n):\n\t\t\t\tfor j in range(0,i):\n\t\t\t\t\tcurrent_corr=result[k]\n\t\t\t\t\tk+=1\n\t\t\t\t\tif (current_corr==None):\n\t\t\t\t\t\tcurrent_corr=0\n\t\t\t\t\tmatrix[i+1][j+1]=current_corr\n\t\t\t\t\tmatrix[j+1][i+1]=current_corr\n\t\t\tif (show):\n\t\t\t\twith_numbers=True\n\t\t\t\tmatrix_to_draw=matrix\n\t\t\t\tmround=3\n\t\t\t\tif (n>8) and (n<=12):\n\t\t\t\t\tmround=2\n\t\t\t\telif (n>12) and (n<19):\n\t\t\t\t\tmround=1\n\t\t\t\telif n>=19:\n\t\t\t\t\twith_numbers=False\n\t\t\t\tself._show_matrix_(matrix_to_draw,columns,columns,n,n,vmax=1,vmin=-1,cmap=cmap,\n\t\t\t\t\ttitle='Correlation Matrix of {} RVD'.format(self.input_relation),\n\t\t\t\t\tmround=mround,with_numbers=with_numbers)\n\t\t\treturn column_matrix(matrix)\n\t\telse:\n\t\t\tnumerical_bool_columns=[]\n\t\t\tfor column in self.columns:\n\t\t\t\tif (self[column].category() in [\"bool\",\"int\",\"float\"]):\n\t\t\t\t\tnumerical_bool_columns+=[column]\n\t\t\tif (len(numerical_bool_columns)==0):\n\t\t\t \traise Exception(\"No numerical columns found in the RVD.\")\n\t\t\telse:\n\t\t\t\treturn self.corr(columns=numerical_bool_columns,show=show)\n\t# case columns size=2: Return the correlation between the first element and the log of the second one\n\t# else: Return the correlation log matrix \n\tdef corr_log(self,columns=[],cmap=\"PRGn\",epsilon=1e-8,show=True):\n\t\tif not(isinstance(show,bool)):\n\t\t\traise TypeError(\"The parameter 'show' must be a bool\")\n\t\tif not(isinstance(cmap,str)):\n\t\t\traise 
TypeError(\"The parameter 'cmap' must be a varchar\")\n\t\tif not(isinstance(columns,(list,str))):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of different RVD columns\")\n\t\telse:\n\t\t\tif (isinstance(columns,str)):\n\t\t\t\tcolumns=[columns]\n\t\t\tfor item in columns:\n\t\t\t\tif not(item in self.columns):\n\t\t\t\t\traise Exception(\"The parameter 'columns' must be a list of different RVD columns\")\n\t\tif (type(columns)==str) or (len(columns)==1):\n\t\t\tquery=\"select round(corr({},log({}+\"+str(epsilon)+\")),3) from {}\".format(columns[0],columns[0],self._table_transf_())\n\t\t\tself._display_query_(query,title=\"Compute the Correlation between the two variables\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tresult=self.cursor.fetchone()[0]\n\t\t\treturn result\n\t\telif (len(columns)==2):\n\t\t\tquery=\"select round(corr({},log({}+\"+str(epsilon)+\")),3) from {}\".format(columns[0],columns[1],self._table_transf_())\n\t\t\tself._display_query_(query,title=\"Compute the Correlation between the two variables\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tresult=self.cursor.fetchone()[0]\n\t\t\treturn result\n\t\telif (len(columns)>=2):\n\t\t\tall_corr=[]\n\t\t\tn=len(columns)\n\t\t\tfor i in range(0,n):\n\t\t\t\tfor j in range(0,n):\n\t\t\t\t\tall_corr+=[\"round(corr(\"+columns[i]+\",log(\"+columns[j]+\"+\"+str(epsilon)+\")),3)\"]\n\t\t\tall_corr=\",\".join(all_corr)\n\t\t\tquery=\"select {} from {}\".format(all_corr,self._table_transf_())\n\t\t\tself._display_query_(query,title=\"Compute all the Correlations in a single query\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tresult=self.cursor.fetchone()\n\t\t\tmatrix=[[1 for i in range(0,n+1)] for i in 
range(0,n+1)]\n\t\t\tmatrix[0]=[\"\"]+columns\n\t\t\tfor i in range(0,n+1):\n\t\t\t\tmatrix[i][0]=\"log(\"+columns[i-1]+\")\"\n\t\t\tfor i in range(0,n):\n\t\t\t\tfor j in range(0,n):\n\t\t\t\t\tmatrix[i+1][j+1]=result[i+n*j]\n\t\t\tif (show):\n\t\t\t\twith_numbers=True\n\t\t\t\tmatrix_to_draw=matrix\n\t\t\t\tmround=3\n\t\t\t\tif (n>8) and (n<=12):\n\t\t\t\t\tmround=2\n\t\t\t\telif (n>12) and (n<19):\n\t\t\t\t\tmround=1\n\t\t\t\telif n>=19:\n\t\t\t\t\twith_numbers=False\n\t\t\t\tself._show_matrix_(matrix_to_draw,columns,[\"log(\"+item+\")\" for item in columns],n,n,vmax=1,vmin=-1,cmap=cmap,\n\t\t\t\t\ttitle='Correlation Log Matrix of {} RVD using epsilon={}'.format(self.input_relation,epsilon),\n\t\t\t\t\tmround=mround,with_numbers=with_numbers)\n\t\t\treturn column_matrix(matrix)\n\t\telse:\n\t\t\tnumerical_bool_columns=[]\n\t\t\tfor column in self.columns:\n\t\t\t\tif (self[column].category() in [\"bool\",\"int\",\"float\"]):\n\t\t\t\t\tnumerical_bool_columns+=[column]\n\t\t\tif (len(numerical_bool_columns)==0):\n\t\t\t \traise Exception(\"No numerical columns found in the RVD.\")\n\t\t\telse:\n\t\t\t\treturn self.corr_log(columns=numerical_bool_columns,show=show)\n\t# Return the number of elements in the RVD\n\tdef count(self):\n\t\tquery=\"select count(*) from {}\".format(self._table_transf_())\n\t\tself.cursor.execute(query)\n\t\ttotal=self.cursor.fetchone()[0]\n\t\treturn total\n\t# Return the current table, we are working on\n\tdef current_table(self):\n\t\treturn self._table_transf_()\n\t# Generates descriptive statistics that summarize the Table\n\t# mode can be auto (only numerical values are printed),all (it will describe all the column one per one)\n\t# ,categorical (only categorical variables are described (cardinality<=6)) or date\n\tdef describe(self,mode=\"auto\",columns=None,include_cardinality=True):\n\t\tif not(isinstance(columns,list)):\n\t\t\tcolumns=self.columns\n\t\telse:\n\t\t\tfor column in columns:\n\t\t\t\tif column not in 
	# Generates descriptive statistics that summarize the Table
	# mode can be auto (only numerical values are printed),all (it will describe all the column one per one)
	# ,categorical (only categorical variables are described (cardinality<=6)) or date
	def describe(self,mode="auto",columns=None,include_cardinality=True):
		# 'auto' tries a single summarize_numcol() call over all numerical
		# columns (falling back to mode='all' on any failure); the other modes
		# describe the matching columns one by one via their RVC.
		if not(isinstance(columns,list)):
			columns=self.columns
		else:
			for column in columns:
				if column not in self.columns:
					raise TypeError("RVC '"+column+"' doesn't exist")
		if not(isinstance(include_cardinality,bool)):
			raise TypeError("The parameter 'include_cardinality' must be a bool")
		if (mode=="auto"):
			try:
				# NOTE(review): dead code — include_cardinality is always a
				# bool here (checked above), so this branch never runs and
				# 'include_cardinality_temp' is never read.
				if (type(include_cardinality)!=bool):
					include_cardinality_temp=True
				# Summarize all int/float columns in one analytic call;
				# booleans are cast to int so summarize_numcol accepts them
				query="select summarize_numcol("
				for column in columns:
					if ((self[column].category()=="float") or (self[column].category()=="int")):
						if (self[column].transformations[-1][1]=="boolean"):
							query+=column+"::int,"
						else:
							query+=column+","
				query=query[:-1]
				query+=") over () from {}".format(self._table_transf_())
				self._display_query_(query,title="Compute the descriptive statistics of all the numerical columns")
				start_time = time.time()
				self.cursor.execute(query)
				self._display_time_(elapsed_time=time.time()-start_time)
				query_result=self.cursor.fetchall()
				data=[item for item in query_result]
				data_columns=[['column'],['count'],['mean'],['std'],['min'],['25%'],['50%'],['75%'],['max']]
				for row in data:
					for idx,val in enumerate(row):
						data_columns[idx]+=[val]
				if (include_cardinality):
					# Try one combined count(distinct ...) query first, then
					# fall back to one query per column on failure
					query=[]
					try:
						for column in data_columns[0][1:]:
							query+=["count(distinct {})".format(column)]
						query="select "+",".join(query)+" from "+self._table_transf_()
						self._display_query_(query,title="Compute the cardinalities of all the elements in a single query")
						start_time = time.time()
						self.cursor.execute(query)
						self._display_time_(elapsed_time=time.time()-start_time)
						cardinality=self.cursor.fetchone()
						cardinality=[item for item in cardinality]
					except:
						cardinality=[]
						for column in data_columns[0][1:]:
							query="select count(distinct {}) from {}".format(column,self._table_transf_())
							self._display_query_(query,title="Fail: Compute one per one all the cardinalities")
							start_time = time.time()
							self.cursor.execute(query)
							self._display_time_(elapsed_time=time.time()-start_time)
							cardinality+=[self.cursor.fetchone()[0]]
					data_columns+=[['cardinality']+cardinality]
				return column_matrix(data_columns)
			except:
				return self.describe(mode="all")
		elif (mode=="all"):
			for column in columns:
				print(self[column].describe())
				print("-"*(len(column)+len(self[column].ctype())+15))
		elif (mode=="categorical"):
			for column in columns:
				if ((self[column].cardinality() <= 6) or (self[column].category()=="text")):
					print(self[column].describe())
					print("-"*(len(column)+len(self[column].ctype())+15))
		elif (mode=="date"):
			for column in columns:
				if ((self[column].category()=="date")):
					print(self[column].describe())
					print("-"*(len(column)+len(self[column].ctype())+15))
		else:
			raise TypeError("The parameter 'mode' must be in auto|all|categorical|date")
column in data_columns[0][1:]:\n\t\t\t\t\t\t\tquery=\"select count(distinct {}) from {}\".format(column,self._table_transf_())\n\t\t\t\t\t\t\tself._display_query_(query,title=\"Fail: Compute one per one all the cardinalities\")\n\t\t\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\t\t\tself.cursor.execute(query)\n\t\t\t\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\t\t\t\tcardinality+=[self.cursor.fetchone()[0]]\n\t\t\t\t\tdata_columns+=[['cardinality']+cardinality]\n\t\t\t\treturn column_matrix(data_columns)\n\t\t\texcept:\n\t\t\t\treturn self.describe(mode=\"all\")\n\t\telif (mode==\"all\"):\n\t\t\tfor column in columns:\n\t\t\t\tprint(self[column].describe())\n\t\t\t\tprint(\"-\"*(len(column)+len(self[column].ctype())+15))\n\t\telif (mode==\"categorical\"):\n\t\t\tfor column in columns:\n\t\t\t\tif ((self[column].cardinality() <= 6) or (self[column].category()==\"text\")):\n\t\t\t\t\tprint(self[column].describe())\n\t\t\t\t\tprint(\"-\"*(len(column)+len(self[column].ctype())+15))\n\t\telif (mode==\"date\"):\n\t\t\tfor column in columns:\n\t\t\t\tif ((self[column].category()==\"date\")):\n\t\t\t\t\tprint(self[column].describe())\n\t\t\t\t\tprint(\"-\"*(len(column)+len(self[column].ctype())+15))\n\t\telse:\n\t\t\traise TypeError(\"The parameter 'mode' must be in auto|all|categorical|date\")\n\t# Drop the RVD columns\n\tdef drop_columns(self,columns=[]):\n\t\tif not(isinstance(columns,list)):\n\t\t\tif not(isinstance(columns,str)):\n\t\t\t\traise TypeError(\"The parameter 'columns' must be a list of different RVD columns\")\n\t\t\telse:\n\t\t\t\tcolumns=[columns]\n\t\tfor column in columns:\n\t\t\tif (column in self.columns):\n\t\t\t\tself[column].drop_column()\n\t\t\telse:\n\t\t\t\tprint(\"/!\\\\ Warning: Column '{}' is not in the RVD.\".format(column))\n\t# Restart the cursor using the Vertica DSN (pyodbc must be installed)\n\tdef dsn_restart(self):\n\t\timport pyodbc\n\t\tself.cursor=pyodbc.connect(\"DSN=\"+self.dsn).cursor()\n\t# Return 
the RVD's columns types\n\tdef dtypes(self):\n\t\tljust_val=len(max([str(item) for item in self.columns],key=len))+2\n\t\tctypes=[]\n\t\tall_types=[]\n\t\tfor column in self.columns:\n\t\t\tall_types+=[self[column].ctype()]\n\t\tformatted_text=print_table([[\"\"]+self.columns,[\"type\"]+all_types],repeat_first_column=True,first_element=\"\")[0:-2]\n\t\tif not(isnotebook()):\n\t\t\tprint(formatted_text)\n\t\tprint(\"Name: {},Number of rows: {},Number of columns: {}\".format(self.input_relation,self.count(),len(self.columns)))\n\t# Filter the values of the RVD (adding a where clause to the RVD) following the conditions\n\tdef filter(self,conditions):\n\t\tcount=self.count()\n\t\tif (type(conditions)!=str):\n\t\t\tif (isinstance(conditions,list)):\n\t\t\t\tfor item in conditions:\n\t\t\t\t\tself.filter(item)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint(\"/!\\\\ Warning: 'conditions' must be a varchar (found {}).\\nNothing was filtered.\".format(type(item)))\n\t\t\t\treturn False\n\t\tmax_pos=0\n\t\tfor column in self.columns:\n\t\t\tif (column in conditions):\n\t\t\t\tmax_pos=max(max_pos,len(self[column].transformations)-1)\n\t\tself.where+=[(conditions,max_pos)]\n\t\ttry:\n\t\t\tcount-=self.count()\n\t\texcept:\n\t\t\tdel self.where[-1]\n\t\t\tprint(\"/!\\\\ Warning: The condition '{}' is incorrect.\\nNothing was filtered.\".format(conditions))\n\t\t\treturn False\n\t\tif (count>1):\n\t\t\tprint(\"{} elements were filtered\".format(count))\n\t\t\tself.rvd_history+=[\"{\"+time.strftime(\"%c\")+\"} \"+\"[Filter]: {} elements were filtered using the filter '{}'\".format(count,conditions)]\n\t\telif (count==1):\n\t\t\tprint(\"{} element was filtered\".format(count))\n\t\t\tself.rvd_history+=[\"{\"+time.strftime(\"%c\")+\"} \"+\"[Filter]: {} element was filtered using the filter '{}'\".format(count,conditions)]\n\t\telse:\n\t\t\tdel self.where[-1]\n\t\t\tprint(\"Nothing was filtered.\")\n\t# Fully Stacked Bar\n\tdef 
fully_stacked_bar(self,columns,max_cardinality=[6,6],h=[None,None],color=None,limit_distinct_elements=200):\n\t\tif (color==None):\n\t\t\tcolor=self.colors\n\t\tbbox_to_anchor,ncol,loc=self._legend_loc_init_()\n\t\tif (type(columns)==str) or ((type(columns)==list) and (len(columns)==1)):\n\t\t\tprint(\"/!\\\\ Warning: Fully Stacked Bar is only available with two variables.\")\n\t\telse:\n\t\t\tall_columns=self.pivot_table(columns,method=\"density\",h=h,max_cardinality=max_cardinality,show=False,\n\t\t\t\tlimit_distinct_elements=limit_distinct_elements).data_columns\n\t\t\tplt.figure(figsize=self.figsize,facecolor='white')\n\t\t\tplt.rcParams['axes.facecolor']='#F5F5F5'\n\t\t\tn=len(all_columns)\n\t\t\tm=len(all_columns[0])\n\t\t\tn_groups=m-1\n\t\t\tindex=np.arange(n_groups)\n\t\t\tbar_width=0.5\n\t\t\ttotal=[0 for item in range(1,m)]\n\t\t\tfor i in range(1,n):\n\t\t\t\tfor j in range(1,m):\n\t\t\t\t\tif not(type(all_columns[i][j]) in [str]):\n\t\t\t\t\t\ttotal[j-1]+=float(all_columns[i][j])\n\t\t\tfor i in range(1,n):\n\t\t\t\tfor j in range(1,m):\n\t\t\t\t\tif not(type(all_columns[i][j]) in [str]):\n\t\t\t\t\t\tall_columns[i][j]=float(all_columns[i][j])/total[j-1]\n\t\t\tfor i in range(1,n):\n\t\t\t\tcurrent_column=all_columns[i][1:m]\n\t\t\t\tfor idx,item in enumerate(current_column):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcurrent_column[idx]=float(item)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcurrent_column[idx]=0\n\t\t\t\tcurrent_label=str(all_columns[i][0])\n\t\t\t\tif (i==1):\n\t\t\t\t\tlast_column=[0 for item in all_columns[i][1:m]]\n\t\t\t\telse:\n\t\t\t\t\tfor idx,item in enumerate(all_columns[i-1][1:m]):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tlast_column[idx]+=float(item)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tlast_column[idx]+=0\n\t\t\t\tplt.barh(index,current_column,bar_width,alpha=0.86,\n\t\t\t\t\tcolor=color[i-1],label=current_label,left=last_column)\n\t\t\tplt.yticks(index,all_columns[0][1:m])\n\t\t\tplt.subplots_adjust(left=max(0.3,len(max([str(item) for item in 
all_columns[0][1:m]],key=len))/140.0))\n\t\t\tplt.ylabel(columns[0])\n\t\t\tplt.xlabel('Density per category')\n\t\t\tplt.title('Distribution per category of {} group by {}'.format(columns[0],columns[1]))\n\t\t\tplt.legend(title=columns[1],loc=loc,ncol=ncol,bbox_to_anchor=bbox_to_anchor)\n\t\t\tplt.gca().set_axisbelow(True)\n\t\t\tplt.gca().xaxis.grid()\n\t\t\tplt.show()\n\t# Group by the elements\n\tdef group_by(self,columns,aggregations,order_by=None,limit=1000):\n\t\tif (not(isinstance(limit,int)) or (limit<1)):\n\t\t\traise TypeError(\"The parameter 'limit' must be a strictly positive integer\")\n\t\tif (type(columns)==str):\n\t\t\tcolumns=[columns]\n\t\telif (type(columns)!=list):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of varchar\")\n\t\tif (type(aggregations)==str):\n\t\t\taggregations=[aggregations]\n\t\telif (type(aggregations)!=list):\n\t\t\traise TypeError(\"The parameter 'aggregations' must be a list of varchar\")\n\t\tif (type(order_by)==str):\n\t\t\torder_by=\" order by \"+order_by\n\t\telif (type(order_by)==list):\n\t\t\torder_by=\" order by \"+\", \".join(order_by)\n\t\telse:\n\t\t\torder_by=\" order by \"+\", \".join(columns)\n\t\taggregations_alias=[]\n\t\tfor item in aggregations:\n\t\t\tif \"(*)\" in item:\n\t\t\t\taggregations_alias+=[item.replace('(','').replace(')','').replace('*','')]\n\t\t\telse:\n\t\t\t\taggregations_alias+=[item.replace('(','_').replace(')','').replace('*','').replace('/','_').replace('+','_').replace('-','_')]\n\t\tquery=(\"select \"+\", \".join(columns+[aggregations[i]+\" as \"+aggregations_alias[i] for i in range(len(aggregations))])+\" from \"+\n\t\t\t\tself._table_transf_()+\" group by \"+\", \".join(columns)+order_by)\n\t\treturn run_query(query,self.cursor,limit=limit)\n\t# Print the first n rows of the rvd\n\tdef head(self,n=5):\n\t\tprint(self.__repr__(limit=n))\n\t# Hexbin\n\tdef hexbin(self,columns,method=\"count\",of=None,cmap='Blues',gridsize=10,color=\"white\"):\n\t\tif 
(color==None):\n\t\t\tcolor=self.colors[0]\n\t\tif (not(isinstance(columns,list))):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of different RVD columns\")\n\t\tif (len(columns)!=2):\n\t\t\traise TypeError(\"The parameter 'columns' must be exactly of size 2 for drawing the hexbin\")\n\t\tif (method==\"mean\"):\n\t\t\tmethod=\"avg\"\n\t\tif ((method in [\"avg\",\"min\",\"max\",\"sum\"]) and (type(of)==str) and (of in self.columns)):\n\t\t\taggregate=\"{}({})\".format(method,of)\n\t\t\tof=[of]\n\t\t\tothers_aggregate=method\n\t\t\tif (method==\"avg\"):\n\t\t\t\treduce_C_function=np.mean\n\t\t\telif (method==\"min\"):\n\t\t\t\treduce_C_function=min\n\t\t\telif (method==\"max\"):\n\t\t\t\treduce_C_function=max\n\t\t\telif (method==\"sum\"):\n\t\t\t\treduce_C_function=sum\n\t\telse:\n\t\t\taggregate=\"count(*)\"\n\t\t\tof=[]\n\t\t\treduce_C_function=sum\n\t\tcount=self.count()\n\t\tif (method==\"density\"):\n\t\t\tover=\"/\"+str(float(count))\n\t\telse:\n\t\t\tover=\"\"\n\t\tquery=\"select {},{},{}{} from {} group by {},{}\".format(\n\t\t\t\tcolumns[0],columns[1],aggregate,over,self._table_transf_(),columns[0],columns[1])\n\t\tself._display_query_(query,title=\"Group all the elements for the Hexbin Plot\")\n\t\tstart_time = time.time()\n\t\tself.cursor.execute(query)\n\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tquery_result=self.cursor.fetchall()\n\t\tcolumn1=[]\n\t\tcolumn2=[]\n\t\tcolumn3=[]\n\t\tfor item in query_result:\n\t\t\tif ((item[0]!=None) and (item[1]!=None) and (item[2]!=None)):\n\t\t\t\tif (reduce_C_function in [min,max,np.mean]):\n\t\t\t\t\tcolumn1+=[float(item[0])]*2\n\t\t\t\t\tcolumn2+=[float(item[1])]*2\n\t\t\t\t\tcolumn3+=[float(item[2])]*2\n\t\t\t\telse:\n\t\t\t\t\tcolumn1+=[float(item[0])]*2\n\t\t\t\t\tcolumn2+=[float(item[1])]*2\n\t\t\t\t\tcolumn3+=[float(item[2])/2]*2\n\t\tplt.figure(figsize=self.figsize,facecolor='white')\n\t\tplt.rcParams['axes.facecolor']='white'\n\t\tplt.title('Hexbin of {} vs 
{}'.format(columns[0],columns[1]))\n\t\tplt.ylabel(columns[1])\n\t\tplt.xlabel(columns[0])\n\t\tplt.hexbin(column1,column2,C=column3,reduce_C_function=reduce_C_function,gridsize=gridsize,color=color,cmap=cmap,mincnt=1)\n\t\tif (method==\"density\"):\n\t\t\tplt.colorbar().set_label(method)\n\t\telse:\n\t\t\tplt.colorbar().set_label(aggregate)\n\t\tplt.show()\n\t# 2D hist\n\tdef hist(self,columns,method=\"density\",of=None,max_cardinality=[6,6],h=[None,None],color=None,limit_distinct_elements=200,stacked=False):\n\t\tif (color==None):\n\t\t\tcolor=self.colors\n\t\tif not(isinstance(stacked,bool)):\n\t\t\traise TypeError(\"The parameter 'stacked' must be a bool\")\n\t\tbbox_to_anchor,ncol,loc=self._legend_loc_init_()\n\t\tif (type(columns)==str):\n\t\t\tself[columns].hist(method=method,of=of,max_cardinality=max_cardinality[0],h=h[0],\n\t\t\t\tcolor=color[0])\n\t\telif ((type(columns)==list) and (len(columns)==1)):\n\t\t\treturn self[columns[0]].hist(method=method,of=of,max_cardinality=max_cardinality[0],h=h[0],\n\t\t\t\tcolor=color[0])\n\t\telse:\n\t\t\tall_columns=self.pivot_table(columns,method=method,of=of,h=h,max_cardinality=max_cardinality,show=False,\n\t\t\t\tlimit_distinct_elements=limit_distinct_elements).data_columns\n\t\t\tplt.figure(figsize=self.figsize,facecolor='white')\n\t\t\tplt.rcParams['axes.facecolor']='#F5F5F5'\n\t\t\tn=len(all_columns)\n\t\t\tm=len(all_columns[0])\n\t\t\tn_groups=m-1\n\t\t\tindex=np.arange(n_groups)\n\t\t\tbar_width=0.5\n\t\t\tfor i in range(1,n):\n\t\t\t\tcurrent_column=all_columns[i][1:m]\n\t\t\t\tfor idx,item in enumerate(current_column):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcurrent_column[idx]=float(item)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcurrent_column[idx]=0\n\t\t\t\tcurrent_label=str(all_columns[i][0])\n\t\t\t\tif (stacked):\n\t\t\t\t\tif (i==1):\n\t\t\t\t\t\tlast_column=[0 for item in all_columns[i][1:m]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor idx,item in 
enumerate(all_columns[i-1][1:m]):\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tlast_column[idx]+=float(item)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tlast_column[idx]+=0\n\t\t\t\t\tplt.bar(index,current_column,bar_width,alpha=0.86,\n\t\t\t\t\t\tcolor=color[i-1],label=current_label,bottom=last_column)\n\t\t\t\telse:\n\t\t\t\t\tplt.bar(index+(i-1)*bar_width/(n-1),current_column,bar_width/(n-1),alpha=0.86,\n\t\t\t\t\t\tcolor=color[i-1],label=current_label)\n\t\t\tif (stacked):\n\t\t\t\tplt.xticks(index,all_columns[0][1:m],rotation=90)\n\t\t\telse:\n\t\t\t\tplt.xticks(index+bar_width/2-bar_width/2/(n-1),all_columns[0][1:m],rotation=90)\n\t\t\tplt.subplots_adjust(bottom=max(0.3,len(max([str(item) for item in all_columns[0][1:m]],key=len))/140.0))\n\t\t\tplt.xlabel(columns[0])\n\t\t\tif (method==\"mean\"):\n\t\t\t\tmethod=\"avg\"\n\t\t\tif (method==\"density\"):\n\t\t\t\tplt.ylabel('Density')\n\t\t\t\tplt.title('Distribution of {} group by {}'.format(columns[0],columns[1]))\n\t\t\telif ((method in [\"avg\",\"min\",\"max\",\"sum\"]) and (of!=None)):\n\t\t\t\tplt.ylabel(\"{}({})\".format(method,of))\n\t\t\t\tplt.title('{}({}) of {} group by {}'.format(method,of,columns[0],columns[1]))\n\t\t\telse:\n\t\t\t\tplt.ylabel('Frequency')\n\t\t\t\tplt.title('Count by {} group by {}'.format(columns[0],columns[1]))\n\t\t\tplt.legend(title=columns[1],loc=loc,ncol=ncol,bbox_to_anchor=bbox_to_anchor)\n\t\t\tplt.gca().set_axisbelow(True)\n\t\t\tplt.gca().yaxis.grid()\n\t\t\tplt.show()\n\t# Resume all the modifications made on the RVD\n\tdef history(self):\n\t\tif (type(self.rvd_history)!=list or len(self.rvd_history)==0):\n\t\t\tself.rvd_history=[]\n\t\t\tprint(\"The RVD was never modified.\")\n\t\telif (len(self.rvd_history)==1):\n\t\t\tprint(\"The RVD was modified with only one action: \")\n\t\t\tprint(\" * \"+self.rvd_history[0])\n\t\telse:\n\t\t\tprint(\"The RVD was modified many times: \")\n\t\t\tfor modif in self.rvd_history:\n\t\t\t\tprint(\" * \"+modif)\n\t# Resume the number of null 
elements\n\tdef missing(self):\n\t\tcount=self.count()\n\t\tquery=[]\n\t\tall_count=[\"count(\"+column+\")\" for column in self.columns]\n\t\tquery=\"select \"+\",\".join(all_count)+\" from \"+self._table_transf_()\n\t\tself._display_query_(query,title=\"Compute the number of elements for each feature\")\n\t\tstart_time = time.time()\n\t\tself.cursor.execute(query)\n\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tquery_result=self.cursor.fetchall()\n\t\tmissing_values_count=[item for item in query_result[0]]\n\t\tcolumn_and_missing=[(missing_values_count[i],self.columns[i]) for i in range(len(missing_values_count))]\n\t\tcolumn_and_missing.sort()\n\t\tmissing_values_count=[item[0] for item in column_and_missing]\n\t\tcolumns=[item[1] for item in column_and_missing]\n\t\tmissing_array=[[\"\"]+columns,[\"total\"]+[count-item for item in missing_values_count],[\"percent\"]+[round((float(count)-item)/float(count),3) for item in missing_values_count]]\n\t\treturn column_matrix(missing_array)\n\t# Multiple Histograms\n\tdef multiple_hist(self,columns,method=\"density\",of=None,h=None,color=None):\n\t\tif (color==None):\n\t\t\tcolor=self.colors\n\t\tif (not(isinstance(columns,list))):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of different RVD columns\")\n\t\tif (len(columns)>5):\n\t\t\traise Exception(\"The number of column must be <= 5 to use 'multiple_hist' method\")\n\t\telse:\n\t\t\tbbox_to_anchor,ncol,loc=self._legend_loc_init_()\n\t\t\tplt.figure(figsize=self.figsize,facecolor='white')\n\t\t\tplt.rcParams['axes.facecolor']='#F5F5F5'\n\t\t\talpha=1\n\t\t\tall_columns=[]\n\t\t\tall_h=[]\n\t\t\tif (type(h) not in [int,float]):\n\t\t\t\tfor idx,column in enumerate(columns):\n\t\t\t\t\tis_numeric=(self[column].category()==\"float\") or (self[column].category()==\"int\")\n\t\t\t\t\tif (is_numeric):\n\t\t\t\t\t\tall_h+=[self[column]._best_hist_interval_()]\n\t\t\t\th=min(all_h)\n\t\t\tfor idx,column in 
enumerate(columns):\n\t\t\t\tis_numeric=(self[column].category()==\"float\") or (self[column].category()==\"int\")\n\t\t\t\tif (is_numeric):\n\t\t\t\t\t[x,y,z,h,is_categorical]=self[column]._hist_(method=method,of=of,max_cardinality=1,h=h)\n\t\t\t\t\th=h/0.94\n\t\t\t\t\tplt.bar(x,y,h,color=color[idx],alpha=alpha,label=column)\n\t\t\t\t\talpha-=0.2\n\t\t\t\t\tall_columns+=[columns[idx]]\n\t\t\t\telse:\n\t\t\t\t\tprint(\"/!\\\\ Warning: {} is not numerical. Its histogram will not be draw.\")\n\t\t\tplt.xlabel(\", \".join(all_columns))\n\t\t\tplt.gca().set_axisbelow(True)\n\t\t\tplt.gca().yaxis.grid()\n\t\t\tif (method==\"density\"):\n\t\t\t\tplt.ylabel('Density')\n\t\t\telif ((method in [\"avg\",\"min\",\"max\",\"sum\"]) and (of!=None)):\n\t\t\t\tplt.ylabel(method+\"(\"+of+\")\")\n\t\t\telse:\n\t\t\t\tplt.ylabel('Frequency')\n\t\t\tplt.title(\"Multiple Histograms\")\n\t\t\tplt.legend(title=\"columns\",loc=loc,ncol=1,bbox_to_anchor=bbox_to_anchor)\n\t\t\tplt.show()\n\t# Normalize the column \n\tdef normalize(self,method=\"zscore\",with_int=False):\n\t\tif not(isinstance(with_int,bool)):\n\t\t\traise TypeError(\"The parameter 'with_int' must be a bool\")\n\t\tfor column in self.columns:\n\t\t\tif (self[column].category()==\"float\") or ((self[column].category()==\"int\") and with_int):\n\t\t\t\tself[column].normalize(method=method)\n\t# Return the Pivot Table of the RVD on the corresponding column and raw [One of the most important function for Data Exploration]\n\tdef pivot_table(self,columns,method=\"count\",of=None,h=[None,None],max_cardinality=[20,20],show=True,\n\t\t\tcmap='Blues',limit_distinct_elements=1000,with_numbers=True):\n\t\tif (not(isinstance(cmap,str))):\n\t\t\traise TypeError(\"The parameter 'cmap' must be a varchar\")\n\t\tif (not(isinstance(limit_distinct_elements,int)) or (limit_distinct_elements<1)):\n\t\t\traise TypeError(\"The parameter 'limit_distinct_elements' must be a list of two strictly positive numerical integers or null\")\n\t\tif 
not(isinstance(with_numbers,bool)):\n\t\t\traise TypeError(\"The parameter 'with_numbers' must be a bool\")\n\t\tif not(isinstance(show,bool)):\n\t\t\traise TypeError(\"The parameter 'show' must be a bool\")\n\t\tif not(isinstance(h,list)):\n\t\t\traise TypeError(\"The parameter 'h' must be a list of two strictly positive numerical numbers or null\")\n\t\tif not(isinstance(max_cardinality,list)):\n\t\t\traise TypeError(\"The parameter 'max_cardinality' must be a list of two strictly positive numerical integers or null\")\n\t\t# aggregation used for the bins height\n\t\tif (method==\"mean\"):\n\t\t\tmethod=\"avg\"\n\t\tif ((method in [\"avg\",\"min\",\"max\",\"sum\"]) and (type(of)==str)):\n\t\t\tif (of in self.columns):\n\t\t\t\taggregate=\"{}({})\".format(method,of)\n\t\t\t\tof=[of]\n\t\t\t\tothers_aggregate=method\n\t\t\telse:\n\t\t\t\traise Exception(\"None RVC named '\"+of+\"' found\")\n\t\telif (method in [\"density\",\"count\"]):\n\t\t\taggregate=\"count(*)\"\n\t\t\tothers_aggregate=\"sum\"\n\t\t\tof=[]\n\t\telse:\n\t\t\traise TypeError(\"The parameter 'method' must be in avg|mean|min|max|sum\")\n\t\tfor i in range(2):\n\t\t\tif (not(isinstance(max_cardinality[i],int)) or (max_cardinality[i]<1)):\n\t\t\t\traise TypeError(\"The parameter 'max_cardinality' must be a list of two strictly positive numerical integers or null\")\n\t\tfor i in range(2):\n\t\t\tif (not(isinstance(h[i],type(None)))):\n\t\t\t\tif (not(isinstance(h[i],(int,float))) or (h[i]<=0)):\n\t\t\t\t\traise TypeError(\"The parameter 'h' must be a list of two strictly positive numbers or null\")\n\t\tif ((type(columns)!=list) or (len(columns)!=2)):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of size 2 in order to plot the Pivot Table\")\n\t\tall_columns=[]\n\t\tis_column_date=[False,False]\n\t\ttimestampadd=[\"\",\"\"]\n\t\tfor idx,column in enumerate(columns):\n\t\t\tis_numeric=(self[column].category()==\"float\") or 
(self[column].category()==\"int\")\n\t\t\tis_date=(self[column].category()==\"date\")\n\t\t\tif (is_numeric):\n\t\t\t\tif (h[idx]==None):\n\t\t\t\t\tinterval=self[column]._best_hist_interval_()\n\t\t\t\t\tinterval=round(interval,2)\n\t\t\t\telse:\n\t\t\t\t\tinterval=h[idx]\n\t\t\t\tif (self[column].category()==\"int\"):\n\t\t\t\t\tfloor_end=\"-1\"\n\t\t\t\t\tinterval=int(max(math.floor(interval),1))\n\t\t\t\telse:\n\t\t\t\t\tfloor_end=\"\"\n\t\t\t\tif (interval>1) or (self[column].category()==\"float\"):\n\t\t\t\t\tall_columns+=[\"'[' || floor({}/{})*{} ||';'|| (floor({}/{})*{}+{}{}) || ']'\".format(\n\t\t\t\t\t\tcolumn,interval,interval,column,interval,interval,interval,floor_end)]\n\t\t\t\telse:\n\t\t\t\t\tall_columns+=[\"floor({}) || ''\".format(column)]\n\t\t\telif (is_date):\n\t\t\t\tif (h[idx]==None):\n\t\t\t\t\tinterval=self[column]._best_hist_interval_()\n\t\t\t\telse:\n\t\t\t\t\tinterval=max(math.floor(h[idx]),1)\n\t\t\t\tmin_date=self[column].min()\n\t\t\t\tall_columns+=[\"floor(datediff('second','\"+str(min_date)+\"',\"+column+\")/\"+str(interval)+\")*\"+str(interval)]\n\t\t\t\tis_column_date[idx]=True\n\t\t\t\ttimestampadd[idx]=\"timestampadd('second',\"+columns[idx]+\"::int,'\"+str(min_date)+\"'::timestamp)\"\n\t\t\telse:\n\t\t\t\tall_columns+=[column]\n\t\tif (type(of)==str or (type(of)==list and (len(of)>0) and type(of[0])==str)):\n\t\t\tif (type(of)==list):\n\t\t\t\tof=of[0]\n\t\t\tsubtable=(\"(select \"+all_columns[0]+\" as \"+columns[0]+\", \"+all_columns[1]+\" as \"+columns[1]+\", \"+of+\" as \"+of+\n\t\t\t\t\t\t\" from \"+self._table_transf_()+\") pivot_table\")\n\t\t\tif (is_column_date[0] and not(is_column_date[1])):\n\t\t\t\tsubtable=(\"(select \"+timestampadd[0]+\" as \"+columns[0]+\", \"+columns[1]+\", \"+of+\" from \"+subtable+\") pivot_table_date\")\n\t\t\telif (is_column_date[1] and not(is_column_date[0])):\n\t\t\t\tsubtable=(\"(select \"+columns[0]+\", \"+timestampadd[1]+\" as \"+columns[1]+\", \"+of+\" from \"+subtable+\") 
pivot_table_date\")\n\t\t\telif (is_column_date[1] and is_column_date[0]):\n\t\t\t\tsubtable=(\"(select \"+timestampadd[0]+\" as \"+columns[0]+\", \"+timestampadd[1]+\" as \"+columns[1]+\", \"+of+\" from \"+subtable+\") pivot_table_date\")\n\t\telse:\n\t\t\tsubtable=(\"(select \"+all_columns[0]+\" as \"+columns[0]+\", \"+all_columns[1]+\" as \"+columns[1]+\n\t\t\t\t\t\t\" from \"+self._table_transf_()+\") pivot_table\")\n\t\t\tif (is_column_date[0] and not(is_column_date[1])):\n\t\t\t\tsubtable=(\"(select \"+timestampadd[0]+\" as \"+columns[0]+\", \"+columns[1]+\" from \"+subtable+\") pivot_table_date\")\n\t\t\telif (is_column_date[1] and not(is_column_date[0])):\n\t\t\t\tsubtable=(\"(select \"+columns[0]+\", \"+timestampadd[1]+\" as \"+columns[1]+\" from \"+subtable+\") pivot_table_date\")\n\t\t\telif (is_column_date[1] and is_column_date[0]):\n\t\t\t\tsubtable=(\"(select \"+timestampadd[0]+\" as \"+columns[0]+\", \"+timestampadd[1]+\" as \"+columns[1]+\" from \"+subtable+\") pivot_table_date\")\n\t\tif (len(columns)==1):\n\t\t\treturn self[columns[0]].describe(method=method,of=of)\n\t\telse:\n\t\t\tis_finished=limit_distinct_elements\n\t\t\tlimit_distinct_elements=\" limit \"+str(limit_distinct_elements)\n\t\t\tif (method==\"density\"):\n\t\t\t\tover=\"/\"+str(self.count())\n\t\t\telse:\n\t\t\t\tover=\"\"\n\t\t\tquery=\"select {},{},{}{} from {} where {} is not null and {} is not null group by {},{} order by {},{} asc\"\n\t\t\tquery=query.format(columns[0],columns[1],aggregate,over,subtable,columns[0],columns[1],\n\t\t\t\t\tcolumns[0],columns[1],columns[0],columns[1])+limit_distinct_elements\n\t\t\tself._display_query_(query,title=\"Group the features to compute the pivot table\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t# Column0 sorted categories\n\t\t\tall_column0_categories=list(set([str(item[0]) for item in 
query_result]))\n\t\t\tall_column0_categories.sort()\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\torder=[]\n\t\t\t\t\tfor item in all_column0_categories:\n\t\t\t\t\t\torder+=[float(item.split(\";\")[0].split('[')[1])]\n\t\t\t\texcept:\n\t\t\t\t\torder=[float(item) for item in all_column0_categories]\n\t\t\t\tall_column0_categories = [x for _,x in sorted(zip(order,all_column0_categories))]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t# Column1 sorted categories\n\t\t\tall_column1_categories=list(set([str(item[1]) for item in query_result])) \n\t\t\tall_column1_categories.sort()\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\torder=[]\n\t\t\t\t\tfor item in all_column1_categories:\n\t\t\t\t\t\torder+=[float(item.split(\";\")[0].split('[')[1])]\n\t\t\t\texcept:\n\t\t\t\t\torder=[float(item) for item in all_column1_categories]\n\t\t\t\tall_column1_categories = [x for _,x in sorted(zip(order,all_column1_categories))]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tall_columns=[['' for item in all_column0_categories] for item in all_column1_categories]\n\t\t\tis_finished=(is_finished>=len(all_column0_categories)*len(all_column1_categories))\n\t\t\tfor item in query_result:\n\t\t\t\tj=all_column0_categories.index(str(item[0]))\n\t\t\t\ti=all_column1_categories.index(str(item[1]))\n\t\t\t\tall_columns[i][j]=item[2]\n\t\t\tall_columns=[[all_column1_categories[i]]+all_columns[i] for i in range(0,len(all_columns))]\n\t\t\tall_columns=[[columns[0]+\"/\"+columns[1]]+all_column0_categories]+all_columns\n\t\t\tif (show):\n\t\t\t\tall_count=[item[2] for item in query_result]\n\t\t\t\tself._show_matrix_(all_columns,all_column0_categories,all_column1_categories,len(all_column0_categories),\n\t\t\t\t\tlen(all_column1_categories),vmax=max(all_count),vmin=min(all_count),\n\t\t\t\t\tcmap=cmap,title=\"Pivot Table of \"+columns[0]+\" vs \"+columns[1],\n\t\t\t\t\tcolorbar=aggregate,x_label=columns[1],y_label=columns[0],with_numbers=with_numbers)\n\t\t\treturn 
column_matrix(all_columns,first_element=columns[0]+\"/\"+columns[1])\n\t# Save the RVD by creating a view/a temporary table or a table in order to make the computations\n\t# faster or simply to apply ML algorithm on it\n\tdef save(self,name,columns=None,mode=\"view\",affect=True):\n\t\tif not(isinstance(name,str)):\n\t\t\traise TypeError(\"The parameter 'name' must be a varchar\")\n\t\tif (mode not in [\"view\",\"temporary table\",\"table\"]):\n\t\t\traise TypeError(\"The parameter mode must be in view|temporary table|table\\nNothing was saved.\")\n\t\tif (type(columns)!=list):\n\t\t\tcolumns=\"*\"\n\t\telse:\n\t\t\tfor column in columns:\n\t\t\t\tif not(column in self.columns):\n\t\t\t\t\traise Exception(\"The RVC '{}' doesn't exist\".format(column))\n\t\t\tcolumns=\",\".join(columns)\n\t\tquery=\"create {} {} as select {} from {}\".format(mode,name,columns,self._table_transf_())\n\t\tself._display_query_(query,title=\"Create a new \"+mode+\" to save the RVD\")\n\t\tstart_time = time.time()\n\t\tself.cursor.execute(query)\n\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tself.rvd_history+=[\"{\"+time.strftime(\"%c\")+\"} \"+\"[Save]: The RVD was saved into a {} named '{}'.\".format(mode,name)]\n\t\tif (affect):\n\t\t\tquery_on=self.query_on\n\t\t\tself.query_on=False\n\t\t\thistory=self.rvd_history\n\t\t\ttime_on=self.time_on\n\t\t\tself.__init__(name,self.cursor)\n\t\t\tself.rvd_history=history\n\t\t\tself.query_on=query_on\n\t\t\tself.time_on=time_on\n\t\t\tprint(\"The RVD was successfully saved.\")\n\t# Draw the scatter plot between 2/3 columns\n\tdef scatter(self,columns,max_cardinality=3,cat_priority=None,with_others=True,color=None,marker=[\"^\",\"o\",\"+\",\"*\",\"h\",\"x\",\"D\",\"1\"]*10,max_nb_points=1000):\n\t\tif (color==None):\n\t\t\tcolor=self.colors\n\t\ttry:\n\t\t\treturn self.scatter2D(columns,max_cardinality,cat_priority,with_others,color,marker,max_nb_points)\n\t\texcept:\n\t\t\ttry:\n\t\t\t\treturn 
self.scatter3D(columns,max_cardinality,cat_priority,with_others,color,marker,max_nb_points)\n\t\t\texcept:\n\t\t\t\traise Exception(\"An error occured during the execution of the 'scatter' method.\\nPlease use the\"\n\t\t\t\t\t\t\t\t+\" methods 'scatter2D' or 'scatter3D' for more details.\")\n\t# Draw the scatter plot between 2 columns\n\tdef scatter2D(self,columns,max_cardinality=3,cat_priority=None,with_others=True,color=None,marker=[\"^\",\"o\",\"+\",\"*\",\"h\",\"x\",\"D\",\"1\"]*10,max_nb_points=1000):\n\t\tif (color==None):\n\t\t\tcolor=self.colors\n\t\tif (not(isinstance(max_cardinality,int)) or (max_cardinality<1)):\n\t\t\traise TypeError(\"The parameter 'max_cardinality' must be a strictly positive integer\")\n\t\tif (not(isinstance(with_others,bool))):\n\t\t\traise TypeError(\"The parameter 'with_others' must be a bool\")\n\t\tif (not(isinstance(max_nb_points,int)) or (max_nb_points<1)):\n\t\t\traise TypeError(\"The parameter 'max_nb_points' must be a strictly positive integer\")\n\t\tif (not(isinstance(cat_priority,(list,type(None))))):\n\t\t\traise TypeError(\"The parameter 'cat_priority' must be a list of categories or null\")\n\t\tbbox_to_anchor,ncol,loc=self._legend_loc_init_()\n\t\tif (type(columns)!=list):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of columns\")\n\t\tif ((len(columns)<2) or (len(columns)>3)):\n\t\t\traise Exception(\"2D Scatter plot can only be done with at least two columns and maximum with three columns\")\n\t\telse:\n\t\t\tfor column in columns:\n\t\t\t\tif (column not in self.columns):\n\t\t\t\t\traise Exception(\"The RVC '{}' doesn't exist\".format(column))\n\t\t\tif (self[columns[0]].category() not in [\"int\",\"float\"]) or (self[columns[1]].category() not in [\"int\",\"float\"]):\n\t\t\t\traise TypeError(\"The two first value of 'columns' must be numerical\")\n\t\t\tif (len(columns)==2):\n\t\t\t\ttablesample=max_nb_points/self.count()\n\t\t\t\tquery=\"select {},{} from {} where {} is not null and {} is 
not null limit {}\".format(columns[0],columns[1],\n\t\t\t\t\tself._table_transf_(tablesample),columns[0],columns[1],max_nb_points)\n\t\t\t\tself._display_query_(query, title=\"Select random points for the scatter plot\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\tcolumn1=[item[0] for item in query_result]\n\t\t\t\tcolumn2=[item[1] for item in query_result]\n\t\t\t\tplt.figure(figsize=self.figsize)\n\t\t\t\tplt.gca().grid()\n\t\t\t\tplt.gca().set_axisbelow(True)\n\t\t\t\tplt.title('Scatter Plot of {} vs {}'.format(columns[0],columns[1]))\n\t\t\t\tplt.ylabel(columns[1])\n\t\t\t\tplt.xlabel(columns[0])\n\t\t\t\tplt.scatter(column1,column2,color=color[0],s=14)\n\t\t\t\tplt.show()\n\t\t\telse:\n\t\t\t\tcolumn_groupby=columns[2]\n\t\t\t\tcount=self.count()\n\t\t\t\tif (type(cat_priority)==list):\n\t\t\t\t\tquery_result=cat_priority\n\t\t\t\telse:\n\t\t\t\t\tquery=\"select {} from {} where {} is not null group by {} order by count(*) desc limit {}\".format(\n\t\t\t\t\t\t\tcolumn_groupby,self._table_transf_(),column_groupby,column_groupby,max_cardinality)\n\t\t\t\t\tself._display_query_(query,title=\"Select all the category of the column \"+column_groupby)\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tself.cursor.execute(query)\n\t\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\t\tquery_result=[item for sublist in query_result for item in sublist]\n\t\t\t\tall_columns=[query_result]\n\t\t\t\tall_scatter=[]\n\t\t\t\tall_categories=query_result\n\t\t\t\tfig=plt.figure(figsize=self.figsize,facecolor=\"white\")\n\t\t\t\tax=plt\n\t\t\t\tothers=[]\n\t\t\t\tgroupby_cardinality=self[column_groupby].cardinality()\n\t\t\t\tcount=self.count()\n\t\t\t\tif (count>10000):\n\t\t\t\t\ttablesample=10\n\t\t\t\telse:\n\t\t\t\t\ttablesample=90\n\t\t\t\tfor idx,category in 
enumerate(all_categories):\n\t\t\t\t\tif ((max_cardinality<groupby_cardinality) or ((type(cat_priority)==list) and len(cat_priority)<groupby_cardinality)):\n\t\t\t\t\t\t\tothers+=[\"{}!='{}'\".format(column_groupby,category)]\n\t\t\t\t\tquery=\"select {},{} from {} where {}='{}' and {} is not null and {} is not null limit {}\"\n\t\t\t\t\tquery=query.format(columns[0],columns[1],self._table_transf_(tablesample),\n\t\t\t\t\t\tcolumns[2],category,columns[0],columns[1],int(max_nb_points/len(all_categories))) \n\t\t\t\t\tself._display_query_(query,title=\"Select random points for the scatter plot (category='\"+str(category)+\"')\")\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tself.cursor.execute(query)\n\t\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\t\tcolumn1=[float(item[0]) for item in query_result]\n\t\t\t\t\tcolumn2=[float(item[1]) for item in query_result]\n\t\t\t\t\tall_columns+=[[column1,column2]]\n\t\t\t\t\tall_scatter+=[ax.scatter(column1,column2,alpha=0.8,marker=marker[idx],color=color[idx])]\n\t\t\t\tif (len(others)>0 and with_others):\n\t\t\t\t\tall_categories+=[\"others\"]\n\t\t\t\t\tquery=(\"select {},{} from {} where {} and {} is not null and {} is not null limit {}\")\n\t\t\t\t\tquery=query.format(columns[0],columns[1],self._table_transf_(tablesample),\n\t\t\t\t\t\t\" and \".join(others),columns[0],columns[1],int(max_nb_points/len(all_categories)))\n\t\t\t\t\tself._display_query_(query,title=\"Select random points for the scatter plot (category='others')\")\n\t\t\t\t\tstart_time=time.time()\n\t\t\t\t\tself.cursor.execute(query)\n\t\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\t\tcolumn1=[float(item[0]) for item in query_result]\n\t\t\t\t\tcolumn2=[float(item[1]) for item in 
query_result]\n\t\t\t\t\tall_columns+=[[column1,column2]]\n\t\t\t\t\tall_scatter+=[ax.scatter(column1,column2,alpha=0.8,marker=marker[idx+1],color=color[idx+1])]\n\t\t\t\tfor idx,item in enumerate(all_categories):\n\t\t\t\t\tif (len(str(item))>10):\n\t\t\t\t\t\tall_categories[idx]=str(item)[0:10]+\"...\"\n\t\t\t\tplt.gca().grid()\n\t\t\t\tplt.gca().set_axisbelow(True)\n\t\t\t\tplt.title('Scatter Plot of {} vs {}'.format(columns[0],columns[1]))\n\t\t\t\tax.xlabel(columns[0])\n\t\t\t\tax.ylabel(columns[1])\n\t\t\t\tax.legend(all_scatter,all_categories,scatterpoints=1,loc=loc,ncol=4,\n\t\t\t\t\t\t\ttitle=column_groupby,bbox_to_anchor=bbox_to_anchor,fontsize=8)\n\t\t\t\tplt.show()\n\t# Draw the scatter plot between 3 columns\n\tdef scatter3D(self,columns,max_cardinality=3,cat_priority=None,with_others=True,color=None,marker=[\"^\",\"o\",\"+\",\"*\",\"h\",\"x\",\"D\",\"1\"]*10,max_nb_points=1000):\n\t\tif (color==None):\n\t\t\tcolor=self.colors\n\t\tif (not(isinstance(max_cardinality,int)) or (max_cardinality<1)):\n\t\t\traise TypeError(\"The parameter 'max_cardinality' must be a strictly positive integer\")\n\t\tif (not(isinstance(with_others,bool))):\n\t\t\traise TypeError(\"The parameter 'with_others' must be a bool\")\n\t\tif (not(isinstance(max_nb_points,int)) or (max_nb_points<1)):\n\t\t\traise TypeError(\"The parameter 'max_nb_points' must be a strictly positive integer\")\n\t\tif (not(isinstance(cat_priority,(list,type(None))))):\n\t\t\traise TypeError(\"The parameter 'cat_priority' must be a list of categories or null\")\n\t\tbbox_to_anchor,ncol,loc=self._legend_loc_init_()\n\t\tif (type(columns)!=list):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of columns\")\n\t\tif ((len(columns)<3) or (len(columns)>4)):\n\t\t\traise Exception(\"3D Scatter plot can only be done with at least two columns and maximum with four columns\")\n\t\telse:\n\t\t\tfor column in columns:\n\t\t\t\tif (column not in self.columns):\n\t\t\t\t\traise Exception(\"The RVC 
'{}' doesn't exist\".format(column))\n\t\t\tfor i in range(3):\n\t\t\t\tif (self[columns[i]].category() not in [\"int\",\"float\"]):\n\t\t\t\t\traise TypeError(\"The three first value of 'columns' must be numerical\")\n\t\t\tif (len(columns)==3):\n\t\t\t\ttablesample=max_nb_points/self.count()\n\t\t\t\tquery=\"select {},{},{} from {} where {} is not null and {} is not null and {} is not null limit {}\".format(\n\t\t\t\t\t\t\tcolumns[0],columns[1],columns[2],self._table_transf_(tablesample),columns[0],\n\t\t\t\t\t\t\tcolumns[1],columns[2],max_nb_points)\n\t\t\t\tself._display_query_(query,title=\"Select random points for the scatter plot\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\tcolumn1=[float(item[0]) for item in query_result]\n\t\t\t\tcolumn2=[float(item[1]) for item in query_result]\n\t\t\t\tcolumn3=[float(item[2]) for item in query_result]\n\t\t\t\tfig=plt.figure(figsize=self.figsize,facecolor='white')\n\t\t\t\tax=fig.add_subplot(111,projection='3d')\n\t\t\t\tplt.title('Scatter Plot of {} vs {} vs {}'.format(columns[0],columns[1],columns[2]))\n\t\t\t\tax.scatter(column1,column2,column3,color=color[0])\n\t\t\t\tax.set_xlabel(columns[0])\n\t\t\t\tax.set_ylabel(columns[1])\n\t\t\t\tax.set_zlabel(columns[2])\n\t\t\t\tax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\t\t\t\tax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\t\t\t\tax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\t\t\t\tplt.show()\n\t\t\telse:\n\t\t\t\tcolumn_groupby=columns[3]\n\t\t\t\tcount=self.count()\n\t\t\t\tif (type(cat_priority)==list):\n\t\t\t\t\tquery_result=cat_priority\n\t\t\t\telse:\n\t\t\t\t\tquery=\"select {} from {} where {} is not null group by {} order by count(*) desc limit 
{}\".format(\n\t\t\t\t\t\t\tcolumn_groupby,self._table_transf_(),column_groupby,column_groupby,max_cardinality)\n\t\t\t\t\tself._display_query_(query,title=\"Select all the category of the column \"+column_groupby)\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tself.cursor.execute(query)\n\t\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\t\tquery_result=[item for sublist in query_result for item in sublist]\n\t\t\t\tall_columns=[query_result]\n\t\t\t\tall_scatter=[]\n\t\t\t\tall_categories=query_result\n\t\t\t\tfig=plt.figure(figsize=self.figsize,facecolor=\"white\")\n\t\t\t\tax=fig.add_subplot(111,projection='3d')\n\t\t\t\tothers=[]\n\t\t\t\tgroupby_cardinality=self[column_groupby].cardinality()\n\t\t\t\tif (count>10000):\n\t\t\t\t\ttablesample=10\n\t\t\t\telse:\n\t\t\t\t\ttablesample=90\n\t\t\t\tfor idx,category in enumerate(all_categories):\n\t\t\t\t\tif ((max_cardinality<groupby_cardinality) or ((type(cat_priority)==list) and len(cat_priority)<groupby_cardinality)):\n\t\t\t\t\t\t\tothers+=[\"{}!='{}'\".format(column_groupby,category)]\n\t\t\t\t\tquery=(\"select {},{},{} from {} where {}='{}' and {} is not null and {} is not null \" +\n\t\t\t\t\t\t\"and {} is not null limit {}\")\n\t\t\t\t\tquery=query.format(columns[0],columns[1],columns[2],self._table_transf_(tablesample),\n\t\t\t\t\t\tcolumns[3],category,columns[0],columns[1],columns[2],int(max_nb_points/len(all_categories))) \n\t\t\t\t\tself._display_query_(query,title=\"Select random points for the scatter plot (category='\"+str(category)+\"')\")\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tself.cursor.execute(query)\n\t\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\t\tcolumn1=[float(item[0]) for item in query_result]\n\t\t\t\t\tcolumn2=[float(item[1]) for item in query_result]\n\t\t\t\t\tcolumn3=[float(item[2]) for item in 
query_result]\n\t\t\t\t\tall_columns+=[[column1,column2,column3]]\n\t\t\t\t\tall_scatter+=[ax.scatter(column1,column2,column3,alpha=0.8,marker=marker[idx],\n\t\t\t\t\t\tcolor=color[idx])]\n\t\t\t\tif (len(others)>0 and with_others):\n\t\t\t\t\tall_categories+=[\"others\"]\n\t\t\t\t\tquery=(\"select {},{},{} from {} where {} and {} is not null and {} is not null \" +\n\t\t\t\t\t\t\"and {} is not null limit {}\")\n\t\t\t\t\tquery=query.format(columns[0],columns[1],columns[2],self._table_transf_(tablesample),\" and \".join(others),\n\t\t\t\t\t\tcolumns[0],columns[1],columns[2],int(max_nb_points/len(all_categories)))\n\t\t\t\t\tself._display_query_(query,title=\"Select random points for the scatter plot (category='others')\")\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tself.cursor.execute(query)\n\t\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\t\tquery_result=self.cursor.fetchall()\n\t\t\t\t\tcolumn1=[float(item[0]) for item in query_result]\n\t\t\t\t\tcolumn2=[float(item[1]) for item in query_result]\n\t\t\t\t\tall_columns+=[[column1,column2]]\n\t\t\t\t\tall_scatter+=[ax.scatter(column1,column2,alpha=0.8,marker=marker[idx+1],color=color[idx+1])]\n\t\t\t\tfor idx,item in enumerate(all_categories):\n\t\t\t\t\tif (len(str(item))>10):\n\t\t\t\t\t\tall_categories[idx]=str(item)[0:10]+\"...\"\n\t\t\t\tplt.title('Scatter Plot of {} vs {} vs {}'.format(columns[0],columns[1],columns[2]))\n\t\t\t\tax.set_xlabel(columns[0])\n\t\t\t\tax.set_ylabel(columns[1])\n\t\t\t\tax.set_zlabel(columns[2])\n\t\t\t\tax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\t\t\t\tax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\t\t\t\tax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\t\t\t\tif (bbox_to_anchor==(1,1)):\n\t\t\t\t\tbbox_to_anchor=(1,1.15)\n\t\t\t\tax.legend(all_scatter,all_categories,scatterpoints=1,loc=loc,ncol=4,\n\t\t\t\t\t\t\ttitle=column_groupby,bbox_to_anchor=bbox_to_anchor,fontsize=8)\n\t\t\t\tplt.show()\n\t# Draw the Scatter Plot Matrix of the 
RVD\n\tdef scatter_matrix(self, columns=None, color=None):\n\t\tif (color==None):\n\t\t\tif (type(self.colors) == list):\n\t\t\t\tcolor=self.colors[0]\n\t\t\telse:\n\t\t\t\tcolor=self.colors\n\t\tif not(isinstance(columns,type(None))):\n\t\t\tfor column in columns:\n\t\t\t\tif (column not in self.columns):\n\t\t\t\t\traise Exception(\"The RVC '{}' doesn't exist\".format(column))\n\t\tif (type(columns)!=list):\n\t\t\tcolumns=[]\n\t\t\tfor column in self.columns:\n\t\t\t\tif (self[column].category() in [\"bool\",\"int\",\"float\"]):\n\t\t\t\t\tcolumns+=[column]\n\t\telif (len(columns)==1):\t\n\t\t\tself[columns[0]].hist(color=color)\n\t\tn=len(columns)\n\t\tfig,axes=plt.subplots(nrows=n,ncols=n)\n\t\tquery=(\"select \"+\",\".join(columns)+\",random() as rand from {} \".format(self._table_transf_())+\n\t\t\t\t\"order by rand limit 1000\")\n\t\tself._display_query_(query,title=\"Select random points for the scatter plot\")\n\t\tstart_time = time.time()\n\t\tself.cursor.execute(query)\n\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tall_scatter_points=self.cursor.fetchall()\n\t\tall_scatter_columns=[]\n\t\tall_h=[]\n\t\tfor idx,column in enumerate(columns):\n\t\t\tis_numeric=(self[column].category()==\"float\") or (self[column].category()==\"int\")\n\t\t\tif (is_numeric):\n\t\t\t\tall_h+=[self[column]._best_hist_interval_()]\n\t\th=min(all_h)\n\t\tfor i in range(n):\n\t\t\tall_scatter_columns+=[[item[i] for item in all_scatter_points]]\n\t\tfor i in range(n):\n\t\t\tx = columns[i]\n\t\t\taxes[-1][i].set_xlabel(x,rotation=90)\n\t\t\taxes[i][0].set_ylabel(x,rotation=0)\n\t\t\taxes[i][0].yaxis.get_label().set_ha('right')\n\t\t\tfor j in range(n):\n\t\t\t\taxes[i][j].get_xaxis().set_ticks([])\n\t\t\t\taxes[i][j].get_yaxis().set_ticks([])\n\t\t\t\taxes[i][j].set_facecolor(\"#F0F0F0\")\n\t\t\t\ty=columns[j]\n\t\t\t\tif 
(x==y):\n\t\t\t\t\tx0,y0,z0,h0,is_categorical=self[x]._hist_(method=\"density\",h=h,max_cardinality=1)\n\t\t\t\t\taxes[i,j].bar(x0,y0,h0/0.94,color=color)\n\t\t\t\telse:\n\t\t\t\t\taxes[i,j].scatter(all_scatter_columns[j],all_scatter_columns[i],color=color,s=4,marker='o')\n\t\tfig.suptitle('Scatter Plot Matrix of {}'.format(self.input_relation))\n\t\tplt.show()\n\t# Select some columns\n\tdef select(self,columns,order_by=None,asc=True,limit=100):\n\t\tif (not(isinstance(asc,bool))):\n\t\t\traise TypeError(\"The parameter 'asc' must be a bool\")\n\t\tif (not(isinstance(limit,int)) or (limit<1)):\n\t\t\traise TypeError(\"The parameter 'limit' must be a strictly positive integer\")\n\t\tif (type(columns)==str):\n\t\t\tcolumns=[columns]\n\t\telif (type(columns)!=list):\n\t\t\traise TypeError(\"The parameter 'columns' must be a list of varchar\")\n\t\tif (asc):\n\t\t\torder=\" asc \"\n\t\telse:\n\t\t\torder=\" desc \"\n\t\tif (type(order_by)!=list):\n\t\t\tif (type(order_by)!=str):\n\t\t\t\torder_by=\"\"\n\t\t\telse:\n\t\t\t\torder_by=\" order by \"+order_by+order\n\t\telse:\n\t\t\torder_by=\" order by \"+\", \".join(order_by)+order\n\t\tquery=\"select \"+\", \".join(columns)+\" from \"+self._table_transf_()+order_by\n\t\treturn(run_query(query,self.cursor,limit=limit))\n\t# Set new rvd cursors\n\tdef set_colors(self,colors):\n\t\tself.colors=colors\n\t# Set a new cursor\n\tdef set_cursor(self,cursor):\n\t\tself.cursor=cursor\n\t# Set a new dsn\n\tdef set_dsn(self,dsn):\n\t\tself.dsn=dsn\n\t# Set the figure size\n\tdef set_figure_size(self,figsize=(7,5)):\n\t\tself.figsize=figsize\n\t# Set the label location\n\tdef set_legend_loc(self,bbox_to_anchor=None,ncol=None,loc=None):\n\t\tself.legend_loc=(bbox_to_anchor,ncol,loc)\n\t# Set the RVD limit\n\tdef set_limit(self,limit=None):\n\t\tself.limit=limit\n\t# Set the RVD offset\n\tdef set_offset(self,offset=0):\n\t\tself.offset=offset\n\t# Print all the SQL queries in the terminal\n\tdef 
sql_on_off(self,reindent=False):\n\t\tself.query_on=not(self.query_on)\n\t\tself.reindent=reindent\n\t# Draw the Stacked bar\n\tdef stacked_bar(self,columns,method=\"density\",of=None,max_cardinality=[6,6],h=[None,None],color=None,limit_distinct_elements=200):\n\t\tself.bar(columns,method=method,of=of,max_cardinality=max_cardinality,h=h,color=color,limit_distinct_elements=limit_distinct_elements,\n\t\t\t\t\tstacked=True)\n\t# Draw the Stacked Histogram\n\tdef stacked_hist(self,columns,method=\"density\",of=None,max_cardinality=[6,6],h=[None,None],color=None,limit_distinct_elements=200):\n\t\tself.hist(columns,method=method,of=of,max_cardinality=max_cardinality,h=h,color=color,limit_distinct_elements=limit_distinct_elements,\n\t\t\t\t\tstacked=True)\n\t# Display all the queries elapsed time\n\tdef time_on_off(self):\n\t\tself.time_on=not(self.time_on)\n\t# Split the rvd into two relations using a split column\n\t# If the split column does not exist, a column of random float will be created\n\t# The split column must be a columns containing random float in [0,1] \n\tdef train_test_split(self,split=None,test_name=None,train_name=None,columns=\"*\",test_size=0.33,mode=\"view\",print_info=True):\n\t\tif (mode not in [\"view\",\"temporary table\",\"table\"]):\n\t\t\traise Exception(\"The parameter 'mode' must be in view|temporary table|table\\nNothing was saved.\")\n\t\tif not(isinstance(test_name,(str,type(None)))):\n\t\t\traise TypeError(\"The parameter 'test_name' must be a varchar or null\")\n\t\tif not(isinstance(train_name,(str,type(None)))):\n\t\t\traise TypeError(\"The parameter 'train_name' must be a varchar or null\")\n\t\tif not(isinstance(split,(str,type(None)))):\n\t\t\traise TypeError(\"The parameter 'split' must be a varchar or null\")\n\t\tif (not(isinstance(test_size,float)) or (test_size<=0) or (test_size>=1)):\n\t\t\traise TypeError(\"The parameter 'test_size' must be in ]0,1[\")\n\t\tif not(isinstance(print_info,bool)):\n\t\t\traise TypeError(\"The 
parameter 'print_info' must be a bool\")\n\t\tif columns!=\"*\":\n\t\t\tfor column in columns:\n\t\t\t\tif (column not in self.columns):\n\t\t\t\t\traise Exception(\"The RVC '{}' is not in the RVD\".format(column))\n\t\telse:\n\t\t\tcolumns=self.columns\n\t\tif (type(test_name)!=str):\n\t\t\ttest_name=\"test_\"+self.input_relation+\"0\"+str(int(test_size*100))\n\t\t\tdrop_test=True\n\t\telse:\n\t\t\tdrop_test=False\n\t\tif (type(train_name)!=str):\n\t\t\ttrain_name=\"train_\"+self.input_relation+\"0\"+str(int(100-test_size*100))\n\t\t\tdrop_train=True\n\t\telse:\n\t\t\tdrop_train=False\n\t\tif (mode==\"view\"):\n\t\t\tif (drop_test):\n\t\t\t\tquery=\"drop view if exists \"+test_name\n\t\t\t\tself._display_query_(query,title=\"Drop the test view\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tif (drop_train):\n\t\t\t\tquery=\"drop view if exists \"+train_name\n\t\t\t\tself._display_query_(query,title=\"Drop the train view\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\telse:\n\t\t\tif (drop_test):\n\t\t\t\tquery=\"drop table if exists \"+test_name\n\t\t\t\tself._display_query_(query,title=\"Drop the test table\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tif (drop_train):\n\t\t\t\tquery=\"drop table if exists \"+train_name\n\t\t\t\tself._display_query_(query,title=\"Drop the train table\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tif (type(split)!=str):\n\t\t\trandom_name=\"random_vpython_table_\"+self.input_relation\n\t\t\ttry:\n\t\t\t\tquery=\"select split from \"+random_name\n\t\t\t\tself._display_query_(query,title=\"Try to see if the random table exists and contains a column 
split\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\texcept:\n\t\t\t\tquery=\"drop table if exists \"+random_name\n\t\t\t\tself._display_query_(query,title=\"The random doesn't exist or does not have the good format: drop the random table\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\tquery=(\"create table \"+random_name+\" as select row_number() over() as row_number,random() as split from \"\n\t\t\t\t\t\t+self._table_transf_())\n\t\t\t\tself._display_query_(query,title=\"Create the random table\")\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\t\tif (print_info):\n\t\t\t\t\tprint(\"The random table \"+random_name+\" was successfully created.\")\n\t\t\tquery=\"create {} {} as select {} from (select row_number() over() as row_number,* from {}) z natural join {} where split<{}\"\n\t\t\tquery=query.format(mode,test_name,\",\".join(columns),self._table_transf_(),random_name,test_size)\n\t\t\tself._display_query_(query,title=\"Create the test \"+mode)\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tquery=\"create {} {} as select {} from (select row_number() over() as row_number,* from {}) z natural join {} where split>={}\"\n\t\t\tquery=query.format(mode,train_name,\",\".join(columns),self._table_transf_(),random_name,test_size)\n\t\t\tself._display_query_(query,title=\"Create the train \"+mode)\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\telse:\n\t\t\tquery=\"create {} {} as select {} from {} where 
{}<{}\"\n\t\t\tquery=query.format(mode,test_name,\",\".join(columns),self._table_transf_(),split,test_size)\n\t\t\tself._display_query_(query,title=\"Create the test \"+mode+\" using the corresponding split\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\t\tquery=\"create {} {} as select {} from {} where {}>={}\"\n\t\t\tquery=query.format(mode,train_name,\",\".join(columns),self._table_transf_(),split,test_size)\n\t\t\tself._display_query_(query,title=\"Create the train \"+mode+\" using the corresponding split\")\n\t\t\tstart_time = time.time()\n\t\t\tself.cursor.execute(query)\n\t\t\tself._display_time_(elapsed_time=time.time()-start_time)\n\t\tif (print_info):\n\t\t\tprint(\"The \"+mode+\"s \"+test_name+\" and \"+train_name+\" were successfully created.\")\n\t\tself.rvd_history+=[\"{\"+time.strftime(\"%c\")+\"} \"+\"[Train Test Split]: The \"+mode+\"s '\"+test_name+\"' and '\"+train_name+\"' were created.\"]\n\t\treturn RVD(train_name,self.cursor),RVD(test_name,self.cursor)\n\t# Undo all the filters\n\tdef undo_all_filters(self):\n\t\tself.where=[]\n\t\tself.rvd_history+=[\"{\"+time.strftime(\"%c\")+\"} \"+\"[Undo All Filters]: All the filters were deleted.\"]\n\t# Undo the last filter\n\tdef undo_filter(self):\n\t\tif (len(self.where)>0):\n\t\t\tdel self.where[-1]\n\t\t\tself.rvd_history+=[\"{\"+time.strftime(\"%c\")+\"} \"+\"[Undo Filter]: The last filter was removed.\"]\n\t#\n\t#######################\n\t# #\n\t# Information Methods #\n\t# #\n\t#######################\n\t# \n\t# Help: return some RVD info \n\tdef help(self):\n\t\tprint(\"############################\")\n\t\tprint(\"# _______ _______ #\")\n\t\tprint(\"# | __ \\ \\ / / __ \\ #\")\n\t\tprint(\"# | |__) \\ \\ / /| | | | #\")\n\t\tprint(\"# | _ / \\ \\/ / | | | | #\")\n\t\tprint(\"# | | \\ \\ \\ / | |__| | #\")\n\t\tprint(\"# |_| \\_\\ \\/ |_____/ #\")\n\t\tprint(\"# 
#\")\n\t\tprint(\"#############################\")\n\t\tprint(\"# #\")\n\t\tprint(\"# Resilient Vertica Dataset #\")\n\t\tprint(\"# #\")\n\t\tprint(\"#############################\")\n\t\tprint(\"\")\n\t\tprint(\"The RVD is a Python object which will keep in mind all the user modifications in order \"\n\t\t\t\t+\"to use an optimized SQL query. It will send the query to the database which will use its \"\n\t\t\t\t+\"aggregations to compute fast results. It is created using a view or a table stored in the \"\n\t\t\t\t+\"user database and a database cursor. It will create for each column of the table a RVC (Resilient\"\n\t\t\t\t+\" Vertica Column) which will store for each column its name, its imputations and allows to do easy \"\n\t\t\t\t+\"modifications and explorations.\")\n\t\tprint(\"\")\n\t\tprint(\"RVC and RVD coexist and one can not live without the other. RVC will use the RVD information and reciprocally.\" \n\t\t\t\t+\" It is imperative to understand both structures to know how to use the entire object.\")\n\t\tprint(\"\")\n\t\tprint(\"When the user imputes or filters the data, the RVD gets in memory all the transformations to select for each query \"\n\t\t\t\t+\"the needed data in the input relation.\")\n\t\tprint(\"\")\n\t\tprint(\"As the RVD will try to keep in mind where the transformations occurred in order to use the appropriate query,\" \n\t\t\t\t+\" it is highly recommended to save the RVD when the user has done a lot of transformations in order to gain in efficiency\" \n\t\t\t\t+\" (using the save method). 
We can also see all the modifications using the history method.\")\n\t\tprint(\"\")\n\t\tprint(\"If you find any difficulties using vertica_ml_python, please contact me: badr.ouali@microfocus.com / I'll be glad to help.\")\n\t\tprint(\"\")\n\t\tprint(\"For more information about the different methods or the entire RVD structure, please see the entire documentation\")\n\t# Return the vertica Version and what is available\n\tdef version(self):\n\t\tquery=\"select version();\"\n\t\tself.cursor.execute(query)\n\t\tversion=self.cursor.fetchone()[0]\n\t\tprint(\"############################################################################################################\") \n\t\tprint(\"# __ __ ___ ____ ______ ____ __ ____ ___ ___ _ ____ __ __ ______ __ __ ___ ____ #\")\n\t\tprint(\"# | | | / _| \\| | | / ]/ | | | | | | \\| | | | | |/ \\| \\ #\")\n\t\tprint(\"# | | |/ [_| D | || | / /| o | | _ _ | | | o | | | | | | | _ | #\")\n\t\tprint(\"# | | | _| /|_| |_|| |/ / | | | \\_/ | |___ | _/| ~ |_| |_| _ | O | | | #\")\n\t\tprint(\"# | : | [_| \\ | | | / \\_| _ | | | | | | | |___, | | | | | | | | | #\")\n\t\tprint(\"# \\ /| | . 
\\ | | | \\ | | | | | | | | | | | | | | | | | | | #\")\n\t\tprint(\"# \\_/ |_____|__|\\_| |__| |____\\____|__|__| |___|___|_____| |__| |____/ |__| |__|__|\\___/|__|__| #\")\n\t\tprint(\"# #\")\n\t\tprint(\"############################################################################################################\")\n\t\tprint(\"#\")\n\t\tprint(\"# Author: Badr Ouali, Datascientist at Vertica\")\n\t\tprint(\"#\")\n\t\tprint(\"# You are currently using \"+version)\n\t\tprint(\"#\")\n\t\tversion=version.split(\"Database v\")\n\t\tversion_id=int(version[1][0])\n\t\tversion_release=int(version[1][2])\n\t\tif (version_id>8):\n\t\t\tprint(\"# You have a perfectly adapted version for using RVD and Vertica ML\")\n\t\telif (version_id==8):\n\t\t\tif (version_release>0):\n\t\t\t\tprint(\"# You have a perfectly adapted version for using RVD and Vertica ML except some algorithms\")\n\t\t\t\tprint(\"# Go to your Vertica version documentation for more information\")\n\t\t\t\tprint(\"# Unavailable algorithms: rf_regressor and cross_validate\")\n\t\t\telse:\n\t\t\t\tprint(\"# Your Vertica version is adapted for using RVD but you are quite limited for Vertica ML\")\n\t\t\t\tprint(\"# Go to your Vertica version documentation for more information\")\n\t\t\t\tprint(\"# Unavailable algorithms: rf, svm and cross_validate\")\n\t\t\t\tprint(\"# /!\\\\ Some RVD queries can be really big because of the unavailability of a lot of functions\")\n\t\telse:\n\t\t\tprint(\"# Your Vertica version is adapted for using RVD but you can not use Vertica ML\")\n\t\t\tprint(\"# Go to your Vertica version documentation for more information\")\n\t\t\tprint(\"# /!\\\\ Some RVD queries can be really big because of the unavailability of a lot of functions\")\n\t\t\tprint(\"# /!\\\\ Some RVD functions could not work\")\n\t\tprint(\"#\")\n\t\tprint(\"# For more information about the RVD you can use the help() method\")\n\t\treturn (version_id,version_release)\n\n\t\t\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "numpy.linspace", "matplotlib.pyplot.barh", "numpy.ndarray", "pandas.DataFrame", "numpy.random.randint", "matplotlib.pyplot.gca", "numpy.arange", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.hexbin", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks" ] ]
RyanTorant/AGIO
[ "634e38cf5013821c78dad53d242da8202f632aa0" ]
[ "process_interrel.py" ]
[ "import csv\nfrom scipy.stats import shapiro, normaltest, norm, ks_2samp\nimport numpy as np\n\nspecies_data = {}\nspecies_types = {}\n\nbaseline = {}\nspecies_count = 0\nwith open('bin/baseline.csv','r') as csvfile:\n freader = csv.reader(csvfile)\n for sid,data in enumerate(freader):\n baseline[sid] = [float(x) for x in data[:-1]]\nspecies_count = len(baseline)\n\nnode_descs = []\nwith open('bin/species_ref.csv','r') as csvfile:\n freader = csv.reader(csvfile)\n for sid,data in enumerate(freader):\n species_types[sid] = data[1]\n\n if(species_types[sid] == 'Omnivore'):\n node_descs.append('{0}[shape=\"diamond\",label=\"\"];'.format(sid))\n elif(species_types[sid] == 'Carnivore'):\n node_descs.append('{0}[shape=\"square\",label=\"\"];'.format(sid))\n else:\n node_descs.append('{0}[label=\"\"];'.format(sid)) \n\nrelations = []\nrelevant_species = set()\n\nwith open('bin/interrelations.csv','r') as csvfile:\n freader = csv.reader(csvfile)\n dataIter = iter(freader)\n for sidB in range(species_count):\n for sidA in range(species_count):\n data = next(dataIter)[:-1]\n\n if sidA == sidB:\n continue\n\n data = [float(x) for x in data[:-1]]\n\n testP = ks_2samp(data, baseline[sidA]).pvalue\n \n if testP < 0.05:\n # The data are not from the same distribution so assume the species are related\n relevant_species.add(sidA)\n relevant_species.add(sidB)\n\n # TODO : Optimize! 
I'm computing the mean for the baseline a bunch of times\n mean_fA = np.mean(baseline[sidA])\n mean_fAB = np.mean(data)\n r_ppc = 100*(mean_fAB - mean_fA) / mean_fA\n\n print(\"{0} & {1} & {2} & {3} & {4:.2f}\\\\%\\\\\\\\\".format(sidA,species_types[sidA], sidB, species_types[sidB], r_ppc))\n relations.append((sidA, sidB, mean_fAB > mean_fA))\n\nprint('')\n'''\nrelations = []\nrelevant_species = set()\nnode_descs = []\nwith open('bin/interrelations.csv','r') as csvfile:\n freader = csv.reader(csvfile)\n for sid,data in enumerate(freader):\n data = [float(x) for x in data[:-1]]\n mean, dev, p_shapiro = species_data[sid]\n\n if(species_types[sid] == 'Omnívoro'):\n node_descs.append('{0}[shape=\"diamond\",label=\"\"];'.format(sid))\n elif(species_types[sid] == 'Carnívoro'):\n node_descs.append('{0}[shape=\"square\",label=\"\"];'.format(sid))\n else:\n node_descs.append('{0}[label=\"\"];'.format(sid)) \n\n if p_shapiro < 0.05:\n continue\n \n for second_id, val in enumerate(data):\n if sid == second_id:\n continue\n r = abs(val - mean) / dev\n r_ppc = 100*(val - mean) / mean\n\n if r >= 2:\n relevant_species.add(sid)\n relevant_species.add(second_id)\n\n prob = 1 - (norm.cdf(r) - norm.cdf(-r))\n relations.append((sid, second_id, r_ppc > 0))\n print(\"{0} & {1} & {2} & {3} & {4:.2f} & {5:.4f} & {6:.2f}\\\\%\\\\\\\\\".format(sid,species_types[sid], second_id, species_types[second_id],r, prob, r_ppc))\n\n'''\nprint('')\nprint('digraph relations {')\nfor sid, desc in enumerate(node_descs):\n if sid in relevant_species:\n print(desc)\nfor x,y,increased in relations:\n if increased:\n print(\"{0}->{1}\".format(x,y))\n else:\n print('{0}->{1}[style=\"dashed\"]'.format(x,y))\n\nprint('}')" ]
[ [ "scipy.stats.ks_2samp", "numpy.mean" ] ]
dbrainio/catalyst
[ "d6c0908f17f2a53e360b50415b4849354c40e3fc" ]
[ "rl/offpolicy/algorithms/core.py" ]
[ "import copy\nimport torch\n\nfrom catalyst.dl.utils import UtilsFactory\nfrom catalyst.rl.agents import AGENTS\nfrom .utils import soft_update\n\n\nclass Algorithm:\n def __init__(\n self,\n actor,\n critic,\n gamma,\n n_step,\n actor_optimizer_params,\n critic_optimizer_params,\n actor_grad_clip_params=None,\n critic_grad_clip_params=None,\n actor_loss_params=None,\n critic_loss_params=None,\n actor_scheduler_params=None,\n critic_scheduler_params=None,\n resume=None,\n load_optimizer=True,\n actor_tau=1.0,\n critic_tau=1.0,\n min_action=-1.0,\n max_action=1.0,\n **kwargs\n ):\n self._device = UtilsFactory.prepare_device()\n\n self.actor = actor.to(self._device)\n self.critic = critic.to(self._device)\n\n self.target_actor = copy.deepcopy(actor).to(self._device)\n self.target_critic = copy.deepcopy(critic).to(self._device)\n\n self.actor_optimizer = UtilsFactory.create_optimizer(\n self.actor, **actor_optimizer_params\n )\n self.critic_optimizer = UtilsFactory.create_optimizer(\n self.critic, **critic_optimizer_params\n )\n\n self.actor_optimizer_params = actor_optimizer_params\n self.critic_optimizer_params = critic_optimizer_params\n\n self.actor_scheduler = UtilsFactory.create_scheduler(\n self.actor_optimizer, **actor_scheduler_params\n )\n self.critic_scheduler = UtilsFactory.create_scheduler(\n self.critic_optimizer, **critic_scheduler_params\n )\n\n self.actor_scheduler_params = actor_scheduler_params\n self.critic_scheduler_params = critic_scheduler_params\n\n self.n_step = n_step\n self.gamma = gamma\n\n actor_grad_clip_params = actor_grad_clip_params or {}\n critic_grad_clip_params = critic_grad_clip_params or {}\n\n self.actor_grad_clip_fn = UtilsFactory.create_grad_clip_fn(\n **actor_grad_clip_params)\n self.critic_grad_clip_fn = UtilsFactory.create_grad_clip_fn(\n **critic_grad_clip_params)\n\n self.actor_grad_clip_params = actor_grad_clip_params\n self.critic_grad_clip_params = critic_grad_clip_params\n\n self.actor_criterion = 
UtilsFactory.create_criterion(\n **(actor_loss_params or {})\n )\n self.critic_criterion = UtilsFactory.create_criterion(\n **(critic_loss_params or {})\n )\n\n self.actor_loss_params = actor_loss_params\n self.critic_loss_params = critic_loss_params\n\n self.actor_tau = actor_tau\n self.critic_tau = critic_tau\n\n self.min_action = min_action\n self.max_action = max_action\n\n self._init(**kwargs)\n\n if resume is not None:\n self.load_checkpoint(resume, load_optimizer=load_optimizer)\n\n def _init(self, **kwards):\n assert len(kwards) == 0\n\n def __repr__(self):\n str_val = \" \".join(\n [\n f\"{key}: {str(getattr(self, key, ''))}\"\n for key in [\"n_step\", \"gamma\", \"actor_tau\", \"critic_tau\"]\n ]\n )\n return f\"Algorithm. {str_val}\"\n\n def _to_tensor(self, *args, **kwargs):\n return torch.Tensor(*args, **kwargs).to(self._device)\n\n def train(self, batch, actor_update=True, critic_update=True):\n \"returns loss for a batch of transitions\"\n raise NotImplementedError\n\n def get_td_errors(self, batch):\n # @TODO: for prioritized replay\n raise NotImplementedError\n\n def actor_update(self, loss):\n self.actor.zero_grad()\n self.actor_optimizer.zero_grad()\n loss.backward()\n if self.actor_grad_clip_fn is not None:\n self.actor_grad_clip_fn(self.actor.parameters())\n self.actor_optimizer.step()\n if self.actor_scheduler is not None:\n self.actor_scheduler.step()\n return {\"lr_actor\": self.actor_scheduler.get_lr()[0]}\n\n def critic_update(self, loss):\n self.critic.zero_grad()\n self.critic_optimizer.zero_grad()\n loss.backward()\n if self.critic_grad_clip_fn is not None:\n self.critic_grad_clip_fn(self.critic.parameters())\n self.critic_optimizer.step()\n if self.critic_scheduler is not None:\n self.critic_scheduler.step()\n return {\"lr_critic\": self.critic_scheduler.get_lr()[0]}\n\n def target_actor_update(self):\n soft_update(self.target_actor, self.actor, self.actor_tau)\n\n def target_critic_update(self):\n soft_update(self.target_critic, 
self.critic, self.critic_tau)\n\n def prepare_checkpoint(self):\n checkpoint = {}\n\n for key in [\"actor\", \"critic\"]:\n checkpoint[f\"{key}_state_dict\"] = getattr(self, key).state_dict()\n for key2 in [\"optimizer\", \"scheduler\"]:\n key2 = f\"{key}_{key2}\"\n value2 = getattr(self, key2, None)\n if value2 is not None:\n checkpoint[f\"{key2}_state_dict\"] = value2.state_dict()\n\n return checkpoint\n\n def load_checkpoint(self, filepath, load_optimizer=True):\n checkpoint = UtilsFactory.load_checkpoint(filepath)\n for key in [\"actor\", \"critic\"]:\n value_l = getattr(self, key, None)\n if value_l is not None:\n value_r = checkpoint[f\"{key}_state_dict\"]\n value_l.load_state_dict(value_r)\n\n if load_optimizer:\n for key2 in [\"optimizer\", \"scheduler\"]:\n key2 = f\"{key}_{key2}\"\n value_l = getattr(self, key2, None)\n if value_l is not None:\n value_r = checkpoint[f\"{key2}_state_dict\"]\n value_l.load_state_dict(value_r)\n\n @classmethod\n def prepare_for_trainer(cls, config):\n config_ = config.copy()\n\n actor_state_shape = (\n config_[\"shared\"][\"history_len\"],\n config_[\"shared\"][\"state_size\"],\n )\n actor_action_size = config_[\"shared\"][\"action_size\"]\n n_step = config_[\"shared\"][\"n_step\"]\n gamma = config_[\"shared\"][\"gamma\"]\n history_len = config_[\"shared\"][\"history_len\"]\n trainer_state_shape = (config_[\"shared\"][\"state_size\"], )\n trainer_action_shape = (config_[\"shared\"][\"action_size\"], )\n\n actor_fn = config_[\"actor\"].pop(\"actor\", None)\n actor_fn = AGENTS[actor_fn]\n actor = actor_fn.create_from_config(\n state_shape=actor_state_shape,\n action_size=actor_action_size,\n **config_[\"actor\"]\n )\n\n critic_fn = config_[\"critic\"].pop(\"critic\", None)\n critic_fn = AGENTS[critic_fn]\n critic = critic_fn.create_from_config(\n state_shape=actor_state_shape,\n action_size=actor_action_size,\n **config_[\"critic\"]\n )\n\n algorithm = cls(\n **config_[\"algorithm\"],\n actor=actor,\n critic=critic,\n 
n_step=n_step,\n gamma=gamma\n )\n\n kwargs = {\n \"algorithm\": algorithm,\n \"state_shape\": trainer_state_shape,\n \"action_shape\": trainer_action_shape,\n \"n_step\": n_step,\n \"gamma\": gamma,\n \"history_len\": history_len\n }\n\n return kwargs\n\n @classmethod\n def prepare_for_sampler(cls, config):\n config_ = config.copy()\n\n actor_state_shape = (\n config_[\"shared\"][\"history_len\"],\n config_[\"shared\"][\"state_size\"],\n )\n actor_action_size = config_[\"shared\"][\"action_size\"]\n\n actor_fn = config_[\"actor\"].pop(\"actor\", None)\n actor_fn = AGENTS[actor_fn]\n actor = actor_fn.create_from_config(\n state_shape=actor_state_shape,\n action_size=actor_action_size,\n **config_[\"actor\"]\n )\n\n history_len = config_[\"shared\"][\"history_len\"]\n\n kwargs = {\"actor\": actor, \"history_len\": history_len}\n\n return kwargs\n\n\nALGORITHM = Algorithm\n" ]
[ [ "torch.Tensor" ] ]
CaiYingFeng/ASLFeat
[ "97f7375d0ded92204551b917f30c46951f3b5516" ]
[ "models/cnn_wrapper/homo_utils.py" ]
[ "\"\"\"\nAdapted from:\nhttps://github.com/tynguyen/unsupervisedDeepHomographyRAL2018\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n#######################################################\n# Auxiliary matrices used to solve DLT\nAux_M1 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.float64)\n\n\nAux_M2 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.float64)\n\n\nAux_M3 = np.array([\n [0],\n [1],\n [0],\n [1],\n [0],\n [1],\n [0],\n [1]], dtype=np.float64)\n\n\nAux_M4 = np.array([\n [-1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, -1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, -1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, -1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float64)\n\n\nAux_M5 = np.array([\n [0, -1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, -1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, -1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, -1],\n [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float64)\n\n\nAux_M6 = np.array([\n [-1],\n [0],\n [-1],\n [0],\n [-1],\n [0],\n [-1],\n [0]], dtype=np.float64)\n\n\nAux_M71 = np.array([\n [0, 1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.float64)\n\n\nAux_M72 = np.array([\n [1, 0, 0, 0, 0, 0, 0, 0],\n [-1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, -1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, -1, 0, 0, 0],\n [0, 0, 0, 
0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, -1, 0]], dtype=np.float64)\n\n\nAux_M8 = np.array([\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, -1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, -1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, -1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, -1]], dtype=np.float64)\n\n\nAux_Mb = np.array([\n [0, -1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, -1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, -1, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, -1],\n [0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.float64)\n\n\ndef solve_DLT(pred_h4p, training, constrained=True, scale=None):\n bs = tf.shape(pred_h4p)[0]\n if training:\n h = pred_h4p.get_shape()[1].value\n w = pred_h4p.get_shape()[2].value\n else:\n h = tf.shape(pred_h4p)[1]\n w = tf.shape(pred_h4p)[2]\n\n pts_1 = tf.constant([-1, -1, 1, -1, -1, 1, 1, 1], dtype=tf.float32)\n pts_1 = tf.reshape(pts_1, (1, 1, 1, 8, 1))\n pts_1 = tf.tile(pts_1, (bs, h, w, 1, 1))\n pts_2 = pred_h4p[..., None] + pts_1\n\n # Auxiliary tensors used to create Ax = b equation\n M1 = tf.constant(Aux_M1, tf.float32)\n M1_tensor = tf.reshape(M1, [1, 1, 1, 8, 8])\n M1_tile = tf.tile(M1_tensor, [bs, h, w, 1, 1])\n\n M2 = tf.constant(Aux_M2, tf.float32)\n M2_tensor = tf.reshape(M2, [1, 1, 1, 8, 8])\n M2_tile = tf.tile(M2_tensor, [bs, h, w, 1, 1])\n\n M3 = tf.constant(Aux_M3, tf.float32)\n M3_tensor = tf.reshape(M3, [1, 1, 1, 8, 1])\n M3_tile = tf.tile(M3_tensor, [bs, h, w, 1, 1])\n\n M4 = tf.constant(Aux_M4, tf.float32)\n M4_tensor = tf.reshape(M4, [1, 1, 1, 8, 8])\n M4_tile = tf.tile(M4_tensor, [bs, h, w, 1, 1])\n\n M5 = tf.constant(Aux_M5, tf.float32)\n M5_tensor = tf.reshape(M5, [1, 1, 1, 8, 8])\n M5_tile = tf.tile(M5_tensor, [bs, h, w, 1, 1])\n\n M6 = tf.constant(Aux_M6, tf.float32)\n M6_tensor = tf.reshape(M6, [1, 1, 1, 8, 1])\n M6_tile = tf.tile(M6_tensor, [bs, h, w, 1, 1])\n\n M71 = tf.constant(Aux_M71, tf.float32)\n M71_tensor = tf.reshape(M71, 
[1, 1, 1, 8, 8])\n M71_tile = tf.tile(M71_tensor, [bs, h, w, 1, 1])\n\n M72 = tf.constant(Aux_M72, tf.float32)\n M72_tensor = tf.reshape(M72, [1, 1, 1, 8, 8])\n M72_tile = tf.tile(M72_tensor, [bs, h, w, 1, 1])\n\n M8 = tf.constant(Aux_M8, tf.float32)\n M8_tensor = tf.reshape(M8, [1, 1, 1, 8, 8])\n M8_tile = tf.tile(M8_tensor, [bs, h, w, 1, 1])\n\n Mb = tf.constant(Aux_Mb, tf.float32)\n Mb_tensor = tf.reshape(Mb, [1, 1, 1, 8, 8])\n Mb_tile = tf.tile(Mb_tensor, [bs, h, w, 1, 1])\n\n # Form the equations Ax = b to compute H\n # Form A matrix\n A1 = tf.matmul(M1_tile, pts_1) # Column 1\n A2 = tf.matmul(M2_tile, pts_1) # Column 2\n A3 = M3_tile # Column 3\n A4 = tf.matmul(M4_tile, pts_1) # Column 4\n A5 = tf.matmul(M5_tile, pts_1) # Column 5\n A6 = M6_tile # Column 6\n A7 = tf.matmul(M71_tile, pts_2) * tf.matmul(M72_tile, pts_1) # Column 7\n A8 = tf.matmul(M71_tile, pts_2) * tf.matmul(M8_tile, pts_1) # Column 8\n\n if constrained:\n A_mat = tf.concat([A1, A2, A4, A5, A7, A8], axis=-1)\n else:\n A_mat = tf.concat([A1, A2, A3, A4, A5, A6, A7, A8], axis=-1)\n # Form b matrix\n b_mat = tf.matmul(Mb_tile, pts_2)\n\n # Solve the Ax = b\n if constrained:\n A_t_mat = tf.matrix_transpose(A_mat)\n A_mat = tf.matmul(A_t_mat, A_mat)\n b_mat = tf.matmul(A_t_mat, b_mat)\n H_6el = tf.matrix_solve(A_mat, b_mat)\n H_6el = tf.squeeze(H_6el, axis=-1)\n\n if scale is not None:\n H_4el = H_6el[:, :, :, 0:4]\n H_4el = H_4el * scale\n H_2el = H_6el[:, :, :, 4:6]\n H_6el = tf.concat([H_4el, H_2el], axis=-1)\n\n h_zeros = tf.zeros([bs, h, w, 1])\n h_ones = tf.ones([bs, h, w, 1])\n h3 = tf.expand_dims(tf.concat([h_zeros, h_zeros, h_ones], axis=-1), axis=-1)\n H_6el = tf.reshape(H_6el, [bs, h, w, 3, 2]) # BATCH_SIZE x 3 x 3\n H_mat = tf.concat([H_6el, h3], axis=-1)\n else:\n H_8el = tf.matrix_solve(A_mat, b_mat) # BATCH_SIZE x 8.\n H_8el = tf.squeeze(H_8el, axis=-1)\n\n if scale is not None:\n H_6el = H_8el[:, :, :, 0:6]\n H_6el = H_6el * scale\n H_2el = H_8el[:, :, :, 6:8]\n H_8el = 
tf.concat([H_6el, H_2el], axis=-1)\n\n h_ones = tf.ones([bs, h, w, 1])\n H_9el = tf.concat([H_8el, h_ones], -1)\n H_mat = tf.reshape(H_9el, [bs, h, w, 3, 3]) # BATCH_SIZE x 3 x 3\n\n has_nan = tf.reduce_sum(tf.cast(tf.math.is_nan(H_mat), tf.float32))\n H_mat = tf.cond(\n tf.equal(has_nan, 0),\n lambda: H_mat,\n lambda: tf.tile(tf.reshape(tf.eye(3), [1, 1, 1, 3, 3]), [bs, h, w, 1, 1])\n )\n \n return H_mat\n\n\nif __name__ == \"__main__\":\n pred_h4p = np.array([0.5, -0.3, 0.15, 0.9, -1, -0.2, 0.5, 1.2])\n pred_h4p = tf.constant(pred_h4p, dtype=tf.float32)\n pred_h4p = tf.reshape(pred_h4p, (1, 1, 1, 8))\n H_mat = solve_DLT(pred_h4p, True, False)\n\n rng = tf.range(-1, 2)\n x, y = tf.meshgrid(rng, rng)\n x = tf.reshape(x, (-1, ))\n y = tf.reshape(y, (-1, ))\n xy = tf.reshape(tf.stack([x, y], axis=-1), [1, 1, 1, -1, 2])\n xy = tf.tile(xy, [tf.shape(H_mat)[0], tf.shape(\n H_mat)[1], tf.shape(H_mat)[2], 1, 1])\n xy = tf.cast(xy, tf.float32)\n ones = tf.ones_like(xy[:, :, :, :, 0])[..., None]\n xy_homo = tf.concat([xy, ones], axis=-1)\n\n pert_xy = tf.matmul(xy_homo, H_mat, transpose_b=True)\n homo_scale = tf.expand_dims(pert_xy[:, :, :, :, -1], axis=-1)\n pert_xy = pert_xy[:, :, :, :, 0:2]\n pert_xy = tf.clip_by_value(tf.math.divide_no_nan(pert_xy, homo_scale), -10., 10.)\n\n with tf.Session() as sess:\n print(sess.run(pert_xy))\n" ]
[ [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.stack", "tensorflow.cast", "tensorflow.equal", "tensorflow.matrix_solve", "tensorflow.squeeze", "tensorflow.Session", "tensorflow.math.divide_no_nan", "tensorflow.matrix_transpose", "tensorflow.tile", "tensorflow.matmul", "tensorflow.shape", "tensorflow.meshgrid", "numpy.array", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.ones", "tensorflow.math.is_nan", "tensorflow.eye" ] ]
SimonSuster/lxmls-toolkit
[ "78413c1ee61752ca33988c454e3b2c27326e7063" ]
[ "lxmls/sequences/discriminative_sequence_classifier.py" ]
[ "import numpy as np\nimport lxmls.sequences.sequence_classifier as sc\nimport pdb\n\nclass DiscriminativeSequenceClassifier(sc.SequenceClassifier):\n\n def __init__(self, observation_labels, state_labels, feature_mapper):\n sc.SequenceClassifier.__init__(self, observation_labels, state_labels)\n\n # Set feature mapper and initialize parameters.\n self.feature_mapper = feature_mapper\n self.parameters = np.zeros(self.feature_mapper.get_num_features())\n\n ################################\n ## Build the node and edge potentials\n ## node - f(t,y_t,X)*w\n ## edge - f(t,y_t,y_(t-1),X)*w\n ## Only supports binary features representation\n ## If we have an HMM with 4 positions and transitins\n ## a - b - c - d\n ## the edge potentials have at position:\n ## 0 a - b\n ## 1 b - c\n ################################\n def compute_scores(self, sequence):\n num_states = self.get_num_states()\n length = len(sequence.x)\n emission_scores = np.zeros([length, num_states])\n initial_scores = np.zeros(num_states)\n transition_scores = np.zeros([length-1, num_states, num_states])\n final_scores = np.zeros(num_states)\n\n # Initial position.\n for tag_id in xrange(num_states):\n initial_features = self.feature_mapper.get_initial_features(sequence, tag_id)\n score = 0.0\n for feat_id in initial_features:\n score += self.parameters[feat_id]\n initial_scores[tag_id] = score\n\n # Intermediate position.\n for pos in xrange(length):\n for tag_id in xrange(num_states):\n emission_features = self.feature_mapper.get_emission_features(sequence, pos, tag_id)\n score = 0.0\n for feat_id in emission_features:\n score += self.parameters[feat_id]\n emission_scores[pos, tag_id] = score\n if pos > 0: \n for tag_id in xrange(num_states):\n for prev_tag_id in xrange(num_states):\n transition_features = self.feature_mapper.get_transition_features(sequence, pos, tag_id, prev_tag_id)\n score = 0.0\n for feat_id in transition_features:\n score += self.parameters[feat_id]\n transition_scores[pos-1, 
tag_id, prev_tag_id] = score\n\n # Final position.\n for prev_tag_id in xrange(num_states):\n final_features = self.feature_mapper.get_final_features(sequence, prev_tag_id)\n score = 0.0\n for feat_id in final_features:\n score += self.parameters[feat_id]\n final_scores[prev_tag_id] = score\n\n return initial_scores, transition_scores, final_scores, emission_scores\n\n" ]
[ [ "numpy.zeros" ] ]
heyoh-app/gestures-detector
[ "b052382a9899d771502b79a97cea28ff51bdfa0e" ]
[ "multitask_lightning/lightning_module.py" ]
[ "import json\nimport torch\nimport numpy as np\nimport pytorch_lightning as pl\nfrom torch.utils.data import DataLoader\nfrom dataset.dataset import Dataset\nfrom losses.loss import get_loss\nfrom models.model import UnetClipped\nfrom utils.train_utils import get_optim\nfrom metrics.metric import MeanAveragePrecision\n\n\nclass LightningModule(pl.LightningModule):\n\n def __init__(self, config):\n super(LightningModule, self).__init__()\n self.hparams.update(config)\n\n self.net = UnetClipped(**self.hparams[\"model\"])\n\n self.image_size = self.hparams[\"train_data_params\"][\"size\"]\n self.stride = self.hparams[\"train_data_params\"][\"output_stride\"]\n self.in_channels = self.hparams[\"model\"][\"in_channels\"]\n self.num_classes = sum(self.hparams[\"train_data_params\"][\"subclasses\"])\n\n with open(self.hparams[\"split\"]) as json_file:\n split = json.load(json_file)\n\n self.train_files = split[\"train\"]\n self.val_files = split[\"val\"]\n\n self.train_data = Dataset(self.train_files, **self.hparams['train_data_params'])\n self.val_data = Dataset(self.val_files, **self.hparams['val_data_params'])\n\n self.loss_functions = {task: get_loss(params) for task, params in self.hparams['losses'].items()}\n self.metric = MeanAveragePrecision(\n num_classes=self.num_classes,\n out_img_size=self.hparams[\"val_data_params\"][\"size\"] // self.hparams[\"val_data_params\"][\"output_stride\"],\n **self.hparams[\"metric\"]\n )\n\n self.freeze_epochs = self.hparams[\"freeze_epochs\"]\n if self.freeze_epochs != 0:\n self.set_grad(False)\n\n def set_grad(self, requires_grad: bool):\n for param in self.net.encoder.features.parameters():\n param.requires_grad = requires_grad\n\n def forward(self, x):\n return self.net(x)\n\n def _log(self, log_dict):\n [self.log(log_name, log_value, on_step=False, on_epoch=True, prog_bar=True) for log_name, log_value in\n log_dict.items()]\n\n def compute_losses(self, masks_predict, masks_gt, mode: str = \"train\"):\n masks, predicts, 
losses = dict(), dict(), dict()\n\n masks['kpoint'], masks['side'], masks['size'] = masks_gt\n predicts['kpoint'], predicts['side'], predicts['size'] = torch.split(masks_predict, [self.num_classes, 1, 1], 1)\n\n loss = torch.tensor(0.).cuda()\n for task in masks.keys():\n losses[task] = self.loss_functions[task](predicts[task], masks[task])\n self.log(f\"{mode}_loss_{task}\", losses[task], on_step=False, on_epoch=True, prog_bar=True)\n loss += losses[task]\n self.log(f\"{mode}_loss\", loss, on_step=False, on_epoch=True, prog_bar=True)\n return loss\n\n def training_step(self, batch, batch_idx):\n image = batch[0]\n output = self.forward(image)\n loss = self.compute_losses(output, batch[1:], mode=\"train\")\n return {'loss': loss}\n\n def validation_step(self, batch, batch_idx):\n image = batch[0]\n output = self.forward(image)\n loss = self.compute_losses(output, batch[1:], mode=\"val\")\n self.metric.update(output, batch[1:])\n return {'val_loss': loss}\n\n def validation_epoch_end(self, outputs):\n val_loss = np.mean([output['val_loss'].detach().cpu() for output in outputs])\n val_map = self.metric.pascal_map_value(reset=True)\n self.log('val_map', val_map, on_step=False, on_epoch=True, prog_bar=True)\n\n if self.current_epoch + 1 == self.freeze_epochs and self.freeze_epochs != 0:\n self.set_grad(True) # unfreeze encoder\n return {'val_loss': val_loss, 'val_map': val_map}\n\n def configure_optimizers(self):\n optimizer = get_optim(self.hparams[\"optimizer\"])\n optimizer = optimizer(self.net.parameters())\n\n scheduler = {\n 'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, **self.hparams['reduce_on_plateau']),\n 'monitor': 'val_loss',\n 'interval': 'epoch',\n 'frequency': self.hparams[\"check_val_every_n_epoch\"]\n }\n return [optimizer], [scheduler]\n\n def train_dataloader(self):\n return DataLoader(\n self.train_data,\n batch_size=self.hparams[\"batch_size\"],\n shuffle=True,\n drop_last=True,\n num_workers=self.hparams[\"workers\"]\n )\n\n 
def val_dataloader(self):\n return DataLoader(\n self.val_data,\n batch_size=self.hparams[\"val_batch_size\"],\n shuffle=False,\n drop_last=False,\n num_workers=self.hparams[\"workers\"]\n )\n" ]
[ [ "torch.tensor", "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.split" ] ]
slaclab/lume-elegant
[ "b4c1e9d2ab72c2502bd6b937ae5b518116aa2675" ]
[ "elegant/parsers.py" ]
[ "import numpy as np\n\nimport subprocess\nimport os\n\n\ndef parse_sdds_table(sddsfile, columns, sdds2plaindata_exe='sdds2plaindata'):\n \"\"\"\n Get tabular data from SDDS file.\n \n Example:\n get_table('LCLS2scH.twi', ['s', 'betax', 'betay', 'etax'])\n \"\"\"\n \n assert os.path.exists(sddsfile)\n \n outfile = sddsfile+'_table'\n cmd0 = [sdds2plaindata_exe, sddsfile, outfile, '-noRowCount', '-outputMode=ascii']\n \n cmd = cmd0 + [f'-col={c}' for c in columns] + ['-separator= ']\n\n output,error = subprocess.Popen(\n cmd, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n assert os.path.exists(outfile), f'{outfile} does not exist'\n \n rdat = np.loadtxt(outfile)\n \n dat = {}\n for i, key in enumerate(columns):\n dat[key] = rdat[:,i]\n \n os.remove(outfile)\n return dat" ]
[ [ "numpy.loadtxt" ] ]
Tishacy/MTSpider
[ "2a0e046477f59bd29b6512645d356f02c021eb88" ]
[ "mspider/mtd.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport requests\nimport wget\nimport numpy as np\nimport pandas as pd\nimport json\nimport time\nfrom threading import Thread\nfrom queue import Queue\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\n\n# CLASS Downloader\n\nclass Downloader(object):\n\t\"\"\"A downloader using multi-threading tech\n\n\t----------------------------------------\n\tsource: A list of zip of names and urls, like:\n\t\t[(name, url), (name, url), ... (name, url)]\n\t\tA source could be generated by calling \"list(zip(name_list, url_list))\",\n\t\twhere \"name_list\" is a list of names and \"url_list\" is a list of urls.\n\n\tfile_extension: 'normal' by default or other file extensions like\n\t\t'jpg', 'png', 'mp4', etc.\n\t\t'normal': Automatically recognize the file extension.\n\t\"\"\"\n\tdef __init__(self, source, file_extension='normal'):\n\t\tself.source = source\n\t\tself.file_extension = file_extension\n\t\tself.crawler = Crawler(self.download_single_file, self.source)\n\t\tself.check('initialization')\n\n\tdef check(self, item):\n\t\t# initial check\n\t\tif item == \"initialization\":\n\t\t\tself.num_files = len(self.source)\n\t\t# check if the output folder already exists.\n\t\telif item == \"out_folder\":\n\t\t\tif os.path.isdir(self.out_folder):\n\t\t\t\tprint(\"[WARNING]: Folder '%s' already exists. 
Automatically download into this folder.\" % self.out_folder)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tos.mkdir(self.out_folder)\n\n\tdef download_single_file(self, index, source_item):\n\t\tname, url = source_item\n\t\tif self.file_extension == 'normal':\n\t\t\tfilex = url.split('.')[-1]\n\t\telse:\n\t\t\tfilex = self.file_extension\n\t\tout_path = \"./%s/%s.%s\" %(self.out_folder, name, filex)\n\n\t\tif os.path.isfile(out_path):\n\t\t\tprint(\"\\r[INFO]: Arealy has this file, skipping...\".ljust(73), end=\"\")\n\t\telse:\n\t\t\tif self.engine == \"wget\":\n\t\t\t\twget.download(url, out_path)\n\t\t\telif self.engine == \"wget-cli\":\n\t\t\t\tos.system('wget %s -O %s' %(url, out_path))\n\t\t\telif self.engine == \"you-get\":\n\t\t\t\tos.system('you-get %s -O %s' %(url, out_path))\n\n\tdef download(self, out_folder=\"./\", engine=\"wget\"):\n\t\tself.out_folder = out_folder\n\t\tself.check('out_folder')\n\t\tself.engine = engine\n\t\tself.crawl()\n\n\tdef crawl(self):\n\t\tself.crawler.crawl()\n\t\tprint(\"[INFO]: %d urls failed.\" %(len(self.crawler.failed_urls), ))\n\t\tif len(self.crawler.failed_urls) > 0:\n\t\t\tprint(\"[FAILED]: \\n\", self.crawler.failed_urls)\n\t\t\tgo_on = input(\"[INPUT]: Recrawl the faield urls, (y/n): \")\n\t\t\tif go_on == 'y':\n\t\t\t\tself.crawler = Crawler(self.download_single_file, self.crawler.failed_urls)\n\t\t\t\tself.crawl()\n\t\t\telse:\n\t\t\t\tprint('[INFO]: Task done.')\n\t\t\t\treturn\n\n\n# A TEST FOR CLASS Downloader\n\ndef test_downloader():\n\tnames = [str(i) for i in range(100)]\n\turls = ['https://www.baidu.com/img/baidu_resultlogo@2.png']*100\n\tsource = list(zip(names, urls))\n\n\tdownloader = Downloader(source)\n\tdownloader.download('test', engine=\"wget\")\n\n\n\n\n# CLASS Crawler\n\nclass Crawler(object):\n\t\"\"\"A wapper of multi-threading crawler for human\n\n\t----------------------------------------\n\tbasic_func: Any function that you need it to be multi-threaded.\n\t\tNote that the parameters of this 
function are fixed, which\n\t\tmeans you should define this function like:\n\t\t\tdef basic_func_name(index, src_item):\n\t\t\t\tpass\n\t\twhere \"index\" represents the index of the \"src_item\", and\n\t\t\"src_item\" represents the information to be handled in this\n\t\tfunction.\n\n\turls: A list of sources that you'd like to be handled multi-threadingly\n\t\tGiven that crawler always request urls, so typically the source\n\t\tlist is often a list of urls.\n\t\tNote that this parameter could be anything iterable, as long\n\t\tas you'd like to deal with the item in your basic_func.\n\n\thas_result: (Boolean) \"False\" by default.\n\t\tSet this to be \"True\" when you'd like to export data after\n\t\tall of crawling task done. And remember to return the data you\n\t\twanted in your basic_func.\n\t\tHowever, this way of exporting data is not recommended. The better\n\t\tway is to export your data in your basic_func, so that every time\n\t\tthe basic_func finished in multi-threading tasks, the associated\n\t\tdata will be exported to your own dataset.\n\t\"\"\"\n\n\tdef __init__(self, basic_func, urls, has_result=False, batch_size=None):\n\t\tsuper(Crawler, self).__init__()\n\t\tself.basic_func = basic_func\n\t\tself.has_result = has_result\n\t\tself.batch_size = batch_size\n\t\tself.urls = urls\n\t\tself.num_urls = len(self.urls)\n\t\tself.failed_urls = []\n\n\tdef crawl(self):\n\t\t# initialization\n\t\tprint(\"[INFO]: %d urls in total.\" %(self.num_urls))\n\t\tif self.batch_size is None:\n\t\t\tbatch_size = int(input(\"[INPUT]: BATCH SIZE: \"))\n\t\telse:\n\t\t\tbatch_size = self.batch_size\n\t\t\tprint(\"[INFO]: Using BATCH SIZE: %d\" %(batch_size))\n\t\tself.num_thrd = int(np.ceil(self.num_urls / batch_size))\n\t\tif self.has_result == True:\n\t\t\tself.queue = Queue()\n\n\t\tthds = []\n\t\tt1 = time.time()\n\t\t# for i in range(self.num_thrd):\n\t\tfor i in tqdm(range(self.num_thrd), desc=\"[INFO]: Open threads\"):\n\t\t\tfrm = i * batch_size\n\t\t\t# 
batch_size of the last thread\n\t\t\tif i == self.num_thrd - 1 and self.num_urls % batch_size != 0:\n\t\t\t\tbatch_size = self.num_urls % batch_size\n\t\t\tthd = Thread(target = self.crawl_batch,\n\t\t\t\t\t\t args=(frm, batch_size))\n\t\t\tthd.start()\n\t\t\tthds.append(thd)\n\n\t\tfor thd in thds:\n\t\t\tthd.join()\n\t\tprint(\"\\r[INFO]: Task done.\".ljust(74))\n\t\tprint(\"[INFO]: The task costs %.4f sec.\" %(time.time()-t1))\n\n\t\tif self.has_result == True:\n\t\t\t# Load data\n\t\t\tresult = []\n\t\t\tfor i in tqdm(range(len(thds)), desc=\"[INFO]: Load data\"):\n\t\t\t\tresult += self.queue.get()\n\t\t\tprint(\"[INFO]: All data are loaded.\")\n\t\t\treturn result\n\n\tdef crawl_batch(self, frm, batch_size):\n\t\tthd_num = frm // batch_size\n\t\turls = self.urls[frm: frm + batch_size]\n\t\tbatch_result = []\n\t\tfor i, url in enumerate(urls):\n\t\t\ttry:\n\t\t\t\tres = self.basic_func(frm + i, url)\n\t\t\t\tprint(\"\\r[INFO]: Thread %d, url %d is done.\".ljust(70)\n\t\t\t\t\t%(thd_num + 1, i+1), end='')\n\t\t\texcept:\n\t\t\t\tres = None\n\t\t\t\tself.failed_urls.append(url)\n\t\t\t\tprint('\\r[ERROR]: Thread %d, url %d is failed, which is stored in failed_urls'.rjust(70)\n\t\t\t\t \t%(thd_num + 1, i+1), end='')\n\t\t\tbatch_result.append(res)\n\n\t\tif self.has_result == True:\n\t\t\tself.queue.put(batch_result)\n\n\nif __name__==\"__main__\":\n\ttest_downloader()\n" ]
[ [ "numpy.ceil" ] ]
iraquitan/stanford-cnn-visual-recog
[ "28d515661b65dc525f4d0532eeb0f37741d1c6d0" ]
[ "assignment1/cs231n/classifiers/linear_classifier.py" ]
[ "from __future__ import print_function\n\nimport numpy as np\nfrom cs231n.classifiers.linear_svm import *\nfrom cs231n.classifiers.softmax import *\n\n\nclass LinearClassifier(object):\n def __init__(self):\n self.W = None\n\n def train(\n self,\n X,\n y,\n learning_rate=1e-3,\n reg=1e-5,\n num_iters=100,\n batch_size=200,\n verbose=False,\n ):\n \"\"\"\n Train this linear classifier using stochastic gradient descent.\n\n Inputs:\n - X: A numpy array of shape (N, D) containing training data; there are N\n training samples each of dimension D.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c\n means that X[i] has label 0 <= c < C for C classes.\n - learning_rate: (float) learning rate for optimization.\n - reg: (float) regularization strength.\n - num_iters: (integer) number of steps to take when optimizing\n - batch_size: (integer) number of training examples to use at each step.\n - verbose: (boolean) If true, print progress during optimization.\n\n Outputs:\n A list containing the value of the loss function at each training iteration.\n \"\"\"\n num_train, dim = X.shape\n num_classes = (\n np.max(y) + 1\n ) # assume y takes values 0...K-1 where K is number of classes\n if self.W is None:\n # lazily initialize W\n self.W = 0.001 * np.random.randn(dim, num_classes)\n\n # Run stochastic gradient descent to optimize W\n loss_history = []\n for it in range(num_iters):\n # X_batch = None\n # y_batch = None\n\n #########################################################################\n # TODO: #\n # Sample batch_size elements from the training data and their #\n # corresponding labels to use in this round of gradient descent. #\n # Store the data in X_batch and their corresponding labels in #\n # y_batch; after sampling X_batch should have shape (dim, batch_size) #\n # and y_batch should have shape (batch_size,) #\n # #\n # Hint: Use np.random.choice to generate indices. 
Sampling with #\n # replacement is faster than sampling without replacement. #\n #########################################################################\n ind = np.random.choice(num_train, batch_size, replace=True)\n X_batch = X[ind]\n y_batch = y[ind]\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n # evaluate loss and gradient\n loss, grad = self.loss(X_batch, y_batch, reg)\n loss_history.append(loss)\n\n # perform parameter update\n #########################################################################\n # TODO: #\n # Update the weights using the gradient and the learning rate. #\n #########################################################################\n self.W -= learning_rate * grad\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n if verbose and it % 100 == 0:\n print(\"iteration %d / %d: loss %f\" % (it, num_iters, loss))\n\n return loss_history\n\n def predict(self, X):\n \"\"\"\n Use the trained weights of this linear classifier to predict labels for\n data points.\n\n Inputs:\n - X: A numpy array of shape (N, D) containing training data; there are N\n training samples each of dimension D.\n\n Returns:\n - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional\n array of length N, and each element is an integer giving the predicted\n class.\n \"\"\"\n y_pred = np.zeros(X.shape[0])\n ###########################################################################\n # TODO: #\n # Implement this method. Store the predicted labels in y_pred. 
#\n ###########################################################################\n y_pred = np.argmax(X.dot(self.W), 1)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return y_pred\n\n def loss(self, X_batch, y_batch, reg):\n \"\"\"\n Compute the loss function and its derivative. \n Subclasses will override this.\n\n Inputs:\n - X_batch: A numpy array of shape (N, D) containing a minibatch of N\n data points; each point has dimension D.\n - y_batch: A numpy array of shape (N,) containing labels for the minibatch.\n - reg: (float) regularization strength.\n\n Returns: A tuple containing:\n - loss as a single float\n - gradient with respect to self.W; an array of the same shape as W\n \"\"\"\n pass\n\n\nclass LinearSVM(LinearClassifier):\n \"\"\" A subclass that uses the Multiclass SVM loss function \"\"\"\n\n def loss(self, X_batch, y_batch, reg):\n return svm_loss_vectorized(self.W, X_batch, y_batch, reg)\n\n\nclass Softmax(LinearClassifier):\n \"\"\" A subclass that uses the Softmax + Cross-entropy loss function \"\"\"\n\n def loss(self, X_batch, y_batch, reg):\n return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)\n" ]
[ [ "numpy.max", "numpy.zeros", "numpy.random.randn", "numpy.random.choice" ] ]
imjal/CenterNet
[ "295ad5c620c58a8ade3ae48dae2b8b3f5147fd4e" ]
[ "src/lib/datasets/dataset/driving_fourth_dataset.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pycocotools.coco as coco\nfrom pycocotools.cocoeval import COCOeval\nimport numpy as np\nimport json\nimport os\n\nimport torch.utils.data as data\n\nclass DrivingFourth(data.Dataset):\n num_classes = 80\n default_resolution = [512, 512]\n mean = np.array([0.40789654, 0.44719302, 0.47026115],\n dtype=np.float32).reshape(1, 1, 3)\n std = np.array([0.28863828, 0.27408164, 0.27809835],\n dtype=np.float32).reshape(1, 1, 3)\n\n def modify_json(self, json_file, end_tag, threshold):\n def mod_category_coco(id):\n self.index\n A = json.load(open(json_file, 'r'))\n annots = []\n for x in A['annotations']:\n if 'score' in x:\n if x['score'] > threshold:\n annots += [x]\n else:\n if x['category_id'] in self._valid_ids:\n x['category_id'] = self._valid_ids.index(x['category_id']) + 1\n annots += [x]\n A['annotations'] = annots\n json.dump(A, open(f'/scratch/jl5/{end_tag}', 'w'))\n\n def __init__(self, opt, split):\n super(DrivingFourth, self).__init__()\n self.class_name = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 
'toothbrush']\n self._valid_ids = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,\n 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, \n 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, \n 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, \n 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, \n 58, 59, 60, 61, 62, 63, 64, 65, 67, 70,\n 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, \n 82, 84, 85, 86, 87, 88, 89, 90]\n\n self.data_dir = '/scratch/jl5/'\n self.img_dir = os.path.join(self.data_dir, 'coco/images/train2017')\n end_tag = ''\n if split == 'test':\n self.annot_path = '/data2/jl5/mmdetect_results/driving1000/fourth1.json'\n end_tag = 'fourth_test.json'\n else:\n self.annot_path = '/data2/jl5/mmdetect_results/driving1000/coco_offset_fourth0.json'\n end_tag = 'fourth_train.json'\n self.modify_json(self.annot_path, end_tag, opt.data_thresh)\n self.annot_path = '/scratch/jl5/fourth_train.json'\n \n self.max_objs = 128\n \n\n self.cat_ids = {v: i for i, v in enumerate([j for j in range(1, 81)])}\n self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \\\n for v in range(1, self.num_classes + 1)]\n self._data_rng = np.random.RandomState(123)\n self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],\n dtype=np.float32)\n self._eig_vec = np.array([\n [-0.58752847, -0.69563484, 0.41340352],\n [-0.5832747, 0.00994535, -0.81221408],\n [-0.56089297, 0.71832671, 0.41158938]\n ], dtype=np.float32)\n # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)\n # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)\n\n self.split = split\n self.opt = opt\n\n print('==> initializing coco 2017 {} data.'.format(split))\n self.coco = coco.COCO(self.annot_path)\n self.images = self.coco.getImgIds()\n self.num_samples = len(self.images)\n\n print('Loaded {} {} samples'.format(split, self.num_samples))\n\n def _to_float(self, x):\n return float(\"{:.2f}\".format(x))\n\n def convert_eval_format(self, all_bboxes):\n # import pdb; pdb.set_trace()\n detections = []\n for image_id 
in all_bboxes:\n for cls_ind in all_bboxes[image_id]:\n category_id = self._valid_ids[cls_ind - 1]\n for bbox in all_bboxes[image_id][cls_ind]:\n bbox[2] -= bbox[0]\n bbox[3] -= bbox[1]\n score = bbox[4]\n bbox_out = list(map(self._to_float, bbox[0:4]))\n\n detection = {\n \"image_id\": int(image_id),\n \"category_id\": int(category_id),\n \"bbox\": bbox_out,\n \"score\": float(\"{:.2f}\".format(score))\n }\n if len(bbox) > 5:\n extreme_points = list(map(self._to_float, bbox[5:13]))\n detection[\"extreme_points\"] = extreme_points\n detections.append(detection)\n return detections\n\n def __len__(self):\n return self.num_samples\n\n def save_results(self, results, save_dir):\n json.dump(self.convert_eval_format(results), \n open('{}/results.json'.format(save_dir), 'w'))\n \n def run_eval(self, results, save_dir):\n # result_json = os.path.join(save_dir, \"results.json\")\n # detections = self.convert_eval_format(results)\n # json.dump(detections, open(result_json, \"w\"))\n self.save_results(results, save_dir)\n coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))\n coco_eval = COCOeval(self.coco, coco_dets, \"bbox\")\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n" ]
[ [ "numpy.array", "numpy.random.RandomState" ] ]
lamhoangtung/kornia
[ "9cfee5a83a9b7f371f7581ca9a1a67128ca872bc" ]
[ "test/feature/test_siftdesc.py" ]
[ "import pytest\nimport torch\nfrom torch.autograd import gradcheck\n\nimport kornia.testing as utils # test utils\nfrom kornia.feature.siftdesc import SIFTDescriptor, get_sift_bin_ksize_stride_pad, get_sift_pooling_kernel\nfrom kornia.testing import assert_close\n\n\n@pytest.mark.parametrize(\"ksize\", [5, 13, 25])\ndef test_get_sift_pooling_kernel(ksize):\n kernel = get_sift_pooling_kernel(ksize)\n assert kernel.shape == (ksize, ksize)\n\n\n@pytest.mark.parametrize(\"ps,n_bins,ksize,stride,pad\", [(41, 3, 20, 13, 5), (32, 4, 12, 8, 3)])\ndef test_get_sift_bin_ksize_stride_pad(ps, n_bins, ksize, stride, pad):\n out = get_sift_bin_ksize_stride_pad(ps, n_bins)\n assert out == (ksize, stride, pad)\n\n\nclass TestSIFTDescriptor:\n def test_shape(self, device, dtype):\n inp = torch.ones(1, 1, 32, 32, device=device, dtype=dtype)\n sift = SIFTDescriptor(32).to(device, dtype)\n out = sift(inp)\n assert out.shape == (1, 128)\n\n def test_batch_shape(self, device, dtype):\n inp = torch.ones(2, 1, 15, 15, device=device, dtype=dtype)\n sift = SIFTDescriptor(15).to(device, dtype)\n out = sift(inp)\n assert out.shape == (2, 128)\n\n def test_batch_shape_non_std(self, device, dtype):\n inp = torch.ones(3, 1, 19, 19, device=device, dtype=dtype)\n sift = SIFTDescriptor(19, 5, 3).to(device, dtype)\n out = sift(inp)\n assert out.shape == (3, (3 ** 2) * 5)\n\n def test_toy(self, device, dtype):\n patch = torch.ones(1, 1, 6, 6, device=device, dtype=dtype)\n patch[0, 0, :, 3:] = 0\n sift = SIFTDescriptor(6, num_ang_bins=4, num_spatial_bins=1, clipval=0.2, rootsift=False).to(device, dtype)\n out = sift(patch)\n expected = torch.tensor([[0, 0, 1.0, 0]], device=device, dtype=dtype)\n assert_close(out, expected, atol=1e-3, rtol=1e-3)\n\n def test_gradcheck(self, device):\n dtype = torch.float64\n batch_size, channels, height, width = 1, 1, 15, 15\n patches = torch.rand(batch_size, channels, height, width, device=device, dtype=dtype)\n patches = utils.tensor_to_gradcheck_var(patches) # to 
var\n sift = SIFTDescriptor(15).to(device, dtype)\n assert gradcheck(sift, (patches,), raise_exception=True, nondet_tol=1e-4)\n\n @pytest.mark.skip(\"Compiled functions can't take variable number\")\n def test_jit(self, device, dtype):\n B, C, H, W = 1, 1, 32, 32\n patches = torch.ones(B, C, H, W, device=device, dtype=dtype)\n model = SIFTDescriptor(32).to(patches.device, patches.dtype).eval()\n model_jit = torch.jit.script(model)\n assert_close(model(patches), model_jit(patches))\n" ]
[ [ "torch.jit.script", "torch.ones", "torch.tensor", "torch.rand", "torch.autograd.gradcheck" ] ]
Shiduo-zh/pybulletSim
[ "a51c71adc328d2071d7faf53e4bc5cd695f03ab2" ]
[ "exptools/logging/_logger.py" ]
[ "from contextlib import contextmanager\nfrom exptools.logging.tabulate import tabulate\nfrom exptools.logging.console import mkdir_p, colorize\nfrom exptools.logging.autoargs import get_all_parameters\nimport numpy as np\nfrom collections import OrderedDict, defaultdict\nimport os, shutil\nimport os.path as osp\nimport sys\nimport datetime\nimport pandas as pd\nimport imageio\nimport csv\nimport threading\nimport json\n\n_tb_avaliable = False\ntb_writer = None\ntry:\n import tensorboardX\nexcept ImportError as e:\n print(\"TensorboardX is not available in exptools, logging might be limited\")\nelse:\n _tb_avaliable = True\n\nclass Logger():\n \"\"\" The interface to handle all logging operations (if you are using this library).\n Current logging modalities: text, scalar, image, gif, pointcloud/mesh, \n All modalities can be logged in batch, which means the datas should be able to be indexed as data[i]\n NOTE: all filename and paths (except self.log_dir) are relative paths related to self.log_dir\n \"\"\"\n def __init__(self,\n log_dir, # The abspath of where all log files are put\n refresh= False, # if you don't want to resume your experiment, this will remove everything in log_dir\n ):\n self.refresh = refresh\n self.log_dir = osp.abspath(log_dir)\n mkdir_p(self.log_dir)\n self.mp_lock = threading.Lock()\n\n # cleaning the log_dir if necessary\n if refresh:\n for filename in os.listdir(self.log_dir):\n _fp = os.path.join(self.log_dir, filename)\n try:\n if os.path.isfile(_fp) or os.path.islink(_fp):\n os.unlink(_fp)\n elif os.path.isdir(_fp):\n shutil.rmtree(_fp)\n except Exception as e:\n print('Failed to delete %s. 
Reason: %s' % (_fp, e))\n\n\n # start building all logging stuff\n self.tb_writer = None if not _tb_avaliable else tensorboardX.SummaryWriter(logdir= self.log_dir)\n \n self._text_prefix = [] # a stack to set prefix\n self._text_files = {} # dict of {filename:file_descriptor}\n self._text_default_file = None\n\n # assuming current scalar data can be handled by cluster memory (otherwise, solve later)\n self._scalar_prefix = [] # a stack to set prefix\n self._scalar_data = {} # a dict of {filename:pandas_dataframe}\n self._scalar_default_file = None\n \n self._image_prefix = []\n self._gif_prefix = []\n\n self.default_step = 0\n\n def push_text_prefix(self, prefix: str):\n self._text_prefix.append(prefix)\n def pop_text_prefix(self):\n self._text_prefix.pop(-1)\n @contextmanager\n def text_prefix(self, prefix: str):\n self.push_text_prefix(prefix)\n yield\n self.pop_text_prefix()\n \n def push_scalar_prefix(self, prefix: str):\n self._scalar_prefix.append(prefix)\n def pop_scalar_prefix(self):\n self._scalar_prefix.pop(-1)\n @contextmanager\n def scalar_prefix(self, prefix: str):\n self.push_scalar_prefix(prefix)\n yield\n self.pop_scalar_prefix()\n \n def push_image_prefix(self, prefix: str):\n self._image_prefix.append(prefix)\n def pop_image_prefix(self):\n self._image_prefix.pop(-1)\n @contextmanager\n def image_prefix(self, prefix: str):\n self.push_image_prefix(prefix)\n yield\n self.pop_image_prefix()\n \n def push_gif_prefix(self, prefix: str):\n self._gif_prefix.append(prefix)\n def pop_gif_prefix(self):\n self._gif_prefix.pop(-1)\n @contextmanager\n def gif_prefix(self, prefix: str):\n self.push_gif_prefix(prefix)\n yield\n self.pop_gif_prefix()\n\n def push_prefix(self, prefix: str):\n self.push_text_prefix(prefix)\n self.push_scalar_prefix(prefix)\n self.push_image_prefix(prefix)\n self.push_gif_prefix(prefix)\n def pop_prefix(self):\n self.pop_text_prefix()\n self.pop_scalar_prefix()\n self.pop_image_prefix()\n self.pop_gif_prefix()\n @contextmanager\n 
def prefix(self, prefix: str):\n \"\"\" All modality prefix \"\"\"\n self.push_prefix(prefix)\n yield\n self.pop_prefix()\n\n def add_text_output(self, filename: str):\n if not self._text_default_file:\n self._text_default_file = filename\n self._text_files[filename] = open(osp.join(self.log_dir, filename), mode= \"a\")\n def remove_text_output(self, filename):\n if filename == self._text_default_file:\n print(colorize(\n \"Warning: You are removing default text output\",\n color= \"yellow\",\n ))\n self._text_default_file = None\n self._text_files[filename].close()\n self._text_files.pop(filename)\n @contextmanager\n def additional_text_output(self, filename):\n self.add_text_output(filename)\n yield\n self.remove_text_output(filename)\n\n def redirect_stdout_to_text_output(self):\n \"\"\" NOTE: You have to add_text_output before calling this method\n \"\"\"\n sys.stdout = self._text_files[self._text_default_file]\n def redirect_stdout_to_console(self):\n sys.stdout = sys.__stdout__\n\n def save_param_dict(self, param, filename):\n assert isinstance(param, dict)\n with open(osp.join(self.log_dir, filename), \"w\") as fd:\n json.dump(param, fd, indent= 4)\n \n def add_scalar_output(self, filename: str):\n if not self._scalar_default_file:\n self._scalar_default_file = filename\n if not self.refresh and osp.isfile(osp.join(self.log_dir, filename)):\n self._scalar_data[filename] = pd.read_csv(osp.join(self.log_dir, filename))\n else:\n self._scalar_data[filename] = pd.DataFrame().append({}, ignore_index= True)\n def remove_scalar_output(self, filename= None):\n if filename is None: filename = self._scalar_default_file\n if filename == self._scalar_default_file:\n print(colorize(\n \"Warning: You are removing default scalar output\",\n color= \"yellow\",\n ))\n self._scalar_default_file = None\n self._scalar_data[filename].to_csv(osp.join(self.log_dir, filename), index= False)\n self._scalar_data.pop(filename)\n @contextmanager\n def additional_scalar_output(self, 
filename):\n self.add_scalar_output(filename)\n yield\n self.remove_scalar_output(filename)\n\n def log_text(self, data, step= None,\n filename= None,\n with_prefix= True,\n with_timestamp=True,\n color=None\n ):\n if filename is None: filename = self._text_default_file\n if step is None: step = self.default_step\n\n out = data\n if with_prefix: \n for p in self._text_prefix:\n out = p + out\n if with_timestamp:\n now = datetime.datetime.now() # dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')\n out = \"%s | %s\" % (timestamp, out)\n if color is not None:\n out = colorize(out, color)\n\n print(out)\n self._text_files[filename].write(out + \"\\n\")\n self._text_files[filename].flush()\n if not self.tb_writer is None:\n self.tb_writer.add_text(\"text\", out, step)\n\n def log_scalar(self, tag, data, step= None, filename= None, with_prefix= True, **kwargs):\n \"\"\"\n @Args:\n tag: string;\n data: a number (not array)\n step: a int of the iteration number (starting from 0). 
If `filename` provided, \n you need to give proper `step` of current `filename` and increment one by one.\n \"\"\"\n if filename is None: filename = self._scalar_default_file\n if with_prefix:\n for p in self._scalar_prefix:\n tag = p + tag\n # maintain pandas DataFrame\n df_len = len(self._scalar_data[filename])\n if step is None: step = self.default_step\n if step > (df_len - 1):\n for _ in range(step - df_len + 1):\n self._scalar_data[filename] = self._scalar_data[filename].append({}, ignore_index= True)\n if step > 1:\n print(colorize(\"You might forget to dump_scalar on a regular basis, this might cause the scalar data lost\", color= \"yellow\"))\n if not tag in self._scalar_data[filename]:\n self._scalar_data[filename][tag] = np.nan\n try:\n self._scalar_data[filename].loc[step][tag] = data\n except KeyError as e:\n print(colorize(\"KeyError: {}\".format(e), color= \"red\"))\n print(colorize(\"You might forget to dump_scalar for your scalar file, check demo script please\", color= \"yellow\"))\n exit(-1)\n # tensorboardX API\n if not self.tb_writer is None:\n self.tb_writer.add_scalar(tag, data, step)\n\n def log_scalar_batch(self, tag, data, step= None, filename= None, **kwargs):\n \"\"\" Record a batch of data with several statictis\n data: a array of numbers np.array is better\n \"\"\"\n if not isinstance(data, np.ndarray): data = np.array(data)\n if len(data) > 0:\n self.log_scalar(tag + \"/Average\", np.nanmean(data), step, filename, **kwargs)\n self.log_scalar(tag + \"/Std\", np.nanstd(data), step, filename, **kwargs)\n self.log_scalar(tag + \"/Max\", np.nanmax(data), step, filename, **kwargs)\n self.log_scalar(tag + \"/Min\", np.nanmin(data), step, filename, **kwargs)\n self.log_scalar(tag + \"/Len\", np.count_nonzero(~np.isnan(data)), step, filename, **kwargs)\n\n def dump_scalar(self, filename= None):\n \"\"\" In order to reflect the scalar data to the file and data loss due to program crash\n we write current scalar dataframe to csv file\n \"\"\"\n 
if filename is None: filename = self._scalar_default_file\n \n self._scalar_data[filename].to_csv(osp.join(self.log_dir, filename), index= False)\n self.log_text(\"Dumping scalar data for {}\".format(filename), len(self._scalar_data[filename]))\n print(tabulate( self._scalar_data[filename].iloc[-1].items() ))\n self._scalar_data[filename] = self._scalar_data[filename].append({}, ignore_index= True)\n\n def __old_dump_scalar(self, filename= None):\n \"\"\" Due to csv feature, you need to dump scalar to csv file. You can \n also specify the filename for which file you are dumping to\n \"\"\"\n if filename is None: filename = self._scalar_default_file\n\n current_reader = csv.reader(self._scalar_files[filename])\n # print current data\n if len(current_reader) == 0:\n text_step = 0\n else:\n text_step = len(current_reader) - 1\n self.log_text(\"Dumping scalar data for {}\".format(filename), text_step)\n print(tabulate( self._scalar_current_data[filename].items() ))\n # check current file keys, and determine whether to rewrite the entire file\n if len(current_reader) > 0:\n old_keys = next(current_reader)\n current_keys = list(self._scalar_current_data[filename].keys()) # a copy of keys\n del current_reader\n # checking keys\n key_unchanged = len(old_keys) == len(current_keys)\n for csv_k, data_k in zip(old_keys, current_keys):\n if csv_k != data_k:\n key_unchanged = False; break\n if key_unchanged:\n # keep writing\n current_writer = csv.DictWriter(self._scalar_files[filename], current_keys)\n current_writer.writerow(self._scalar_current_data[filename])\n self._scalar_files[filename].flush()\n else:\n # rewrite the entire csv file (hope this never comes)\n keys_to_add = []\n for key in old_keys: # if current_keys < old_keys\n if not key in current_keys:\n self._scalar_current_data[filename][key] = np.nan\n with open(osp.join(self.log_dir, self._TEMP_CSV_FILENAME), \"w\") as new_fd:\n old_reader = csv.DictReader(self._scalar_files[filename])\n new_writer = 
csv.DictWriter(new_fd, fieldnames= list(self._scalar_current_data[filename].keys()))\n # rewrite old data\n for row in old_reader:\n row = defaultdict(lambda:np.nan, **row) # if current_keys > old_keys\n new_writer.writerow(row)\n # write new data\n new_writer.writerow(self._scalar_current_data[filename])\n new_fd.flush()\n # replace file descriptor\n self._scalar_files[filename].close()\n os.remove(osp.join(self.log_dir, filename)) # NOTE: currently, `filename` is invalid filename\n os.rename(\n osp.join(self.log_dir, self._TEMP_CSV_FILENAME),\n osp.join(self.log_dir, filename),\n )\n self._scalar_files[filename] = open(osp.join(self.log_dir, filename))\n else:\n # new file, write directly\n del current_reader\n file_writer = csv.DictWriter(self._scalar_files[filename], fieldnames= list(self._scalar_files[filename].keys()))\n file_writer.writeheader()\n file_writer.writerow(self._scalar_current_data[filename])\n # clear out current data (buffer)\n for k in self._scalar_current_data[filename].keys():\n self._scalar_current_data[filename][k] = np.nan\n \n def log_image(self, tag, data, step= None, with_prefix= True, **kwargs):\n \"\"\" NOTE: data must be (H, W) or (3, H, W) or (4, H, W) from 0-255 uint8\n \"\"\"\n mkdir_p(osp.join(self.log_dir, \"image\"))\n if with_prefix:\n for p in self._image_prefix:\n tag = p + tag\n if step is None: step = self.default_step\n filename = osp.join(self.log_dir, \"image\", \"{}-{}.png\".format(tag, step))\n if len(data.shape) == 3:\n imageio.imwrite(filename, np.transpose(data, (1,2,0)), format= \"PNG\")\n else:\n imageio.imwrite(filename, data, format= \"PNG\")\n if not self.tb_writer is None:\n self.tb_writer.add_image(tag, data, step)\n\n def log_gif(self, tag, data, step= None, duration= 0.1, with_prefix= True, **kwargs):\n \"\"\" record a series of image as gif into file\n NOTE: data must be a sequence of nparray (H, W) or (3, H, W) or (4, H, W) from 0-255 uint8\n \"\"\"\n mkdir_p(osp.join(self.log_dir, \"gif\"))\n if 
with_prefix:\n for p in self._gif_prefix:\n tag = p + tag\n if step is None: step = self.default_step\n filename = osp.join(self.log_dir, \"gif\", \"{}-{}.gif\".format(tag, step))\n if isinstance(data, np.ndarray) or (len(data) > 0 and len(data[0].shape)) == 3:\n imageio.mimwrite(filename, data, format= \"GIF\", duration= duration)\n else:\n imageio.mimwrite(filename, data, format= \"GIF\", duration= duration)\n # TensorboardX does not support this yet\n\n def dump_data(self):\n \"\"\" dump all default data handler, and increase default_step by 1\n \"\"\"\n self.default_step += 1\n self.dump_scalar()\n def dump(self):\n return self.dump_data()\n\n def set_step(self, step):\n self.default_step = step\n\n def __del__(self):\n try:\n for _, v in self._text_files.items():\n v.close()\n except:\n print(colorize(\"Exceptions when closing text logger\", color= \"yellow\"))\n try:\n for f, d in self._scalar_data.items():\n d.to_csv(osp.join(self.log_dir, f), index= False)\n except:\n print(colorize(\"Exceptions when closing scalar logger\", color= \"yellow\"))\n try:\n if not self.tb_writer is None:\n self.tb_writer.close()\n except:\n print(colorize(\"Exceptions when closing tensorboardX writer\", color= \"yellow\"))\n\n # >>>>>>>>> The followings are APIs for other experiment platforms <<<<<<<<\n def _deprecated_warn(self):\n print(colorize(\"You are using dereprecated API of exptools logger\", color= \"yellow\"))\n def __getattr__(self, name: str):\n if name == \"_tb_writer\":\n self._deprecated_warn()\n return self.tb_writer\n else:\n super(Logger, self).__getattr__(self, name)\n @contextmanager\n def tabular_prefix(self, key):\n self.push_scalar_prefix(key)\n yield\n self.pop_scalar_prefix()\n def record_tabular(self, key, val, step= None):\n self._deprecated_warn()\n return self.log_scalar(key, val, step)\n def record_tabular_misc_stat(self, key, val, step= None):\n self._deprecated_warn()\n return self.log_scalar_batch(key, val, step)\n def dump_tabular(self, *args, 
**kwargs):\n self._deprecated_warn()\n return self.dump_data()\n def log(self, data, step= 0, *args, **kwargs):\n self._deprecated_warn()\n return self.log_text(data, step)\n def record_image(self, *args, **kwargs):\n self._deprecated_warn()\n return self.log_image(*args, **kwargs)\n def record_gif(self, *args, **kwargs):\n self._deprecated_warn()\n return self.log_gif(*args, **kwargs)\n def set_iteration(self, itr):\n self._deprecated_warn()\n return self.set_step(itr)\n def set_snapshot_dir(self, *args):\n self._deprecated_warn()\n def get_snapshot_dir(self):\n self._deprecated_warn()\n return self.log_dir\n def set_snapshot_mode(self, mode):\n self._deprecated_warn()\n self._snapshot_mode = mode\n def get_snapshot_mode(self):\n self._deprecated_warn()\n return self._snapshot_mode\n def set_log_tabular_only(self, mode):\n self._deprecated_warn()\n def set_tf_summary_writter(self, *args, **kwargs):\n self._deprecated_warn()\n def save_itr_params(self, *args, **kwargs):\n self._deprecated_warn()\n\n \n \n \n\n\n\n" ]
[ [ "numpy.nanmax", "numpy.isnan", "numpy.nanmin", "pandas.DataFrame", "numpy.nanmean", "numpy.transpose", "numpy.nanstd", "numpy.array" ] ]
stjordanis/rlmeta
[ "f407c60362d8e46c48ff6fd180bde523a6eb7737" ]
[ "examples/atari/ppo/atari_ppo_model.py" ]
[ "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport rlmeta.core.remote as remote\n\nfrom rlmeta.agents.ppo.ppo_model import PPOModel\n\n\nclass AtariPPOModel(PPOModel):\n\n def __init__(self, action_dim: int) -> None:\n super().__init__()\n self.action_dim = action_dim\n\n layers = []\n layers.append(nn.Conv2d(4, 32, kernel_size=8, stride=4))\n layers.append(nn.ReLU())\n layers.append(nn.Conv2d(32, 64, kernel_size=4, stride=2))\n layers.append(nn.ReLU())\n layers.append(nn.Conv2d(64, 64, kernel_size=3, stride=1))\n layers.append(nn.ReLU())\n layers.append(nn.Flatten())\n layers.append(nn.Linear(3136, 512))\n layers.append(nn.ReLU())\n self.backbone = nn.Sequential(*layers)\n self.linear_p = nn.Linear(512, self.action_dim)\n self.linear_v = nn.Linear(512, 1)\n\n def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n x = obs.float() / 255.0\n h = self.backbone(x)\n p = self.linear_p(h)\n logpi = F.log_softmax(p, dim=-1)\n v = self.linear_v(h)\n return logpi, v\n\n @remote.remote_method(batch_size=128)\n def act(\n self, obs: torch.Tensor, deterministic_policy: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n device = next(self.parameters()).device\n\n with torch.no_grad():\n x = obs.to(device)\n d = deterministic_policy.to(device)\n logpi, v = self.forward(x)\n\n greedy_action = logpi.argmax(-1, keepdim=True)\n sample_action = logpi.exp().multinomial(1, replacement=True)\n action = torch.where(d, greedy_action, sample_action)\n logpi = logpi.gather(dim=-1, index=action)\n\n return action.cpu(), logpi.cpu(), v.cpu()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.functional.log_softmax", "torch.nn.Conv2d", "torch.nn.Flatten", "torch.nn.Linear", "torch.no_grad", "torch.where", "torch.nn.ReLU" ] ]
awtkns/openapi-perf
[ "63151ee87383efabea6d6b4fb5c05c6c621c7f0f" ]
[ "openapi_perf/core/results.py" ]
[ "from typing import Union, Any\n\nfrom pandas import DataFrame, read_csv\nfrom matplotlib.figure import Figure\n\nfrom ._types import TEST_RESULTS, FILE_PATH\nfrom .analysis import graphing\n\n\nclass PerfResults:\n def __init__(self, results: Union[DataFrame, TEST_RESULTS]) -> None:\n if type(results) is not DataFrame:\n self.results = DataFrame(results)\n else:\n self.results = results\n\n @staticmethod\n def from_csv(file_path: FILE_PATH) -> \"PerfResults\":\n df = read_csv(file_path, index_col=None)\n\n return PerfResults(df)\n\n def to_csv(self, file_path: FILE_PATH, **kwargs: Any) -> None:\n self.results.to_csv(file_path, index=False, **kwargs)\n\n def plot(self, show: bool = True) -> Figure:\n return graphing.generate_graphs(self.results, show)\n\n\nclass RegressionResults:\n def __init__(self, new: PerfResults, old: PerfResults) -> None:\n self.new = new\n self.old = old\n\n def plot(self, show: bool = True) -> Figure:\n return graphing.plot_regression(self.new.results, self.old.results, show)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
salt-die/Labyrinth
[ "772f7635101b7132df696332eaed2473bd460f17" ]
[ "labyrinth.py" ]
[ "# -*- coding: utf-8 -*\nfrom random import choice\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.graphics.texture import Texture\nfrom kivy.graphics import Rectangle\nfrom kivy.core.window import Window\nimport numpy as np\nimport networkx as nx\nfrom maze_gen import gen_maze, maze_to_array\n\nPLAYER_COLOR = np.array([.5, .5, 1], dtype=np.float32)\n\nclass Labyrinth_Game(Widget):\n def __init__(self, **kwargs):\n super(Labyrinth_Game, self).__init__(**kwargs)\n self.level = 0\n with self.canvas:\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self._new_level()\n self.bind(size=self._update, pos=self._update)\n self._keyboard = Window.request_keyboard(self._keyboard_closed, self)\n self._keyboard.bind(on_key_down=self._on_keyboard_down)\n\n def _update(self, *args):\n self.rect.size = self.size\n self.rect.pos = self.pos\n\n def _keyboard_closed(self):\n self._keyboard.unbind(on_key_down=self._on_keyboard_down)\n self._keyboard = None\n\n def _blit(self):\n maze_stack = np.dstack([self.maze_array]*3)\n maze_stack[tuple(self.player_loc)] = PLAYER_COLOR\n self.texture.blit_buffer(maze_stack[::-1].tobytes(), bufferfmt='float')\n self.canvas.ask_update()\n\n def _new_level(self):\n self.moves = 0\n self.level += 1\n self.maze_dim = [10 * self.level] * 2\n self.grid, self.maze = gen_maze(self.maze_dim)\n self.player_loc, self.maze_array = maze_to_array(self.maze,\n self.maze_dim)\n self.texture = Texture.create(size=self.maze_array.T.shape)\n self.texture.mag_filter = 'nearest'\n self.rect.texture = self.texture\n self._blit()\n\n def _on_keyboard_down(self, keyboard, keycode, text, modifiers):\n\n directions = { 'up' : (-1, 0),\n 'left' : ( 0, -1),\n 'right' : ( 0, 1),\n 'down' : ( 1, 0)}\n\n if keycode[1] not in directions:\n return True\n\n new_loc = self.player_loc + np.array(directions[keycode[1]])\n #Check if in-bounds and no walls block us\n if any(new_loc < 0) or not self.maze_array[tuple(new_loc)]:\n return True\n\n #Check if 
we've completed maze\n if any(new_loc == 2 * np.array(self.maze_dim)):\n self._new_level()\n return True\n\n #Everything checks out -- move player\n self.player_loc = new_loc\n self.moves += 1\n if self.moves % 2: #No walls can spawn on top of us\n for _ in range((self.level + 1) // 2): #More levels, more changes\n self._labyrinth_change()\n self._blit()\n return True\n\n def _labyrinth_change(self):\n if np.random.random() > .3: #30% chance to change maze after a move\n return\n\n def distance_to(node):\n return np.linalg.norm(self.player_loc - (2 * np.array(node) + 1))\n\n #Find a wall to remove -- equivalently, add an edge to our maze's tree\n random_node = choice([node for node in self.maze\n if distance_to(node) < 10])\n neighbors = [node for node in self.grid.neighbors(random_node)\n if node not in self.maze.neighbors(random_node)]\n if not neighbors:\n return\n neighbor = choice(neighbors)\n\n #Adding an edge to a tree creates a cycle\n self.maze.add_edge(random_node, neighbor)\n #So we can remove any edge from that cycle to get back to a tree\n new_wall = choice(nx.find_cycle(self.maze, random_node))\n #Removing an edge == adding a wall\n self.maze.remove_edge(*new_wall)\n #Our underlying graph is a tree again -- the maze is still solvable.\n\n removed_wall_loc = (i + j + 1 for i, j in zip(random_node, neighbor))\n new_wall_loc = (i + j + 1 for i, j in zip(*new_wall))\n self.maze_array[tuple(removed_wall_loc)] = 1\n self.maze_array[tuple(new_wall_loc)] = 0\n\n\nclass Labyrinth(App):\n def build(self):\n return Labyrinth_Game()\n\nif __name__ == '__main__':\n Labyrinth().run()\n" ]
[ [ "numpy.array", "numpy.random.random", "numpy.dstack" ] ]
s4sarath/tf-transformers
[ "f26d440a4de0557e0e481279bfd70a732aaa8825" ]
[ "src/tf_transformers/layers/layer_normalization.py" ]
[ "# Copyright 2019 The TensorFlow/tf_transformers Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras-based GPT2 Layer Normalization layer.\"\"\"\n# from __future__ import google_type_annotations\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\nfrom tf_transformers.utils import tf_utils\n\n\n@tf.keras.utils.register_keras_serializable(package=\"Text\")\nclass GPT2LayerNormalization(tf.keras.layers.Layer):\n \"\"\"Creates a GPT2 Layer Normalization.\n\n This layer creates a LayerNormalization as described in\n https://github.com/openai/gpt-2/blob/master/src/model.py#L28\n\n This layer can be used to Normalize to mean = 0, std = 1, then do a diagonal affine transform.\n\n Arguments:\n \"\"\"\n\n def __init__(\n self,\n initializer=\"constant\",\n beta_initializer=\"zeros\",\n gamma_initializer=\"ones\",\n axis=-1,\n epsilon=1e-5,\n **kwargs,\n ):\n # We need to have a default dtype of float32, since the inputs (which Keras\n # usually uses to infer the dtype) will always be int32.\n if \"dtype\" not in kwargs:\n kwargs[\"dtype\"] = \"float32\"\n\n super(GPT2LayerNormalization, self).__init__(**kwargs)\n self.beta_initializer = tf.keras.initializers.get(beta_initializer)\n self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)\n self.axis = 
axis\n self.epsilon = epsilon\n\n def build(self, input_shape):\n \"\"\"Implements build() for the layer.\"\"\"\n dimension_list = input_shape.as_list()\n\n if len(dimension_list) != 3:\n raise ValueError(\n \"GPT2LayerNormalization expects a 3-dimensional input tensor \" \"of shape [batch, sequence, width]\"\n )\n # seq_length = dimension_list[1]\n width = dimension_list[2]\n\n self.gamma = self.add_weight(\"gamma\", shape=[width], initializer=self.gamma_initializer)\n self.beta = self.add_weight(\"beta\", shape=[width], initializer=self.beta_initializer)\n\n super(GPT2LayerNormalization, self).build(input_shape)\n\n def call(self, inputs):\n \"\"\"Implements call() for the layer.\"\"\"\n\n u = tf.reduce_mean(inputs, axis=self.axis, keepdims=True)\n s = tf.reduce_mean(tf.square(inputs - u), axis=self.axis, keepdims=True)\n inputs = (inputs - u) * tf.math.rsqrt(s + self.epsilon)\n inputs = inputs * self.gamma + self.beta\n return inputs\n\n def get_config(self):\n config = {\n \"axis\": self.axis,\n \"epsilon\": self.epsilon,\n \"beta_initializer\": tf.keras.initializers.serialize(self.beta_initializer),\n \"gamma_initializer\": tf.keras.initializers.serialize(self.gamma_initializer),\n }\n base_config = super(GPT2LayerNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@tf.keras.utils.register_keras_serializable(package=\"Text\")\nclass T5LayerNormalization(tf.keras.layers.Layer):\n def __init__(self, initializer=\"constant\", beta_initializer=\"ones\", epsilon=1e-6, axis=-1, **kwargs):\n \"\"\"Construct a layernorm module in the T5 style\n No bias and no substraction of mean.\n \"\"\"\n super(T5LayerNormalization, self).__init__(**kwargs)\n self.beta_initializer = tf.keras.initializers.get(beta_initializer)\n self.variance_epsilon = epsilon\n self.axis = axis\n\n def build(self, input_shape):\n \"\"\"Build shared word embedding layer \"\"\"\n self.weight = self.add_weight(\"weight\", shape=(input_shape[-1],), 
initializer=self.beta_initializer)\n super(T5LayerNormalization, self).build(input_shape)\n\n def call(self, x):\n variance = tf.reduce_mean(tf.square(x), axis=self.axis, keepdims=True)\n x = x * tf.math.rsqrt(variance + self.variance_epsilon)\n return self.weight * x\n\n def get_config(self):\n config = {\n \"axis\": self.axis,\n \"epsilon\": self.variance_epsilon,\n \"beta_initializer\": tf.keras.initializers.serialize(self.beta_initializer),\n }\n base_config = super(T5LayerNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n" ]
[ [ "tensorflow.reduce_mean", "tensorflow.keras.initializers.serialize", "tensorflow.math.rsqrt", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.square", "tensorflow.keras.initializers.get" ] ]
saumyaborwankar/self_supervised_AHC
[ "ae8595c9e33383eacd1172917603c6cc4f34218e" ]
[ "services/generate_groundtruth_label_sequence.py" ]
[ "import os\nimport argparse\nimport numpy as np\nfrom pdb import set_trace as bp\n\ndef indices_with_intersecting_durs(seg_time_boundaries, rttm_bins, threshold):\n \n rttm_bins[:,1] += rttm_bins[:,0]\n\n intersect_values = np.minimum(seg_time_boundaries[1], rttm_bins[:,1]) - np.maximum(seg_time_boundaries[0], rttm_bins[:,0])\n return intersect_values, intersect_values > threshold\n\ndef generate_labels(segmentsfile, labelsfiledir, ground_truth_rttm, threshold):\n \n if not os.path.exists(labelsfiledir):\n os.makedirs(labelsfiledir, 0o777)\n \n \n print(\"\\t\\t Threshold for generating label is {}\".format(threshold))\n segments = np.genfromtxt(segmentsfile, dtype='str')\n utts = segments[:,0]\n segments = segments[:,1:]\n filenames = np.unique(segments[:,0])\n segment_boundaries =[]\n utts_filewise =[]\n for f in filenames:\n segment_boundaries.append((segments[:,1:][segments[:,0]==f]).astype(float))\n utts_filewise.append((utts[segments[:,0]==f])) \n \n gt_rttm = np.genfromtxt(ground_truth_rttm, dtype='str')\n rttm_idx = np.asarray([False,False,False,True,True,False,False,True,False, False])\n \n for i,f in enumerate(filenames):\n labelsfilepath = os.path.join(labelsfiledir, \"labels_{}\".format(f))\n if os.path.isfile(labelsfilepath):\n continue\n labels = open(labelsfilepath,'w')\n if i % 5 == 0:\n print(\"\\t\\t Generated labels for {} files\".format(i))\n \n\n labels_f = []\n flag = []\n \n rttm = gt_rttm[gt_rttm[:,1] == f] \n rttm = rttm[:, rttm_idx]\n \n for j in range(len(segment_boundaries[i])):\n _, label_idx = indices_with_intersecting_durs(segment_boundaries[i][j],rttm[:,0:2].astype(float), threshold)\n labels_f = rttm[label_idx][:,2]\n\n if np.sum(label_idx) > 2:\n label_f = np.unique(labels_f) \n\n elif np.sum(label_idx) == 0:\n intersect_values, label_idx = indices_with_intersecting_durs(segment_boundaries[i][j],rttm[:,0:2].astype(float),0)\n labels_f = rttm[np.argmax(intersect_values)][2]\n labels_f = np.array([labels_f])\n \n\n\n towrite= \"{} 
{}\\n\".format(utts_filewise[i][j], ' '.join(labels_f.tolist()))\n \n labels.writelines(towrite)\n \n \n labels.close()\n print('DONE with labels')\n\n\nif __name__==\"__main__\":\n\n \n default_dataset=\"callhome1\"\n threshold = 0.75\n default_segments = \"../lists/{}/tmp/segments\".format(default_dataset)\n default_gt_rttm = \"data/{}/rttm\".format(default_dataset)\n default_labels_dir = \"../ALL_CALLHOME_GROUND_LABELS/{}/threshold_{}\".format(default_dataset,threshold)\n\n print(\"In the label generation script...\")\n parser = argparse.ArgumentParser(description='Speaker Label generation for embeddings')\n parser.add_argument('--dataset', default=default_dataset, type=str, help='dataset', nargs='?')\n parser.add_argument('--segmentsfile', default=default_segments, type=str, metavar='PATH', help='path of the embedding segments file', nargs='?')\n parser.add_argument('--labelsfiledir', default=default_labels_dir, type=str, metavar='PATH', help='path of the labels file', nargs='?')\n parser.add_argument('--ground_truth_rttm', default=default_gt_rttm, type=str, metavar='PATH', help='path of the ground truth rttm file', nargs='?')\n parser.add_argument('--threshold', default=threshold, type=float, metavar='N', help='threshold duration to assign label')\n\n args = parser.parse_args()\n generate_labels(**vars(args))\n\n\n\n\n \n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.unique", "numpy.asarray", "numpy.genfromtxt", "numpy.argmax", "numpy.array", "numpy.sum" ] ]
easeml/snoopy
[ "bd087cbd512d2f1872fe156a6b0f0223a77c9419" ]
[ "snoopy/embedding/base.py" ]
[ "from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom time import time\nfrom typing import Callable, NamedTuple, Optional, Tuple\n\nimport numpy as np\nimport tensorflow as tf\nimport torch as pt\n\nfrom .._logging import get_logger\nfrom .._utils import get_num_splits, get_tf_device\nfrom ..custom_types import CacheType, DataType, DataWithInfo, EmbeddingSlice, Expandable2D\nfrom ..reader import UNKNOWN_LABEL\n\n_logger = get_logger(__name__)\n\nCPU_CACHE_NUM_BATCHES_IN_GPU = 20\n\n\nclass _ImageTransformationHelper:\n @staticmethod\n def _central_crop_with_resize(feature: tf.Tensor, required_image_size: Tuple[int, int]) -> tf.Tensor:\n converted_img = tf.image.convert_image_dtype(feature, dtype=tf.float32, saturate=False)\n shape = tf.shape(converted_img)\n min_dim = tf.minimum(shape[0], shape[1])\n cropped_img = tf.image.resize_with_crop_or_pad(converted_img, min_dim, min_dim)\n return tf.image.resize(cropped_img, required_image_size)\n\n @staticmethod\n def central_crop_with_resize_3_channels(feature: tf.Tensor, required_image_size: Tuple[int, int]) -> tf.Tensor:\n resized_img = _ImageTransformationHelper._central_crop_with_resize(feature, required_image_size)\n # For 1 channel, repeats 3 times; for 3 channels, repeats 1 time\n return tf.repeat(resized_img, 3 - tf.shape(resized_img)[2] + 1, axis=2)\n\n @staticmethod\n def raw_image_with_central_crop_and_resize(feature: tf.Tensor, required_image_size: Tuple[int, int]) -> tf.Tensor:\n resized_img = _ImageTransformationHelper._central_crop_with_resize(feature, required_image_size)\n return tf.reshape(resized_img, (1, -1))\n\n\nclass EmbeddingModel(ABC):\n @abstractmethod\n def move_to(self, device: pt.device) -> None:\n pass\n\n @property\n @abstractmethod\n def output_dimension(self) -> int:\n pass\n\n @abstractmethod\n def get_data_preparation_function(self) -> Callable:\n pass\n\n @abstractmethod\n def apply_embedding(self, features: np.ndarray) -> pt.Tensor:\n pass\n\n\nclass 
EmbeddingModelSpec(ABC):\n\n @abstractmethod\n def load(self, device: pt.device) -> EmbeddingModel:\n pass\n\n @property\n @abstractmethod\n def data_type(self) -> DataType:\n pass\n\n\nclass EmbeddingConfig(NamedTuple):\n embedding_model_spec: EmbeddingModelSpec\n batch_size: int\n\n # NOTE: prefetch size significantly increases RAM usage\n prefetch_size: int\n label_noise_amount: Optional[float] = None\n\n\n# TODO: A class that contain input_data and config + a method that accepts cache_type and device and returns\n# EmbeddingDataset\nclass EmbeddingDataset:\n def __init__(self, input_data: DataWithInfo, config: EmbeddingConfig, cache_type: CacheType, device: pt.device):\n # Check that parameters in config are valid\n assert config.batch_size > 0, f\"Batch size of specified {type(config)} must be a positive number!\"\n assert config.prefetch_size > 0, f\"Prefetch size of specified {type(config)} must be a positive number!\"\n assert config.label_noise_amount is None or 0.0 < config.label_noise_amount <= 1.0, \\\n f\"Label noise of specified {type(config)} must be in interval (0, 1]! 
For no noise, set it to None.\"\n\n # Initialize model\n self._embedding_model = config.embedding_model_spec.load(device)\n feature_fn = self._embedding_model.get_data_preparation_function()\n\n def label_alert_fn(feature: tf.Tensor, label: tf.Tensor):\n if label == UNKNOWN_LABEL:\n tf.py_function(\n func=lambda: _logger.error(f\"Label {UNKNOWN_LABEL} (unknown label) detected!\"),\n inp=[],\n Tout=[]\n )\n\n return feature, label\n\n def label_noise(label: tf.Tensor, num_labels: int, noise_level: float) -> tf.Tensor:\n if tf.random.uniform([1])[0] < noise_level:\n # First two parameters are irrelevant in this case!\n return tf.random.uniform_candidate_sampler(\n true_classes=[[0]], num_true=1, num_sampled=1, unique=True, range_max=num_labels\n ).sampled_candidates[0]\n else:\n return label\n\n def preparation_fn_no_label_noise(feature: tf.Tensor, label: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\n return feature_fn(feature), label\n\n def preparation_fn_with_label_noise(feature: tf.Tensor, label: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\n return feature_fn(feature), label_noise(label, input_data.num_labels, config.label_noise_amount)\n\n if config.label_noise_amount:\n preparation_fn = preparation_fn_with_label_noise\n else:\n preparation_fn = preparation_fn_no_label_noise\n\n # Prepare input data\n self._input_dataset = input_data.data \\\n .map(label_alert_fn) \\\n .map(preparation_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) \\\n .batch(config.batch_size) \\\n .prefetch(config.prefetch_size) \\\n .as_numpy_iterator()\n\n self._output_dimension = self._embedding_model.output_dimension\n self._apply_embedding = self._embedding_model.apply_embedding\n self._size = input_data.size\n self._batch_size = config.batch_size\n self._device = device\n\n # Cache data\n self._feature_cache = None\n self._label_cache = None\n\n # Set when 'prepare' is called\n self._cache_type = cache_type\n self._prepared = False\n\n @property\n def device(self) -> pt.device:\n 
return self._device\n\n @property\n def size(self) -> int:\n return self._size\n\n @property\n def batch_size(self) -> int:\n return self._batch_size\n\n @property\n def cache_type(self) -> CacheType:\n return self._cache_type\n\n def _compute_embedding(self, num_batches_to_return: Optional[int]) -> Optional[EmbeddingSlice]:\n if num_batches_to_return:\n result_size = self._batch_size * num_batches_to_return\n num_iters = num_batches_to_return\n else:\n result_size = self._size\n num_iters = get_num_splits(self._size, self._batch_size)\n\n embeddings_to_return = Expandable2D(result_size, self._output_dimension, dtype=pt.float32, device=self._device)\n labels_to_return = Expandable2D(result_size, 1, dtype=pt.int64, device=self._device)\n\n feature_retrieve_times = []\n for i in range(num_iters):\n try:\n start = time()\n current_features_np, current_labels_np = next(self._input_dataset)\n feature_retrieve_times.append(time() - start)\n except StopIteration:\n break\n\n # Copy tf.Tensor (CPU) to pt.Tensor(GPU)\n current_labels = pt.as_tensor(current_labels_np, dtype=pt.int64, device=self._device).view((-1, 1))\n\n # Apply embedding on mini-batch\n current_embeddings = self._apply_embedding(current_features_np)\n\n # Store embedding and labels\n embeddings_to_return.add(current_embeddings)\n labels_to_return.add(current_labels)\n\n if i % 100 == 0 and i > 0:\n _logger.debug(f\"Batches processed: {i}\")\n\n _logger.debug(f\"Average feature retrieve time was {np.mean(np.array(feature_retrieve_times)): .3f} seconds\")\n\n # Return relevant data (current batch could be smaller than batch size)\n if embeddings_to_return.size > 0:\n return EmbeddingSlice(embeddings_to_return.data, labels_to_return.data)\n\n else:\n return None\n\n def _prepare_device_cache(self) -> None:\n self._feature_cache, self._label_cache = self._compute_embedding(None)\n\n def _prepare_cpu_cache(self) -> None:\n global CPU_CACHE_NUM_BATCHES_IN_GPU\n features = Expandable2D(self._size, 
self._output_dimension, dtype=pt.float32)\n labels = Expandable2D(self._size, 1, dtype=pt.int64)\n\n num_splits = get_num_splits(self._size, self._batch_size * CPU_CACHE_NUM_BATCHES_IN_GPU)\n for split_index in range(num_splits):\n _logger.info(f\"Processing split: {split_index + 1}/{num_splits} on {get_tf_device(self._device)}\")\n result = self._compute_embedding(CPU_CACHE_NUM_BATCHES_IN_GPU)\n features.add(result.features.cpu())\n labels.add(result.labels.cpu())\n\n self._feature_cache = features.data\n self._label_cache = labels.data\n\n def prepare(self):\n if not self._prepared:\n if self._cache_type == CacheType.DEVICE:\n self._prepare_device_cache()\n\n elif self._cache_type == CacheType.CPU:\n self._prepare_cpu_cache()\n\n self._prepared = True\n\n def get_cache(self, start_index: int, end_index: int, copy_to_device: bool = False) -> Optional[EmbeddingSlice]:\n assert self._prepared, \"EmbeddingDataset was not prepared when use_cache was called!\"\n assert self._cache_type != CacheType.NONE, \"This method can only be used if cache is precomputed!\"\n assert 0 <= start_index <= self._size and 0 <= end_index <= self.size and start_index <= end_index, \\\n \"Provided indices are not valid!\"\n\n if end_index - start_index == 0:\n return None\n\n features_to_return = pt.narrow(self._feature_cache, 0, start_index, end_index - start_index)\n labels_to_return = pt.narrow(self._label_cache, 0, start_index, end_index - start_index)\n\n # Copy to the device on which embedding was computed\n if copy_to_device and features_to_return.device != self._device:\n features_to_return = features_to_return.to(self._device)\n labels_to_return = labels_to_return.to(self._device)\n\n return EmbeddingSlice(features_to_return, labels_to_return)\n\n def get_next(self, num_batches_to_return: int) -> Optional[EmbeddingSlice]:\n assert self._prepared, \"EmbeddingDataset was not prepared when get_next was called!\"\n assert self._cache_type == CacheType.NONE, \"This method can only be 
used if no cache is used!\"\n return self._compute_embedding(num_batches_to_return)\n\n def get_iterator(self, batches_per_iter) -> EmbeddingIterator:\n return EmbeddingIterator(self, batches_per_iter)\n\n\nclass EmbeddingDatasetsTuple(NamedTuple):\n train: EmbeddingDataset\n test: EmbeddingDataset\n\n\nclass EmbeddingIterator:\n def __init__(self, embedding_dataset: EmbeddingDataset, num_batches_per_iter: int):\n self._embedding_dataset = embedding_dataset\n self._embedding_dataset.prepare()\n\n # Embedding dataset properties\n self._size = embedding_dataset.size\n self._batch_size = embedding_dataset.batch_size\n self._cache_type = embedding_dataset.cache_type\n\n # Tracking where in dataset are we\n self._num_iters_done = 0\n self._num_iters_available = get_num_splits(self._size, self._batch_size * num_batches_per_iter)\n self._num_batches_per_iter = num_batches_per_iter\n self._start_index = 0\n self._end_index = 0\n\n def reset(self) -> None:\n assert self._cache_type != CacheType.NONE, \"Iterator cannot be reset if there is no cache!\"\n\n self._start_index = 0\n self._end_index = 0\n self._num_iters_done = 0\n\n @property\n def device(self) -> pt.device:\n return self._embedding_dataset.device\n\n @property\n def size(self) -> int:\n return self._size\n\n def has_next(self) -> bool:\n return self._num_iters_done < self._num_iters_available\n\n def next(self) -> EmbeddingSlice:\n assert self.has_next(), \"'.next()' was called on iterator that was iterated to the end!\"\n\n self._num_iters_done += 1\n\n if self._cache_type == CacheType.NONE:\n return self._embedding_dataset.get_next(self._num_batches_per_iter)\n\n else:\n self._start_index = min(self._end_index, self._size)\n self._end_index = min(self._start_index + self._batch_size * self._num_batches_per_iter, self._size)\n\n return self._embedding_dataset.get_cache(self._start_index, self._end_index)\n" ]
[ [ "tensorflow.image.resize_with_crop_or_pad", "tensorflow.shape", "tensorflow.minimum", "tensorflow.reshape", "tensorflow.random.uniform", "torch.narrow", "tensorflow.image.resize", "tensorflow.random.uniform_candidate_sampler", "tensorflow.image.convert_image_dtype", "numpy.array", "torch.as_tensor" ] ]
RishirajKanungo/semantic-segmentation-pytorch
[ "e74993792b4e4b737ca2c8b42e65427bcabe7bbb" ]
[ "models/resnet.py" ]
[ "import torch.nn as nn\nimport math\nfrom .utils import load_url\nfrom lib.nn import SynchronizedBatchNorm2d\nBatchNorm2d = SynchronizedBatchNorm2d\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet50', 'resnet101'] # resnet101 is coming soon!\n\n\nmodel_urls = {\n 'resnet18': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet18-imagenet.pth',\n 'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',\n 'resnet101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth'\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n 
self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 128\n super(ResNet, self).__init__()\n self.conv1 = conv3x3(3, 64, stride=2)\n self.bn1 = BatchNorm2d(64)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(64, 64)\n self.bn2 = BatchNorm2d(64)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv3 = conv3x3(64, 128)\n self.bn3 = BatchNorm2d(128)\n self.relu3 = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.relu1(self.bn1(self.conv1(x)))\n x = self.relu2(self.bn2(self.conv2(x)))\n x = self.relu3(self.bn3(self.conv3(x)))\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(load_url(model_urls['resnet18']))\n return model\n\n'''\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(load_url(model_urls['resnet34']))\n return model\n'''\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if True:\n # model.load_state_dict(load_url(model_urls['resnet50']), strict=False)\n\n 
model.load_state_dict(load_url(\"http://sceneparsing.csail.mit.edu/model/pytorch/ade20k-resnet50dilated-ppm_deepsup/encoder_epoch_20.pth\"), strict=False)\n return model\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(load_url(model_urls['resnet101']), strict=False)\n return model\n\n# def resnet152(pretrained=False, **kwargs):\n# \"\"\"Constructs a ResNet-152 model.\n#\n# Args:\n# pretrained (bool): If True, returns a model pre-trained on ImageNet\n# \"\"\"\n# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n# if pretrained:\n# model.load_state_dict(load_url(model_urls['resnet152']))\n# return model\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.ReLU" ] ]
Shrita10/Text-AI
[ "af4b55aa4378b0bd9ee0c1de27aa40070ddd8bd7" ]
[ "05 - PRODUCT TO LAUNCH/app.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 9 03:49:28 2021\n\n@author: Shrita\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\nimport re\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom nltk.tokenize import word_tokenize\nfrom doc3 import training_doc3\nfrom flask_restful import Resource, Api, reqparse\nfrom tensorflow.keras.models import load_model\n\nWORDS_USED = {}\nparser = reqparse.RequestParser()\n\napp = Flask(__name__)\napi = Api(app)\nmodel = load_model('mymodel.h5', compile = False)\n\ntokenizer = load_model('tokenizer.h5')\n\nclass WordList(Resource):\n def get(self):\n return WORDS_USED\n def post(self):\n parser.add_argument(\"word_used\")\n parser.add_argument(\"previous_words\")\n parser.add_argument(\"other_options\")\n args = parser.parse_args()\n word_id = int(max(WORDS_USED.keys())) + 1\n word_id = '%i' % word_id\n WORDS_USED[word_id] = {\n \"word_used\": args[\"word_used\"],\n \"previous_words\": args[\"previous_words\"],\n \"other_optins\": args[\"other_options\"],\n }\n return WORDS_USED[word_id], 201\n\n\nclass Word(Resource):\n def get(self, word_id):\n if word_id not in WORDS_USED:\n return \"Not found\", 404\n else:\n return WORDS_USED[word_id]\n \n def put(self, word_id):\n parser.add_argument(\"word_used\")\n parser.add_argument(\"previous_words\")\n parser.add_argument(\"other_options\")\n args = parser.parse_args()\n if word_id not in WORDS_USED:\n return \"Record not found\", 404\n else:\n word = WORDS_USED[word_id]\n word[\"word_used\"] = args[\"word_used\"] if args[\"word_used\"] is not None else word[\"word_used\"]\n word[\"previous_words\"] = args[\"previous_words\"] if args[\"previous_words\"] is not None else word[\"previous words\"]\n word[\"other_options\"] = args[\"other_options\"] if args[\"other_options\"] is not None else word[\"other_options\"]\n 
return word, 200\n \n def delete(self, word_id):\n if word_id not in WORDS_USED:\n return \"Not found\", 404\n else:\n del WORDS_USED[word_id]\n return '', 204\n\napi.add_resource(WordList, '/words/')\napi.add_resource(Word, '/words/<word_id>')\n\ncounter = 1\n@app.route('/')\n\ndef home():\n return render_template('html1.html')\n\n@app.route('/predict2',methods = ['POST'])\n\ndef predict2():\n global counter\n global WORDS_USED\n input_text = request.form['ttext']\n cleaned = re.sub(r'\\W+', ' ', training_doc3).lower()\n tokens = word_tokenize(cleaned)\n train_len = 1\n text_sequences = [] \n for i in range(train_len,len(tokens)):\n text_sequences.append(tokens[i])\n sequences = {}\n count = 1\n for i in range(len(tokens)):\n if tokens[i] not in sequences:\n sequences[tokens[i]] = count\n count += 1\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(text_sequences)\n input_text = input_text.strip().lower()\n encoded_text = tokenizer.texts_to_sequences([input_text])[0]\n pad_encoded = pad_sequences([encoded_text], maxlen=3, truncating='pre')\n list_of_words =[]\n for i in (model.predict(pad_encoded)[0]).argsort()[-3:][::-1]:\n pred_word = tokenizer.index_word[i]\n list_of_words.append(pred_word)\n first_word = list_of_words[0]\n second_word = list_of_words[1]\n third_word = list_of_words[2]\n WORDS_USED[str(counter)] = {'word_used': None, 'previous_words': request.form['ttext'], 'other_options': [first_word, second_word, third_word]}\n counter += 1\n return render_template('html1.html',prediction_text1 = first_word , prediction_text2 = second_word, prediction_text3 = third_word)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n \n \n \n \n \n \n " ]
[ [ "tensorflow.keras.preprocessing.text.Tokenizer", "tensorflow.keras.models.load_model", "tensorflow.keras.preprocessing.sequence.pad_sequences" ] ]
PatrickFeng/RPNet
[ "83d6003950553e3563b4318d44d91294bac34821" ]
[ "net.py" ]
[ "import torch\nimport torch.nn as nn\nimport math\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom layers.slice_pool_layer.slice_pool_layer import *\nfrom layers.slice_unpool_layer.slice_unpool_layer import *\n\n\n\n\nclass RSNet(nn.Module):\n def __init__(self, pool_type, num_slice=[None, None, None]):\n super(RSNet, self).__init__()\n # input: B, 1, N, 3\n \n #-- conv block 1\n self.conv_1 = nn.Conv2d( 1, 64, kernel_size=(1,9), stride=(1,1) )\n self.bn_1 = nn.BatchNorm2d(64)\n\n self.conv_2 = nn.Conv2d( 64, 64, kernel_size=(1,1), stride=(1,1) )\n self.bn_2 = nn.BatchNorm2d(64)\n \n self.conv_3 = nn.Conv2d( 64, 64, kernel_size=(1,1), stride=(1,1) )\n self.bn_3 = nn.BatchNorm2d(64)\n \n #-- RNN block\n num_slice_x, num_slice_y, num_slice_z = num_slice\n self.pool_x = SP(pool_type, num_slice_x)\n self.pool_y = SP(pool_type, num_slice_y)\n self.pool_z = SP(pool_type, num_slice_z)\n \n self.rnn_type = 'GRU'\n self.rnn_hidden_sz_list = [256, 128, 64, 64, 128, 256]\n \n self.rnn_x_1 = nn.GRU(64, self.rnn_hidden_sz_list[0], 1, bidirectional=True)\n self.rnn_x_2 = nn.GRU(512, self.rnn_hidden_sz_list[1], 1, bidirectional=True)\n self.rnn_x_3 = nn.GRU(256, self.rnn_hidden_sz_list[2], 1, bidirectional=True)\n self.rnn_x_4 = nn.GRU(128, self.rnn_hidden_sz_list[3], 1, bidirectional=True)\n self.rnn_x_5 = nn.GRU(128, self.rnn_hidden_sz_list[4], 1, bidirectional=True)\n self.rnn_x_6 = nn.GRU(256, self.rnn_hidden_sz_list[5], 1, bidirectional=True)\n \n \n self.rnn_y_1 = nn.GRU(64, self.rnn_hidden_sz_list[0], 1, bidirectional=True)\n self.rnn_y_2 = nn.GRU(512, self.rnn_hidden_sz_list[1], 1, bidirectional=True)\n self.rnn_y_3 = nn.GRU(256, self.rnn_hidden_sz_list[2], 1, bidirectional=True)\n self.rnn_y_4 = nn.GRU(128, self.rnn_hidden_sz_list[3], 1, bidirectional=True)\n self.rnn_y_5 = nn.GRU(128, self.rnn_hidden_sz_list[4], 1, bidirectional=True)\n self.rnn_y_6 = nn.GRU(256, self.rnn_hidden_sz_list[5], 1, bidirectional=True)\n \n self.rnn_z_1 = 
nn.GRU(64, self.rnn_hidden_sz_list[0], 1, bidirectional=True)\n self.rnn_z_2 = nn.GRU(512, self.rnn_hidden_sz_list[1], 1, bidirectional=True)\n self.rnn_z_3 = nn.GRU(256, self.rnn_hidden_sz_list[2], 1, bidirectional=True)\n self.rnn_z_4 = nn.GRU(128, self.rnn_hidden_sz_list[3], 1, bidirectional=True)\n self.rnn_z_5 = nn.GRU(128, self.rnn_hidden_sz_list[4], 1, bidirectional=True)\n self.rnn_z_6 = nn.GRU(256, self.rnn_hidden_sz_list[5], 1, bidirectional=True)\n \n #-- conv block 3\n self.un_pool_x = SU()\n self.un_pool_y = SU()\n self.un_pool_z = SU()\n \n self.conv_6 = nn.Conv2d( 512, 512, kernel_size=(1,1), stride=(1,1) )\n self.bn_6 = nn.BatchNorm2d(512)\n \n self.conv_7 = nn.Conv2d( 512, 256, kernel_size=(1,1), stride=(1,1) )\n self.bn_7 = nn.BatchNorm2d(256)\n \n self.dp = nn.Dropout(p=0.3)\n \n self.conv_8 = nn.Conv2d( 256, 13, kernel_size=(1,1), stride=(1,1) )\n \n self.relu = nn.ReLU(inplace=True)\n \n self._initialize_weights()\n\n\n def forward(self, x, x_slice_idx, y_slice_idx, z_slice_idx, hidden_list):\n \n num_batch, _, num_points, _ = x.size()\n \n x_hidden_1, x_hidden_2, x_hidden_3, x_hidden_4, x_hidden_5, x_hidden_6, y_hidden_1, y_hidden_2, y_hidden_3, y_hidden_4, y_hidden_5, y_hidden_6, z_hidden_1, z_hidden_2, z_hidden_3, z_hidden_4, z_hidden_5, z_hidden_6 = hidden_list\n \n #-- conv block 1\n conv_1 = self.relu( self.bn_1( self.conv_1(x) ) ) # num_batch, 64, num_points, 1\n conv_2 = self.relu( self.bn_2( self.conv_2(conv_1) ) ) # num_batch, 64, num_points, 1\n conv_3 = self.relu( self.bn_3( self.conv_3(conv_2) ) ) # num_batch, 64, num_points, 1\n \n #-- RNN block\n x_pooled = self.pool_x( conv_3, x_slice_idx ) # num_batch, 64, numSlices, 1\n y_pooled = self.pool_y( conv_3, y_slice_idx )\n z_pooled = self.pool_z( conv_3, z_slice_idx )\n \n x_pooled = x_pooled[:,:,:,0].permute( 2, 0, 1 ).contiguous()\n y_pooled = y_pooled[:,:,:,0].permute( 2, 0, 1 ).contiguous()\n z_pooled = z_pooled[:,:,:,0].permute( 2, 0, 1 ).contiguous()\n \n x_rnn_1, _ = 
self.rnn_x_1( x_pooled, x_hidden_1 )\n x_rnn_2, _ = self.rnn_x_2( x_rnn_1, x_hidden_2 )\n x_rnn_3, _ = self.rnn_x_3( x_rnn_2, x_hidden_3 )\n x_rnn_4, _ = self.rnn_x_4( x_rnn_3, x_hidden_4 )\n x_rnn_5, _ = self.rnn_x_5( x_rnn_4, x_hidden_5 )\n x_rnn_6, _ = self.rnn_x_6( x_rnn_5, x_hidden_6 )\n \n y_rnn_1, _ = self.rnn_y_1( y_pooled, y_hidden_1 )\n y_rnn_2, _ = self.rnn_y_2( y_rnn_1, y_hidden_2 )\n y_rnn_3, _ = self.rnn_y_3( y_rnn_2, y_hidden_3 )\n y_rnn_4, _ = self.rnn_y_4( y_rnn_3, y_hidden_4 )\n y_rnn_5, _ = self.rnn_y_5( y_rnn_4, y_hidden_5 )\n y_rnn_6, _ = self.rnn_y_6( y_rnn_5, y_hidden_6 )\n \n z_rnn_1, _ = self.rnn_z_1( z_pooled, z_hidden_1 )\n z_rnn_2, _ = self.rnn_z_2( z_rnn_1, z_hidden_2 )\n z_rnn_3, _ = self.rnn_z_3( z_rnn_2, z_hidden_3 )\n z_rnn_4, _ = self.rnn_z_4( z_rnn_3, z_hidden_4 )\n z_rnn_5, _ = self.rnn_z_5( z_rnn_4, z_hidden_5 )\n z_rnn_6, _ = self.rnn_z_6( z_rnn_5, z_hidden_6 )\n \n #-- uppooling\n x_rnn_6 = x_rnn_6.permute( 1, 2, 0 ).contiguous()\n x_rnn_6 = x_rnn_6.view( x_rnn_6.size(0), x_rnn_6.size(1), x_rnn_6.size(2), 1 )\n \n y_rnn_6 = y_rnn_6.permute( 1, 2, 0 ).contiguous()\n y_rnn_6 = y_rnn_6.view( y_rnn_6.size(0), y_rnn_6.size(1), y_rnn_6.size(2), 1 )\n \n z_rnn_6 = z_rnn_6.permute( 1, 2, 0 ).contiguous()\n z_rnn_6 = z_rnn_6.view( z_rnn_6.size(0), z_rnn_6.size(1), z_rnn_6.size(2), 1 )\n \n x_rnn_6 = self.un_pool_x( x_rnn_6, x_slice_idx )\n y_rnn_6 = self.un_pool_y( y_rnn_6, y_slice_idx )\n z_rnn_6 = self.un_pool_z( z_rnn_6, z_slice_idx )\n \n #-- conv block 3\n rnn = x_rnn_6 + y_rnn_6 + z_rnn_6\n \n conv_6 = self.relu( self.bn_6( self.conv_6(rnn) ) ) # num_batch, 512, num_points, 1\n conv_7 = self.relu( self.bn_7( self.conv_7(conv_6) ) ) # num_batch, 256, num_points, 1\n droped = self.dp(conv_7)\n conv_8 = self.conv_8(droped)\n \n return conv_8\n \n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def init_hidden(self, bsz = 1):\n weight = next(self.parameters()).data\n hidden_list = [ ]\n for i in range(3):\n for hid_sz in self.rnn_hidden_sz_list:\n if self.rnn_type == 'LSTM':\n hidden_list.append( (Variable(weight.new(2, bsz, hid_sz).zero_()),\n Variable(weight.new(2, bsz, hid_sz).zero_())) )\n else:\n hidden_list.append( Variable(weight.new(2, bsz, hid_sz).zero_()) )\n\n return hidden_list\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.nn.Dropout", "torch.nn.GRU", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
mbrenohd/Python-Baseball
[ "4984392ed520856109ff675a8c2e227cf5acd3c1" ]
[ "stats/offense.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Task 1\nfrom data import games\n\nplays = games[games['type'] == 'play']\n\nplays.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches', 'event', 'game_id', 'year']\n\n# Task 2\nhits = plays.loc[plays['event'].str.contains('^(?:S(?!B)|D|T|HR)'), ['inning', 'event']]\n\n# Task 3\nhits.loc[:, 'inning'] = pd.to_numeric(hits.loc[:, 'inning'])\n\n# Task 4\nreplacements = {r'^S(.*)': 'single', r'^D(.*)': 'double', r'^T(.*)': 'triple', r'^HR(.*)': 'hr'}\n\n# Task 5\nhit_type = hits['event'].replace(replacements, regex=True)\n\n# Task 6\nhits = hits.assign(hit_type=hit_type)\n\n# Task 7\nhits = hits.groupby(['inning', 'hit_type']).size().reset_index(name='count')\n\n# Task 8\nhits['hit_type'] = pd.Categorical(hits['hit_type'], ['single', 'double', 'triple', 'hr'])\n\n# Task 9\nhits = hits.sort_values(['inning', 'hit_type'])\n\n# Task 10\nhits = hits.pivot(index='inning', columns='hit_type', values='count')\n\n# Task 11\nhits.plot.bar(stacked=True)\nplt.show()\n\n" ]
[ [ "pandas.Categorical", "matplotlib.pyplot.show", "pandas.to_numeric" ] ]
icecube/simweights
[ "f8a7c35a8f54a7cca17ff1e1cd73cc3424b57980" ]
[ "examples/nugen_plot.py" ]
[ "import pandas as pd\nimport pylab as plt\n\nimport simweights\n\n# load the hdf5 file that we just created using pandas\nhdffile = pd.HDFStore(\"Level2_IC86.2016_NuMu.021217.hdf5\", \"r\")\n\n# instantiate the weighter object by passing the pandas file to it\nweighter = simweights.NuGenWeighter(hdffile, nfiles=10)\n\n# create an function to represent the IceCube northern track limit\n# Note that the units are GeV^-1 * cm^-2 * sr^-1 * s^-1 per particle type\ndef northern_track(energy):\n return 1.44e-18 / 2 * (energy/1e5)**-2.2\n\n# get the weights by passing the flux to the weighter\nweights = weights = weighter.get_weights(northern_track)\n\n# print some info about the weighting object\nprint(weighter.tostring(northern_track))\n\n# create equal spaced bins in log space\nbins = plt.geomspace(1e2, 1e8, 50)\n\n# get energy of the primary cosmic-ray from `PolyplopiaPrimary`\nprimary_energy = weighter.get_column(\"PolyplopiaPrimary\", \"energy\")\n\n# histogram the primary energy with the weights\nplt.hist(primary_energy, weights=weights, bins=bins)\n\n# make the plot look good\nplt.loglog()\nplt.xlabel(\"Primary Energy [GeV]\")\nplt.ylabel(\"Event Rate [Hz]\")\nplt.xlim(bins[0], bins[-1])\nplt.ylim(1e-8, 2e-6)\nplt.tight_layout()\nplt.show()\n" ]
[ [ "pandas.HDFStore" ] ]
Slyne/wenet
[ "de74d8acf40f47a3c503bff5cf4ed6808a9dad14" ]
[ "wenet/transformer/subsampling.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Mobvoi Inc. All Rights Reserved.\n# Author: di.wu@mobvoi.com (DI WU)\n\"\"\"Subsampling layer definition.\"\"\"\n\nfrom typing import Tuple, Union\n\nimport torch\n\n\nclass BaseSubsampling(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.right_context = 0\n self.subsampling_rate = 1\n\n def position_encoding(self, offset: Union[int, torch.Tensor],\n size: int) -> torch.Tensor:\n return self.pos_enc.position_encoding(offset, size)\n\n\nclass LinearNoSubsampling(BaseSubsampling):\n \"\"\"Linear transform the input without subsampling\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n def __init__(self, idim: int, odim: int, dropout_rate: float,\n pos_enc_class: torch.nn.Module):\n \"\"\"Construct an linear object.\"\"\"\n super().__init__()\n self.out = torch.nn.Sequential(\n torch.nn.Linear(idim, odim),\n torch.nn.LayerNorm(odim, eps=1e-5),\n torch.nn.Dropout(dropout_rate),\n )\n self.pos_enc = pos_enc_class\n self.right_context = 0\n self.subsampling_rate = 1\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Input x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: linear input tensor (#batch, time', odim),\n where time' = time .\n torch.Tensor: linear input mask (#batch, 1, time'),\n where time' = time .\n\n \"\"\"\n x = self.out(x)\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask\n\n\nclass Conv2dSubsampling4(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n def __init__(self, idim: int, odim: int, dropout_rate: float,\n 
pos_enc_class: torch.nn.Module):\n \"\"\"Construct an Conv2dSubsampling4 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim))\n self.pos_enc = pos_enc_class\n # The right context for every conv layer is computed by:\n # (kernel_size - 1) * frame_rate_of_this_layer\n self.subsampling_rate = 4\n # 6 = (3 - 1) * 1 + (3 - 1) * 2\n self.right_context = 6\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 4.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 4.\n torch.Tensor: positional encoding\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c=1, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2]\n\n\nclass Conv2dSubsampling6(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/6 length).\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n \"\"\"\n def __init__(self, idim: int, odim: int, dropout_rate: float,\n pos_enc_class: torch.nn.Module):\n \"\"\"Construct an Conv2dSubsampling6 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 5, 3),\n torch.nn.ReLU(),\n )\n self.linear = torch.nn.Linear(odim * (((idim - 1) 
// 2 - 2) // 3),\n odim)\n self.pos_enc = pos_enc_class\n # 10 = (3 - 1) * 1 + (5 - 1) * 2\n self.subsampling_rate = 6\n self.right_context = 10\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 6.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 6.\n torch.Tensor: positional encoding\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-4:3]\n\n\nclass Conv2dSubsampling8(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/8 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n def __init__(self, idim: int, odim: int, dropout_rate: float,\n pos_enc_class: torch.nn.Module):\n \"\"\"Construct an Conv2dSubsampling8 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.linear = torch.nn.Linear(\n odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim)\n self.pos_enc = pos_enc_class\n self.subsampling_rate = 8\n # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4\n self.right_context = 14\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask 
(torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 8.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 8.\n torch.Tensor: positional encoding\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2]\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.nn.ReLU" ] ]
EnriqueNueve/BirdCall_Project2021
[ "2a0d6a5ae4706f9f8461f50b4acf88de54ed9064" ]
[ "feature_extraction/nn_v2.py" ]
[ "import numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.layers import Dense, Dropout\n\n# load the features\nX = np.load('feat_with_fourier_tempogram.npy')\ny = np.load('label_with_fourier_tempogram.npy').ravel()\n\nnum_classes = np.max(y, axis=0)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)\nprint(X_train.shape)\n\n# Build the Neural Network\nmodel = Sequential()\nmodel.add(Dense(512, input_shape=(380,)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\nprint(model.summary())\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n# Convert label to onehot\ny_train = keras.utils.to_categorical(y_train-1, num_classes=num_classes)\ny_test = keras.utils.to_categorical(y_test-1, num_classes=num_classes)\n\n# Train and test\nmodel.fit(X_train, y_train, epochs=1000, batch_size=64)\nscore, acc = model.evaluate(X_test, y_test, batch_size=32)\nprint('Test score:', score)\nprint('Test accuracy:', acc)\n" ]
[ [ "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Dense", "sklearn.model_selection.train_test_split", "numpy.max", "numpy.load", "tensorflow.keras.models.Sequential", "tensorflow.keras.utils.to_categorical" ] ]
xub-alt/MuseumAR
[ "063ee437425a588d592a004872ece6272d16ee41", "063ee437425a588d592a004872ece6272d16ee41" ]
[ "yolo_utils/torch_utils.py", "yolo_utils/plots.py" ]
[ "# YOLOv5 PyTorch yolo_utils\n\nimport datetime\nimport logging\nimport math\nimport os\nimport platform\nimport subprocess\nimport time\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\ntry:\n import thop # for FLOPS computation\nexcept ImportError:\n thop = None\nlogger = logging.getLogger(__name__)\n\n\n@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n \"\"\"\n Decorator to make all processes in distributed training wait for each local_master to do something.\n \"\"\"\n if local_rank not in [-1, 0]:\n torch.distributed.barrier()\n yield\n if local_rank == 0:\n torch.distributed.barrier()\n\n\ndef init_torch_seeds(seed=0):\n # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n torch.manual_seed(seed)\n if seed == 0: # slower, more reproducible\n cudnn.benchmark, cudnn.deterministic = False, True\n else: # faster, less reproducible\n cudnn.benchmark, cudnn.deterministic = True, False\n\n\ndef date_modified(path=__file__):\n # return human-readable file modification date, i.e. '2021-3-26'\n t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)\n return f'{t.year}-{t.month}-{t.day}'\n\n\ndef git_describe(path=Path(__file__).parent): # path must be a directory\n # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe\n s = f'git -C {path} describe --tags --long --always'\n try:\n return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]\n except subprocess.CalledProcessError as e:\n return '' # not a git repository\n\n\ndef select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n n = torch.cuda.device_count()\n if n > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(device.split(',') if device else range(n)):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')\n\n\ndef time_synchronized():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()\n\n\ndef profile(x, ops, n=100, device=None):\n # profile a pytorch module or list of modules. 
Example usage:\n # x = torch.randn(16, 3, 640, 640) # input\n # m1 = lambda x: x * torch.sigmoid(x)\n # m2 = nn.SiLU()\n # profile(x, [m1, m2], n=100) # profile speed over 100 iterations\n\n device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n x = x.to(device)\n x.requires_grad = True\n print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')\n print(f\"\\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}\")\n for m in ops if isinstance(ops, list) else [ops]:\n m = m.to(device) if hasattr(m, 'to') else m # device\n m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type\n dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward\n try:\n flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS\n except:\n flops = 0\n\n for _ in range(n):\n t[0] = time_synchronized()\n y = m(x)\n t[1] = time_synchronized()\n try:\n _ = y.sum().backward()\n t[2] = time_synchronized()\n except: # no backward method\n t[2] = float('nan')\n dtf += (t[1] - t[0]) * 1000 / n # ms per op forward\n dtb += (t[2] - t[1]) * 1000 / n # ms per op backward\n\n s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'\n s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'\n p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters\n print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')\n\n\ndef is_parallel(model):\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef intersect_dicts(da, db, exclude=()):\n # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values\n return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}\n\n\ndef initialize_weights(model):\n for m in 
model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:\n m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n # Finds layer indices matching module class 'mclass'\n return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef sparsity(model):\n # Return global model sparsity\n a, b = 0., 0.\n for p in model.parameters():\n a += p.numel()\n b += (p == 0).sum()\n return b / a\n\n\ndef prune(model, amount=0.3):\n # Prune model to requested global sparsity\n import torch.nn.utils.prune as prune\n print('Pruning model... ', end='')\n for name, m in model.named_modules():\n if isinstance(m, nn.Conv2d):\n prune.l1_unstructured(m, name='weight', amount=amount) # prune\n prune.remove(m, 'weight') # make permanent\n print(' %.3g global sparsity' % sparsity(model))\n\n\ndef fuse_conv_and_bn(conv, bn):\n # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n groups=conv.groups,\n bias=True).requires_grad_(False).to(conv.weight.device)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n # prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv\n\n\ndef model_info(model, verbose=False, img_size=640):\n # Model information. 
img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPS\n from thop import profile\n stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32\n img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input\n flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS\n img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float\n fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS\n except (ImportError, Exception):\n fs = ''\n\n logger.info(f\"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")\n\n\ndef load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = torchvision.models.__dict__[name](pretrained=True)\n\n # ResNet model properties\n # input_size = [3, 224, 224]\n # input_space = 'RGB'\n # input_range = [0, 1]\n # mean = [0.485, 0.456, 0.406]\n # std = [0.229, 0.224, 0.225]\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model\n\n\ndef scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)\n # scales img(bs,3,y,x) by ratio constrained to 
gs-multiple\n if ratio == 1.0:\n return img\n else:\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean\n\n\ndef copy_attr(a, b, include=(), exclude=()):\n # Copy attributes from b to a, options to only include [...] and to exclude [...]\n for k, v in b.__dict__.items():\n if (len(include) and k not in include) or k.startswith('_') or k in exclude:\n continue\n else:\n setattr(a, k, v)\n\n\nclass ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, updates=0):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA\n # if next(model.parameters()).device.type != 'cpu':\n # self.ema.half() # FP16 EMA\n self.updates = updates # number of EMA updates\n self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if 
v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):\n # Update EMA attributes\n copy_attr(self.ema, model, include, exclude)\n", "# Plotting yolo_utils\n\nimport glob\nimport math\nimport os\nimport random\nfrom copy import copy\nfrom pathlib import Path\n\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport torch\nimport yaml\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom yolo_utils.general import xywh2xyxy, xyxy2xywh\nfrom yolo_utils.metrics import fitness\n\n# Settings\nmatplotlib.rc('font', **{'size': 11})\nmatplotlib.use('Agg') # for writing to files only\n\n\nclass Colors:\n # Ultralytics color palette https://ultralytics.com/\n def __init__(self):\n # hex = matplotlib.colors.TABLEAU_COLORS.values()\n hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',\n '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')\n self.palette = [self.hex2rgb('#' + c) for c in hex]\n self.n = len(self.palette)\n\n def __call__(self, i, bgr=False):\n c = self.palette[int(i) % self.n]\n return (c[2], c[1], c[0]) if bgr else c\n\n @staticmethod\n def hex2rgb(h): # rgb order (PIL)\n return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))\n\n\ncolors = Colors() # create instance for 'from yolo_utils.plots import colors'\n\n\ndef hist2d(x, y, n=100):\n # 2d histogram used in labels.png and evolve.png\n xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)\n hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))\n xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)\n yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)\n return np.log(hist[xidx, yidx])\n\n\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n from 
scipy.signal import butter, filtfilt\n\n # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy\n def butter_lowpass(cutoff, fs, order):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n return butter(order, normal_cutoff, btype='low', analog=False)\n\n b, a = butter_lowpass(cutoff, fs, order=order)\n return filtfilt(b, a, data) # forward-backward filter\n\n\ndef plot_one_box(x, im, color=None, label=None, line_thickness=3):\n # Plots one bounding box on image 'im' using OpenCV\n assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'\n tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness\n color = color or [random.randint(0, 255) for _ in range(3)]\n c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\n cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\n if label:\n tf = max(tl - 1, 1) # font thickness\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled\n cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n\n\ndef plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None):\n # Plots one bounding box on image 'im' using PIL\n im = Image.fromarray(im)\n draw = ImageDraw.Draw(im)\n line_thickness = line_thickness or max(int(min(im.size) / 200), 2)\n draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot\n if label:\n fontsize = max(round(max(im.size) / 40), 12)\n font = ImageFont.truetype(\"Arial.ttf\", fontsize)\n txt_width, txt_height = font.getsize(label)\n draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))\n draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)\n return np.asarray(im)\n\n\ndef plot_wh_methods(): # from 
yolo_utils.plots import *; plot_wh_methods()\n # Compares the two methods for width-height anchor multiplication\n # https://github.com/ultralytics/yolov3/issues/168\n x = np.arange(-4.0, 4.0, .1)\n ya = np.exp(x)\n yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2\n\n fig = plt.figure(figsize=(6, 3), tight_layout=True)\n plt.plot(x, ya, '.-', label='YOLOv3')\n plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')\n plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')\n plt.xlim(left=-4, right=4)\n plt.ylim(bottom=0, top=6)\n plt.xlabel('input')\n plt.ylabel('output')\n plt.grid()\n plt.legend()\n fig.savefig('comparison.png', dpi=200)\n\n\ndef output_to_target(output):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n targets = []\n for i, o in enumerate(output):\n for *box, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n return np.array(targets)\n\n\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if 
scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic\n\n\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\n # Plot LR simulating training for full epochs\n optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals\n y = []\n for _ in range(epochs):\n scheduler.step()\n y.append(optimizer.param_groups[0]['lr'])\n plt.plot(y, '.-', label='LR')\n plt.xlabel('epoch')\n plt.ylabel('LR')\n plt.grid()\n plt.xlim(0, epochs)\n plt.ylim(0)\n plt.savefig(Path(save_dir) / 'LR.png', dpi=200)\n plt.close()\n\n\ndef plot_test_txt(): # from yolo_utils.plots import *; plot_test()\n # Plot test.txt histograms\n x = np.loadtxt('test.txt', dtype=np.float32)\n box = xyxy2xywh(x[:, :4])\n cx, cy = box[:, 0], box[:, 1]\n\n fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)\n ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)\n ax.set_aspect('equal')\n plt.savefig('hist2d.png', dpi=300)\n\n fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)\n ax[0].hist(cx, bins=600)\n ax[1].hist(cy, bins=600)\n plt.savefig('hist1d.png', dpi=200)\n\n\ndef plot_targets_txt(): # from yolo_utils.plots import *; plot_targets_txt()\n # Plot targets.txt histograms\n x = np.loadtxt('targets.txt', dtype=np.float32).T\n s = ['x targets', 'y targets', 'width targets', 'height targets']\n fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)\n ax = ax.ravel()\n for i in range(4):\n ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))\n ax[i].legend()\n ax[i].set_title(s[i])\n plt.savefig('targets.jpg', dpi=200)\n\n\ndef plot_study_txt(path='', x=None): # from yolo_utils.plots import *; plot_study_txt()\n # Plot study.txt generated by test.py\n fig, ax = plt.subplots(2, 4, figsize=(10, 
6), tight_layout=True)\n # ax = ax.ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(Path(path).glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']\n # for i in range(7):\n # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n # ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(30, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n plt.savefig(str(Path(path).name) + '.png', dpi=300)\n\n\ndef plot_labels(labels, names=(), save_dir=Path(''), loggers=None):\n # plot dataset labels\n print('Plotting labels... 
')\n c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes\n nc = int(c.max() + 1) # number of classes\n x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])\n\n # seaborn correlogram\n sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))\n plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)\n plt.close()\n\n # matplotlib labels\n matplotlib.use('svg') # faster\n ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()\n ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)\n ax[0].set_ylabel('instances')\n if 0 < len(names) < 30:\n ax[0].set_xticks(range(len(names)))\n ax[0].set_xticklabels(names, rotation=90, fontsize=10)\n else:\n ax[0].set_xlabel('classes')\n sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)\n sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)\n\n # rectangles\n labels[:, 1:3] = 0.5 # center\n labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000\n img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)\n for cls, *box in labels[:1000]:\n ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot\n ax[1].imshow(img)\n ax[1].axis('off')\n\n for a in [0, 1, 2, 3]:\n for s in ['top', 'right', 'left', 'bottom']:\n ax[a].spines[s].set_visible(False)\n\n plt.savefig(save_dir / 'labels.jpg', dpi=200)\n matplotlib.use('Agg')\n plt.close()\n\n # loggers\n for k, v in loggers.items() or {}:\n if k == 'wandb' and v:\n v.log({\"Labels\": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)\n\n\ndef plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from yolo_utils.plots import *; plot_evolution()\n # Plot hyperparameter evolution results in evolve.txt\n with open(yaml_file) as f:\n hyp = yaml.safe_load(f)\n x = np.loadtxt('evolve.txt', ndmin=2)\n f = fitness(x)\n # weights = (f - f.min()) ** 2 # for weighted results\n plt.figure(figsize=(10, 12), 
tight_layout=True)\n matplotlib.rc('font', **{'size': 8})\n for i, (k, v) in enumerate(hyp.items()):\n y = x[:, i + 7]\n # mu = (y * weights).sum() / weights.sum() # best weighted result\n mu = y[f.argmax()] # best single result\n plt.subplot(6, 5, i + 1)\n plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')\n plt.plot(mu, f.max(), 'k+', markersize=15)\n plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters\n if i % 5 != 0:\n plt.yticks([])\n print('%15s: %.3g' % (k, mu))\n plt.savefig('evolve.png', dpi=200)\n print('\\nPlot saved as evolve.png')\n\n\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\n # Plot iDetection '*.txt' per-image logs. from yolo_utils.plots import *; profile_idetection()\n ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()\n s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']\n files = list(Path(save_dir).glob('frames*.txt'))\n for fi, f in enumerate(files):\n try:\n results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows\n n = results.shape[1] # number of rows\n x = np.arange(start, min(stop, n) if stop else n)\n results = results[:, x]\n t = (results[0] - results[0].min()) # set t0=0s\n results[0] = x\n for i, a in enumerate(ax):\n if i < len(results):\n label = labels[fi] if len(labels) else f.stem.replace('frames_', '')\n a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)\n a.set_title(s[i])\n a.set_xlabel('time (s)')\n # if fi == len(files) - 1:\n # a.set_ylim(bottom=0)\n for side in ['top', 'right']:\n a.spines[side].set_visible(False)\n else:\n a.remove()\n except Exception as e:\n print('Warning: Plotting error for %s; %s' % (f, e))\n\n ax[1].legend()\n plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)\n\n\ndef plot_results_overlay(start=0, stop=0): # from yolo_utils.plots import *; plot_results_overlay()\n # Plot training 
'results*.txt', overlaying train and val losses\n s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends\n t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles\n for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):\n results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n n = results.shape[1] # number of rows\n x = range(start, min(stop, n) if stop else n)\n fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)\n ax = ax.ravel()\n for i in range(5):\n for j in [i, i + 5]:\n y = results[j, x]\n ax[i].plot(x, y, marker='.', label=s[j])\n # y_smooth = butter_lowpass_filtfilt(y)\n # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])\n\n ax[i].set_title(t[i])\n ax[i].legend()\n ax[i].set_ylabel(f) if i == 0 else None # add filename\n fig.savefig(f.replace('.txt', '.png'), dpi=200)\n\n\ndef plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):\n # Plot training 'results*.txt'. from yolo_utils.plots import *; plot_results(save_dir='runs/train/exp')\n fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)\n ax = ax.ravel()\n s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',\n 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']\n if bucket:\n # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]\n files = ['results%g.txt' % x for x in id]\n c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)\n os.system(c)\n else:\n files = list(Path(save_dir).glob('results*.txt'))\n assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir)\n for fi, f in enumerate(files):\n try:\n results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n n = results.shape[1] # number of rows\n x = range(start, min(stop, n) if stop else n)\n for i in range(10):\n y = results[i, x]\n if i in [0, 1, 2, 5, 6, 7]:\n y[y == 0] = np.nan # don't show zero loss values\n # y /= y[0] # normalize\n label = labels[fi] if len(labels) else f.stem\n ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n # if i in [5, 6, 7]: # share train and val loss y axes\n # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n except Exception as e:\n print('Warning: Plotting error for %s; %s' % (f, e))\n\n ax[1].legend()\n fig.savefig(Path(save_dir) / 'results.png', dpi=200)\n" ]
[ [ "torch.cuda.get_device_properties", "torch.cuda.synchronize", "torch.mm", "torch.zeros", "torch.sqrt", "torch.manual_seed", "torch.nn.utils.prune.remove", "torch.nn.Conv2d", "torch.distributed.barrier", "torch.nn.utils.prune.l1_unstructured", "torch.no_grad", "torch.cuda.is_available", "torch.nn.functional.interpolate", "torch.device", "torch.cuda.device_count", "torch.nn.functional.pad" ], [ "matplotlib.pyplot.legend", "numpy.linspace", "numpy.asarray", "matplotlib.pyplot.plot", "numpy.max", "numpy.digitize", "numpy.exp", "numpy.arange", "torch.from_numpy", "numpy.ceil", "scipy.signal.butter", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.log", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.rc", "matplotlib.pyplot.ylabel", "numpy.histogram2d", "scipy.signal.filtfilt", "matplotlib.use", "matplotlib.pyplot.subplots", "numpy.ones", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "numpy.loadtxt" ] ]
Computational-Plant-Science/plant_image_analysis
[ "321eaae9531cd5f8eaebf3ee6c68b99eb53e420c" ]
[ "dev_code/marker_roi_crop.py" ]
[ "'''\nName: sticker_detection.py\n\nVersion: 1.0\n\nSummary: Detect sticker shape markers in image and cropp image based on marker location\n \nAuthor: suxing liu\n\nAuthor-email: suxingliu@gmail.com\n\nCreated: 2018-03-09\n\nUSAGE:\n\ntime python3 marker_roi_crop.py -p ~/plant-image-analysis/test/ -ft png \n\n'''\n\n# import necessary packages\nimport argparse\nimport cv2\nimport numpy as np\nimport os\nimport glob\nfrom pathlib import Path \n\nimport psutil\nimport concurrent.futures\nimport multiprocessing\nfrom multiprocessing import Pool\nfrom contextlib import closing\n\nfrom tabulate import tabulate\nimport openpyxl\n\nfrom pathlib import Path\n\n\n# generate foloder to store the output results\ndef mkdir(path):\n # import module\n import os\n \n # remove space at the beginning\n path=path.strip()\n # remove slash at the end\n path=path.rstrip(\"\\\\\")\n \n # path exist? # True # False\n isExists=os.path.exists(path)\n \n # process\n if not isExists:\n # construct the path and folder\n #print path + ' folder constructed!'\n # make dir\n os.makedirs(path)\n return True\n else:\n # if exists, return \n #print path+' path exists!'\n return False\n\n# Detect stickers in the image\ndef sticker_detect(image_file):\n \n image_file_name = Path(image_file).name\n \n abs_path = os.path.abspath(image_file)\n \n filename, file_extension = os.path.splitext(abs_path)\n base_name = os.path.splitext(os.path.basename(filename))[0]\n \n print(\"Processing image : {0}\\n\".format(str(image_file)))\n \n # save folder construction\n mkpath = os.path.dirname(abs_path) +'/cropped'\n mkdir(mkpath)\n save_path = mkpath + '/'\n\n print (\"results_folder: \" + save_path)\n \n \n\n # load the image, clone it for output, and then convert it to grayscale\n img_ori = cv2.imread(image_file)\n \n img_rgb = img_ori.copy()\n \n # Convert it to grayscale \n img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY) \n \n # Store width and height of template in w and h \n w, h = 
template.shape[::-1] \n \n # Perform match operations. \n res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)\n \n #(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(res)\n \n \n # Specify a threshold \n threshold = 0.8\n \n # Store the coordinates of matched area in a numpy array \n loc = np.where( res >= threshold) \n \n if len(loc):\n \n (y,x) = np.unravel_index(res.argmax(), res.shape)\n \n (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(res)\n \n print(y,x)\n \n print(min_val, max_val, min_loc, max_loc)\n \n \n (startX, startY) = max_loc\n endX = startX + template.shape[1]\n endY = startY + template.shape[0]\n \n # Draw a rectangle around the matched region. \n for pt in zip(*loc[::-1]): \n sticker_overlay = cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,255,255), 2) \n\n # save segmentation result\n result_file = (save_path + base_name + '_sticker_matched.' + args['filetype'])\n print(result_file)\n cv2.imwrite(result_file, sticker_overlay)\n \n \n crop_img = img_rgb[startY:endY, startX:endX]\n \n # save segmentation result\n result_file = (save_path + base_name + '_cropped.' + args['filetype'])\n print(result_file)\n cv2.imwrite(result_file, crop_img)\n \n return image_file_name, (x,y)\n\n\n\n\n\nif __name__ == '__main__':\n \n ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--path\", required = True, help = \"path to image file\")\n ap.add_argument(\"-ft\", \"--filetype\", required=True, help = \"image filetype\")\n\n args = vars(ap.parse_args())\n \n \n # Setting path to image files\n file_path = args[\"path\"]\n ext = args['filetype']\n\n # Extract file type and path\n filetype = '*.' 
+ ext\n image_file_path = file_path + filetype\n \n # Accquire image file list\n imgList = sorted(glob.glob(image_file_path))\n \n global template\n template_path = \"/home/suxing/smart/marker_template/sticker_template.jpg\"\n # Read the template \n template = cv2.imread(template_path, 0) \n print(template)\n #imgList = (glob.glob(image_file_path))\n\n #print((imgList))\n #global save_path\n \n # Get number of images in the data folder\n n_images = len(imgList)\n \n result_list = []\n \n \n # Loop execute\n for image in imgList:\n \n (image_file_name, sticker_overlay) = sticker_detect(image)\n \n result_list.append([image_file_name, sticker_overlay])\n \n #sticker_detect(image)\n \n '''\n # Parallel processing\n \n # get cpu number for parallel processing\n agents = psutil.cpu_count() \n #agents = multiprocessing.cpu_count() \n #agents = 8\n \n print(\"Using {0} cores to perfrom parallel processing... \\n\".format(int(agents)))\n \n # Create a pool of processes. By default, one is created for each CPU in the machine.\n # extract the bouding box for each image in file list\n with closing(Pool(processes = agents)) as pool:\n result_list = pool.map(sticker_detect, imgList)\n pool.terminate()\n '''\n \n # Output sum table in command window\n print(\"Summary: {0} plant images were processed...\\n\".format(n_images))\n \n table = tabulate(result_list, headers = ['image_file_name', 'marker coordinates'], tablefmt = 'orgtbl')\n\n print(table + \"\\n\")\n \n\n" ]
[ [ "numpy.where" ] ]
SergioLordano/hoshi
[ "b14d9d26ebb92c283cc7026ba85bb70135046cf7" ]
[ "hoshi/srw_beam_caustic.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 20 17:15:24 2018\n\n@author: sergio.lordano\n\"\"\"\n\n\ndef SRW_beam_caustic(wfr=None, zStart=1.0, zFin=2.0, zStep=0.1, zOffset=0.0, extract_parameter=1, useMPI=True, save_hdf5=False, h5_filename='test.h5', buffer=False, matrix=True, ppFin=None):\n \"\"\"\n :extract_parameter: (1) Total Intensity, (2) Hor. Pol. Intensity, (3) Vert. Pol. Intensity, (4) Hor. Pol. Phase, (5) Hor. Pol. Phase, (6) Hor. Pol. Electric Field, (7) Hor. Pol. Electric Field\n \"\"\"\n \n import numpy as np\n from srwlib import SRWLOptA, SRWLOptD, SRWLOptC, SRWLWfr\n import srwlpy as srwl\n from array import array\n import time\n import copy\n from scipy.integrate import simps\n \n if(save_hdf5):\n import h5py\n \n #############################################\n #### INITIALIZE THREADS ###\n #############################################\n\n MPI=None\n comMPI=None\n nProc=1\n rank=0\n \n if(useMPI):\n \n from mpi4py import MPI\n comMPI = MPI.COMM_WORLD\n nProc = comMPI.Get_size() # total number of threads\n rank = comMPI.Get_rank() # particular thread executing\n\n if(wfr==None):\n \n wfr = SRWLWfr()\n\n t0 = time.time() \n \n #############################################\n #### DEFINE THE POSITIONS TO CALCULATE IN ###\n #############################################\n\n nz = int((zFin-zStart)/zStep+1) # total number of positions\n positions = np.linspace(zStart, zFin, nz) # array of positions\n\n #############################################\n #### DEFINE THE FUNCTION TO BE CALLED ###\n #############################################\n \n def propagate_distance(wfri, distance, extract_parameter, ppFin):\n \n PP_GEN = [ 0, 0, 1.5, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0]\n \n if ppFin is not None:\n optBLj = SRWLOptC([SRWLOptD(distance)], [PP_GEN, ppFin])\n else:\n optBLj = SRWLOptC([SRWLOptD(distance)], [PP_GEN])\n \n wfrj = copy.deepcopy(wfri)\n \n if(distance != 0.0): # if distance is zero, the wavefront is not 
propagated\n srwl.PropagElecField(wfrj, optBLj)\n \n if(distance == 0.0 and ppFin is not None): # if distance is zero\n srwl.PropagElecField(wfrj, SRWLOptC([SRWLOptA('c', 'a', 10.0)], [ppFin]))\n \n if(extract_parameter==1): # intensity: 2D, polarization: total\n arI = array('f', [0]*wfrj.mesh.nx*wfrj.mesh.ny)\n srwl.CalcIntFromElecField(arI , wfrj, 6, 0, 3, wfrj.mesh.eStart, 0, 0)\n return np.array(arI, dtype=np.float64), wfrj\n \n elif(extract_parameter==2): # intensity: 2D, polarization: x\n arIx = array('f', [0]*wfrj.mesh.nx*wfrj.mesh.ny) #\"flat\" 2D array to take intensity data\n srwl.CalcIntFromElecField(arIx, wfrj, 0, 0, 3, wfrj.mesh.eStart, 0, 0) # Intensity from electric field (Wfr_name, Disp_Name, Polariz., FuncOf, Extr, ObsEi , ConstHorPos, ConstVerPos, NewDisp)\n return np.array(arIx, dtype=np.float64), wfrj\n \n elif(extract_parameter==3): # intensity: 2D, polarization: y\n arIy = array('f', [0]*wfrj.mesh.nx*wfrj.mesh.ny) #\"flat\" 2D array to take intensity data\n srwl.CalcIntFromElecField(arIy, wfrj, 1, 0, 3, wfrj.mesh.eStart, 0, 0) # Intensity from electric field (Wfr_name, Disp_Name, Polariz., FuncOf, Extr, ObsEi , ConstHorPos, ConstVerPos, NewDisp)\n return np.array(arIy, dtype=np.float64), wfrj\n \n elif(extract_parameter==4): # phase: 2D, polarization: x\n arPx = array('d', [0]*wfrj.mesh.nx*wfrj.mesh.ny) #\"flat\" array to take 2D phase data (note it should be 'd')\n srwl.CalcIntFromElecField(arPx, wfrj, 0, 4, 3, wfrj.mesh.eStart, 0, 0) #extracts radiation phase\n return np.array(arPx, dtype=np.float64), wfrj\n \n elif(extract_parameter==5): # phase: 2D, polarization: y\n arPy = array('d', [0]*wfrj.mesh.nx*wfrj.mesh.ny) #\"flat\" array to take 2D phase data (note it should be 'd')\n srwl.CalcIntFromElecField(arPy, wfrj, 1, 4, 3, wfrj.mesh.eStart, 0, 0) #extracts radiation phase\n return np.array(arPy, dtype=np.float64), wfrj\n \n def integrate_array2D(array2D, xStart, xFin, nx, yStart, yFin, ny):\n \n x_axis = np.linspace(xStart, xFin, nx)*1e3 
# work in [mm]\n y_axis = np.linspace(yStart, yFin, ny)*1e3 # work in [mm]\n \n int_y = np.zeros((ny)) # array to store the integrated array over x_axis\n for i in range(ny):\n int_y[i] = simps(array2D[i, :], x=x_axis)\n \n integral = simps(int_y, x=y_axis)\n return integral\n \n \n def initialize_hdf5(wfr0):\n \n with h5py.File(h5_filename, 'w') as f:\n \n f.attrs['begin time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n f.attrs['xStart'] = wfr0.mesh.xStart\n f.attrs['xFin'] = wfr0.mesh.xFin\n f.attrs['nx'] = wfr0.mesh.nx\n f.attrs['yStart'] = wfr0.mesh.yStart\n f.attrs['yFin'] = wfr0.mesh.yFin\n f.attrs['ny'] = wfr0.mesh.ny\n f.attrs['eStart'] = wfr0.mesh.eStart\n f.attrs['eFin'] = wfr0.mesh.eFin\n f.attrs['ne'] = wfr0.mesh.ne\n f.attrs['zStart'] = zStart\n f.attrs['zFin'] = zFin\n f.attrs['nz'] = nz\n f.attrs['zOffset'] = zOffset\n f.attrs['zStep'] = zStep\n f.attrs['extract_parameter'] = extract_parameter\n if(matrix):\n f.attrs['format'] = 'array2D'\n else:\n f.attrs['format'] = 'array1D'\n \n def append_dataset_hdf5(data, tag, t0):\n \n with h5py.File(h5_filename, 'a') as f:\n \n xStart = f.attrs['xStart']\n xFin = f.attrs['xFin']\n nx = f.attrs['nx']\n yStart = f.attrs['yStart']\n yFin = f.attrs['yFin']\n ny = f.attrs['ny']\n integral = integrate_array2D(data, xStart, xFin, nx, yStart, yFin, ny)\n \n dset = f.create_dataset('step_{0}'.format(tag), data=data, compression=\"gzip\")\n dset.attrs['z'] = positions[tag] + zOffset\n dset.attrs['ellapsed time (s)'] = round(time.time() - t0, 3)\n dset.attrs['max intensity'] = np.max(data)\n dset.attrs['integral'] = integral\n \n if(tag == nz-1): \n f.attrs['end time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n \n def write_max_peak_int(h5_filename):\n \n with h5py.File(h5_filename, 'r+') as f:\n \n max_peak_int = [0.0, 0.0]\n max_integral = [0.0, 0.0]\n \n for dset_name in f:\n \n max_dset = f[dset_name].attrs['max intensity']\n if(max_dset > max_peak_int[0]):\n max_peak_int[0] = 
max_dset\n max_peak_int[1] = f[dset_name].attrs['z'] \n \n int_dset = f[dset_name].attrs['integral']\n if(int_dset > max_integral[0]):\n max_integral[0] = int_dset\n max_integral[1] = f[dset_name].attrs['z']\n \n f.attrs['max peak int'] = max_peak_int[0]\n f.attrs['max peak z'] = max_peak_int[1]\n f.attrs['max integral'] = max_integral[0]\n f.attrs['max integral z'] = max_integral[1]\n \n ###################################################\n #### DEFINE VARIABLES FOR PARALLEL CALCULATIONS ###\n ###################################################\n \n if(nProc > 1):\n if(nz % (nProc-1) != 0): # Increases 1 round if nz is not divisible by n_threads \n n_rounds = int(nz/(nProc-1))+1 # number of times that each rank will execute\n else:\n n_rounds = int(nz/(nProc-1)) # number of times that each rank will execute\n else:\n n_rounds = nz\n \n #############################################\n #### START RUNNING IN PARALLEL MODE ###\n #############################################\n \n if(nProc > 1 and rank == 0): # master process saving data received from slaves to hdf5\n \n for i_iteration in range(1, nz+1):\n\n if(i_iteration==1):\n arIP, wfri = propagate_distance(wfr, 0.0, extract_parameter, ppFin)\n if(save_hdf5): # initialize file\n initialize_hdf5(wfri) \n \n data_recv = np.zeros(wfr.mesh.nx*wfr.mesh.ny, dtype=np.float64) # allocate array\n \n status = MPI.Status() \n comMPI.Recv([data_recv, MPI.FLOAT], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n \n if(matrix):\n data_recv = data_recv.reshape((wfri.mesh.ny,wfri.mesh.nx)) # reshape array\n \n tag = status.Get_tag() \n \n if(save_hdf5):\n append_dataset_hdf5(data_recv, tag, t0)\n \n write_max_peak_int(h5_filename) # add max intensity attributes to group \n return [0]\n \n elif(nProc > 1 and rank > 0): # slave processes sending data to master\n \n for i_round in range(n_rounds): # iterate over rounds\n\n i_value = rank + i_round*(nProc-1) - 1 # get actual value\n\n if(i_value < nz): # check if n_total was not 
exceeded\n \n arIP = np.empty(wfr.mesh.nx*wfr.mesh.ny, dtype=np.float64) # allocate array\n arIP, wfrm = propagate_distance(wfr, positions[i_value], extract_parameter, ppFin) # Propagate wavefront to each distance \n comMPI.Send([arIP, MPI.FLOAT], dest=0, tag=i_value) # send to master with tag number\n \n return [0]\n \n #############################################\n #### START RUNNING IN SERIAL MODE ###\n #############################################\n \n elif(nProc == 1): # single process doing everything (serial calculation)\n \n\n arIP, wfri = propagate_distance(wfr, 0.0, extract_parameter, ppFin)\n if(save_hdf5): # initialize file\n initialize_hdf5(wfri) \n \n for i_round in range(n_rounds): # iterate over rounds\n \n data, wfrm = propagate_distance(wfr, positions[i_round], extract_parameter, ppFin) # Propagate wavefront to each distance\n if(matrix):\n data = data.reshape((wfri.mesh.ny, wfri.mesh.nx))\n \n append_dataset_hdf5(data, i_round, t0)\n \n write_max_peak_int(h5_filename) # add max intensity attributes to group\n \n \n \n\n#SRW_beam_caustic()\n" ]
[ [ "numpy.linspace", "numpy.max", "scipy.integrate.simps", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
llxSKyWALKeRxll/Video_Manipulation
[ "7531c34735be4f68ec76700858e2faa809fa837a" ]
[ "FlashArtPy_VideoCol.py" ]
[ "import pygame\r\nimport cv2\r\nimport numpy\r\n\r\nclass FlashArt:\r\n def __init__(self, vid_path, font_size=8, colour_lvl = 16):\r\n pygame.init()\r\n self.video_path = vid_path\r\n self.capture_video = cv2.VideoCapture(vid_path)\r\n self.colour_level = colour_lvl\r\n self.image, self.bnw_image = self.load_image()\r\n self.width = self.image.shape[0]\r\n self.height = self.image.shape[1]\r\n #self.width = 800\r\n #self.height = 600\r\n #self.image = cv2.resize(self.image, (0, 0), fx=0.1, fy=0.1)\r\n #self.image = pygame.transform.scale(self.image, (self.width, self.height))\r\n self.resolution = self.width, self.height\r\n self.screen = pygame.display.set_mode(self.resolution)\r\n self.clock = pygame.time.Clock()\r\n self.ascii_symbols = '01!\\/|@#^*+ILVXT%!'\r\n self.est_ascii_index = 255 // (len(self.ascii_symbols) - 1)\r\n self.font = pygame.font.SysFont('C:\\\\Users\\\\rebor\\\\Desktop\\\\Academics 2k21\\\\Group Project 2k21\\\\Flash Art\\\\VideoPix_FlashArt_ASCII\\\\FlashArtFonts_VideoPix\\\\f2.ttf', font_size, bold=True)\r\n self.symbol_gap = int(font_size * 0.5)\r\n #self.rendered_ascii_symbols = [self.font.render(char, False, (255, 255, 255)) for char in self.ascii_symbols]\r\n self.colour_collection, self.colour_coEfficient = self.rendered_colour_collection()\r\n\r\n def load_image(self):\r\n temp, self.cv_image = self.capture_video.read()\r\n if not temp:\r\n f1 = FlashArt('C:\\\\Users\\\\rebor\\\\Desktop\\\\Academics 2k21\\\\Group Project 2k21\\\\Flash Art\\\\VideoCol_FlashArt_ASCII\\\\FlashArt_VideosCol\\\\hagler1.mp4')\r\n f1.execute()\r\n #self.cv_image = cv2.imread(self.image_path)\r\n inverted_image = cv2.transpose(self.cv_image) #To rectify the inversion of the image display in pygame window\r\n img = cv2.cvtColor(inverted_image, cv2.COLOR_BGR2RGB)\r\n bNw_image = cv2.cvtColor(inverted_image, cv2.COLOR_BGR2GRAY)\r\n return img, bNw_image\r\n\r\n def draw_ascii_art(self):\r\n modified_image = cv2.resize(self.cv_image, (580, 430), 
interpolation=cv2.INTER_AREA)\r\n cv2.imshow('Original Image ', modified_image)\r\n\r\n def draw_modified_image(self):\r\n self.image, self.bnw_image = self.load_image()\r\n symbol_locations = self.bnw_image // self.est_ascii_index\r\n colour_locations = self.image // self.colour_coEfficient\r\n for i in range(0, self.width, self.symbol_gap):\r\n for j in range(0, self.height, self.symbol_gap):\r\n symbol_location = symbol_locations[i, j]\r\n if symbol_location:\r\n symbol = self.ascii_symbols[symbol_location]\r\n colour = tuple(colour_locations[i, j])\r\n self.screen.blit(self.colour_collection[symbol][colour], (i, j))\r\n\r\n def rendered_colour_collection(self):\r\n colours, colour_coEff = numpy.linspace(0, 255, num=self.colour_level, dtype=int, retstep=True)\r\n colour_collection = [numpy.array([c1, c2, c3]) for c1 in colours for c2 in colours for c3 in colours]\r\n colour_dictionary = dict.fromkeys(self.ascii_symbols, None)\r\n colour_coEff = int(colour_coEff)\r\n for symbol in colour_dictionary:\r\n symbol_depth = {}\r\n for colour in colour_collection:\r\n colour_key = tuple(colour // colour_coEff)\r\n symbol_depth[colour_key] = self.font.render(symbol, False, tuple(colour))\r\n colour_dictionary[symbol] = symbol_depth\r\n return colour_dictionary, colour_coEff\r\n\r\n def draw_image(self):\r\n #pygame.surfarray.blit_array(self.screen, self.image)\r\n #cv2.imshow('Flash_Art', self.image)\r\n self.screen.fill((0, 0, 0))\r\n self.draw_modified_image()\r\n self.draw_ascii_art()\r\n\r\n def save_image(self):\r\n image = pygame.surfarray.array3d(self.screen)\r\n cv_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n cv2.imwrite('C:\\\\Users\\\\rebor\\\\Desktop\\\\Academics 2k21\\\\Group Project 2k21\\\\Flash Art\\\\VideoCol_FlashArt_ASCII\\\\FlashArtOutput_VideoCol\\\\Flash_Art_Image.png', cv_image)\r\n\r\n def execute(self):\r\n while True:\r\n for x in pygame.event.get():\r\n if x.type == pygame.QUIT:\r\n exit()\r\n elif x.type == pygame.KEYDOWN:\r\n if x.key == 
pygame.K_SPACE:\r\n self.save_image()\r\n self.draw_image()\r\n pygame.display.set_caption(str(self.clock.get_fps()))\r\n pygame.display.flip()\r\n self.clock.tick(25)\r\n\r\nflash_art = FlashArt('C:\\\\Users\\\\rebor\\\\Desktop\\\\Academics 2k21\\\\Group Project 2k21\\\\Flash Art\\\\VideoCol_FlashArt_ASCII\\\\FlashArt_VideosCol\\\\hagler1.mp4')\r\nflash_art.execute()" ]
[ [ "numpy.array", "numpy.linspace" ] ]
bossm0n5t3r/deep-learning-from-scratch
[ "8b06ffc7067f54ca54cc23822ec4a86d33c5e10a" ]
[ "ch06/multi_layer_net_extend.py" ]
[ "# coding: utf-8\nimport numpy as np\nfrom collections import OrderedDict\nfrom layers import *\nfrom gradient import numerical_gradient\n\nclass MultiLayerNetExtend:\n \"\"\"완전 연결 다층 신경망(확장판)\n 가중치 감소, 드롭아웃, 배치 정규화 구현\n\n Parameters\n ----------\n input_size : 입력 크기(MNIST의 경우엔 784)\n hidden_size_list : 각 은닉층의 뉴런 수를 담은 리스트(e.g. [100, 100, 100])\n output_size : 출력 크기(MNIST의 경우엔 10)\n activation : 활성화 함수 - 'relu' 혹은 'sigmoid'\n weight_init_std : 가중치의 표준편차 지정(e.g. 0.01)\n 'relu'나 'he'로 지정하면 'He 초깃값'으로 설정\n 'sigmoid'나 'xavier'로 지정하면 'Xavier 초깃값'으로 설정\n weight_decay_lambda : 가중치 감소(L2 법칙)의 세기\n use_dropout : 드롭아웃 사용 여부\n dropout_ration : 드롭아웃 비율\n use_batchNorm : 배치 정규화 사용 여부\n \"\"\"\n def __init__(self, input_size, hidden_size_list, output_size,\n activation='relu', weight_init_std='relu', weight_decay_lambda=0, \n use_dropout = False, dropout_ration = 0.5, use_batchnorm=False):\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size_list = hidden_size_list\n self.hidden_layer_num = len(hidden_size_list)\n self.use_dropout = use_dropout\n self.weight_decay_lambda = weight_decay_lambda\n self.use_batchnorm = use_batchnorm\n self.params = {}\n\n # 가중치 초기화\n self.__init_weight(weight_init_std)\n\n # 계층 생성\n activation_layer = {'sigmoid': Sigmoid, 'relu': Relu}\n self.layers = OrderedDict()\n for idx in range(1, self.hidden_layer_num+1):\n self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],\n self.params['b' + str(idx)])\n if self.use_batchnorm:\n self.params['gamma' + str(idx)] = np.ones(hidden_size_list[idx-1])\n self.params['beta' + str(idx)] = np.zeros(hidden_size_list[idx-1])\n self.layers['BatchNorm' + str(idx)] = BatchNormalization(self.params['gamma' + str(idx)], self.params['beta' + str(idx)])\n \n self.layers['Activation_function' + str(idx)] = activation_layer[activation]()\n \n if self.use_dropout:\n self.layers['Dropout' + str(idx)] = Dropout(dropout_ration)\n\n idx = self.hidden_layer_num + 1\n 
self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])\n\n self.last_layer = SoftmaxWithLoss()\n\n def __init_weight(self, weight_init_std):\n \"\"\"가중치 초기화\n \n Parameters\n ----------\n weight_init_std : 가중치의 표준편차 지정(e.g. 0.01)\n 'relu'나 'he'로 지정하면 'He 초깃값'으로 설정\n 'sigmoid'나 'xavier'로 지정하면 'Xavier 초깃값'으로 설정\n \"\"\"\n all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]\n for idx in range(1, len(all_size_list)):\n scale = weight_init_std\n if str(weight_init_std).lower() in ('relu', 'he'):\n scale = np.sqrt(2.0 / all_size_list[idx - 1]) # ReLUを使う場合に推奨される初期値\n elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):\n scale = np.sqrt(1.0 / all_size_list[idx - 1]) # sigmoidを使う場合に推奨される初期値\n self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx-1], all_size_list[idx])\n self.params['b' + str(idx)] = np.zeros(all_size_list[idx])\n\n def predict(self, x, train_flg=False):\n for key, layer in self.layers.items():\n if \"Dropout\" in key or \"BatchNorm\" in key:\n x = layer.forward(x, train_flg)\n else:\n x = layer.forward(x)\n\n return x\n\n def loss(self, x, t, train_flg=False):\n \"\"\"손실 함수를 구한다.\n \n Parameters\n ----------\n x : 입력 데이터\n t : 정답 레이블 \n \"\"\"\n y = self.predict(x, train_flg)\n\n weight_decay = 0\n for idx in range(1, self.hidden_layer_num + 2):\n W = self.params['W' + str(idx)]\n weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W**2)\n\n return self.last_layer.forward(y, t) + weight_decay\n\n def accuracy(self, X, T):\n Y = self.predict(X, train_flg=False)\n Y = np.argmax(Y, axis=1)\n if T.ndim != 1 : T = np.argmax(T, axis=1)\n\n accuracy = np.sum(Y == T) / float(X.shape[0])\n return accuracy\n\n def numerical_gradient(self, X, T):\n \"\"\"기울기를 구한다(수치 미분).\n \n Parameters\n ----------\n x : 입력 데이터\n t : 정답 레이블\n \n Returns\n -------\n 각 층의 기울기를 담은 사전(dictionary) 변수\n grads['W1']、grads['W2']、... 각 층의 가중치\n grads['b1']、grads['b2']、... 
각 층의 편향\n \"\"\"\n loss_W = lambda W: self.loss(X, T, train_flg=True)\n\n grads = {}\n for idx in range(1, self.hidden_layer_num+2):\n grads['W' + str(idx)] = numerical_gradient(loss_W, self.params['W' + str(idx)])\n grads['b' + str(idx)] = numerical_gradient(loss_W, self.params['b' + str(idx)])\n \n if self.use_batchnorm and idx != self.hidden_layer_num+1:\n grads['gamma' + str(idx)] = numerical_gradient(loss_W, self.params['gamma' + str(idx)])\n grads['beta' + str(idx)] = numerical_gradient(loss_W, self.params['beta' + str(idx)])\n\n return grads\n \n def gradient(self, x, t):\n # forward\n self.loss(x, t, train_flg=True)\n\n # backward\n dout = 1\n dout = self.last_layer.backward(dout)\n\n layers = list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout = layer.backward(dout)\n\n # 결과 저장\n grads = {}\n for idx in range(1, self.hidden_layer_num+2):\n grads['W' + str(idx)] = self.layers['Affine' + str(idx)].dW + self.weight_decay_lambda * self.params['W' + str(idx)]\n grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db\n\n if self.use_batchnorm and idx != self.hidden_layer_num+1:\n grads['gamma' + str(idx)] = self.layers['BatchNorm' + str(idx)].dgamma\n grads['beta' + str(idx)] = self.layers['BatchNorm' + str(idx)].dbeta\n\n return grads\n" ]
[ [ "numpy.sqrt", "numpy.ones", "numpy.argmax", "numpy.random.randn", "numpy.zeros", "numpy.sum" ] ]
ColasGael/Autonomous-Aircraft
[ "f45510c9f892ca6a08171f63ba5e3e08fe6ef23c" ]
[ "strategy_supervisor/scripts/util.py" ]
[ "import numpy as np\nfrom math import sin, cos, sqrt, atan2, radians\n\n'''\nUseful classes and functions are defined here\n'''\n\ndef coord_to_dist(loc1, loc2):\n\t'''Compute distance (m) beteen two GPS lat/long positions using Haversine formula\n\t'''\n\tlat1 = radians(loc1[0])\n\tlon1 = radians(loc1[1])\n\tlat2 = radians(loc2[0])\n\tlon2 = radians(loc2[1])\n\n\t#approximate radius of Earth in m\n\tR = 6371e3\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\n\ta = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n\tc = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n\tdistance = R * c\n\treturn distance\n\ndef coord_to_direction(loc1, loc2):\n\tlat1 = radians(loc1[0])\n\tlon1 = radians(loc1[1])\n\tlat2 = radians(loc2[0])\n\tlon2 = radians(loc2[1])\n\n\tbearing = atan2(sin(lon2-lon1)*cos(lat2), cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(lon2-lon1))\n\n\treturn bearing\n\nclass Graph:\n\t'''Graph represents the network of vertiports\n\t\n\tAttributes:\n\t\t'n' (int) : number of nodes\n\t\t'nodes' (n*2 double np.array) : position of the nodes in the GPS format, nodes[i] = [latitude, longitude]\n\t\t'prob' (n double np.array) : prob[i] = probability that a query starts from node i\n\t\t'adj'\t(n*n int np.array) : adjacency matrix, adj[i,j] = 1 if connexion between vertiports i and j, 0 otherwise\n\t'''\n\t\n\tdef __init__(self, nodes, adj):\n\t\tself.n = len(nodes)\n\t\tself.nodes = np.array(nodes)\n\t\tself.adj = np.array(adj)\n\t\tself.prob = np.sum(self.adj,0)/np.sum(self.adj)\n\t\n\tdef dist (self, i, j):\n\t\t'''compute the euclidian distance between nodes i and join\n\t\t\n\t\tArgs:\n\t\t\t'i' (int) : start node\n\t\t\t'j' (int) : end node\n\t\t\n\t\tReturns:\n\t\t\td (double) : euclidian distance between i and join\n\t\t'''\n\t\td = coord_to_dist(nodes[i,:], nodes[j,:])\n\t\t\n\t\treturn d\n\t\n\tdef is_connected (self, i, j):\n\t\t'''check if 2 nodes are connected\n\t\t\n\t\tArgs:\n\t\t\ti (int) : start node\n\t\t\tj (int) : end 
node\n\t\t\n\t\tReturns:\n\t\t\tis_connected (bool) : true if i and j are connected in the graph self\n\t\t'''\n\t\treturn adj[i,j] == 1\n\nclass Query:\n\t'''Query represents the query of a client\n\t\n\tAttributes:\n\t\t'id' (int) : unique id of the query\n\t\t't' (double) : time at which the query as been made\n\t\t'start' (int) : id of the starting vertiport\n\t\t'end' (int) : id of the destination vertiport\n\t'''\n\t\n\tdef __init__(self, id, t, start, end):\n\t\tself.id = id\n\t\tself.t = t\n\t\tself.start = start\n\t\tself.end = end\n\nclass Bid:\n\t'''Bid represents the bid made by our team to a client query\n\t\n\tAttributes:\n\t\t'query' (int): corresponding query\n\t\t'start' (int) : id of the starting vertiport\n\t\t'end' (int) : id of the destination vertiport\n\t\t'drone' (Drone) : drone affected to the bid\n\t\t'time' (double) : estimated time of travel\n\t\t'amount' (double) : price proposed to the client\n\t\t'accepted' (bool) : has the bid been accepted by the client\n\t\t'profit' (double) : profit made on this trip\n\t'''\n\t\t\n\tdef __init__(self, query, drone=None, estimated_time=0, amount=0, profit=0, accepted=False):\n\t\tself.query = query\n\t\tself.start = query.start\n\t\tself.end = query.end\n\t\t\n\t\tself.drone = drone\n\t\tself.estimated_time = estimated_time\n\t\tself.amount = amount\n\t\tself.accepted = accepted\n\t\tself.profit = profit\n" ]
[ [ "numpy.array", "numpy.sum" ] ]
HXWAndCL/mmgeneration
[ "9afb1d740bf56a4ecde5064d5bb2a4e2d777638b" ]
[ "apps/conditional_interpolate.py" ]
[ "import argparse\nimport os\n\nimport mmcv\nimport torch\nimport torch.nn as nn\nfrom mmcv import Config, DictAction\nfrom mmcv.runner import load_checkpoint\nfrom torchvision.utils import save_image\n\nfrom mmgen.apis import set_random_seed\nfrom mmgen.core.evaluation import slerp\nfrom mmgen.models import build_model\nfrom mmgen.models.architectures import BigGANDeepGenerator, BigGANGenerator\nfrom mmgen.models.architectures.common import get_module_device\n\n# yapf: disable\nsys.path.append(os.path.abspath(os.path.join(__file__, '../..'))) # isort:skip # noqa\n\n\n_default_embedding_name = dict(\n BigGANGenerator='shared_embedding',\n BigGANDeepGenerator='shared_embedding',\n SNGANGenerator='NULL',\n SAGANGenerator='NULL')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Sampling from latents\\' interpolation')\n parser.add_argument('config', help='evaluation config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument(\n '--use-cpu',\n action='store_true',\n help='whether to use cpu device for sampling')\n parser.add_argument(\n '--embedding-name',\n type=str,\n default=None,\n help='name of conditional model\\'s embedding layer')\n parser.add_argument(\n '--fix-z',\n action='store_true',\n help='whether to fix the noise for conditonal model')\n parser.add_argument(\n '--fix-y',\n action='store_true',\n help='whether to fix the label for conditional model')\n parser.add_argument('--seed', type=int, default=2021, help='random seed')\n parser.add_argument(\n '--deterministic',\n action='store_true',\n help='whether to set deterministic options for CUDNN backend.')\n parser.add_argument(\n '--samples-path', type=str, help='path to store images.')\n parser.add_argument(\n '--sample-model',\n type=str,\n default='ema',\n help='use which mode (ema/orig) in sampling.')\n parser.add_argument(\n '--show-mode',\n choices=['group', 'sequence'],\n default='sequence',\n help='mode to show interpolation 
result.')\n parser.add_argument(\n '--interp-mode',\n choices=['lerp', 'slerp'],\n default='lerp',\n help='mode to sample from endpoints\\'s interpolation.')\n parser.add_argument(\n '--endpoint', type=int, default=2, help='The number of endpoints.')\n parser.add_argument(\n '--batch-size',\n type=int,\n default=2,\n help='batch size used in generator sampling.')\n parser.add_argument(\n '--interval',\n type=int,\n default=10,\n help='The number of intervals between two endpoints.')\n parser.add_argument(\n '--sample-cfg',\n nargs='+',\n action=DictAction,\n help='Other customized kwargs for sampling function')\n args = parser.parse_args()\n return args\n\n\n@torch.no_grad()\ndef batch_inference(generator,\n noise,\n embedding=None,\n num_batches=-1,\n max_batch_size=16,\n dict_key=None,\n **kwargs):\n \"\"\"Inference function to get a batch of desired data from output dictionary\n of generator.\n\n Args:\n generator (nn.Module): Generator of a conditional model.\n noise (Tensor | list[torch.tensor] | None): A batch of noise\n Tensor.\n embedding (Tensor, optional): Embedding tensor of label for\n conditional models. Defaults to None.\n num_batches (int, optional): The number of batchs for\n inference. Defaults to -1.\n max_batch_size (int, optional): The number of batch size for\n inference. Defaults to 16.\n dict_key (str, optional): key used to get results from output\n dictionary of generator. 
Defaults to None.\n\n Returns:\n torch.Tensor: Tensor of output image, noise batch or label\n batch.\n \"\"\"\n # split noise into groups\n if noise is not None:\n if isinstance(noise, torch.Tensor):\n num_batches = noise.shape[0]\n noise_group = torch.split(noise, max_batch_size, 0)\n else:\n num_batches = noise[0].shape[0]\n noise_group = torch.split(noise[0], max_batch_size, 0)\n noise_group = [[noise_tensor] for noise_tensor in noise_group]\n else:\n noise_group = [None] * (\n num_batches // max_batch_size +\n (1 if num_batches % max_batch_size > 0 else 0))\n\n # split embedding into groups\n if embedding is not None:\n assert isinstance(embedding, torch.Tensor)\n num_batches = embedding.shape[0]\n embedding_group = torch.split(embedding, max_batch_size, 0)\n else:\n embedding_group = [None] * (\n num_batches // max_batch_size +\n (1 if num_batches % max_batch_size > 0 else 0))\n\n # split batchsize into groups\n batchsize_group = [max_batch_size] * (num_batches // max_batch_size)\n if num_batches % max_batch_size > 0:\n batchsize_group += [num_batches % max_batch_size]\n\n device = get_module_device(generator)\n outputs = []\n for _noise, _embedding, _num_batches in zip(noise_group, embedding_group,\n batchsize_group):\n if isinstance(_noise, torch.Tensor):\n _noise = _noise.to(device)\n if isinstance(_noise, list):\n _noise = [ele.to(device) for ele in _noise]\n if _embedding is not None:\n _embedding = _embedding.to(device)\n output = generator(\n _noise, label=_embedding, num_batches=_num_batches, **kwargs)\n output = output[dict_key] if dict_key else output\n if isinstance(output, list):\n output = output[0]\n # once obtaining sampled results, we immediately put them into cpu\n # to save cuda memory\n outputs.append(output.to('cpu'))\n outputs = torch.cat(outputs, dim=0)\n return outputs\n\n\n@torch.no_grad()\ndef sample_from_path(generator,\n latent_a,\n latent_b,\n label_a,\n label_b,\n intervals,\n embedding_name=None,\n interp_mode='lerp',\n 
**kwargs):\n interp_alphas = torch.linspace(0, 1, intervals)\n interp_samples = []\n\n device = get_module_device(generator)\n if embedding_name is None:\n generator_name = generator.__class__.__name__\n assert generator_name in _default_embedding_name\n embedding_name = _default_embedding_name[generator_name]\n embedding_fn = getattr(generator, embedding_name, nn.Identity())\n embedding_a = embedding_fn(label_a.to(device))\n embedding_b = embedding_fn(label_b.to(device))\n\n for alpha in interp_alphas:\n # calculate latent interpolation\n if interp_mode == 'lerp':\n latent_interp = torch.lerp(latent_a, latent_b, alpha)\n else:\n assert latent_a.ndim == latent_b.ndim == 2\n latent_interp = slerp(latent_a, latent_b, alpha)\n\n # calculate embedding interpolation\n embedding_interp = embedding_a + (\n embedding_b - embedding_a) * alpha.to(embedding_a.dtype)\n if isinstance(generator, (BigGANDeepGenerator, BigGANGenerator)):\n kwargs.update(dict(use_outside_embedding=True))\n sample = batch_inference(generator, latent_interp, embedding_interp,\n **kwargs)\n interp_samples.append(sample)\n\n return interp_samples\n\n\ndef main():\n args = parse_args()\n cfg = Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n\n # set random seeds\n if args.seed is not None:\n print('set random seed to', args.seed)\n set_random_seed(args.seed, deterministic=args.deterministic)\n\n # build the model and load checkpoint\n model = build_model(\n cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)\n _ = load_checkpoint(model, args.checkpoint, map_location='cpu')\n # sanity check for models without ema\n if not model.use_ema:\n args.sample_model = 'orig'\n if args.sample_model == 'ema':\n generator = model.generator_ema\n else:\n generator = model.generator\n mmcv.print_log(f'Sampling model: {args.sample_model}', 'mmgen')\n mmcv.print_log(f'Show mode: {args.show_mode}', 'mmgen')\n 
mmcv.print_log(f'Samples path: {args.samples_path}', 'mmgen')\n\n generator.eval()\n\n if not args.use_cpu:\n generator = generator.cuda()\n if args.show_mode == 'sequence':\n assert args.endpoint >= 2\n else:\n assert args.endpoint >= 2 and args.endpoint % 2 == 0\n\n kwargs = dict(max_batch_size=args.batch_size)\n if args.sample_cfg is None:\n args.sample_cfg = dict()\n kwargs.update(args.sample_cfg)\n\n # get noises corresponding to each endpoint\n noise_batch = batch_inference(\n generator,\n None,\n num_batches=args.endpoint,\n dict_key='noise_batch',\n return_noise=True,\n **kwargs)\n\n # get labels corresponding to each endpoint\n label_batch = batch_inference(\n generator,\n None,\n num_batches=args.endpoint,\n dict_key='label',\n return_noise=True,\n **kwargs)\n # set label fixed\n if args.fix_y:\n label_batch = label_batch[0] * torch.ones_like(label_batch)\n # set noise fixed\n if args.fix_z:\n noise_batch = torch.cat(\n [noise_batch[0:1, ]] * noise_batch.shape[0], dim=0)\n\n if args.show_mode == 'sequence':\n results = sample_from_path(generator, noise_batch[:-1, ],\n noise_batch[1:, ], label_batch[:-1, ],\n label_batch[1:, ], args.interval,\n args.embedding_name, args.interp_mode,\n **kwargs)\n else:\n results = sample_from_path(generator, noise_batch[::2, ],\n noise_batch[1::2, ], label_batch[:-1, ],\n label_batch[1:, ], args.interval,\n args.embedding_name, args.interp_mode,\n **kwargs)\n # reorder results\n results = torch.stack(results).permute(1, 0, 2, 3, 4)\n _, _, ch, h, w = results.shape\n results = results.reshape(-1, ch, h, w)\n # rescale value range to [0, 1]\n results = ((results + 1) / 2)\n results = results[:, [2, 1, 0], ...]\n results = results.clamp_(0, 1)\n # save image\n mmcv.mkdir_or_exist(args.samples_path)\n if args.show_mode == 'sequence':\n for i in range(results.shape[0]):\n image = results[i:i + 1]\n save_image(\n image,\n os.path.join(args.samples_path, '{:0>5d}'.format(i) + '.png'))\n else:\n save_image(\n results,\n 
os.path.join(args.samples_path, 'group.png'),\n nrow=args.interval)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.linspace", "torch.lerp", "torch.cat", "torch.nn.Identity", "torch.no_grad", "torch.split", "torch.stack", "torch.ones_like" ] ]
shizhouxing/Robustness-Verification-for-Transformers
[ "b1758c10826b7e00ebf3f030020548809be558a1" ]
[ "data_utils.py" ]
[ "# Copyright (c) 2020, Zhouxing shi <zhouxingshichn@gmail.com>\n# Licenced under the BSD 2-Clause License.\n\nimport numpy as np\nimport json, re, os, nltk, pickle, gzip, random, csv\nimport torch\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\n\nif not os.path.exists(\"tmp\"): os.mkdir(\"tmp\") \n\ndef tokenize(sent):\n return nltk.word_tokenize(sent)\n\ndef tokenize_example(example):\n for key in [\"sent_a\", \"sent_b\"]:\n if key in example:\n example[key] = tokenize(example[key])\n return example \n\ndef load_data_yelp(args, set):\n path = \"data/%s/%s.csv\" % (args.data, set) \n print(\"Loading yelp data from \" + path) \n data = []\n with open(path) as file:\n raw = csv.reader(file)\n for row in raw:\n if row[0] == \"label\": \n continue\n text = row[1]\n text = text.replace(\"\\\\n\", \" \")\n text = text.replace('\\\\\"', '\"')\n data.append({\n \"label\": int(row[0]),\n \"sent_a\": text\n })\n with Pool(processes=args.cpus) as pool:\n data = pool.map(tokenize_example, data) \n return data\n\ndef load_data_sst(args, set):\n if set != \"train\":\n path = \"data/%s/%s.txt\" % (args.data, set) \n print(\"Loading sst data from \" + path) \n data = [] \n with open(path) as file:\n for line in file.readlines():\n segs = line[:-1].split(\" \")\n tokens = []\n word_labels = []\n label = int(segs[0][1])\n if label < 2:\n label = 0\n elif label >= 3:\n label = 1\n else: \n continue\n for i in range(len(segs) - 1):\n if segs[i][0] == \"(\" and segs[i][1] in [\"0\", \"1\", \"2\", \"3\", \"4\"]\\\n and segs[i + 1][0] != \"(\":\n tokens.append(segs[i + 1][:segs[i + 1].find(\")\")])\n word_labels.append(int(segs[i][1]))\n data.append({\n \"label\": label,\n \"sent_a\": tokens,\n \"word_labels\": word_labels\n })\n for example in data:\n for i, token in enumerate(example[\"sent_a\"]):\n if token == \"-LRB-\":\n example[\"sent_a\"][i] = \"(\"\n if token == \"-RRB-\":\n example[\"sent_a\"][i] = \")\"\n else:\n path = \"data/sst/train-nodes.tsv\"\n print(\"Loading 
sst data from \" + path) \n data = [] \n with open(path) as file:\n for line in file.readlines()[1:]:\n data.append({\n \"sent_a\": line.split(\"\\t\")[0],\n \"label\": int(line.split(\"\\t\")[1])\n })\n with Pool(processes=args.cpus) as pool:\n data = pool.map(tokenize_example, data) \n return data\n \ndef load_data_raw(args, set):\n if args.data == \"yelp\":\n data = load_data_yelp(args, set)\n elif args.data == \"sst\":\n data = load_data_sst(args, set)\n else:\n raise NotImplementedError\n return data\n\ndef load_data(args):\n if args.small:\n path = \"tmp/data_%s_small.pkl.gz\" % (args.data)\n path_no_train = \"tmp/data_%s_no_train_small.pkl.gz\" % (args.data)\n else:\n path = \"tmp/data_%s.pkl.gz\" % (args.data)\n path_no_train = \"tmp/data_%s_no_train.pkl.gz\" % (args.data)\n path_load = path if args.train else path_no_train\n if os.path.exists(path_load):\n print(\"Loading cached data...\")\n with gzip.open(path_load, \"rb\") as file:\n data_train, data_valid, data_test, vocab_char, vocab_word = pickle.load(file)\n else:\n data_train = load_data_raw(args, \"train\")\n if args.small: \n random.shuffle(data_train)\n data_train = data_train[:len(data_train)//10]\n vocab_char, vocab_word = None, None\n data_test = load_data_raw(args, \"test\")\n if args.small: \n random.shuffle(data_test)\n data_test = data_test[:len(data_test)//10]\n try:\n data_valid = load_data_raw(args, \"dev\")\n if args.small:\n random.shuffle(data_valid)\n data_valid = data_valid[:len(data_valid)//10]\n except FileNotFoundError:\n data_valid = []\n with gzip.open(path, \"wb\") as file:\n pickle.dump((data_train, data_valid, data_test, vocab_char, vocab_word), file)\n with gzip.open(path_no_train, \"wb\") as file:\n pickle.dump(([], data_valid, data_test, vocab_char, vocab_word), file)\n\n # in the yelp dataset labels are among {1, 2}\n if args.data == \"yelp\":\n for example in data_train + data_valid + data_test:\n example[\"label\"] -= 1\n\n return data_train, data_valid, data_test, 
vocab_char, vocab_word\n\ndef get_batches(data, batch_size):\n batches = []\n for i in range((len(data) + batch_size - 1) // batch_size):\n batches.append(data[i * batch_size : (i + 1) * batch_size])\n return batches\n\ndef sample(args, data, target):\n examples = []\n for i in range(args.samples):\n while True:\n example = data[random.randint(0, len(data) - 1)]\n std = target.step([example])[-1]\n # too long\n if std[\"embedding_output\"][0].shape[0] > args.max_verify_length:\n continue\n # incorrectly classified \n if std[\"pred_labels\"][0] != example[\"label\"]:\n continue\n examples.append(example)\n break\n return examples\n\ndef set_seeds(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n" ]
[ [ "torch.manual_seed", "numpy.random.seed", "torch.cuda.manual_seed_all" ] ]
DrSnowbird/texar-docker
[ "a087880f29c0aa08473c6af5554165dedb3f9453" ]
[ "texar/tf/modules/pretrained/xlnet_model_utils_test.py" ]
[ "\"\"\"\nUnit tests for xlnet model utils.\n\"\"\"\nimport tensorflow as tf\n\nfrom texar.tf.modules.pretrained.xlnet_model_utils import \\\n PositionWiseFF, RelativePositionalEncoding, RelativeMutiheadAttention\n\n\nclass XLNetModelUtilsTest(tf.test.TestCase):\n r\"\"\"Tests xlnet model utils.\n \"\"\"\n\n def test_PositionWiseFF(self):\n\n # Case 1\n model = PositionWiseFF()\n inputs = tf.random_uniform(shape=(32, model.hparams.hidden_dim))\n outputs = model(inputs)\n self.assertEqual(outputs.shape, [32, model._hparams.hidden_dim])\n\n # Case 2\n hparams = {\n \"hidden_dim\": 16,\n \"ffn_inner_dim\": 32,\n \"dropout\": 0.1,\n \"activation\": 'relu',\n }\n model = PositionWiseFF(hparams=hparams)\n inputs = tf.random_uniform(shape=(32, 16))\n outputs = model(inputs)\n self.assertEqual(outputs.shape, [32, 16])\n\n # Case 3\n hparams = {\n \"hidden_dim\": 16,\n \"ffn_inner_dim\": 32,\n \"dropout\": 0.1,\n \"activation\": 'gelu',\n }\n model = PositionWiseFF(hparams=hparams)\n inputs = tf.random_uniform(shape=(32, 16))\n outputs = model(inputs)\n self.assertEqual(outputs.shape, [32, 16])\n\n def test_RelativeMultiheadAttention(self):\n num_heads = 12\n head_dim = 64\n\n r_r_bias = tf.random_normal(shape=(num_heads, head_dim))\n r_w_bias = tf.random_normal(shape=(num_heads, head_dim))\n\n model = RelativeMutiheadAttention(r_r_bias=r_r_bias, r_w_bias=r_w_bias)\n\n states_h = tf.random_uniform(shape=(16, 32, model._hparams.hidden_dim))\n pos_embed = tf.random_uniform(shape=(24, 32, model._hparams.hidden_dim))\n\n output_h, output_g = model(states_h=states_h, pos_embed=pos_embed)\n\n self.assertEqual(output_h.shape,\n [16, 32, model._hparams.hidden_dim])\n self.assertEqual(output_g, None)\n\n def test_RelativePositionalEncoding(self):\n\n batch_size = 16\n max_time = 8\n total_len = 32\n\n # Case 1\n model = RelativePositionalEncoding()\n pos_embed = model(batch_size=batch_size,\n max_time=max_time,\n total_len=total_len)\n self.assertEqual(pos_embed.shape,\n [40, 16, 
model._hparams.dim])\n\n # Case 2\n model = RelativePositionalEncoding()\n pos_embed = model(batch_size=batch_size,\n max_time=max_time,\n total_len=total_len,\n attn_type='uni')\n self.assertEqual(pos_embed.shape,\n [33, 16, model._hparams.dim])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.random_uniform", "tensorflow.test.main", "tensorflow.random_normal" ] ]
Ehsan-Yaghoubi/reid-strong-baseline
[ "51b3e4933082f6a6539da3f05cf0d248971f079e" ]
[ "tools/train.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: sherlock\n@contact: sherlockliao01@gmail.com\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport torch\n\nfrom torch.backends import cudnn\n\nsys.path.append('.')\nfrom config import cfg\nfrom data import make_data_loader\nfrom engine.trainer import do_train, do_train_with_center\nfrom modeling import build_model\nfrom layers import make_loss, make_loss_with_center\nfrom solver import make_optimizer, make_optimizer_with_center, WarmupMultiStepLR\n\nfrom utils.logger import setup_logger\n\n\ndef train(cfg):\n # prepare dataset\n train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)\n\n # prepare model\n model = build_model(cfg, num_classes)\n #print(model)\n if cfg.MODEL.IF_WITH_CENTER == 'no':\n print('Train without center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)\n optimizer = make_optimizer(cfg, model)\n # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n # cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n\n loss_func = make_loss(cfg, num_classes) # modified by gu\n\n # Add for using self trained model\n if cfg.MODEL.PRETRAIN_CHOICE == 'self':\n start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])\n print('Start epoch:', start_epoch)\n path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')\n print('Path to the checkpoint of optimizer:', path_to_optimizer)\n model=torch.load(cfg.MODEL.PRETRAIN_PATH) # model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))\n optimizer=torch.load(path_to_optimizer) # optimizer.load_state_dict(torch.load(path_to_optimizer))\n scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, start_epoch)\n elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':\n start_epoch = 0\n scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, 
cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n else:\n print('Only support pretrain_choice for imagenet and self, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))\n\n arguments = {}\n\n do_train(\n cfg,\n model,\n train_loader,\n val_loader,\n optimizer,\n scheduler, # modify for using self trained model\n loss_func,\n num_query,\n start_epoch # add for using self trained model\n )\n input_sizee = (3,cfg.INPUT.SIZE_TRAIN[0],cfg.INPUT.SIZE_TRAIN[1])\n print(\"input_sizee\", input_sizee)\n elif cfg.MODEL.IF_WITH_CENTER == 'yes':\n print('Train with center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)\n loss_func, center_criterion = make_loss_with_center(cfg, num_classes) # modified by gu\n optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)\n # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n # cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n\n arguments = {}\n\n # Add for using self trained model\n if cfg.MODEL.PRETRAIN_CHOICE == 'self':\n start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])\n print('Start epoch:', start_epoch)\n path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')\n print('Path to the checkpoint of optimizer:', path_to_optimizer)\n path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace('model', 'center_param')\n print('Path to the checkpoint of center_param:', path_to_center_param)\n path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer_center')\n print('Path to the checkpoint of optimizer_center:', path_to_optimizer_center)\n model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))\n optimizer.load_state_dict(torch.load(path_to_optimizer))\n center_criterion.load_state_dict(torch.load(path_to_center_param))\n optimizer_center.load_state_dict(torch.load(path_to_optimizer_center))\n scheduler = WarmupMultiStepLR(optimizer, 
cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, start_epoch)\n elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':\n start_epoch = 0\n scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n else:\n print('Only support pretrain_choice for imagenet and self, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))\n\n do_train_with_center(\n cfg,\n model,\n center_criterion,\n train_loader,\n val_loader,\n optimizer,\n optimizer_center,\n scheduler, # modify for using self trained model\n loss_func,\n num_query,\n start_epoch # add for using self trained model\n )\n else:\n print(\"Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\\n\".format(cfg.MODEL.IF_WITH_CENTER))\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"ReID Baseline Training\")\n parser.add_argument(\"--config_file\", default=\"../configs/softmax.yml\", help=\"path to config file\", type=str )\n parser.add_argument(\"opts\", help=\"Modify config options using the command-line\", default=None, nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n\n if args.config_file != \"\":\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n logger = setup_logger(\"reid_baseline\", output_dir, 0)\n logger.info(\"Using {} GPUS\".format(num_gpus))\n logger.info(args)\n\n if args.config_file != \"\":\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, 'r') as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n if cfg.MODEL.DEVICE == \"cuda\":\n 
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID # new add by gu\n cudnn.benchmark = True\n train(cfg)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load" ] ]
pelingundogdu/signalization_prior_knowledge_based_nn
[ "677870d51e87dda2601622d0c152aa6fa2421e8b" ]
[ "scripts/dataset_scripts.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# Required libraries\n# import os\n# import pyreadr\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, FunctionTransformer\n\ndef dataframe_modification(experiment_dataset, target_col_index):\n '''\n Data uploading and preprocessing\n\n (1) applying lowercase opation for gene names\n (2) re-naming the last columns as cell type\n (3) reordering the dataset (cell type information is the last column)\n (4) changing the data type as float of gene expression\n\n Parameters\n ----------\n experiment_dataset : dataframe\n \n target_col_index : str\n The column number of target features, it differs depending on the using dataset. (-1 is for last column)\n\n Returns\n -------\n experiment_dataset : dataframe\n Uploaded and preprocessed dataset\n '''\n print('\\n****** RAW DATASET ******')\n print('experiment dataset shape , {0}\\n'.format(experiment_dataset.shape))\n print(experiment_dataset.head(3))\n print('******* DATASET INFO *******')\n print(experiment_dataset.info())\n print('******* DATASET INFO *******')\n\n# (1) applying lowercase opation for gene names\n experiment_dataset.columns = experiment_dataset.columns.str.lower()\n# (2) re-naming the last columns as cell type\n experiment_dataset = experiment_dataset.rename(columns={experiment_dataset.columns[target_col_index]:'cell_type'})\n# (3) reordering the dataset (cell type information is the last column)\n col_name = list(experiment_dataset.columns)\n target_col_name = col_name.pop(target_col_index)\n col_name.append(target_col_name)\n experiment_dataset = experiment_dataset[col_name]\n print(experiment_dataset.head())\n# (4) changing the data type as float of gene expression\n experiment_dataset = pd.concat([experiment_dataset.iloc[:, :-1].astype('float32'), experiment_dataset.iloc[:,-1]], axis=1)\n print(experiment_dataset.head())\n print('\\n****** PROCESSED DATASET ******')\n print('Target column, ', 
experiment_dataset.columns[target_col_index])\n print('experiment dataset shape , {0}\\n'.format(experiment_dataset.shape))\n print(experiment_dataset.head(3))\n# print('******* DATASET INFO *******')\n# print(experiment_dataset.info())\n# print('******* DATASET INFO *******')\n \n return(experiment_dataset)\n\n\ndef sample_wise_normalization(dataset):\n '''\n Applying sample-wise normalization into dataset\n\n Parameters\n ----------\n dataset : dataframe\n\n Returns\n -------\n df_scaler : dataframe\n Scaled dataset\n '''\n print(' -> sample wise normalization implemented!')\n df_sample_wise = pd.concat([ dataset.iloc[:, :-1].div(dataset.iloc[:, :-1].sum(axis=1), axis=0)*1e6\n , dataset.iloc[:, -1]], axis=1)\n return(df_sample_wise)\n\ndef scaler_normalization(dataset, scaler_name):\n '''\n Applying scaler or required mathematical function into dataset\n\n Parameters\n ----------\n dataset : dataframe\n \n scaler_name : str, [ss, mms, log1p]\n the scaler information\n\n Returns\n -------\n df_scaler : dataframe\n Scaled dataset\n \n '''\n try :\n if scaler_name == 'ss':\n scaler = StandardScaler()\n elif scaler_name == 'mms':\n scaler = MinMaxScaler()\n elif scaler_name == 'log1p':\n scaler = FunctionTransformer(np.log1p)\n else:\n raise Exception('Please, choose one of the normzalization options --> standard, minmax or log1p !!!')\n\n df_scaler = pd.concat([pd.DataFrame(scaler.fit_transform(dataset.iloc[: , :-1]) , columns=dataset.columns[:-1]).set_index(dataset.index)\n ,dataset.iloc[:, -1]], axis=1)\n \n print(' -> Normalization implemented! -- {0}'.format(scaler_name)) \n print(df_scaler.head(3))\n return(df_scaler)\n \n except Exception as error:\n print('\\n{0}'.format(error)) \n except:\n print(\"Unexpected error:\", sys.exc_info()[0]) " ]
[ [ "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.MinMaxScaler", "sklearn.preprocessing.FunctionTransformer" ] ]
michaelsouza/bp_loop
[ "8a6eb7a55f836661409b2f9adc889c256bb6a71a" ]
[ "read_sol.py" ]
[ "import pandas as pd\nimport numpy as np\n\ndef read_sol(fsol):\n print('Reading ' + fsol)\n df = {}\n colname = {}\n with open(fsol, 'r') as fid:\n for k, row in enumerate(fid.readlines()):\n if k == 0: # header\n for i, col in enumerate(row.split()):\n df[col] = []\n colname[i] = col\n continue\n for i, s in enumerate(row.split()):\n df[colname[i]].append(s)\n V = []\n for k in range(len(df['X'])):\n x = [float(df['X'][k]),float(df['Y'][k]),float(df['Z'][k])]\n V.append(x)\n df = pd.DataFrame(df) \n V = np.array(V)\n return V, df\n\nif __name__ == \"__main__\":\n fsol = 'DATA_LOOP_04/1egu.sol'\n V,_ = read_sol(fsol) \n" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
adamrvfisher/TechnicalAnalysisLibrary
[ "38a22b2b2b5052623f81edb11b3c5460fc254e45", "38a22b2b2b5052623f81edb11b3c5460fc254e45" ]
[ "NormChaikinTester.py", "YahooSourceDailies+Div.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/\r\n\r\n\"\"\"\r\n\r\n#This is a strategy tester\r\n#Pandas_datareader is deprecated, use YahooGrabber\r\n\r\n#Import modules\r\nimport numpy as np\r\nfrom pandas_datareader import data\r\nimport pandas as pd\r\n#Assign ticker\r\nticker = '^GSPC'\r\n#Request data\r\ns = data.DataReader(ticker, 'yahoo', start='01/01/2006', end='01/01/2050')\r\n#Calculate log returns\r\ns['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1)) \r\ns['LogRet'] = s['LogRet'].fillna(0)\r\n#Close location value\r\ns['CLV'] = (((s['Adj Close'] - s['Low']) - (s['High'] - s['Adj Close']))\r\n / (s['High'] - s['Low']))\r\n#Number of periods in time series\r\nLength = len(s['LogRet'])\r\n#Iterable\r\nRange = range(0,Length-1)\r\n#Index object\r\nindex = s.index\r\n#Assign params\r\na = 20 #number of days for moving average window\r\nb = 43 #numer of days for moving average window\r\n#Prep EMA weights\r\nmultiplierA = (2/(a+1))\r\nmultiplierB = (2/(b+1))\r\n#Calculate ADI\r\ns['ADI'] = (s['Volume'] * s['CLV']).cumsum()\r\n#Initialize EMA values\r\nEMAyesterdayA = s['ADI'][0] #these prices are based off the SMA values\r\nEMAyesterdayB = s['ADI'][0] #these prices are based off the SMA values\r\n#Calculate small EMA\r\nsmallEMA = [EMAyesterdayA]\r\nfor i in Range:\r\n holder = (s['ADI'][i]*multiplierA) + (EMAyesterdayA *\r\n (1-multiplierA))\r\n smallEMA.append(holder)\r\n EMAyesterdayA = holder\r\nsmallEMAseries = pd.Series(smallEMA[:], index=s.index) \r\n#Calculate large EMA\r\nlargeEMA = [EMAyesterdayB]\r\nfor i in Range:\r\n holder1 = (s['ADI'][i]*multiplierB) + (EMAyesterdayB *\r\n (1-multiplierB))\r\n largeEMA.append(holder1)\r\n EMAyesterdayB = holder1\r\nlargeEMAseries = pd.Series(largeEMA[:], index=s.index)\r\n#Series to list\r\ns['ADIEMAsmall'] = smallEMAseries\r\ns['ADIEMAlarge'] = largeEMAseries\r\n#Calculate Chaikin indicator\r\ns['Chaikin'] = s['ADIEMAsmall'] - 
s['ADIEMAlarge']\r\n#Horizontal line\r\ns['ZeroLine'] = 0\r\n#Assign params\r\nvolumewindow = 21\r\n#Calculate average rolling volume\r\ns['AverageRollingVolume'] = s['Volume'].rolling(center=False,\r\n window=volumewindow).mean()\r\n#Normalize by volume\r\ns['NormChaikin'] = s['Chaikin']/s['AverageRollingVolume']\r\n#Graphical display\r\ns[['ADI','ADIEMAsmall','ADIEMAlarge']].plot(grid=True, figsize = (8,3))\r\n#Trim time series\r\nkk = s[:volumewindow-1]\r\ns = s[volumewindow-1:]\r\n#Directional methodology\r\ns['Touch'] = np.where(s['NormChaikin'] < 0, 1,0) #long signal\r\ns['Touch'] = np.where(s['NormChaikin'] > 0, -1, s['Touch']) #short signal\r\ns['Sustain'] = np.where(s['Touch'].shift(1) == 1, 1, 0) # never actually true when optimized\r\ns['Sustain'] = np.where(s['Sustain'].shift(1) == 1, 1, \r\n s['Sustain']) \r\ns['Sustain'] = np.where(s['Touch'].shift(1) == -1, -1, 0) #true when previous day touch is -1, and current RSI is > line 37 threshold \r\ns['Sustain'] = np.where(s['Sustain'].shift(1) == -1, -1,\r\n s['Sustain']) \r\ns['Sustain'] = np.where(s['NormChaikin'] > 0, 0, s['Sustain']) #if RSI is greater than threshold, sustain is forced to 0\r\ns['Sustain'] = np.where(s['NormChaikin'] < 0, 0, s['Sustain']) #never actually true when optimized\r\ns['Regime'] = s['Touch'] + s['Sustain']\r\n#Apply position to returns\r\ns['Strategy'] = (s['Regime']).shift(1)*s['LogRet']\r\ns['Strategy'] = s['Strategy'].fillna(0)\r\n#Performance metric\r\nsharpe = (s['Strategy'].mean()-s['LogRet'].mean())/s['Strategy'].std()\r\n#s[['LogRet','Strategy']].cumsum().apply(np.exp).plot(grid=True,\r\n# figsize=(8,5))\r\n#Indicator graphical display\r\ns[['NormChaikin', 'ZeroLine']].plot(grid=True, figsize = (8,3))\r\n#Add data back\r\ns = kk.append(s)\r\n#Performance metric\r\nprint(sharpe)\r\n", "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/\r\n\r\n\"\"\"\r\n\r\n#This is a database creation tool, I/O, 
# ...technical analysis tool, and formatting tool.
#This file is dependent on EntityCreation
#Prior to running this, run EntityCreation, do NASDAQ population,
#Yahoo sourcing for monthlies, weeklies, dailies, dividend, and qualitative data
#When it finishes, YahooSource folder will populate

#Import modules
import numpy as np
from pandas import read_csv
import requests
import pandas as pd
import os
import time
from io import StringIO
from CrumbCatcher import CrumbCatcher
#pandas moved the CSV parser error between versions; support both spellings
try:
    from pandas.parser import CParserError
except ImportError:
    from pandas.errors import ParserError as CParserError
from requests.exceptions import ConnectionError
import pandas.io.common

#Start timer
start = time.time()

#Load universe list: a pickled list of '<TICKER>.csv' file names
UniverseCSVList = pd.read_pickle('Z:\\Users\\Username\\DirectoryLocation\\'+
                  'DataSources\\NASDAQSource\\UniverseLists\\Universe2018')
UniverseList = [s[:-4] for s in UniverseCSVList]

#Manually add to Universe
#UniverseList = ['SPY', 'GLD', 'TQQQ', 'SQQQ', 'VXXB', 'SLV']

#Load the NASDAQ CSV (qualitative data, populated by EntityCreation)
NASDAQData = pd.read_csv('Z:\\Users\\Username\\DirectoryLocation\\DataSources\\'+
             'NASDAQSource\\QualitativeData\\PretrimQualitativeData.csv', sep = ',')
NASDAQDataTickers = list(NASDAQData['Symbol'])

#Base URL of Yahoo's historical-data CSV endpoint
_YAHOO_BASE = "https://query1.finance.yahoo.com/v7/finance/download/"
#When the request fails, Yahoo answers a JSON error payload instead of CSV;
#after parsing, the first "column" name starts with this marker
_FAIL_MARKER = '{"chart":{"result":null'


def _download_frame(ticker, events, period2):
    """Fetch one Yahoo CSV ('history' or 'div') for ticker and parse it.

    Requests a fresh crumb per call, posts the download URL, and returns the
    response body parsed as a DataFrame (which may be the JSON error payload;
    see _FAIL_MARKER).
    """
    crumb = str(CrumbCatcher(str(ticker)))
    url = (_YAHOO_BASE + ticker + "?period1=-631123200&period2=" + period2
           + "&interval=1d&events=" + events + "&crumb=" + crumb)
    response = requests.post(url)
    return pd.read_csv(StringIO(response.text), sep = ',')


def _fetch_and_store(ticker):
    """Download daily history and dividend events for ticker and write both
    CSVs to preprocess storage.

    Raises KeyError when the payload has no 'Date' column (likely unlisted),
    CParserError on malformed CSV, and the requests network exceptions on
    connectivity problems -- the caller decides whether to retry.
    """
    #Rest before each crumb request so Yahoo does not throttle us
    time.sleep(2)
    daily = _download_frame(ticker, "history", "1570258800")
    time.sleep(2)
    dividend = _download_frame(ticker, "div", "1598374000")

    #Error detection (report only; a bad payload raises KeyError at set_index)
    if daily.columns[0] == _FAIL_MARKER:
        print('The URL failed for ' + ticker + ' on daily frequency')
    else:
        print(ticker + ' Dailies secured.')
    if dividend.columns[0] == _FAIL_MARKER:
        print('The URL failed for ' + ticker + ' dividends')
    else:
        print(ticker + ' Dividends secured.')

    #Set up date as primary key and convert it to a real datetime index
    daily = daily.set_index('Date')
    dividend = dividend.set_index('Date')
    daily.index = pd.to_datetime(daily.index, format = "%Y/%m/%d")
    dividend.index = pd.to_datetime(dividend.index, format = "%Y/%m/%d")

    #Save to preprocess storage
    daily.to_csv("Z:\\Users\\Username\\DirectoryLocation\\DataSources\\YahooSource\\TimeSeriesData\\DAY-" + ticker + ".csv")
    dividend.to_csv("Z:\\Users\\Username\\DirectoryLocation\\DataSources\\YahooSource\\DividendData\\DIV-" + ticker + ".csv")
    print(ticker + ' completed.')


#Transient network errors that deserve a sleep-and-retry.
#NOTE: requests.exceptions.SSLError subclasses requests' ConnectionError, so
#the old separate `except SSLError` branches after `except ConnectionError`
#were unreachable; check SSLError first when distinguishing the two.
_RETRYABLE = (ConnectionError, requests.exceptions.SSLError)

#Iterable range, one per issue
ranger = range(0, len(UniverseList))
#For every issue: try once, then up to two more attempts after network errors
for i in ranger:
    ticker = str(UniverseList[i])
    attempt = 0
    while True:
        attempt += 1
        try:
            _fetch_and_store(ticker)
            break
        except KeyError:
            print('KeyError for ' + ticker + ', likely unlisted.')
            break
        except CParserError:
            print('Parser failed for ' + ticker + ', skipping to next ticker.')
            break
        except _RETRYABLE as err:
            if attempt >= 3:
                print('Repeated network errors for ' + ticker + ', skipping to next ticker.')
                break
            #Subclass check first: SSLError is itself a ConnectionError
            if isinstance(err, requests.exceptions.SSLError):
                print('SSLError on ' + str(ticker) + '.')
                print('Sleeping for 61 seconds.')
                time.sleep(61)
            else:
                print('ConnectionError on ' + str(ticker) + '.')
                print('Sleeping for 5 min.')
                time.sleep(301)
            print('Parsing for ' + ticker + '.')

print('All source data is in preprocess storage as CSV; ready for processing.')
#Processed data will be stored as FREQ-TICKER-YYYYMMDD
#(Daily is the smallest frequency for this source.)

#Root folder the processed pickles are written under: one subfolder per
#frequency prefix (DAY, DIV, ...) and one per issue inside it
_PROCESSED_ROOT = 'Z:\\Users\\Username\\DirectoryLocation\\DataSources\\YahooSource\\ProcessedData\\'


def _build_keys(frame, csv_name):
    """Index frame by datetime 'Date' and attach the ProcessedDataIndex key.

    csv_name is the source file name 'PRE-TICKER.csv': its first three
    characters are the frequency prefix and the middle part is the ticker.
    Returns the re-indexed frame; rows/columns are NOT de-duplicated here.
    """
    frame = frame.set_index('Date')
    #Keep the original date string around to build the concatenated key
    frame['Date'] = frame.index
    frame.index = pd.to_datetime(frame.index, format = "%Y/%m/%d")
    #YYYYMMDD suffix per row, e.g. '2018-01-02' -> '20180102'
    suffix = pd.Series([d.replace('-', '') for d in frame['Date']])
    suffix.index = frame.index
    frame['FrequencySuffix'] = suffix
    frame['FrequencyPrefix'] = csv_name[:3]
    frame['Ticker'] = csv_name[4:-4]
    frame['ProcessedDataIndex'] = frame['FrequencyPrefix'] + frame['Ticker'] + frame['FrequencySuffix']
    return frame


def _dedupe(frame):
    """Drop duplicate columns, then duplicate index rows (first one wins)."""
    frame = frame.loc[:, ~frame.columns.duplicated()]
    return frame[~frame.index.duplicated(keep='first')]


def _add_date_columns(frame, age_column):
    """Attach Age (row count of age_column) and calendar breakdown columns."""
    frame['Age'] = len(frame[age_column])
    frame['Year'] = frame.index.year
    frame['Month'] = frame.index.month
    frame['Day'] = frame.index.day
    frame['DayOfWeek'] = frame.index.dayofweek


def _attach_nasdaq_row(frame):
    """Copy the NASDAQ qualitative fields for the frame's ticker onto frame.

    Raises IndexError when the ticker has no row in NASDAQData; callers treat
    that as 'no qualitative data' and fill NaN placeholders instead.
    """
    row = NASDAQData[NASDAQData['Symbol'] == frame['Ticker'][0]]
    frame['Name'] = row.iloc[0][1]
    frame['LastSale'] = float(row.iloc[0][2])
    frame['GivenMarketCap'] = row.iloc[0][3]
    frame['IPOyear'] = row.iloc[0][5]
    frame['Sector'] = row.iloc[0][6]
    frame['Industry'] = row.iloc[0][7]
    frame['SharesOutstanding'] = frame['GivenMarketCap'] / frame['LastSale']


def _fill_missing_nasdaq(frame, include_marketcap):
    """Fill the qualitative columns with NaN when NASDAQ data is missing."""
    columns = ['Name', 'LastSale', 'GivenMarketCap', 'IPOyear',
               'Sector', 'Industry', 'SharesOutstanding']
    if include_marketcap:
        #Perhaps make a proxy for market cap and insert here
        columns.append('MarketCap')
    for column in columns:
        frame[column] = np.nan


def _save_pickle(frame, csv_name):
    """Pickle frame under ProcessedData\\<PREFIX>\\<PRE-TICKER>\\<PRE-TICKER>,
    creating the per-issue folder on first use."""
    folder = _PROCESSED_ROOT + frame['FrequencyPrefix'][0] + '\\' + csv_name[:-4]
    if not os.path.exists(folder):
        os.makedirs(folder)
    pd.to_pickle(frame, folder + '\\' + csv_name[:-4])


#CSV list for TimeSeries Data to put into processing
TimeSeries = os.listdir('Z:\\Users\\Username\\DirectoryLocation\\DataSources\\YahooSource\\TimeSeriesData\\')
TimeSeriesTickers = [s[4:-4] for s in TimeSeries]

#Iterable for every freq x stock
ranger = range(0, len(TimeSeries))
for i in ranger:
    try:
        temp = read_csv('Z:\\Users\\Username\\DirectoryLocation\\DataSources\\YahooSource\\TimeSeriesData\\' +
                        TimeSeries[i], sep = ',')
        print('Basic TS processing for ' + TimeSeries[i] + '.')
        temp = _dedupe(_build_keys(temp, TimeSeries[i]))
        #Make HLOCV data all numerical
        for x in temp.columns[:6]:
            temp[x] = pd.to_numeric(temp[x], errors='coerce')
        _add_date_columns(temp, 'Adj Close')
        #Dividends get populated later from the DIV files
        temp['Dividends'] = np.nan
        #Addition of NASDAQSource data (NASDAQ data must be populated already)
        _attach_nasdaq_row(temp)
        #MarketCap in billions, recomputed per row from the adjusted close
        temp['MarketCap'] = (temp['SharesOutstanding'] * temp['Adj Close']) / 10**9
        _save_pickle(temp, TimeSeries[i])
        print(temp['Ticker'][0] + ' time series pickles saved.')
    except OSError:
        continue
    except IndexError:
        #No qualitative data for this ticker: save with NaN placeholders
        _fill_missing_nasdaq(temp, include_marketcap=True)
        _save_pickle(temp, TimeSeries[i])

print('TS data stored in ProcessedData')

#Contents of dividend parse
Dividends = os.listdir('Z:\\Users\\Username\\DirectoryLocation\\DataSources\\YahooSource\\DividendData\\')
DividendsTickers = [s[4:-4] for s in Dividends]

#Iterable for every div x stock
ranger = range(0, len(Dividends))
for i in ranger:
    try:
        temp = read_csv('Z:\\Users\\Username\\DirectoryLocation\\DataSources\\YahooSource\\DividendData\\' +
                        Dividends[i], sep = ',')
        if len(temp['Dividends']) == 0:
            print('No dividend data in ' + Dividends[i])
            continue
        print('Basic DIV processing for ' + DividendsTickers[i] + '.')
        temp = _build_keys(temp, Dividends[i])
        #Sort DIV by index before de-duplicating so 'first' is deterministic
        temp = _dedupe(temp.sort_index())
        #Make dividend info numerical
        #(bug fix: the old slice `columns[:0]` was empty, so the conversion
        #never ran; convert the Dividends column explicitly)
        temp['Dividends'] = pd.to_numeric(temp['Dividends'], errors='coerce')
        _add_date_columns(temp, 'Dividends')
        #Addition of NASDAQSource data (NASDAQ data must be populated already)
        _attach_nasdaq_row(temp)
        _save_pickle(temp, Dividends[i])
        print(temp['Ticker'][0] + ' time series pickles saved.')
    except pandas.io.common.EmptyDataError:
        continue
    except OSError:
        continue
    except ValueError:
        continue
    except IndexError:
        #No qualitative data for this ticker: save with NaN placeholders
        #(no MarketCap column here, matching the original DIV schema)
        _fill_missing_nasdaq(temp, include_marketcap=False)
        _save_pickle(temp, Dividends[i])

print('DIV data stored in DividendData')
#Yahoo source HLOC data is stored and cleaned in pickles for addition of
#dividend, div yield, and technical analysis data.
\r\n\r\nprint('Adding DIV to DAY TS.')\r\n#Find all stocks with dividend information and technical data // delete duplicates list(set()) might be redundant.\r\nDividendAndTechnicalList = list(set([i for i in DividendsTickers if i in TimeSeriesTickers]))\r\n#For all stocks with div + dailies: Dividend based modifications made here. \r\nfor d in DividendAndTechnicalList:\r\n try:\r\n #Get DAY time series\r\n DAY = pd.read_pickle('Z:\\\\Users\\\\Username\\\\DirectoryLocation\\\\DataSources\\\\'\r\n + 'YahooSource\\\\ProcessedData\\\\' + 'DAY' + '\\\\' + 'DAY-' + d + '\\\\' + 'DAY-' + d) \r\n #Get DIV \r\n DIV = pd.read_pickle('Z:\\\\Users\\\\Username\\\\DirectoryLocation\\\\DataSources\\\\'\r\n + 'YahooSource\\\\ProcessedData\\\\' + 'DIV' + '\\\\' + 'DIV-' + d + '\\\\' + 'DIV-' + d)\r\n #For every dividend entry, populate daily TS['Dividends'] from DIV.T\r\n TimeStamps = [i for i in DAY.index if i in DIV.index]\r\n DAY.loc[(TimeStamps,'Dividends')] = DIV['Dividends']\r\n DAY['LastDividend'] = DAY['Dividends']\r\n #Bfill to use for yield calculation\r\n DAY['LastDividend'][DAY['LastDividend'] == 0] = np.nan\r\n DAY['LastDividend'] = DAY['LastDividend'].ffill()\r\n #Fill nans with 0 for no dividends on given day\r\n DAY['Dividends'] = DAY['Dividends'].fillna(0) \r\n #Div Yield by unspecified frequency // Be aware of assumptions using AdjClose\r\n DAY['DividendYield'] = DAY['LastDividend']/DAY['Adj Close']\r\n pd.to_pickle(DAY, 'Z:\\\\Users\\\\Username\\\\DirectoryLocation\\\\DataSources\\\\' +\r\n 'YahooSource\\\\ProcessedData\\\\' + 'DAY' + '\\\\' + 'DAY-' + d + '\\\\' + 'DAY-' + d)\r\n print('Dividends added to ' + d + ' dailies.') \r\n except FileNotFoundError:\r\n print('No file for ' + d)\r\n continue\r\n except ValueError:\r\n print('No good data for ' + d)\r\n continue\r\n#Populate set - DAY, WEK, MO - with custom database modifications for TA data.\r\n#Start with Yahoo dailies TA mod\r\nprint('Processing for dailies.')\r\n#Get list to process\r\nProcessedDailies 
= os.listdir('Z:\\\\Users\\\\Username\\\\DirectoryLocation\\\\DataSources\\\\'\r\n + 'YahooSource\\\\ProcessedData\\\\DAY\\\\')\r\n\r\n#Trimmer\r\n#ProcessedDailies = ProcessedDailies[:10]\r\n\r\nfor p in ProcessedDailies:\r\n #Access data\r\n temp = pd.read_pickle('Z:\\\\Users\\\\Username\\\\DirectoryLocation\\\\DataSources\\\\'\r\n + 'YahooSource\\\\ProcessedData\\\\DAY\\\\' + p + '\\\\' + p)\r\n #Daily Log Returns (subtract 1!!!)\r\n temp['LogRet'] = np.log(temp['Adj Close']/temp['Adj Close'].shift(1)) \r\n temp['LogRet'] = temp['LogRet'].fillna(0)\r\n \r\n #EasyData \r\n temp['HigherOpen'] = (np.where(temp['Open'] > temp['Open'].shift(1), 1,0))\r\n temp['LowerOpen'] = (np.where(temp['Open'] < temp['Open'].shift(1), 1,0))\r\n temp['HigherHigh'] = (np.where(temp['High'] > temp['High'].shift(1), 1,0))\r\n temp['LowerHigh'] = (np.where(temp['High'] < temp['High'].shift(1), 1,0))\r\n temp['HigherLow'] = (np.where(temp['Low'] > temp['Low'].shift(1), 1,0))\r\n temp['LowerLow'] = (np.where(temp['Low'] < temp['Low'].shift(1), 1,0))\r\n temp['HigherClose'] = (np.where(temp['Adj Close'] > temp['Adj Close'].shift(1), 1,0))\r\n temp['LowerClose'] = (np.where(temp['Adj Close'] < temp['Adj Close'].shift(1), 1,0))\r\n \r\n #Gap Up % > 0 \r\n temp['GapUp'] = (temp['High'].shift(1) - temp['Low']) / temp['Adj Close'].shift(1)\r\n temp['GapUp'] = temp['GapUp'][temp['GapUp'] < 0]\r\n temp['GapUp'] = temp['GapUp'].fillna(0)\r\n temp['GapUp'] = np.where(temp['GapUp'] == 0 , 0, (-1*temp['GapUp']))\r\n\r\n #Gap Down % > 0 \r\n temp['GapDown'] = (temp['Low'].shift(1) - temp['High']) / temp['Adj Close'].shift(1)\r\n temp['GapDown'] = temp['GapDown'][temp['GapDown'] > 0]\r\n temp['GapDown'] = temp['GapDown'].fillna(0)\r\n \r\n #Min/Max & RangePoints/RangePercent\r\n temp['AllTimeLow'] = temp['Adj Close'].min()\r\n temp['AllTimeHigh'] = temp['Adj Close'].max()\r\n temp['100wkLow'] = temp['Adj Close'].rolling(500).min()\r\n temp['100wkHigh'] = temp['Adj Close'].rolling(500).max()\r\n 
# Rolling high/low range features per lookback window (window lengths are in
# trading days, 5 per week). For each window: Low, High, the point range
# (High - Low) and that range as a fraction of the current adjusted close.
# NOTE(review): assumes 100wkLow / 100wkHigh were assigned just before this
# excerpt — confirm against the preceding lines.
temp['100wkRangePoints'] = temp['100wkHigh'] - temp['100wkLow']
temp['100wkRangePercent'] = temp['100wkRangePoints'] / temp['Adj Close']
for _label, _window in [
        ('90wk', 450), ('80wk', 400), ('70wk', 350), ('65wk', 325),
        ('60wk', 300), ('55wk', 275), ('52wk', 252), ('45wk', 225),
        ('40wk', 200), ('35wk', 175), ('30wk', 150), ('25wk', 125),
        ('20wk', 100), ('15wk', 75), ('12wk', 60), ('11wk', 55),
        ('10wk', 50), ('9wk', 45), ('8wk', 40), ('7wk', 35),
        ('6wk', 30), ('5wk', 25)]:
    temp[_label + 'Low'] = temp['Adj Close'].rolling(_window).min()
    temp[_label + 'High'] = temp['Adj Close'].rolling(_window).max()
    temp[_label + 'RangePoints'] = temp[_label + 'High'] - temp[_label + 'Low']
    temp[_label + 'RangePercent'] = temp[_label + 'RangePoints'] / temp['Adj Close']
# The 4wk quartet is split in the original source; 4wkRangePercent is
# produced immediately after this block.
temp['4wkLow'] = temp['Adj Close'].rolling(20).min()
temp['4wkHigh'] = temp['Adj Close'].rolling(20).max()
temp['4wkRangePoints'] = temp['4wkHigh'] - temp['4wkLow']
# Finish the 4wk quartet, then the short-window (3wk..2day) range features,
# then the first batch of STATIC average-range columns: the whole-history
# mean of the window's RangePercent scaled by the window length in days
# (a single scalar broadcast down the column).
temp['4wkRangePercent'] = temp['4wkRangePoints'] / temp['Adj Close']
for _label, _window in [('3wk', 15), ('2wk', 10), ('1wk', 5),
                        ('4day', 4), ('3day', 3), ('2day', 2)]:
    temp[_label + 'Low'] = temp['Adj Close'].rolling(_window).min()
    temp[_label + 'High'] = temp['Adj Close'].rolling(_window).max()
    temp[_label + 'RangePoints'] = temp[_label + 'High'] - temp[_label + 'Low']
    temp[_label + 'RangePercent'] = temp[_label + 'RangePoints'] / temp['Adj Close']
# STATIC Average Range (long windows; the remaining windows follow below).
for _label, _window in [('100wk', 500), ('90wk', 450), ('80wk', 400),
                        ('70wk', 350), ('65wk', 325)]:
    temp[_label + 'TotalAverageRange'] = temp[_label + 'RangePercent'].mean() * _window
# STATIC average range for the remaining windows: whole-history mean of the
# window's RangePercent scaled by the window length in trading days.
for _label, _window in [
        ('60wk', 300), ('55wk', 275), ('52wk', 252), ('45wk', 225),
        ('40wk', 200), ('35wk', 175), ('30wk', 150), ('25wk', 125),
        ('20wk', 100), ('15wk', 75), ('12wk', 60), ('11wk', 55),
        ('10wk', 50), ('9wk', 45), ('8wk', 40), ('7wk', 35),
        ('6wk', 30), ('5wk', 25), ('4wk', 20), ('3wk', 15),
        ('2wk', 10), ('1wk', 5), ('4day', 4), ('3day', 3), ('2day', 2)]:
    temp[_label + 'TotalAverageRange'] = temp[_label + 'RangePercent'].mean() * _window

# DYNAMIC rolling average range: rolling mean of each window's RangePercent.
# BUGFIX: the original computed temp['3dayRollingAverageRange'] from
# temp['4dayRangePercent'] (copy-paste error); it now correctly uses
# 3dayRangePercent, as every other window does.
for _label, _window in [
        ('100wk', 500), ('90wk', 450), ('80wk', 400), ('70wk', 350),
        ('65wk', 325), ('60wk', 300), ('55wk', 275), ('52wk', 252),
        ('45wk', 225), ('40wk', 200), ('35wk', 175), ('30wk', 150),
        ('25wk', 125), ('20wk', 100), ('15wk', 75), ('12wk', 60),
        ('11wk', 55), ('10wk', 50), ('9wk', 45), ('8wk', 40),
        ('7wk', 35), ('6wk', 30), ('5wk', 25), ('4wk', 20),
        ('3wk', 15), ('2wk', 10), ('1wk', 5), ('4day', 4),
        ('3day', 3), ('2day', 2)]:
    temp[_label + 'RollingAverageRange'] = temp[_label + 'RangePercent'].rolling(
        center=False, window=_window).mean()

# DYNAMIC (rolling average range / static average range) - 1 for the long
# windows: positive when the recent range is wider than the historical norm.
# (45wk and shorter follow below.)
for _label in ['100wk', '90wk', '80wk', '70wk', '65wk', '60wk', '55wk', '52wk']:
    temp[_label + 'RARtoTAR'] = (temp[_label + 'RollingAverageRange']
                                 / temp[_label + 'TotalAverageRange']) - 1
# (RollingAverageRange / TotalAverageRange) - 1 for the shorter windows:
# positive when the recent range is wider than the historical norm.
for _label in ['45wk', '40wk', '35wk', '30wk', '25wk', '20wk', '15wk',
               '12wk', '11wk', '10wk', '9wk', '8wk', '7wk', '6wk', '5wk',
               '4wk', '3wk', '2wk', '1wk', '4day', '3day', '2day']:
    temp[_label + 'RARtoTAR'] = (temp[_label + 'RollingAverageRange']
                                 / temp[_label + 'TotalAverageRange']) - 1

# B/O, B/D ratio: today's High vs the window High (> 1 means moving higher)
# and today's Low vs the window Low (> 1 means moving lower).
for _label in ['100wk', '90wk', '80wk', '70wk', '65wk', '60wk', '55wk',
               '52wk', '45wk', '40wk', '35wk', '30wk', '25wk', '20wk',
               '15wk', '12wk', '11wk', '10wk', '9wk', '8wk', '7wk', '6wk',
               '5wk', '4wk', '3wk', '2wk', '1wk', '4day', '3day', '2day']:
    temp[_label + 'BreakOutRatio'] = temp['High'] / temp[_label + 'High']
    temp[_label + 'BreakDownRatio'] = temp['Low'] / temp[_label + 'Low']

# STATIC whole-history average log return and std dev, scaled to each
# horizon (mean * days, std * sqrt(days)).
for _label, _window in [('100wk', 500), ('90wk', 450), ('80wk', 400)]:
    temp[_label + 'TotalAverageReturn'] = temp['LogRet'].mean() * _window
    temp[_label + 'TotalAverageStdDev'] = temp['LogRet'].std() * np.sqrt(_window)
# The 70wk pair is split in the original source; its StdDev is produced
# immediately after this block.
temp['70wkTotalAverageReturn'] = temp['LogRet'].mean() * 350
# Finish the 70wk pair, then the remaining STATIC horizon-scaled average
# return / std dev pairs (mean * days, std * sqrt(days)).
temp['70wkTotalAverageStdDev'] = temp['LogRet'].std() * np.sqrt(350)
for _label, _window in [
        ('65wk', 325), ('60wk', 300), ('55wk', 275), ('52wk', 252),
        ('45wk', 225), ('40wk', 200), ('35wk', 175), ('30wk', 150),
        ('25wk', 125), ('20wk', 100), ('15wk', 75), ('12wk', 60),
        ('11wk', 55), ('10wk', 50), ('9wk', 45), ('8wk', 40),
        ('7wk', 35), ('6wk', 30), ('5wk', 25), ('4wk', 20),
        ('3wk', 15), ('2wk', 10), ('1wk', 5), ('4day', 4),
        ('3day', 3), ('2day', 2)]:
    temp[_label + 'TotalAverageReturn'] = temp['LogRet'].mean() * _window
    temp[_label + 'TotalAverageStdDev'] = temp['LogRet'].std() * np.sqrt(_window)

# CV IS STATIC (not rolling): horizon std dev over horizon mean return.
for _label in ['100wk', '90wk', '80wk', '70wk', '65wk', '60wk', '55wk',
               '52wk', '45wk', '40wk', '35wk', '30wk', '25wk', '20wk', '15wk']:
    temp[_label + 'CoefficientOfVaration'] = (
        temp[_label + 'TotalAverageStdDev'] / temp[_label + 'TotalAverageReturn'])
# NOTE(review): the original names this column '12CoefficientOfVaration'
# (missing 'wk'); the name is kept verbatim so downstream lookups still work.
temp['12CoefficientOfVaration'] = (
    temp['12wkTotalAverageStdDev'] / temp['12wkTotalAverageReturn'])
for _label in ['11wk', '10wk', '9wk', '8wk', '7wk', '6wk', '5wk', '4wk',
               '3wk', '2wk', '1wk', '4day', '3day', '2day']:
    temp[_label + 'CoefficientOfVaration'] = (
        temp[_label + 'TotalAverageStdDev'] / temp[_label + 'TotalAverageReturn'])

# DYNAMIC: rolling mean of log returns over each window.
for _label, _window in [
        ('100wk', 500), ('90wk', 450), ('80wk', 400), ('70wk', 350),
        ('65wk', 325), ('60wk', 300), ('55wk', 275), ('52wk', 252),
        ('45wk', 225), ('40wk', 200), ('35wk', 175), ('30wk', 150),
        ('25wk', 125), ('20wk', 100), ('15wk', 75), ('12wk', 60),
        ('11wk', 55), ('10wk', 50), ('9wk', 45), ('8wk', 40),
        ('7wk', 35), ('6wk', 30), ('5wk', 25), ('4wk', 20),
        ('3wk', 15), ('2wk', 10), ('1wk', 5), ('4day', 4),
        ('3day', 3), ('2day', 2)]:
    temp[_label + 'RollingAverageReturn'] = temp['LogRet'].rolling(
        center=False, window=_window).mean()

# DYNAMIC: rolling std dev of log returns (long windows; 7wk and shorter
# follow below).
for _label, _window in [
        ('100wk', 500), ('90wk', 450), ('80wk', 400), ('70wk', 350),
        ('65wk', 325), ('60wk', 300), ('55wk', 275), ('52wk', 252),
        ('45wk', 225), ('40wk', 200), ('35wk', 175), ('30wk', 150),
        ('25wk', 125), ('20wk', 100), ('15wk', 75), ('12wk', 60),
        ('11wk', 55), ('10wk', 50), ('9wk', 45), ('8wk', 40)]:
    temp[_label + 'RollingStdDev'] = temp['LogRet'].rolling(
        center=False, window=_window).std()
temp['7wkRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 35).std()\r\n temp['6wkRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 30).std()\r\n temp['5wkRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 25).std() \r\n temp['4wkRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 20).std()\r\n temp['3wkRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 15).std()\r\n temp['2wkRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 10).std()\r\n temp['1wkRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 5).std()\r\n temp['4dayRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 4).std()\r\n temp['3dayRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 3).std()\r\n temp['2dayRollingStdDev'] = temp['LogRet'].rolling(\r\n center=False, window = 2).std()\r\n \r\n #Rate of Change (ROC) in %\r\n temp['100wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(500)\r\n ) / temp['Adj Close'].shift(500) \r\n temp['100wkRateOfChange'] = temp['100wkRateOfChange'].fillna(0)\r\n temp['90wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(450)\r\n ) / temp['Adj Close'].shift(450) \r\n temp['90wkRateOfChange'] = temp['90wkRateOfChange'].fillna(0)\r\n temp['80wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(400)\r\n ) / temp['Adj Close'].shift(400) \r\n temp['80wkRateOfChange'] = temp['80wkRateOfChange'].fillna(0)\r\n temp['70wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(350)\r\n ) / temp['Adj Close'].shift(350) \r\n temp['70wkRateOfChange'] = temp['70wkRateOfChange'].fillna(0)\r\n temp['65wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(325)\r\n ) / temp['Adj Close'].shift(325) \r\n temp['65wkRateOfChange'] = temp['65wkRateOfChange'].fillna(0)\r\n temp['60wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(300)\r\n ) / temp['Adj Close'].shift(300) 
\r\n temp['60wkRateOfChange'] = temp['60wkRateOfChange'].fillna(0)\r\n temp['55wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(275)\r\n ) / temp['Adj Close'].shift(275) \r\n temp['55wkRateOfChange'] = temp['55wkRateOfChange'].fillna(0)\r\n temp['52wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(252)\r\n ) / temp['Adj Close'].shift(252) \r\n temp['52wkRateOfChange'] = temp['52wkRateOfChange'].fillna(0)\r\n temp['45wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(225)\r\n ) / temp['Adj Close'].shift(225) \r\n temp['45wkRateOfChange'] = temp['45wkRateOfChange'].fillna(0)\r\n temp['40wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(200)\r\n ) / temp['Adj Close'].shift(200) \r\n temp['40wkRateOfChange'] = temp['40wkRateOfChange'].fillna(0)\r\n temp['35wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(175)\r\n ) / temp['Adj Close'].shift(175) \r\n temp['35wkRateOfChange'] = temp['35wkRateOfChange'].fillna(0)\r\n temp['30wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(150)\r\n ) / temp['Adj Close'].shift(150) \r\n temp['30wkRateOfChange'] = temp['30wkRateOfChange'].fillna(0)\r\n temp['25wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(125)\r\n ) / temp['Adj Close'].shift(125) \r\n temp['25wkRateOfChange'] = temp['25wkRateOfChange'].fillna(0)\r\n temp['20wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(100)\r\n ) / temp['Adj Close'].shift(100) \r\n temp['20wkRateOfChange'] = temp['20wkRateOfChange'].fillna(0)\r\n temp['15wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(75)\r\n ) / temp['Adj Close'].shift(75) \r\n temp['15wkRateOfChange'] = temp['15wkRateOfChange'].fillna(0)\r\n temp['12wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(60)\r\n ) / temp['Adj Close'].shift(60) \r\n temp['12wkRateOfChange'] = temp['12wkRateOfChange'].fillna(0)\r\n temp['11wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(55)\r\n ) 
/ temp['Adj Close'].shift(55) \r\n temp['11wkRateOfChange'] = temp['11wkRateOfChange'].fillna(0)\r\n temp['10wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(50)\r\n ) / temp['Adj Close'].shift(50) \r\n temp['10wkRateOfChange'] = temp['10wkRateOfChange'].fillna(0)\r\n temp['9wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(45)\r\n ) / temp['Adj Close'].shift(45) \r\n temp['9wkRateOfChange'] = temp['9wkRateOfChange'].fillna(0)\r\n temp['8wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(40)\r\n ) / temp['Adj Close'].shift(40) \r\n temp['8wkRateOfChange'] = temp['8wkRateOfChange'].fillna(0)\r\n temp['7wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(35)\r\n ) / temp['Adj Close'].shift(35) \r\n temp['7wkRateOfChange'] = temp['7wkRateOfChange'].fillna(0)\r\n temp['6wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(30)\r\n ) / temp['Adj Close'].shift(30) \r\n temp['6wkRateOfChange'] = temp['6wkRateOfChange'].fillna(0)\r\n temp['5wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(25)\r\n ) / temp['Adj Close'].shift(25) \r\n temp['5wkRateOfChange'] = temp['5wkRateOfChange'].fillna(0)\r\n temp['4wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(20)\r\n ) / temp['Adj Close'].shift(20) \r\n temp['4wkRateOfChange'] = temp['4wkRateOfChange'].fillna(0)\r\n temp['3wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(15)\r\n ) / temp['Adj Close'].shift(15) \r\n temp['3wkRateOfChange'] = temp['3wkRateOfChange'].fillna(0)\r\n temp['2wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(10)\r\n ) / temp['Adj Close'].shift(10) \r\n temp['2wkRateOfChange'] = temp['2wkRateOfChange'].fillna(0) \r\n temp['1wkRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(5)\r\n ) / temp['Adj Close'].shift(5) \r\n temp['1wkRateOfChange'] = temp['1wkRateOfChange'].fillna(0)\r\n temp['4dayRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(4)\r\n ) / temp['Adj 
Close'].shift(4) \r\n temp['4dayRateOfChange'] = temp['4dayRateOfChange'].fillna(0)\r\n temp['3dayRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(3)\r\n ) / temp['Adj Close'].shift(3) \r\n temp['3dayRateOfChange'] = temp['3dayRateOfChange'].fillna(0) \r\n temp['2dayRateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(2)\r\n ) / temp['Adj Close'].shift(2) \r\n temp['2dayRateOfChange'] = temp['2dayRateOfChange'].fillna(0)\r\n\r\n #Over rolling period Average volume in period\r\n temp['100wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=500).mean()\r\n temp['90wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=450).mean()\r\n temp['80wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=400).mean()\r\n temp['70wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=350).mean()\r\n temp['65wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=325).mean()\r\n temp['60wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=300).mean()\r\n temp['55wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=275).mean()\r\n temp['52wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=252).mean()\r\n temp['45wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=225).mean()\r\n temp['40wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=200).mean()\r\n temp['35wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=175).mean()\r\n temp['30wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=150).mean()\r\n temp['25wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=125).mean()\r\n temp['20wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=100).mean()\r\n temp['15wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, 
window=75).mean()\r\n temp['12wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=60).mean()\r\n temp['11wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=55).mean()\r\n temp['10wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=50).mean()\r\n temp['9wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=45).mean()\r\n temp['8wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=40).mean()\r\n temp['7wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=35).mean()\r\n temp['6wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=30).mean()\r\n temp['5wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=25).mean()\r\n temp['4wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=20).mean()\r\n temp['3wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=15).mean()\r\n temp['2wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=10).mean()\r\n temp['1wkRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=5).mean() \r\n temp['4dayRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=4).mean() \r\n temp['3dayRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=3).mean() \r\n temp['2dayRollingAverageVolume'] = temp['Volume'].rolling(\r\n center=False, window=2).mean() \r\n \r\n #Front period over Average Return\r\n temp['100wkRollingReturnOverAverage'] = (temp['100wkRollingAverageReturn']/ \r\n temp['100wkTotalAverageReturn'])\r\n temp['90wkRollingReturnOverAverage'] = (temp['90wkRollingAverageReturn']/ \r\n temp['90wkTotalAverageReturn'])\r\n temp['80wkRollingReturnOverAverage'] = (temp['80wkRollingAverageReturn']/ \r\n temp['80wkTotalAverageReturn'])\r\n temp['70wkRollingReturnOverAverage'] = (temp['70wkRollingAverageReturn']/ \r\n 
temp['70wkTotalAverageReturn'])\r\n temp['65wkRollingReturnOverAverage'] = (temp['65wkRollingAverageReturn']/ \r\n temp['65wkTotalAverageReturn'])\r\n temp['60wkRollingReturnOverAverage'] = (temp['60wkRollingAverageReturn']/ \r\n temp['60wkTotalAverageReturn'])\r\n temp['55wkRollingReturnOverAverage'] = (temp['55wkRollingAverageReturn']/ \r\n temp['55wkTotalAverageReturn'])\r\n temp['52wkRollingReturnOverAverage'] = (temp['52wkRollingAverageReturn']/ \r\n temp['52wkTotalAverageReturn'])\r\n temp['45wkRollingReturnOverAverage'] = (temp['45wkRollingAverageReturn']/ \r\n temp['45wkTotalAverageReturn'])\r\n temp['40wkRollingReturnOverAverage'] = (temp['40wkRollingAverageReturn']/ \r\n temp['40wkTotalAverageReturn'])\r\n temp['35wkRollingReturnOverAverage'] = (temp['35wkRollingAverageReturn']/ \r\n temp['35wkTotalAverageReturn'])\r\n temp['30wkRollingReturnOverAverage'] = (temp['30wkRollingAverageReturn']/ \r\n temp['30wkTotalAverageReturn'])\r\n temp['25wkRollingReturnOverAverage'] = (temp['25wkRollingAverageReturn']/ \r\n temp['25wkTotalAverageReturn'])\r\n temp['20wkRollingReturnOverAverage'] = (temp['20wkRollingAverageReturn']/ \r\n temp['20wkTotalAverageReturn'])\r\n temp['15wkRollingReturnOverAverage'] = (temp['15wkRollingAverageReturn']/ \r\n temp['15wkTotalAverageReturn'])\r\n temp['12wkRollingReturnOverAverage'] = (temp['12wkRollingAverageReturn']/ \r\n temp['12wkTotalAverageReturn'])\r\n temp['11wkRollingReturnOverAverage'] = (temp['11wkRollingAverageReturn']/ \r\n temp['11wkTotalAverageReturn'])\r\n temp['10wkRollingReturnOverAverage'] = (temp['10wkRollingAverageReturn']/ \r\n temp['10wkTotalAverageReturn'])\r\n temp['9wkRollingReturnOverAverage'] = (temp['9wkRollingAverageReturn']/ \r\n temp['9wkTotalAverageReturn'])\r\n temp['8wkRollingReturnOverAverage'] = (temp['8wkRollingAverageReturn']/ \r\n temp['8wkTotalAverageReturn'])\r\n temp['7wkRollingReturnOverAverage'] = (temp['7wkRollingAverageReturn']/ \r\n temp['7wkTotalAverageReturn'])\r\n 
temp['6wkRollingReturnOverAverage'] = (temp['6wkRollingAverageReturn']/ \r\n temp['6wkTotalAverageReturn'])\r\n temp['5wkRollingReturnOverAverage'] = (temp['5wkRollingAverageReturn']/ \r\n temp['5wkTotalAverageReturn'])\r\n temp['4wkRollingReturnOverAverage'] = (temp['4wkRollingAverageReturn']/ \r\n temp['4wkTotalAverageReturn'])\r\n temp['3wkRollingReturnOverAverage'] = (temp['3wkRollingAverageReturn']/ \r\n temp['3wkTotalAverageReturn'])\r\n temp['2wkRollingReturnOverAverage'] = (temp['2wkRollingAverageReturn']/ \r\n temp['2wkTotalAverageReturn'])\r\n temp['1wkRollingReturnOverAverage'] = (temp['1wkRollingAverageReturn']/ \r\n temp['1wkTotalAverageReturn'])\r\n temp['4dayRollingReturnOverAverage'] = (temp['4dayRollingAverageReturn']/ \r\n temp['4dayTotalAverageReturn'])\r\n temp['3dayRollingReturnOverAverage'] = (temp['3dayRollingAverageReturn']/ \r\n temp['3dayTotalAverageReturn'])\r\n temp['2dayRollingReturnOverAverage'] = (temp['2dayRollingAverageReturn']/ \r\n temp['2dayTotalAverageReturn']) \r\n\r\n #Front period over Average Std Dev // These are ratios \r\n temp['100wkRollingStdDevOverAverage'] = (temp['100wkRollingStdDev']/ \r\n temp['100wkTotalAverageStdDev'])\r\n temp['90wkRollingStdDevOverAverage'] = (temp['90wkRollingStdDev']/ \r\n temp['90wkTotalAverageStdDev'])\r\n temp['80wkRollingStdDevOverAverage'] = (temp['80wkRollingStdDev']/ \r\n temp['80wkTotalAverageStdDev'])\r\n temp['70wkRollingStdDevOverAverage'] = (temp['70wkRollingStdDev']/ \r\n temp['70wkTotalAverageStdDev'])\r\n temp['65wkRollingStdDevOverAverage'] = (temp['65wkRollingStdDev']/ \r\n temp['65wkTotalAverageStdDev'])\r\n temp['60wkRollingStdDevOverAverage'] = (temp['60wkRollingStdDev']/ \r\n temp['60wkTotalAverageStdDev'])\r\n temp['55wkRollingStdDevOverAverage'] = (temp['55wkRollingStdDev']/ \r\n temp['55wkTotalAverageStdDev'])\r\n temp['52wkRollingStdDevOverAverage'] = (temp['52wkRollingStdDev']/ \r\n temp['52wkTotalAverageStdDev'])\r\n temp['45wkRollingStdDevOverAverage'] = 
(temp['45wkRollingStdDev']/ \r\n temp['45wkTotalAverageStdDev'])\r\n temp['40wkRollingStdDevOverAverage'] = (temp['40wkRollingStdDev']/ \r\n temp['40wkTotalAverageStdDev'])\r\n temp['35wkRollingStdDevOverAverage'] = (temp['35wkRollingStdDev']/ \r\n temp['35wkTotalAverageStdDev'])\r\n temp['30wkRollingStdDevOverAverage'] = (temp['30wkRollingStdDev']/ \r\n temp['30wkTotalAverageStdDev'])\r\n temp['25wkRollingStdDevOverAverage'] = (temp['25wkRollingStdDev']/ \r\n temp['25wkTotalAverageStdDev'])\r\n temp['20wkRollingStdDevOverAverage'] = (temp['20wkRollingStdDev']/ \r\n temp['20wkTotalAverageStdDev'])\r\n temp['15wkRollingStdDevOverAverage'] = (temp['15wkRollingStdDev']/ \r\n temp['15wkTotalAverageStdDev'])\r\n temp['12wkRollingStdDevOverAverage'] = (temp['12wkRollingStdDev']/ \r\n temp['12wkTotalAverageStdDev'])\r\n temp['11wkRollingStdDevOverAverage'] = (temp['11wkRollingStdDev']/ \r\n temp['11wkTotalAverageStdDev'])\r\n temp['10wkRollingStdDevOverAverage'] = (temp['10wkRollingStdDev']/ \r\n temp['10wkTotalAverageStdDev'])\r\n temp['9wkRollingStdDevOverAverage'] = (temp['9wkRollingStdDev']/ \r\n temp['9wkTotalAverageStdDev'])\r\n temp['8wkRollingStdDevOverAverage'] = (temp['8wkRollingStdDev']/ \r\n temp['8wkTotalAverageStdDev'])\r\n temp['7wkRollingStdDevOverAverage'] = (temp['7wkRollingStdDev']/ \r\n temp['7wkTotalAverageStdDev'])\r\n temp['6wkRollingStdDevOverAverage'] = (temp['6wkRollingStdDev']/ \r\n temp['6wkTotalAverageStdDev'])\r\n temp['5wkRollingStdDevOverAverage'] = (temp['5wkRollingStdDev']/ \r\n temp['5wkTotalAverageStdDev'])\r\n temp['4wkRollingStdDevOverAverage'] = (temp['4wkRollingStdDev']/ \r\n temp['4wkTotalAverageStdDev'])\r\n temp['3wkRollingStdDevOverAverage'] = (temp['3wkRollingStdDev']/ \r\n temp['3wkTotalAverageStdDev'])\r\n temp['2wkRollingStdDevOverAverage'] = (temp['2wkRollingStdDev']/ \r\n temp['2wkTotalAverageStdDev'])\r\n temp['1wkRollingStdDevOverAverage'] = (temp['1wkRollingStdDev']/ \r\n temp['1wkTotalAverageStdDev'])\r\n 
temp['4dayRollingStdDevOverAverage'] = (temp['4dayRollingStdDev']/ \r\n temp['4dayTotalAverageStdDev'])\r\n temp['3dayRollingStdDevOverAverage'] = (temp['3dayRollingStdDev']/ \r\n temp['3dayTotalAverageStdDev'])\r\n temp['2dayRollingStdDevOverAverage'] = (temp['2dayRollingStdDev']/ \r\n temp['2dayTotalAverageStdDev']) \r\n \r\n #N period ATR Setup\r\n temp['Method1'] = temp['High'] - temp['Low']\r\n temp['Method2'] = abs((temp['High'] - temp['Adj Close'].shift(1)))\r\n temp['Method3'] = abs((temp['Low'] - temp['Adj Close'].shift(1)))\r\n temp['Method1'] = temp['Method1'].fillna(0)\r\n temp['Method2'] = temp['Method2'].fillna(0)\r\n temp['Method3'] = temp['Method3'].fillna(0)\r\n temp['TrueRange'] = temp[['Method1','Method2','Method3']].max(axis = 1)\r\n\r\n #ATR Calculation\r\n temp['100wkATRPoints'] = temp['TrueRange'].rolling(window = 500, center=False).mean() \r\n temp['100wkATRPercent'] = temp['100wkATRPoints'] / temp['Adj Close']\r\n temp['90wkATRPoints'] = temp['TrueRange'].rolling(window = 450, center=False).mean() \r\n temp['90wkATRPercent'] = temp['90wkATRPoints'] / temp['Adj Close']\r\n temp['80wkATRPoints'] = temp['TrueRange'].rolling(window = 400, center=False).mean() \r\n temp['80wkATRPercent'] = temp['80wkATRPoints'] / temp['Adj Close']\r\n temp['70wkATRPoints'] = temp['TrueRange'].rolling(window = 350, center=False).mean() \r\n temp['70wkATRPercent'] = temp['70wkATRPoints'] / temp['Adj Close']\r\n temp['65wkATRPoints'] = temp['TrueRange'].rolling(window = 325, center=False).mean() \r\n temp['65wkATRPercent'] = temp['65wkATRPoints'] / temp['Adj Close']\r\n temp['60wkATRPoints'] = temp['TrueRange'].rolling(window = 300, center=False).mean() \r\n temp['60wkATRPercent'] = temp['60wkATRPoints'] / temp['Adj Close']\r\n temp['55wkATRPoints'] = temp['TrueRange'].rolling(window = 275, center=False).mean() \r\n temp['55wkATRPercent'] = temp['55wkATRPoints'] / temp['Adj Close']\r\n temp['52wkATRPoints'] = temp['TrueRange'].rolling(window = 252, 
center=False).mean() \r\n temp['52wkATRPercent'] = temp['52wkATRPoints'] / temp['Adj Close']\r\n temp['45wkATRPoints'] = temp['TrueRange'].rolling(window = 225, center=False).mean() \r\n temp['45wkATRPercent'] = temp['45wkATRPoints'] / temp['Adj Close']\r\n temp['40wkATRPoints'] = temp['TrueRange'].rolling(window = 200, center=False).mean() \r\n temp['40wkATRPercent'] = temp['40wkATRPoints'] / temp['Adj Close']\r\n temp['35wkATRPoints'] = temp['TrueRange'].rolling(window = 175, center=False).mean() \r\n temp['35wkATRPercent'] = temp['35wkATRPoints'] / temp['Adj Close']\r\n temp['30wkATRPoints'] = temp['TrueRange'].rolling(window = 150, center=False).mean() \r\n temp['30wkATRPercent'] = temp['30wkATRPoints'] / temp['Adj Close']\r\n temp['25wkATRPoints'] = temp['TrueRange'].rolling(window = 125, center=False).mean() \r\n temp['25wkATRPercent'] = temp['25wkATRPoints'] / temp['Adj Close']\r\n temp['20wkATRPoints'] = temp['TrueRange'].rolling(window = 100, center=False).mean() \r\n temp['20wkATRPercent'] = temp['20wkATRPoints'] / temp['Adj Close']\r\n temp['15wkATRPoints'] = temp['TrueRange'].rolling(window = 75, center=False).mean() \r\n temp['15wkATRPercent'] = temp['15wkATRPoints'] / temp['Adj Close']\r\n temp['12wkATRPoints'] = temp['TrueRange'].rolling(window = 60, center=False).mean() \r\n temp['12wkATRPercent'] = temp['12wkATRPoints'] / temp['Adj Close']\r\n temp['11wkATRPoints'] = temp['TrueRange'].rolling(window = 55, center=False).mean() \r\n temp['11wkATRPercent'] = temp['11wkATRPoints'] / temp['Adj Close']\r\n temp['10wkATRPoints'] = temp['TrueRange'].rolling(window = 50, center=False).mean() \r\n temp['10wkATRPercent'] = temp['10wkATRPoints'] / temp['Adj Close']\r\n temp['9wkATRPoints'] = temp['TrueRange'].rolling(window = 45, center=False).mean() \r\n temp['9wkATRPercent'] = temp['9wkATRPoints'] / temp['Adj Close']\r\n temp['8wkATRPoints'] = temp['TrueRange'].rolling(window = 40, center=False).mean() \r\n temp['8wkATRPercent'] = temp['8wkATRPoints'] / 
temp['Adj Close']\r\n temp['7wkATRPoints'] = temp['TrueRange'].rolling(window = 35, center=False).mean() \r\n temp['7wkATRPercent'] = temp['7wkATRPoints'] / temp['Adj Close']\r\n temp['6wkATRPoints'] = temp['TrueRange'].rolling(window = 30, center=False).mean() \r\n temp['6wkATRPercent'] = temp['6wkATRPoints'] / temp['Adj Close']\r\n temp['5wkATRPoints'] = temp['TrueRange'].rolling(window = 25, center=False).mean() \r\n temp['5wkATRPercent'] = temp['5wkATRPoints'] / temp['Adj Close']\r\n temp['4wkATRPoints'] = temp['TrueRange'].rolling(window = 20, center=False).mean() \r\n temp['4wkATRPercent'] = temp['4wkATRPoints'] / temp['Adj Close']\r\n temp['3wkATRPoints'] = temp['TrueRange'].rolling(window = 15, center=False).mean() \r\n temp['3wkATRPercent'] = temp['3wkATRPoints'] / temp['Adj Close']\r\n temp['2wkATRPoints'] = temp['TrueRange'].rolling(window = 10, center=False).mean() \r\n temp['2wkATRPercent'] = temp['2wkATRPoints'] / temp['Adj Close']\r\n temp['1wkATRPoints'] = temp['TrueRange'].rolling(window = 5, center=False).mean() \r\n temp['1wkATRPercent'] = temp['1wkATRPoints'] / temp['Adj Close']\r\n temp['4dayATRPoints'] = temp['TrueRange'].rolling(window = 4, center=False).mean() \r\n temp['4dayATRPercent'] = temp['4dayATRPoints'] / temp['Adj Close']\r\n temp['3dayATRPoints'] = temp['TrueRange'].rolling(window = 3, center=False).mean() \r\n temp['3dayATRPercent'] = temp['3dayATRPoints'] / temp['Adj Close']\r\n temp['2dayATRPoints'] = temp['TrueRange'].rolling(window = 2, center=False).mean() \r\n temp['2dayATRPercent'] = temp['2dayATRPoints'] / temp['Adj Close'] \r\n \r\n #STATIC Total Average ATR\r\n temp['100wkTotalAverageATR'] = temp['100wkATRPercent'].mean() * 500 \r\n temp['90wkTotalAverageATR'] = temp['90wkATRPercent'].mean() * 450 \r\n temp['80wkTotalAverageATR'] = temp['80wkATRPercent'].mean() * 400 \r\n temp['70wkTotalAverageATR'] = temp['70wkATRPercent'].mean() * 350 \r\n temp['65wkTotalAverageATR'] = temp['65wkATRPercent'].mean() * 325 \r\n 
temp['60wkTotalAverageATR'] = temp['60wkATRPercent'].mean() * 300 \r\n temp['55wkTotalAverageATR'] = temp['55wkATRPercent'].mean() * 275 \r\n temp['52wkTotalAverageATR'] = temp['52wkATRPercent'].mean() * 252 \r\n temp['45wkTotalAverageATR'] = temp['45wkATRPercent'].mean() * 225\r\n temp['40wkTotalAverageATR'] = temp['40wkATRPercent'].mean() * 200\r\n temp['35wkTotalAverageATR'] = temp['35wkATRPercent'].mean() * 175 \r\n temp['30wkTotalAverageATR'] = temp['30wkATRPercent'].mean() * 150 \r\n temp['25wkTotalAverageATR'] = temp['25wkATRPercent'].mean() * 125 \r\n temp['20wkTotalAverageATR'] = temp['20wkATRPercent'].mean() * 100 \r\n temp['15wkTotalAverageATR'] = temp['15wkATRPercent'].mean() * 75 \r\n temp['12wkTotalAverageATR'] = temp['12wkATRPercent'].mean() * 60 \r\n temp['11wkTotalAverageATR'] = temp['11wkATRPercent'].mean() * 55 \r\n temp['10wkTotalAverageATR'] = temp['10wkATRPercent'].mean() * 50 \r\n temp['9wkTotalAverageATR'] = temp['9wkATRPercent'].mean() * 45 \r\n temp['8wkTotalAverageATR'] = temp['8wkATRPercent'].mean() * 40 \r\n temp['7wkTotalAverageATR'] = temp['7wkATRPercent'].mean() * 35 \r\n temp['6wkTotalAverageATR'] = temp['6wkATRPercent'].mean() * 30 \r\n temp['5wkTotalAverageATR'] = temp['5wkATRPercent'].mean() * 25 \r\n temp['4wkTotalAverageATR'] = temp['4wkATRPercent'].mean() * 20 \r\n temp['3wkTotalAverageATR'] = temp['3wkATRPercent'].mean() * 15 \r\n temp['2wkTotalAverageATR'] = temp['2wkATRPercent'].mean() * 10\r\n temp['1wkTotalAverageATR'] = temp['1wkATRPercent'].mean() * 5 \r\n temp['4dayTotalAverageATR'] = temp['4dayATRPercent'].mean() * 4 \r\n temp['3dayTotalAverageATR'] = temp['3dayATRPercent'].mean() * 3 \r\n temp['2dayTotalAverageATR'] = temp['2dayATRPercent'].mean() * 2 \r\n\r\n #DYNAMIC Rolling Average ATR\r\n temp['100wkRollingAverageATR'] = temp['100wkATRPercent'].rolling(\r\n center=False, window = 500).mean()\r\n temp['90wkRollingAverageATR'] = temp['90wkATRPercent'].rolling(\r\n center=False, window = 450).mean()\r\n 
temp['80wkRollingAverageATR'] = temp['80wkATRPercent'].rolling(\r\n center=False, window = 400).mean()\r\n temp['70wkRollingAverageATR'] = temp['70wkATRPercent'].rolling(\r\n center=False, window = 350).mean()\r\n temp['65wkRollingAverageATR'] = temp['65wkATRPercent'].rolling(\r\n center=False, window = 325).mean()\r\n temp['60wkRollingAverageATR'] = temp['60wkATRPercent'].rolling(\r\n center=False, window = 300).mean()\r\n temp['55wkRollingAverageATR'] = temp['55wkATRPercent'].rolling(\r\n center=False, window = 275).mean()\r\n temp['52wkRollingAverageATR'] = temp['52wkATRPercent'].rolling(\r\n center=False, window = 252).mean()\r\n temp['45wkRollingAverageATR'] = temp['45wkATRPercent'].rolling(\r\n center=False, window = 225).mean()\r\n temp['40wkRollingAverageATR'] = temp['40wkATRPercent'].rolling(\r\n center=False, window = 200).mean()\r\n temp['35wkRollingAverageATR'] = temp['35wkATRPercent'].rolling(\r\n center=False, window = 175).mean()\r\n temp['30wkRollingAverageATR'] = temp['30wkATRPercent'].rolling(\r\n center=False, window = 150).mean()\r\n temp['25wkRollingAverageATR'] = temp['25wkATRPercent'].rolling(\r\n center=False, window = 125).mean()\r\n temp['20wkRollingAverageATR'] = temp['20wkATRPercent'].rolling(\r\n center=False, window = 100).mean()\r\n temp['15wkRollingAverageATR'] = temp['15wkATRPercent'].rolling(\r\n center=False, window = 75).mean()\r\n temp['12wkRollingAverageATR'] = temp['12wkATRPercent'].rolling(\r\n center=False, window = 60).mean()\r\n temp['11wkRollingAverageATR'] = temp['11wkATRPercent'].rolling(\r\n center=False, window = 55).mean()\r\n temp['10wkRollingAverageATR'] = temp['10wkATRPercent'].rolling(\r\n center=False, window = 50).mean()\r\n temp['9wkRollingAverageATR'] = temp['9wkATRPercent'].rolling(\r\n center=False, window = 45).mean()\r\n temp['8wkRollingAverageATR'] = temp['8wkATRPercent'].rolling(\r\n center=False, window = 40).mean()\r\n temp['7wkRollingAverageATR'] = temp['7wkATRPercent'].rolling(\r\n center=False, 
window = 35).mean()\r\n temp['6wkRollingAverageATR'] = temp['6wkATRPercent'].rolling(\r\n center=False, window = 30).mean()\r\n temp['5wkRollingAverageATR'] = temp['5wkATRPercent'].rolling(\r\n center=False, window = 25).mean()\r\n temp['4wkRollingAverageATR'] = temp['4wkATRPercent'].rolling(\r\n center=False, window = 20).mean()\r\n temp['3wkRollingAverageATR'] = temp['3wkATRPercent'].rolling(\r\n center=False, window = 15).mean()\r\n temp['2wkRollingAverageATR'] = temp['2wkATRPercent'].rolling(\r\n center=False, window = 10).mean()\r\n temp['1wkRollingAverageATR'] = temp['1wkATRPercent'].rolling(\r\n center=False, window = 5).mean()\r\n temp['4dayRollingAverageATR'] = temp['4dayATRPercent'].rolling(\r\n center=False, window = 4).mean()\r\n temp['3dayRollingAverageATR'] = temp['3dayATRPercent'].rolling(\r\n center=False, window = 3).mean()\r\n temp['2dayRollingAverageATR'] = temp['2dayATRPercent'].rolling(\r\n center=False, window = 2).mean() \r\n\r\n #DYNAMIC RAATR/TAATR - 1 \r\n temp['100wkRAATRtoTAATR'] = (temp['100wkRollingAverageATR']/temp['100wkTotalAverageATR']) - 1\r\n temp['90wkRAATRtoTAATR'] = (temp['90wkRollingAverageATR']/temp['90wkTotalAverageATR']) - 1\r\n temp['80wkRAATRtoTAATR'] = (temp['80wkRollingAverageATR']/temp['80wkTotalAverageATR']) - 1\r\n temp['70wkRAATRtoTAATR'] = (temp['70wkRollingAverageATR']/temp['70wkTotalAverageATR']) - 1\r\n temp['65wkRAATRtoTAATR'] = (temp['65wkRollingAverageATR']/temp['65wkTotalAverageATR']) - 1\r\n temp['60wkRAATRtoTAATR'] = (temp['60wkRollingAverageATR']/temp['60wkTotalAverageATR']) - 1\r\n temp['55wkRAATRtoTAATR'] = (temp['55wkRollingAverageATR']/temp['55wkTotalAverageATR']) - 1\r\n temp['52wkRAATRtoTAATR'] = (temp['52wkRollingAverageATR']/temp['52wkTotalAverageATR']) - 1\r\n temp['45wkRAATRtoTAATR'] = (temp['45wkRollingAverageATR']/temp['45wkTotalAverageATR']) - 1\r\n temp['40wkRAATRtoTAATR'] = (temp['40wkRollingAverageATR']/temp['40wkTotalAverageATR']) - 1\r\n temp['35wkRAATRtoTAATR'] = 
(temp['35wkRollingAverageATR']/temp['35wkTotalAverageATR']) - 1\r\n temp['30wkRAATRtoTAATR'] = (temp['30wkRollingAverageATR']/temp['30wkTotalAverageATR']) - 1\r\n temp['25wkRAATRtoTAATR'] = (temp['25wkRollingAverageATR']/temp['25wkTotalAverageATR']) - 1\r\n temp['20wkRAATRtoTAATR'] = (temp['20wkRollingAverageATR']/temp['20wkTotalAverageATR']) - 1\r\n temp['15wkRAATRtoTAATR'] = (temp['15wkRollingAverageATR']/temp['15wkTotalAverageATR']) - 1\r\n temp['12wkRAATRtoTAATR'] = (temp['12wkRollingAverageATR']/temp['12wkTotalAverageATR']) - 1\r\n temp['11wkRAATRtoTAATR'] = (temp['11wkRollingAverageATR']/temp['11wkTotalAverageATR']) - 1\r\n temp['10wkRAATRtoTAATR'] = (temp['10wkRollingAverageATR']/temp['10wkTotalAverageATR']) - 1\r\n temp['9wkRAATRtoTAATR'] = (temp['9wkRollingAverageATR']/temp['9wkTotalAverageATR']) - 1\r\n temp['8wkRAATRtoTAATR'] = (temp['8wkRollingAverageATR']/temp['8wkTotalAverageATR']) - 1\r\n temp['7wkRAATRtoTAATR'] = (temp['7wkRollingAverageATR']/temp['7wkTotalAverageATR']) - 1\r\n temp['6wkRAATRtoTAATR'] = (temp['6wkRollingAverageATR']/temp['6wkTotalAverageATR']) - 1\r\n temp['5wkRAATRtoTAATR'] = (temp['5wkRollingAverageATR']/temp['5wkTotalAverageATR']) - 1\r\n temp['4wkRAATRtoTAATR'] = (temp['4wkRollingAverageATR']/temp['4wkTotalAverageATR']) - 1\r\n temp['3wkRAATRtoTAATR'] = (temp['3wkRollingAverageATR']/temp['3wkTotalAverageATR']) - 1\r\n temp['2wkRAATRtoTAATR'] = (temp['2wkRollingAverageATR']/temp['2wkTotalAverageATR']) - 1\r\n temp['1wkRAATRtoTAATR'] = (temp['1wkRollingAverageATR']/temp['1wkTotalAverageATR']) - 1\r\n temp['4dayRAATRtoTAATR'] = (temp['4dayRollingAverageATR']/temp['4dayTotalAverageATR']) - 1\r\n temp['3dayRAATRtoTAATR'] = (temp['3dayRollingAverageATR']/temp['3dayTotalAverageATR']) - 1 \r\n temp['2dayRAATRtoTAATR'] = (temp['2dayRollingAverageATR']/temp['2dayTotalAverageATR']) - 1 \r\n \r\n #DYNAMIC ATR percent / Range percent \r\n temp['100wkATRtoRange'] = temp['100wkATRPercent'] / temp['100wkRangePercent']\r\n temp['90wkATRtoRange'] 
= temp['90wkATRPercent'] / temp['90wkRangePercent']\r\n temp['80wkATRtoRange'] = temp['80wkATRPercent'] / temp['80wkRangePercent']\r\n temp['70wkATRtoRange'] = temp['70wkATRPercent'] / temp['70wkRangePercent']\r\n temp['65wkATRtoRange'] = temp['65wkATRPercent'] / temp['65wkRangePercent']\r\n temp['60wkATRtoRange'] = temp['60wkATRPercent'] / temp['60wkRangePercent']\r\n temp['55wkATRtoRange'] = temp['55wkATRPercent'] / temp['55wkRangePercent']\r\n temp['52wkATRtoRange'] = temp['52wkATRPercent'] / temp['52wkRangePercent']\r\n temp['45wkATRtoRange'] = temp['45wkATRPercent'] / temp['45wkRangePercent']\r\n temp['40wkATRtoRange'] = temp['40wkATRPercent'] / temp['40wkRangePercent']\r\n temp['35wkATRtoRange'] = temp['35wkATRPercent'] / temp['35wkRangePercent']\r\n temp['30wkATRtoRange'] = temp['30wkATRPercent'] / temp['30wkRangePercent']\r\n temp['25wkATRtoRange'] = temp['25wkATRPercent'] / temp['25wkRangePercent']\r\n temp['20wkATRtoRange'] = temp['20wkATRPercent'] / temp['20wkRangePercent']\r\n temp['15wkATRtoRange'] = temp['15wkATRPercent'] / temp['15wkRangePercent']\r\n temp['12wkATRtoRange'] = temp['12wkATRPercent'] / temp['12wkRangePercent']\r\n temp['11wkATRtoRange'] = temp['11wkATRPercent'] / temp['11wkRangePercent']\r\n temp['10wkATRtoRange'] = temp['10wkATRPercent'] / temp['10wkRangePercent']\r\n temp['9wkATRtoRange'] = temp['9wkATRPercent'] / temp['9wkRangePercent']\r\n temp['8wkATRtoRange'] = temp['8wkATRPercent'] / temp['8wkRangePercent']\r\n temp['7wkATRtoRange'] = temp['7wkATRPercent'] / temp['7wkRangePercent']\r\n temp['6wkATRtoRange'] = temp['6wkATRPercent'] / temp['6wkRangePercent']\r\n temp['5wkATRtoRange'] = temp['5wkATRPercent'] / temp['5wkRangePercent']\r\n temp['4wkATRtoRange'] = temp['4wkATRPercent'] / temp['4wkRangePercent']\r\n temp['3wkATRtoRange'] = temp['3wkATRPercent'] / temp['3wkRangePercent']\r\n temp['2wkATRtoRange'] = temp['2wkATRPercent'] / temp['2wkRangePercent']\r\n temp['1wkATRtoRange'] = temp['1wkATRPercent'] / 
temp['1wkRangePercent']\r\n temp['4dayATRtoRange'] = temp['4dayATRPercent'] / temp['4dayRangePercent']\r\n temp['3dayATRtoRange'] = temp['3dayATRPercent'] / temp['3dayRangePercent']\r\n temp['2dayATRtoRange'] = temp['2dayATRPercent'] / temp['2dayRangePercent']\r\n \r\n #STATIC Average ATRtoRange\r\n temp['100wkTotalAverageATRtoRange'] = temp['100wkATRtoRange'].mean() * 500\r\n temp['90wkTotalAverageATRtoRange'] = temp['90wkATRtoRange'].mean() * 450\r\n temp['80wkTotalAverageATRtoRange'] = temp['80wkATRtoRange'].mean() * 400\r\n temp['70wkTotalAverageATRtoRange'] = temp['70wkATRtoRange'].mean() * 350\r\n temp['65wkTotalAverageATRtoRange'] = temp['65wkATRtoRange'].mean() * 325\r\n temp['60wkTotalAverageATRtoRange'] = temp['60wkATRtoRange'].mean() * 300\r\n temp['55wkTotalAverageATRtoRange'] = temp['55wkATRtoRange'].mean() * 275\r\n temp['52wkTotalAverageATRtoRange'] = temp['52wkATRtoRange'].mean() * 250\r\n temp['45wkTotalAverageATRtoRange'] = temp['45wkATRtoRange'].mean() * 225\r\n temp['40wkTotalAverageATRtoRange'] = temp['40wkATRtoRange'].mean() * 200\r\n temp['35wkTotalAverageATRtoRange'] = temp['35wkATRtoRange'].mean() * 175\r\n temp['30wkTotalAverageATRtoRange'] = temp['30wkATRtoRange'].mean() * 150\r\n temp['25wkTotalAverageATRtoRange'] = temp['25wkATRtoRange'].mean() * 125\r\n temp['20wkTotalAverageATRtoRange'] = temp['20wkATRtoRange'].mean() * 100 \r\n temp['15wkTotalAverageATRtoRange'] = temp['15wkATRtoRange'].mean() * 75\r\n temp['12wkTotalAverageATRtoRange'] = temp['12wkATRtoRange'].mean() * 60\r\n temp['11wkTotalAverageATRtoRange'] = temp['11wkATRtoRange'].mean() * 55\r\n temp['10wkTotalAverageATRtoRange'] = temp['10wkATRtoRange'].mean() * 50\r\n temp['9wkTotalAverageATRtoRange'] = temp['9wkATRtoRange'].mean() * 45\r\n temp['8wkTotalAverageATRtoRange'] = temp['8wkATRtoRange'].mean() * 40 \r\n temp['7wkTotalAverageATRtoRange'] = temp['7wkATRtoRange'].mean() * 35\r\n temp['6wkTotalAverageATRtoRange'] = temp['6wkATRtoRange'].mean() * 30\r\n 
temp['5wkTotalAverageATRtoRange'] = temp['5wkATRtoRange'].mean() * 25\r\n temp['4wkTotalAverageATRtoRange'] = temp['4wkATRtoRange'].mean() * 20 \r\n temp['3wkTotalAverageATRtoRange'] = temp['3wkATRtoRange'].mean() * 15 \r\n temp['2wkTotalAverageATRtoRange'] = temp['2wkATRtoRange'].mean() * 10 \r\n temp['1wkTotalAverageATRtoRange'] = temp['1wkATRtoRange'].mean() * 5\r\n temp['4dayTotalAverageATRtoRange'] = temp['4dayATRtoRange'].mean() * 4\r\n temp['3dayTotalAverageATRtoRange'] = temp['3dayATRtoRange'].mean() * 3 \r\n temp['2dayTotalAverageATRtoRange'] = temp['2dayATRtoRange'].mean() * 2 \r\n\r\n #DTNAMIC Rolling Average ATRtoRange\r\n temp['100wkRollingAverageATRtoRange'] = temp['100wkATRtoRange'].rolling(\r\n center=False, window = 500).mean()\r\n temp['90wkRollingAverageATRtoRange'] = temp['90wkATRtoRange'].rolling(\r\n center=False, window = 450).mean()\r\n temp['80wkRollingAverageATRtoRange'] = temp['80wkATRtoRange'].rolling(\r\n center=False, window = 400).mean()\r\n temp['70wkRollingAverageATRtoRange'] = temp['70wkATRtoRange'].rolling(\r\n center=False, window = 350).mean()\r\n temp['65wkRollingAverageATRtoRange'] = temp['65wkATRtoRange'].rolling(\r\n center=False, window = 325).mean()\r\n temp['60wkRollingAverageATRtoRange'] = temp['60wkATRtoRange'].rolling(\r\n center=False, window = 300).mean()\r\n temp['55wkRollingAverageATRtoRange'] = temp['55wkATRtoRange'].rolling(\r\n center=False, window = 275).mean()\r\n temp['52wkRollingAverageATRtoRange'] = temp['52wkATRtoRange'].rolling(\r\n center=False, window = 250).mean()\r\n temp['45wkRollingAverageATRtoRange'] = temp['45wkATRtoRange'].rolling(\r\n center=False, window = 225).mean()\r\n temp['40wkRollingAverageATRtoRange'] = temp['40wkATRtoRange'].rolling(\r\n center=False, window = 200).mean()\r\n temp['35wkRollingAverageATRtoRange'] = temp['35wkATRtoRange'].rolling(\r\n center=False, window = 175).mean()\r\n temp['30wkRollingAverageATRtoRange'] = temp['30wkATRtoRange'].rolling(\r\n center=False, window = 
150).mean()\r\n temp['25wkRollingAverageATRtoRange'] = temp['25wkATRtoRange'].rolling(\r\n center=False, window = 125).mean()\r\n temp['20wkRollingAverageATRtoRange'] = temp['20wkATRtoRange'].rolling(\r\n center=False, window = 100).mean()\r\n temp['15wkRollingAverageATRtoRange'] = temp['15wkATRtoRange'].rolling(\r\n center=False, window = 75).mean()\r\n temp['12wkRollingAverageATRtoRange'] = temp['12wkATRtoRange'].rolling(\r\n center=False, window = 60).mean()\r\n temp['11wkRollingAverageATRtoRange'] = temp['11wkATRtoRange'].rolling(\r\n center=False, window = 55).mean()\r\n temp['10wkRollingAverageATRtoRange'] = temp['10wkATRtoRange'].rolling(\r\n center=False, window = 50).mean()\r\n temp['9wkRollingAverageATRtoRange'] = temp['9wkATRtoRange'].rolling(\r\n center=False, window = 45).mean()\r\n temp['8wkRollingAverageATRtoRange'] = temp['8wkATRtoRange'].rolling(\r\n center=False, window = 40).mean()\r\n temp['7wkRollingAverageATRtoRange'] = temp['7wkATRtoRange'].rolling(\r\n center=False, window = 35).mean()\r\n temp['6wkRollingAverageATRtoRange'] = temp['6wkATRtoRange'].rolling(\r\n center=False, window = 30).mean()\r\n temp['5wkRollingAverageATRtoRange'] = temp['5wkATRtoRange'].rolling(\r\n center=False, window = 25).mean()\r\n temp['4wkRollingAverageATRtoRange'] = temp['4wkATRtoRange'].rolling(\r\n center=False, window = 20).mean()\r\n temp['3wkRollingAverageATRtoRange'] = temp['3wkATRtoRange'].rolling(\r\n center=False, window = 15).mean()\r\n temp['2wkRollingAverageATRtoRange'] = temp['2wkATRtoRange'].rolling(\r\n center=False, window = 10).mean()\r\n temp['1wkRollingAverageATRtoRange'] = temp['1wkATRtoRange'].rolling(\r\n center=False, window = 5).mean()\r\n temp['4dayRollingAverageATRtoRange'] = temp['4dayATRtoRange'].rolling(\r\n center=False, window = 4).mean()\r\n temp['3dayRollingAverageATRtoRange'] = temp['3dayATRtoRange'].rolling(\r\n center=False, window = 3).mean()\r\n temp['2dayRollingAverageATRtoRange'] = temp['2dayATRtoRange'].rolling(\r\n 
center=False, window = 2).mean() \r\n\r\n #Efficiency (is normalized across markets by Diff/ATR) \r\n temp['100wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(500)\r\n temp['100wkEfficiency'] = temp['100wkCloseDiff'] / temp['100wkATRPoints'] \r\n temp['90wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(450)\r\n temp['90wkEfficiency'] = temp['90wkCloseDiff'] / temp['90wkATRPoints'] \r\n temp['80wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(400)\r\n temp['80wkEfficiency'] = temp['80wkCloseDiff'] / temp['80wkATRPoints'] \r\n temp['70wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(350)\r\n temp['70wkEfficiency'] = temp['70wkCloseDiff'] / temp['70wkATRPoints'] \r\n temp['65wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(325)\r\n temp['65wkEfficiency'] = temp['65wkCloseDiff'] / temp['65wkATRPoints'] \r\n temp['60wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(300)\r\n temp['60wkEfficiency'] = temp['60wkCloseDiff'] / temp['60wkATRPoints'] \r\n temp['55wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(275)\r\n temp['55wkEfficiency'] = temp['55wkCloseDiff'] / temp['55wkATRPoints'] \r\n temp['52wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(252)\r\n temp['52wkEfficiency'] = temp['52wkCloseDiff'] / temp['52wkATRPoints'] \r\n temp['45wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(225)\r\n temp['45wkEfficiency'] = temp['45wkCloseDiff'] / temp['45wkATRPoints'] \r\n temp['40wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(200)\r\n temp['40wkEfficiency'] = temp['40wkCloseDiff'] / temp['40wkATRPoints'] \r\n temp['35wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(175)\r\n temp['35wkEfficiency'] = temp['35wkCloseDiff'] / temp['35wkATRPoints'] \r\n temp['30wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(150)\r\n temp['30wkEfficiency'] = temp['30wkCloseDiff'] / temp['30wkATRPoints'] \r\n temp['25wkCloseDiff'] = temp['Adj Close'] - temp['Adj 
Close'].shift(125)\r\n temp['25wkEfficiency'] = temp['25wkCloseDiff'] / temp['25wkATRPoints'] \r\n temp['20wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(100)\r\n temp['20wkEfficiency'] = temp['20wkCloseDiff'] / temp['20wkATRPoints'] \r\n temp['15wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(75)\r\n temp['15wkEfficiency'] = temp['15wkCloseDiff'] / temp['15wkATRPoints'] \r\n temp['12wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(60)\r\n temp['12wkEfficiency'] = temp['12wkCloseDiff'] / temp['12wkATRPoints'] \r\n temp['11wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(55)\r\n temp['11wkEfficiency'] = temp['11wkCloseDiff'] / temp['11wkATRPoints'] \r\n temp['10wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(50)\r\n temp['10wkEfficiency'] = temp['10wkCloseDiff'] / temp['10wkATRPoints'] \r\n temp['9wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(45)\r\n temp['9wkEfficiency'] = temp['9wkCloseDiff'] / temp['9wkATRPoints'] \r\n temp['8wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(40)\r\n temp['8wkEfficiency'] = temp['8wkCloseDiff'] / temp['8wkATRPoints'] \r\n temp['7wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(35)\r\n temp['7wkEfficiency'] = temp['7wkCloseDiff'] / temp['7wkATRPoints'] \r\n temp['6wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(30)\r\n temp['6wkEfficiency'] = temp['6wkCloseDiff'] / temp['6wkATRPoints'] \r\n temp['5wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(25)\r\n temp['5wkEfficiency'] = temp['5wkCloseDiff'] / temp['5wkATRPoints'] \r\n temp['4wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(20)\r\n temp['4wkEfficiency'] = temp['4wkCloseDiff'] / temp['4wkATRPoints'] \r\n temp['3wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(15)\r\n temp['3wkEfficiency'] = temp['3wkCloseDiff'] / temp['3wkATRPoints'] \r\n temp['2wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(10)\r\n temp['2wkEfficiency'] = 
temp['2wkCloseDiff'] / temp['2wkATRPoints'] \r\n temp['1wkCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(5)\r\n temp['1wkEfficiency'] = temp['1wkCloseDiff'] / temp['1wkATRPoints'] \r\n temp['4dayCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(4)\r\n temp['4dayEfficiency'] = temp['4dayCloseDiff'] / temp['4dayATRPoints'] \r\n temp['3dayCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(3)\r\n temp['3dayEfficiency'] = temp['3dayCloseDiff'] / temp['3dayATRPoints'] \r\n temp['2dayCloseDiff'] = temp['Adj Close'] - temp['Adj Close'].shift(2)\r\n temp['2dayEfficiency'] = temp['2dayCloseDiff'] / temp['2dayATRPoints'] \r\n \r\n #Average rolling volume\r\n temp['100wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=500).mean() \r\n temp['90wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=450).mean() \r\n temp['80wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=400).mean() \r\n temp['70wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=350).mean() \r\n temp['65wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=325).mean() \r\n temp['60wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=300).mean() \r\n temp['55wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=275).mean() \r\n temp['52wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=252).mean() \r\n temp['45wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=225).mean() \r\n temp['40wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=200).mean() \r\n temp['35wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=175).mean() \r\n temp['30wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=150).mean() \r\n temp['25wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=125).mean() \r\n 
temp['20wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=100).mean() \r\n temp['15wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=75).mean() \r\n temp['12wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=60).mean() \r\n temp['11wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=55).mean() \r\n temp['10wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=50).mean() \r\n temp['9wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=45).mean() \r\n temp['8wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=40).mean() \r\n temp['7wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=35).mean() \r\n temp['6wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=30).mean() \r\n temp['5wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=25).mean() \r\n temp['4wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=20).mean() \r\n temp['3wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=15).mean() \r\n temp['2wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=10).mean() \r\n temp['1wkAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=5).mean() \r\n temp['4dayAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=4).mean() \r\n temp['3dayAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=3).mean() \r\n temp['2dayAverageRollingVolume'] = temp['Volume'].rolling(center=False, \r\n window=2).mean()\r\n \r\n #Make also a float estimation\r\n temp['Float'] = 0\r\n \r\n #Simple Moving Average\r\n temp['100wkSMA'] = temp['Adj Close'].rolling(window=500, center=False).mean()\r\n temp['100wkSMA'] = temp['100wkSMA'].fillna(0)\r\n temp['90wkSMA'] = temp['Adj Close'].rolling(window=450, center=False).mean()\r\n 
temp['90wkSMA'] = temp['90wkSMA'].fillna(0)\r\n temp['80wkSMA'] = temp['Adj Close'].rolling(window=400, center=False).mean()\r\n temp['80wkSMA'] = temp['80wkSMA'].fillna(0)\r\n temp['70wkSMA'] = temp['Adj Close'].rolling(window=350, center=False).mean()\r\n temp['70wkSMA'] = temp['70wkSMA'].fillna(0)\r\n temp['65wkSMA'] = temp['Adj Close'].rolling(window=325, center=False).mean()\r\n temp['65wkSMA'] = temp['65wkSMA'].fillna(0)\r\n temp['60wkSMA'] = temp['Adj Close'].rolling(window=300, center=False).mean()\r\n temp['60wkSMA'] = temp['60wkSMA'].fillna(0)\r\n temp['55wkSMA'] = temp['Adj Close'].rolling(window=275, center=False).mean()\r\n temp['55wkSMA'] = temp['55wkSMA'].fillna(0)\r\n temp['52wkSMA'] = temp['Adj Close'].rolling(window=252, center=False).mean()\r\n temp['52wkSMA'] = temp['52wkSMA'].fillna(0)\r\n temp['45wkSMA'] = temp['Adj Close'].rolling(window=225, center=False).mean()\r\n temp['45wkSMA'] = temp['45wkSMA'].fillna(0)\r\n temp['40wkSMA'] = temp['Adj Close'].rolling(window=200, center=False).mean()\r\n temp['40wkSMA'] = temp['40wkSMA'].fillna(0)\r\n temp['35wkSMA'] = temp['Adj Close'].rolling(window=175, center=False).mean()\r\n temp['35wkSMA'] = temp['35wkSMA'].fillna(0)\r\n temp['30wkSMA'] = temp['Adj Close'].rolling(window=150, center=False).mean()\r\n temp['30wkSMA'] = temp['30wkSMA'].fillna(0)\r\n temp['25wkSMA'] = temp['Adj Close'].rolling(window=125, center=False).mean()\r\n temp['25wkSMA'] = temp['25wkSMA'].fillna(0)\r\n temp['20wkSMA'] = temp['Adj Close'].rolling(window=100, center=False).mean()\r\n temp['20wkSMA'] = temp['20wkSMA'].fillna(0)\r\n temp['15wkSMA'] = temp['Adj Close'].rolling(window=75, center=False).mean()\r\n temp['15wkSMA'] = temp['15wkSMA'].fillna(0)\r\n temp['12wkSMA'] = temp['Adj Close'].rolling(window=60, center=False).mean()\r\n temp['12wkSMA'] = temp['12wkSMA'].fillna(0)\r\n temp['11wkSMA'] = temp['Adj Close'].rolling(window=55, center=False).mean()\r\n temp['11wkSMA'] = temp['11wkSMA'].fillna(0)\r\n temp['10wkSMA'] = 
temp['Adj Close'].rolling(window=50, center=False).mean()\r\n temp['10wkSMA'] = temp['10wkSMA'].fillna(0)\r\n temp['9wkSMA'] = temp['Adj Close'].rolling(window=45, center=False).mean()\r\n temp['9wkSMA'] = temp['9wkSMA'].fillna(0)\r\n temp['8wkSMA'] = temp['Adj Close'].rolling(window=40, center=False).mean()\r\n temp['8wkSMA'] = temp['8wkSMA'].fillna(0)\r\n temp['7wkSMA'] = temp['Adj Close'].rolling(window=35, center=False).mean()\r\n temp['7wkSMA'] = temp['7wkSMA'].fillna(0)\r\n temp['6wkSMA'] = temp['Adj Close'].rolling(window=30, center=False).mean()\r\n temp['6wkSMA'] = temp['6wkSMA'].fillna(0)\r\n temp['5wkSMA'] = temp['Adj Close'].rolling(window=25, center=False).mean()\r\n temp['5wkSMA'] = temp['5wkSMA'].fillna(0)\r\n temp['4wkSMA'] = temp['Adj Close'].rolling(window=20, center=False).mean()\r\n temp['4wkSMA'] = temp['4wkSMA'].fillna(0)\r\n temp['3wkSMA'] = temp['Adj Close'].rolling(window=15, center=False).mean()\r\n temp['3wkSMA'] = temp['3wkSMA'].fillna(0)\r\n temp['2wkSMA'] = temp['Adj Close'].rolling(window=10, center=False).mean()\r\n temp['2wkSMA'] = temp['2wkSMA'].fillna(0)\r\n temp['1wkSMA'] = temp['Adj Close'].rolling(window=5, center=False).mean()\r\n temp['1wkSMA'] = temp['1wkSMA'].fillna(0)\r\n temp['4daySMA'] = temp['Adj Close'].rolling(window=4, center=False).mean()\r\n temp['4daySMA'] = temp['4daySMA'].fillna(0)\r\n temp['3daySMA'] = temp['Adj Close'].rolling(window=3, center=False).mean()\r\n temp['3daySMA'] = temp['3daySMA'].fillna(0)\r\n temp['2daySMA'] = temp['Adj Close'].rolling(window=2, center=False).mean()\r\n temp['2daySMA'] = temp['2daySMA'].fillna(0) \r\n \r\n #Save to folder\r\n pd.to_pickle(temp, 'Z:\\\\Users\\\\Username\\\\DirectoryLocation\\\\DataSources\\\\'\r\n + 'YahooSource\\\\ProcessedData\\\\DAY\\\\' + p + '\\\\' + p)\r\n #Display results\r\n print(p + ' is processed and saved.')\r\n#End timer \r\nend = time.time()\r\n#Timer stats\r\nt = round(end - start, 2)\r\n#Number of tickers processed\r\nn = 
round(len(os.listdir('Z:\\\\Users\\\\Username\\\\DirectoryLocation\\\\DataSources\\\\' +\r\n 'YahooSource\\\\ProcessedData\\\\DAY\\\\')), 2)\r\n#Display results\r\nprint('Yahoo processed data is full.')\r\nprint('YahooSource took ' + str(t) + ' seconds for ' + str(n) + ' tickers.')\r\n" ]
[ [ "numpy.where", "pandas.Series" ], [ "pandas.to_numeric", "pandas.to_datetime", "pandas.read_csv", "numpy.sqrt", "pandas.Series", "pandas.to_pickle", "pandas.read_pickle", "numpy.where" ] ]
WayneFerrao/autofocus
[ "80a5d2366639177dbd16708a79b88df17528054c", "80a5d2366639177dbd16708a79b88df17528054c" ]
[ "onehot.py", "linreg.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nfrom numpy import argmax\r\nimport seaborn as sns\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\nout_path = \"dummy.csv\"\r\n\r\ndf = pd.read_csv(out_path)\r\n\r\n#use dff to do basic charts to show trends\r\ndff = df.dropna(axis=0)\r\n\r\n#set all categorical types to 'categorical'\r\n\r\n#This stuff was for data analysis\r\n#sns.lineplot(x=dff['year'], y=dff['price'])\r\n#sns.scatterplot(x=dff['odometer'], y=dff['price'])\r\n\r\n# color one hot encoder\r\ncolors = set(dff['paint_color'])\r\ncolor_df = pd.DataFrame(colors, columns=['paint_color'])\r\n# generate binary values using get_dummies\r\ndum_df = pd.get_dummies(color_df, columns=[\"paint_color\"], prefix=[\"Color_is\"])\r\n# merge with main df bridge_df on key values\r\ncolor_df = color_df.join(dum_df)\r\nprint(color_df)\r\n\r\n#model one hot encoder\r\nmodels = set(dff['model'])\r\nmodel_df = pd.DataFrame(models, columns=['model'])\r\n# generate binary values using get_dummies\r\ndum_df = pd.get_dummies(model_df, columns=[\"model\"], prefix=[\"Model_is\"])\r\n# merge with main df bridge_df on key values\r\nmodel_df = model_df.join(dum_df)\r\nprint(model_df)\r\n\r\n#manufacturer\r\nmanufacturers = set(dff['manufacturer'])\r\nmanufacturer_df = pd.DataFrame(manufacturers, columns=['manufacturer'])\r\n# generate binary values using get_dummies\r\ndum_df = pd.get_dummies(manufacturer_df, columns=[\"manufacturer\"], prefix=[\"Manufacturer_is\"])\r\n# merge with main df bridge_df on key values\r\nmanufacturer_df = manufacturer_df.join(dum_df)\r\nprint(manufacturer_df)\r\n\r\n#drive\r\ndrives = set(dff['drive'])\r\ndrive_df = pd.DataFrame(drives, columns=['drive'])\r\n# generate binary values using get_dummies\r\ndum_df = pd.get_dummies(drive_df, columns=[\"drive\"], prefix=[\"Drive_is\"])\r\n# merge with main df bridge_df on key values\r\ndrive_df = 
drive_df.join(dum_df)\r\nprint(drive_df)\r\n\r\n#transmission\r\ntransmissions = set(dff['transmission'])\r\ntransmission_df = pd.DataFrame(transmissions, columns=['transmission'])\r\n# generate binary values using get_dummies\r\ndum_df = pd.get_dummies(transmission_df, columns=[\"transmission\"], prefix=[\"Transmission_is\"])\r\n# merge with main df bridge_df on key values\r\ntransmission_df = transmission_df.join(dum_df)\r\nprint(transmission_df)\r\n\r\n#condition\r\nconditions = set(dff['condition'])\r\ncondition_df = pd.DataFrame(transmissions, columns=['condition'])\r\n# generate binary values using get_dummies\r\ndum_df = pd.get_dummies(condition_df, columns=[\"condition\"], prefix=[\"Condition_is\"])\r\n# merge with main df bridge_df on key values\r\ncondition_df = condition_df.join(dum_df)\r\nprint(condition_df)\r\n\r\n\r\n\r\n\r\n\r\n", "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\ndf = pd.read_csv('dividedsamples/training.csv') \ndfval = pd.read_csv('dividedsamples/testing.csv') \n\ntrain_features = df.copy()\ntest_features = dfval.copy()\n\ntrain_labels = train_features.pop('price')\ntest_labels = test_features.pop('price')\n\nregressor = LinearRegression()\nregressor.fit(train_features, train_labels)\ncoeff_df = pd.DataFrame(regressor.coef_, train_features.columns, columns=['Coefficient'])\nprint(coeff_df)\n\ny_pred = regressor.predict(test_features)\nboi = pd.DataFrame({'Actual': test_labels, 'Predicted': y_pred})\nprint(boi)" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.get_dummies" ], [ "pandas.read_csv", "sklearn.linear_model.LinearRegression", "pandas.DataFrame" ] ]
ziofil/strawberryfields
[ "2c3241c1831f396ade208a3d4b8ff5786f3cf806" ]
[ "tests/backend/test_beamsplitter_operation.py" ]
[ "# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nUnit tests for beamsplitter operations\nConvention: The beamsplitter operation transforms\n\\hat{a} -> t \\hat{a} + r \\hat{b}\n\\hat{b} -> - r^* \\hat{a} + t^* \\hat{b}\nwhere \\hat{a}, \\hat{b} are the photon creation operators of the two modes\nEquivalently, we have t:=\\cos(\\theta) (t assumed real) and r:=\\exp{i\\phi}\\sin(\\theta)\n\"\"\"\n\nimport pytest\n\nimport numpy as np\nfrom scipy.special import factorial\n\n\nT_VALUES = np.linspace(-0.2, 1.0, 3)\nPHASE_R = np.linspace(0, 2 * np.pi, 3, endpoint=False)\nALPHA = 0.1\nMAG_ALPHAS = np.linspace(0.0, ALPHA, 3)\n\n\nclass TestRepresentationIndependent:\n \"\"\"Basic implementation-independent tests.\"\"\"\n\n def test_complex_t(self, setup_backend):\n \"\"\"Test exception raised if t is complex\"\"\"\n t = 0.1 + 0.5j\n r = np.exp(1j * 0.2) * np.sqrt(1.0 - np.abs(t) ** 2)\n backend = setup_backend(2)\n\n with pytest.raises(ValueError, match=\"must be a float\"):\n backend.beamsplitter(t, r, 0, 1)\n\n @pytest.mark.parametrize(\"t\", T_VALUES)\n @pytest.mark.parametrize(\"r_phi\", PHASE_R)\n def test_vacuum_beamsplitter(self, setup_backend, t, r_phi, tol):\n \"\"\"Tests beamsplitter operation in some limiting cases where the output\n should be the vacuum in both modes.\"\"\"\n r = np.exp(1j * r_phi) * np.sqrt(1.0 - np.abs(t) ** 2)\n backend = setup_backend(2)\n\n backend.beamsplitter(t, r, 0, 1)\n 
assert np.all(backend.is_vacuum(tol))\n\n @pytest.mark.parametrize(\"t\", T_VALUES)\n @pytest.mark.parametrize(\"mag_alpha\", MAG_ALPHAS[1:])\n @pytest.mark.parametrize(\"r_phi\", PHASE_R)\n def test_coherent_vacuum_interfered(self, setup_backend, t, mag_alpha, r_phi, tol):\n r\"\"\"Tests if a range of beamsplitter output states (formed from a coherent state interfering with vacuum)\n have the correct fidelity with the expected coherent states outputs.\n |\\psi_in> = |\\alpha>|0> --> |t \\alpha>|r \\alpha> = |\\psi_out>\n and for each output mode,\n |\\gamma> = exp(-0.5 |\\gamma|^2) \\sum_n \\gamma^n / \\sqrt{n!} |n>\"\"\"\n phase_alpha = np.pi / 5\n alpha = mag_alpha * np.exp(1j * phase_alpha)\n r = np.exp(1j * r_phi) * np.sqrt(1.0 - np.abs(t) ** 2)\n backend = setup_backend(2)\n\n backend.displacement(alpha, 0)\n backend.beamsplitter(t, r, 0, 1)\n alpha_outA = t * alpha\n alpha_outB = r * alpha\n state = backend.state()\n fidel = state.fidelity_coherent([alpha_outA, alpha_outB])\n assert np.allclose(fidel, 1, atol=tol, rtol=0)\n\n\n@pytest.mark.backends(\"tf\", \"fock\")\nclass TestFockRepresentation:\n \"\"\"Tests that make use of the Fock basis representation.\"\"\"\n\n @pytest.mark.parametrize(\"t\", T_VALUES)\n @pytest.mark.parametrize(\"r_phi\", PHASE_R)\n def test_normalized_beamsplitter_output(self, setup_backend, t, r_phi, tol):\n \"\"\"Tests if a range of beamsplitter outputs states are normalized.\"\"\"\n\n alpha = ALPHA * np.exp(1j * np.pi / 3)\n r = np.exp(1j * r_phi) * np.sqrt(1.0 - np.abs(t) ** 2)\n backend = setup_backend(2)\n\n backend.displacement(alpha, 1)\n backend.beamsplitter(t, r, 0, 1)\n state = backend.state()\n tr = state.trace()\n assert np.allclose(tr, 1, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"t\", T_VALUES)\n @pytest.mark.parametrize(\"mag_alpha\", MAG_ALPHAS[1:])\n @pytest.mark.parametrize(\"r_phi\", PHASE_R)\n def test_coherent_vacuum_interfered_fock_elements(\n self, setup_backend, mag_alpha, t, r_phi, cutoff, pure, tol\n 
):\n r\"\"\"Tests if a range of beamsplitter output states (formed from a coherent state interfering with vacuum)\n have the correct Fock basis elements.\n |\\psi_in> = |\\alpha>|0> --> |t \\alpha>|r \\alpha> = |\\psi_out>\n and for each output mode,\n |\\gamma> = exp(-0.5 |\\gamma|^2) \\sum_n \\gamma^n / \\sqrt{n!} |n>\"\"\"\n\n phase_alpha = np.pi / 5\n alpha = mag_alpha * np.exp(1j * phase_alpha)\n r = np.exp(1j * r_phi) * np.sqrt(1.0 - np.abs(t) ** 2)\n backend = setup_backend(2)\n\n backend.displacement(alpha, 0)\n backend.beamsplitter(t, r, 0, 1)\n state = backend.state()\n\n if state.is_pure:\n numer_state = state.ket()\n else:\n numer_state = state.dm()\n\n alpha_outA = t * alpha\n alpha_outB = r * alpha\n\n n = np.arange(cutoff)\n ref_stateA = (\n np.exp(-0.5 * np.abs(alpha_outA) ** 2)\n * alpha_outA ** n\n / np.sqrt(factorial(n))\n )\n ref_stateB = (\n np.exp(-0.5 * np.abs(alpha_outB) ** 2)\n * alpha_outB ** n\n / np.sqrt(factorial(n))\n )\n\n ref_state = np.einsum(\"i,j->ij\", ref_stateA, ref_stateB)\n\n if not pure:\n ref_state = np.einsum(\n \"i,j,k,l->ijkl\",\n ref_stateA,\n np.conj(ref_stateA),\n ref_stateB,\n np.conj(ref_stateB),\n )\n\n assert np.allclose(numer_state, ref_state, atol=tol, rtol=0)\n" ]
[ [ "numpy.allclose", "numpy.einsum", "numpy.linspace", "numpy.conj", "numpy.arange", "numpy.abs", "scipy.special.factorial", "numpy.exp" ] ]
roshniRam/Tensorflow-for-poets2
[ "d2208873da0fdf7348c2af1887afab77ae143280" ]
[ "scripts/retrain.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Simple transfer learning with Inception v3 or Mobilenet models.\n\nWith support for TensorBoard.\n\nThis example shows how to take a Inception v3 or Mobilenet model trained on\nImageNet images, and train a new top layer that can recognize other classes of\nimages.\n\nThe top layer receives as input a 2048-dimensional vector (1001-dimensional for\nMobilenet) for each image. We train a softmax layer on top of this\nrepresentation. Assuming the softmax layer contains N labels, this corresponds\nto learning N + 2048*N (or 1001*N) model parameters corresponding to the\nlearned biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. 
Once your images are\nprepared, you can run the training with a command like this:\n\n\n```bash\nbazel build tensorflow/examples/image_retraining:retrain && \\\nbazel-bin/tensorflow/examples/image_retraining/retrain \\\n --image_dir ~/flower_photos\n```\n\nOr, if you have a pip installation of tensorflow, `retrain.py` can be run\nwithout bazel:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos\n```\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the label_image sample code.\n\nBy default this script will use the high accuracy, but comparatively large and\nslow Inception v3 model architecture. It's recommended that you start with this\nto validate that you have gathered good training data, but if you want to deploy\non resource-limited platforms, you can try the `--architecture` flag with a\nMobilenet model. For example:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos --architecture mobilenet_1.0_224\n```\n\nThere are 32 different Mobilenet models to choose from, with a variety of file\nsize and latency options. The first number can be '1.0', '0.75', '0.50', or\n'0.25' to control the size, and the second controls the input image size, either\n'224', '192', '160', or '128', with smaller sizes running faster. 
See\nhttps://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\nfor more information on Mobilenet.\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='1'\nimport argparse\nimport collections\nfrom datetime import datetime\nimport hashlib\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nFLAGS = None\n\n# These are all parameters that are tied to the particular model architecture\n# we're using for Inception v3. These include things like tensor names and their\n# sizes. 
If you want to adapt this script to work with another model, you will\n# need to update these to reflect the values in the network you're using.\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n A dictionary containing an entry for each label subfolder, with images split\n into training, testing, and validation sets within each label.\n \"\"\"\n if not gfile.Exists(image_dir):\n tf.logging.error(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = collections.OrderedDict()\n sub_dirs = [\n os.path.join(image_dir,item)\n for item in gfile.ListDirectory(image_dir)]\n sub_dirs = sorted(item for item in sub_dirs\n if gfile.IsDirectory(item))\n for sub_dir in sub_dirs:\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(sub_dir)\n if dir_name == image_dir:\n continue\n tf.logging.info(\"Looking for images in '\" + dir_name + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(gfile.Glob(file_glob))\n if not file_list:\n tf.logging.warning('No files found')\n continue\n if len(file_list) < 20:\n tf.logging.warning(\n 'WARNING: Folder has less than 20 images, which may cause issues.')\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n tf.logging.warning(\n 'WARNING: Folder {} has more than {} images. 
Some images will '\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category, architecture):\n \"\"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n architecture: The name of the model architecture.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n \"\"\"\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '_' + architecture + '.txt'\n\n\ndef create_model_graph(model_info):\n \"\"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\n\n Args:\n model_info: Dictionary containing information about the model architecture.\n\n Returns:\n Graph holding the trained Inception network, and various tensors we'll be\n manipulating.\n \"\"\"\n with tf.Graph().as_default() as graph:\n model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])\n with gfile.FastGFile(model_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(\n graph_def,\n name='',\n return_elements=[\n model_info['bottleneck_tensor_name'],\n model_info['resized_input_tensor_name'],\n ]))\n return graph, bottleneck_tensor, resized_input_tensor\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n decoded_image_tensor: Output of initial image resizing and preprocessing.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n # First decode the JPEG image, resize it, and 
rescale the pixel values.\n resized_input_values = sess.run(decoded_image_tensor,\n {image_data_tensor: image_data})\n # Then run it through the recognition network.\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: resized_input_values})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef maybe_download_and_extract(data_url):\n \"\"\"Download and extract model tar file.\n\n If the pretrained model we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a directory.\n\n Args:\n data_url: Web location of the tar file containing the pretrained model.\n \"\"\"\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = data_url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n tf.logging.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\nbottleneck_path_2_bottleneck_values = {}\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Create a single bottleneck file.\"\"\"\n tf.logging.info('Creating bottleneck at ' + bottleneck_path)\n image_path = get_image_path(image_lists, label_name, 
index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor)
  except Exception as e:
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))
  # The cache format is a single line of comma-separated float strings.
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)


def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
                             category, bottleneck_dir, jpeg_data_tensor,
                             decoded_image_tensor, resized_input_tensor,
                             bottleneck_tensor, architecture):
  """Retrieves or calculates bottleneck values for an image.

  If a cached version of the bottleneck data exists on-disk, return that,
  otherwise calculate the data and save it to disk for future use.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be modulo-ed by the
      available number of images for the label, so it can be arbitrarily large.
    image_dir: Root folder string of the subfolders containing the training
      images.
    category: Name string of which set to pull images from - training, testing,
      or validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: The tensor to feed loaded jpeg data into.
    decoded_image_tensor: The output of decoding and resizing the image.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The output tensor for the bottleneck values.
    architecture: The name of the model architecture.

  Returns:
    Numpy array of values produced by the bottleneck layer for the image.
  """
  label_lists = image_lists[label_name]
  sub_dir = label_lists['dir']
  sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
  ensure_dir_exists(sub_dir_path)
  bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
                                        bottleneck_dir, category, architecture)
  if not os.path.exists(bottleneck_path):
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor)
  with open(bottleneck_path, 'r') as bottleneck_file:
    bottleneck_string = bottleneck_file.read()
  did_hit_error = False
  try:
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  except ValueError:
    tf.logging.warning('Invalid float found, recreating bottleneck')
    did_hit_error = True
  # A corrupt cache file is regenerated exactly once; a second failure is
  # allowed to propagate to the caller.
  if did_hit_error:
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor)
    with open(bottleneck_path, 'r') as bottleneck_file:
      bottleneck_string = bottleneck_file.read()
    # Allow exceptions to propagate here, since they shouldn't happen after a
    # fresh creation
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  return bottleneck_values


def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, decoded_image_tensor,
                      resized_input_tensor, bottleneck_tensor, architecture):
  """Ensures all the training, testing, and validation bottlenecks are cached.

  Because we're likely to read the same image multiple times (if there are no
  distortions applied during training) it can speed things up a lot if we
  calculate the bottleneck layer values once for each image during
  preprocessing, and then just read those cached values repeatedly during
  training. Here we go through all the images we've found, calculate those
  values, and save them off.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    image_dir: Root folder string of the subfolders containing the training
      images.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: Input tensor for jpeg data from file.
    decoded_image_tensor: The output of decoding and resizing the image.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The penultimate output layer of the graph.
    architecture: The name of the model architecture.

  Returns:
    Nothing.
  """
  how_many_bottlenecks = 0
  ensure_dir_exists(bottleneck_dir)
  # Walk every image of every label in every split so later lookups always
  # hit the cache.
  for label_name, label_lists in image_lists.items():
    for category in ['training', 'testing', 'validation']:
      category_list = label_lists[category]
      for index, unused_base_name in enumerate(category_list):
        get_or_create_bottleneck(
            sess, image_lists, label_name, index, image_dir, category,
            bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
            resized_input_tensor, bottleneck_tensor, architecture)

        how_many_bottlenecks += 1
        if how_many_bottlenecks % 100 == 0:
          tf.logging.info(
              str(how_many_bottlenecks) + ' bottleneck files created.')


def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
                                  bottleneck_dir, image_dir, jpeg_data_tensor,
                                  decoded_image_tensor, resized_input_tensor,
                                  bottleneck_tensor, architecture):
  """Retrieves bottleneck values for cached images.

  If no distortions are being applied, this function can retrieve the cached
  bottleneck values directly from disk for images. It picks a random set of
  images from the specified category.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: If positive, a random sample of this size will be chosen.
      If negative, all bottlenecks will be retrieved.
    category: Name string of which set to pull from - training, testing, or
      validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    image_dir: Root folder string of the subfolders containing the training
      images.
    jpeg_data_tensor: The layer to feed jpeg image data into.
    decoded_image_tensor: The output of decoding and resizing the image.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.
    architecture: The name of the model architecture.

  Returns:
    List of bottleneck arrays, their corresponding ground truths, and the
    relevant filenames.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  filenames = []
  if how_many >= 0:
    # Retrieve a random sample of bottlenecks.
    for unused_i in range(how_many):
      label_index = random.randrange(class_count)
      label_name = list(image_lists.keys())[label_index]
      image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
      image_name = get_image_path(image_lists, label_name, image_index,
                                  image_dir, category)
      bottleneck = get_or_create_bottleneck(
          sess, image_lists, label_name, image_index, image_dir, category,
          bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
          resized_input_tensor, bottleneck_tensor, architecture)
      # One-hot ground truth vector for this label.
      ground_truth = np.zeros(class_count, dtype=np.float32)
      ground_truth[label_index] = 1.0
      bottlenecks.append(bottleneck)
      ground_truths.append(ground_truth)
      filenames.append(image_name)
  else:
    # Retrieve all bottlenecks.
    for label_index, label_name in enumerate(image_lists.keys()):
      for image_index, image_name in enumerate(
          image_lists[label_name][category]):
        image_name = get_image_path(image_lists, label_name, image_index,
                                    image_dir, category)
        bottleneck = get_or_create_bottleneck(
            sess, image_lists, label_name, image_index, image_dir, category,
            bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
            resized_input_tensor, bottleneck_tensor, architecture)
        ground_truth = np.zeros(class_count, dtype=np.float32)
        ground_truth[label_index] = 1.0
        bottlenecks.append(bottleneck)
        ground_truths.append(ground_truth)
        filenames.append(image_name)
  return bottlenecks, ground_truths, filenames


def get_random_distorted_bottlenecks(
    sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
    distorted_image, resized_input_tensor, bottleneck_tensor):
  """Retrieves bottleneck values for training images, after distortions.

  If we're training with distortions like crops, scales, or flips, we have to
  recalculate the full model for every image, and so we can't use cached
  bottleneck values. Instead we find random images for the requested category,
  run them through the distortion graph, and then the full graph to get the
  bottleneck results for each.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: The integer number of bottleneck values to return.
    category: Name string of which set of images to fetch - training, testing,
      or validation.
    image_dir: Root folder string of the subfolders containing the training
      images.
    input_jpeg_tensor: The input layer we feed the image data to.
    distorted_image: The output node of the distortion graph.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.

  Returns:
    List of bottleneck arrays and their corresponding ground truths.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  for unused_i in range(how_many):
    label_index = random.randrange(class_count)
    label_name = list(image_lists.keys())[label_index]
    image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
    image_path = get_image_path(image_lists, label_name, image_index, image_dir,
                                category)
    if not gfile.Exists(image_path):
      tf.logging.fatal('File does not exist %s', image_path)
    jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # sending running inference on the image. This involves 2 memory copies and
    # might be optimized in other implementations.
    distorted_image_data = sess.run(distorted_image,
                                    {input_jpeg_tensor: jpeg_data})
    bottleneck_values = sess.run(bottleneck_tensor,
                                 {resized_input_tensor: distorted_image_data})
    bottleneck_values = np.squeeze(bottleneck_values)
    ground_truth = np.zeros(class_count, dtype=np.float32)
    ground_truth[label_index] = 1.0
    bottlenecks.append(bottleneck_values)
    ground_truths.append(ground_truth)
  return bottlenecks, ground_truths


def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
      crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
          (random_brightness != 0))


def add_input_distortions(flip_left_right, random_crop, random_scale,
                          random_brightness, input_width, input_height,
                          input_depth, input_mean, input_std):
  """Creates the operations to apply the specified distortions.

  During training it can help to improve the results if we run the images
  through simple distortions like crops, scales, and flips. These reflect the
  kind of variations we expect in the real world, and so can help train the
  model to cope with natural data more effectively. Here we take the supplied
  parameters and construct a network of operations to apply them to an image.

  Cropping
  ~~~~~~~~

  Cropping is done by placing a bounding box at a random position in the full
  image.
The cropping parameter controls the size of that box relative to the
  input image. If it's zero, then the box is the same size as the input and no
  cropping is performed. If the value is 50%, then the crop box will be half the
  width and height of the input. In a diagram it looks like this:

   <       width      >
  +---------------------+
  |                     |
  |   width - crop%     |
  |    <      >         |
  |    +------+         |
  |    |      |         |
  |    |      |         |
  |    |      |         |
  |    +------+         |
  |                     |
  |                     |
  +---------------------+

  Scaling
  ~~~~~~~

  Scaling is a lot like cropping, except that the bounding box is always
  centered and its size varies randomly within the given range. For example if
  the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box will be in
  a random range between half the width and height and full size.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
      crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
    input_width: Horizontal size of expected input image to model.
    input_height: Vertical size of expected input image to model.
    input_depth: How many channels the expected input image should have.
    input_mean: Pixel value that should be zero in the image for the graph.
    input_std: How much to divide the pixel values by before recognition.

  Returns:
    The jpeg input layer and the distorted result tensor.
  """

  jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  # Combined scale: grow by the crop margin, then by a random resize factor.
  margin_scale = 1.0 + (random_crop / 100.0)
  resize_scale = 1.0 + (random_scale / 100.0)
  margin_scale_value = tf.constant(margin_scale)
  resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
                                         minval=1.0,
                                         maxval=resize_scale)
  scale_value = tf.multiply(margin_scale_value, resize_scale_value)
  precrop_width = tf.multiply(scale_value, input_width)
  precrop_height = tf.multiply(scale_value, input_height)
  precrop_shape = tf.stack([precrop_height, precrop_width])
  precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
  precropped_image = tf.image.resize_bilinear(decoded_image_4d,
                                              precrop_shape_as_int)
  # NOTE(review): squeeze_dims is a deprecated alias for axis in tf.squeeze.
  precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
  cropped_image = tf.random_crop(precropped_image_3d,
                                 [input_height, input_width, input_depth])
  if flip_left_right:
    flipped_image = tf.image.random_flip_left_right(cropped_image)
  else:
    flipped_image = cropped_image
  brightness_min = 1.0 - (random_brightness / 100.0)
  brightness_max = 1.0 + (random_brightness / 100.0)
  brightness_value = tf.random_uniform(tensor_shape.scalar(),
                                       minval=brightness_min,
                                       maxval=brightness_max)
  brightened_image = tf.multiply(flipped_image, brightness_value)
  # Finally normalize to the range the recognition graph expects.
  offset_image = tf.subtract(brightened_image, input_mean)
  mul_image = tf.multiply(offset_image, 1.0 / input_std)
  distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')
  return jpeg_data, distort_result


def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)


def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,
                           bottleneck_tensor_size):
  """Adds a new softmax and fully-connected layer for
training.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The set up for the softmax and fully-connected layers is based on:\n https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that produces results.\n bottleneck_tensor: The output of the main CNN graph.\n bottleneck_tensor_size: How many entries in the bottleneck vector.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n with tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor,\n shape=[None, bottleneck_tensor_size],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(tf.float32,\n [None, class_count],\n name='GroundTruthInput')\n\n # Organizing the following ops as `final_training_ops` so they're easier\n # to see in TensorBoard\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal(\n [bottleneck_tensor_size, class_count], stddev=0.001)\n\n layer_weights = tf.Variable(initial_value, name='final_weights')\n\n variable_summaries(layer_weights)\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases)\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n tf.summary.histogram('activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = 
tf.nn.softmax_cross_entropy_with_logits(\n labels=ground_truth_input, logits=logits)\n with tf.name_scope('total'):\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Tuple of (evaluation step, prediction).\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal(\n prediction, tf.argmax(ground_truth_tensor, 1))\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\ndef save_graph_to_file(sess, graph, graph_file_name):\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n with gfile.FastGFile(graph_file_name, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n return\n\n\ndef prepare_file_system():\n # Setup the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n if FLAGS.intermediate_store_frequency > 0:\n ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)\n return\n\n\ndef create_model_info(architecture):\n \"\"\"Given the name of a model architecture, returns information about it.\n\n There are different 
base image recognition pretrained models that can be\n retrained using transfer learning, and this function translates from the name\n of a model to the attributes that are needed to download and train with it.\n\n Args:\n architecture: Name of a model architecture.\n\n Returns:\n Dictionary of information about the model, or None if the name isn't\n recognized\n\n Raises:\n ValueError: If architecture name is unknown.\n \"\"\"\n architecture = architecture.lower()\n if architecture == 'inception_v3':\n # pylint: disable=line-too-long\n data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n # pylint: enable=line-too-long\n bottleneck_tensor_name = 'pool_3/_reshape:0'\n bottleneck_tensor_size = 2048\n input_width = 299\n input_height = 299\n input_depth = 3\n resized_input_tensor_name = 'Mul:0'\n model_file_name = 'classify_image_graph_def.pb'\n input_mean = 128\n input_std = 128\n elif architecture.startswith('mobilenet_'):\n parts = architecture.split('_')\n if len(parts) != 3 and len(parts) != 4:\n tf.logging.error(\"Couldn't understand architecture name '%s'\",\n architecture)\n return None\n version_string = parts[1]\n if (version_string != '1.0' and version_string != '0.75' and\n version_string != '0.50' and version_string != '0.25'):\n tf.logging.error(\n \"\"\"\"The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',\n but found '%s' for architecture '%s'\"\"\",\n version_string, architecture)\n return None\n size_string = parts[2]\n if (size_string != '224' and size_string != '192' and\n size_string != '160' and size_string != '128'):\n tf.logging.error(\n \"\"\"The Mobilenet input size should be '224', '192', '160', or '128',\n but found '%s' for architecture '%s'\"\"\",\n size_string, architecture)\n return None\n if len(parts) == 3:\n is_quantized = False\n else:\n if parts[3] != 'quantized':\n tf.logging.error(\n \"Couldn't understand architecture suffix '%s' for '%s'\", parts[3],\n architecture)\n return 
None\n is_quantized = True\n data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'\n data_url += version_string + '_' + size_string + '_frozen.tgz'\n bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'\n bottleneck_tensor_size = 1001\n input_width = int(size_string)\n input_height = int(size_string)\n input_depth = 3\n resized_input_tensor_name = 'input:0'\n if is_quantized:\n model_base_name = 'quantized_graph.pb'\n else:\n model_base_name = 'frozen_graph.pb'\n model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string\n model_file_name = os.path.join(model_dir_name, model_base_name)\n input_mean = 127.5\n input_std = 127.5\n else:\n tf.logging.error(\"Couldn't understand architecture name '%s'\", architecture)\n raise ValueError('Unknown architecture', architecture)\n\n return {\n 'data_url': data_url,\n 'bottleneck_tensor_name': bottleneck_tensor_name,\n 'bottleneck_tensor_size': bottleneck_tensor_size,\n 'input_width': input_width,\n 'input_height': input_height,\n 'input_depth': input_depth,\n 'resized_input_tensor_name': resized_input_tensor_name,\n 'model_file_name': model_file_name,\n 'input_mean': input_mean,\n 'input_std': input_std,\n }\n\n\ndef add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n \"\"\"Adds operations that perform JPEG decoding and resizing to the graph..\n\n Args:\n input_width: Desired width of the image fed into the recognizer graph.\n input_height: Desired width of the image fed into the recognizer graph.\n input_depth: Desired channels of the image fed into the recognizer graph.\n input_mean: Pixel value that should be zero in the image for the graph.\n input_std: How much to divide the pixel values by before recognition.\n\n Returns:\n Tensors for the node to feed JPEG data into, and the output of the\n preprocessing steps.\n \"\"\"\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n 
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n offset_image = tf.subtract(resized_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n return jpeg_data, mul_image\n\n\ndef main(_):\n # Needed to make sure the logging output is visible.\n # See https://github.com/tensorflow/tensorflow/issues/3047\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Prepare necessary directories that can be used during training\n prepare_file_system()\n\n # Gather information about the model architecture we'll be using.\n model_info = create_model_info(FLAGS.architecture)\n if not model_info:\n tf.logging.error('Did not recognize architecture flag')\n return -1\n\n # Set up the pre-trained graph.\n maybe_download_and_extract(model_info['data_url'])\n graph, bottleneck_tensor, resized_image_tensor = (\n create_model_graph(model_info))\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n tf.logging.error('Only one valid folder of images found at ' +\n FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n\n with tf.Session(graph=graph) as sess:\n # Set up the image decoding sub-graph.\n jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(\n 
model_info['input_width'], model_info['input_height'],
        model_info['input_depth'], model_info['input_mean'],
        model_info['input_std'])

    if do_distort_images:
      # We will be applying distortions, so setup the operations we'll need.
      (distorted_jpeg_data_tensor,
       distorted_image_tensor) = add_input_distortions(
           FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
           FLAGS.random_brightness, model_info['input_width'],
           model_info['input_height'], model_info['input_depth'],
           model_info['input_mean'], model_info['input_std'])
    else:
      # We'll make sure we've calculated the 'bottleneck' image summaries and
      # cached them on disk.
      cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
                        FLAGS.bottleneck_dir, jpeg_data_tensor,
                        decoded_image_tensor, resized_image_tensor,
                        bottleneck_tensor, FLAGS.architecture)

    # Add the new layer that we'll be training.
    (train_step, cross_entropy, bottleneck_input, ground_truth_input,
     final_tensor) = add_final_training_ops(
         len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,
         model_info['bottleneck_tensor_size'])

    # Create the operations we need to evaluate the accuracy of our new layer.
    evaluation_step, prediction = add_evaluation_step(
        final_tensor, ground_truth_input)

    # Merge all the summaries and write them out to the summaries_dir
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                         sess.graph)

    validation_writer = tf.summary.FileWriter(
        FLAGS.summaries_dir + '/validation')

    # Set up all our weights to their initial default values.
    init = tf.global_variables_initializer()
    sess.run(init)

    # Run the training for as many cycles as requested on the command line.
    for i in range(FLAGS.how_many_training_steps):
      # Get a batch of input bottleneck values, either calculated fresh every
      # time with distortions applied, or from the cache stored on disk.
      if do_distort_images:
        (train_bottlenecks,
         train_ground_truth) = get_random_distorted_bottlenecks(
             sess, image_lists, FLAGS.train_batch_size, 'training',
             FLAGS.image_dir, distorted_jpeg_data_tensor,
             distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
      else:
        (train_bottlenecks,
         train_ground_truth, _) = get_random_cached_bottlenecks(
             sess, image_lists, FLAGS.train_batch_size, 'training',
             FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
             decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
             FLAGS.architecture)
      # Feed the bottlenecks and ground truth into the graph, and run a training
      # step. Capture training summaries for TensorBoard with the `merged` op.
      train_summary, _ = sess.run(
          [merged, train_step],
          feed_dict={bottleneck_input: train_bottlenecks,
                     ground_truth_input: train_ground_truth})
      train_writer.add_summary(train_summary, i)

      # Every so often, print out how well the graph is training.
      is_last_step = (i + 1 == FLAGS.how_many_training_steps)
      if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
        train_accuracy, cross_entropy_value = sess.run(
            [evaluation_step, cross_entropy],
            feed_dict={bottleneck_input: train_bottlenecks,
                       ground_truth_input: train_ground_truth})
        tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
                        (datetime.now(), i, train_accuracy * 100))
        tf.logging.info('%s: Step %d: Cross entropy = %f' %
                        (datetime.now(), i, cross_entropy_value))
        validation_bottlenecks, validation_ground_truth, _ = (
            get_random_cached_bottlenecks(
                sess, image_lists, FLAGS.validation_batch_size, 'validation',
                FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
                decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
                FLAGS.architecture))
        # Run a validation step and capture training summaries for TensorBoard
        # with the `merged` op.
        validation_summary, validation_accuracy = sess.run(
            [merged, evaluation_step],
            feed_dict={bottleneck_input: validation_bottlenecks,
                       ground_truth_input: validation_ground_truth})
        validation_writer.add_summary(validation_summary, i)
        tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
                        (datetime.now(), i, validation_accuracy * 100,
                         len(validation_bottlenecks)))

      # Store intermediate results
      intermediate_frequency = FLAGS.intermediate_store_frequency

      if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
          and i > 0):
        intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
                                  'intermediate_' + str(i) + '.pb')
        tf.logging.info('Save intermediate result to : ' +
                        intermediate_file_name)
        save_graph_to_file(sess, graph, intermediate_file_name)

    # We've completed all our training, so run a final test evaluation on
    # some new images we haven't used before.
    # A test_batch_size of -1 retrieves the entire testing set here.
    test_bottlenecks, test_ground_truth, test_filenames = (
        get_random_cached_bottlenecks(
            sess, image_lists, FLAGS.test_batch_size, 'testing',
            FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
            decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
            FLAGS.architecture))
    test_accuracy, predictions = sess.run(
        [evaluation_step, prediction],
        feed_dict={bottleneck_input: test_bottlenecks,
                   ground_truth_input: test_ground_truth})
    tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
                    (test_accuracy * 100, len(test_bottlenecks)))

    if FLAGS.print_misclassified_test_images:
      tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
      for i, test_filename in enumerate(test_filenames):
        if predictions[i] != test_ground_truth[i].argmax():
          tf.logging.info('%70s %s' %
                          (test_filename,
                           list(image_lists.keys())[predictions[i]]))

    # Write out the trained graph and labels with the weights stored as
    # constants.
    save_graph_to_file(sess, graph, FLAGS.output_graph)
    with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
      f.write('\n'.join(image_lists.keys()) + '\n')


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--image_dir',
type=str,\n default='',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--output_graph',\n type=str,\n default='/tmp/output_graph.pb',\n help='Where to save the trained graph.'\n )\n parser.add_argument(\n '--intermediate_output_graphs_dir',\n type=str,\n default='/tmp/intermediate_graph/',\n help='Where to save the intermediate graphs.'\n )\n parser.add_argument(\n '--intermediate_store_frequency',\n type=int,\n default=0,\n help=\"\"\"\\\n How many steps to store intermediate graph. If \"0\" then will not\n store.\\\n \"\"\"\n )\n parser.add_argument(\n '--output_labels',\n type=str,\n default='/tmp/output_labels.txt',\n help='Where to save the trained graph\\'s labels.'\n )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='/tmp/retrain_logs',\n help='Where to save summary logs for TensorBoard.'\n )\n parser.add_argument(\n '--how_many_training_steps',\n type=int,\n default=4000,\n help='How many training steps to run before ending.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.01,\n help='How large a learning rate to use when training.'\n )\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a test set.'\n )\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a validation set.'\n )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=10,\n help='How often to evaluate the training results.'\n )\n parser.add_argument(\n '--train_batch_size',\n type=int,\n default=100,\n help='How many images to train on at a time.'\n )\n parser.add_argument(\n '--test_batch_size',\n type=int,\n default=-1,\n help=\"\"\"\\\n How many images to test on. 
This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\"\n )\n parser.add_argument(\n '--validation_batch_size',\n type=int,\n default=100,\n help=\"\"\"\\\n How many images to use in an evaluation batch. This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\"\n )\n parser.add_argument(\n '--print_misclassified_test_images',\n default=False,\n help=\"\"\"\\\n Whether to print out a list of all misclassified test images.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--model_dir',\n type=str,\n default='/tmp/imagenet',\n help=\"\"\"\\\n Path to classify_image_graph_def.pb,\n imagenet_synset_to_human_label_map.txt, and\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\n \"\"\"\n )\n parser.add_argument(\n '--bottleneck_dir',\n type=str,\n default='/tmp/bottleneck',\n help='Path to cache bottleneck layer values as files.'\n )\n parser.add_argument(\n '--final_tensor_name',\n type=str,\n default='final_result',\n help=\"\"\"\\\n The name of the output classification layer in the retrained graph.\\\n \"\"\"\n )\n parser.add_argument(\n '--flip_left_right',\n default=False,\n help=\"\"\"\\\n Whether to randomly flip half of the training images horizontally.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--random_crop',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much of a margin to randomly crop off the\n training images.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_scale',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly scale up the size of 
the\n training images by.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_brightness',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly multiply the training image\n input pixels up or down by.\\\n \"\"\"\n )\n parser.add_argument(\n '--architecture',\n type=str,\n default='inception_v3',\n help=\"\"\"\\\n Which model architecture to use. 'inception_v3' is the most accurate, but\n also the slowest. For faster or smaller models, chose a MobileNet with the\n form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,\n 'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224\n pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much\n less accurate, but smaller and faster network that's 920 KB on disk and\n takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\n for more information on Mobilenet.\\\n \"\"\")\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.logging.warning", "tensorflow.gfile.DeleteRecursively", "tensorflow.zeros", "tensorflow.gfile.Exists", "tensorflow.stack", "numpy.squeeze", "tensorflow.cast", "tensorflow.python.platform.gfile.Exists", "tensorflow.gfile.MakeDirs", "tensorflow.python.platform.gfile.IsDirectory", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.image.random_flip_left_right", "tensorflow.Variable", "tensorflow.placeholder_with_default", "tensorflow.squeeze", "tensorflow.subtract", "tensorflow.logging.set_verbosity", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.square", "tensorflow.argmax", "numpy.zeros", "tensorflow.logging.fatal", "tensorflow.image.decode_jpeg", "tensorflow.app.run", "tensorflow.image.resize_bilinear", "tensorflow.python.platform.gfile.ListDirectory", "tensorflow.truncated_normal", "tensorflow.matmul", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.logging.info", "tensorflow.summary.merge_all", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.summary.histogram", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.constant", "tensorflow.nn.softmax", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.python.platform.gfile.Glob", "tensorflow.expand_dims", "tensorflow.random_crop", "tensorflow.python.util.compat.as_bytes", "tensorflow.logging.error", "tensorflow.reduce_min", "tensorflow.GraphDef" ] ]
ksarangmath/prolip
[ "145235cdc084b382830a8a471ed99383ca4c76fb" ]
[ "experiment.py" ]
[ "import os\nimport torch\nimport numpy as np\nimport time\nimport csv\nimport matplotlib as mpl \nfrom matplotlib import rc\nimport matplotlib.pyplot as plt\nimport argparse\nimport onnx\nfrom onnx import numpy_helper\nfrom boxprop import Box\nfrom onnx_to_boxprop import boxprop\n\ndef isnetworkfile(fname):\n _, ext = os.path.splitext(fname)\n if ext not in ['.onnx']:\n raise argparse.ArgumentTypeError('only .onnx format supported')\n return fname\n\ndef gen(upper_bound, lower_bound, G):\n\tgen_box = Box(upper_bound,lower_bound, False)\n\tboxprop(gen_box, G)\n\tprint(\"generator propagation done\")\n\treturn gen_box\n\ndef clf(gen_box, C):\n\tclf_box = Box(gen_box.upper, gen_box.lower, True)\n\tboxprop(clf_box, C)\n\tprint('classifier propagation done')\n\treturn clf_box\n\n\ndef main():\n\tparser = argparse.ArgumentParser(description='Arguments for PROLIP experiments', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('--genname', type=isnetworkfile, default=None, help='the generator network name, must be an onnx network with .onnx extension')\n\tparser.add_argument('--clfname', type=isnetworkfile, default=None, help='the classifier network name, must be an onnx network with .onnx extension')\n\tparser.add_argument('--boxsizes', type=int, default=[0.00001,0.001,0.1], nargs='+', help='list of box sizes')\n\tparser.add_argument('--numcenters', type=int, default=1, help='number of random centers')\n\tparser.add_argument('--randomseed', type=int, default=0, help='torch random seed for picking random box centers')\n\tparser.add_argument('--outfile', type=str, default='out', help='name for output files')\n\targs = parser.parse_args()\n\n\tassert args.genname, 'a generator network has to be provided for analysis.'\n\tassert args.clfname, 'a classifier network has to be provided for analysis.'\n\n\trc('font', **{'serif': ['Computer Modern']})\n\tmpl.rcParams.update({'font.size': 14})\n\n\tG = onnx.load(args.genname)\n\tC = 
onnx.load(args.clfname)\t \n\n\tlatent_size = numpy_helper.to_array(G.graph.initializer[0]).shape[0]\n\tboxSizes = args.boxsizes\n\tnumCenters = args.numcenters\n\trandomSeed = args.randomseed\n\tfilename = args.outfile\n\n\ttorch.manual_seed(randomSeed)\n\n\twith open(filename +'.csv', mode='w') as file:\n\t\tfieldnames = ['center','size','lip-constant','time']\n\t\twriter = csv.DictWriter(file, fieldnames=fieldnames)\n\t\twriter.writeheader()\n\t\tfor _ in range(numCenters):\n\t\t\tcenter = torch.randn(1,latent_size,1,1)\n\t\t\tfor size in boxSizes:\n\t\t\t\tupper_bound = center+size\n\t\t\t\tlower_bound = center-size\n\n\t\t\t\ttottic = time.perf_counter()\n\t\t\t\ta_o = gen(upper_bound,lower_bound,G)\n\t\t\t\tlipc = clf(a_o, C).getLip()\n\t\t\t\ttotaltime=time.perf_counter()-tottic\n\n\t\t\t\tprint('total time:', totaltime, 'lipc:', lipc)\n\t\t\t\tprint('ROUND DONE')\n\t\t\t\twriter.writerow({'center':center,'size':size,'lip-constant':lipc,'time':totaltime})\n\n\tsizeTime = {}\n\n\twith open(filename + '.csv') as csv_file:\n\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\tline_count = 0\n\t\tfor row in csv_reader:\n\t\t\tif line_count == 0:\n\t\t\t\tprint(f'Column names are {\", \".join(row)}')\n\t\t\t\tline_count += 1\n\t\t\telse:\n\n\t\t\t\tif float(row[1]) not in sizeTime:\n\t\t\t\t\tsizeTime[float(row[1])] = [float(row[3])]\n\t\t\t\telse:\n\t\t\t\t\tsizeTime[float(row[1])].append(float(row[3]))\n\n\t\t\t\tline_count += 1\n\t\tprint(f'Processed {line_count} lines.')\n\n\tx = [x+1 for x in range(numCenters)]\n\tN = len(x)\n\tind = np.arange(N) \n\twidth = 0.24 # adjust this width if bars are too wide or narrow\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\thandles = []\n\n\tkeys = list(sizeTime.keys())\n\tkeys.sort()\n\tfor i,s in enumerate(keys):\n\t\trects = ax.bar(ind+width*i, sizeTime[s], width,edgecolor='black')\n\t\thandles.append(rects)\n\n\tax.set_xticks(ind+width)\n\tax.set_xticklabels( x )\n\tax.legend( [h[0] for h in handles], 
boxSizes ,title='Box Sizes', loc='lower right')\n\n\tplt.title('PROLIP Runtime on ' + filename)\n\tplt.ylabel('Runtime (seconds)')\n\tplt.xlabel('Random Centers')\n\tplt.savefig(filename+'.png', bbox_inches='tight')\n \n\nif __name__ == \"__main__\":\n main()\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.title", "torch.manual_seed", "numpy.arange", "torch.randn", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "matplotlib.rcParams.update", "matplotlib.pyplot.xlabel", "matplotlib.rc", "matplotlib.pyplot.figure" ] ]
sthagen/pyvista-pyvista
[ "c49a6abae7cc62d242f12ec45a6b22b524db1ec8" ]
[ "tests/plotting/test_widgets.py" ]
[ "import numpy as np\nimport pytest\n\nimport pyvista\nfrom pyvista.plotting import system_supports_plotting\n\n# skip all tests if unable to render\nif not system_supports_plotting():\n pytestmark = pytest.mark.skip\n\n\ndef test_widget_box(uniform):\n p = pyvista.Plotter()\n func = lambda box: box # Does nothing\n p.add_mesh(uniform)\n p.add_box_widget(callback=func)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda box, widget: box # Does nothing\n p.add_mesh(uniform)\n p.add_box_widget(callback=func, pass_widget=True)\n p.close()\n\n # clip box with and without crinkle\n p = pyvista.Plotter()\n p.add_mesh_clip_box(uniform)\n p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_clip_box(uniform, crinkle=True)\n p.close()\n\n p = pyvista.Plotter()\n # merge_points=True is the default and is tested above\n p.add_mesh_clip_box(uniform, merge_points=False)\n p.close()\n\n\ndef test_widget_plane(uniform):\n p = pyvista.Plotter()\n func = lambda normal, origin: normal # Does nothing\n p.add_mesh(uniform)\n p.add_plane_widget(callback=func, implicit=True)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda normal, origin, widget: normal # Does nothing\n p.add_mesh(uniform)\n p.add_plane_widget(callback=func, pass_widget=True, implicit=True)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda normal, origin: normal # Does nothing\n p.add_mesh(uniform)\n p.add_plane_widget(callback=func, implicit=False)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda normal, origin, widget: normal # Does nothing\n p.add_mesh(uniform)\n p.add_plane_widget(callback=func, pass_widget=True, implicit=False)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda normal, origin: normal # Does nothing\n p.add_mesh(uniform)\n p.add_plane_widget(callback=func, assign_to_axis='z', implicit=True)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda normal, origin: normal # Does nothing\n p.add_mesh(uniform)\n p.add_plane_widget(callback=func, normal_rotation=False, implicit=False)\n 
p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_clip_plane(uniform)\n p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_clip_plane(uniform, crinkle=True)\n p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_slice(uniform)\n p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_slice_orthogonal(uniform)\n p.close()\n\n\ndef test_widget_line(uniform):\n p = pyvista.Plotter()\n func = lambda line: line # Does nothing\n p.add_mesh(uniform)\n p.add_line_widget(callback=func)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda line, widget: line # Does nothing\n p.add_mesh(uniform)\n p.add_line_widget(callback=func, pass_widget=True)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda a, b: (a, b) # Does nothing\n p.add_mesh(uniform)\n p.add_line_widget(callback=func, use_vertices=True)\n p.close()\n\n\ndef test_widget_text_slider(uniform):\n p = pyvista.Plotter()\n func = lambda value: value # Does nothing\n p.add_mesh(uniform)\n with pytest.raises(TypeError, match='must be a list'):\n p.add_text_slider_widget(callback=func, data='foo')\n with pytest.raises(ValueError, match='list of values is empty'):\n p.add_text_slider_widget(callback=func, data=[])\n for style in pyvista.global_theme.slider_styles:\n p.add_text_slider_widget(callback=func, data=['foo', 'bar'], style=style)\n p.close()\n\n\ndef test_widget_slider(uniform):\n p = pyvista.Plotter()\n func = lambda value: value # Does nothing\n p.add_mesh(uniform)\n p.add_slider_widget(callback=func, rng=[0, 10], style=\"classic\")\n p.close()\n\n p = pyvista.Plotter()\n for event_type in ['start', 'end', 'always']:\n p.add_slider_widget(callback=func, rng=[0, 10], event_type=event_type)\n with pytest.raises(TypeError, match='type for ``style``'):\n p.add_slider_widget(callback=func, rng=[0, 10], style=0)\n with pytest.raises(AttributeError):\n p.add_slider_widget(callback=func, rng=[0, 10], style=\"foo\")\n with pytest.raises(TypeError, match='type for `event_type`'):\n p.add_slider_widget(callback=func, rng=[0, 10], 
event_type=0)\n with pytest.raises(ValueError, match='value for `event_type`'):\n p.add_slider_widget(callback=func, rng=[0, 10], event_type='foo')\n p.close()\n\n p = pyvista.Plotter()\n func = lambda value, widget: value # Does nothing\n p.add_mesh(uniform)\n p.add_slider_widget(callback=func, rng=[0, 10], style=\"modern\", pass_widget=True)\n p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_threshold(uniform, invert=True)\n p.add_mesh(uniform.outline())\n p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_threshold(uniform, invert=False)\n p.add_mesh(uniform.outline())\n p.close()\n\n p = pyvista.Plotter()\n p.add_mesh_isovalue(uniform)\n p.close()\n\n p = pyvista.Plotter()\n title_height = np.random.random()\n s = p.add_slider_widget(callback=func, rng=[0, 10], style=\"classic\", title_height=title_height)\n assert s.GetRepresentation().GetTitleHeight() == title_height\n p.close()\n\n p = pyvista.Plotter()\n title_opacity = np.random.random()\n s = p.add_slider_widget(\n callback=func, rng=[0, 10], style=\"classic\", title_opacity=title_opacity\n )\n assert s.GetRepresentation().GetTitleProperty().GetOpacity() == title_opacity\n p.close()\n\n p = pyvista.Plotter()\n title_color = \"red\"\n s = p.add_slider_widget(callback=func, rng=[0, 10], style=\"classic\", title_color=title_color)\n assert s.GetRepresentation().GetTitleProperty().GetColor() == pyvista.Color(title_color)\n p.close()\n\n p = pyvista.Plotter()\n fmt = \"%0.9f\"\n s = p.add_slider_widget(callback=func, rng=[0, 10], style=\"classic\", fmt=fmt)\n assert s.GetRepresentation().GetLabelFormat() == fmt\n p.close()\n\n\ndef test_widget_spline(uniform):\n p = pyvista.Plotter()\n func = lambda spline: spline # Does nothing\n p.add_mesh(uniform)\n p.add_spline_widget(callback=func)\n p.close()\n\n p = pyvista.Plotter()\n func = lambda spline, widget: spline # Does nothing\n p.add_mesh(uniform)\n p.add_spline_widget(callback=func, pass_widget=True, color=None, show_ribbon=True)\n p.close()\n\n p = 
pyvista.Plotter()\n p.add_mesh_slice_spline(uniform)\n p.close()\n\n\ndef test_widget_uniform(uniform):\n p = pyvista.Plotter()\n func = lambda center: center # Does nothing\n p.add_sphere_widget(callback=func, center=(0, 0, 0))\n p.close()\n\n nodes = np.array([[-1, -1, -1], [1, 1, 1]])\n p = pyvista.Plotter()\n func = lambda center: center # Does nothing\n p.add_sphere_widget(callback=func, center=nodes)\n p.close()\n\n\ndef test_widget_checkbox_button(uniform):\n p = pyvista.Plotter()\n func = lambda value: value # Does nothing\n p.add_mesh(uniform)\n p.add_checkbox_button_widget(callback=func)\n p.close()\n\n\n@pytest.mark.skipif(pyvista.vtk_version_info < (9, 1), reason=\"Requires vtk>=9.1\")\ndef test_add_camera_orientation_widget(uniform):\n p = pyvista.Plotter()\n p.add_camera_orientation_widget()\n assert p.camera_widgets\n p.close()\n assert not p.camera_widgets\n" ]
[ [ "numpy.array", "numpy.random.random" ] ]
WorksApplications/omni_torch
[ "10b689d794c8f485e38c765303ef018da17bc641" ]
[ "networks/train.py" ]
[ "\"\"\"\n# Copyright (c) 2018 Works Applications Co., Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nimport time\nimport torch\nimport omni_torch.visualize.basic as vb\nimport numpy as np\n\ndef fit(net, args, dataset, device, optimizer, criterion, measure=None, is_train=True,\n visualize_step=100, visualize_op=None, visualize_loss=False,\n plot_low_bound=None, plot_high_bound=None):\n \"\"\"\n \n :param net: The network you defined\n :param args:\n :param dataset:\n :param device: gpu device\n :param optimizer: torch.optim or self defined loss function\n :param criterion: torch.nn or self defined loss function used for optimization\n :param measure: torch.nn or self defined loss function only used for measure\n :param is_train: current status\n :param visualize_step: visualize the training or validation process every n steps\n :param visualize_op: the function used for above visualization\n :param visualize_loss: visualize the each loss during each step\n :return:\n \"\"\"\n if is_train:\n net.train()\n prefix = \"\"\n iter = args.epoches_per_phase\n else:\n net.eval()\n prefix = \"VAL\"\n iter = 1\n # Each step, there will be N types of losses and measures be calculated\n # The structure of the Basic_losses will be:\n # [[loss_1_step_1, loss_2_step_1, ... 
, loss_N_step_1],\n # [loss_1_step_2, loss_2_step_2, ..., loss_N_step_2],\n # ...\n # [loss_1_step_m, loss_2_step_m, ..., loss_N_step_m]]\n Basic_Losses, Basic_Measures = [], []\n if \"loss_name\" in args.items():\n loss_name = args.loss_name\n else:\n loss_name = None\n for epoch in range(iter):\n epoch_loss, epoch_measure = [], []\n start_time = time.time()\n for batch_idx, data in enumerate(dataset):\n if args.steps_per_epoch is not None and batch_idx >= args.steps_per_epoch:\n break\n img_batch, label_batch = data[0].to(device), data[1].to(device)\n prediction = net(img_batch)\n prediction = [prediction] if type(prediction) is not list else prediction\n label = [label_batch] if type(label_batch) is not list else label_batch\n basic_loss = criterion(prediction, label)\n basic_loss = [basic_loss] if type(basic_loss) is not list else basic_loss\n Basic_Losses.append([float(loss.data) for loss in basic_loss])\n epoch_loss.append(Basic_Losses[-1])\n if measure:\n basic_measure = measure(prediction[-1], label_batch)\n basic_measure = [basic_measure] if type(basic_measure) is not list else basic_measure\n Basic_Measures.append([float(loss.data) for loss in basic_measure])\n epoch_measure.append(Basic_Measures[-1])\n if is_train:\n optimizer.zero_grad()\n if loss_name:\n total_loss = sum([loss * args.loss_weight[loss_name[i]] for i, loss in enumerate(basic_loss)])\n else:\n total_loss = sum([loss for loss in basic_loss])\n total_loss.backward()\n optimizer.step()\n # Visualize Output and Loss\n assert len(loss_name) == len(Basic_Losses[-1]), \\\n \"please check your args.loss_name for it does not match the length of the number of loss produced by the network\"\n loss_dict = dict(zip(loss_name, Basic_Losses[-1]))\n if batch_idx % visualize_step == 0:\n visualize_op(args, batch_idx, prefix, loss_dict, img_batch, prediction, label_batch)\n if measure:\n print(prefix + \" --- total loss: %8f, total measure: %8f at epoch %04d/%04d, cost %3f seconds ---\" %\n (sum([sum(i) 
for i in epoch_loss]) / len(epoch_loss),\n sum([sum(i) for i in epoch_measure]) / len(epoch_measure),\n epoch, args.epoches_per_phase, time.time() - start_time))\n else:\n print(prefix + \" --- total loss: %8f at epoch %04d/%04d, cost %3f seconds ---\" %\n (sum([sum(i) for i in epoch_loss]) / len(epoch_loss),\n epoch, args.epoches_per_phase, time.time() - start_time))\n if is_train:\n args.curr_epoch += 1\n # Then in the below line , the structure of the Basic_losses\n # and Basic_Measures will be transposed:\n # [[loss_1_step_1, loss_1_step_2, ... , loss_1_step_m],\n # [loss_2_step_1, loss_2_step_2, ..., loss_2_step_m],\n # ...\n # [loss_N_step_1, loss_N_step_2, ..., loss_N_step_m]]\n all_losses = list(zip(*Basic_Losses))\n all_measures = list(zip(*Basic_Measures))\n print(*list(zip(loss_name, [sum(loss) / len(loss) for loss in all_losses])))\n if is_train and visualize_loss:\n vb.visualize_gradient(args, net)\n #args.loss_weight = init.update_loss_weight(all_losses, loss_name, args.loss_weight,\n #args.loss_weight_range, args.loss_weight_momentum)\n vb.plot_curves([np.asarray(loss) for loss in all_losses], loss_name, args.loss_log,\n prefix + \"loss_at_\", args.curr_epoch, window=11, fig_size=(5, 5),\n low_bound=plot_low_bound, high_bound=plot_high_bound)\n return all_losses, all_measures\n\ndef evaluation(net, args, val_set, device, optimizer, criterion, measure=None, is_train=False,\n visualize_step=100, visualize_op=None, visualize_loss=False,\n plot_low_bound=None, plot_high_bound=None):\n with torch.no_grad():\n return fit(net, args, val_set, device, optimizer, criterion, measure, is_train,\n visualize_step, visualize_op, visualize_loss, plot_low_bound, plot_high_bound)" ]
[ [ "numpy.asarray", "torch.no_grad" ] ]
grapesmoker/ketl
[ "328a807bd1a439ae45540cf3d96597c803689a73" ]
[ "ketl/transformer/Transformer.py" ]
[ "from abc import abstractmethod\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import List, Union, Dict, Callable\n\nimport pandas as pd\nimport json\n\n\nclass AdapterError(Exception):\n pass\n\n\nclass NoValidSourcesError(AdapterError):\n pass\n\n\nclass BaseTransformer:\n \"\"\"\n The base transformer class. Should not be instantiated directly.\n \"\"\"\n # TODO: init should take the configuration kwargs\n\n def __init__(self, transpose: bool = False, concat_on_axis: Union[int, str] = None,\n columns: List[Union[str, int]] = None, skip_errors: bool = False,\n rename: Union[Callable, Dict[str, str]] = None, **kwargs):\n \"\"\" Initialize the transformer.\n\n :param transpose: whether to transpose the resulting matrix.\n :param concat_on_axis: whether to concatenate data along some axis.\n :param columns: column names.\n :param skip_errors: whether to skip input files if an error is encountered.\n :param rename: a dict or function suitable for passing to the Pandas rename function.\n :param kwargs: optional keyword arguments to pass to reader.\n \"\"\"\n self.transpose = transpose\n self.concat_on_axis = concat_on_axis\n self.columns = columns\n self.skip_errors = skip_errors\n self.rename = rename\n\n self.passed_kwargs = kwargs\n\n @abstractmethod\n def transform(self, source_files: List[Path]) -> pd.DataFrame:\n \"\"\" Run the actual transformation.\n\n :param source_files: the source files containing the data.\n :return: a data frame.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def _build_data_frame(self, source_files: List[Path]) -> pd.DataFrame:\n \"\"\" Construct a data frame from the list of inpute files.\n\n :param source_files: the source files containing the data.\n :return: a data frame.\n \"\"\"\n raise NotImplementedError\n\n\nclass DelimitedTableTransformer(BaseTransformer):\n \"\"\"\n A transformer that changes the input data into a delimited table.\n \"\"\"\n def __init__(self, transpose: bool = False, 
concat_on_axis: Union[str, int] = None,\n columns: List[Union[str, int]] = None, skip_errors: bool = False,\n rename: Union[Callable, Dict[str, str]] = None, **kwargs):\n \"\"\" Initialize the transformer.\n\n :param transpose: whether to transpose the resulting data.\n :param concat_on_axis: whether to concatenate the data along an axis.\n :param columns: list of column names.\n :param skip_errors: whether to skip errors.\n :param rename: a dict or function suitable for passing to the Pandas rename function.\n :param kwargs: keyword arguments to be passed to the reader.\n \"\"\"\n super(DelimitedTableTransformer, self).__init__(\n transpose, concat_on_axis, columns, skip_errors, rename, **kwargs)\n\n self.reader_kwargs = {\n 'comment': None,\n 'names': None,\n 'delimiter': None,\n 'header': 'infer',\n 'dtype': None,\n 'index_col': None,\n 'parse_dates': None,\n 'skiprows': None,\n 'iterator': True,\n 'chunksize': 50000\n }\n self.reader_kwargs.update(self.passed_kwargs)\n\n def _build_data_frame(self, source_files: List[Path]):\n \"\"\" Build a data frame from a list of source files. All kwargs set at initialization are passed\n to the CSV reader.\n\n :param source_files: a list of source files to read data from.\n :return: a Pandas data frame.\n \"\"\"\n data_frames = [pd.read_csv(source_file, **self.reader_kwargs) for source_file in source_files]\n\n # for the special case where every file is a column. this assumes all data can fit into memory\n # TODO: replace this with dask stuff so that things can be lazily concatenated\n if self.concat_on_axis:\n df = pd.concat(data_frames, axis=self.concat_on_axis)\n yield df\n else:\n df_chain = chain(*data_frames)\n\n for chunk in df_chain:\n if self.transpose:\n yield chunk.transpose()\n else:\n yield chunk\n\n def transform(self, source_files: List[Path]) -> pd.DataFrame:\n \"\"\" Transform the data contained in the list of source files to something else. 
By default\n simply returns the data frame consisting of the raw data.\n\n :param source_files: a list of source files.\n :return: a Pandas data frame.\n \"\"\"\n for df in self._build_data_frame(source_files):\n yield df\n\n\nclass JsonTableTransformer(BaseTransformer):\n\n def __init__(self, record_path: Union[List[str], str] = None,\n transpose: bool = False,\n concat_on_axis: Union[str, int] = None,\n columns: List[Union[str, int]] = None,\n skip_errors: bool = False,\n rename: Union[Callable, Dict[str, str]] = None,\n **kwargs):\n\n super(JsonTableTransformer, self).__init__(\n transpose, concat_on_axis, columns, skip_errors, rename, **kwargs)\n\n self.record_path = record_path\n\n self.reader_kwargs = {\n 'orient': None,\n 'typ': 'frame',\n 'dtype': None,\n 'convert_axes': None,\n 'convert_dates': True,\n 'keep_default_dates': True,\n 'precise_float': False,\n 'date_unit': None,\n 'encoding': None,\n 'lines': False,\n 'chunksize': None,\n 'compression': 'infer',\n 'nrows': None,\n 'storage_options': None\n }\n self.reader_kwargs.update(self.passed_kwargs)\n\n @staticmethod\n def _extract_data(filename: Union[Path, str], record_path: Union[List[str], str],\n serialize: bool = True) -> Union[dict, list, str]:\n\n with open(filename, 'r') as f:\n data: dict = json.load(f)\n if type(record_path) is str:\n if serialize:\n return json.dumps(data[record_path])\n else:\n return data[record_path]\n elif type(record_path) is list:\n for item in record_path:\n data = data[item]\n if serialize:\n return json.dumps(data)\n else:\n return data\n else:\n raise TypeError('record_path must be a list or a string')\n\n def _build_data_frame(self, source_files: List[Path]) -> pd.DataFrame:\n\n # we're assuming any single json file can fit into memory here because we need to be able to\n # access its internals to extract data from it\n\n for source_file in source_files:\n\n try:\n if not self.record_path:\n df = pd.read_json(source_file, **self.reader_kwargs)\n df._source_file = 
source_file\n else:\n data = self._extract_data(source_file, self.record_path)\n df = pd.read_json(data, **self.reader_kwargs)\n df._source_file = source_file\n\n yield df.transpose() if self.transpose else df\n except Exception as ex:\n if self.skip_errors:\n print(f'skipping {source_file} due to error: {ex}')\n yield pd.DataFrame()\n else:\n raise ex\n\n def transform(self, source_files: List[Path]) -> pd.DataFrame:\n\n # TODO: move the renaming logic to the base and allow a mapper to be passed\n\n for df in self._build_data_frame(source_files):\n if not df.empty:\n if self.rename:\n df = df.rename(self.rename, axis='columns')\n if self.columns:\n df = df[self.columns]\n\n yield df\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.read_json", "pandas.DataFrame" ] ]
deraevejames/data-generation_FS
[ "0f245f5456cec09119a4bd034d81a731df029b51" ]
[ "MVPA_sim_data_generation.py" ]
[ "import numpy as np\r\n\r\n# this code generates data that simulates two tasks which elicit a \r\n# shared activation pattern for certain voxels(default is features 10-20)\r\n# this is presented here as each task having two \"rules\" with one rule in common for both tasks\r\n\r\n#### arguments:\r\n# signal = SNR, defined as ratio of mean/std\r\n\r\n# trials = number of trials per task set\r\n\r\n# inf_features = number of informative features per rule\r\n\r\n# non_inf_features = number of non-informative features\r\n####\r\n\r\ndef sim_data(signal, trials=100, inf_features=10, non_inf_features=270):\r\n\r\n # task set 1: rule A and rule B have mean equal to signal\r\n ts1_rule_A = np.random.normal(signal,1,(trials,inf_features))\r\n ts1_rule_B = np.random.normal(signal,1,(trials,inf_features))\r\n ts1_rule_C = np.random.normal(0,1,(trials,inf_features))\r\n ts1_rule_0 = np.random.normal(0,1,(trials,non_inf_features))\r\n\r\n ts1 = np.hstack((ts1_rule_A,ts1_rule_B,ts1_rule_C,ts1_rule_0))\r\n\r\n # set 2: rule B and rule C have mean equal to signal\r\n ts2_rule_A = np.random.normal(0,1,(trials,inf_features))\r\n ts2_rule_B = np.random.normal(signal,1,(trials,inf_features))\r\n ts2_rule_C = np.random.normal(signal,1,(trials,inf_features))\r\n ts2_rule_0 = np.random.normal(0,1,(trials,non_inf_features))\r\n\r\n ts2 = np.hstack((ts2_rule_A,ts2_rule_B,ts2_rule_C,ts2_rule_0))\r\n\r\n samples = np.vstack((ts1,ts2))\r\n labels = np.repeat(np.array([0,1]),trials)\r\n \r\n return samples, labels\r\n" ]
[ [ "numpy.hstack", "numpy.random.normal", "numpy.array", "numpy.vstack" ] ]
dachoc/nnUNet
[ "fc6bfb78fc728509a5e79e78ece34fc9c421785c" ]
[ "nnunet/training/dataloading/dataset_loading.py" ]
[ "# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom batchgenerators.augmentations.utils import random_crop_2D_image_batched, pad_nd_image\nimport numpy as np\nfrom batchgenerators.dataloading import SlimDataLoaderBase\nfrom multiprocessing import Pool\n\nfrom nnunet.configuration import default_num_threads\nfrom nnunet.paths import preprocessing_output_dir\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\n\ndef get_case_identifiers(folder):\n case_identifiers = [i[:-4] for i in os.listdir(folder) if i.endswith(\"npz\") and (i.find(\"segFromPrevStage\") == -1)]\n return case_identifiers\n\n\ndef get_case_identifiers_from_raw_folder(folder):\n case_identifiers = np.unique(\n [i[:-12] for i in os.listdir(folder) if i.endswith(\".nii.gz\") and (i.find(\"segFromPrevStage\") == -1)])\n return case_identifiers\n\n\ndef convert_to_npy(args):\n if not isinstance(args, tuple):\n key = \"data\"\n npz_file = args\n else:\n npz_file, key = args\n if not isfile(npz_file[:-3] + \"npy\"):\n a = np.load(npz_file)[key]\n np.save(npz_file[:-3] + \"npy\", a)\n\n\ndef save_as_npz(args):\n if not isinstance(args, tuple):\n key = \"data\"\n npy_file = args\n else:\n npy_file, key = args\n d = np.load(npy_file)\n np.savez_compressed(npy_file[:-3] + \"npz\", **{key: d})\n\n\ndef unpack_dataset(folder, 
threads=default_num_threads, key=\"data\"):\n \"\"\"\n unpacks all npz files in a folder to npy (whatever you want to have unpacked must be saved unter key)\n :param folder:\n :param threads:\n :param key:\n :return:\n \"\"\"\n p = Pool(threads)\n npz_files = subfiles(folder, True, None, \".npz\", True)\n p.map(convert_to_npy, zip(npz_files, [key] * len(npz_files)))\n p.close()\n p.join()\n\n\ndef pack_dataset(folder, threads=default_num_threads, key=\"data\"):\n p = Pool(threads)\n npy_files = subfiles(folder, True, None, \".npy\", True)\n p.map(save_as_npz, zip(npy_files, [key] * len(npy_files)))\n p.close()\n p.join()\n\n\ndef delete_npy(folder):\n case_identifiers = get_case_identifiers(folder)\n npy_files = [join(folder, i + \".npy\") for i in case_identifiers]\n npy_files = [i for i in npy_files if isfile(i)]\n for n in npy_files:\n os.remove(n)\n\n\ndef load_dataset(folder, num_cases_properties_loading_threshold=1000):\n # we don't load the actual data but instead return the filename to the np file.\n print('loading dataset')\n case_identifiers = get_case_identifiers(folder)\n case_identifiers.sort()\n dataset = OrderedDict()\n for c in case_identifiers:\n dataset[c] = OrderedDict()\n dataset[c]['data_file'] = join(folder, \"%s.npz\" % c)\n\n # dataset[c]['properties'] = load_pickle(join(folder, \"%s.pkl\" % c))\n dataset[c]['properties_file'] = join(folder, \"%s.pkl\" % c)\n\n if dataset[c].get('seg_from_prev_stage_file') is not None:\n dataset[c]['seg_from_prev_stage_file'] = join(folder, \"%s_segs.npz\" % c)\n\n if len(case_identifiers) <= num_cases_properties_loading_threshold:\n print('loading all case properties')\n for i in dataset.keys():\n dataset[i]['properties'] = load_pickle(dataset[i]['properties_file'])\n\n return dataset\n\n\ndef crop_2D_image_force_fg(img, crop_size, valid_voxels):\n \"\"\"\n img must be [c, x, y]\n img[-1] must be the segmentation with segmentation>0 being foreground\n :param img:\n :param crop_size:\n :param valid_voxels: 
voxels belonging to the selected class\n :return:\n \"\"\"\n assert len(valid_voxels.shape) == 2\n\n if type(crop_size) not in (tuple, list):\n crop_size = [crop_size] * (len(img.shape) - 1)\n else:\n assert len(crop_size) == (len(\n img.shape) - 1), \"If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)\"\n\n # we need to find the center coords that we can crop to without exceeding the image border\n lb_x = crop_size[0] // 2\n ub_x = img.shape[1] - crop_size[0] // 2 - crop_size[0] % 2\n lb_y = crop_size[1] // 2\n ub_y = img.shape[2] - crop_size[1] // 2 - crop_size[1] % 2\n\n if len(valid_voxels) == 0:\n selected_center_voxel = (np.random.random_integers(lb_x, ub_x),\n np.random.random_integers(lb_y, ub_y))\n else:\n selected_center_voxel = valid_voxels[np.random.choice(valid_voxels.shape[1]), :]\n\n selected_center_voxel = np.array(selected_center_voxel)\n for i in range(2):\n selected_center_voxel[i] = max(crop_size[i] // 2, selected_center_voxel[i])\n selected_center_voxel[i] = min(img.shape[i + 1] - crop_size[i] // 2 - crop_size[i] % 2,\n selected_center_voxel[i])\n\n result = img[:, (selected_center_voxel[0] - crop_size[0] // 2):(\n selected_center_voxel[0] + crop_size[0] // 2 + crop_size[0] % 2),\n (selected_center_voxel[1] - crop_size[1] // 2):(\n selected_center_voxel[1] + crop_size[1] // 2 + crop_size[1] % 2)]\n return result\n\n\nclass DataLoader3D(SlimDataLoaderBase):\n def __init__(self, data, patch_size, final_patch_size, batch_size, has_prev_stage=False,\n oversample_foreground_percent=0.0, memmap_mode=\"r\", pad_mode=\"edge\", pad_kwargs_data=None,\n pad_sides=None):\n \"\"\"\n This is the basic data loader for 3D networks. It uses preprocessed data as produced by my (Fabian) preprocessing.\n You can load the data with load_dataset(folder) where folder is the folder where the npz files are located. If there\n are only npz files present in that folder, the data loader will unpack them on the fly. 
This may take a while\n and increase CPU usage. Therefore, I advise you to call unpack_dataset(folder) first, which will unpack all npz\n to npy. Don't forget to call delete_npy(folder) after you are done with training?\n Why all the hassle? Well the decathlon dataset is huge. Using npy for everything will consume >1 TB and that is uncool\n given that I (Fabian) will have to store that permanently on /datasets and my local computer. With this strategy all\n data is stored in a compressed format (factor 10 smaller) and only unpacked when needed.\n :param data: get this with load_dataset(folder, stage=0). Plug the return value in here and you are g2g (good to go)\n :param patch_size: what patch size will this data loader return? it is common practice to first load larger\n patches so that a central crop after data augmentation can be done to reduce border artifacts. If unsure, use\n get_patch_size() from data_augmentation.default_data_augmentation\n :param final_patch_size: what will the patch finally be cropped to (after data augmentation)? this is the patch\n size that goes into your network. We need this here because we will pad patients in here so that patches at the\n border of patients are sampled properly\n :param batch_size:\n :param num_batches: how many batches will the data loader produce before stopping? None=endless\n :param seed:\n :param stage: ignore this (Fabian only)\n :param random: Sample keys randomly; CAREFUL! 
non-random sampling requires batch_size=1, otherwise you will iterate batch_size times over the dataset\n :param oversample_foreground: half the batch will be forced to contain at least some foreground (equal prob for each of the foreground classes)\n \"\"\"\n super(DataLoader3D, self).__init__(data, batch_size, None)\n if pad_kwargs_data is None:\n pad_kwargs_data = OrderedDict()\n self.pad_kwargs_data = pad_kwargs_data\n self.pad_mode = pad_mode\n self.oversample_foreground_percent = oversample_foreground_percent\n self.final_patch_size = final_patch_size\n self.has_prev_stage = has_prev_stage\n self.patch_size = patch_size\n self.list_of_keys = list(self._data.keys())\n # need_to_pad denotes by how much we need to pad the data so that if we sample a patch of size final_patch_size\n # (which is what the network will get) these patches will also cover the border of the patients\n self.need_to_pad = (np.array(patch_size) - np.array(final_patch_size)).astype(int)\n if pad_sides is not None:\n if not isinstance(pad_sides, np.ndarray):\n pad_sides = np.array(pad_sides)\n self.need_to_pad += pad_sides\n self.memmap_mode = memmap_mode\n self.num_channels = None\n self.pad_sides = pad_sides\n self.data_shape, self.seg_shape = self.determine_shapes()\n\n def get_do_oversample(self, batch_idx):\n return not batch_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))\n\n def determine_shapes(self):\n if self.has_prev_stage:\n num_seg = 2\n else:\n num_seg = 1\n\n k = list(self._data.keys())[0]\n if isfile(self._data[k]['data_file'][:-4] + \".npy\"):\n case_all_data = np.load(self._data[k]['data_file'][:-4] + \".npy\", self.memmap_mode)\n else:\n case_all_data = np.load(self._data[k]['data_file'])['data']\n num_color_channels = case_all_data.shape[0] - 1\n data_shape = (self.batch_size, num_color_channels, *self.patch_size)\n seg_shape = (self.batch_size, num_seg, *self.patch_size)\n return data_shape, seg_shape\n\n def generate_train_batch(self):\n 
selected_keys = np.random.choice(self.list_of_keys, self.batch_size, True, None)\n data = np.zeros(self.data_shape, dtype=np.float32)\n seg = np.zeros(self.seg_shape, dtype=np.float32)\n case_properties = []\n for j, i in enumerate(selected_keys):\n # oversampling foreground will improve stability of model training, especially if many patches are empty\n # (Lung for example)\n if self.get_do_oversample(j):\n force_fg = True\n else:\n force_fg = False\n\n if 'properties' in self._data[i].keys():\n properties = self._data[i]['properties']\n else:\n properties = load_pickle(self._data[i]['properties_file'])\n case_properties.append(properties)\n\n # cases are stored as npz, but we require unpack_dataset to be run. This will decompress them into npy\n # which is much faster to access\n if isfile(self._data[i]['data_file'][:-4] + \".npy\"):\n case_all_data = np.load(self._data[i]['data_file'][:-4] + \".npy\", self.memmap_mode)\n else:\n case_all_data = np.load(self._data[i]['data_file'])['data']\n\n # If we are doing the cascade then we will also need to load the segmentation of the previous stage and\n # concatenate it. Here it will be concatenates to the segmentation because the augmentations need to be\n # applied to it in segmentation mode. Later in the data augmentation we move it from the segmentations to\n # the last channel of the data\n if self.has_prev_stage:\n if isfile(self._data[i]['seg_from_prev_stage_file'][:-4] + \".npy\"):\n segs_from_previous_stage = np.load(self._data[i]['seg_from_prev_stage_file'][:-4] + \".npy\",\n mmap_mode=self.memmap_mode)[None]\n else:\n segs_from_previous_stage = np.load(self._data[i]['seg_from_prev_stage_file'])['data'][None]\n # we theoretically support several possible previsous segmentations from which only one is sampled. 
But\n # in practice this feature was never used so it's always only one segmentation\n seg_key = np.random.choice(segs_from_previous_stage.shape[0])\n seg_from_previous_stage = segs_from_previous_stage[seg_key:seg_key + 1]\n assert all([i == j for i, j in zip(seg_from_previous_stage.shape[1:], case_all_data.shape[1:])]), \\\n \"seg_from_previous_stage does not match the shape of case_all_data: %s vs %s\" % \\\n (str(seg_from_previous_stage.shape[1:]), str(case_all_data.shape[1:]))\n else:\n seg_from_previous_stage = None\n\n # do you trust me? You better do. Otherwise you'll have to go through this mess and honestly there are\n # better things you could do right now\n\n # (above) documentation of the day. Nice. Even myself coming back 1 months later I have not friggin idea\n # what's going on. I keep the above documentation just for fun but attempt to make things clearer now\n\n need_to_pad = self.need_to_pad\n for d in range(3):\n # if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides\n # always\n if need_to_pad[d] + case_all_data.shape[d + 1] < self.patch_size[d]:\n need_to_pad[d] = self.patch_size[d] - case_all_data.shape[d + 1]\n\n # we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we\n # define what the upper and lower bound can be to then sample form them with np.random.randint\n shape = case_all_data.shape[1:]\n lb_x = - need_to_pad[0] // 2\n ub_x = shape[0] + need_to_pad[0] // 2 + need_to_pad[0] % 2 - self.patch_size[0]\n lb_y = - need_to_pad[1] // 2\n ub_y = shape[1] + need_to_pad[1] // 2 + need_to_pad[1] % 2 - self.patch_size[1]\n lb_z = - need_to_pad[2] // 2\n ub_z = shape[2] + need_to_pad[2] // 2 + need_to_pad[2] % 2 - self.patch_size[2]\n\n # if not force_fg then we can just sample the bbox randomly from lb and ub. 
Else we need to make sure we get\n # at least one of the foreground classes in the patch\n if not force_fg:\n bbox_x_lb = np.random.randint(lb_x, ub_x + 1)\n bbox_y_lb = np.random.randint(lb_y, ub_y + 1)\n bbox_z_lb = np.random.randint(lb_z, ub_z + 1)\n else:\n # these values should have been precomputed\n if 'class_locations' not in properties.keys():\n raise RuntimeError(\"Please rerun the preprocessing with the newest version of nnU-Net!\")\n\n # this saves us a np.unique. Preprocessing already did that for all cases. Neat.\n foreground_classes = np.array(\n [i for i in properties['class_locations'].keys() if len(properties['class_locations'][i]) != 0])\n print(foreground_classes)\n foreground_classes = foreground_classes[foreground_classes > 0]\n print(foreground_classes)\n\n\n if len(foreground_classes) == 0:\n # this only happens if some image does not contain foreground voxels at all\n selected_class = None\n voxels_of_that_class = None\n print('case does not contain any foreground classes', i)\n else:\n selected_class = np.random.choice(foreground_classes)\n\n voxels_of_that_class = properties['class_locations'][selected_class]\n\n if voxels_of_that_class is not None:\n selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]\n # selected voxel is center voxel. 
Subtract half the patch size to get lower bbox voxel.\n # Make sure it is within the bounds of lb and ub\n bbox_x_lb = max(lb_x, selected_voxel[0] - self.patch_size[0] // 2)\n bbox_y_lb = max(lb_y, selected_voxel[1] - self.patch_size[1] // 2)\n bbox_z_lb = max(lb_z, selected_voxel[2] - self.patch_size[2] // 2)\n else:\n # If the image does not contain any foreground classes, we fall back to random cropping\n bbox_x_lb = np.random.randint(lb_x, ub_x + 1)\n bbox_y_lb = np.random.randint(lb_y, ub_y + 1)\n bbox_z_lb = np.random.randint(lb_z, ub_z + 1)\n\n bbox_x_ub = bbox_x_lb + self.patch_size[0]\n bbox_y_ub = bbox_y_lb + self.patch_size[1]\n bbox_z_ub = bbox_z_lb + self.patch_size[2]\n\n # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the\n # bbox that actually lies within the data. This will result in a smaller array which is then faster to pad.\n # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size\n # later\n valid_bbox_x_lb = max(0, bbox_x_lb)\n valid_bbox_x_ub = min(shape[0], bbox_x_ub)\n valid_bbox_y_lb = max(0, bbox_y_lb)\n valid_bbox_y_ub = min(shape[1], bbox_y_ub)\n valid_bbox_z_lb = max(0, bbox_z_lb)\n valid_bbox_z_ub = min(shape[2], bbox_z_ub)\n\n # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage.\n # Why not just concatenate them here and forget about the if statements? 
Well that's because segneeds to\n # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also\n # remove label -1 in the data augmentation but this way it is less error prone)\n case_all_data = np.copy(case_all_data[:, valid_bbox_x_lb:valid_bbox_x_ub,\n valid_bbox_y_lb:valid_bbox_y_ub,\n valid_bbox_z_lb:valid_bbox_z_ub])\n if seg_from_previous_stage is not None:\n seg_from_previous_stage = seg_from_previous_stage[:, valid_bbox_x_lb:valid_bbox_x_ub,\n valid_bbox_y_lb:valid_bbox_y_ub,\n valid_bbox_z_lb:valid_bbox_z_ub]\n\n data[j] = np.pad(case_all_data[:-1], ((0, 0),\n (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),\n (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0)),\n (-min(0, bbox_z_lb), max(bbox_z_ub - shape[2], 0))),\n self.pad_mode, **self.pad_kwargs_data)\n\n seg[j, 0] = np.pad(case_all_data[-1:], ((0, 0),\n (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),\n (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0)),\n (-min(0, bbox_z_lb), max(bbox_z_ub - shape[2], 0))),\n 'constant', **{'constant_values': -1})\n if seg_from_previous_stage is not None:\n seg[j, 1] = np.pad(seg_from_previous_stage, ((0, 0),\n (-min(0, bbox_x_lb),\n max(bbox_x_ub - shape[0], 0)),\n (-min(0, bbox_y_lb),\n max(bbox_y_ub - shape[1], 0)),\n (-min(0, bbox_z_lb),\n max(bbox_z_ub - shape[2], 0))),\n 'constant', **{'constant_values': 0})\n\n return {'data': data, 'seg': seg, 'properties': case_properties, 'keys': selected_keys}\n\n\nclass DataLoader2D(SlimDataLoaderBase):\n def __init__(self, data, patch_size, final_patch_size, batch_size, oversample_foreground_percent=0.0,\n memmap_mode=\"r\", pseudo_3d_slices=1, pad_mode=\"edge\",\n pad_kwargs_data=None, pad_sides=None):\n \"\"\"\n This is the basic data loader for 2D networks. It uses preprocessed data as produced by my (Fabian) preprocessing.\n You can load the data with load_dataset(folder) where folder is the folder where the npz files are located. 
If there\n are only npz files present in that folder, the data loader will unpack them on the fly. This may take a while\n and increase CPU usage. Therefore, I advise you to call unpack_dataset(folder) first, which will unpack all npz\n to npy. Don't forget to call delete_npy(folder) after you are done with training?\n Why all the hassle? Well the decathlon dataset is huge. Using npy for everything will consume >1 TB and that is uncool\n given that I (Fabian) will have to store that permanently on /datasets and my local computer. With htis strategy all\n data is stored in a compressed format (factor 10 smaller) and only unpacked when needed.\n :param data: get this with load_dataset(folder, stage=0). Plug the return value in here and you are g2g (good to go)\n :param patch_size: what patch size will this data loader return? it is common practice to first load larger\n patches so that a central crop after data augmentation can be done to reduce border artifacts. If unsure, use\n get_patch_size() from data_augmentation.default_data_augmentation\n :param final_patch_size: what will the patch finally be cropped to (after data augmentation)? this is the patch\n size that goes into your network. We need this here because we will pad patients in here so that patches at the\n border of patients are sampled properly\n :param batch_size:\n :param num_batches: how many batches will the data loader produce before stopping? None=endless\n :param seed:\n :param stage: ignore this (Fabian only)\n :param transpose: ignore this\n :param random: sample randomly; CAREFUL! 
non-random sampling requires batch_size=1, otherwise you will iterate batch_size times over the dataset\n :param pseudo_3d_slices: 7 = 3 below and 3 above the center slice\n \"\"\"\n super(DataLoader2D, self).__init__(data, batch_size, None)\n if pad_kwargs_data is None:\n pad_kwargs_data = OrderedDict()\n self.pad_kwargs_data = pad_kwargs_data\n self.pad_mode = pad_mode\n self.pseudo_3d_slices = pseudo_3d_slices\n self.oversample_foreground_percent = oversample_foreground_percent\n self.final_patch_size = final_patch_size\n self.patch_size = patch_size\n self.list_of_keys = list(self._data.keys())\n self.need_to_pad = np.array(patch_size) - np.array(final_patch_size)\n self.memmap_mode = memmap_mode\n if pad_sides is not None:\n if not isinstance(pad_sides, np.ndarray):\n pad_sides = np.array(pad_sides)\n self.need_to_pad += pad_sides\n self.pad_sides = pad_sides\n self.data_shape, self.seg_shape = self.determine_shapes()\n\n def determine_shapes(self):\n num_seg = 1\n\n k = list(self._data.keys())[0]\n if isfile(self._data[k]['data_file'][:-4] + \".npy\"):\n case_all_data = np.load(self._data[k]['data_file'][:-4] + \".npy\", self.memmap_mode)\n else:\n case_all_data = np.load(self._data[k]['data_file'])['data']\n num_color_channels = case_all_data.shape[0] - num_seg\n data_shape = (self.batch_size, num_color_channels, *self.patch_size)\n seg_shape = (self.batch_size, num_seg, *self.patch_size)\n return data_shape, seg_shape\n\n def get_do_oversample(self, batch_idx):\n return not batch_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))\n\n def generate_train_batch(self):\n selected_keys = np.random.choice(self.list_of_keys, self.batch_size, True, None)\n print(\"selected_keys_\"+selected_keys)\n\n data = np.zeros(self.data_shape, dtype=np.float32)\n seg = np.zeros(self.seg_shape, dtype=np.float32)\n\n case_properties = []\n for j, i in enumerate(selected_keys):\n print(\"j_\"+j)\n print(\"i_\"+i)\n if 'properties' in self._data[i].keys():\n 
properties = self._data[i]['properties']\n print(\"in self.data[i].keys()\")\n print(\"self._data[i]_\"+self._data[i])\n print(\"self._data[i]['properties']_\"+self._data[i]['properties'])\n else:\n properties = load_pickle(self._data[i]['properties_file'])\n print(\"NOT in self.data[i].keys()\")\n print(\"self._data[i]_\"+self._data[i])\n print(\"self._data[i]['properties_file']_\"+self._data[i]['properties_file'])\n\n case_properties.append()\n if self.get_do_oversample(j):\n force_fg = True\n else:\n force_fg = False\n\n print(\"force_fg_\"+force_fg)\n\n if not isfile(self._data[i]['data_file'][:-4] + \".npy\"):\n # lets hope you know what you're doing\n case_all_data = np.load(self._data[i]['data_file'][:-4] + \".npz\")['data']\n print(\"in IF NOT isfile\")\n print(\"case_all_data\"+case_all_data)\n else:\n case_all_data = np.load(self._data[i]['data_file'][:-4] + \".npy\", self.memmap_mode)\n print(\"case_all_data\" + case_all_data)\n\n # this is for when there is just a 2d slice in case_all_data (2d support)\n if len(case_all_data.shape) == 3:\n case_all_data = case_all_data[:, None]\n print(\"in 2D slice\")\n\n # first select a slice. 
This can be either random (no force fg) or guaranteed to contain some class\n if not force_fg:\n random_slice = np.random.choice(case_all_data.shape[1])\n selected_class = None\n else:\n # these values should have been precomputed\n if 'class_locations' not in properties.keys():\n raise RuntimeError(\"Please rerun the preprocessing with the newest version of nnU-Net!\")\n\n foreground_classes = np.array(\n [i for i in properties['class_locations'].keys() if len(properties['class_locations'][i]) != 0])\n print(foreground_classes)\n foreground_classes = foreground_classes[foreground_classes > 0]\n print(foreground_classes)\n print(\"properties['class_locations'][i]\")\n print(properties['class_locations'][i])\n print(\" properties['class_locations'].keys()\")\n print(properties['class_locations'].keys())\n print(\"i\")\n print(i)\n\n if len(foreground_classes) == 0:\n selected_class = None\n random_slice = np.random.choice(case_all_data.shape[1])\n print('case does not contain any foreground classes', i)\n else:\n selected_class = np.random.choice(foreground_classes)\n\n voxels_of_that_class = properties['class_locations'][selected_class]\n valid_slices = np.unique(voxels_of_that_class[:, 0])\n random_slice = np.random.choice(valid_slices)\n voxels_of_that_class = voxels_of_that_class[voxels_of_that_class[:, 0] == random_slice]\n voxels_of_that_class = voxels_of_that_class[:, 1:]\n\n # now crop case_all_data to contain just the slice of interest. If we want additional slice above and\n # below the current slice, here is where we get them. We stack those as additional color channels\n if self.pseudo_3d_slices == 1:\n case_all_data = case_all_data[:, random_slice]\n else:\n # this is very deprecated and will probably not work anymore. 
If you intend to use this you need to\n # check this!\n mn = random_slice - (self.pseudo_3d_slices - 1) // 2\n mx = random_slice + (self.pseudo_3d_slices - 1) // 2 + 1\n valid_mn = max(mn, 0)\n valid_mx = min(mx, case_all_data.shape[1])\n case_all_seg = case_all_data[-1:]\n case_all_data = case_all_data[:-1]\n case_all_data = case_all_data[:, valid_mn:valid_mx]\n case_all_seg = case_all_seg[:, random_slice]\n need_to_pad_below = valid_mn - mn\n need_to_pad_above = mx - valid_mx\n if need_to_pad_below > 0:\n shp_for_pad = np.array(case_all_data.shape)\n shp_for_pad[1] = need_to_pad_below\n case_all_data = np.concatenate((np.zeros(shp_for_pad), case_all_data), 1)\n if need_to_pad_above > 0:\n shp_for_pad = np.array(case_all_data.shape)\n shp_for_pad[1] = need_to_pad_above\n case_all_data = np.concatenate((case_all_data, np.zeros(shp_for_pad)), 1)\n case_all_data = case_all_data.reshape((-1, case_all_data.shape[-2], case_all_data.shape[-1]))\n case_all_data = np.concatenate((case_all_data, case_all_seg), 0)\n\n # case all data should now be (c, x, y)\n assert len(case_all_data.shape) == 3\n\n # we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we\n # define what the upper and lower bound can be to then sample form them with np.random.randint\n\n need_to_pad = self.need_to_pad\n for d in range(2):\n # if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides\n # always\n if need_to_pad[d] + case_all_data.shape[d + 1] < self.patch_size[d]:\n need_to_pad[d] = self.patch_size[d] - case_all_data.shape[d + 1]\n\n shape = case_all_data.shape[1:]\n lb_x = - need_to_pad[0] // 2\n ub_x = shape[0] + need_to_pad[0] // 2 + need_to_pad[0] % 2 - self.patch_size[0]\n lb_y = - need_to_pad[1] // 2\n ub_y = shape[1] + need_to_pad[1] // 2 + need_to_pad[1] % 2 - self.patch_size[1]\n\n # if not force_fg then we can just sample the bbox randomly from lb and ub. 
Else we need to make sure we get\n # at least one of the foreground classes in the patch\n if not force_fg or selected_class is None:\n bbox_x_lb = np.random.randint(lb_x, ub_x + 1)\n bbox_y_lb = np.random.randint(lb_y, ub_y + 1)\n else:\n # this saves us a np.unique. Preprocessing already did that for all cases. Neat.\n selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]\n # selected voxel is center voxel. Subtract half the patch size to get lower bbox voxel.\n # Make sure it is within the bounds of lb and ub\n bbox_x_lb = max(lb_x, selected_voxel[0] - self.patch_size[0] // 2)\n bbox_y_lb = max(lb_y, selected_voxel[1] - self.patch_size[1] // 2)\n\n bbox_x_ub = bbox_x_lb + self.patch_size[0]\n bbox_y_ub = bbox_y_lb + self.patch_size[1]\n\n # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the\n # bbox that actually lies within the data. This will result in a smaller array which is then faster to pad.\n # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size\n # later\n valid_bbox_x_lb = max(0, bbox_x_lb)\n valid_bbox_x_ub = min(shape[0], bbox_x_ub)\n valid_bbox_y_lb = max(0, bbox_y_lb)\n valid_bbox_y_ub = min(shape[1], bbox_y_ub)\n\n # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage.\n # Why not just concatenate them here and forget about the if statements? 
Well that's because segneeds to\n # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also\n # remove label -1 in the data augmentation but this way it is less error prone)\n\n case_all_data = case_all_data[:, valid_bbox_x_lb:valid_bbox_x_ub,\n valid_bbox_y_lb:valid_bbox_y_ub]\n\n case_all_data_donly = np.pad(case_all_data[:-1], ((0, 0),\n (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),\n (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0))),\n self.pad_mode, **self.pad_kwargs_data)\n\n case_all_data_segonly = np.pad(case_all_data[-1:], ((0, 0),\n (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),\n (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0))),\n 'constant', **{'constant_values': -1})\n\n data[j] = case_all_data_donly\n seg[j] = case_all_data_segonly\n\n keys = selected_keys\n return {'data': data, 'seg': seg, 'properties': case_properties, \"keys\": keys}\n\n\nif __name__ == \"__main__\":\n t = \"Task002_Heart\"\n p = join(preprocessing_output_dir, t, \"stage1\")\n dataset = load_dataset(p)\n with open(join(join(preprocessing_output_dir, t), \"plans_stage1.pkl\"), 'rb') as f:\n plans = pickle.load(f)\n unpack_dataset(p)\n dl = DataLoader3D(dataset, (32, 32, 32), (32, 32, 32), 2, oversample_foreground_percent=0.33)\n dl = DataLoader3D(dataset, np.array(plans['patch_size']).astype(int), np.array(plans['patch_size']).astype(int), 2,\n oversample_foreground_percent=0.33)\n dl2d = DataLoader2D(dataset, (64, 64), np.array(plans['patch_size']).astype(int)[1:], 12,\n oversample_foreground_percent=0.33)\n" ]
[ [ "numpy.random.choice", "numpy.unique", "numpy.save", "numpy.concatenate", "numpy.savez_compressed", "numpy.copy", "numpy.random.random_integers", "numpy.load", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
HarshitR612/-Robust-Real-Time-LPR
[ "17a0296fdc1c41e52b3a69cde7e55daf7156cb10" ]
[ "src/yolo/segmentation-recognition/benchmark.py" ]
[ "import cv2\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom skimage.morphology import opening\nfrom skimage.morphology import disk\n\nconf_threshold = 0.5\nnms_threshold = 0.4\n\ndef getOutputLayers(net):\n\tlayerNames = net.getLayerNames()\n\toutputLayers = [layerNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\treturn outputLayers\n\ndef drawBoundingBox(image, class_id, confidence, xmin, ymin, xmax, ymax):\n\tlabel = str(classes[class_id])\n\tcv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0,0,255), 2)\n\tcv2.putText(image, label, (xmin-3, ymin-3), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (51,25,0), 2)\n\t\n# read class names from text file\nclasses = None\nwith open('crnet.names', 'r') as f:\n\tclasses = [line.strip() for line in f.readlines()]\n\n# read pre-trained model and config file\nnet = cv2.dnn.readNet('crnet.weights', 'crnet.cfg')\nnet2 = cv2.dnn.readNet('crnet_old.weights', 'crnet_old.cfg')\n\nfiles = os.listdir(\"benchmark/reld/\")\n\nfor file in files:\n\tclass_ids = []\n\tconfidences = []\n\tboxes = []\n\t# create input blob and set blob for the network\n\timage = cv2.imread(\"benchmark/reld/\" + file)\n\timage2 = cv2.resize(image, (608,250))\n\n\twidth = image2.shape[1]\n\theight = image2.shape[0]\n\tblob = cv2.dnn.blobFromImage(image, 1/255, (352,128), (0,0,0), True, crop=False)\n\tnet.setInput(blob)\n\tnet2.setInput(blob)\n\n\n\touts = net.forward(getOutputLayers(net))\n\tfor out in outs:\n\t\tfor detection in out:\n\t\t\tscores = detection[5:]\n\t\t\tclass_id = np.argmax(scores)\n\t\t\tconfidence = scores[class_id]\n\t\t\tif confidence > 0.5:\n\t\t\t\tcenter_x = int(detection[0] * width)\n\t\t\t\tcenter_y = int(detection[1] * height)\n\t\t\t\tw = int(detection[2] * width)\n\t\t\t\th = int(detection[3] * height)\n\t\t\t\tx = center_x - w / 2\n\t\t\t\ty = center_y - h / 2\n\t\t\t\tclass_ids.append(class_id)\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tboxes.append([x, y, w, h])\n\touts2 = 
net2.forward(getOutputLayers(net2))\n\tfor out in outs2:\n\t\tfor detection in out:\n\t\t\tscores = detection[5:]\n\t\t\tclass_id = np.argmax(scores)\n\t\t\tconfidence = scores[class_id]\n\t\t\tif confidence > 0.5:\n\t\t\t\tcenter_x = int(detection[0] * width)\n\t\t\t\tcenter_y = int(detection[1] * height)\n\t\t\t\tw = int(detection[2] * width)\n\t\t\t\th = int(detection[3] * height)\n\t\t\t\tx = center_x - w / 2\n\t\t\t\ty = center_y - h / 2\n\t\t\t\tclass_ids.append(class_id)\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tboxes.append([x, y, w, h])\n\n\tindices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)\n\tfor i in indices:\n\t\ti = i[0]\n\t\tbox = boxes[i]\n\t\tx = box[0]\n\t\ty = box[1]\n\t\tw = box[2]\n\t\th = box[3]\n\t\tdrawBoundingBox(image2, class_ids[i], confidences[i], int(x), int(y), int(x+w), int(y+h))\n\tcv2.imwrite(\"recognition results/reld/\" + file, image2)\n" ]
[ [ "numpy.argmax" ] ]
KernelA/ldgcnn
[ "e58017a5436738c159376c44ffb91930685990a2" ]
[ "evaluate.py" ]
[ "\"\"\"\nEvaluate the classification accuracy on the ModelNet40 based on our ldgcnn \ntrained feature extractor and classifier. We borrow the evaluation code \nfrom the DGCNN, and add the code of combining the classifier with the \nfeature extractor. \nReference code: https://github.com/WangYueFt/dgcnn\n@author: Kuangen Zhang\n\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport socket\nimport importlib\nimport os\nimport scipy.misc\nimport sys\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, 'models'))\nsys.path.append(os.path.join(BASE_DIR, 'utils'))\nsys.path.append(os.path.join(BASE_DIR, 'VisionProcess'))\nfrom PlotClass import PlotClass\nimport provider\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--log_dir', default='log', help='Log dir [default: log]')\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--model_cnn', default='ldgcnn', help='Model name: dgcnn [default: dgcnn]')\nparser.add_argument('--model_fc', default='ldgcnn_classifier', help='Model name: dgcnn [default: dgcnn]')\nparser.add_argument('--batch_size', type=int, default= 16, help='Batch Size during training [default: 1]')\nparser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')\nparser.add_argument('--num_feature', type=int, default=3072, help='Point Number [256/512/1024/2048] [default: 1024]')\nparser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')\nFLAGS = parser.parse_args()\n\nNAME_MODEL = ''\nLOG_DIR = FLAGS.log_dir\nBATCH_SIZE = FLAGS.batch_size\nNUM_POINT = FLAGS.num_point\nNUM_FEATURE = FLAGS.num_feature\nGPU_INDEX = FLAGS.gpu\n# MODEL_CNN: Model of feature extractor (convolutional layers)\nMODEL_CNN = importlib.import_module(FLAGS.model_cnn)\n# 
MODEL_FC: Model of feature extractor (convolutional layers)\nMODEL_FC = importlib.import_module(FLAGS.model_fc)\nDUMP_DIR = FLAGS.dump_dir\nif not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)\nLOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')\nLOG_FOUT.write(str(FLAGS)+'\\n')\n\nNUM_CLASSES = 40\nSHAPE_NAMES = [line.rstrip() for line in \\\n open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))] \nHOSTNAME = socket.gethostname()\n#%%\n# ModelNet40 official train/test split\nTRAIN_FILES = provider.getDataFiles( \\\n os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))\nTEST_FILES = provider.getDataFiles(\\\n os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))\n\nis_training = False\n#%%\nwith tf.device('/gpu:'+str(GPU_INDEX)):\n # Input of the MODEL_CNN is the point cloud and label.\n pointclouds_pl, labels_pl = MODEL_CNN.placeholder_inputs(BATCH_SIZE, NUM_POINT)\n # Input of the MODEL_FC is the global feature and label.\n features, labels_features = MODEL_FC.placeholder_inputs(BATCH_SIZE, NUM_FEATURE)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n\n _, layers = MODEL_CNN.get_model(pointclouds_pl, is_training_pl)\n pred,_ = MODEL_FC.get_model(features, is_training_pl)\n loss = MODEL_FC.get_loss(pred, labels_pl)\n #%%\nwith tf.device('/gpu:'+str(GPU_INDEX)): \n # Add ops to save and restore all the variables.\n variable_names = [v.name for v in tf.global_variables()]\n variables = tf.global_variables()\n # Variables before #43 belong to the feature extractor.\n saver_cnn = tf.train.Saver(variables[0:44])\n # Variables after #43 belong to the classifier.\n saver_fc = tf.train.Saver(variables[44:])\n#%%\n# Create a session\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.allow_soft_placement = True\nconfig.log_device_placement = True\n\nops = {'pointclouds_pl': pointclouds_pl,\n 'features': features,\n 'labels_pl': labels_pl,\n 'labels_features': 
labels_features,\n 'is_training_pl': is_training_pl,\n 'pred': pred,\n 'loss': loss}\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n#%%\ni = 0\nFiles = TEST_FILES\nwith tf.Session(config=config) as sess:\n with tf.device('/gpu:'+str(GPU_INDEX)):\n #Restore variables of feature extractor from disk.\n saver_cnn.restore(sess, os.path.join(LOG_DIR, FLAGS.model_cnn+'_'+ \n str(NAME_MODEL)+\"model.ckpt\"))\n #Restore variables of classifier from disk.\n saver_fc.restore(sess, os.path.join(LOG_DIR, FLAGS.model_fc+'_'+ \n str(NAME_MODEL)+\"model.ckpt\"))\n log_string(\"Model restored.\")\n error_cnt = 0\n is_training = False\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n total_seen_class = [0 for _ in range(NUM_CLASSES)]\n total_correct_class = [0 for _ in range(NUM_CLASSES)]\n fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')\n global_feature_vec = np.array([])\n label_vec = np.array([])\n for fn in range(len(Files)):\n log_string('----'+str(fn)+'----')\n current_data, current_label = provider.loadDataFile(Files[fn])\n current_data = current_data[:,0:NUM_POINT,:]\n current_label = np.squeeze(current_label)\n print(current_data.shape)\n \n file_size = current_data.shape[0]\n num_batches = file_size // BATCH_SIZE\n print(file_size)\n \n for batch_idx in range(num_batches):\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx+1) * BATCH_SIZE\n cur_batch_size = end_idx - start_idx\n \n # Aggregating begin\n batch_loss_sum = 0 # sum of losses for the batch\n batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes\n batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes \n feed_dict_cnn = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training}\n # Extract the global_feature from the feature extractor.\n global_feature = np.squeeze(layers['global_feature'].eval(\n 
feed_dict=feed_dict_cnn))\n \n # I find that we can increase the accuracy by about 0.2% after \n # padding zero vectors, but I do not know the reason.\n global_feature = np.concatenate([global_feature, np.zeros((\n global_feature.shape[0], NUM_FEATURE - global_feature.shape[1]))], axis = -1)\n \n # Input the extracted features and labels to the classifier.\n feed_dict = {ops['features']: global_feature,\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training}\n # Calculate the loss and classification scores.\n loss_val, pred_val = sess.run([ops['loss'], ops['pred']],\n feed_dict=feed_dict)\n batch_pred_sum += pred_val\n batch_pred_val = np.argmax(pred_val, 1)\n for el_idx in range(cur_batch_size):\n batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1\n batch_loss_sum += (loss_val * cur_batch_size)\n # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]\n pred_val = np.argmax(batch_pred_sum, 1)\n # Aggregating end\n \n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n # correct = np.sum(pred_val_topk[:,0:topk] == label_val)\n total_correct += correct\n total_seen += cur_batch_size\n loss_sum += batch_loss_sum\n \n for i in range(start_idx, end_idx):\n l = current_label[i]\n total_seen_class[l] += 1\n total_correct_class[l] += (pred_val[i-start_idx] == l)\n fout.write('%d, %d\\n' % (pred_val[i-start_idx], l))\n \n log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))\n log_string('eval accuracy: %f' % (total_correct / float(total_seen)))\n log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))\n \n class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)\n for i, name in enumerate(SHAPE_NAMES):\n log_string('%10s:\\t%0.3f' % (name, class_accuracies[i]))\n#%%\n#calculate confusion matrix\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import 
unique_labels\nimport re\nf = open(\"dump/pred_label.txt\", \"r\")\nstr_data = f.read()\ndata = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", str_data)\ndata = np.array(list(map(int, data)))\ndata = np.reshape(data, (-1, 2))\nf = open(\"dump/shape_names.txt\", \"r\")\nclass_names = np.array(f.read().split())\n# Plot the confusion matrix\ncm,ax = PlotClass.plot_confusion_matrix(data[:,1], data[:,0], classes=class_names, normalize=True,\n title='Normalized confusion matrix')" ]
[ [ "numpy.reshape", "numpy.squeeze", "tensorflow.global_variables", "tensorflow.placeholder", "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.Session", "tensorflow.train.Saver", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
linkian209/dnd_map
[ "76c15c632576524cef40b2dc9a6ac56a7fdbb902" ]
[ "map/util.py" ]
[ "'''map.util\n\nThis module contains functions to help generate maps. It also contains\nfunctions to display them.\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.spatial import Voronoi, voronoi_plot_2d # noqa\n\n\n# in_bounds and generate_voronoi create a bounded set of voronoi regions\n# https://stackoverflow.com/a/33602171/6095609\n# It works by mirroring the inital points across all 4 sides so the edges of\n# each region create the bounding box as a border\ndef in_bounds(points, bounds):\n '''\n This helper function checks if points are within the inputted bounds.\n\n Arguments:\n :param points: (list) List of points to check\n :param bounds: (list) List of bounds to check against\n\n Returns:\n bool: Whether the points are in bounds or not.\n '''\n return np.logical_and(\n np.logical_and(\n bounds[0] <= points[:, 0],\n points[:, 0] <= bounds[1]\n ),\n np.logical_and(\n bounds[2] <= points[:, 1],\n points[:, 1] <= bounds[3]\n )\n )\n\n\ndef generate_voronoi(points, bounds, eps, create_mapping=False):\n '''\n This function generates a voronoi plot that is bounded by the\n inputted bounds\n '''\n # Get whether data is in bounds or not\n in_or_not = in_bounds(points, bounds)\n\n # Now we will mirror the initial points across all 4 sides so we will have\n # sets of points:\n # - Center: Initial set of points\n # - Left: Initial points reflected across the line x = 0.0\n # - Right: Initial points reflected across the line x = 1.0\n # - Upper: Initial points reflected across the line y = 1.0\n # - Lower: Initial points reflected across the line y = 0.0\n points_center = points[in_or_not, :]\n\n points_left = np.copy(points_center)\n points_left[:, 0] = bounds[0] - (points_left[:, 0] - bounds[0])\n\n points_right = np.copy(points_center)\n points_right[:, 0] = bounds[1] + (bounds[1] - points_right[:, 0])\n\n points_upper = np.copy(points_center)\n points_upper[:, 1] = bounds[3] + (bounds[3] - points_upper[:, 1])\n\n points_lower = 
np.copy(points_center)\n points_lower[:, 1] = bounds[2] - (points_lower[:, 1] - bounds[2])\n\n input_data = np.append(\n points_center,\n np.append(\n np.append(points_left, points_right, axis=0),\n np.append(points_upper, points_lower, axis=0),\n axis=0\n ),\n axis=0\n )\n\n # Compute Voronoi and then filter the regions to the bounded ones\n vor = Voronoi(input_data)\n regions = []\n hashed_regions = np.array([hash_list(x) for x in vor.regions])\n filtered_point_region = np.zeros(len(points_center))\n filtered_region_point = np.empty(len(vor.regions))\n for region in vor.regions:\n is_in_bounds = True\n for index in region:\n # This point is at infinity\n if index is -1:\n is_in_bounds = False\n break\n # Else continue on\n else:\n x = vor.vertices[index, 0]\n y = vor.vertices[index, 1]\n\n # Check if it is in bounds\n if(not((bounds[0] - eps <= x and x <= bounds[1] + eps)\n and (bounds[2] - eps <= y and y <= bounds[3] + eps))):\n # This point is not in bounds\n is_in_bounds = False\n break\n\n if region != [] and is_in_bounds:\n if create_mapping:\n # First add this to the look up lists\n # Find the region index\n region_index = np.where(hashed_regions == hash_list(region))\n region_index = region_index[0][0]\n\n # Now find the point index\n point_index = np.where(vor.point_region == region_index)\n point_index = point_index[0][0]\n\n # Now find this point in the filtered point list\n filtered_point_index = np.where(points_center == vor.points[point_index])\n filtered_point_index = filtered_point_index[0][0]\n\n # Now add the points\n filtered_point_region[filtered_point_index] = len(regions)\n filtered_region_point[len(regions)] = filtered_point_index\n\n regions.append(region)\n\n # Add the filtered data to the vor object\n vor.filtered_points = points_center\n vor.filtered_regions = regions\n vor.filtered_point_region = filtered_point_region\n vor.filtered_region_point = filtered_region_point[:len(regions)]\n\n # Done!\n return vor\n\n\ndef 
plot_filtered_voronoi(vor, bounds, delta_x, delta_y):\n # Check first if we have the filtered data\n if(not hasattr(vor, 'filtered_points')\n and not hasattr(vor, 'filtered_regions')):\n return\n\n axes = plt.gca()\n\n # Plot initial points\n axes.plot(vor.filtered_points[:, 0], vor.filtered_points[:, 1], 'b.')\n\n # Plot ridges and ridge points and centroids\n for region in vor.filtered_regions:\n # Ridge Points\n vertices = vor.vertices[region, :]\n axes.plot(vertices[:, 0], vertices[:, 1], 'go')\n\n # Ridges\n vertices = vor.vertices[region + [region[0]], :]\n axes.plot(vertices[:, 0], vertices[:, 1], 'k-')\n\n # Centroids\n centroid_region = centroid(vertices)\n axes.plot(centroid_region[0], centroid_region[1], 'r.')\n\n axes.set_xlim([bounds[0] - (delta_x/10), bounds[1] + (delta_x/10)])\n axes.set_ylim([bounds[2] - (delta_y/10), bounds[3] + (delta_y/10)])\n plt.show()\n\n\ndef plot_voronoi_delaunay(vor, delaunay, bounds, delta_x, delta_y):\n # Check first if we have the filtered data\n if(not hasattr(vor, 'filtered_points')\n and not hasattr(vor, 'filtered_regions')):\n return\n\n axes = plt.gca()\n\n # Plot initial points\n axes.plot(vor.filtered_points[:, 0], vor.filtered_points[:, 1], 'b.')\n\n # Plot ridges and ridge points and centroids\n for region in vor.filtered_regions:\n # Ridge Points\n vertices = vor.vertices[region, :]\n axes.plot(vertices[:, 0], vertices[:, 1], 'go')\n\n # Ridges\n vertices = vor.vertices[region + [region[0]], :]\n axes.plot(vertices[:, 0], vertices[:, 1], 'k-')\n\n # Centroids\n centroid_region = centroid(vertices)\n axes.plot(centroid_region[0], centroid_region[1], 'r.')\n\n # Now plot Delaunay triangulations\n plt.triplot(\n delaunay.points[:, 0], delaunay.points[:, 1], delaunay.simplices\n )\n\n axes.set_xlim([bounds[0] - (delta_x/10), bounds[1] + (delta_x/10)])\n axes.set_ylim([bounds[2] - (delta_y/10), bounds[3] + (delta_y/10)])\n plt.show()\n\n\ndef centroid(points, alt_return=False):\n # Find Signed Area\n A = 0\n\n for i 
in range(len(points)):\n i1 = (i+1) % len(points)\n A += (points[i][0] * points[i1][1]) - (points[i1][0] * points[i][1])\n A /= 2\n if(not A):\n return alt_return\n # Now find centroid points\n Cx = 0\n Cy = 0\n for i in range(len(points)):\n i1 = (i+1) % len(points)\n Cx += (\n (points[i][0] + points[i1][0]) *\n ((points[i][0] * points[i1][1]) - (points[i1][0] * points[i][1]))\n )\n Cy += (\n (points[i][1] + points[i1][1]) *\n ((points[i][0] * points[i1][1]) - (points[i1][0] * points[i][1]))\n )\n Cx /= (6 * A)\n Cy /= (6 * A)\n return np.array([Cx, Cy])\n\n\ndef get_slope_and_intercept(a, b):\n slope = (b[1]-a[1])/(b[0]-[a[0]])\n inter = a[1] - slope * a[0]\n return (slope, inter)\n\n\ndef hash_list(a):\n return hash(str(a))" ]
[ [ "matplotlib.pyplot.gca", "scipy.spatial.Voronoi", "numpy.logical_and", "matplotlib.pyplot.triplot", "numpy.copy", "numpy.append", "numpy.array", "matplotlib.pyplot.show", "numpy.where" ] ]
Zork777/ts_summer
[ "f29e4a24a4df14d9a8af6d12eef400e8ddcb8cd2" ]
[ "stability_index.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom datetime import timedelta\n\nimport isodate\n\n\ndef to_relative(multi_ts: pd.DataFrame):\n \"\"\"\n Transforms multi-dimensional time series into relative values\n \"\"\"\n return multi_ts.apply(lambda x: x / x.sum(), axis=1)\n\n\ndef calculate_si(real, reference, fill_val=0.5):\n \"\"\"\n Calculates population stability index using following formula\n SI = Σ (pA i - pB i ) * ( ln(pA i ) - ln(pB i ) )\n\n Parameters\n ----------\n real : 1d np.array\n Distribution for\n reference : 1d np.array\n Reference distribution.\n fill_val : float\n Value to replace NANs with\n\n Returns\n -------\n stability_index : float\n \"\"\"\n\n si = (real - reference) * (np.log(real) - np.log(reference))\n\n return np.sum(si)\n\n\ndef _get_reference_time_delta(reference_type, granularity):\n \"\"\"Gets timedelta for one of the following reference types: {'day', 'hour', 'instant'}\"\"\"\n if reference_type == 'day':\n delta = timedelta(days=1)\n elif reference_type == 'hour':\n delta = timedelta(hours=1)\n else:\n delta = isodate.parse_duration(granularity)\n return delta\n\n\ndef calculate_ts_stability_index(\n ts, incoming_point, reference_type='day', granularity='PT1H', fill_val=0.5):\n \"\"\"\n Calculates population stability index for n-dimensional time-series.\n\n Parameters\n ----------\n ts : pd.Dataframe (n_samples, n_dim) with datetime index\n N-dimensional time-series with granularity of the following format \"dd:hh:mm\"\n incoming_point : pd.Dataframe of shape (1, n_dim_new) with datetime index\n New point, for which to make decision about stability. N_dim_new doesn't have\n to be equal to n_dim. 
In case when it's either less or higher all missing\n previous points for that dimension are filled with fill_val (usually quasi-null) value.\n reference_type : {'instant', 'day', 'week'}\n Point which distribution gets compared to the incoming point.\n 'instant' stand for previous point, 'day' for point 24 hours ago and\n 'week' stands for 24*7 hours ago.\n granularity: str\n Frequency of time-series of the following format \"dd::hh::mm\"\n fill_val : float\n Value to use when synchronizing batches with different number of dimensions\n\n Returns\n -------\n si : float\n Stability index for incoming point calculated relative to reference point.\n \"\"\"\n if not isinstance(ts, pd.DataFrame):\n raise TypeError(\n 'ts argument must be of type pandas Dataframe, {} provided.'.format(type(ts)))\n\n if reference_type not in {'instant', 'day', 'week'}:\n raise TypeError('reference_point must be one of [instant, day, week]}')\n\n current_time = incoming_point.index[-1]\n\n time_delta = _get_reference_time_delta(reference_type, granularity)\n\n reference_time = current_time - time_delta\n if reference_time not in ts.index:\n raise KeyError(f\"There is no point with time {reference_time} in the time-series\")\n reference_point = ts.loc[[reference_time]] # use double brackets to slice point as dataframe instead of series\n\n si = calculate_si(incoming_point.values, reference_point.values, fill_val=fill_val)\n\n return si\n\n\ndef calculate_ts_stability_index_batch(ts, **kwargs):\n \"\"\"Returns series with stability index calculated for every point of ts, where possible\"\"\"\n si_values = []\n timestamps = []\n for time, row in ts.iterrows():\n ts_history = ts[:time].iloc[:-1]\n try:\n si_value = calculate_ts_stability_index(ts_history, row.to_frame().T, **kwargs)\n except KeyError: # means either not enouqh points at the start or not standart granularity (like 5 hours)\n si_value = None\n si_values.append(si_value)\n timestamps.append(time)\n\n return 
pd.Series(data=si_values, index=timestamps)\n\n\ndef make_si_predictor(ts: pd.DataFrame, granularity='PT1H', reference_type='day'):\n # calculate_ts_stability_index_batch\n # fit model\n # return predictor, si_history\n pass\n\n\ndef detect_with_dynamic_threshold(\n ts,\n incoming_point,\n si_history,\n predictor,\n):\n pass" ]
[ [ "numpy.log", "numpy.sum", "pandas.Series" ] ]
kpj/numpyro
[ "eb03c3db993cff8a1ab7613ea334d80a26de42ff" ]
[ "examples/bnn.py" ]
[ "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nExample: Bayesian Neural Network\n================================\n\nWe demonstrate how to use NUTS to do inference on a simple (small)\nBayesian neural network with two hidden layers.\n\n.. image:: ../_static/img/examples/bnn.png\n :align: center\n\"\"\"\n\nimport argparse\nimport os\nimport time\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom jax import vmap\nimport jax.numpy as jnp\nimport jax.random as random\n\nimport numpyro\nfrom numpyro import handlers\nimport numpyro.distributions as dist\nfrom numpyro.infer import MCMC, NUTS\n\nmatplotlib.use('Agg') # noqa: E402\n\n\n# the non-linearity we use in our neural network\ndef nonlin(x):\n return jnp.tanh(x)\n\n\n# a two-layer bayesian neural network with computational flow\n# given by D_X => D_H => D_H => D_Y where D_H is the number of\n# hidden units. (note we indicate tensor dimensions in the comments)\ndef model(X, Y, D_H):\n\n D_X, D_Y = X.shape[1], 1\n\n # sample first layer (we put unit normal priors on all weights)\n w1 = numpyro.sample(\"w1\", dist.Normal(jnp.zeros((D_X, D_H)), jnp.ones((D_X, D_H)))) # D_X D_H\n z1 = nonlin(jnp.matmul(X, w1)) # N D_H <= first layer of activations\n\n # sample second layer\n w2 = numpyro.sample(\"w2\", dist.Normal(jnp.zeros((D_H, D_H)), jnp.ones((D_H, D_H)))) # D_H D_H\n z2 = nonlin(jnp.matmul(z1, w2)) # N D_H <= second layer of activations\n\n # sample final layer of weights and neural network output\n w3 = numpyro.sample(\"w3\", dist.Normal(jnp.zeros((D_H, D_Y)), jnp.ones((D_H, D_Y)))) # D_H D_Y\n z3 = jnp.matmul(z2, w3) # N D_Y <= output of the neural network\n\n # we put a prior on the observation noise\n prec_obs = numpyro.sample(\"prec_obs\", dist.Gamma(3.0, 1.0))\n sigma_obs = 1.0 / jnp.sqrt(prec_obs)\n\n # observe data\n numpyro.sample(\"Y\", dist.Normal(z3, sigma_obs), obs=Y)\n\n\n# helper function for HMC inference\ndef 
run_inference(model, args, rng_key, X, Y, D_H):\n start = time.time()\n kernel = NUTS(model)\n mcmc = MCMC(kernel, args.num_warmup, args.num_samples, num_chains=args.num_chains,\n progress_bar=False if \"NUMPYRO_SPHINXBUILD\" in os.environ else True)\n mcmc.run(rng_key, X, Y, D_H)\n mcmc.print_summary()\n print('\\nMCMC elapsed time:', time.time() - start)\n return mcmc.get_samples()\n\n\n# helper function for prediction\ndef predict(model, rng_key, samples, X, D_H):\n model = handlers.substitute(handlers.seed(model, rng_key), samples)\n # note that Y will be sampled in the model because we pass Y=None here\n model_trace = handlers.trace(model).get_trace(X=X, Y=None, D_H=D_H)\n return model_trace['Y']['value']\n\n\n# create artificial regression dataset\ndef get_data(N=50, D_X=3, sigma_obs=0.05, N_test=500):\n D_Y = 1 # create 1d outputs\n np.random.seed(0)\n X = jnp.linspace(-1, 1, N)\n X = jnp.power(X[:, np.newaxis], jnp.arange(D_X))\n W = 0.5 * np.random.randn(D_X)\n Y = jnp.dot(X, W) + 0.5 * jnp.power(0.5 + X[:, 1], 2.0) * jnp.sin(4.0 * X[:, 1])\n Y += sigma_obs * np.random.randn(N)\n Y = Y[:, np.newaxis]\n Y -= jnp.mean(Y)\n Y /= jnp.std(Y)\n\n assert X.shape == (N, D_X)\n assert Y.shape == (N, D_Y)\n\n X_test = jnp.linspace(-1.3, 1.3, N_test)\n X_test = jnp.power(X_test[:, np.newaxis], jnp.arange(D_X))\n\n return X, Y, X_test\n\n\ndef main(args):\n N, D_X, D_H = args.num_data, 3, args.num_hidden\n X, Y, X_test = get_data(N=N, D_X=D_X)\n\n # do inference\n rng_key, rng_key_predict = random.split(random.PRNGKey(0))\n samples = run_inference(model, args, rng_key, X, Y, D_H)\n\n # predict Y_test at inputs X_test\n vmap_args = (samples, random.split(rng_key_predict, args.num_samples * args.num_chains))\n predictions = vmap(lambda samples, rng_key: predict(model, rng_key, samples, X_test, D_H))(*vmap_args)\n predictions = predictions[..., 0]\n\n # compute mean prediction and confidence interval around median\n mean_prediction = jnp.mean(predictions, axis=0)\n 
percentiles = np.percentile(predictions, [5.0, 95.0], axis=0)\n\n # make plots\n fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)\n\n # plot training data\n ax.plot(X[:, 1], Y[:, 0], 'kx')\n # plot 90% confidence level of predictions\n ax.fill_between(X_test[:, 1], percentiles[0, :], percentiles[1, :], color='lightblue')\n # plot mean prediction\n ax.plot(X_test[:, 1], mean_prediction, 'blue', ls='solid', lw=2.0)\n ax.set(xlabel=\"X\", ylabel=\"Y\", title=\"Mean predictions with 90% CI\")\n\n plt.savefig('bnn_plot.pdf')\n\n\nif __name__ == \"__main__\":\n assert numpyro.__version__.startswith('0.5.0')\n parser = argparse.ArgumentParser(description=\"Bayesian neural network example\")\n parser.add_argument(\"-n\", \"--num-samples\", nargs=\"?\", default=2000, type=int)\n parser.add_argument(\"--num-warmup\", nargs='?', default=1000, type=int)\n parser.add_argument(\"--num-chains\", nargs='?', default=1, type=int)\n parser.add_argument(\"--num-data\", nargs='?', default=100, type=int)\n parser.add_argument(\"--num-hidden\", nargs='?', default=5, type=int)\n parser.add_argument(\"--device\", default='cpu', type=str, help='use \"cpu\" or \"gpu\".')\n args = parser.parse_args()\n\n numpyro.set_platform(args.device)\n numpyro.set_host_device_count(args.num_chains)\n\n main(args)\n" ]
[ [ "numpy.random.seed", "matplotlib.use", "matplotlib.pyplot.subplots", "numpy.percentile", "matplotlib.pyplot.savefig", "numpy.random.randn" ] ]
sahitpj/Gaussian-EM
[ "2a1ef6afedb1b527637332140a15bd4b892aa613" ]
[ "sample.py" ]
[ "from utils import cov\nimport torch, random, math\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nfrom multiprocessing import Queue\nfrom torch.multiprocessing import Process\n\n\nclass GaussianMMSampler(object):\n '''Gaussian Mixture Model Sampler\n\n Gaussian mixture models, is a set of n Gaussians,\n each with their mean - ui and covaraiance matrix - \n sigmai and a set of weights for each Gaussian denoting \n the probability that if a point were to be sampled from this \n model, then that percentage would come from Gaussian\n Gi.\n\n Args:\n weights: The weights of each Gaussian, in the form\n of a list of Tensors\n means: Means/Centers of the Gaussians\n cov_matrices: The covariance matrices of the \n given Gaussians\n '''\n def __init__(self, weights , mean, cov_matrices):\n self.weights = weights\n self.means = mean\n self.cov_matrices = cov_matrices\n self.no_of_gaussians = len(weights)\n\n\n def mixture_sampling(self, no_of_points):\n '''Sampling from the given mixture model\n\n Given our mixture model, we sample points \n with weights as probabilities.\n\n Args:\n no_of_points: The number of points we \n want to sample\n\n Returns:\n 1. List of tensor points from the mixture\n model\n 2. 
Gaussian IDs for the points\n '''\n points = list()\n gaussian_id = list()\n\n #iterates through the list of Gaussians to\n # sample points in according to their percentages\n # from each gaussian\n\n for i in xrange(self.no_of_gaussians):\n # Define Gaussian Sampler\n gs = GaussianSampler(self.means[i], self.cov_matrices[i])\n # Sample points from Gaussian Sampler\n sample_points = gs.sample_list(int(math.ceil(self.weights[i]*no_of_points)))\n points.extend(sample_points)\n for j in xrange(int(math.ceil(self.weights[i]*no_of_points))):\n # Adding the Gaussian Ids for the points\n gaussian_id.append(i+1)\n assert(len(points) >= no_of_points)\n return points, gaussian_id\n\n\n\nclass GaussianSampler(object):\n '''Gaussian Sampler\n\n Function which samples from a Gaussian\n given the mean and the covariance of the \n Gaussian\n\n Args:\n mean: Mean of the Gaussian\n cov: covariance matrix of the Gaussian\n '''\n def __init__(self, mean, cov):\n self.mean = mean\n self.cov = cov\n # Define normal function using Pytorch's multivaraiate function\n self.normal_function = MultivariateNormal(self.mean, self.cov)\n\n def sample(self):\n '''Sampling function \n\n Returns:\n Sampled point\n '''\n return self.normal_function.sample()\n \n\n def __sample(self, q):\n '''Private function for Sampling\n\n Has an extra argument to place a \n multiprocessing Queue in order to extract \n the returned value\n '''\n q.put(self.normal_function.sample())\n\n def sample_list(self, no_of_points):\n sample_points = list()\n # Sample points separately in each thread\n for i in xrange(no_of_points):\n q = Queue()\n p = Process(target=self.__sample, args=(q,))\n p.start()\n # retrieves point from Queue\n sample_points.append(q.get())\n p.join()\n return sample_points #being returnd as list of points.\n\n\nclass GaussianRSampler(GaussianSampler):\n '''Gaussian Random Sampler\n\n Randomly creates a Gaussian of a specific\n dimension, which can then be sampled from\n\n Args:\n dimensions: The 
number of dimensions for \n the Gaussian\n '''\n def __init__(self, dimensions):\n self.dimensions = dimensions\n # Initilaise points from the function\n self.points = self.initilize_points()\n # Initialise mean from the generated points\n self.mean = self.initilize_mean()\n # Initialise Covariance Matrix from the given points\n self.cov = self.initilize_cov_matrix()\n self.zero = torch.zeros(self.mean.shape)\n\n def initilize_points(self):\n return torch.rand(self.dimensions+100, self.dimensions)\n\n def initilize_mean(self):\n '''Initialise mean using given points\n\n Returns\n mean\n '''\n k = torch.mean(self.points, dim=0).view(self.dimensions, )\n assert(k.shape[0] == self.dimensions)\n return k\n\n def initilize_cov_matrix(self):\n '''Initialise covariance matrix using the given points\n\n Returns\n covariance matrix\n '''\n # using utils function to find covariance matrix\n k = cov(self.points)\n assert(k.shape[0] == self.dimensions)\n assert(k.shape[1] == self.dimensions)\n return k\n \n def distance(self, point):\n '''Returns L2 Norm of the point from the center\n which is equivalent to euclidean distance\n '''\n return torch.norm(point-self.mean, p=2)\n\n def sample_distance(self):\n '''Samples distance of a random point\n sampled from the Gaussian\n '''\n point = self.normal_function.sample()\n return torch.norm(point-self.mean)\n\n\n\n# j = GaussianSampler(torch.zeros(3,1), torch.eye(3))\n# k = j.sample()\n# print k" ]
[ [ "torch.mean", "torch.norm", "torch.zeros", "torch.rand", "torch.distributions.multivariate_normal.MultivariateNormal", "torch.multiprocessing.Process" ] ]
abau171/shiny-things
[ "3cf9cd710c4402c9e64496220b7e28d8133b8107" ]
[ "setup.py" ]
[ "from distutils.core import setup, Extension\nimport numpy\n\nmodule = Extension(\"_shinythings\",\n sources=[\"_shinythings/_shinythings.c\",\n \"_shinythings/_shinythings_Scene.c\",\n \"_shinythings/_shinythings_Matrix.c\",\n \"_shinythings/render.c\",\n \"_shinythings/color.c\",\n \"_shinythings/py_parse.c\",\n \"_shinythings/kd_tree.c\",\n \"_shinythings/scene.c\",\n \"_shinythings/matrix.c\",\n \"_shinythings/geometry.c\"],\n include_dirs=[numpy.get_include(), \"_shinythings/\"],\n extra_compile_args=[\"-O3\"])\n\nsetup(\n name=\"shinythings\",\n description=\"Simple ray tracer supporting Phong shading, Wavefront .obj file loading, among other features\",\n author=\"Andrew Bauer\",\n packages=[\"shinythings\"],\n license=\"MIT\",\n ext_modules=[module])\n\n" ]
[ [ "numpy.get_include" ] ]
nfgallimore/microsoft-bonsai-api
[ "932f657df4dd771af6da2cce2c300870aa8bde53" ]
[ "Python/samples/microgrid/main.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"\nMSFT Bonsai SDK3 Template for Simulator Integration using Python\nCopyright 2020 Microsoft\n\nUsage:\n For registering simulator with the Bonsai service for training:\n python simulator_integration.py \n Then connect your registered simulator to a Brain via UI, or using the CLI: `bonsai simulator unmanaged connect -b <brain-name> -a <train-or-assess> -c BalancePole --simulator-name Microgrid.\n\"\"\"\n\nimport copy\nimport datetime\nimport json\nimport os\nimport pathlib\nimport random\nimport sys\nimport time\nfrom typing import Dict\n\nfrom dotenv import load_dotenv, set_key\nfrom microsoft_bonsai_api.simulator.client import BonsaiClient, BonsaiClientConfig\nfrom microsoft_bonsai_api.simulator.generated.models import (\n SimulatorInterface,\n SimulatorState,\n SimulatorSessionResponse,\n)\nfrom azure.core.exceptions import HttpResponseError\nfrom functools import partial\n\nfrom policies import random_policy, brain_policy, rule_based\nfrom sim import microgrid_sim\n\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nLOG_PATH = \"logs\"\ndefault_config = {\"starting_time_step\": 0}\n\n\ndef ensure_log_dir(log_full_path):\n \"\"\"\n Ensure the directory for logs exists — create if needed.\n \"\"\"\n print(f\"logfile: {log_full_path}\")\n logs_directory = pathlib.Path(log_full_path).parent.absolute()\n print(f\"Checking {logs_directory}\")\n if not pathlib.Path(logs_directory).exists():\n print(\n \"Directory does not exist at {0}, creating now...\".format(\n str(logs_directory)\n )\n )\n logs_directory.mkdir(parents=True, exist_ok=True)\n\n\nclass TemplateSimulatorSession:\n def __init__(\n self,\n render: bool = False,\n env_name: str = \"Microgrid\",\n log_data: bool = False,\n log_file_name: str = None,\n ):\n \"\"\"Simulator Interface with the Bonsai Platform\n\n Parameters\n ----------\n render : bool, optional\n Whether to visualize episodes during training, by default False, not implemented.\n env_name : str, 
optional\n Name of simulator interface, by default \"Microgrid\"\n log_data: bool, optional\n Whether to log data, by default False\n log_file_name : str, optional\n where to log data, by default None. If not specified, will generate a name.\n \"\"\"\n self.sim = microgrid_sim.MicrogridSim()\n self.count_view = False\n self.env_name = env_name\n self.render = render\n self.log_data = log_data\n if not log_file_name:\n current_time = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n log_file_name = current_time + \"_\" + env_name + \"_log.csv\"\n\n self.log_full_path = os.path.join(LOG_PATH, log_file_name)\n ensure_log_dir(self.log_full_path)\n\n def get_state(self) -> Dict[str, float]:\n \"\"\"Extract current states from the simulator\n\n Returns\n -------\n Dict[str, float]\n Returns float of current values from the simulator\n \"\"\"\n state = self.sim.get_state()\n \n # convert all states to floats (e.g. as opposed to np.float64)\n for key in state:\n state[key] = float(state[key])\n \n return state\n\n def halted(self) -> bool:\n \"\"\"Halt current episode. Note, this should only return True if the simulator has reached an unexpected state.\n\n Returns\n -------\n bool\n Whether to terminate current episode\n \"\"\"\n return False\n\n def episode_start(self, config: Dict = None) -> None:\n \"\"\"Initialize simulator environment using scenario paramters from inkling.\n\n Parameters\n ----------\n config : Dict, optional. 
The following keys are supported:\n - starting_time_step: The time index we will initialize the sim with.\n \"\"\"\n # Reset the sim, passing fields from config\n if config is None or config == {}:\n config = default_config\n # Keep the config around so we can log it later\n self.config = config\n self.sim.episode_start(config)\n\n def log_iterations(self, state, action, episode: int = 0, iteration: int = 1):\n \"\"\"Log iterations during training to a CSV.\n\n Parameters\n ----------\n state : Dict\n action : Dict\n episode : int, optional\n iteration : int, optional\n \"\"\"\n\n import pandas as pd\n\n def add_prefixes(d, prefix: str):\n return {f\"{prefix}_{k}\": v for k, v in d.items()}\n\n state = add_prefixes(state, \"state\")\n action = add_prefixes(action, \"action\")\n config = add_prefixes(self.config, \"config\")\n data = {**state, **action, **config}\n data[\"episode\"] = episode\n data[\"iteration\"] = iteration\n log_df = pd.DataFrame(data, index=[0])\n\n if os.path.exists(self.log_full_path):\n log_df.to_csv(\n path_or_buf=self.log_full_path, mode=\"a\", header=False, index=False\n )\n else:\n log_df.to_csv(\n path_or_buf=self.log_full_path, mode=\"w\", header=True, index=False\n )\n\n def episode_step(self, action: Dict):\n \"\"\"Step through the environment for a single iteration.\n\n Parameters\n ----------\n action : Dict\n An action to take to modulate environment.\n \"\"\"\n self.sim.episode_step(action)\n\n def sim_render(self):\n raise NotImplemented(\"The rendering feature is not implemented for this sim.\")\n\ndef env_setup():\n \"\"\"Helper function to setup connection with Project Bonsai\n\n Returns\n -------\n Tuple\n workspace, and access_key\n \"\"\"\n\n load_dotenv(verbose=True)\n workspace = os.getenv(\"SIM_WORKSPACE\")\n access_key = os.getenv(\"SIM_ACCESS_KEY\")\n\n env_file_exists = os.path.exists(\".env\")\n if not env_file_exists:\n open(\".env\", \"a\").close()\n\n if not all([env_file_exists, workspace]):\n workspace = 
input(\"Please enter your workspace id: \")\n set_key(\".env\", \"SIM_WORKSPACE\", workspace)\n if not all([env_file_exists, access_key]):\n access_key = input(\"Please enter your access key: \")\n set_key(\".env\", \"SIM_ACCESS_KEY\", access_key)\n\n load_dotenv(verbose=True, override=True)\n workspace = os.getenv(\"SIM_WORKSPACE\")\n access_key = os.getenv(\"SIM_ACCESS_KEY\")\n\n return workspace, access_key\n\n\ndef test_policy(\n num_episodes: int = 1,\n render: bool = True,\n num_iterations: int = 1000,\n log_iterations: bool = False,\n policy=random_policy,\n policy_name: str = \"random\",\n):\n \"\"\"Test a policy using random actions over a fixed number of episodes\n\n Parameters\n ----------\n num_episodes : int, optional\n number of iterations to run, by default 10\n \"\"\"\n\n current_time = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n log_file_name = current_time + \"_\" + policy_name + \"_log.csv\"\n sim = TemplateSimulatorSession(\n render=render, log_data=log_iterations, log_file_name=log_file_name\n )\n\n import numpy as np\n for episode in range(num_episodes):\n iteration = 0\n terminal = False\n\n if policy_name == \"exported\":\n config = {\"starting_time_step\": 0, \"cost_battery\": 0 }\n else:\n config = {\"starting_time_step\": 0, \"cost_battery\": 0 } #np.random.randint(0, 8567)\n print(f\"Config: {config}\")\n sim.episode_start(config=config)\n sim_state = sim.get_state()\n print(f\"Observations: {sim_state}\")\n while not terminal:\n action = policy(sim_state)\n sim.episode_step(action)\n sim_state = sim.get_state()\n if log_iterations:\n sim.log_iterations(sim_state, action, episode, iteration)\n print(f\"Running iteration #{iteration} for episode #{episode}\")\n print(f\"Actions: {action}\")\n print(f\"Observations: {sim_state}\")\n iteration += 1\n terminal = iteration >= num_iterations\n\n return sim\n\n\ndef main(\n render: bool = False, log_iterations: bool = False, config_setup: bool = False\n):\n \"\"\"Main entrypoint 
for running simulator connections\n\n Parameters\n ----------\n render : bool, optional\n visualize steps in environment, by default True, by default False\n log_iterations: bool, optional\n log iterations during training to a CSV file\n \"\"\"\n\n # workspace environment variables\n if config_setup:\n env_setup()\n load_dotenv(verbose=True, override=True)\n\n # Grab standardized way to interact with sim API\n sim = TemplateSimulatorSession(render=render, log_data=log_iterations)\n\n # Configure client to interact with Bonsai service\n config_client = BonsaiClientConfig()\n client = BonsaiClient(config_client)\n\n # # Load json file as simulator integration config type file\n with open(\"microgrid_description.json\") as file:\n interface = json.load(file)\n\n # Create simulator session and init sequence id\n registration_info = SimulatorInterface(\n name=sim.env_name,\n timeout=interface[\"timeout\"],\n simulator_context=config_client.simulator_context,\n description=interface[\"description\"],\n )\n\n def CreateSession(\n registration_info: SimulatorInterface, config_client: BonsaiClientConfig\n ):\n \"\"\"Creates a new Simulator Session and returns new session, sequenceId\n \"\"\"\n\n try:\n print(\n \"config: {}, {}\".format(config_client.server, config_client.workspace)\n )\n registered_session: SimulatorSessionResponse = client.session.create(\n workspace_name=config_client.workspace, body=registration_info\n )\n print(\"Registered simulator. 
{}\".format(registered_session.session_id))\n\n return registered_session, 1\n except HttpResponseError as ex:\n print(\n \"HttpResponseError in Registering session: StatusCode: {}, Error: {}, Exception: {}\".format(\n ex.status_code, ex.error.message, ex\n )\n )\n raise ex\n except Exception as ex:\n print(\n \"UnExpected error: {}, Most likely, it's some network connectivity issue, make sure you are able to reach bonsai platform from your network.\".format(\n ex\n )\n )\n raise ex\n\n registered_session, sequence_id = CreateSession(registration_info, config_client)\n episode = 0\n iteration = 0\n\n try:\n while True:\n # Advance by the new state depending on the event type\n # TODO: it's risky doing `get_state` without first initializing the sim\n sim_state = SimulatorState(\n sequence_id=sequence_id, state=sim.get_state(), halted=sim.halted(),\n )\n try:\n event = client.session.advance(\n workspace_name=config_client.workspace,\n session_id=registered_session.session_id,\n body=sim_state,\n )\n sequence_id = event.sequence_id\n print(\n \"[{}] Last Event: {}\".format(time.strftime(\"%H:%M:%S\"), event.type)\n )\n except HttpResponseError as ex:\n print(\n \"HttpResponseError in Advance: StatusCode: {}, Error: {}, Exception: {}\".format(\n ex.status_code, ex.error.message, ex\n )\n )\n # This can happen in network connectivity issue, though SDK has retry logic, but even after that request may fail,\n # if your network has some issue, or sim session at platform is going away..\n # So let's re-register sim-session and get a new session and continue iterating. 
:-)\n registered_session, sequence_id = CreateSession(\n registration_info, config_client\n )\n continue\n except Exception as err:\n print(\"Unexpected error in Advance: {}\".format(err))\n # Ideally this shouldn't happen, but for very long-running sims It can happen with various reasons, let's re-register sim & Move on.\n # If possible try to notify Bonsai team to see, if this is platform issue and can be fixed.\n registered_session, sequence_id = CreateSession(\n registration_info, config_client\n )\n continue\n\n # Event loop\n if event.type == \"Idle\":\n time.sleep(event.idle.callback_time)\n print(\"Idling...\")\n elif event.type == \"EpisodeStart\":\n print(event.episode_start.config)\n sim.episode_start(event.episode_start.config)\n episode += 1\n elif event.type == \"EpisodeStep\":\n iteration += 1\n sim.episode_step(event.episode_step.action)\n #print(event.episode_step.action)\n #print(sim.get_state())\n if sim.log_data:\n sim.log_iterations(\n episode=episode,\n iteration=iteration,\n state=sim.get_state(),\n action=event.episode_step.action,\n )\n elif event.type == \"EpisodeFinish\":\n print(\"Episode Finishing...\")\n iteration = 0\n elif event.type == \"Unregister\":\n print(\n \"Simulator Session unregistered by platform because '{}', Registering again!\".format(\n event.unregister.details\n )\n )\n registered_session, sequence_id = CreateSession(\n registration_info, config_client\n )\n continue\n else:\n pass\n except KeyboardInterrupt:\n # Gracefully unregister with keyboard interrupt\n client.session.delete(\n workspace_name=config_client.workspace,\n session_id=registered_session.session_id,\n )\n print(\"Unregistered simulator.\")\n except Exception as err:\n # Gracefully unregister for any other exceptions\n client.session.delete(\n workspace_name=config_client.workspace,\n session_id=registered_session.session_id,\n )\n print(\"Unregistered simulator because: {}\".format(err))\n\n\nif __name__ == \"__main__\":\n\n import argparse\n\n 
parser = argparse.ArgumentParser(description=\"Bonsai and Simulator Integration...\")\n parser.add_argument(\n \"--render\", action=\"store_true\", default=False, help=\"Render training episodes\",\n )\n parser.add_argument(\n \"--log-iterations\",\n action=\"store_true\",\n default=False,\n help=\"Log iterations during training\",\n )\n parser.add_argument(\n \"--config-setup\",\n action=\"store_true\",\n default=False,\n help=\"Use a local environment file to setup access keys and workspace ids\",\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--test-random\",\n action=\"store_true\",\n help=\"Run simulator locally with a random policy, without connecting to platform\",\n )\n\n group.add_argument(\n \"--test-rule-based\",\n action=\"store_true\",\n help=\"Run simulator locally with a rule based baseline policy, without connecting to platform\"\n )\n\n group.add_argument(\n \"--test-exported\",\n type=int,\n const=5000, # if arg is passed with no PORT, use this\n nargs=\"?\",\n metavar=\"PORT\",\n help=\"Run simulator with an exported brain running on localhost:PORT (default 5000)\",\n )\n\n parser.add_argument(\n \"--iteration-limit\",\n type=int,\n metavar=\"EPISODE_ITERATIONS\",\n help=\"Episode iteration limit when running local test.\",\n default=200,\n )\n\n args = parser.parse_args()\n\n if args.test_random:\n test_policy(\n render=args.render, log_iterations=args.log_iterations, policy=random_policy\n )\n elif args.test_rule_based:\n test_policy(\n render=args.render, log_iterations=args.log_iterations, policy=rule_based\n )\n elif args.test_exported:\n port = args.test_exported\n url = f\"http://localhost:{port}\"\n print(f\"Connecting to exported brain running at {url}...\")\n trained_brain_policy = partial(brain_policy, exported_brain_url=url)\n test_policy(\n render=args.render,\n log_iterations=args.log_iterations,\n policy=trained_brain_policy,\n policy_name=\"exported\",\n num_iterations=args.iteration_limit,\n )\n 
else:\n main(\n config_setup=args.config_setup,\n render=args.render,\n log_iterations=args.log_iterations,\n )\n" ]
[ [ "pandas.DataFrame" ] ]
shreyayadav/traDSSAT
[ "cc9650f896910c0d0a7a382aff36bef89aba70f2" ]
[ "tradssat/tmpl/file.py" ]
[ "import os\nimport re\n\nimport numpy as np\n\nfrom tradssat.utils import detect_encod\nfrom .vals import FileValueSet, ValueSubSection\nfrom .var import VariableSet, CODE_MISS\n\n\nclass File(object):\n \"\"\"\n Parent class for all file objects.\n \"\"\"\n\n def __init__(self, file):\n \"\"\"\n\n Parameters\n ----------\n file: str\n\n \"\"\"\n self.file = file\n self._var_info = VariableSet(self._get_var_info())\n\n self._values = FileValueSet()\n self.encoding = detect_encod(self.file)\n self._read()\n\n def _read(self):\n\n with open(self.file, encoding=self.encoding) as f:\n section = [] # To store lines that go in the same section\n for l in f.readlines():\n\n if l[0] == '!': # skip comments\n continue\n\n if l[0] == '*': # start of section\n\n # Process any previously stored block\n if section:\n self._read_section(section)\n\n # Clear the current block\n section.clear()\n\n if l.strip():\n section.append(l) # Append current line to block\n\n # Read the last block too\n self._read_section(section)\n\n def get_var_type(self, var, sect=None):\n return self.get_var(var, sect).type_\n\n def get_var_lims(self, var, sect=None):\n return self.get_var(var, sect).lims\n\n def get_var_spc(self, var, sect=None):\n return self.get_var(var, sect).spc\n\n def get_var_size(self, var, sect=None):\n \"\"\"\n Returns the size of a variable.\n\n Parameters\n ----------\n var: str\n The name of the variable.\n sect: str\n The name of the section in which this variable appears (optional; for ambiguous cases where a file\n has several variables with the same code).\n\n Returns\n -------\n int\n The size of the variable.\n\n \"\"\"\n return self.get_var(var, sect).size\n\n def get_var_code_miss(self, var, sect=None):\n return self.get_var(var, sect).miss\n\n def get_var(self, var, sect=None):\n return self._var_info.get_var(var, sect)\n\n def get_value(self, var, sect=None, subsect=None, cond=None):\n return self._values.get_value(var, sect=sect, subsect=subsect, cond=cond)\n\n 
def get_dims_val(self, var):\n return self.get_value(var).shape\n\n def add_row(self, sect, subsect=None, vals=None):\n self._values.add_row(sect, subsect, vals)\n\n def remove_row(self, sect, subsect=None, cond=None):\n self._values.remove_row(sect, subsect, cond)\n\n def find_var_sect(self, var):\n return self._values.find_var_sect(var)\n\n def variables(self):\n return list(str(vr) for vr in self._var_info.variables())\n\n def to_dict(self):\n return self._values.to_dict()\n\n def _read_subsection(self, section_name, subblock):\n\n var_names = self._get_var_names(subblock[0])\n\n n_lines = len(subblock) - 1 # -1 for the header line (with \"@\" )\n lengths = [self.get_var_size(vr) for vr in var_names]\n spaces = [self.get_var_spc(vr) for vr in var_names]\n cum_lens = np.insert(np.cumsum(lengths) + np.cumsum(spaces), 0, 0)\n cutoffs = [(cum_lens[i], cum_lens[i + 1]) for i in range(len(var_names))]\n\n d_vals = {vr: self._gen_empty_mtrx(vr, n_lines) for vr in var_names}\n\n for i, l in enumerate(subblock[1:]):\n # Odd workaround necessary because several cultivar names in DSSAT are larger than the allowed space\n # and so run into the next column, which apparently isn't supposed to matter if the next column's value\n # is small enough to allow both to fit. 
(Really?!)\n vals = [\n (l[0 if c[0] == 0 else max(c[0], l.find(' ', c[0], c[1] - 1)):\n None if l.find(' ', c[1] - 1) < 0 else l.find(' ', c[1] - 1)]).strip()\n for c in cutoffs]\n for vr, vl in zip(var_names, vals):\n if not len(vl):\n vl = self.get_var_code_miss(vr)\n d_vals[vr][i] = vl\n\n l_vars = [self._var_info.get_var(vr, sect=section_name) for vr in var_names]\n l_vals = [d_vals[vr] for vr in var_names]\n subsect = ValueSubSection(l_vars, l_vals)\n\n self._values[section_name].add_subsection(subsect)\n\n def _read_section(self, section):\n section_name, section_lines = self._process_section_header(section)\n\n subblock = []\n for l in section_lines: # skip first line (with \"*\")\n if l[0] == '@':\n\n if subblock:\n self._read_subsection(section_name, subblock)\n subblock.clear()\n\n # Append current line to section\n if l.strip().strip('\\x1a'): # '\\x1a' needed for obscure character DSSAT likes to append to .SNX/SQX\n subblock.append(l)\n\n if subblock:\n self._read_subsection(section_name, subblock)\n\n def _gen_empty_mtrx(self, var, size):\n tp = self.get_var_type(var)\n if tp == 'float':\n dtype = float\n elif tp == 'int':\n dtype = int\n elif tp == 'str' or tp == str:\n str_size = self.get_var_size(var)\n dtype = 'U{}'.format(str_size + 5) # +5 just to be safe (with DSSAT input files you never know)\n else:\n dtype = tp\n\n return np.full(size, CODE_MISS, dtype=dtype)\n\n def _get_var_names(self, line):\n var_names = [str(vr) for vr in self._var_info]\n var_names.sort(key=len, reverse=True)\n\n def _strip(txt):\n return re.sub('^[|.\\W]+', '', txt)\n\n final_names = []\n line = _strip(line[1:]) # skip initial \"@\"\n\n while line:\n try:\n name = next(vr for vr in var_names if line.startswith(vr))\n except StopIteration:\n raise ValueError(\n 'No variable matching \"{line}\" for file {nm}.'.format(\n line=line[:20], nm=os.path.split(self.file)[1]\n )\n )\n final_names.append(name)\n line = _strip(line[len(name):])\n\n return final_names\n\n def 
__contains__(self, item):\n return item in self._values\n\n def _get_var_info(self):\n \"\"\"\n Return a set of variables.\n\n Returns\n -------\n set[Variable]\n \"\"\"\n\n raise NotImplementedError\n\n @classmethod\n def matches_file(cls, file):\n \"\"\"\n Checks whether a given file can be read by this class. Must be implemented in subclasses.\n\n Parameters\n ----------\n file: str\n The file to be read.\n\n Returns\n -------\n bool\n ``True`` if the file matches; ``False`` otherwise.\n \"\"\"\n\n raise NotImplementedError\n\n def _process_section_header(self, lines):\n \"\"\"\n\n Parameters\n ----------\n lines\n\n Returns\n -------\n tuple[str, list]\n \"\"\"\n\n raise NotImplementedError\n" ]
[ [ "numpy.cumsum", "numpy.full" ] ]
drib861204/Soft-Actor-Critic-and-Extensions
[ "3075df7430c1c49177b3798d753a9e3f6226672e" ]
[ "run_v2.py" ]
[ "import numpy as np\nimport random\nimport gym\n# import pybulletgym # to run e.g. HalfCheetahPyBullet-v0\n# import pybullet_envs # to run e.g. HalfCheetahBullet-v0 different reward function bullet-v0 starts ~ -1500. pybullet-v0 starts at 0\nfrom collections import deque\nimport torch\nimport time\nfrom torch.utils.tensorboard import SummaryWriter\nimport argparse\nfrom files import MultiPro\nfrom files.Agent import Agent\nimport json\nfrom Pendulum_v2 import * # added by Ben\nimport matplotlib.pyplot as plt\n\n\ndef timer(start, end):\n \"\"\" Helper to print training time \"\"\"\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"\\nTraining Time: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds))\n\n\ndef evaluate(frame, eval_runs=5, capture=False, rend=False, savedmodel=False, seed=0):\n \"\"\"\n Makes an evaluation run with the current episode\n \"\"\"\n\n reward_batch = []\n\n for i in range(eval_runs):\n\n state_action_log = np.zeros((1,4))\n #state_action_log = np.concatenate((state_action_log,[[1],[3]]),axis=1)\n #print(state_action_log)\n\n state = eval_env.reset(savedmodel, seed)\n rewards = 0\n rep = 0\n rep_max = 200\n if savedmodel:\n rep_max = 10000\n # action_v = 0\n\n while True:\n\n # print(\"eval\")\n # print(rend)\n\n if rend:\n # print(\"render\")\n # eval_env.render(mode=\"human\")\n eval_env.render(i + 1)\n\n action = agent.act(np.expand_dims(state, axis=0), eval=True)\n action = np.clip(action, action_low, action_high)\n state, reward, done, _ = eval_env.step(action[0])\n\n #print(np.asmatrix(state))\n #print(np.transpose(state))\n state_action = np.append(state, action[0])\n #print(state_action)\n state_action_log = np.concatenate((state_action_log,np.asmatrix(state_action)),axis=0)\n #print(state_action_log)\n #print(rep)\n #print(len(state_action_log))\n\n rewards += reward\n if done or rep >= rep_max:\n break\n rep += 1\n\n if savedmodel:\n #print(np.shape(state_action_log)[0])\n 
fig, axs = plt.subplots(4)\n fig.suptitle('SAC Transient Response')\n t = np.arange(0, eval_env.dt*np.shape(state_action_log)[0], eval_env.dt)\n axs[0].plot(t[1:], state_action_log[1:,0])\n axs[3].plot(t[1:], state_action_log[1:,1])\n axs[1].plot(t[1:], state_action_log[1:,2])\n axs[2].plot(t[1:], state_action_log[1:,3]*eval_env.max_torque)\n axs[0].set_ylabel('q1(rad)')\n axs[1].set_ylabel('q2 dot(rad/s)')\n axs[2].set_ylabel('torque(Nm)')\n axs[3].set_ylabel('q1 dot(rad/s)')\n axs[2].set_xlabel('time(s)')\n #axs[0].set_ylim([-0.01,0.06])\n #axs[0].set_ylim([-pi-0.5,pi+0.5])\n axs[1].set_ylim([-34,34])\n axs[2].set_ylim([-12,12])\n plt.show()\n\n print(\"e_ss=\",state_action_log[-1,0])\n print(\"u_ss=\",state_action_log[-1,3]*eval_env.max_torque)\n print(\"q1_min=\",min(state_action_log[1:,0]))\n print(\"q1_min_index=\",np.argmin(state_action_log[1:,0]))\n print(\"OS%=\",min(state_action_log[1:,0])/(eval_env.ang*pi/180))\n print(\"q1_a=\", eval_env.ang*pi/180 * 0.9)\n print(\"q1_b=\", eval_env.ang*pi/180 * 0.1)\n print(\"q1_c=\", eval_env.ang*pi/180 * 0.1)\n print(\"q1_d=\", -eval_env.ang*pi/180 * 0.1)\n min_a = 100\n min_b = 100\n min_c = 100\n min_d = 100\n t_a = 100\n t_b = 100\n t_c = 100\n t_d = 100\n for i in range(1,np.shape(state_action_log)[0]):\n tr_a = eval_env.ang*pi/180 * 0.9\n tr_b = eval_env.ang*pi/180 * 0.1\n tr_c = eval_env.ang*pi/180 * 0.1\n tr_d = -eval_env.ang*pi/180 * 0.1\n diff_a = abs(state_action_log[i,0] - tr_a)\n diff_b = abs(state_action_log[i,0] - tr_b)\n diff_c = abs(state_action_log[i,0] - tr_c)\n diff_d = abs(state_action_log[i,0] - tr_d)\n if diff_a < min_a:\n min_a = diff_a\n t_a = i * eval_env.dt\n if diff_b < min_b:\n min_b = diff_b\n t_b = i * eval_env.dt\n if diff_c < min_c:\n min_c = diff_c\n t_c = i * eval_env.dt\n if diff_d < min_d:\n min_d = diff_d\n t_d = i * eval_env.dt\n print(\"[min_a, t_a, min_b, t_b]=\",[min_a, t_a, min_b, t_b])\n print(\"rising time=\",t_b-t_a)\n print(\"[min_c, t_c, min_d, t_d]=\",[min_c, t_c, 
min_d, t_d])\n print(\"settling time=\",t_c,\"or\",t_d)\n\n\n reward_batch.append(rewards)\n if capture == False and savedmodel == False:\n writer.add_scalar(\"Reward\", np.mean(reward_batch), frame)\n\n\ndef run(args):\n rep_max = 200\n\n \"\"\"Deep Q-Learning.\n\n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n eps_start (float): starting value of epsilon, for epsilon-greedy action selection\n eps_end (float): minimum value of epsilon\n eps_decay (float): multiplicative factor (per episode) for decreasing epsilon\n \"\"\"\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n i_episode = 1\n state = envs.reset(args.saved_model, args.seed)\n score = 0\n frames = args.frames // args.worker\n eval_every = args.eval_every // args.worker\n eval_runs = args.eval_runs\n worker = args.worker\n ERE = args.ere\n if ERE:\n episode_K = 0\n eta_0 = 0.996\n eta_T = 1.0\n # episodes = 0\n max_ep_len = 500 # original = 1000\n c_k_min = 2500 # original = 5000\n\n rep = 0\n for frame in range(1, frames + 1):\n # evaluation runs\n # print(\"run\")\n rep += 1\n\n\n if frame % eval_every == 0 or frame == 1:\n evaluate(frame * worker, eval_runs, rend=args.render_evals, seed=args.seed)\n\n\n action = agent.act(state)\n action = np.clip(action, action_low, action_high)\n next_state, reward, done, _ = envs.step(action) # returns np.stack(obs), np.stack(action) ...\n '''\n if frame > frames * 0.8:\n next_state, reward, done, _ = envs.step_q2dot(action)\n else:\n next_state, reward, done, _ = envs.step(action)\n '''\n # print(state, action, reward, next_state, done)\n # for s, a, r, ns, d in zip(state, action, reward, next_state, done):\n # agent.step(s, a, r, ns, d, frame, ERE)\n agent.step(state, action, reward, next_state, [done], frame, ERE)\n\n if ERE:\n eta_t = eta_0 + (eta_T - eta_0) * (frame / (frames + 1))\n episode_K += 1\n state = 
next_state\n score += np.mean(reward)\n\n # if done.any():\n if done or rep % rep_max == 0:\n if ERE:\n for k in range(1, episode_K):\n c_k = max(int(agent.memory.__len__() * eta_t ** (k * (max_ep_len / episode_K))), c_k_min)\n agent.ere_step(c_k)\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n writer.add_scalar(\"Average100\", np.mean(scores_window), frame * worker)\n print('\\rEpisode {}\\tFrame: [{}/{}]\\t Reward: {:.2f} \\tAverage100 Score: {:.2f}'.format(i_episode * worker, frame * worker, frames, score, np.mean(scores_window)), end=\"\", flush=True)\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tFrame \\tReward: {}\\tAverage100 Score: {:.2f}'.format(i_episode*worker, frame*worker, round(eval_reward,2), np.mean(scores_window)), end=\"\", flush=True)\n i_episode += 1\n state = envs.reset(args.saved_model, args.seed)\n score = 0\n episode_K = 0\n\n\nparser = argparse.ArgumentParser(description=\"\")\nparser.add_argument(\"-env\", type=str, default=\"HalfCheetahBulletEnv-v0\",\n help=\"Environment name, default = HalfCheetahBulletEnv-v0\")\nparser.add_argument(\"-per\", type=int, default=0, choices=[0, 1],\n help=\"Adding Priorizied Experience Replay to the agent if set to 1, default = 0\")\nparser.add_argument(\"-munchausen\", type=int, default=0, choices=[0, 1],\n help=\"Adding Munchausen RL to the agent if set to 1, default = 0\")\nparser.add_argument(\"-dist\", \"--distributional\", type=int, default=0, choices=[0, 1],\n help=\"Using a distributional IQN Critic if set to 1, default=0\")\nparser.add_argument(\"-ere\", type=int, default=0, choices=[0, 1],\n help=\"Adding Emphasizing Recent Experience to the agent if set to 1, default = 0\")\nparser.add_argument(\"-n_step\", type=int, default=1, help=\"Using n-step bootstrapping, default=1\")\nparser.add_argument(\"-info\", type=str, default=\"rwip\", help=\"Information or name of the run\")\nparser.add_argument(\"-d2rl\", type=int, choices=[0, 
1], default=0,\n help=\"Uses Deep Actor and Deep Critic Networks if set to 1 as described in the D2RL Paper: https://arxiv.org/pdf/2010.09163.pdf, default=0\")\nparser.add_argument(\"-frames\", type=int, default=50000,\n help=\"The amount of training interactions with the environment, default is 1mio\")\nparser.add_argument(\"-eval_every\", type=int, default=1000,\n help=\"Number of interactions after which the evaluation runs are performed, default = 1000\")\nparser.add_argument(\"-eval_runs\", type=int, default=3, help=\"Number of evaluation runs performed, default = 1\")\nparser.add_argument(\"-seed\", type=int, default=0, help=\"Seed for the env and torch network weights, default is 0\")\nparser.add_argument(\"--n_updates\", type=int, default=1,\n help=\"Update-to-Data (UTD) ratio, updates taken per step with the environment, default=1\")\nparser.add_argument(\"-lr_a\", type=float, default=3e-4,\n help=\"Actor learning rate of adapting the network weights, default is 3e-4\")\nparser.add_argument(\"-lr_c\", type=float, default=3e-4,\n help=\"Critic learning rate of adapting the network weights, default is 3e-4\")\nparser.add_argument(\"-a\", \"--alpha\", type=float,\n help=\"entropy alpha value, if not choosen the value is leaned by the agent\")\nparser.add_argument(\"-layer_size\", type=int, default=256,\n help=\"Number of nodes per neural network layer, default is 256\")\nparser.add_argument(\"-repm\", \"--replay_memory\", type=int, default=int(1e6),\n help=\"Size of the Replay memory, default is 1e6\")\nparser.add_argument(\"-bs\", \"--batch_size\", type=int, default=256, help=\"Batch size, default is 256\")\nparser.add_argument(\"-t\", \"--tau\", type=float, default=0.005, help=\"Softupdate factor tau, default is 0.005\")\nparser.add_argument(\"-g\", \"--gamma\", type=float, default=0.99, help=\"discount factor gamma, default is 0.99\")\nparser.add_argument(\"-s\", \"--saved_model\", type=str, default=None, help=\"Load a saved model to perform a test 
run!\")\nparser.add_argument(\"-w\", \"--worker\", type=int, default=1, help=\"Number of parallel worker, default = 1\")\nparser.add_argument(\"-r\", \"--render_evals\", type=int, default=0, choices=[0, 1],\n help=\"Rendering the evaluation runs if set to 1, default=0\")\nparser.add_argument(\"--trial\", type=int, default=0, help=\"trial\")\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n\n if args.saved_model == None:\n writer = SummaryWriter(\"runs_v2/\" + args.info + str(args.trial))\n # envs = MultiPro.SubprocVecEnv([lambda: gym.make(args.env) for i in range(args.worker)])\n # eval_env = gym.make(args.env)\n\n envs = Pendulum(args.render_evals, args.seed)\n eval_env = Pendulum(args.render_evals, args.seed + 1)\n\n # envs.seed(args.seed)\n # eval_env.seed(args.seed+1)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device: {}\".format(device))\n\n action_high = eval_env.action_space.high[0]\n action_low = eval_env.action_space.low[0]\n state_size = eval_env.observation_space.shape[0]\n action_size = eval_env.action_space.shape[0]\n '''\n action_high = 1\n action_low = -1\n state_size = 3\n action_size = 1\n '''\n agent = Agent(state_size=state_size, action_size=action_size, args=args, device=device)\n\n t0 = time.time()\n if args.saved_model != None:\n agent.actor_local.load_state_dict(torch.load(args.saved_model, map_location=device))\n evaluate(frame=None, capture=False, rend=args.render_evals, savedmodel=True, seed=args.seed)\n else:\n run(args)\n t1 = time.time()\n timer(t0, t1)\n\n # save policy\n torch.save(agent.actor_local.state_dict(),\n 'runs_v2/{}{}/'.format(args.info, args.trial) + args.info + str(args.trial) + \".pth\")\n\n # save parameter\n with open('runs_v2/{}{}/'.format(args.info, args.trial) + args.info + str(args.trial) + \".json\", 'w') as f:\n json.dump(args.__dict__, f, indent=2)\n\n eval_env.close()\n if args.saved_model 
== None:\n writer.close()" ]
[ [ "numpy.expand_dims", "numpy.random.seed", "numpy.clip", "torch.load", "torch.manual_seed", "matplotlib.pyplot.subplots", "numpy.asmatrix", "numpy.append", "numpy.mean", "numpy.argmin", "torch.cuda.is_available", "numpy.shape", "matplotlib.pyplot.show", "numpy.zeros" ] ]
wildomonges/artificial-intelligence
[ "2f4d1d8b5ac8990abb89f69dc3096c7ddf9fe459" ]
[ "informed_and_uninformed_search/search.py" ]
[ "\"\"\"\n@autor Wildo Monges\nThis project is to apply the knowledge learned in the course \"Artificial Intelligence\" in edx.org\nI've implemented uninformed search BFS, DFS and informed search A* and IDA\nNote:\n The problem is called n-puzzle. You have an input state and using a search algorithm, the program\n should find the movements that needs to be done to have the goal state\n Python 3.5\n How?\n To run from command line: python search.py bfs 1,2,5,3,4,0,6,7,8\n Expected Output\n In the root of the project an output.txt is generated with the following information\n path_to_goal: ['Up', 'Left', 'Left']\n cost_of_path: 3\n nodes_expanded: 10\n fringe_size: 11\n max_fringe_size: 12\n search_depth: 3\n max_search_depth: 4\n running_time: 0.01719284\n\"\"\"\n\nimport sys\nfrom multiprocessing import Queue\nimport math\nimport numpy as np\nimport time\n\n\nclass State:\n \"\"\"\n This class represents a state of the board configuration\n \"\"\"\n def __init__(self, values):\n self.inherit_from_state = None\n self.values = values\n self.move = None\n self.depth = 0\n self.cost = 0\n self.gn = 1\n self.hn = 0\n self.use_heuristic = False\n\n def __eq__(self, other_state):\n \"\"\" Allow to compare if two numpy arrays have the same elements\"\"\"\n if (self.values == other_state.values).all():\n return True\n return False\n\n def __iter__(self):\n return self.values\n\n def __hash__(self):\n \"\"\"Return a hash\"\"\"\n return hash(tuple(self.values.reshape(self.values.size)))\n\n def hashcode(self):\n \"\"\"Return a unique hash value that represent self.values\"\"\"\n return str(hash(tuple(self.values.reshape(self.values.size))))\n\n def print_matrix(self):\n \"\"\"Function to print in a beautiful format the board in the console\"\"\"\n for row in self.values:\n print(row)\n print(\"\\n\")\n\n def get_moves(self):\n \"\"\"Return a list of movements that the algorithm did to resolve the game\"\"\"\n moves = []\n while self.inherit_from_state is not None:\n 
moves.append(self.move)\n self = self.inherit_from_state\n return moves\n\n def print_inheritance(self):\n \"\"\"Print the list of movements sort\"\"\"\n while self.inherit_from_state is not None:\n print(self.move)\n self.print_matrix()\n self = self.inherit_from_state\n\n def manhattan(self):\n \"\"\"This function return a heuristic value applying distance of Manhattan\"\"\"\n n = math.sqrt(self.values.size)\n board = self.values.reshape(self.values.size)\n goal = range(0, self.values.size)\n return sum(abs(b % n - g % n) + abs(b // n - g // n) for b, g in\n ((board[i], goal[i]) for i in range(0, self.values.size)))\n\n def neighbors(self):\n \"\"\"This function return a list of states that represent the neighbors\"\"\"\n states = []\n n = len(self.values)\n cor_row = -1\n cor_col = -1\n for x in range(n):\n for y in range(n):\n if self.values[x][y] == 0:\n cor_row = x\n cor_col = y\n break\n\n if cor_row != -1 and cor_col != - 1:\n break\n\n matrix = np.array(self.values[:])\n # if can move UP\n if cor_row > 0:\n element = matrix[cor_row - 1][cor_col]\n matrix[cor_row][cor_col] = element\n matrix[cor_row - 1][cor_col] = 0\n state = State(matrix)\n state.depth = self.depth + 1\n state.move = 'Up'\n # Just for A* and IDA\n if self.use_heuristic:\n state.cost = (self.gn + state.gn + state.manhattan())\n state.use_heuristic = True\n state.inherit_from_state = self\n states.append(state)\n\n matrix = np.array(self.values[:])\n # if can move DOWN\n if cor_row + 1 < n:\n element = matrix[cor_row + 1][cor_col]\n matrix[cor_row][cor_col] = element\n matrix[cor_row + 1][cor_col] = 0\n state = State(matrix)\n state.depth = self.depth + 1\n state.move = 'Down'\n # Just for A* and IDA\n if self.use_heuristic:\n state.cost = (self.gn + state.gn + state.manhattan())\n state.use_heuristic = True\n state.inherit_from_state = self\n states.append(state)\n\n matrix = np.array(self.values[:])\n # if can move LEFT\n if cor_col > 0:\n element = matrix[cor_row][cor_col - 1]\n 
matrix[cor_row][cor_col] = element\n matrix[cor_row][cor_col - 1] = 0\n state = State(matrix)\n state.depth = self.depth + 1\n state.move = 'Left'\n # Just for A* and IDA\n if self.use_heuristic:\n state.cost = (self.gn + state.gn + state.manhattan())\n state.use_heuristic = True\n state.inherit_from_state = self\n states.append(state)\n\n matrix = np.array(self.values[:])\n # if can move RIGHT\n if cor_col + 1 < n:\n element = matrix[cor_row][cor_col + 1]\n matrix[cor_row][cor_col] = element\n matrix[cor_row][cor_col + 1] = 0\n state = State(matrix)\n state.depth = self.depth + 1\n state.move = 'Right'\n # Just for A* and IDA\n if self.use_heuristic:\n state.cost = (self.gn + state.gn + state.manhattan())\n state.use_heuristic = True\n state.inherit_from_state = self\n states.append(state)\n\n return states\n\n\nclass Solver:\n \"\"\"This class call the algorithms to resolve the game\"\"\"\n def __init__(self, initial_state, goal_state, method):\n self.method = method\n self.initial_state = initial_state\n self.goal_state = goal_state\n\n def add_to_frontier(self, state, frontier, explored):\n try:\n frontier[state.hashcode()]\n return False\n except:\n pass\n\n try:\n explored[state.hashcode()]\n return False\n except:\n pass\n\n return True\n\n def bfs(self):\n \"\"\"BFS Algorithm: https://en.wikipedia.org/wiki/Breadth-first_search\"\"\"\n start_time = time.time()\n frontier = Queue()\n frontier.put(self.initial_state)\n frontier_dic = {self.initial_state.hashcode(): self.initial_state}\n explored = {}\n path_to_goal = []\n max_fringe_size = 1\n max_search_depth = 0\n info = {}\n while not frontier.empty():\n state = frontier.get()\n frontier_dic.pop(state.hashcode())\n explored[state.hashcode()] = state\n if state == self.goal_state:\n \"\"\"if the goal was found, then print to a file\"\"\"\n finished_time = time.time()\n path_to_goal = state.get_moves()\n info['path_to_goal'] = path_to_goal[::-1]\n info['cost_of_path'] = len(path_to_goal)\n 
info['nodes_expanded'] = len(explored) - 1\n info['fringe_size'] = frontier.qsize()\n info['max_fringe_size'] = max_fringe_size\n info['search_depth'] = len(path_to_goal)\n info['max_search_depth'] = state.depth + 1\n info['running_time'] = round((finished_time - start_time), 8)\n self.write_file(info)\n\n return True\n\n for n in state.neighbors():\n if self.add_to_frontier(n, frontier_dic, explored):\n frontier.put(n)\n frontier_dic[n.hashcode()] = n\n max_search_depth = max_search_depth + 1\n if frontier.qsize() > max_fringe_size:\n max_fringe_size = frontier.qsize()\n return False\n\n def dfs(self):\n \"\"\"DFS Algorithm: https://en.wikipedia.org/wiki/Depth-first_search\"\"\"\n start_time = time.time()\n frontier = []\n frontier.append(self.initial_state)\n frontier_dic = {self.initial_state.hashcode(): self.initial_state}\n explored = {}\n path_to_goal = []\n max_fringe_size = 1\n max_search_depth = 1\n info = {}\n while frontier != []:\n state = frontier.pop()\n frontier_dic.pop(state.hashcode())\n explored[state.hashcode()] = state\n if state == self.goal_state:\n finished_time = time.time()\n path_to_goal = state.get_moves()\n info['path_to_goal'] = path_to_goal[::-1]\n info['cost_of_path'] = len(path_to_goal)\n info['nodes_expanded'] = len(explored) - 1\n info['fringe_size'] = len(frontier)\n info['max_fringe_size'] = max_fringe_size\n info['search_depth'] = len(path_to_goal)\n info['max_search_depth'] = max_search_depth\n info['running_time'] = round((finished_time - start_time), 8)\n self.write_file(info)\n\n return True\n for n in reversed(state.neighbors()):\n if self.add_to_frontier(n, frontier_dic, explored):\n frontier.append(n)\n frontier_dic[n.hashcode()] = n\n if n.depth > max_search_depth:\n max_search_depth = n.depth\n\n if len(frontier) > max_fringe_size:\n max_fringe_size = len(frontier)\n return False\n\n def a_start(self):\n \"\"\"A* Algorithm: https://en.wikipedia.org/wiki/A*_search_algorithm\"\"\"\n print(\"A*\")\n start_time = 
time.time()\n frontier = Queue()\n self.initial_state.use_heuristic = True\n self.initial_state.cost = int(self.initial_state.gn + self.initial_state.manhattan())\n\n frontier.put({self.initial_state.cost: self.initial_state})\n frontier_dic = {self.initial_state.hashcode(): self.initial_state}\n\n explored = {}\n path_to_goal = []\n max_fringe_size = 1\n max_search_depth = 1\n info = {}\n while not frontier.empty():\n hash_priority_state = frontier.get()\n state = None\n for key, value in hash_priority_state.items():\n state = value\n print(key)\n print(state)\n\n frontier_dic.pop(state.hashcode())\n explored[state.hashcode()] = state\n if state == self.goal_state:\n finished_time = time.time()\n path_to_goal = state.get_moves()\n\n info['path_to_goal'] = path_to_goal[::-1]\n info['cost_of_path'] = len(path_to_goal)\n info['nodes_expanded'] = len(explored) - 1\n info['fringe_size'] = frontier.qsize()\n info['max_fringe_size'] = max_fringe_size\n info['search_depth'] = len(path_to_goal)\n info['max_search_depth'] = max_search_depth\n info['running_time'] = round((finished_time - start_time), 8)\n self.write_file(info)\n\n return True\n\n for n in state.neighbors():\n if self.add_to_frontier(n, frontier_dic, explored):\n frontier.put({int(n.cost): n})\n frontier_dic[n.hashcode()] = n\n\n if n.depth > max_search_depth:\n max_search_depth = n.depth\n\n if frontier.qsize() > max_fringe_size:\n max_fringe_size = frontier.qsize()\n\n return False\n\n def ida_start_search(self, state, g, maxh, level, explored):\n \"\"\"This functions heps to IDA* to search \"\"\"\n explored[state.hashcode()] = state\n result = {}\n f = state.manhattan() + g\n\n if f > maxh:\n result = {'type': 2, 'result1': f, 'state': state, 'explored': explored, 'level': level}\n return result\n\n if state.manhattan() == 0:\n result = {'type': 1, 'result1': f, 'state': state, 'explored': explored, 'level': level}\n return result\n\n minh = sys.float_info.max\n for s in state.neighbors():\n result1 = 
self.ida_start_search(s, g + s.manhattan(), maxh, level + 1, explored)\n if result1['type'] == 1:\n return result1\n elif result1['type'] == 2:\n new_minh = result1['result1']\n if new_minh < minh:\n minh = new_minh\n elif result1['type'] == 3:\n break\n\n result = {'type': 2, 'result1': minh, 'state': None, 'explored': {}, 'level': level}\n return result\n\n def ida_start(self):\n \"\"\"IDA* Algorithm: https://en.wikipedia.org/wiki/Iterative_deepening_A*\"\"\"\n start_time = time.time()\n state = self.initial_state\n state.use_heuristic = True\n maxh = state.manhattan()\n explored = {}\n info = {}\n while True:\n result = self.ida_start_search(state, 0, maxh, 0, explored)\n if result['type'] == 1:\n finished_time = time.time()\n state = result['state']\n path_to_goal = state.get_moves()\n\n info['path_to_goal'] = path_to_goal[::-1]\n info['cost_of_path'] = len(path_to_goal)\n info['nodes_expanded'] = len(result['explored']) - 1\n info['fringe_size'] = len(result['explored']) - 1\n info['max_fringe_size'] = len(result['explored'])\n info['search_depth'] = len(path_to_goal)\n info['max_search_depth'] = result['level']\n info['running_time'] = round((finished_time - start_time), 8)\n self.write_file(info)\n return True\n\n elif result['type'] == 2:\n minh = result['result1']\n if minh == sys.float_info.max:\n return {'type': 3}\n maxh = result['result1']\n\n def write_file(self, info):\n \"\"\"Function that write the result to output.txt\"\"\"\n f = open('output.txt', 'w+')\n f.write('path_to_goal: ' + str(info['path_to_goal']) + '\\n')\n f.write('cost_of_path: ' + str(info['cost_of_path']) + '\\n')\n f.write('nodes_expanded: ' + str(info['nodes_expanded']) + '\\n')\n f.write('fringe_size: ' + str(info['fringe_size']) + '\\n')\n f.write('max_fringe_size: ' + str(info['max_fringe_size']) + '\\n')\n f.write('search_depth: ' + str(info['search_depth']) + '\\n')\n f.write('max_search_depth: ' + str(info['max_search_depth']) + '\\n')\n f.write('running_time: ' + 
str(info['running_time']) + '\\n')\n f.close()\n\n def solve(self):\n if self.method == 'bfs':\n self.bfs()\n elif self.method == 'dfs':\n self.dfs()\n elif self.method == 'ast':\n self.a_start()\n elif self.method == 'ida':\n self.ida_start()\n\n\ndef main(argv):\n method = argv[1]\n values = argv[2].split(',')\n nro_col = int(math.sqrt(len(values)))\n\n aux = np.array(values).astype(int)\n init_configuration = np.reshape(aux, (-1, nro_col))\n print(init_configuration)\n\n initial_state = State(init_configuration)\n goal_state = State(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))\n\n solver = Solver(initial_state, goal_state, method)\n solver.solve()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" ]
[ [ "numpy.reshape", "numpy.array" ] ]
toshohirasawa/mst
[ "2a6ebf13b1ba3b878064038ffaa0728fc6673852" ]
[ "nmtpytorch/layers/encoders/bilstmp.py" ]
[ "# -*- coding: utf-8 -*-\nimport logging\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ..ff import FF\n\nlogger = logging.getLogger('nmtpytorch')\n\n\nclass BiLSTMp(nn.Module):\n \"\"\"A bidirectional LSTM encoder for speech features. A batch should\n only contain samples that have the same sequence length.\n\n Arguments:\n input_size (int): Input feature dimensionality.\n hidden_size (int): LSTM hidden state dimensionality.\n proj_size (int): Projection layer size.\n proj_activ (str, optional): Non-linearity to apply to intermediate projection\n layers. (Default: 'tanh')\n layers (str): A '_' separated list of integers that defines the subsampling\n factor for each LSTM.\n dropout (float, optional): Use dropout (Default: 0.)\n Input:\n x (Tensor): A tensor of shape (n_timesteps, n_samples, n_feats)\n that includes acoustic features of dimension ``n_feats`` per\n each timestep (in the first dimension).\n\n Output:\n hs (Tensor): A tensor of shape (n_timesteps, n_samples, hidden * 2)\n that contains encoder hidden states for all timesteps.\n mask (Tensor): `None` since this layer expects all equal frame inputs.\n \"\"\"\n def __init__(self, input_size, hidden_size, proj_size, layers,\n proj_activ='tanh', dropout=0):\n super().__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.proj_size = proj_size\n self.proj_activ = proj_activ\n self.layers = [int(i) for i in layers.split('_')]\n self.dropout = dropout\n self.n_layers = len(self.layers)\n\n # Doubles its size because of concatenation of forw-backw encs\n self.ctx_size = self.hidden_size\n\n # Fill 0-vector as <eos> to the end of the frames\n self.pad_tuple = (0, 0, 0, 0, 0, 1)\n\n # Projections and LSTMs\n self.ffs = nn.ModuleList()\n self.lstms = nn.ModuleList()\n\n if self.dropout > 0:\n self.do = nn.Dropout(self.dropout)\n\n for i, ss_factor in enumerate(self.layers):\n # Add LSTMs\n self.lstms.append(nn.LSTM(\n self.input_size if i == 0 else 
self.hidden_size,\n self.hidden_size, bidirectional=False))\n # Add non-linear bottlenecks\n self.ffs.append(FF(\n self.ctx_size, self.proj_size, activ=self.proj_activ))\n\n def forward(self, x, **kwargs):\n # Generate a mask to detect padded sequences\n mask = x.ne(0).float().sum(2).ne(0).float()\n\n if mask.eq(0).nonzero().numel() > 0:\n logger.info(\"WARNING: Non-homogeneous batch in BiLSTMp.\")\n\n # Pad with <eos> zero\n hs = F.pad(x, self.pad_tuple)\n\n for (ss_factor, f_lstm, f_ff) in zip(self.layers, self.lstms, self.ffs):\n if ss_factor > 1:\n # Skip states\n hs = f_ff(f_lstm(hs[::ss_factor])[0])\n else:\n hs = f_ff(f_lstm(hs)[0])\n\n if self.dropout > 0:\n hs = self.do(hs)\n\n # No mask is returned as batch should contain same-length sequences\n return hs, None\n" ]
[ [ "torch.nn.Dropout", "torch.nn.ModuleList", "torch.nn.functional.pad", "torch.nn.LSTM" ] ]
crkaushik93/autonomous-system-integration---ROS-Autonomous-system
[ "226f992e4d94e790aa8deec13cbb64c377a78188" ]
[ "ros/src/waypoint_updater/waypoint_updater.py" ]
[ "#!/usr/bin/env python\n\nimport math\nimport numpy as np\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom scipy.spatial import KDTree\nfrom std_msgs.msg import Int32\nfrom styx_msgs.msg import Lane, Waypoint\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 50 # Number of waypoints we will publish.\nCONSTANT_DECEL = 1 / LOOKAHEAD_WPS # Deceleration constant for smoother braking\nPUBLISHING_RATE = 20 # Rate (Hz) of waypoint publishing\nSTOP_LINE_MARGIN = 4 # Distance in waypoints to pad in front of the stop line\nMAX_DECEL = 0.5\nLOGGING_THROTTLE_FACTOR = PUBLISHING_RATE * 2 # Only log at this rate (1 / Hz)\n\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n\n self.base_lane = None\n self.pose = None\n self.stopline_wp_idx = -1\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.decelerate_count = 0\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=2)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size=8)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n self.loop()\n\n def loop(self):\n rate = rospy.Rate(PUBLISHING_RATE)\n while not rospy.is_shutdown():\n if self.pose and self.base_lane:\n self.publish_waypoints()\n rate.sleep()\n\n def 
get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n # Check if closest is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n # rospy.logwarn(\"closest_idx={}\".format(closest_idx))\n return closest_idx\n\n def publish_waypoints(self):\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n def generate_lane(self):\n lane = Lane()\n\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]\n\n if (self.stopline_wp_idx == -1) or (self.stopline_wp_idx >= farthest_idx):\n lane.waypoints = base_waypoints\n else:\n lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)\n\n return lane\n\n def decelerate_waypoints(self, waypoints, closest_idx):\n temp = []\n for i, wp in enumerate(waypoints):\n\n p = Waypoint()\n p.pose = wp.pose\n\n # Distance includes a number of waypoints back so front of car stops at line\n stop_idx = max(self.stopline_wp_idx - closest_idx - STOP_LINE_MARGIN, 0)\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * dist) + (i * CONSTANT_DECEL)\n if vel < 1.0:\n vel = 0.0\n\n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n temp.append(p)\n\n self.decelerate_count += 1\n if (self.decelerate_count % LOGGING_THROTTLE_FACTOR) == 0:\n size = len(waypoints) - 1\n vel_start = temp[0].twist.twist.linear.x\n vel_end = temp[size].twist.twist.linear.x\n rospy.logwarn(\"DECEL: vel[0]={:.2f}, 
vel[{}]={:.2f}\".format(vel_start, size, vel_end))\n return temp\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.base_lane = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in\n waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n if self.stopline_wp_idx != msg.data:\n rospy.logwarn(\n \"LIGHT: new stopline_wp_idx={}, old stopline_wp_idx={}\".format(msg.data, self.stopline_wp_idx))\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt(pow((a.x - b.x), 2) + pow((a.y - b.y), 2) + pow((a.z - b.z), 2))\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n def current_velocity_cb(self, msg):\n self.current_velocity = msg.twist.linear.x\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n" ]
[ [ "numpy.dot", "numpy.array", "scipy.spatial.KDTree" ] ]
vantuan5644/ReceiptOCR
[ "5e167b55a73c935622ecdfb05f502855d8d44004" ]
[ "labelText.py" ]
[ "import cv2\r\nimport os\r\nimport pandas as pd\r\nfrom shutil import copyfile\r\n\r\ns1 = u'ÀÁÂÃÈÉÊÌÍÒÓÔÕÙÚÝàáâãèéêìíòóôõùúýĂăĐđĨĩŨũƠơƯưẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊịỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹ'\r\ns0 = u'AAAAEEEIIOOOOUUYaaaaeeeiioooouuyAaDdIiUuOoUuAaAaAaAaAaAaAaAaAaAaAaAaEeEeEeEeEeEeEeEeIiIiOoOoOoOoOoOoOoOoOoOoOoOoUuUuUuUuUuUuUuYyYyYyYy'\r\ndef remove_accents(input_str):\r\n\ts = ''\r\n\tfor c in input_str:\r\n\t\tif c in s1:\r\n\t\t\ts += s0[s1.index(c)]\r\n\t\telse:\r\n\t\t\ts += c\r\n\treturn s\r\n\r\n\r\n\r\nbill_code_path = 'transformed/bill_code'\r\nmarket_name_path = 'transformed/market_name'\r\ndate_path = 'transformed/date'\r\nfile_name = 'Template_OCR BILL COOP 2_251220.xlsx'\r\nfile_name_25 = 'ORC_TEMPLATE01_total.xlsx'\r\npath_grouth_truth = 'transformed/images'\r\n\r\ndf = pd.read_excel(file_name, sheet_name='FULL',skiprows=1)\r\ndf = df.iloc[:,1:5]\r\ndf.dropna(inplace=True)\r\n\r\ndf_25 = pd.read_excel(file_name_25, sheet_name='Template', skiprows=1,dtype=str)\r\ndf_25 = df_25.iloc[:,1:5]\r\ndf_25[\"Date\"] = pd.to_datetime(df_25[\"Date\"])\r\ndf_25.dropna(inplace=True)\r\n\r\nimagesName = []\r\nimagesName_25 = []\r\nfor image in os.listdir(market_name_path):\r\n condition_1 = 'txt' not in image and '_25_12' not in image[:-11]\r\n condition_2 = 'txt' not in image and '_25_12' in image[:-11]\r\n if condition_1:\r\n imagesName.append(image)\r\n if condition_2:\r\n imagesName_25.append(image)\r\nprint(imagesName)\r\nprint(imagesName_25)\r\ni = 1\r\nt = 1\r\nname_old = df.iloc[0,0]\r\nfor index, row in df.iterrows():\r\n market_name = row['Market Name']\r\n image_name = f'{str(row[\"Image Name\"]).lower()}_padded.jpg'\r\n date = f'Ngay: {row[\"Date\"].day}/{row[\"Date\"].month}/{row[\"Date\"].year}'\r\n bill_code = f'So HD: {row[\"Bill Code\"]}'\r\n if image_name != name_old:\r\n if image_name in imagesName:\r\n with open(os.path.join(path_grouth_truth,f'{image_name[:-4]}_market_name_{i}.gt.txt'),'w+') as f:\r\n src = 
os.path.join(market_name_path,image_name)\r\n dst = os.path.join(path_grouth_truth,f'{image_name[:-4]}_market_name_{i}.jpg')\r\n try:\r\n # do something\r\n copyfile(src, dst)\r\n market_name = remove_accents(market_name)\r\n f.write(market_name)\r\n except FileNotFoundError:\r\n # handle ValueError exception\r\n pass\r\n with open(os.path.join(path_grouth_truth,f'{image_name[:-4]}_bill_code_{i}.gt.txt'),'w+') as f:\r\n src = os.path.join(bill_code_path,image_name)\r\n dst = os.path.join(path_grouth_truth,f'{image_name[:-4]}_bill_code_{i}.jpg')\r\n try:\r\n # do something\r\n copyfile(src, dst)\r\n f.write(bill_code)\r\n except FileNotFoundError:\r\n # handle ValueError exception\r\n pass\r\n\r\n with open(os.path.join(path_grouth_truth,f'{image_name[:-4]}_date_{i}.gt.txt'),'w+') as f:\r\n src = os.path.join(date_path,image_name)\r\n dst = os.path.join(path_grouth_truth,f'{image_name[:-4]}_date_{i}.jpg')\r\n try:\r\n # do something\r\n copyfile(src, dst)\r\n f.write(date)\r\n except FileNotFoundError:\r\n # handle ValueError exception\r\n pass\r\n\r\n i+=1\r\n name_old = image_name\r\n \r\n\r\nname_old = df_25.iloc[0,0]\r\nfor index, row in df_25.iterrows():\r\n market_name = row['Market Name']\r\n image_name = f'{str(row[\"Image Name\"]).lower()}_25_12_padded.jpg'\r\n date = f'Ngay: {row[\"Date\"].month}/{row[\"Date\"].day}/{row[\"Date\"].year}'\r\n bill_code = f'So HD: {row[\"Bill Code\"]}'\r\n if image_name != name_old:\r\n if image_name in imagesName_25:\r\n with open(os.path.join(path_grouth_truth,f'{image_name[:-4]}_market_name_{i}.gt.txt'),'w+') as f:\r\n src = os.path.join(market_name_path,image_name)\r\n dst = os.path.join(path_grouth_truth,f'{image_name[:-4]}_market_name_{i}.jpg')\r\n \r\n copyfile(src, dst)\r\n market_name = remove_accents(market_name)\r\n f.write(market_name)\r\n with open(os.path.join(path_grouth_truth,f'{image_name[:-4]}_bill_code_{i}.gt.txt'),'w+') as f:\r\n src = os.path.join(bill_code_path,image_name)\r\n dst = 
os.path.join(path_grouth_truth,f'{image_name[:-4]}_bill_code_{i}.jpg')\r\n copyfile(src, dst)\r\n f.write(bill_code)\r\n with open(os.path.join(path_grouth_truth,f'{image_name[:-4]}_date_{i}.gt.txt'),'w+') as f:\r\n src = os.path.join(date_path,image_name)\r\n dst = os.path.join(path_grouth_truth,f'{image_name[:-4]}_date_{i}.jpg')\r\n copyfile(src, dst)\r\n f.write(date)\r\n i+=1\r\n name_old = image_name\r\n\r\n# for index, row in df.iterrows():\r\n# market_name = row['Market Name']\r\n# image_name = f'{str(row[\"Image Name\"]).lower()}_padded.jpg'\r\n# date = f'Ngay: {row[\"Date\"].day}/{row[\"Date\"].month}/{row[\"Date\"].year}'\r\n# bill_code = f'So HD: {row[\"Bill Code\"]}'\r\n# if image_name in imagesName:\r\n# with open(os.path.join(path_grouth_truth,f'{image_name[:-4]}.gt.txt'),'w+') as f:\r\n# market_name = remove_accents(market_name)\r\n# f.write(market_name)\r\n# with open(os.path.join(bill_code_path,f'{image_name[:-4]}.gt.txt'),'w+') as f:\r\n# f.write(bill_code)\r\n# with open(os.path.join(date_path,f'{image_name[:-4]}.gt.txt'),'w+') as f:\r\n# f.write(date)\r\n\r\n# for index, row in df_25.iterrows():\r\n# market_name = row['Market Name']\r\n# image_name = f'{str(row[\"Image Name\"]).lower()}_25_12_padded.jpg'\r\n# date = f'Ngay: {row[\"Date\"].day}/{row[\"Date\"].month}/{row[\"Date\"].year}'\r\n# bill_code = f'So HD: {row[\"Bill Code\"]}'\r\n# if image_name in imagesName_25:\r\n# with open(os.path.join(market_name_path,f'{image_name[:-4]}.gt.txt'),'w+') as f:\r\n# market_name = remove_accents(market_name)\r\n# f.write(market_name)\r\n# with open(os.path.join(bill_code_path,f'{image_name[:-4]}.gt.txt'),'w+') as f:\r\n# f.write(bill_code)\r\n# with open(os.path.join(date_path,f'{image_name[:-4]}.gt.txt'),'w+') as f:\r\n# f.write(date)\r\n\r\n" ]
[ [ "pandas.read_excel", "pandas.to_datetime" ] ]
salimelawad/recession_predictor
[ "df7c94999125fe2c573d9bbfea3ff80d0b36024b" ]
[ "main.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\r\nfrom sklearn.neural_network import MLPRegressor, MLPClassifier\r\nfrom seaborn import lineplot\r\nfrom sklearn.metrics import mean_absolute_error as mae\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\n\r\n\r\n### READ DATA INTO ONE DATAFRAME\r\n\r\na = pd.read_csv(\"T1YFFM.csv\", index_col=\"DATE\")\r\nb = pd.read_csv(\"T5YFFM.csv\", index_col=\"DATE\")\r\nc = pd.read_csv(\"T10YFFM.csv\", index_col=\"DATE\")\r\nd = pd.read_csv(\"TB3SMFFM.csv\", index_col=\"DATE\")\r\n\r\nr = pd.read_csv(\"USREC.csv\", index_col=\"DATE\")\r\n\r\ndata = pd.concat([a, b, c, d, r], axis=1)\r\ndata.index = pd.to_datetime(data.index)\r\n\r\ndata[\"T1YFFM_1\"] = data[\"T1YFFM\"].shift(1)\r\ndata[\"T5YFFM_1\"] = data[\"T5YFFM\"].shift(1)\r\ndata[\"T10YFFM_1\"] = data[\"T10YFFM\"].shift(1)\r\ndata[\"TB3SMFFM_1\"] = data[\"TB3SMFFM\"].shift(1)\r\n\r\ndata[\"T1YFFM_2\"] = data[\"T1YFFM\"].shift(2)\r\ndata[\"T5YFFM_2\"] = data[\"T5YFFM\"].shift(2)\r\ndata[\"T10YFFM_2\"] = data[\"T10YFFM\"].shift(2)\r\ndata[\"TB3SMFFM_2\"] = data[\"TB3SMFFM\"].shift(2)\r\n\r\n# Create all target variables\r\nfor i in range(18):\r\n data[\"future_rec_6m_{}\".format(str(i))] = (\r\n data[\"USREC\"]\r\n .shift(-1 * i)\r\n .iloc[::-1]\r\n .rolling(6, min_periods=0)\r\n .sum()\r\n .iloc[::-1]\r\n )\r\n\r\ndata = data.fillna(0)\r\n\r\n\r\ndef bad_model_test(data, date_filter, feature_cols, target_col, graph=False):\r\n # filter\r\n data_filtered = data[data.index < date_filter].copy(deep=True)\r\n\r\n features = data_filtered[feature_cols].copy(deep=True)\r\n target = 
data_filtered[[target_col]].copy(deep=True)\r\n\r\n # Scale features\r\n scaler_feature = MinMaxScaler()\r\n scaler_feature.fit(features)\r\n scaled_features = scaler_feature.transform(features)\r\n\r\n # Scale target\r\n scaler_target = MinMaxScaler()\r\n scaler_target.fit(target)\r\n scaled_target = np.ravel(scaler_target.transform(target))\r\n\r\n regr = MLPRegressor(hidden_layer_sizes=(10, 10, 10))\r\n model = regr.fit(scaled_features, scaled_target)\r\n predictions = model.predict(scaled_features)\r\n print(mse(predictions, scaled_target))\r\n if graph:\r\n # graph_results(data_filtered[['preds', target_col]])\r\n graph_results(\r\n data_filtered.index,\r\n np.ravel(scaler_target.inverse_transform([predictions])),\r\n np.ravel(scaler_target.inverse_transform([scaled_target])),\r\n )\r\n\r\n\r\ndef graph_results(index, predictions, scaled_target):\r\n fig, ax = plt.subplots(figsize=(25, 12))\r\n # myFmt = mdates.DateFormatter(\"%y-%m\")\r\n # ax.xaxis.set_major_formatter(myFmt)\r\n ax.xaxis.set_major_locator(mdates.MonthLocator(interval=12))\r\n ax.set_title(\"Preds\", size=30)\r\n # wide_df = data[['preds', target_col]]\r\n wide_df = pd.DataFrame(index=index)\r\n wide_df[\"predictions\"] = predictions\r\n wide_df[\"target\"] = scaled_target\r\n ax = lineplot(data=wide_df)\r\n plt.xlabel(\"Year\", size=20)\r\n plt.ylabel(\"# of future months in recession\", size=20)\r\n plt.xticks(rotation=45)\r\n plt.grid(which=\"major\")\r\n\r\n\r\n##test all months\r\n# for i in range(18):\r\n# print('month {}: '.format(i), end='')\r\n# bad_model_test(data,\r\n# '2016-01-01',\r\n# ['TB3SMFFM', 'T1YFFM', 'T5YFFM', 'T10YFFM',\r\n# 'TB3SMFFM_1', 'T1YFFM_1', 'T5YFFM_1', 'T10YFFM_1',\r\n# 'TB3SMFFM_2', 'T1YFFM_2', 'T5YFFM_2', 'T10YFFM_2'],\r\n# 'future_rec_6m_{}'.format(str(i))\r\n# )\r\n#\r\n\r\nbad_model_test(\r\n data,\r\n \"2020-01-01\",\r\n [\r\n \"TB3SMFFM\",\r\n \"T1YFFM\",\r\n \"T5YFFM\",\r\n \"T10YFFM\",\r\n \"TB3SMFFM_1\",\r\n \"T1YFFM_1\",\r\n \"T5YFFM_1\",\r\n 
\"T10YFFM_1\",\r\n \"TB3SMFFM_2\",\r\n \"T1YFFM_2\",\r\n \"T5YFFM_2\",\r\n \"T10YFFM_2\",\r\n ],\r\n \"future_rec_6m_8\",\r\n graph=True,\r\n)\r\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.to_datetime", "matplotlib.pyplot.subplots", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.dates.MonthLocator", "sklearn.preprocessing.MinMaxScaler", "sklearn.neural_network.MLPRegressor" ] ]
jamesobutler/ITK
[ "ddbaeae13c1711fcb3010b6e81b108cb10677875" ]
[ "Wrapping/Generators/Python/itkExtras.py" ]
[ "#==========================================================================\n#\n# Copyright Insight Software Consortium\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0.txt\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#==========================================================================*/\n\nfrom __future__ import print_function\nimport re\n\n# The following line defines an ascii string used for dynamically refreshing\n# the import and progress callbacks on the same terminal line.\n# See http://www.termsys.demon.co.uk/vtansi.htm\n# \\033 is the C-style octal code for an escape character\n# [2000D moves the cursor back 2000 columns, this is a brute force way of\n# getting to the start of the line.\n# [K erases the end of the line\nclrLine = \"\\033[2000D\\033[K\"\n\n\ndef auto_not_in_place(v=True):\n \"\"\"Force it to not run in place\n \"\"\"\n import itkConfig\n itkConfig.NotInPlace = v\n\n\ndef auto_progress(progress_type=1):\n \"\"\"Set up auto progress report\n\n progress_type:\n 1 or True -> auto progress be used in a terminal\n 2 -> simple auto progress (without special characters)\n 0 or False -> disable auto progress\n \"\"\"\n import itkConfig\n\n if progress_type is True or progress_type == 1:\n itkConfig.ImportCallback = terminal_import_callback\n itkConfig.ProgressCallback = terminal_progress_callback\n\n elif progress_type == 2:\n itkConfig.ImportCallback = simple_import_callback\n itkConfig.ProgressCallback = simple_progress_callback\n\n elif progress_type is False or progress_type 
== 0:\n itkConfig.ImportCallback = None\n itkConfig.ProgressCallback = None\n\n else:\n raise ValueError(\"Invalid auto progress type: \" + repr(progress_type))\n\n\ndef terminal_progress_callback(name, p):\n \"\"\"Display the progress of an object and clean the display once complete\n\n This function can be used with itkConfig.ProgressCallback\n \"\"\"\n import sys\n print(clrLine + \"%s: %f\" % (name, p), file=sys.stderr, end=\"\")\n if p == 1:\n print(clrLine, file=sys.stderr, end=\"\")\n\n\ndef terminal_import_callback(name, p):\n \"\"\"Display the loading of a module and clean the display once complete\n\n This function can be used with itkConfig.ImportCallback\n \"\"\"\n import sys\n print(clrLine + \"Loading %s... \" % name, file=sys.stderr, end=\"\")\n if p == 1:\n print(clrLine, file=sys.stderr, end=\"\")\n\n\ndef simple_import_callback(name, p):\n \"\"\"Print a message when a module is loading\n\n This function can be used with itkConfig.ImportCallback\n \"\"\"\n import sys\n if p == 0:\n print(\"Loading %s... \" % name, file=sys.stderr, end=\"\")\n elif p == 1:\n print(\"done\", file=sys.stderr)\n\n\ndef simple_progress_callback(name, p):\n \"\"\"Print a message when an object is running\n\n This function can be used with itkConfig.ProgressCallback\n \"\"\"\n import sys\n if p == 0:\n print(\"Running %s... 
\" % name, file=sys.stderr, end=\"\")\n elif p == 1:\n print(\"done\", file=sys.stderr)\n\n\ndef force_load():\n \"\"\"force itk to load all the submodules\"\"\"\n import itk\n for k in dir(itk):\n getattr(itk, k)\n\n\nimport sys\n\n\ndef echo(object, f=sys.stderr):\n \"\"\"Print an object is f\n\n If the object has a method Print(), this method is used.\n repr(object) is used otherwise\n \"\"\"\n print(f, object)\ndel sys\n\n\ndef size(image_or_filter):\n \"\"\"Return the size of an image, or of the output image of a filter\n\n This method take care of updating the needed informations\n \"\"\"\n # we don't need the entire output, only its size\n image_or_filter.UpdateOutputInformation()\n img = output(image_or_filter)\n return img.GetLargestPossibleRegion().GetSize()\n\n\ndef physical_size(image_or_filter):\n \"\"\"Return the physical size of an image, or of the output image of a filter\n\n This method take care of updating the needed informations\n \"\"\"\n # required because range is overloaded in this module\n import sys\n if sys.version_info >= (3, 0):\n from builtins import range\n else:\n from __builtin__ import range\n spacing_ = spacing(image_or_filter)\n size_ = size(image_or_filter)\n result = []\n for i in range(0, spacing_.Size()):\n result.append(spacing_.GetElement(i) * size_.GetElement(i))\n return result\n\n\ndef spacing(image_or_filter):\n \"\"\"Return the spacing of an image, or of the output image of a filter\n\n This method take care of updating the needed informations\n \"\"\"\n # we don't need the entire output, only its size\n image_or_filter.UpdateOutputInformation()\n img = output(image_or_filter)\n return img.GetSpacing()\n\n\ndef origin(image_or_filter):\n \"\"\"Return the origin of an image, or of the output image of a filter\n\n This method take care of updating the needed informations\n \"\"\"\n # we don't need the entire output, only its size\n image_or_filter.UpdateOutputInformation()\n img = output(image_or_filter)\n return 
img.GetOrigin()\n\n\ndef index(image_or_filter):\n \"\"\"Return the index of an image, or of the output image of a filter\n\n This method take care of updating the needed informations\n \"\"\"\n # we don't need the entire output, only its size\n image_or_filter.UpdateOutputInformation()\n img = output(image_or_filter)\n return img.GetLargestPossibleRegion().GetIndex()\n\n\ndef region(image_or_filter):\n \"\"\"Return the region of an image, or of the output image of a filter\n\n This method take care of updating the needed informations\n \"\"\"\n # we don't need the entire output, only its size\n image_or_filter.UpdateOutputInformation()\n img = output(image_or_filter)\n return img.GetLargestPossibleRegion()\n\nHAVE_NUMPY = True\ntry:\n import numpy\nexcept ImportError:\n HAVE_NUMPY = False\n\ndef _get_itk_pixelid(numpy_array_type):\n \"\"\"Returns a ITK PixelID given a numpy array.\"\"\"\n\n if not HAVE_NUMPY:\n raise ImportError('Numpy not available.')\n import itk\n # This is a Mapping from numpy array types to itk pixel types.\n _np_itk = {numpy.uint8:itk.UC,\n numpy.uint16:itk.US,\n numpy.uint32:itk.UI,\n numpy.uint64:itk.UL,\n numpy.int8:itk.SC,\n numpy.int16:itk.SS,\n numpy.int32:itk.SI,\n numpy.int64:itk.SL,\n numpy.float32:itk.F,\n numpy.float64:itk.D,\n numpy.complex64:itk.complex[itk.F],\n numpy.complex128:itk.complex[itk.D]\n }\n try:\n return _np_itk[numpy_array_type.dtype.type]\n except KeyError as e:\n for key in _np_itk:\n if numpy.issubdtype(numpy_array_type.dtype.type, key):\n return _np_itk[key]\n raise e\n\ndef _GetArrayFromImage(image_or_filter, function, keep_axes, update):\n \"\"\"Get an Array with the content of the image buffer\n \"\"\"\n # Check for numpy\n if not HAVE_NUMPY:\n raise ImportError('Numpy not available.')\n # Finds the image type\n import itk\n keys = [k for k in itk.PyBuffer.keys() if k[0] == output(image_or_filter).__class__]\n if len(keys ) == 0:\n raise RuntimeError(\"No suitable template parameter can be found.\")\n 
ImageType = keys[0]\n # Create a numpy array of the type of the input image\n templatedFunction = getattr(itk.PyBuffer[keys[0]], function)\n return templatedFunction(output(image_or_filter), keep_axes, update)\n\ndef GetArrayFromImage(image_or_filter, keep_axes=False, update=True):\n \"\"\"Get an array with the content of the image buffer\n \"\"\"\n return _GetArrayFromImage(image_or_filter, \"GetArrayFromImage\", keep_axes, update)\n\narray_from_image = GetArrayFromImage\n\ndef GetArrayViewFromImage(image_or_filter, keep_axes=False, update=True):\n \"\"\"Get an array view with the content of the image buffer\n \"\"\"\n return _GetArrayFromImage(image_or_filter, \"GetArrayViewFromImage\", keep_axes, update)\n\narray_view_from_image = GetArrayViewFromImage\n\ndef _GetImageFromArray(arr, function, is_vector):\n \"\"\"Get an ITK image from a Python array.\n \"\"\"\n if not HAVE_NUMPY:\n raise ImportError('Numpy not available.')\n import itk\n PixelType = _get_itk_pixelid(arr)\n if is_vector:\n Dimension = arr.ndim - 1\n if arr.flags['C_CONTIGUOUS']:\n VectorDimension = arr.shape[-1]\n else:\n VectorDimension = arr.shape[0]\n if PixelType == itk.UC:\n if VectorDimension == 3:\n ImageType = itk.Image[ itk.RGBPixel[itk.UC], Dimension ]\n elif VectorDimension == 4:\n ImageType = itk.Image[ itk.RGBAPixel[itk.UC], Dimension ]\n else:\n ImageType = itk.Image[ itk.Vector[PixelType, VectorDimension] , Dimension]\n else:\n Dimension = arr.ndim\n ImageType = itk.Image[PixelType, Dimension]\n templatedFunction = getattr(itk.PyBuffer[ImageType], function)\n return templatedFunction(arr, is_vector)\n\ndef GetImageFromArray(arr, is_vector=False):\n \"\"\"Get an ITK image from a Python array.\n \"\"\"\n return _GetImageFromArray(arr, \"GetImageFromArray\", is_vector)\n\nimage_from_array = GetImageFromArray\n\ndef GetImageViewFromArray(arr, is_vector=False):\n \"\"\"Get an ITK image view from a Python array.\n \"\"\"\n return _GetImageFromArray(arr, \"GetImageViewFromArray\", 
is_vector)\n\nimage_view_from_array = GetImageFromArray\n\ndef _GetArrayFromVnlObject(vnl_object, function):\n \"\"\"Get an array with the content of vnl_object\n \"\"\"\n # Check for numpy\n if not HAVE_NUMPY:\n raise ImportError('Numpy not available.')\n # Finds the vnl object type\n import itk\n PixelType = itk.template(vnl_object)[1][0]\n keys = [k for k in itk.PyVnl.keys() if k[0] == PixelType]\n if len(keys ) == 0:\n raise RuntimeError(\"No suitable template parameter can be found.\")\n # Create a numpy array of the type of the vnl object\n templatedFunction = getattr(itk.PyVnl[keys[0]], function)\n return templatedFunction(vnl_object)\n\ndef GetArrayFromVnlVector(vnl_vector):\n \"\"\"Get an array with the content of vnl_vector\n \"\"\"\n return _GetArrayFromVnlObject(vnl_vector, \"GetArrayFromVnlVector\")\n\narray_from_vnl_vector = GetArrayFromVnlVector\n\ndef GetArrayViewFromVnlVector(vnl_vector):\n \"\"\"Get an array view of vnl_vector\n \"\"\"\n return _GetArrayFromVnlObject(vnl_vector, \"GetArrayViewFromVnlVector\")\n\narray_view_from_vnl_vector = GetArrayFromVnlVector\n\ndef GetArrayFromVnlMatrix(vnl_matrix):\n \"\"\"Get an array with the content of vnl_matrix\n \"\"\"\n return _GetArrayFromVnlObject(vnl_matrix, \"GetArrayFromVnlMatrix\")\n\ndef GetArrayViewFromVnlMatrix(vnl_matrix):\n \"\"\"Get an array view of vnl_matrix\n \"\"\"\n return _GetArrayFromVnlObject(vnl_matrix, \"GetArrayViewFromVnlMatrix\")\n\narray_from_vnl_matrix = GetArrayFromVnlMatrix\n\ndef _GetVnlObjectFromArray(arr, function):\n \"\"\"Get a vnl object from a Python array.\n \"\"\"\n if not HAVE_NUMPY:\n raise ImportError('Numpy not available.')\n import itk\n PixelType = _get_itk_pixelid(arr)\n templatedFunction = getattr(itk.PyVnl[PixelType], function)\n return templatedFunction(arr)\n\ndef GetVnlVectorFromArray(arr):\n \"\"\"Get a vnl vector from a Python array.\n \"\"\"\n return _GetVnlObjectFromArray(arr, \"GetVnlVectorFromArray\")\n\nvnl_vector_from_array = 
GetVnlVectorFromArray\n\ndef GetVnlMatrixFromArray(arr):\n \"\"\"Get a vnl matrix from a Python array.\n \"\"\"\n return _GetVnlObjectFromArray(arr, \"GetVnlMatrixFromArray\")\n\nvnl_matrix_from_array = GetVnlMatrixFromArray\n\ndef GetArrayFromMatrix(itk_matrix):\n return GetArrayFromVnlMatrix(itk_matrix.GetVnlMatrix().as_matrix())\n\narray_from_matrix = GetArrayFromMatrix\n\ndef GetMatrixFromArray(arr):\n import itk\n vnl_matrix = GetVnlMatrixFromArray(arr)\n dims = arr.shape\n PixelType = _get_itk_pixelid(arr)\n m = itk.Matrix[PixelType, dims[0], dims[1]](vnl_matrix)\n return m\n\nmatrix_from_array = GetMatrixFromArray\n\n# return an image\nfrom itkTemplate import image, output\n\n\ndef template(cl):\n \"\"\"Return the template of a class (or of the class of an object) and\n its parameters\n\n template() returns a tuple with 2 elements:\n - the first one is the itkTemplate object\n - the second is a tuple containing the template parameters\n \"\"\"\n from itkTemplate import itkTemplate\n return itkTemplate.__class_to_template__[class_(cl)]\n\n\ndef ctype(s):\n \"\"\"Return the c type corresponding to the string passed in parameter\n\n The string can contain some extra spaces.\n see also itkCType\n \"\"\"\n from itkTypes import itkCType\n\n ret = itkCType.GetCType(\" \".join(s.split()))\n if ret is None:\n raise KeyError(\"Unrecognized C type '%s'\" % s)\n return ret\n\n\ndef class_(obj):\n \"\"\"Return a class from an object\n\n Often in itk, the __class__ is not what the user is expecting.\n class_() should do a better job\n \"\"\"\n import inspect\n if inspect.isclass(obj):\n # obj is already a class !\n return obj\n else:\n return obj.__class__\n\ndef python_type(obj):\n \"\"\"Returns the Python type name of an object\n\n The Python name corresponding to the given instantiated object is printed.\n This includes both the Python name and the parameters of the object. 
A user\n can copy and paste the printed value to instantiate a new object of the\n same type.\"\"\"\n import itkTemplate\n from itkTypes import itkCType\n\n def in_itk(name):\n import itk\n # Remove \"itk::\" and \"std::\" from template name.\n # Only happens for ITK objects.\n shortname = name.split('::')[-1]\n shortname = shortname.split('itk')[-1]\n\n namespace = itk\n # A type cannot be part of ITK if its name was not modified above. This\n # check avoids having an input of type `list` and return `itk.list` that\n # also exists.\n likely_itk = (shortname != name or name[:3] == 'vnl')\n if likely_itk and hasattr(namespace, shortname):\n return namespace.__name__ + '.' + shortname # Prepend name with 'itk.'\n else:\n return name\n\n def recursive(obj, level):\n try:\n T, P = template(obj)\n name = in_itk(T.__name__)\n parameters = []\n for t in P:\n parameters.append(recursive(t, level+1))\n return name + \"[\" + \",\".join(parameters) + \"]\"\n except KeyError:\n if isinstance(obj, itkCType): # Handles CTypes differently\n return 'itk.' + obj.short_name\n elif hasattr(obj, \"__name__\"):\n # This should be where most ITK types end up.\n return in_itk(obj.__name__)\n elif (not isinstance(obj, type)\n and type(obj) != itkTemplate.itkTemplate and level != 0):\n # obj should actually be considered a value, not a type,\n # or it is already an itkTemplate type.\n # A value can be an integer that is a template parameter.\n # This does not happen at the first level of the recursion\n # as it is not possible that this object would be a template\n # parameter. Checking the level `0` allows e.g. 
to find the\n # type of an object that is a `list` or an `int`.\n return str(obj)\n else:\n return in_itk(type(obj).__name__)\n return recursive(obj, 0)\n\n\ndef range(image_or_filter):\n \"\"\"Return the range of values in a image of in the output image of a filter\n\n The minimum and maximum values are returned in a tuple: (min, max)\n range() take care of updating the pipeline\n \"\"\"\n import itk\n img = output(image_or_filter)\n img.UpdateOutputInformation()\n img.Update()\n # don't put that calculator in the automatic pipeline\n tmp_auto_pipeline = auto_pipeline.current\n auto_pipeline.current = None\n comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)\n auto_pipeline.current = tmp_auto_pipeline\n comp.Compute()\n return (comp.GetMinimum(), comp.GetMaximum())\n\n\ndef imwrite(image_or_filter, filename, compression=False):\n \"\"\"Write a image or the output image of a filter to a file.\n\n The writer is instantiated with the image type of the image in\n parameter (or, again, with the output image of the filter in parameter).\n \"\"\"\n import itk\n img = output(image_or_filter)\n img.UpdateOutputInformation()\n # don't put that writer in the automatic pipeline\n tmp_auto_pipeline = auto_pipeline.current\n auto_pipeline.current = None\n writer = itk.ImageFileWriter[img].New(\n Input=img,\n FileName=filename,\n UseCompression=compression)\n auto_pipeline.current = tmp_auto_pipeline\n writer.Update()\n\ndef imread(filename, pixel_type=None, fallback_only=False):\n \"\"\"Read an image from a file or series of files and return an itk.Image.\n\n The reader is instantiated with the image type of the image file if\n `pixel_type` is not provided (default). The dimension of the image is\n automatically found. 
If the given filename is a list or a tuple, the\n reader will use an itk.ImageSeriesReader object to read the files.\n\n If `fallback_only` is set to `True`, `imread()` will first try to\n automatically deduce the image pixel_type, and only use the given\n `pixel_type` if the automatic deduction fail. Failures typically\n happen if the pixel type is not supported (e.g. it is not currently\n wrapped).\n \"\"\"\n import itk\n if fallback_only == True:\n if pixel_type is None:\n raise Exception(\"`pixel_type` must be set when using `fallback_only`\"\n \" option\")\n try:\n return imread(filename)\n except KeyError:\n pass\n if type(filename) in [list, tuple]:\n TemplateReaderType=itk.ImageSeriesReader\n io_filename=filename[0]\n increase_dimension=True\n kwargs={'FileNames':filename}\n else:\n TemplateReaderType=itk.ImageFileReader\n io_filename=filename\n increase_dimension=False\n kwargs={'FileName':filename}\n if pixel_type:\n imageIO = itk.ImageIOFactory.CreateImageIO(io_filename, itk.ImageIOFactory.ReadMode)\n if not imageIO:\n raise RuntimeError(\"No ImageIO is registered to handle the given file.\")\n imageIO.SetFileName(io_filename)\n imageIO.ReadImageInformation()\n dimension = imageIO.GetNumberOfDimensions()\n # Increase dimension if last dimension is not of size one.\n if increase_dimension and imageIO.GetDimensions(dimension-1) != 1:\n dimension += 1\n ImageType=itk.Image[pixel_type,dimension]\n reader = TemplateReaderType[ImageType].New(**kwargs)\n else:\n reader = TemplateReaderType.New(**kwargs)\n reader.Update()\n return reader.GetOutput()\n\ndef search(s, case_sensitive=False): # , fuzzy=True):\n \"\"\"Search for a class name in the itk module.\n \"\"\"\n s = s.replace(\" \", \"\")\n if not case_sensitive:\n s = s.lower()\n import itk\n names = sorted(dir(itk))\n # exact match first\n if case_sensitive:\n res = [n for n in names if s == n]\n else:\n res = [n for n in names if s == n.lower()]\n # then exact match inside the name\n if case_sensitive:\n 
res += [n for n in names if s in n and s != n]\n else:\n res += [n for n in names if s in n.lower() and s != n.lower()]\n# if fuzzy:\n# try:\n# everything now requires editdist\n# import editdist\n# if case_sensitive:\n# res.sort(key=lambda x: editdist.distance(x, s))\n# else:\n# res.sort(key=lambda x: (editdist.distance(x.lower(), s), x))\n# except:\n# pass\n return res\n\n\n# Helpers for set_inputs snake case to CamelCase keyword argument conversion\n_snake_underscore_re = re.compile('(_)([a-z0-9A-Z])')\ndef _underscore_upper(matchobj):\n return matchobj.group(2).upper()\ndef _snake_to_camel(keyword):\n camel = keyword[0].upper()\n if _snake_underscore_re.search(keyword[1:]):\n return camel + _snake_underscore_re.sub(_underscore_upper, keyword[1:])\n return camel + keyword[1:]\n\ndef set_inputs(new_itk_object, args=[], kargs={}):\n \"\"\"Set the inputs of the given objects, according to the non named or the\n named parameters in args and kargs\n\n This function tries to assign all the non named parameters in the input of\n the new_itk_object\n - the first non named parameter in the first input, etc.\n\n The named parameters are used by calling the method with the same name\n prefixed by 'Set'.\n set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)\n\n This is the function use in the enhanced New() method to manage the inputs.\n It can be used to produce a similar behavior:\n\n def SetInputs(self, *args, **kargs):\n import itk\n itk.set_inputs(self, *args, **kargs)\n \"\"\"\n # try to get the images from the filters in args\n args = [output(arg) for arg in args]\n\n # args without name are filter used to set input image\n #\n # count SetInput calls to call SetInput, SetInput2, SetInput3, ...\n # useful with filter which take 2 input (or more) like SubtractImageFiler\n # Ex: subtract image2.png to image1.png and save the result in result.png\n # r1 = itk.ImageFileReader.US2.New(FileName='image1.png')\n # r2 = 
itk.ImageFileReader.US2.New(FileName='image2.png')\n # s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)\n # itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()\n try:\n for setInputNb, arg in enumerate(args):\n methodName = 'SetInput%i' % (setInputNb + 1)\n if methodName in dir(new_itk_object):\n # first try to use methods called SetInput1, SetInput2, ...\n # those method should have more chances to work in case of\n # multiple input types\n getattr(new_itk_object, methodName)(arg)\n else:\n # no method called SetInput?\n # try with the standard SetInput(nb, input)\n new_itk_object.SetInput(setInputNb, arg)\n except TypeError as e:\n # the exception have (at least) to possible reasons:\n # + the filter don't take the input number as first argument\n # + arg is an object of wrong type\n #\n # if it's not the first input, re-raise the exception\n if setInputNb != 0:\n raise e\n # it's the first input, try to use the SetInput() method without input\n # number\n new_itk_object.SetInput(args[0])\n # but raise an exception if there is more than 1 argument\n if len(args) > 1:\n raise TypeError('Object accepts only 1 input.')\n except AttributeError:\n # There is no SetInput() method, try SetImage\n # but before, check the number of inputs\n if len(args) > 1:\n raise TypeError('Object accepts only 1 input.')\n methodList = ['SetImage', 'SetInputImage']\n methodName = None\n for m in methodList:\n if m in dir(new_itk_object):\n methodName = m\n if methodName:\n getattr(new_itk_object, methodName)(args[0])\n else:\n raise AttributeError('No method found to set the input.')\n\n # named args : name is the function name, value is argument(s)\n for attribName, value in kargs.items():\n # use Set as prefix. 
It allow to use a shorter and more intuitive\n # call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than\n # with the full name\n # (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))\n if attribName not in [\"auto_progress\", \"template_parameters\"]:\n if attribName.islower():\n attribName = _snake_to_camel(attribName)\n attrib = getattr(new_itk_object, 'Set' + attribName)\n\n # Do not use try-except mechanism as this leads to\n # segfaults. Instead limit the number of types that are\n # tested. The list of tested type could maybe be replaced by\n # a test that would check for iterables.\n if type(value) in [list, tuple]:\n try:\n output_value = [output(x) for x in value]\n attrib(*output_value)\n except:\n attrib(output(value))\n else:\n attrib(output(value))\n\nclass templated_class:\n\n \"\"\"This class is used to mimic the behavior of the templated C++ classes.\n\n It is used this way:\n\n class CustomClass:\n # class definition here\n CustomClass = templated_class(CustomClass)\n\n customObject = CustomClass[template, parameters].New()\n\n The template parameters are passed to the custom class constructor as a\n named parameter 'template_parameters' in a tuple.\n\n The custom class may implement a static method\n check_template_parameters(parameters) which should raise an exception if\n the template parameters provided are not suitable to instantiate the custom\n class.\n \"\"\"\n\n def __init__(self, cls):\n \"\"\"cls is the custom class\n \"\"\"\n self.__cls__ = cls\n self.__templates__ = {}\n\n def New(self, *args, **kargs):\n \"\"\"Use the parameters to infer the types of the template parameters.\n \"\"\"\n # extract the types from the arguments to instantiate the class\n import itk\n types = tuple(itk.class_(o) for o in args)\n return self[types].New(*args, **kargs)\n\n def __getitem__(self, template_parameters):\n \"\"\"Return a pair class-template parameters ready to be instantiated.\n\n The template parameters may be validated if the 
custom class provide\n the static method check_template_parameters(parameters).\n \"\"\"\n if not isinstance(template_parameters, tuple):\n template_parameters = (template_parameters,)\n return (\n templated_class.__templated_class_and_parameters__(\n self,\n template_parameters)\n )\n\n def check_template_parameters(self, template_parameters):\n \"\"\"Check the template parameters passed in parameter.\n \"\"\"\n # this method is there mainly to make possible to reuse it in the\n # custom class constructor after having used templated_class().\n # Without that, the following example doesn't work:\n #\n # class CustomClass:\n # def __init__(self, *args, **kargs):\n # template_parameters = kargs[\"template_parameters\"]\n # CustomClass.check_template_parameters(template_parameters)\n # other init stuff\n # def check_template_parameters(template_parameters):\n # check, really\n # pass\n # CustomClass = templated_class(CustomClass)\n #\n self.__cls__.check_template_parameters(template_parameters)\n\n def add_template(self, name, params):\n if not isinstance(params, list) and not isinstance(params, tuple):\n params = (params,)\n params = tuple(params)\n val = self[params]\n self.__templates__[params] = val\n setattr(self, name, val)\n\n def add_image_templates(self, *args):\n import itk\n if args == []:\n return\n combinations = [[t] for t in args[0]]\n for types in args[1:]:\n temp = []\n for t in types:\n for c in combinations:\n temp.append(c + [t])\n combinations = temp\n for d in itk.DIMS:\n for c in combinations:\n parameters = []\n name = \"\"\n for t in c:\n parameters.append(itk.Image[t, d])\n name += \"I\" + t.short_name + str(d)\n self.add_template(name, tuple(parameters))\n\n class __templated_class_and_parameters__:\n\n \"\"\"Inner class used to store the pair class-template parameters ready\n to instantiate.\n \"\"\"\n\n def __init__(self, templated_class, template_parameters):\n self.__templated_class__ = templated_class\n self.__template_parameters__ = 
template_parameters\n if \"check_template_parameters\" in dir(templated_class.__cls__):\n templated_class.__cls__.check_template_parameters(\n template_parameters)\n\n def New(self, *args, **kargs):\n \"\"\"A New() method to mimic the ITK default behavior, even if the\n class doesn't provide any New() method.\n \"\"\"\n kargs[\"template_parameters\"] = self.__template_parameters__\n if \"New\" in dir(self.__templated_class__.__cls__):\n obj = self.__templated_class__.__cls__.New(*args, **kargs)\n else:\n obj = self.__templated_class__.__cls__(*args, **kargs)\n setattr(\n obj,\n \"__template_parameters__\",\n self.__template_parameters__)\n setattr(obj, \"__templated_class__\", self.__templated_class__)\n return obj\n\n def __call__(self, *args, **kargs):\n return self.New(*args, **kargs)\n\n def keys(self):\n return self.__templates__.keys()\n\n # everything after this comment is for dict interface\n # and is a copy/paste from DictMixin\n # only methods to edit dictionary are not there\n def __iter__(self):\n for k in self.keys():\n yield k\n\n def has_key(self, key):\n try:\n value = self[key]\n except KeyError:\n return False\n return True\n\n def __contains__(self, key):\n return key in self\n\n # third level takes advantage of second level definitions\n def iteritems(self):\n for k in self:\n yield (k, self[k])\n\n def iterkeys(self):\n return self.__iter__()\n\n # fourth level uses definitions from lower levels\n def itervalues(self):\n for _, v in self.iteritems():\n yield v\n\n def values(self):\n return [v for _, v in self.iteritems()]\n\n def items(self):\n return list(self.iteritems())\n\n def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default\n\n def __len__(self):\n return len(self.keys())\n\n\nclass pipeline:\n\n \"\"\"A convenient class to store the reference to the filters of a pipeline\n\n With this class, a method can create a pipeline of several filters and\n return it without losing the references to the 
filters in this pipeline.\n The pipeline object act almost like a filter (it has a GetOutput() method)\n and thus can be simply integrated in another pipeline.\n \"\"\"\n\n def __init__(self, *args, **kargs):\n self.clear()\n self.input = None\n set_inputs(self, args, kargs)\n\n def connect(self, filter):\n \"\"\"Connect a new filter to the pipeline\n\n The output of the first filter will be used as the input of this\n one and the filter passed as parameter will be added to the list\n \"\"\"\n if self.GetOutput() is not None:\n set_inputs(filter, [self.GetOutput()])\n self.append(filter)\n\n def append(self, filter):\n \"\"\"Add a new filter to the pipeline\n\n The new filter will not be connected. The user must connect it.\n \"\"\"\n self.filters.append(filter)\n\n def clear(self):\n \"\"\"Clear the filter list\n \"\"\"\n self.filters = []\n\n def GetOutput(self, index=0):\n \"\"\"Return the output of the pipeline\n\n If another output is needed, use\n pipeline.filters[-1].GetAnotherOutput() instead of this method,\n subclass pipeline to implement another GetOutput() method, or use\n expose()\n \"\"\"\n if len(self.filters) == 0:\n return self.GetInput()\n else:\n filter = self.filters[-1]\n if hasattr(filter, \"__getitem__\"):\n return filter[index]\n try:\n return filter.GetOutput(index)\n except:\n if index == 0:\n return filter.GetOutput()\n else:\n raise ValueError(\"Index can only be 0 on that object\")\n\n def GetNumberOfOutputs(self):\n \"\"\"Return the number of outputs\n \"\"\"\n if len(self.filters) == 0:\n return 1\n else:\n return self.filters[-1].GetNumberOfOutputs()\n\n def SetInput(self, input):\n \"\"\"Set the input of the pipeline\n \"\"\"\n if len(self.filters) != 0:\n set_inputs(self.filters[0], [input])\n self.input = input\n\n def GetInput(self):\n \"\"\"Get the input of the pipeline\n \"\"\"\n return self.input\n\n def Update(self):\n \"\"\"Update the pipeline\n \"\"\"\n if len(self.filters) > 0:\n return self.filters[-1].Update()\n\n def 
UpdateLargestPossibleRegion(self):\n \"\"\"Update the pipeline\n \"\"\"\n if len(self.filters) > 0:\n return self.filters[-1].UpdateLargestPossibleRegion()\n\n def UpdateOutputInformation(self):\n if \"UpdateOutputInformation\" in dir(self.filters[-1]):\n self.filters[-1].UpdateOutputInformation()\n else:\n self.Update()\n\n def __len__(self):\n return self.GetNumberOfOutputs()\n\n def __getitem__(self, item):\n return self.GetOutput(item)\n\n def __call__(self, *args, **kargs):\n set_inputs(self, args, kargs)\n self.UpdateLargestPossibleRegion()\n return self\n\n def expose(self, name, new_name=None, position=-1):\n \"\"\"Expose an attribute from a filter of the minipeline.\n\n Once called, the pipeline instance has a new Set/Get set of methods to\n access directly the corresponding method of one of the filter of the\n pipeline.\n Ex: p.expose( \"Radius\" )\n p.SetRadius( 5 )\n p.GetRadius( 5 )\n By default, the attribute usable on the pipeline instance has the same\n name than the one of the filter, but it can be changed by providing a\n value to new_name.\n The last filter of the pipeline is used by default, but another one may\n be used by giving its position.\n Ex: p.expose(\"Radius\", \"SmoothingNeighborhood\", 2)\n p.GetSmoothingNeighborhood()\n \"\"\"\n if new_name is None:\n new_name = name\n src = self.filters[position]\n ok = False\n set_name = \"Set\" + name\n if set_name in dir(src):\n setattr(self, \"Set\" + new_name, getattr(src, set_name))\n ok = True\n get_name = \"Get\" + name\n if get_name in dir(src):\n setattr(self, \"Get\" + new_name, getattr(src, get_name))\n ok = True\n if not ok:\n raise RuntimeError(\n \"No attribute %s at position %s.\" %\n (name, position))\n\n\nclass auto_pipeline(pipeline):\n current = None\n\n def __init__(self, *args, **kargs):\n pipeline.__init__(self, *args, **kargs)\n self.Start()\n\n def Start(self):\n auto_pipeline.current = self\n\n def Stop(self):\n auto_pipeline.current = None\n\n\ndef down_cast(obj):\n 
\"\"\"Down cast an itkLightObject (or a object of a subclass) to its most\n specialized type.\n \"\"\"\n import itk\n import itkTemplate\n className = obj.GetNameOfClass()\n t = getattr(itk, className)\n if isinstance(t, itkTemplate.itkTemplate):\n for c in t.values():\n try:\n return c.cast(obj)\n except:\n # fail silently for now\n pass\n raise RuntimeError(\n \"Can't downcast to a specialization of %s\" %\n className)\n else:\n return t.cast(obj)\n\n\ndef attribute_list(i, name):\n \"\"\"Returns a list of the specified attributes for the objects in the image.\n\n i: the input LabelImage\n name: the attribute name\n \"\"\"\n import itk\n i = itk.output(i)\n relabel = itk.StatisticsRelabelLabelMapFilter[i].New(\n i,\n Attribute=name,\n ReverseOrdering=True,\n InPlace=False)\n relabel.UpdateLargestPossibleRegion()\n r = relabel.GetOutput()\n l = []\n for i in range(1, r.GetNumberOfLabelObjects() + 1):\n l.append(r.GetLabelObject(i).__getattribute__(\"Get\" + name)())\n return l\n\n\ndef attributes_list(i, names):\n \"\"\"Returns a list of the specified attributes for the objects in the image.\n\n i: the input LabelImage\n name: the attribute name\n \"\"\"\n import itk\n i = itk.output(i)\n relabel = itk.StatisticsRelabelLabelMapFilter[i].New(\n i,\n Attribute=names[0],\n ReverseOrdering=True,\n InPlace=False)\n relabel.UpdateLargestPossibleRegion()\n r = relabel.GetOutput()\n l = []\n for i in range(1, r.GetNumberOfLabelObjects() + 1):\n attrs = []\n for name in names:\n attrs.append(r.GetLabelObject(i).__getattribute__(\"Get\" + name)())\n l.append(tuple(attrs))\n return l\n\n\ndef attribute_dict(i, name):\n \"\"\"Returns a dict with the attribute values in keys and a list of the\n corresponding objects in value\n\n i: the input LabelImage\n name: the name of the attribute\n \"\"\"\n import itk\n i = itk.output(i)\n relabel = itk.StatisticsRelabelLabelMapFilter[i].New(\n i,\n Attribute=name,\n ReverseOrdering=True,\n InPlace=False)\n 
relabel.UpdateLargestPossibleRegion()\n r = relabel.GetOutput()\n d = {}\n for i in range(1, r.GetNumberOfLabelObjects() + 1):\n lo = r.GetLabelObject(i)\n v = lo.__getattribute__(\"Get\" + name)()\n l = d.get(v, [])\n l.append(lo)\n d[v] = l\n return d\n\n\ndef number_of_objects(i):\n \"\"\"Returns the number of objets in the image.\n\n i: the input LabelImage\n \"\"\"\n import itk\n i.UpdateLargestPossibleRegion()\n i = itk.output(i)\n return i.GetNumberOfLabelObjects()\n\n\ndef ipython_kw_matches(text):\n \"\"\"Match named ITK object's named parameters\"\"\"\n import IPython\n import itk\n import re\n import inspect\n import itkTemplate\n regexp = re.compile(r'''\n '.*?' | # single quoted strings or\n \".*?\" | # double quoted strings or\n \\w+ | # identifier\n \\S # other characters\n ''', re.VERBOSE | re.DOTALL)\n ip = IPython.get_ipython()\n if \".\" in text: # a parameter cannot be dotted\n return []\n # 1. Find the nearest identifier that comes before an unclosed\n # parenthesis e.g. for \"foo (1+bar(x), pa\", the candidate is \"foo\".\n if ip.Completer.readline:\n textUntilCursor = ip.Completer.readline.get_line_buffer()[:ip.Completer.readline.get_endidx()]\n else:\n # IPython >= 5.0.0, which is based on the Python Prompt Toolkit\n textUntilCursor = ip.Completer.text_until_cursor\n\n tokens = regexp.findall(textUntilCursor)\n tokens.reverse()\n iterTokens = iter(tokens)\n openPar = 0\n for token in iterTokens:\n if token == ')':\n openPar -= 1\n elif token == '(':\n openPar += 1\n if openPar > 0:\n # found the last unclosed parenthesis\n break\n else:\n return []\n # 2. 
Concatenate dotted names (\"foo.bar\" for \"foo.bar(x, pa\" )\n ids = []\n isId = re.compile(r'\\w+$').match\n while True:\n try:\n ids.append(iterTokens.next())\n if not isId(ids[-1]):\n ids.pop()\n break\n if not iterTokens.next() == '.':\n break\n except StopIteration:\n break\n # lookup the candidate callable matches either using global_matches\n # or attr_matches for dotted names\n if len(ids) == 1:\n callableMatches = ip.Completer.global_matches(ids[0])\n else:\n callableMatches = ip.Completer.attr_matches('.'.join(ids[::-1]))\n argMatches = []\n for callableMatch in callableMatches:\n # drop the .New at this end, so we can search in the class members\n if callableMatch.endswith(\".New\"):\n callableMatch = callableMatch[:-4]\n elif not re.findall('([A-Z])', callableMatch): # True if snake case\n # Split at the last '.' occurence\n splitted = callableMatch.split('.')\n namespace = splitted[:-1]\n function_name = splitted[-1]\n # Find corresponding object name\n object_name = _snake_to_camel(function_name)\n # Check that this object actually exists\n try:\n objectCallableMatch = \".\".join(namespace + [object_name])\n eval(objectCallableMatch, ip.Completer.namespace)\n # Reconstruct full object name\n callableMatch = objectCallableMatch\n except AttributeError:\n # callableMatch is not a snake case function with a\n # corresponding object.\n pass\n try:\n object = eval(callableMatch, ip.Completer.namespace)\n if isinstance(object, itkTemplate.itkTemplate):\n # this is a template - lets grab the first entry to search for\n # the methods\n object = object.values()[0]\n namedArgs = []\n isin = isinstance(object, itk.LightObject)\n if inspect.isclass(object):\n issub = issubclass(object, itk.LightObject)\n if isin or (inspect.isclass(object) and issub):\n namedArgs = [n[3:] for n in dir(object) if n.startswith(\"Set\")]\n except Exception as e:\n print(e)\n continue\n for namedArg in namedArgs:\n if namedArg.startswith(text):\n argMatches.append(u\"%s=\" % 
namedArg)\n return argMatches\n\n# install progress callback and custom completer if we are in ipython\n# interpreter\ntry:\n import itkConfig\n import IPython\n if IPython.get_ipython():\n IPython.get_ipython().Completer.matchers.insert(0, ipython_kw_matches)\n # some cleanup\n del itkConfig, IPython\nexcept (ImportError, AttributeError):\n # fail silently\n pass\n" ]
[ [ "numpy.issubdtype" ] ]
udibr/LRE
[ "2571ba133ec8ac276e36074915bfa7d2113e5baa" ]
[ "nn.py" ]
[ "from collections import OrderedDict\nimport logging\n\nimport scipy\nimport numpy as np\nfrom theano import tensor\nfrom theano.tensor.signal.downsample import max_pool_2d, DownsampleFactorMax\n\nfrom blocks.extensions import SimpleExtension\nfrom blocks.extensions.monitoring import (DataStreamMonitoring,\n MonitoringExtension)\nfrom blocks.filter import VariableFilter\nfrom blocks.graph import ComputationGraph\nfrom blocks.monitoring.evaluators import DatasetEvaluator\nfrom blocks.roles import AuxiliaryRole\n\nlogger = logging.getLogger('main.nn')\n\n\nclass BnParamRole(AuxiliaryRole):\n pass\n\n# Batch normalization parameters that have to be replaced when testing\nBNPARAM = BnParamRole()\n\n\nclass ZCA(object):\n def __init__(self, n_components=None, data=None, filter_bias=0.1):\n self.filter_bias = np.float32(filter_bias)\n self.P = None\n self.P_inv = None\n self.n_components = 0\n self.is_fit = False\n if n_components and data:\n self.fit(n_components, data)\n\n def fit(self, n_components, data):\n if len(data.shape) == 2:\n self.reshape = None\n else:\n assert n_components == np.product(data.shape[1:]), \\\n 'ZCA whitening components should be %d for convolutional data'\\\n % np.product(data.shape[1:])\n self.reshape = data.shape[1:]\n\n data = self._flatten_data(data)\n assert len(data.shape) == 2\n n, m = data.shape\n self.mean = np.mean(data, axis=0)\n\n bias = self.filter_bias * scipy.sparse.identity(m, 'float32')\n cov = np.cov(data, rowvar=0, bias=1) + bias\n eigs, eigv = scipy.linalg.eigh(cov)\n\n assert not np.isnan(eigs).any()\n assert not np.isnan(eigv).any()\n assert eigs.min() > 0\n\n if self.n_components:\n eigs = eigs[-self.n_components:]\n eigv = eigv[:, -self.n_components:]\n\n sqrt_eigs = np.sqrt(eigs)\n self.P = np.dot(eigv * (1.0 / sqrt_eigs), eigv.T)\n assert not np.isnan(self.P).any()\n self.P_inv = np.dot(eigv * sqrt_eigs, eigv.T)\n\n self.P = np.float32(self.P)\n self.P_inv = np.float32(self.P_inv)\n\n self.is_fit = True\n\n def 
apply(self, data, remove_mean=True):\n data = self._flatten_data(data)\n d = data - self.mean if remove_mean else data\n return self._reshape_data(np.dot(d, self.P))\n\n def inv(self, data, add_mean=True):\n d = np.dot(self._flatten_data(data), self.P_inv)\n d += self.mean if add_mean else 0.\n return self._reshape_data(d)\n\n def _flatten_data(self, data):\n if self.reshape is None:\n return data\n assert data.shape[1:] == self.reshape\n return data.reshape(data.shape[0], np.product(data.shape[1:]))\n\n def _reshape_data(self, data):\n assert len(data.shape) == 2\n if self.reshape is None:\n return data\n return np.reshape(data, (data.shape[0],) + self.reshape)\n\n\nclass ContrastNorm(object):\n def __init__(self, scale=55, epsilon=1e-8):\n self.scale = np.float32(scale)\n self.epsilon = np.float32(epsilon)\n\n def apply(self, data, copy=False):\n if copy:\n data = np.copy(data)\n data_shape = data.shape\n if len(data.shape) > 2:\n data = data.reshape(data.shape[0], np.product(data.shape[1:]))\n\n assert len(data.shape) == 2, 'Contrast norm on flattened data'\n\n data -= data.mean(axis=1)[:, np.newaxis]\n\n norms = np.sqrt(np.sum(data ** 2, axis=1)) / self.scale\n norms[norms < self.epsilon] = np.float32(1.)\n\n data /= norms[:, np.newaxis]\n\n if data_shape != data.shape:\n data = data.reshape(data_shape)\n\n return data\n\n\nclass TestMonitoring(object):\n def _get_bn_params(self, output_vars):\n # Pick out the nodes with batch normalization vars\n cg = ComputationGraph(output_vars)\n var_filter = VariableFilter(roles=[BNPARAM])\n bn_ps = var_filter(cg.variables)\n\n if len(bn_ps) == 0:\n logger.warn('No batch normalization parameters found - is' +\n ' batch normalization turned off?')\n self._bn = False\n self._counter = None\n self._counter_max = None\n bn_share = []\n output_vars_replaced = output_vars\n else:\n self._bn = True\n assert len(set([p.name for p in bn_ps])) == len(bn_ps), \\\n 'Some batch norm params have the same name'\n logger.info('Batch norm 
parameters: %s' % ', '.join([p.name for p in bn_ps]))\n\n # Filter out the shared variables from the model updates\n def filter_share(par):\n lst = [up for up in cg.updates if up.name == 'shared_%s' % par.name]\n assert len(lst) == 1\n return lst[0]\n bn_share = map(filter_share, bn_ps)\n\n # Replace the BN coefficients in the test data model - Replace the\n # theano variables in the test graph with the shareds\n output_vars_replaced = cg.replace(zip(bn_ps, bn_share)).outputs\n\n # Pick out the counter\n self._counter = self._param_from_updates(cg.updates, 'counter')\n self._counter_max = self._param_from_updates(cg.updates, 'counter_max')\n\n return bn_ps, bn_share, output_vars_replaced\n\n def _param_from_updates(self, updates, p_name):\n var_filter = VariableFilter(roles=[BNPARAM])\n bn_ps = var_filter(updates.keys())\n p = [p for p in bn_ps if p.name == p_name]\n assert len(p) == 1, 'No %s of more than one %s' % (p_name, p_name)\n return p[0]\n\n def reset_counter(self):\n if self._bn:\n self._counter.set_value(np.float32(1))\n\n def replicate_vars(self, output_vars):\n # Problem in Blocks with multiple monitors monitoring the\n # same value in a graph. Therefore, they are all \"replicated\" to a new\n # Theano variable\n if isinstance(output_vars, (list, tuple)):\n return map(self.replicate_vars, output_vars)\n assert not hasattr(output_vars.tag, 'aggregation_scheme'), \\\n 'The variable %s already has an aggregator ' % output_vars.name + \\\n 'assigned to it - are you using a datasetmonitor with the same' + \\\n ' variable as output? 
This might cause trouble in Blocks'\n new_var = 1 * output_vars\n new_var.name = output_vars.name\n return new_var\n\n\nclass ApproxTestMonitoring(DataStreamMonitoring, TestMonitoring):\n def __init__(self, output_vars, *args, **kwargs):\n output_vars = self.replicate_vars(output_vars)\n _, _, replaced_vars = self._get_bn_params(output_vars)\n super(ApproxTestMonitoring, self).__init__(replaced_vars, *args,\n **kwargs)\n\n def do(self, which_callback, *args, **kwargs):\n assert not which_callback == \"after_batch\", \"Do not monitor each mb\"\n self.reset_counter()\n super(ApproxTestMonitoring, self).do(which_callback, *args, **kwargs)\n\n\nclass FinalTestMonitoring(SimpleExtension, MonitoringExtension, TestMonitoring):\n \"\"\"Monitors validation and test set data with batch norm\n\n Calculates the training set statistics for batch normalization and adds\n them to the model before calculating the validation and test set values.\n This is done in two steps: First the training set is iterated and the\n statistics are saved in shared variables, then the model iterates through\n the test/validation set using the saved shared variables.\n When the training set is iterated, it is done for the full set, layer by\n layer so that the statistics are correct. 
This is expensive for very deep\n models, in which case some approximation could be in order\n \"\"\"\n def __init__(self, output_vars, train_data_stream, test_data_stream,\n **kwargs):\n output_vars = self.replicate_vars(output_vars)\n super(FinalTestMonitoring, self).__init__(**kwargs)\n self.trn_stream = train_data_stream\n self.tst_stream = test_data_stream\n\n bn_ps, bn_share, output_vars_replaced = self._get_bn_params(output_vars)\n\n if self._bn:\n updates = self._get_updates(bn_ps, bn_share)\n trn_evaluator = DatasetEvaluator(bn_ps, updates=updates)\n else:\n trn_evaluator = None\n\n self._trn_evaluator = trn_evaluator\n self._tst_evaluator = DatasetEvaluator(output_vars_replaced)\n\n def _get_updates(self, bn_ps, bn_share):\n cg = ComputationGraph(bn_ps)\n # Only store updates that relate to params or the counter\n updates = OrderedDict([(up, cg.updates[up]) for up in\n cg.updates if up.name == 'counter' or\n up in bn_share])\n assert self._counter == self._param_from_updates(cg.updates, 'counter')\n assert self._counter_max == self._param_from_updates(cg.updates,\n 'counter_max')\n assert len(updates) == len(bn_ps) + 1, \\\n 'Counter or var missing from update'\n return updates\n\n def do(self, which_callback, *args):\n \"\"\"Write the values of monitored variables to the log.\"\"\"\n assert not which_callback == \"after_batch\", \"Do not monitor each mb\"\n # Run on train data and get the statistics\n if self._bn:\n self._counter_max.set_value(np.float32(np.inf))\n self.reset_counter()\n self._trn_evaluator.evaluate(self.trn_stream)\n self.reset_counter()\n\n value_dict = self._tst_evaluator.evaluate(self.tst_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n\n\nclass LRDecay(SimpleExtension):\n def __init__(self, lr, decay_first, decay_last, lrmin=0., **kwargs):\n super(LRDecay, self).__init__(**kwargs)\n self.iter = 0\n self.decay_first = decay_first\n self.decay_last = decay_last\n self.lr = lr\n self.lrmin = lrmin\n self.lr_init = 
lr.get_value()\n\n def do(self, which_callback, *args):\n self.iter += 1\n if self.iter > self.decay_first:\n ratio = 1.0 * (self.decay_last - self.iter)\n ratio = np.maximum(0, ratio / (self.decay_last - self.decay_first + 1e-6))\n self.lr.set_value(np.float32(ratio * (self.lr_init - self.lrmin) + self.lrmin))\n logger.info(\"Iter %d, lr %f\" % (self.iter, self.lr.get_value()))\n\n\ndef global_meanpool_2d(x, num_filters):\n mean = tensor.mean(x.flatten(3), axis=2)\n mean = mean.dimshuffle(0, 1, 'x', 'x')\n return mean, (num_filters, 1, 1)\n\n\ndef pool_2d(x, mode=\"average\", ws=(2, 2), stride=(2, 2)):\n import theano.sandbox.cuda as cuda\n assert cuda.dnn.dnn_available()\n return cuda.dnn.dnn_pool(x, ws=ws, stride=stride, mode=mode)\n\n\ndef maxpool_2d(z, in_dim, poolsize, poolstride):\n z = max_pool_2d(z, ds=poolsize, st=poolstride)\n output_size = tuple(DownsampleFactorMax.out_shape(in_dim, poolsize,\n st=poolstride))\n return z, output_size\n" ]
[ [ "numpy.dot", "numpy.product", "numpy.maximum", "numpy.sqrt", "numpy.reshape", "numpy.isnan", "scipy.linalg.eigh", "numpy.copy", "scipy.sparse.identity", "numpy.mean", "numpy.cov", "numpy.float32", "numpy.sum" ] ]
alexfikl/arraycontext
[ "fe059e66e90c159029caaaa3e6df4fe3ad9fd9da" ]
[ "test/test_utils.py" ]
[ "\"\"\"Testing for internal utilities.\"\"\"\n\n\n__copyright__ = \"Copyright (C) 2021 University of Illinois Board of Trustees\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\nimport pytest\n\nimport numpy as np\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n# {{{ test_pt_actx_key_stringification_uniqueness\n\ndef test_pt_actx_key_stringification_uniqueness():\n from arraycontext.impl.pytato.compile import _ary_container_key_stringifier\n\n assert (_ary_container_key_stringifier(((3, 2), 3))\n != _ary_container_key_stringifier((3, (2, 3))))\n\n assert (_ary_container_key_stringifier((\"tup\", 3, \"endtup\"))\n != _ary_container_key_stringifier(((3,),)))\n\n# }}}\n\n\n# {{{ test_dataclass_array_container\n\ndef test_dataclass_array_container():\n from typing import Optional\n from dataclasses import dataclass, field\n from arraycontext import dataclass_array_container\n\n # {{{ string fields\n\n @dataclass\n class 
ArrayContainerWithStringTypes:\n x: np.ndarray\n y: \"np.ndarray\"\n\n with pytest.raises(TypeError):\n # NOTE: cannot have string annotations in container\n dataclass_array_container(ArrayContainerWithStringTypes)\n\n # }}}\n\n # {{{ optional fields\n\n @dataclass\n class ArrayContainerWithOptional:\n x: np.ndarray\n y: Optional[np.ndarray]\n\n with pytest.raises(TypeError):\n # NOTE: cannot have wrapped annotations (here by `Optional`)\n dataclass_array_container(ArrayContainerWithOptional)\n\n # }}}\n\n # {{{ field(init=False)\n\n @dataclass\n class ArrayContainerWithInitFalse:\n x: np.ndarray\n y: np.ndarray = field(default=np.zeros(42), init=False, repr=False)\n\n with pytest.raises(ValueError):\n # NOTE: init=False fields are not allowed\n dataclass_array_container(ArrayContainerWithInitFalse)\n\n # }}}\n\n# }}}\n\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n pytest.main([__file__])\n\n# vim: fdm=marker\n" ]
[ [ "numpy.zeros" ] ]
carlos-ramos-mv/laboratoriouno
[ "e057b1fb1a747f619c7ad45d1d5c5d77c751bd7a" ]
[ "backup/shellManager.py" ]
[ "import sys, re, subprocess, os\nfrom datetime import datetime\nimport pandas as pd\n\nclass LogsHandler():\n def __init__(self, path=\"/var/log/VirtualLab\"):\n self.file = os.path.join(self.file, \"VirtualLab-\" + self.getTime() + \".log\")\n # If path does not exist, make it\n if not os.path.isdir(path):\n os.mkdir(path)\n # if logs file does not exist, create it\n if not os.path.exists(self.file):\n with open(self.file, \"x\") as f:\n f.write(\"Virtual Lab logs\")\n\n\n def addLog(self, log, statusCode=None):\n \"\"\"\n Adds `log` string into log file with datetime.\n Params:\n log: str\n Log to be saved into file.\n statusCode : int, str (optional)\n Status code to record into this log.\n \"\"\"\n with open(self.file, 'a') as f:\n statusCode = f\" {statusCode}\" if statusCode else \"\"\n f.write(f\"{self.getTime(datetime=True)}{statusCode} {log}\")\n\n\n def getTime(self, datetime=False):\n \"\"\"\n Returns the date (or datetime) from now as a formatted string [d-m-Y] or [d-m-Y H:M:S].\n Params:\n datetime: bool\n If format must require time (hours, minutes and seconds).\n \"\"\"\n return datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\" if datetime else \"%d-%m-%Y\")\n\n\n\ndef checkArgs(args, av):\n matches = {}\n argKey = None;\n for arg in args:\n if argKey:\n matches[argKey] = arg\n argKey = None\n continue\n for argK, argVals in av.items():\n if arg in argVals:\n argKey = argK\n return matches\n\n\nclass CommitHistory():\n def __init__(self, path, historyFile=\"backupCommits.csv\"):\n # Check if file name is csv\n if historyFile.split(\".\")[-1].lower() != \"csv\":\n raise ValueError(\"Invalid file given. Must be a csv file\")\n self.file = os.path.join(path, historyFile)\n # if file does not exist, create it and fille with headers\n if not os.path.exists(self.file):\n print(\"File does not exist. 
Creating it!\")\n self.createFile()\n self.history = pd.read_csv(self.file)\n self.logs = LogsHandler() # where to write logs\n\n\n def createFile(self, headers=[\"Datetime\", \"Hash\", \"Type\"]):\n \"\"\"\n Create file (csv) and set `headers`.\n Params:\n headers: iter (list, array)\n Headers to be written into the new file.\n \"\"\"\n self.logs.addLog(\"Creating commit history file for the first time.\")\n with open(self.file, \"x\") as f:\n f.write(\",\".join(headers) + '\\n')\n\n\n def addHashToHistory(self, datetime, hash, type):\n \"\"\"\n Adds a new line to `historyFile` with `datetime`, `hash` and `type`.\n Params:\n datetime: str\n Commit's datetime.\n hash: str\n Commit's hash.\n type: str\n Commit's type (backup, recovery, etc.).\n \"\"\"\n self.logs.addLog(\"Adding hash to commit history\")\n f = open(self.file, \"a\")\n st = \",\".join([a.replace('\\n', '') for a in [datetime, hash, type]])\n f.write(f\"{st}\\n\")\n f.close()\n\n\n def hashFromCommitDate(self, date):\n \"\"\"\n Returns hash if a commit with the specified date exists. 
If date has format `%d-%m-%Y %H:%M`\n perfect match must be true, otherwise, first match for given date (in format `%d-%m-%Y`) will\n be returned.\n Params:\n date: str\n Date format string.\n Output:\n hash : str\n If a commit matches the given date, the commit hash will be returned,\n if not commit matches, returns `None`.\n \"\"\"\n patt = fr\"^{date}.*\"\n # read file and check if date is valid\n for i, commit in self.history.iterrows():\n if re.match(patt, commit[\"Datetime\"]):\n return commit[\"Hash\"]\n return None\n\n\n def hashExist(self, commitHash):\n \"\"\"\n Returns if hash exists in commits history.\n Params:\n commitHash : str\n Hash to query.\n Output:\n validHash: bool\n `True` if `commitHash` matches any commit, otherwise `False`.\n \"\"\"\n return any(self.history[\"Hash\"] == commitHash)\n\n\n#### Process handling ####\nclass ShellProcess():\n def __init__(self, dirPath=None):\n \"\"\"\n Let run commands inside shell inside a given directory.\n Args:\n dirPath : str (optional)\n Directory where all commands will be executed.\n \"\"\"\n self.path = dirPath\n self.baseCmd = f\"cd {self.path}; \" if self.path else \"\"\n self.baseRun = lambda cmd: subprocess.Popen(self.baseCmd + cmd, stdout=subprocess.PIPE,stdin=subprocess.PIPE, stderr = subprocess.PIPE, shell=True).communicate()\n\n\n def runCommandWithOutput(self, cmd, output, error):\n \"\"\"\n Run given command, raise errors or return if given desired\n output was given by process.\n Returns a tuple with re search results (bool) and the output\n returned by process.\n Args:\n cmd : str\n Command to be executed inside shell.\n output: raw str (regex pattern)\n Output to search on `cmd` output.\n error : str (default = UNKNOWN)\n Error key (from ErrorCodesHandler possibilities) to\n raise status code.\n Returns:\n out : tuple\n out[0] : bool\n If given output was found or not.\n out[1] : str\n Output recieved after `cmd` execution.\n \"\"\"\n out, err = self.baseRun(cmd)\n # if an error ocurred, 
raise corresponding status code\n if err:\n err = err.decode(\"utf-8\")\n self.logs.addLog(f\"Error: {err}\")\n errorHandler = ErrorCodesHandler() # instantiate new error handler\n errorHandler.raiseError(error, err) # raise errro type and error output\n patt = fr\"{output}\"\n out = out.decode('utf-8')\n return (re.search(patt, out, re.MULTILINE) != None, out)\n\n\n#### Error handling ####\nclass Error(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n\n\nclass ErrorCodesHandler():\n def __init__(self):\n self.ERROR_CODES = {\n \"OK\": 0,\n\n \"CMD ARGUMENTS\": 10,\n \"CMD INVALID DATE\": 11,\n\n \"PG DUMP\": 20,\n \"PG RESTORE\": 21,\n\n \"DB INVALID HASH\": 30,\n \"DB NOT SAVED\": 31,\n \"DB INVALID DATE\": 32,\n\n \"GIT REPOSITORY INVALID\": 40,\n \"GIT ADD\": 41,\n \"GIT COMMIT\": 42,\n \"GIT REVPARSE HASH\": 43,\n \"GIT REVERT\": 44,\n\n \"BACKUP FAILED\": 50,\n\n \"UNKNOWN\": 99\n }\n self.logs = LogsHandler() # where to write logs\n\n\n def raiseError(self, errorName, errorOutput=None):\n \"\"\"\n Display status code into console so user can read it. If `errorOutput` is\n given, display it after the status code. Write error raise into logs.\n Args:\n errorName : str\n Error name or key to find status code.\n errorOutput : str (optional)\n Extra ouptut to be displayed after the status code.\n \"\"\"\n # Get error code if exists, otherwise raise an exception\n try:\n code = self.ERROR_CODES[errorName]\n except:\n raise ValueError(f\"Invalid error name {errorName}. Not mathcing error code.\")\n print(code) # display status code\n # If some output is available, display it\n if errorOutput:\n print(errorOutput)\n self.logs.addLog(log=errorOutput, statusCode=code)\n sys.exit(code) # set exit code as status code\n" ]
[ [ "pandas.read_csv" ] ]
arpitvaghela/PySyft
[ "be534ad861dfddf2f314c5cb346b6b6319719ca3" ]
[ "packages/syft/src/syft/lib/torch/module.py" ]
[ "# stdlib\nimport ast\nfrom collections import OrderedDict\nimport copy\nimport inspect\nfrom itertools import islice\nimport os\nfrom pathlib import Path\nimport sys\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\n# third party\nimport torch\n\n# syft absolute\nimport syft as sy\nfrom syft.core.node.common.action.save_object_action import SaveObjectAction\nfrom syft.core.plan.plan_builder import ROOT_CLIENT\nfrom syft.core.plan.plan_builder import make_plan\nfrom syft.lib.python import _SyNone\n\n# syft relative\nfrom ...core.pointer.pointer import Pointer\nfrom ...generate_wrapper import GenerateWrapper\nfrom ...lib.util import full_name_with_qualname\nfrom ...logger import critical\nfrom ...logger import info\nfrom ...logger import traceback_and_raise\nfrom ...proto.lib.torch.module_pb2 import Module as Module_PB\nfrom ..python.collections import OrderedDict as SyOrderedDict\nfrom ..python.util import downcast\n\n# from ...core.node.common.service.auth import AuthorizationException\n\n\ndef repr_to_kwargs(repr_str: str) -> Tuple[List[Any], Dict[Any, Any]]:\n # for example: repr_str = Conv2d(...).extra_repr()\n # produces: > str(\"1, 32, kernel_size=(3, 3), stride=(1, 1)\")\n # then we just have to split it into args and kwargs\n # using ast.literal_eval we can use python to give us the real primitive types\n # from the strings in a safe way\n # str(\"1 \") becomes int(1)\n # str(\"(1, 2) \") becomes tuple(1, 2)\n args: List[Any] = []\n kwargs: Dict[Any, Any] = {}\n parts = repr_str.split(\",\")\n\n # tuples are split by commas as well, so we will keep a tab on open parentheses\n # then concat with \",\" until we find a close parentheses\n # TODO: make work nested with a count and add tests\n para_open = False\n buffer = \"\"\n for part in parts:\n try:\n if \"(\" in part:\n para_open = True\n 
buffer = \"\"\n if para_open is True:\n buffer += part + \",\"\n if \")\" in part:\n # remove trailing ,\n part = buffer[:-1]\n buffer = \"\"\n para_open = False\n else:\n continue\n\n string = part.strip()\n if \"=\" not in string:\n # its an arg\n arg = ast.literal_eval(string)\n args.append(arg)\n else:\n # its a kwarg\n kv = string.split(\"=\")\n key = str(kv[0])\n string = kv[1].strip()\n value = ast.literal_eval(string)\n kwargs[key.strip()] = value\n except Exception as e:\n info(f\"ast.literal_eval failed to parse part: {string}. {e}\")\n\n return (args, kwargs)\n\n\nclass Module:\n \"\"\"\n This is our equivalent of torch.nn.Module and aims to have the same external\n interface. We will need to support both torch Modules and Module Pointers.\n \"\"\"\n\n def __init__(self, torch_ref: Any) -> None:\n self.setup(torch_ref=torch_ref)\n\n def setup(self, torch_ref: Any) -> None:\n # the remote torch means the model is remote\n self.remote_model: Optional[\"Module\"] = None\n self.local_model: Optional[\"Module\"] = None\n self.duet = None\n if \"syft\" in full_name_with_qualname(klass=type(torch_ref)):\n info(\"> Creating remote model\")\n self.is_local = False\n else:\n # otherwise we have a local model\n info(\"> Creating local model\")\n self.is_local = True\n\n self.torch_ref = torch_ref\n self.training = False\n self._modules: OrderedDict[str, Module] = OrderedDict()\n real_module = torch_ref.nn.Module()\n self.__dict__[\"real_module\"] = real_module # bypass getattr/setattr\n # if issubclass(type(real_module), Pointer):\n # try:\n # # TODO: this needs fixing but should be on by default for now\n # # https://github.com/OpenMined/PySyft/issues/5242\n # real_module.searchable = True\n # except AuthorizationException as e:\n # print(f\"Cant make real_module searchable. 
{e}\")\n\n def __setattr__(self, name: str, value: Union[Any, \"Module\"]) -> None:\n # this is how we catch the modules being set during subclass init\n # bug where torch.nn.modules isn't the full name on some imports\n # TODO: fix this properly\n # third party\n import torch\n\n if \"torch.nn\" in full_name_with_qualname(klass=type(value)) or isinstance(\n value, torch.nn.Module\n ):\n modules = self.__dict__.get(\"_modules\")\n if modules is not None:\n modules[name] = value\n\n # attach all the sub modules to a real module so that we can have a\n # remote module pointer that acts like a real model\n real_module: Optional[OrderedDict] = self.__dict__.get(\"real_module\")\n if real_module is not None:\n real_module.add_module(name, value) # type: ignore\n else:\n object.__setattr__(self, name, value)\n\n def __getattr__(self, name: str) -> Union[Any, \"Module\"]:\n modules: Optional[OrderedDict] = self.__dict__.get(\"_modules\")\n if modules is not None:\n if name in modules:\n return modules[name]\n\n return object.__getattribute__(self, name)\n\n def train(self, mode: bool = True) -> \"Module\":\n self.training = mode\n for _, module in self.modules.items():\n module.train(mode)\n return self\n\n def eval(self) -> \"Module\":\n return self.train(False)\n\n def __call__(\n self, *args: Union[List[Any], Tuple[Any, ...]], **kwargs: Dict[Any, Any]\n ) -> Any:\n return self.forward(*args, **kwargs)\n\n @property\n def modules(self) -> OrderedDict:\n modules = self.__dict__.get(\"_modules\")\n if modules is not None:\n return modules\n return OrderedDict()\n\n # local list of remote ListPointers of TensorPointers\n def parameters(self, recurse: bool = True) -> Optional[List[Any]]:\n params_list: Optional[List[Any]] = None\n\n if self.is_local is True:\n # we are local so use normal torch params\n params_list = []\n for _, module in self.modules.items():\n params = module.parameters(recurse)\n if params_list is None:\n # only on remote create a remote list so we can 
concat the param list\n # pointers without having to actually get them\n self.duet = params.client\n params_list = self.duet.syft.lib.python.List() # type: ignore\n # either way lets concat them until we have a big list of parameters\n params_list += params\n return params_list\n\n def cuda(self, device: Any) -> \"Module\":\n for _, module in self.modules.items():\n module.cuda(device)\n return self\n\n def cpu(self) -> \"Module\":\n for _, module in self.modules.items():\n module.cpu()\n return self\n\n def load_state_dict(self, input: Union[str, os.PathLike, Dict[str, Any]]) -> None:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return None\n\n state_dict = {}\n if isinstance(input, (str, os.PathLike)):\n with open(Path(input), \"rb\") as f:\n state_dict = torch.load(f)\n else:\n state_dict = dict(input)\n\n if not issubclass(type(state_dict), dict):\n traceback_and_raise(\n f\" Invalid input: {type(input)}. \"\n + \"Try inputting a state_dict or .pth file.\"\n )\n\n info(\"> Loading model weights\")\n layers: Dict[str, Any] = {}\n for save_key, values in state_dict.items():\n parts = save_key.split(\".\")\n if len(parts) < 2:\n info(f\" state dict key is too short: {save_key}\")\n continue\n layer = parts[0]\n attr = parts[1]\n if layer not in layers:\n layers[layer] = {}\n layers[layer][attr] = values\n\n for layer, sd in layers.items():\n local_layer = getattr(self, layer, None)\n if local_layer is not None and hasattr(local_layer, \"load_state_dict\"):\n d = local_layer.load_state_dict(sd)\n info(f\" {layer} state dict loaded with: {d}\")\n else:\n info(f\" Model doesnt have layer {layer}\")\n\n info(\"> Finished loading weights\")\n return None\n\n def state_dict(self) -> Optional[Dict[str, Any]]:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return None\n\n info(\"> Saving model weights\")\n model_state_dict = OrderedDict()\n for name, module in self.modules.items():\n if 
hasattr(module, \"state_dict\"):\n for k, v in module.state_dict().items():\n save_key = f\"{name}.{k}\"\n model_state_dict[save_key] = v\n\n info(\"> Finished saving weights\")\n return model_state_dict\n\n def save(self, path: Union[str, bytes, os.PathLike]) -> None:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return\n\n state_dict = self.state_dict()\n torch.save(state_dict, path)\n\n def load(self, path: Union[str, os.PathLike]) -> None:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return\n\n self.load_state_dict(input=path)\n\n def send(self, client: Any, send_parameters: bool = True) -> Any:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return\n\n info(\"> Sending local model\")\n\n remote_model = copy.copy(self)\n remote_model.setup(torch_ref=client.torch)\n remote_model.duet = client\n\n for name, module in self.modules.items():\n fqn = full_name_with_qualname(klass=type(module))\n klass = client.lib_ast.query(fqn, obj_type=type(module))\n module_repr = module.extra_repr()\n args, kwargs = repr_to_kwargs(repr_str=module_repr)\n remote_module_ptr = klass(*args, **kwargs)\n remote_model.__setattr__(name, remote_module_ptr)\n\n # if the remote module has state_dict lets get it\n if (\n send_parameters\n and hasattr(module, \"state_dict\")\n and hasattr(remote_module_ptr, \"load_state_dict\")\n ):\n local_state_ord_dict = module.state_dict()\n # cast to dict because OrderedDict is not supported\n\n # get a blocking copy of the state_dict\n info(f\" Sending local layer: {name}\")\n # cant import Dict / PrimitiveFactory due to circular imports\n remote_state_dict_ptr = client.syft.lib.python.Dict(\n dict(local_state_ord_dict)\n )\n # iterate through the key, values\n # weights and biases should be in there\n remote_module_ptr.load_state_dict(remote_state_dict_ptr)\n\n info(\"\\n> Finished sending local model <\\n\\n\")\n self.remote_model = 
remote_model\n return self.remote_model\n\n def get(\n self,\n request_block: bool = False,\n timeout_secs: int = 20,\n reason: str = \"\",\n delete_obj: bool = False,\n ) -> Optional[\"Module\"]:\n\n if self.is_local:\n info(\"> This model is local. Maybe you meant to call .send()?\")\n return None\n\n info(\"> Downloading remote model\")\n\n local_model = copy.copy(self)\n local_model.setup(torch_ref=torch)\n local_model.duet = self.duet\n\n for layer_name, module in self.modules.items():\n module_parts = module.path_and_name.split(\".\")\n klass_name = module_parts.pop()\n klass = getattr(sys.modules[\".\".join(module_parts)], klass_name)\n repr_ptr = module.extra_repr()\n\n module_repr = repr_ptr.get(\n request_block=request_block,\n reason=reason,\n timeout_secs=timeout_secs,\n )\n\n if module_repr is None:\n info(f\" Request for {reason} extra_repr failed, skipping layer\")\n continue\n\n args, kwargs = repr_to_kwargs(repr_str=module_repr.upcast())\n local_module = klass(*args, **kwargs)\n\n # the local real module has been set on the sy module\n local_model.__setattr__(layer_name, local_module)\n\n try:\n # if the remote module has state_dict lets get it\n if hasattr(module, \"state_dict\") and hasattr(\n local_module, \"load_state_dict\"\n ):\n info(\"loading remote state dict\")\n sd_ptr = module.state_dict()\n # get a blocking copy of the state_dict\n info(f\" Downloading remote layer: {layer_name}\")\n state_dict = sd_ptr.get(\n request_block=request_block,\n reason=reason,\n timeout_secs=timeout_secs,\n delete_obj=delete_obj,\n )\n # We have to recreate the OrderedDict for load_state_dict to work\n ordered_state_dict = OrderedDict()\n for elem, item in state_dict.items():\n ordered_state_dict[str(elem)] = item\n # iterate through the key, values\n # weights and biases should be in there\n if state_dict is not None:\n # TODO: support torch.nn.modules.module._IncompatibleKeys\n local_module.load_state_dict(ordered_state_dict)\n else:\n info(\n f\" Failed 
to get {layer_name} state_dict, skipping layer.\"\n )\n\n except Exception as e:\n critical(f\" Failed to download remote state for {layer_name}.\")\n traceback_and_raise(e)\n\n info(\"\\n> Finished downloading remote model <\\n\\n\")\n self.local_model = local_model\n return self.local_model\n\n # zero them so we know they are copied\n def zero_layers(self) -> None:\n for m in self.modules.values():\n if hasattr(m, \"weight\"):\n m.weight.requires_grad_(False).zero_()\n if hasattr(m, \"bias\"):\n m.bias.requires_grad_(False).zero_()\n\n # easy way to check the weights have changed\n def debug_sum_layers(self) -> None:\n info(\"> Summing layers for debugging: \")\n for n, m in self.modules.items():\n if hasattr(m, \"state_dict\"):\n if self.is_local:\n state_dict = m.state_dict()\n else:\n state_dict = m.state_dict().get()\n\n for k, v in state_dict.items():\n if hasattr(v, \"sum\"):\n s = v.sum().item()\n info(f\" Layer {n} sum({k}): {s}\")\n\n\ndef object2proto(obj: torch.nn.Module, is_child: bool = False) -> Module_PB:\n proto = Module_PB()\n if \"torch.nn.\" in type(obj).__module__:\n proto.module_type = type(obj).__name__\n else:\n proto.module_type = f\"_USER_DEFINED_MODULE_{type(obj).__name__}\"\n proto.forward.CopyFrom(sy.serialize(obj._forward_plan))\n\n proto.module_repr = obj.extra_repr()\n\n if hasattr(obj, \"_uid2attr\"):\n proto._uid2attr.CopyFrom(sy.serialize(SyOrderedDict(obj._uid2attr)))\n\n proto.parameters.CopyFrom(sy.serialize(SyOrderedDict(obj._parameters)))\n\n for n, m in obj.named_children():\n child_proto = object2proto(m, is_child=True)\n child_proto.module_name = n\n proto.children.append(child_proto)\n\n return proto\n\n\ndef proto2object(proto: Module_PB) -> torch.nn.Module:\n is_userdefined = proto.module_type.startswith(\"_USER_DEFINED_MODULE_\")\n\n if is_userdefined:\n obj_type = type(\n proto.module_type.replace(\"_USER_DEFINED_MODULE_\", \"\"),\n (torch.nn.Module,),\n {},\n )\n else:\n obj_type = getattr(torch.nn, 
proto.module_type)\n\n args, kwargs = repr_to_kwargs(repr_str=proto.module_repr)\n obj = obj_type(*args, **kwargs)\n\n for name, param in sy.deserialize(proto.parameters).items():\n # if we don't do this check, some torch.nn layers fail ( e.g. Conv2D with bias=False)\n if not isinstance(param, _SyNone):\n setattr(obj, str(name), param)\n\n if proto.HasField(\"forward\"):\n forward_plan = sy.deserialize(proto.forward)\n obj._forward_plan = forward_plan\n compile_and_forward = create_compile_and_forward_fn(obj)\n obj.__call__ = compile_and_forward\n obj.forward = compile_and_forward\n # obj.__call__ = forward_plan\n # obj.forward = forward_plan\n\n for child_proto in proto.children:\n setattr(obj, str(child_proto.module_name), sy.deserialize(child_proto))\n\n if proto.HasField(\"_uid2attr\"):\n obj._uid2attr = sy.deserialize(proto._uid2attr)\n\n if is_userdefined:\n recompile(obj)\n\n return obj\n\n\ndef create_compile_and_forward_fn(obj: \"SyModule\") -> Callable:\n \"\"\"Wraps a forward plan in a function that first recompiles the plan, and then\n executes the plan\n\n Args:\n obj (SyModule): the SyModule\n \"\"\"\n\n def _compile_and_forward(*args, **kwargs): # type: ignore\n recompile(obj)\n return obj._forward_plan(*args, **kwargs)\n\n return _compile_and_forward\n\n\ndef recompile(sy_module: \"SyModule\") -> None:\n \"\"\"Recompiles the forward plan, if the object state has changed since the\n forward plan was created, we update the plan here\n\n Args:\n sy_module (SyModule): the module to compile\n \"\"\"\n if hasattr(sy_module, \"_forward_plan\"):\n for action in sy_module._forward_plan.actions: # type: ignore\n if (\n isinstance(action, SaveObjectAction)\n and action.obj.id in sy_module._uid2attr\n ):\n action.obj.data = getattr(\n sy_module, str(sy_module._uid2attr[action.obj.id])\n )\n\n\nGenerateWrapper(\n wrapped_type=torch.nn.Module,\n import_path=\"torch.nn.Module\",\n protobuf_scheme=Module_PB,\n type_object2proto=object2proto,\n 
type_proto2object=proto2object,\n)\n\n\nclass ForwardToPlanConverter(type):\n \"\"\"This metaclass ensures that:\n 1) the object is initialized when calling Object()\n 2) obj._make_forward_plan() is called after initialization\n \"\"\"\n\n def __call__(cls: Any, *args, **kwargs) -> Any: # type: ignore\n # TODO: check if contains input_size\n obj = type.__call__(cls, *args, **kwargs)\n obj._make_forward_plan()\n return obj\n\n\nclass SyModule(torch.nn.Module, metaclass=ForwardToPlanConverter):\n \"\"\"A `SyModule` is the pointable equivalent of a torch.nn.Module. In order to make\n SyModule remotely executable, its `.forward` method is converted into a `Plan` object\n when initializing a `SyModule` object. This object has two \"modes\", in which it behaves\n differently. During the \"forward plan building stage\" it transforms parameters and submodules\n into pointer when the user retrieves them. After plan building the model behaves more\n like a regular torch.nn.Module, but instead of running a forward method, the user executes\n a `Plan`. As the user does not need to understand the building stage, and the .forward API\n is fairly similar to a regular torch.nn.Module, there is no need to understand all internals\n to use this module.\n\n \"\"\"\n\n def __init__( # type: ignore\n self,\n *args,\n input_size: Optional[Tuple[int]] = None,\n inputs: Optional[Dict[str, torch.Tensor]] = None,\n **kwargs,\n ):\n \"\"\"\n Args:\n input_size (Optional[Tuple[int]], optional): input_size of the Module,\n needs to be defined or inferrable. Defaults to None.\n inputs (Optional[Dict[str, torch.Tensor]], optional): dictionary of dummy input tensors.\n Use this argument instead of `input_size` if there are multiple forward inputs,\n or if the forward input is not a FloatTensor. 
Defaults to None.\n\n Raises:\n ValueError: [description]\n \"\"\"\n super().__init__(*args, **kwargs)\n self.building_forward = False\n self._parameter_pointers: Dict[str, Pointer] = dict()\n\n if (input_size is None) == (inputs is None):\n raise ValueError(\n \"Either `input_size` or `inputs` should be specified, but not both.\"\n )\n\n self.input_size = input_size\n self.inputs = inputs\n\n def _make_forward_plan(self) -> None:\n \"\"\"Convert forward function into a `Plan` object\n\n Raises:\n ValueError: `.forward` method must be defined\n \"\"\"\n if getattr(self.forward, __name__, None) == \"_forward_unimplemented\": # type: ignore\n raise ValueError(\"Missing .forward() method for Module\")\n\n inputs = self._get_forward_inputs()\n\n self.building_forward = True\n plan = make_plan(self.forward, inputs=inputs) # type: ignore\n self.forward = self._local_forward\n self._forward_plan = plan\n self.__call__ = plan\n self._create_uid2attr()\n self.building_forward = False\n self._remove_plan_action_data()\n\n def _remove_plan_action_data(self) -> None:\n \"\"\"\n Sets `action.obj.data` for each symodule action in `self._forward_plan` to `None`.\n\n This greatly reduces the proto memory footprint;\n The whole state of `self` is saved in the action, which will be recompiled anyway.\n \"\"\"\n\n # Remove module action data\n for action in self._forward_plan.actions:\n if isinstance(action, SaveObjectAction) and action.obj.id in self._uid2attr:\n action.obj.data = downcast(None)\n\n def _local_forward(self, *args, **kwargs): # type: ignore\n recompile(self)\n return self._forward_plan(*args, **kwargs)\n\n def _create_uid2attr(self) -> None:\n self._uid2attr = {\n param.id_at_location: attr_name\n for attr_name, param in self._parameter_pointers.items()\n }\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"A custom getattr method. 
When retrieving a torch.nn.Module or a torch.nn.Parameter\n *during forward plan building*, SyModule instead returns a Pointer to this attribute.\n The first time an attribute is retrieved, we send it to the plan builder VM, and store\n it in self._parameters_pointers, which will be used for plan Recompilation during\n *deserialization*. If an attribute is requested again, we return the pointer from\n `_parameters_pointers`\n\n Args:\n name (str): name of the attr\n\n Returns:\n Any: Attribute value or Pointer to it\n \"\"\"\n # this is __getattr__ instead of __getattribute__ because of the structure of torch.nn.Module\n if name in self._parameter_pointers and self.building_forward:\n return self._parameter_pointers[name]\n\n res = super().__getattr__(name)\n if (\n isinstance(res, (torch.nn.Module, torch.nn.Parameter))\n and self.building_forward\n ):\n res_ptr = res.send(ROOT_CLIENT)\n self._parameter_pointers[name] = res_ptr\n return res_ptr\n else:\n return res\n\n def _get_inp_keys(self) -> List[str]:\n \"\"\"Get key for the `.forward` argument\n Returns:\n str: input key\n \"\"\"\n\n forward_signature = inspect.signature(self.forward)\n args = list(forward_signature.parameters.items())\n if len(args) == 0:\n raise ValueError(\"SyModules requires more than one argument, and no kwargs\")\n inp_keys = []\n for k, v in args:\n if v.default is not inspect.Parameter.empty:\n raise ValueError(\"SyModules accept only args, not kwargs\")\n inp_keys.append(k)\n return inp_keys\n\n def _get_forward_inputs(self) -> Dict[str, Pointer]:\n \"\"\"Get the dummy inputs for generating the .forward `Plan`\n\n Returns:\n Dict[str: Any]: inputs for .forward\n \"\"\"\n inp_keys = self._get_inp_keys()\n\n if hasattr(self, \"inputs\") and isinstance(self.inputs, dict):\n if set(inp_keys) != set(self.inputs.keys()):\n raise ValueError(\n \"Given `inputs` dict and expected `forward` inputs do not match.\"\n )\n inputs = {k: v.send(ROOT_CLIENT) for k, v in self.inputs.items()}\n\n elif 
hasattr(self, \"input_size\") and isinstance(self.input_size, tuple):\n if len(inp_keys) != 1:\n raise ValueError(\n \"`.forward` method has more than one input, define dummy inputs with `inputs` kwarg.\"\n )\n inputs = {inp_keys[0]: torch.rand(self.input_size).send(ROOT_CLIENT)}\n\n else:\n raise ValueError(\n \"SyModule needs either `input_size`: Tuple(Int) or `inputs`: Dict[str, Tensor] as kwarg\"\n \"to trace the forward plan.\"\n \"Also, make sure to call **super().__init__(**kwargs)** in ALL your SyModules\"\n \"\"\n )\n\n return inputs\n\n\nclass SySequential(SyModule):\n \"\"\"The Syft equivalent of torch.nn.Sequential\"\"\"\n\n def __init__(self, *args, input_size: Optional[Tuple[int]] = None, **kwargs): # type: ignore\n \"\"\"initializes SySequential and stores the submodules\n\n input_size (Tuple[Int], optional): input_size of the Module, needs to be defined or inferrable.\n Defaults to None.\n \"\"\"\n if input_size is None:\n input_size = self._infer_input_size(*args)\n\n super().__init__(input_size=input_size, **kwargs)\n for idx, module in enumerate(args):\n setattr(self, str(idx), module)\n self.n_modules = len(args)\n\n def _infer_input_size(self, *args: Any) -> Tuple[int]:\n \"\"\"Infer input size from first child\n\n Returns:\n Tuple[int]: input size of first child SyModule.\n \"\"\"\n if hasattr(args[0], \"input_size\"):\n return args[0].input_size\n else:\n raise ValueError(\n \"Could not infer `input_size` from children modules.\"\n \"Either 1) define `input_size` as a kwarg of SySequential OR\"\n \" 2) define `input_size`: Tuple[int] as kwarg on the first child module.\"\n )\n\n def __iter__(self): # type: ignore\n if self.building_forward:\n return iter([getattr(self, str(i)) for i in range(self.n_modules)])\n else:\n return iter(self._modules.values())\n\n def _get_item_by_idx(self, iterator: Iterator, idx: int) -> SyModule:\n \"\"\"Get the idx-th item of the iterator\"\"\"\n size = self.n_modules\n if not -size <= idx < size:\n raise 
IndexError(f\"index {idx} is out of range\")\n return next(islice(iterator, idx, None))\n\n def __getitem__(self, idx: int) -> SyModule:\n if isinstance(idx, slice): # type: ignore\n raise ValueError(\"SySequential does not support slices\")\n else:\n return self._get_item_by_idx(self._modules.values(), idx)\n\n def __setitem__(self, idx: int, module: Module) -> None:\n key = self._get_item_by_idx(self._modules.keys(), idx)\n return setattr(self, key, module)\n\n def __delitem__(self, idx: Union[slice, int]) -> None:\n if isinstance(idx, slice):\n raise ValueError(\"SySequential does not support slices\")\n else:\n key = self._get_item_by_idx(self._modules.keys(), idx)\n delattr(self, key)\n\n def forward(self, x: Any) -> Any: # type: ignore\n \"\"\"Sequentially call submodule.forward\n\n Args:\n x (Any, optional): input. Defaults to None.\n\n Returns:\n Any: Module output\n \"\"\"\n out = x\n for i, module in enumerate(self):\n # handle indexing in the block, or in the sequential?\n if module.__class__.__name__ == \"ModulePointer\":\n # user defined module\n out = module.forward(x=out)[0]\n else:\n # AST module\n out = module(out)\n return out\n\n def _get_inp_key(self) -> str:\n \"\"\"Get key for the `.forward` argument, allways x for this module\n\n Returns:\n str: \"x\"\n \"\"\"\n return \"x\"\n" ]
[ [ "torch.load", "torch.rand", "torch.save" ] ]
mascondosa/MateUBA_PDE-
[ "e9313e2ead43004f4a2efe3001fdc810294288dd", "e9313e2ead43004f4a2efe3001fdc810294288dd" ]
[ "minifemlib.py", "ej_CalorExplCBPeriodicas.py" ]
[ "import numpy as np\nfrom scipy.spatial import Delaunay\n\ndef Elements( Egeom, order ):\n # Devuelve bases de Lagrange y cuadraturas en el elemento de referencia\n\n if ( Egeom == 'triangular' and order==1 ):\n def phi(j,Xnodes):\n if (j==0): return 1-Xnodes[:,0]-Xnodes[:,1]\n elif (j==1): return Xnodes[:,0]\n else: return Xnodes[:,1] \n def gradphi(j,Xnodes):\n if (j==0): return np.ones(np.shape(Xnodes))*np.array([-1,-1])\n elif (j==1): return np.ones(np.shape(Xnodes))*np.array([1,0])\n else: return np.ones(np.shape(Xnodes))*np.array([0,1])\n Xnodes = np.array([[0.5,0],[0,0.5],[0.5,0.5]])\n quadw = np.array([1/6,1/6,1/6])\n \n return phi, gradphi, Xnodes, quadw\n\n# Geometría de ejemplo: rectángulo. Devuelve triangulación de Delaunay.\ndef rect_mesh(xlim,ylim,I,J):\n x = np.linspace(xlim[0],xlim[1],I)\n y = np.linspace(ylim[0],ylim[1],J)\n X,Y = np.meshgrid(x,y)\n P = np.array([X.flatten(),Y.flatten()]).T\n T = Delaunay(P)\n return T\n\n\n# Función que encuentra el borde de una triangulación\ndef Boundary(T):\n boundary = set()\n for i in range(len(T.neighbors)):\n for k in range(3):\n if (T.neighbors[i][k] == -1):\n nk1,nk2 = (k+1)%3, (k+2)%3 \n boundary.add(T.simplices[i][nk1])\n boundary.add(T.simplices[i][nk2])\n return boundary\n\n\ndef StiffnessLaplacian(T,Egeom,order):\n # Matriz de rigidez del laplaciano.\n \n # Reservamos espacio\n n_nodes = len(T.points)\n n_elem = len(T.simplices)\n A = np.zeros([n_nodes,n_nodes])\n \n # Construimos bases de Lagrange y nodos de cuadratura\n phi, gradphi, Xnodes, quadw = Elements(Egeom,order)\n \n # Pre-calcular matriz local: int_T0 gradphi_i gradphi_j dx\n S = np.zeros([3,3])\n for i in range(3):\n for j in range(3):\n S[i,j] = np.sum(np.sum(gradphi(i,Xnodes)*gradphi(j,Xnodes),1)*quadw)\n \n # Matriz global, recorriendo los elementos\n for i in range(n_elem):\n # Índices de los vértices del triángulo i-ésimo (T_i)\n vertex_index = T.simplices[i]\n \n # Contribución a la matriz de rigidez\n 
A[np.ix_(vertex_index,vertex_index)] = A[np.ix_(vertex_index,vertex_index)] + S\n \n return A\n \ndef LoadVector(rhs, T, Egeom, order):\n # Vector del lado derecho \\int_\\Omega f v\n \n # Reservamos espacio \n n_nodes = len(T.points)\n n_elem = len(T.simplices)\n F = np.zeros(n_nodes)\n Fint = np.zeros(3)\n \n # Construimos bases de Lagrange y nodos de cuadratura\n phi, gradphi, Xnodes, quadw = Elements(Egeom,order) \n \n for i in range(n_elem):\n # Índices de los vértices del triángulo i-ésimo (T_i)\n vertex_index = T.simplices[i]\n \n # Vertices del triángulo T_i\n vertices = [ T.points[T.simplices[i][j]] for j in range(3) ]\n \n # Transformación afin del triángulo de referencia al T_i\n B = np.array([vertices[1] - vertices[0], vertices[2] - vertices[0]])\n detB = abs(np.linalg.det(B))\n \n # Nodos de cuadratura dentro de T_i\n Xtriangle = np.array([np.matmul(B,Xnodes[j]) + vertices[0] for j in range(3)])\n \n # Evaluamos lado derecho * phi_j en los nodos e integramos\n for j in range(3):\n integrand = phi(j,Xnodes) * rhs(Xtriangle[:,0],Xtriangle[:,1])\n Fint[j] = np.sum( integrand*quadw ) * detB\n \n # Sumamos contribución al lado derecho\n F[vertex_index] = F[vertex_index] + Fint \n \n return F\n", "import MateUBA_PDE as pde\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport imageio\nfrom matplotlib import cm\nfrom matplotlib import animation\n\n#\n# Código para resolver la ecuación del calor con un método explícito\n# y condiciones de borde periodicas\n#\n# Autores: Sol Acuña y Maria Galante, 10 de Nov 2020.\n#\n\n# Leemos las condiciones iniciales de un archivo\nim = imageio.imread('MateUBA_PDE.png')\nF = im[:,:,0]\nF = -(F - 255)/255.0\n\n# Dimensiones del problema\nNx = np.shape(F)[0]\nNy = np.shape(F)[1]\nLx = Nx / 100\nLy = Ny / 100\nsizeF = np.size(F)\n\n# Grillas espaciales: queremos solo unos de los bordes (Condiciones periódicas)\nx = pde.Mesh([0,Lx], Nx, 'close')[1:]\ny = pde.Mesh([0,Ly], Ny, 'close')[1:]\nX, Y = np.meshgrid(y,x)\n\n# 
Parámetros para las iteraciones\nhx = Lx / (Nx + 1)\nhy = Ly / (Ny + 1)\ndeltat = min([hx,hy])**2 / 8 # condición de estabilidad para el método explícito\nTf = 0.005\nNt = int(Tf / deltat) + 1\n\ndef CalorExplCBPeridicas2D( F,alpha ):\n return alpha * deltat * ( np.matmul(Dx2,F)/hx**2 + np.matmul(F, Dy2)/hy**2 ) + F\n\n#\n# Iteraciones temporales y animación\n#\n\ndef VerAnim ( iterFunc ):\n fig, axs = plt.subplots()\n imfr = axs.imshow(F, cmap=cm.coolwarm) \n axs.set_title('Método explícito para la ecuación del calor')\n \n def init():\n imfr.set_data(F)\n return [imfr]\n\n # Función de animación que se llama en un loop\n def animate(i):\n global F\n \n # Actualizo el cálculo según el método explícito\n F = iterFunc( F,1 )\n \n # Actualizo el título para mostrar el tiempo\n axs.set_title('Animacion a tiempo t=' + '{:.5}'.format(i*deltat) )\n \n # Actualizo el gráfico\n imfr.set_array(F)\n \n return [imfr]\n\n # Loop para llamar a la animación\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=Nt, interval=10, blit=False)\n\n # Mostrar animación\n plt.show()\n\n\n# Mostramos la animación en pantalla\n\nDx2,Dy2 = pde.MatricesCBPer2D( Nx,Ny )\nVerAnim( CalorExplCBPeridicas2D )\n\n# ~ # Guardar el video a un archivo\n# ~ matplotlib.use(\"Agg\")\n# ~ Writer = animation.writers['ffmpeg']\n# ~ writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)\n# ~ anim.save('im.mp4', writer=writer)\n\n\n\n" ]
[ [ "numpy.ix_", "numpy.meshgrid", "numpy.linspace", "scipy.spatial.Delaunay", "numpy.matmul", "numpy.linalg.det", "numpy.shape", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.matmul", "matplotlib.pyplot.subplots", "numpy.size", "numpy.shape", "matplotlib.animation.FuncAnimation", "numpy.meshgrid", "matplotlib.pyplot.show" ] ]
nasser-glx/openpilot-1
[ "33fdc9abb75aec5817408a589097844b6f83c6b1" ]
[ "selfdrive/test/longitudinal_maneuvers/test_longitudinal.py" ]
[ "#!/usr/bin/env python3\nimport os\nos.environ['OLD_CAN'] = '1'\nos.environ['NOCRASH'] = '1'\n\nimport unittest\nimport matplotlib\nmatplotlib.use('svg')\n\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.honda.values import CruiseButtons as CB\nfrom selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver\nimport selfdrive.manager as manager\nfrom common.params import Params\n\n\ndef create_dir(path):\n try:\n os.makedirs(path)\n except OSError:\n pass\n\n\ndef check_no_collision(log):\n return min(log['d_rel']) > 0\n\ndef check_fcw(log):\n return any(log['fcw'])\n\ndef check_engaged(log):\n return log['controls_state_msgs'][-1][-1].active\n\nmaneuvers = [\n Maneuver(\n 'while cruising at 40 mph, change cruise speed to 50mph',\n duration=30.,\n initial_speed = 40. * CV.MPH_TO_MS,\n cruise_button_presses = [(CB.DECEL_SET, 2.), (0, 2.3),\n (CB.RES_ACCEL, 10.), (0, 10.1),\n (CB.RES_ACCEL, 10.2), (0, 10.3)],\n checks=[check_engaged],\n ),\n Maneuver(\n 'while cruising at 60 mph, change cruise speed to 50mph',\n duration=30.,\n initial_speed=60. * CV.MPH_TO_MS,\n cruise_button_presses = [(CB.DECEL_SET, 2.), (0, 2.3),\n (CB.DECEL_SET, 10.), (0, 10.1),\n (CB.DECEL_SET, 10.2), (0, 10.3)],\n checks=[check_engaged],\n ),\n Maneuver(\n 'while cruising at 20mph, grade change +10%',\n duration=25.,\n initial_speed=20. * CV.MPH_TO_MS,\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n grade_values = [0., 0., 1.0],\n grade_breakpoints = [0., 10., 11.],\n checks=[check_engaged],\n ),\n Maneuver(\n 'while cruising at 20mph, grade change -10%',\n duration=25.,\n initial_speed=20. * CV.MPH_TO_MS,\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n grade_values = [0., 0., -1.0],\n grade_breakpoints = [0., 10., 11.],\n checks=[check_engaged],\n ),\n Maneuver(\n 'approaching a 40mph car while cruising at 60mph from 100m away',\n duration=30.,\n initial_speed = 60. 
* CV.MPH_TO_MS,\n lead_relevancy=True,\n initial_distance_lead=100.,\n speed_lead_values = [40.*CV.MPH_TO_MS, 40.*CV.MPH_TO_MS],\n speed_lead_breakpoints = [0., 100.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'approaching a 0mph car while cruising at 40mph from 150m away',\n duration=30.,\n initial_speed = 40. * CV.MPH_TO_MS,\n lead_relevancy=True,\n initial_distance_lead=150.,\n speed_lead_values = [0.*CV.MPH_TO_MS, 0.*CV.MPH_TO_MS],\n speed_lead_breakpoints = [0., 100.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2',\n duration=50.,\n initial_speed = 20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values = [20., 20., 0.],\n speed_lead_breakpoints = [0., 15., 35.0],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2',\n duration=50.,\n initial_speed = 20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values = [20., 20., 0.],\n speed_lead_breakpoints = [0., 15., 25.0],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 3m/s^2',\n duration=50.,\n initial_speed = 20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values = [20., 20., 0.],\n speed_lead_breakpoints = [0., 15., 21.66],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_fcw],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 5m/s^2',\n duration=40.,\n initial_speed = 20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values = [20., 20., 
0.],\n speed_lead_breakpoints = [0., 15., 19.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_fcw],\n ),\n Maneuver(\n 'starting at 0mph, approaching a stopped car 100m away',\n duration=30.,\n initial_speed = 0.,\n lead_relevancy=True,\n initial_distance_lead=100.,\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"following a car at 60mph, lead accel and decel at 0.5m/s^2 every 2s\",\n duration=25.,\n initial_speed=30.,\n lead_relevancy=True,\n initial_distance_lead=49.,\n speed_lead_values=[30., 30., 29., 31., 29., 31., 29.],\n speed_lead_breakpoints=[0., 6., 8., 12., 16., 20., 24.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"following a car at 10mph, stop and go at 1m/s2 lead dece1 and accel\",\n duration=70.,\n initial_speed=10.,\n lead_relevancy=True,\n initial_distance_lead=20.,\n speed_lead_values=[10., 0., 0., 10., 0., 10.],\n speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"green light: stopped behind lead car, lead car accelerates at 1.5 m/s\",\n duration=30.,\n initial_speed=0.,\n lead_relevancy=True,\n initial_distance_lead=4.,\n speed_lead_values=[0, 0 , 45],\n speed_lead_breakpoints=[0, 10., 40.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9),\n (CB.RES_ACCEL, 2.0), (0.0, 2.1),\n (CB.RES_ACCEL, 2.2), (0.0, 2.3)],\n checks=[check_engaged, check_no_collision],\n ),\n 
Maneuver(\n \"stop and go with 1m/s2 lead decel and accel, with full stops\",\n duration=70.,\n initial_speed=0.,\n lead_relevancy=True,\n initial_distance_lead=20.,\n speed_lead_values=[10., 0., 0., 10., 0., 0.] ,\n speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"stop and go with 1.5m/s2 lead accel and 3.3m/s^2 lead decel, with full stops\",\n duration=45.,\n initial_speed=0.,\n lead_relevancy=True,\n initial_distance_lead=20.,\n speed_lead_values=[10., 0., 0., 10., 0., 0.] ,\n speed_lead_breakpoints=[10., 13., 26., 33., 36., 45.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"accelerate from 20 while lead vehicle decelerates from 40 to 20 at 1m/s2\",\n duration=30.,\n initial_speed=10.,\n lead_relevancy=True,\n initial_distance_lead=10.,\n speed_lead_values=[20., 10.],\n speed_lead_breakpoints=[1., 11.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9),\n (CB.RES_ACCEL, 2.0), (0.0, 2.1),\n (CB.RES_ACCEL, 2.2), (0.0, 2.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"accelerate from 20 while lead vehicle decelerates from 40 to 0 at 2m/s2\",\n duration=30.,\n initial_speed=10.,\n lead_relevancy=True,\n initial_distance_lead=10.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[1., 11.],\n cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9),\n (CB.RES_ACCEL, 2.0), (0.0, 2.1),\n (CB.RES_ACCEL, 2.2), (0.0, 2.3)],\n checks=[check_engaged, check_no_collision],\n ),\n 
Maneuver(\n \"fcw: traveling at 30 m/s and approaching lead traveling at 20m/s\",\n duration=15.,\n initial_speed=30.,\n lead_relevancy=True,\n initial_distance_lead=100.,\n speed_lead_values=[20.],\n speed_lead_breakpoints=[1.],\n cruise_button_presses = [],\n checks=[check_fcw],\n ),\n Maneuver(\n \"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 1m/s2\",\n duration=18.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[3., 23.],\n cruise_button_presses = [],\n checks=[check_fcw],\n ),\n Maneuver(\n \"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 3m/s2\",\n duration=13.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[3., 9.6],\n cruise_button_presses = [],\n checks=[check_fcw],\n ),\n Maneuver(\n \"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 5m/s2\",\n duration=8.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[3., 7.],\n cruise_button_presses = [],\n checks=[check_fcw],\n )\n]\n\n# maneuvers = [maneuvers[-11]]\n# maneuvers = [maneuvers[6]]\n\ndef setup_output():\n output_dir = os.path.join(os.getcwd(), 'out/longitudinal')\n if not os.path.exists(os.path.join(output_dir, \"index.html\")):\n # write test output header\n\n css_style = \"\"\"\n .maneuver_title {\n font-size: 24px;\n text-align: center;\n }\n .maneuver_graph {\n width: 100%;\n }\n \"\"\"\n\n view_html = \"<html><head><style>%s</style></head><body><table>\" % (css_style,)\n for i, man in enumerate(maneuvers):\n view_html += \"<tr><td class='maneuver_title' colspan=5><div>%s</div></td></tr><tr>\" % (man.title,)\n for c in ['distance.svg', 'speeds.svg', 'acceleration.svg', 'pedals.svg', 'pid.svg']:\n view_html += \"<td><img class='maneuver_graph' src='%s'/></td>\" % 
(os.path.join(\"maneuver\" + str(i+1).zfill(2), c), )\n view_html += \"</tr>\"\n\n create_dir(output_dir)\n with open(os.path.join(output_dir, \"index.html\"), \"w\") as f:\n f.write(view_html)\n\nclass LongitudinalControl(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n os.environ['NO_CAN_TIMEOUT'] = \"1\"\n\n setup_output()\n\n params = Params()\n params.clear_all()\n params.put(\"Passive\", \"1\" if os.getenv(\"PASSIVE\") else \"0\")\n params.put(\"OpenpilotEnabledToggle\", \"1\")\n params.put(\"CommunityFeaturesToggle\", \"1\")\n\n manager.prepare_managed_process('radard')\n manager.prepare_managed_process('controlsd')\n manager.prepare_managed_process('plannerd')\n manager.prepare_managed_process('dmonitoringd')\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n # hack\n def test_longitudinal_setup(self):\n pass\n\n\ndef run_maneuver_worker(k):\n man = maneuvers[k]\n output_dir = os.path.join(os.getcwd(), 'out/longitudinal')\n\n def run(self):\n print(man.title)\n valid = False\n\n for retries in range(3):\n manager.start_managed_process('radard')\n manager.start_managed_process('controlsd')\n manager.start_managed_process('plannerd')\n manager.start_managed_process('dmonitoringd')\n\n plot, valid = man.evaluate()\n plot.write_plot(output_dir, \"maneuver\" + str(k + 1).zfill(2))\n\n manager.kill_managed_process('radard')\n manager.kill_managed_process('controlsd')\n manager.kill_managed_process('plannerd')\n manager.kill_managed_process('dmonitoringd')\n\n if valid:\n break\n\n self.assertTrue(valid)\n\n return run\n\n\nfor k in range(len(maneuvers)):\n setattr(LongitudinalControl, \"test_longitudinal_maneuvers_%d\" % (k + 1), run_maneuver_worker(k))\n\nif __name__ == \"__main__\":\n unittest.main(failfast=True)\n" ]
[ [ "matplotlib.use" ] ]
mingwayzhang/tvm
[ "3b287c4d4e6d83e6fd30db47ffa3d5481a332a63" ]
[ "topi/tests/python/test_topi_vision.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Test code for vision package\"\"\"\nfrom __future__ import print_function\nimport math\nimport numpy as np\nimport tvm\nimport topi\nimport topi.testing\n\nfrom tvm.contrib.pickle_memoize import memoize\nfrom topi.util import get_const_tuple\nfrom topi.vision import ssd, non_max_suppression, get_valid_counts\n\n\ndef verify_get_valid_counts(dshape, score_threshold, id_index, score_index):\n dtype = \"float32\"\n batch_size, num_anchor, elem_length = dshape\n np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)\n np_out1 = np.zeros(shape=(batch_size,))\n np_out2 = np.zeros(shape=dshape).astype(dtype)\n for i in range(batch_size):\n np_out1[i] = 0\n inter_idx = 0\n for j in range(num_anchor):\n score = np_data[i, j, score_index]\n if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):\n for k in range(elem_length):\n np_out2[i, inter_idx, k] = np_data[i, j, k]\n np_out1[i] += 1\n inter_idx += 1\n if j >= np_out1[i]:\n for k in range(elem_length):\n np_out2[i, j, k] = -1.0\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on 
target: %s\" % device)\n with tvm.target.create(device):\n data = tvm.placeholder(dshape, name=\"data\", dtype=dtype)\n outs = get_valid_counts(data, score_threshold, id_index, score_index)\n s = topi.generic.schedule_get_valid_counts(outs)\n\n tvm_input_data = tvm.nd.array(np_data, ctx)\n tvm_out1 = tvm.nd.array(np.zeros(np_out1.shape, dtype=\"int32\"), ctx)\n tvm_out2 = tvm.nd.array(np.zeros(np_out2.shape, dtype=dtype), ctx)\n f = tvm.build(s, [data, outs[0], outs[1]], device)\n f(tvm_input_data, tvm_out1, tvm_out2)\n tvm.testing.assert_allclose(tvm_out1.asnumpy(), np_out1, rtol=1e-3)\n tvm.testing.assert_allclose(tvm_out2.asnumpy(), np_out2, rtol=1e-3)\n\n for device in ['llvm', 'cuda', 'opencl']:\n # Disable gpu test for now\n if device != \"llvm\":\n continue\n check_device(device)\n\n\ndef test_get_valid_counts():\n verify_get_valid_counts((1, 2500, 6), 0, 0, 1)\n verify_get_valid_counts((1, 2500, 5), -1, -1, 0)\n verify_get_valid_counts((3, 1000, 6), 0.55, 1, 0)\n verify_get_valid_counts((16, 500, 5), 0.95, -1, 1)\n\n\ndef verify_non_max_suppression(np_data, np_valid_count, np_result, np_indices_result, iou_threshold,\n force_suppress, top_k, coord_start, score_index, id_index):\n dshape = np_data.shape\n batch, num_anchors, _ = dshape\n indices_dshape = (batch, num_anchors)\n data = tvm.placeholder(dshape, name=\"data\")\n valid_count = tvm.placeholder((batch,), dtype=\"int32\", name=\"valid_count\")\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n if device == 'llvm':\n out = non_max_suppression(data, valid_count, -1, iou_threshold, force_suppress, top_k,\n coord_start=coord_start, score_index=score_index, id_index=id_index,\n return_indices=False)\n indices_out = non_max_suppression(data, valid_count, -1, iou_threshold, force_suppress, top_k,\n coord_start=coord_start, 
score_index=score_index, id_index=id_index)\n else:\n out = topi.cuda.non_max_suppression(data, valid_count, -1, iou_threshold, force_suppress, top_k,\n coord_start=coord_start, score_index=score_index, id_index=id_index,\n return_indices=False)\n indices_out = topi.cuda.non_max_suppression(data, valid_count, -1, iou_threshold, force_suppress, top_k,\n coord_start=coord_start, score_index=score_index, id_index=id_index)\n s = topi.generic.schedule_nms(out)\n indices_s = topi.generic.schedule_nms(indices_out)\n\n tvm_data = tvm.nd.array(np_data, ctx)\n tvm_valid_count = tvm.nd.array(np_valid_count, ctx)\n\n tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), ctx)\n f = tvm.build(s, [data, valid_count, out], device)\n f(tvm_data, tvm_valid_count, tvm_out)\n tvm.testing.assert_allclose(tvm_out.asnumpy(), np_result, rtol=1e-4)\n\n tvm_indices_out = tvm.nd.array(np.zeros(indices_dshape, dtype=\"int32\"), ctx)\n f = tvm.build(indices_s, [data, valid_count, indices_out], device)\n f(tvm_data, tvm_valid_count, tvm_indices_out)\n tvm.testing.assert_allclose(tvm_indices_out.asnumpy(), np_indices_result, rtol=1e-4)\n\n for device in ['llvm', 'cuda', 'opencl']:\n check_device(device)\n\n\ndef test_non_max_suppression():\n np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],\n [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],\n [1, 0.5, 100, 60, 70, 110]]]).astype(\"float32\")\n np_valid_count = np.array([4]).astype(\"int32\")\n np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],\n [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1]]])\n np_indices_result = np.array([[3, 0, -1, -1, -1]])\n\n verify_non_max_suppression(np_data, np_valid_count, np_result, np_indices_result, 0.7, True, 2, 2, 1, 0)\n\n np_data = np.array([[[0.8, 1, 20, 25, 45], [0.7, 30, 60, 50, 80],\n [0.4, 4, 21, 19, 40], [0.9, 35, 61, 52, 79],\n [0.5, 100, 60, 70, 110]]]).astype(\"float32\")\n np_valid_count = 
np.array([4]).astype(\"int32\")\n np_result = np.array([[[0.9, 35, 61, 52, 79], [0.8, 1, 20, 25, 45],\n [-1, -1, -1, -1, -1], [-1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1]]])\n np_indices_result = np.array([[3, 0, -1, -1, -1]])\n verify_non_max_suppression(np_data, np_valid_count, np_result, np_indices_result, 0.7, False, 2, 1, 0, -1)\n\n\n\ndef verify_multibox_prior(dshape, sizes=(1,), ratios=(1,), steps=(-1, -1), offsets=(0.5, 0.5), clip=False):\n data = tvm.placeholder(dshape, name=\"data\")\n\n dtype = data.dtype\n input_data = np.random.uniform(size=dshape).astype(dtype)\n\n in_height = data.shape[2].value\n in_width = data.shape[3].value\n num_sizes = len(sizes)\n num_ratios = len(ratios)\n size_ratio_concat = sizes + ratios\n steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height\n steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width\n offset_h = offsets[0]\n offset_w = offsets[1]\n\n oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)\n np_out = np.zeros(oshape).astype(dtype)\n\n for i in range(in_height):\n center_h = (i + offset_h) * steps_h\n for j in range(in_width):\n center_w = (j + offset_w) * steps_w\n for k in range(num_sizes + num_ratios - 1):\n w = size_ratio_concat[k] * in_height / in_width / 2.0 if k < num_sizes else \\\n size_ratio_concat[0] * in_height / in_width * math.sqrt(size_ratio_concat[k + 1]) / 2.0\n h = size_ratio_concat[k] / 2.0 if k < num_sizes else \\\n size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0\n count = i * in_width * (num_sizes + num_ratios - 1) + j * (num_sizes + num_ratios - 1) + k\n np_out[0][count][0] = center_w - w\n np_out[0][count][1] = center_h - h\n np_out[0][count][2] = center_w + w\n np_out[0][count][3] = center_h + h\n if clip:\n np_out = np.clip(np_out, 0, 1)\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with 
tvm.target.create(device):\n if device == 'llvm':\n out = ssd.multibox_prior(data, sizes, ratios, steps, offsets, clip)\n else:\n out = topi.cuda.ssd.multibox_prior(data, sizes, ratios, steps, offsets, clip)\n s = topi.generic.schedule_multibox_prior(out)\n\n tvm_input_data = tvm.nd.array(input_data, ctx)\n tvm_out = tvm.nd.array(np.zeros(oshape, dtype=dtype), ctx)\n f = tvm.build(s, [data, out], device)\n f(tvm_input_data, tvm_out)\n tvm.testing.assert_allclose(tvm_out.asnumpy(), np_out, rtol=1e-3)\n\n for device in ['llvm', 'opencl', 'cuda']:\n check_device(device)\n\n\ndef test_multibox_prior():\n verify_multibox_prior((1, 3, 50, 50))\n verify_multibox_prior((1, 3, 224, 224), sizes=(0.5, 0.25, 0.1), ratios=(1, 2, 0.5))\n verify_multibox_prior((1, 32, 32, 32), sizes=(0.5, 0.25), ratios=(1, 2), steps=(2, 2), clip=True)\n\n\ndef test_multibox_detection():\n batch_size = 1\n num_anchors = 3\n num_classes = 3\n cls_prob = tvm.placeholder((batch_size, num_anchors, num_classes), name=\"cls_prob\")\n loc_preds = tvm.placeholder((batch_size, num_anchors * 4), name=\"loc_preds\")\n anchors = tvm.placeholder((1, num_anchors, 4), name=\"anchors\")\n\n # Manually create test case\n np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]])\n np_loc_preds = np.array([[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]])\n np_anchors = np.array([[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]])\n\n expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],\n [0, 0.44999999, 1, 1, 1, 1],\n [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]])\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n if device == 'llvm':\n out = ssd.multibox_detection(cls_prob, loc_preds, anchors)\n else:\n out = topi.cuda.ssd.multibox_detection(cls_prob, 
loc_preds, anchors)\n s = topi.generic.schedule_multibox_detection(out)\n\n tvm_cls_prob = tvm.nd.array(np_cls_prob.astype(cls_prob.dtype), ctx)\n tvm_loc_preds = tvm.nd.array(np_loc_preds.astype(loc_preds.dtype), ctx)\n tvm_anchors = tvm.nd.array(np_anchors.astype(anchors.dtype), ctx)\n tvm_out = tvm.nd.array(np.zeros((batch_size, num_anchors, 6)).astype(out.dtype), ctx)\n f = tvm.build(s, [cls_prob, loc_preds, anchors, out], device)\n f(tvm_cls_prob, tvm_loc_preds, tvm_anchors, tvm_out)\n tvm.testing.assert_allclose(tvm_out.asnumpy(), expected_np_out, rtol=1e-4)\n\n for device in ['llvm', 'opencl', 'cuda']:\n check_device(device)\n\n\ndef verify_roi_align(batch, in_channel, in_size, num_roi, pooled_size, spatial_scale, sample_ratio):\n a_shape = (batch, in_channel, in_size, in_size)\n rois_shape = (num_roi, 5)\n\n a = tvm.placeholder(a_shape)\n rois = tvm.placeholder(rois_shape)\n\n @memoize(\"topi.tests.test_topi_vision.verify_roi_align\")\n def get_ref_data():\n a_np = np.random.uniform(size=a_shape).astype('float32')\n rois_np = np.random.uniform(size=rois_shape).astype('float32') * in_size\n rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi)\n b_np = topi.testing.roi_align_nchw_python(a_np, rois_np, pooled_size=pooled_size,\n spatial_scale=spatial_scale,\n sample_ratio=sample_ratio)\n\n return a_np, rois_np, b_np\n\n a_np, rois_np, b_np = get_ref_data()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n\n with tvm.target.create(device):\n b = topi.vision.rcnn.roi_align_nchw(a, rois, pooled_size=pooled_size,\n spatial_scale=spatial_scale,\n sample_ratio=sample_ratio)\n s = topi.generic.schedule_roi_align(b)\n\n tvm_a = tvm.nd.array(a_np, ctx)\n tvm_rois = tvm.nd.array(rois_np, ctx)\n tvm_b = tvm.nd.array(np.zeros(get_const_tuple(b.shape), dtype=b.dtype), ctx=ctx)\n f = tvm.build(s, [a, rois, b], 
device)\n f(tvm_a, tvm_rois, tvm_b)\n tvm.testing.assert_allclose(tvm_b.asnumpy(), b_np, rtol=1e-3)\n\n for device in ['llvm', 'cuda', 'opencl']:\n check_device(device)\n\n\ndef test_roi_align():\n verify_roi_align(1, 16, 32, 64, 7, 1.0, -1)\n verify_roi_align(4, 16, 32, 64, 7, 0.5, 2)\n verify_roi_align(1, 32, 32, 80, 8, 0.0625, 2)\n verify_roi_align(1, 32, 500, 80, 8, 0.0625, 2)\n\n\ndef verify_roi_pool(batch, in_channel, in_size, num_roi, pooled_size, spatial_scale):\n a_shape = (batch, in_channel, in_size, in_size)\n rois_shape = (num_roi, 5)\n\n a = tvm.placeholder(a_shape)\n rois = tvm.placeholder(rois_shape)\n\n @memoize(\"topi.tests.test_topi_vision.verify_roi_pool\")\n def get_ref_data():\n a_np = np.random.uniform(size=a_shape).astype('float32')\n rois_np = np.random.uniform(size=rois_shape).astype('float32') * in_size\n rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32')\n\n b_np = topi.testing.roi_pool_nchw_python(a_np, rois_np, pooled_size=pooled_size,\n spatial_scale=spatial_scale)\n return a_np, rois_np, b_np\n\n a_np, rois_np, b_np = get_ref_data()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n\n with tvm.target.create(device):\n b = topi.vision.rcnn.roi_pool_nchw(a, rois, pooled_size=pooled_size,\n spatial_scale=spatial_scale)\n s = topi.generic.schedule_roi_pool(b)\n\n tvm_a = tvm.nd.array(a_np, ctx)\n tvm_rois = tvm.nd.array(rois_np, ctx)\n tvm_b = tvm.nd.array(np.zeros(get_const_tuple(b.shape), dtype=b.dtype), ctx=ctx)\n f = tvm.build(s, [a, rois, b], device)\n f(tvm_a, tvm_rois, tvm_b)\n tvm.testing.assert_allclose(tvm_b.asnumpy(), b_np, rtol=1e-4)\n\n for device in ['cuda', 'llvm']:\n check_device(device)\n\n\ndef test_roi_pool():\n verify_roi_pool(1, 4, 16, 32, 7, 1.0)\n verify_roi_pool(4, 4, 16, 32, 7, 0.5)\n\n\ndef verify_proposal(np_cls_prob, np_bbox_pred, 
np_im_info, np_out, attrs):\n cls_prob = tvm.placeholder(np_cls_prob.shape)\n bbox_pred = tvm.placeholder(np_bbox_pred.shape)\n im_info = tvm.placeholder(np_im_info.shape)\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n out = topi.vision.proposal(cls_prob, bbox_pred, im_info, **attrs)\n s = topi.generic.schedule_proposal(out)\n f = tvm.build(s, [cls_prob, bbox_pred, im_info, out], device)\n tvm_cls_prob = tvm.nd.array(np_cls_prob, ctx=ctx)\n tvm_bbox_pred = tvm.nd.array(np_bbox_pred, ctx=ctx)\n tvm_im_info = tvm.nd.array(np_im_info, ctx=ctx)\n tvm_out = tvm.nd.empty(ctx=ctx, shape=out.shape, dtype=out.dtype)\n f(tvm_cls_prob, tvm_bbox_pred, tvm_im_info, tvm_out)\n tvm.testing.assert_allclose(tvm_out.asnumpy(), np_out, rtol=1e-4)\n\n for device in ['cuda']:\n check_device(device)\n\n\ndef test_proposal():\n attrs = {'scales': (0.5,),'ratios': (0.5,),\n 'feature_stride': 16,\n 'iou_loss': False,\n 'rpn_min_size': 16,\n 'threshold': 0.7,\n 'rpn_pre_nms_top_n': 200,\n 'rpn_post_nms_top_n': 4,\n }\n np_cls_prob = np.array([[\n [[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],\n [[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]]\n ]], dtype='float32')\n np_bbox_pred = np.array([[\n [[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],\n [[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],\n [[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],\n [[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],\n ]], dtype='float32')\n np_im_info = np.array([[48., 48., 1.]], dtype='float32')\n np_out = np.array([\n [0., 0., 2.8451548,28.38012, 18.154846],\n [0., 0., 15.354933, 41.96971, 41.245064],\n [0., 18.019852, 1.0538368, 51.98015, 25.946163],\n [0., 27.320923, -1.266357, 55., 24.666357]\n ], dtype='float32')\n\n verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)\n\n np_out = 
np.array([\n [ 0., -5.25, -2.5, 21.75, 19.],\n [ 0., 11.25, -2., 37.25, 18.5],\n [ 0., 26.849998, -2.3000002, 53.45, 18.6],\n [ 0., -4.95, 13.799999, 22.25, 35.5]\n ], dtype='float32')\n\n attrs['iou_loss'] = True\n verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)\n\n\nif __name__ == \"__main__\":\n test_get_valid_counts()\n test_non_max_suppression()\n test_multibox_prior()\n test_multibox_detection()\n test_roi_align()\n test_proposal()\n" ]
[ [ "numpy.clip", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
ZHAW-Audio-Style-Transfer/melgan
[ "f0249fca167ee866c70e4a0b4ed943e869e634c9" ]
[ "src/models/base.py" ]
[ "from __future__ import annotations\nfrom abc import ABC, abstractmethod\nfrom logger.logger import Logger\nfrom config.config import Config\nfrom pathlib import Path\nfrom tensorflow import keras as K\n\n\nclass BaseModel(ABC):\n def __init__(self):\n Logger.log(f\"Initializing {self.__class__.__name__}...\")\n self._load() or self._build()\n\n @abstractmethod\n def train(self):\n raise NotImplementedError\n\n @abstractmethod\n def predict(self, file: str):\n raise NotImplementedError\n\n @abstractmethod\n def summary(self):\n raise NotImplementedError\n\n @abstractmethod\n def _build(self):\n raise NotImplementedError\n\n @abstractmethod\n def _load(self) -> bool:\n raise NotImplementedError\n\n def _summary(self, models: dict[str, K.Model]):\n for name, model in models.items():\n Logger.log(f\"\\nSummary of {name}:\")\n model.summary()\n\n K.utils.plot_model(\n model,\n show_shapes=True,\n to_file=createFolder(self._getModelSavePath()).joinpath(f\"{name}.png\"),\n )\n\n def _getModelSavePath(self):\n return Path(Config().environment.output_path, \"model\")\n\n def _tensorboardCallback(self):\n path = Path(Config().environment.output_path, \"tensorboard\")\n return K.callbacks.TensorBoard(log_dir=path, histogram_freq=1)\n\n\ndef createFolder(path: Path):\n path.mkdir(parents=True, exist_ok=True)\n return path\n" ]
[ [ "tensorflow.keras.callbacks.TensorBoard" ] ]
leofang/qutip
[ "67c46870749b6e5925feed106a493eac9124bcc3" ]
[ "qutip/correlation.py" ]
[ "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['correlation_2op_1t', 'correlation_2op_2t', 'correlation_3op_1t',\n 'correlation_3op_2t', 'coherence_function_g1',\n 'coherence_function_g2', 'spectrum', 'spectrum_correlation_fft',\n 'correlation_ss', 'correlation', 'correlation_4op_1t',\n 'correlation_4op_2t', 'spectrum_ss', 'spectrum_pi']\n\nfrom re import sub\nfrom warnings import warn\nimport types\n\nimport numpy as np\nimport scipy.fftpack\n\nfrom qutip.eseries import esval, esspec\nfrom qutip.essolve import ode2es\nfrom qutip.expect import expect\nfrom qutip.mesolve import mesolve\nfrom qutip.mcsolve import mcsolve\nfrom qutip.operators import qeye\nfrom qutip.qobj import Qobj, isket, issuper\nfrom qutip.qobjevo import QobjEvo\nfrom qutip.rhs_generate import rhs_clear, _td_wrap_array_str\nfrom qutip.cy.utilities import _cython_build_cleanup\nfrom qutip.settings import debug\nfrom qutip.solver import Options, config\nfrom qutip.steadystate import steadystate\nfrom qutip.states import ket2dm\nfrom qutip.superoperator import liouvillian, spre, mat2vec\nfrom qutip.tensor import tensor\n\nif debug:\n import inspect\n\n\n# -----------------------------------------------------------------------------\n# PUBLIC API\n# -----------------------------------------------------------------------------\n\n# low level correlation\n\ndef correlation_2op_1t(H, state0, taulist, c_ops, a_op, b_op,\n 
solver=\"me\", reverse=False, args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the two-operator two-time correlation function:\n :math:`\\left<A(t+\\tau)B(t)\\right>`\n along one time axis using the quantum regression theorem and the evolution\n solver indicated by the `solver` parameter.\n\n Parameters\n ----------\n\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n state0 : Qobj\n Initial state density matrix :math:`\\rho(t_0)` or state vector\n :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n taulist : array_like\n list of times for :math:`\\tau`. taulist must be positive and contain\n the element `0`.\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n a_op : Qobj\n operator A.\n b_op : Qobj\n operator B.\n reverse : bool {False, True}\n If `True`, calculate :math:`\\left<A(t)B(t+\\tau)\\right>` instead of\n :math:`\\left<A(t+\\tau)B(t)\\right>`.\n solver : str {'me', 'mc', 'es'}\n choice of solver (`me` for master-equation, `mc` for Monte Carlo, and\n `es` for exponential series).\n options : Options\n Solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. 
`mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n corr_vec : ndarray\n An array of correlation values for the times specified by `taulist`.\n\n References\n ----------\n See, Gardiner, Quantum Noise, Section 5.2.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n\n if reverse:\n A_op = a_op\n B_op = b_op\n C_op = 1\n else:\n A_op = 1\n B_op = a_op\n C_op = b_op\n\n return _correlation_2t(H, state0, [0], taulist, c_ops, A_op, B_op, C_op,\n solver=solver, args=args, options=options)[0]\n\n\ndef correlation_2op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op,\n solver=\"me\", reverse=False, args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the two-operator two-time correlation function:\n :math:`\\left<A(t+\\tau)B(t)\\right>`\n along two time axes using the quantum regression theorem and the\n evolution solver indicated by the `solver` parameter.\n\n Parameters\n ----------\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n state0 : Qobj\n Initial state density matrix :math:`\\rho_0` or state vector\n :math:`\\psi_0`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n tlist : array_like\n list of times for :math:`t`. tlist must be positive and contain the\n element `0`. When taking steady-steady correlations only one tlist\n value is necessary, i.e. when :math:`t \\rightarrow \\infty`; here\n tlist is automatically set, ignoring user input.\n taulist : array_like\n list of times for :math:`\\tau`. 
taulist must be positive and contain\n the element `0`.\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n a_op : Qobj\n operator A.\n b_op : Qobj\n operator B.\n reverse : bool {False, True}\n If `True`, calculate :math:`\\left<A(t)B(t+\\tau)\\right>` instead of\n :math:`\\left<A(t+\\tau)B(t)\\right>`.\n solver : str\n choice of solver (`me` for master-equation, `mc` for Monte Carlo, and\n `es` for exponential series).\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n corr_mat : ndarray\n An 2-dimensional array (matrix) of correlation values for the times\n specified by `tlist` (first index) and `taulist` (second index). If\n `tlist` is `None`, then a 1-dimensional array of correlation values\n is returned instead.\n\n References\n ----------\n See, Gardiner, Quantum Noise, Section 5.2.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n\n if tlist is None:\n return correlation_2op_1t(H, state0, taulist, c_ops, a_op, b_op,\n solver=solver, reverse=reverse, args=args,\n options=options)\n else:\n if reverse:\n A_op = a_op\n B_op = b_op\n C_op = 1\n else:\n A_op = 1\n B_op = a_op\n C_op = b_op\n\n return _correlation_2t(H, state0, tlist, taulist,\n c_ops, A_op, B_op, C_op,\n solver=solver, args=args, options=options)\n\n\ndef correlation_3op_1t(H, state0, taulist, c_ops, a_op, b_op, c_op,\n solver=\"me\", args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the three-operator two-time correlation function:\n :math:`\\left<A(t)B(t+\\tau)C(t)\\right>`\n along one time axis using the quantum regression theorem and the\n evolution solver indicated by the `solver` parameter.\n\n Note: it is not possibly to calculate a physically meaningful 
correlation\n of this form where :math:`\\tau<0`.\n\n Parameters\n ----------\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n rho0 : Qobj\n Initial state density matrix :math:`\\rho(t_0)` or state vector\n :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n taulist : array_like\n list of times for :math:`\\tau`. taulist must be positive and contain\n the element `0`.\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n a_op : Qobj\n operator A.\n b_op : Qobj\n operator B.\n c_op : Qobj\n operator C.\n solver : str\n choice of solver (`me` for master-equation, `mc` for Monte Carlo, and\n `es` for exponential series).\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. 
`mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n corr_vec : array\n An array of correlation values for the times specified by `taulist`\n\n References\n ----------\n See, Gardiner, Quantum Noise, Section 5.2.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n\n return _correlation_2t(H, state0, [0], taulist, c_ops, a_op, b_op, c_op,\n solver=solver, args=args, options=options)[0]\n\n\ndef correlation_3op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op,\n solver=\"me\", args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the three-operator two-time correlation function:\n :math:`\\left<A(t)B(t+\\tau)C(t)\\right>`\n along two time axes using the quantum regression theorem and the\n evolution solver indicated by the `solver` parameter.\n\n Note: it is not possibly to calculate a physically meaningful correlation\n of this form where :math:`\\tau<0`.\n\n Parameters\n ----------\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n rho0 : Qobj\n Initial state density matrix :math:`\\rho_0` or state vector\n :math:`\\psi_0`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n tlist : array_like\n list of times for :math:`t`. tlist must be positive and contain the\n element `0`. When taking steady-steady correlations only one tlist\n value is necessary, i.e. when :math:`t \\rightarrow \\infty`; here\n tlist is automatically set, ignoring user input.\n taulist : array_like\n list of times for :math:`\\tau`. 
taulist must be positive and contain\n the element `0`.\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n a_op : Qobj\n operator A.\n b_op : Qobj\n operator B.\n c_op : Qobj\n operator C.\n solver : str\n choice of solver (`me` for master-equation, `mc` for Monte Carlo, and\n `es` for exponential series).\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n corr_mat : array\n An 2-dimensional array (matrix) of correlation values for the times\n specified by `tlist` (first index) and `taulist` (second index). If\n `tlist` is `None`, then a 1-dimensional array of correlation values\n is returned instead.\n\n References\n ----------\n\n See, Gardiner, Quantum Noise, Section 5.2.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n\n if tlist is None:\n return correlation_3op_1t(H, state0, taulist, c_ops, a_op, b_op, c_op,\n solver=solver, args=args, options=options)\n else:\n return _correlation_2t(H, state0, tlist, taulist,\n c_ops, a_op, b_op, c_op,\n solver=solver, args=args, options=options)\n\n\n# high level correlation\n\ndef coherence_function_g1(H, state0, taulist, c_ops, a_op, solver=\"me\",\n args={}, options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the normalized first-order quantum coherence function:\n\n .. 
math::\n\n g^{(1)}(\\tau) =\n \\frac{\\langle A^\\dagger(\\tau)A(0)\\rangle}\n {\\sqrt{\\langle A^\\dagger(\\tau)A(\\tau)\\rangle\n \\langle A^\\dagger(0)A(0)\\rangle}}\n\n using the quantum regression theorem and the evolution solver indicated by\n the `solver` parameter.\n\n Parameters\n ----------\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n state0 : Qobj\n Initial state density matrix :math:`\\rho(t_0)` or state vector\n :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n taulist : array_like\n list of times for :math:`\\tau`. taulist must be positive and contain\n the element `0`.\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n a_op : Qobj\n operator A.\n solver : str\n choice of solver (`me` for master-equation and\n `es` for exponential series).\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. 
`mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n g1, G1 : tuple\n The normalized and unnormalized second-order coherence function.\n\n \"\"\"\n\n # first calculate the photon number\n if state0 is None:\n state0 = steadystate(H, c_ops)\n n = np.array([expect(state0, a_op.dag() * a_op)])\n else:\n n = mesolve(H, state0, taulist, c_ops, [a_op.dag() * a_op],\n options=options).expect[0]\n\n # calculate the correlation function G1 and normalize with n to obtain g1\n G1 = correlation_2op_1t(H, state0, taulist, c_ops, a_op.dag(), a_op,\n solver=solver, args=args, options=options)\n g1 = G1 / np.sqrt(n[0] * n)\n\n return g1, G1\n\n\ndef coherence_function_g2(H, state0, taulist, c_ops, a_op, solver=\"me\", args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the normalized second-order quantum coherence function:\n\n .. math::\n\n g^{(2)}(\\tau) =\n \\frac{\\langle A^\\dagger(0)A^\\dagger(\\tau)A(\\tau)A(0)\\rangle}\n {\\langle A^\\dagger(\\tau)A(\\tau)\\rangle\n \\langle A^\\dagger(0)A(0)\\rangle}\n\n using the quantum regression theorem and the evolution solver indicated by\n the `solver` parameter.\n\n Parameters\n ----------\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n state0 : Qobj\n Initial state density matrix :math:`\\rho(t_0)` or state vector\n :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n taulist : array_like\n list of times for :math:`\\tau`. 
taulist must be positive and contain\n the element `0`.\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n a_op : Qobj\n operator A.\n args : dict\n Dictionary of arguments to be passed to solver.\n solver : str\n choice of solver (`me` for master-equation and\n `es` for exponential series).\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n g2, G2 : tuple\n The normalized and unnormalized second-order coherence function.\n\n \"\"\"\n\n # first calculate the photon number\n if state0 is None:\n state0 = steadystate(H, c_ops)\n n = np.array([expect(state0, a_op.dag() * a_op)])\n else:\n n = mesolve(H, state0, taulist, c_ops, [a_op.dag() * a_op], args=args).expect[0]\n\n # calculate the correlation function G2 and normalize with n to obtain g2\n G2 = correlation_3op_1t(H, state0, taulist, c_ops,\n a_op.dag(), a_op.dag()*a_op, a_op,\n solver=solver, args=args, options=options)\n g2 = G2 / (n[0] * n)\n\n return g2, G2\n\n\n# spectrum\n\ndef spectrum(H, wlist, c_ops, a_op, b_op, solver=\"es\", use_pinv=False):\n r\"\"\"\n Calculate the spectrum of the correlation function\n :math:`\\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>`,\n i.e., the Fourier transform of the correlation function:\n\n .. math::\n\n S(\\omega) = \\int_{-\\infty}^{\\infty}\n \\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>\n e^{-i\\omega\\tau} d\\tau.\n\n using the solver indicated by the `solver` parameter. 
Note: this spectrum\n is only defined for stationary statistics (uses steady state rho0)\n\n Parameters\n ----------\n H : :class:`qutip.qobj`\n system Hamiltonian.\n wlist : array_like\n list of frequencies for :math:`\\omega`.\n c_ops : list\n list of collapse operators.\n a_op : Qobj\n operator A.\n b_op : Qobj\n operator B.\n solver : str\n choice of solver (`es` for exponential series and\n `pi` for psuedo-inverse).\n use_pinv : bool\n For use with the `pi` solver: if `True` use numpy's pinv method,\n otherwise use a generic solver.\n\n Returns\n -------\n spectrum : array\n An array with spectrum :math:`S(\\omega)` for the frequencies\n specified in `wlist`.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n\n if solver == \"es\":\n return _spectrum_es(H, wlist, c_ops, a_op, b_op)\n elif solver == \"pi\":\n return _spectrum_pi(H, wlist, c_ops, a_op, b_op, use_pinv)\n else:\n raise ValueError(\"Unrecognized choice of solver\" +\n \"%s (use es or pi).\" % solver)\n\n\ndef spectrum_correlation_fft(tlist, y, inverse=False):\n \"\"\"\n Calculate the power spectrum corresponding to a two-time correlation\n function using FFT.\n\n Parameters\n ----------\n tlist : array_like\n list/array of times :math:`t` which the correlation function is given.\n y : array_like\n list/array of correlations corresponding to time delays :math:`t`.\n inverse: boolean\n boolean parameter for using a positive exponent in the Fourier Transform instead. 
Default is False.\n\n Returns\n -------\n w, S : tuple\n Returns an array of angular frequencies 'w' and the corresponding\n two-sided power spectrum 'S(w)'.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n tlist = np.asarray(tlist)\n N = tlist.shape[0]\n dt = tlist[1] - tlist[0]\n if not np.allclose(np.diff(tlist), dt*np.ones(N-1,dtype=float)):\n raise Exception('tlist must be equally spaced for FFT.')\n\n if inverse:\n F = N * scipy.fftpack.ifft(y)\n else:\n F = scipy.fftpack.fft(y)\n\n # calculate the frequencies for the components in F\n f = scipy.fftpack.fftfreq(N, dt)\n\n # re-order frequencies from most negative to most positive (centre on 0)\n idx = np.array([], dtype = 'int')\n idx = np.append(idx, np.where(f < 0.0))\n idx = np.append(idx, np.where(f >= 0.0))\n\n return 2 * np.pi * f[idx], 2 * dt * np.real(F[idx])\n\n\n# -----------------------------------------------------------------------------\n# LEGACY API\n# -----------------------------------------------------------------------------\n\n# low level correlation\n\ndef correlation_ss(H, taulist, c_ops, a_op, b_op,\n solver=\"me\", reverse=False, args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the two-operator two-time correlation function:\n\n .. math::\n\n \\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>\n\n along one time axis (given steady-state initial conditions) using the\n quantum regression theorem and the evolution solver indicated by the\n `solver` parameter.\n\n Parameters\n ----------\n\n H : Qobj\n system Hamiltonian.\n\n taulist : array_like\n list of times for :math:`\\tau`. 
taulist must be positive and contain\n the element `0`.\n\n c_ops : list\n list of collapse operators.\n\n a_op : Qobj\n operator A.\n\n b_op : Qobj\n operator B.\n\n reverse : *bool*\n If `True`, calculate\n :math:`\\lim_{t \\to \\infty} \\left<A(t)B(t+\\tau)\\right>` instead of\n :math:`\\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>`.\n\n solver : str\n choice of solver (`me` for master-equation and\n `es` for exponential series).\n\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n\n corr_vec : array\n An array of correlation values for the times specified by `tlist`.\n\n References\n ----------\n\n See, Gardiner, Quantum Noise, Section 5.2.\n\n \"\"\"\n\n warn(\"correlation_ss() now legacy, please use correlation_2op_1t() with\" +\n \"initial state as None\", FutureWarning)\n\n if debug:\n print(inspect.stack()[0][3])\n\n return correlation_2op_1t(H, None, taulist, c_ops, a_op, b_op,\n solver=solver, reverse=reverse, args=args,\n options=options)\n\n\ndef correlation(H, state0, tlist, taulist, c_ops, a_op, b_op,\n solver=\"me\", reverse=False, args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the two-operator two-time correlation function:\n :math:`\\left<A(t+\\tau)B(t)\\right>`\n along two time axes using the quantum regression theorem and the\n evolution solver indicated by the `solver` parameter.\n\n Parameters\n ----------\n\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n\n state0 : Qobj\n Initial state density matrix :math:`\\rho(t_0)` or state vector\n :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will\n be used as the initial state. 
The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n\n tlist : array_like\n list of times for :math:`t`. tlist must be positive and contain the\n element `0`. When taking steady-steady correlations only one tlist\n value is necessary, i.e. when :math:`t \\rightarrow \\infty`; here\n tlist is automatically set, ignoring user input.\n\n taulist : array_like\n list of times for :math:`\\tau`. taulist must be positive and contain\n the element `0`.\n\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n\n a_op : Qobj\n operator A.\n\n b_op : Qobj\n operator B.\n\n reverse : *bool*\n If `True`, calculate :math:`\\left<A(t)B(t+\\tau)\\right>` instead of\n :math:`\\left<A(t+\\tau)B(t)\\right>`.\n\n solver : str\n choice of solver (`me` for master-equation, `mc` for Monte Carlo, and\n `es` for exponential series).\n\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n\n corr_mat : array\n An 2-dimensional array (matrix) of correlation values for the times\n specified by `tlist` (first index) and `taulist` (second index). 
If\n `tlist` is `None`, then a 1-dimensional array of correlation values\n is returned instead.\n\n References\n ----------\n\n See, Gardiner, Quantum Noise, Section 5.2.\n\n \"\"\"\n\n warn(\"correlation() now legacy, please use correlation_2op_2t()\",\n FutureWarning)\n\n if debug:\n print(inspect.stack()[0][3])\n\n return correlation_2op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op,\n solver=solver, reverse=reverse, args=args,\n options=options)\n\n\ndef correlation_4op_1t(H, state0, taulist, c_ops, a_op, b_op, c_op, d_op,\n solver=\"me\", args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the four-operator two-time correlation function:\n :math:`\\left<A(t)B(t+\\tau)C(t+\\tau)D(t)\\right>`\n along one time axis using the quantum regression theorem and the\n evolution solver indicated by the `solver` parameter.\n\n Note: it is not possibly to calculate a physically meaningful correlation\n of this form where :math:`\\tau<0`.\n\n Parameters\n ----------\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n rho0 : Qobj\n Initial state density matrix :math:`\\rho(t_0)` or state vector\n :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n taulist : array_like\n list of times for :math:`\\tau`. taulist must be positive and contain\n the element `0`.\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n\n a_op : Qobj\n operator A.\n\n b_op : Qobj\n operator B.\n\n c_op : Qobj\n operator C.\n\n d_op : Qobj\n operator D.\n\n solver : str\n choice of solver (`me` for master-equation, `mc` for Monte Carlo, and\n `es` for exponential series).\n\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. 
`mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n corr_vec : array\n An array of correlation values for the times specified by `taulist`.\n\n References\n ----------\n See, Gardiner, Quantum Noise, Section 5.2.\n\n .. note:: Deprecated in QuTiP 3.1\n Use correlation_3op_1t() instead.\n\n \"\"\"\n\n warn(\"correlation_4op_1t() now legacy, please use correlation_3op_1t()\",\n FutureWarning)\n warn(\"the reverse argument has been removed as it did not contain any\" +\n \"new physical information\", DeprecationWarning)\n\n if debug:\n print(inspect.stack()[0][3])\n\n return correlation_3op_1t(H, state0, taulist, c_ops,\n a_op, b_op * c_op, d_op,\n solver=solver, args=args, options=options)\n\n\ndef correlation_4op_2t(H, state0, tlist, taulist, c_ops,\n a_op, b_op, c_op, d_op, solver=\"me\", args={},\n options=Options(ntraj=[20, 100])):\n r\"\"\"\n Calculate the four-operator two-time correlation function:\n :math:`\\left<A(t)B(t+\\tau)C(t+\\tau)D(t)\\right>`\n along two time axes using the quantum regression theorem and the\n evolution solver indicated by the `solver` parameter.\n\n Note: it is not possibly to calculate a physically meaningful correlation\n of this form where :math:`\\tau<0`.\n\n Parameters\n ----------\n\n H : Qobj\n system Hamiltonian, may be time-dependent for solver choice of `me` or\n `mc`.\n\n rho0 : Qobj\n Initial state density matrix :math:`\\rho_0` or state vector\n :math:`\\psi_0`. If 'state0' is 'None', then the steady state will\n be used as the initial state. The 'steady-state' is only implemented\n for the `me` and `es` solvers.\n\n tlist : array_like\n list of times for :math:`t`. tlist must be positive and contain the\n element `0`. When taking steady-steady correlations only one tlist\n value is necessary, i.e. 
when :math:`t \\rightarrow \\infty`; here\n tlist is automatically set, ignoring user input.\n\n taulist : array_like\n list of times for :math:`\\tau`. taulist must be positive and contain\n the element `0`.\n\n c_ops : list\n list of collapse operators, may be time-dependent for solver choice of\n `me` or `mc`.\n\n a_op : Qobj\n operator A.\n\n b_op : Qobj\n operator B.\n\n c_op : Qobj\n operator C.\n\n d_op : Qobj\n operator D.\n\n solver : str\n choice of solver (`me` for master-equation, `mc` for Monte Carlo, and\n `es` for exponential series).\n\n options : Options\n solver options class. `ntraj` is taken as a two-element list because\n the `mc` correlator calls `mcsolve()` recursively; by default,\n `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in\n the `mc` correlator; by default, `mc_corr_eps=1e-10`.\n\n Returns\n -------\n\n corr_mat : array\n An 2-dimensional array (matrix) of correlation values for the times\n specified by `tlist` (first index) and `taulist` (second index). If\n `tlist` is `None`, then a 1-dimensional array of correlation values\n is returned instead.\n\n References\n ----------\n\n See, Gardiner, Quantum Noise, Section 5.2.\n\n \"\"\"\n\n warn(\"correlation_4op_2t() now legacy, please use correlation_3op_2t()\",\n FutureWarning)\n warn(\"the reverse argument has been removed as it did not contain any\" +\n \"new physical information\", DeprecationWarning)\n\n if debug:\n print(inspect.stack()[0][3])\n\n return correlation_3op_2t(H, state0, tlist, taulist, c_ops,\n a_op, b_op * c_op, d_op,\n solver=solver, args=args, options=options)\n\n\n# spectrum\n\ndef spectrum_ss(H, wlist, c_ops, a_op, b_op):\n r\"\"\"\n Calculate the spectrum of the correlation function\n :math:`\\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>`,\n i.e., the Fourier transform of the correlation function:\n\n .. 
math::\n\n S(\\omega) = \\int_{-\\infty}^{\\infty}\n \\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>\n e^{-i\\omega\\tau} d\\tau.\n\n using an eseries based solver Note: this spectrum is only defined for\n stationary statistics (uses steady state rho0).\n\n Parameters\n ----------\n\n H : :class:`qutip.qobj`\n system Hamiltonian.\n\n wlist : array_like\n list of frequencies for :math:`\\omega`.\n\n c_ops : *list* of :class:`qutip.qobj`\n list of collapse operators.\n\n a_op : :class:`qutip.qobj`\n operator A.\n\n b_op : :class:`qutip.qobj`\n operator B.\n\n use_pinv : *bool*\n If `True` use numpy's `pinv` method, otherwise use a generic solver.\n\n Returns\n -------\n\n spectrum : array\n An array with spectrum :math:`S(\\omega)` for the frequencies\n specified in `wlist`.\n\n \"\"\"\n\n warn(\"spectrum_ss() now legacy, please use spectrum()\", FutureWarning)\n\n return spectrum(H, wlist, c_ops, a_op, b_op, solver=\"es\")\n\n\ndef spectrum_pi(H, wlist, c_ops, a_op, b_op, use_pinv=False):\n r\"\"\"\n Calculate the spectrum of the correlation function\n :math:`\\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>`,\n i.e., the Fourier transform of the correlation function:\n\n .. math::\n\n S(\\omega) = \\int_{-\\infty}^{\\infty}\n \\lim_{t \\to \\infty} \\left<A(t+\\tau)B(t)\\right>\n e^{-i\\omega\\tau} d\\tau.\n\n using a psuedo-inverse method. 
Note: this spectrum is only defined for\n stationary statistics (uses steady state rho0)\n\n Parameters\n ----------\n\n H : :class:`qutip.qobj`\n system Hamiltonian.\n\n wlist : array_like\n list of frequencies for :math:`\\omega`.\n\n c_ops : *list* of :class:`qutip.qobj`\n list of collapse operators.\n\n a_op : :class:`qutip.qobj`\n operator A.\n\n b_op : :class:`qutip.qobj`\n operator B.\n\n use_pinv : *bool*\n If `True` use numpy's pinv method, otherwise use a generic solver.\n\n Returns\n -------\n\n spectrum : array\n An array with spectrum :math:`S(\\omega)` for the frequencies\n specified in `wlist`.\n\n \"\"\"\n\n warn(\"spectrum_pi() now legacy, please use spectrum()\", FutureWarning)\n\n return spectrum(H, wlist, c_ops, a_op, b_op,\n solver=\"pi\", use_pinv=use_pinv)\n\n\n# -----------------------------------------------------------------------------\n# PRIVATE SOLVER METHODS\n# -----------------------------------------------------------------------------\n\n# master 2t correlation solver\n\ndef _correlation_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op,\n solver=\"me\", args={}, options=Options()):\n \"\"\"\n Internal function for calling solvers in order to calculate the\n three-operator two-time correlation function:\n <A(t)B(t+tau)C(t)>\n \"\"\"\n\n # Note: the current form of the correlator is sufficient for all possible\n # two-time correlations (incuding those with 2ops vs 3). 
Ex: to compute a\n # correlation of the form <A(t+tau)B(t)>: a_op = identity, b_op = A,\n # and c_op = B.\n\n if debug:\n print(inspect.stack()[0][3])\n\n if min(tlist) != 0:\n raise TypeError(\"tlist must be positive and contain the element 0.\")\n if min(taulist) != 0:\n raise TypeError(\"taulist must be positive and contain the element 0.\")\n\n if config.tdname:\n _cython_build_cleanup(config.tdname)\n rhs_clear()\n H, c_ops, args = _td_wrap_array_str(H, c_ops, args, tlist)\n\n if solver == \"me\":\n return _correlation_me_2t(H, state0, tlist, taulist,\n c_ops, a_op, b_op, c_op,\n args=args, options=options)\n elif solver == \"mc\":\n return _correlation_mc_2t(H, state0, tlist, taulist,\n c_ops, a_op, b_op, c_op,\n args=args, options=options)\n elif solver == \"es\":\n return _correlation_es_2t(H, state0, tlist, taulist,\n c_ops, a_op, b_op, c_op)\n else:\n raise ValueError(\"Unrecognized choice of solver\" +\n \"%s (use me, mc, or es).\" % solver)\n\n\n# master equation solvers\n\ndef _correlation_me_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op,\n args={}, options=Options()):\n \"\"\"\n Internal function for calculating the three-operator two-time\n correlation function:\n <A(t)B(t+tau)C(t)>\n using a master equation solver.\n \"\"\"\n\n # the solvers only work for positive time differences and the correlators\n # require positive tau\n if state0 is None:\n rho0 = steadystate(H, c_ops)\n tlist = [0]\n elif isket(state0):\n rho0 = ket2dm(state0)\n else:\n rho0 = state0\n\n if debug:\n print(inspect.stack()[0][3])\n\n rho_t = mesolve(H, rho0, tlist, c_ops, [],\n args=args, options=options).states\n corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex)\n H_shifted, c_ops_shifted, _args = _transform_L_t_shift_new(H, c_ops, args)\n if config.tdname:\n _cython_build_cleanup(config.tdname)\n rhs_clear()\n\n for t_idx, rho in enumerate(rho_t):\n if not isinstance(H, Qobj):\n _args[\"_t0\"] = tlist[t_idx]\n\n corr_mat[t_idx, :] = mesolve(\n 
H_shifted, c_op * rho * a_op, taulist, c_ops_shifted,\n [b_op], args=_args, options=options\n ).expect[0]\n\n if t_idx == 1:\n options.rhs_reuse = True\n\n if config.tdname:\n _cython_build_cleanup(config.tdname)\n rhs_clear()\n\n return corr_mat\n\n\n# exponential series solvers\n\ndef _correlation_es_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op):\n \"\"\"\n Internal function for calculating the three-operator two-time\n correlation function:\n <A(t)B(t+tau)C(t)>\n using an exponential series solver.\n \"\"\"\n\n # the solvers only work for positive time differences and the correlators\n # require positive tau\n if state0 is None:\n rho0 = steadystate(H, c_ops)\n tlist = [0]\n elif isket(state0):\n rho0 = ket2dm(state0)\n else:\n rho0 = state0\n\n if debug:\n print(inspect.stack()[0][3])\n\n # contruct the Liouvillian\n L = liouvillian(H, c_ops)\n\n corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex)\n\n solES_t = ode2es(L, rho0)\n # evaluate the correlation function\n for t_idx in range(len(tlist)):\n rho_t = esval(solES_t, [tlist[t_idx]])\n solES_tau = ode2es(L, c_op * rho_t * a_op)\n corr_mat[t_idx, :] = esval(expect(b_op, solES_tau), taulist)\n return corr_mat\n\n\ndef _spectrum_es(H, wlist, c_ops, a_op, b_op):\n r\"\"\"\n Internal function for calculating the spectrum of the correlation function\n :math:`\\left<A(\\tau)B(0)\\right>`.\n \"\"\"\n if debug:\n print(inspect.stack()[0][3])\n\n # construct the Liouvillian\n L = liouvillian(H, c_ops)\n\n # find the steady state density matrix and a_op and b_op expecation values\n rho0 = steadystate(L)\n\n a_op_ss = expect(a_op, rho0)\n b_op_ss = expect(b_op, rho0)\n\n # eseries solution for (b * rho0)(t)\n es = ode2es(L, b_op * rho0)\n # correlation\n corr_es = expect(a_op, es)\n # covariance\n cov_es = corr_es - a_op_ss * b_op_ss\n # tidy up covariance (to combine, e.g., zero-frequency components that\n # cancel)\n cov_es.tidyup()\n # spectrum\n return esspec(cov_es, wlist)\n\n\n# Monte 
Carlo solvers\n\ndef _correlation_mc_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op,\n args={}, options=Options()):\n \"\"\"\n Internal function for calculating the three-operator two-time\n correlation function:\n <A(t)B(t+tau)C(t)>\n using a Monte Carlo solver.\n \"\"\"\n\n if not c_ops:\n raise TypeError(\"If no collapse operators are required, use the `me`\" +\n \"or `es` solvers\")\n\n # the solvers only work for positive time differences and the correlators\n # require positive tau\n if state0 is None:\n raise NotImplementedError(\"steady state not implemented for \" +\n \"mc solver, please use `es` or `me`\")\n elif not isket(state0):\n raise TypeError(\"state0 must be a state vector.\")\n psi0 = state0\n\n if debug:\n print(inspect.stack()[0][3])\n\n psi_t_mat = mcsolve(\n H, psi0, tlist, c_ops, [],\n args=args, ntraj=options.ntraj[0], options=options, progress_bar=None\n ).states\n\n corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex)\n H_shifted, c_ops_shifted, _args = _transform_L_t_shift_new(H, c_ops, args)\n if config.tdname:\n _cython_build_cleanup(config.tdname)\n rhs_clear()\n\n # calculation of <A(t)B(t+tau)C(t)> from only knowledge of psi0 requires\n # averaging over both t and tau\n for t_idx in range(np.size(tlist)):\n if not isinstance(H, Qobj):\n _args[\"_t0\"] = tlist[t_idx]\n\n for trial_idx in range(options.ntraj[0]):\n if isinstance(a_op, Qobj) and isinstance(c_op, Qobj):\n if a_op.dag() == c_op:\n # A shortcut here, requires only 1/4 the trials\n chi_0 = (options.mc_corr_eps + c_op) * \\\n psi_t_mat[trial_idx, t_idx]\n\n # evolve these states and calculate expectation value of B\n c_tau = chi_0.norm()**2 * mcsolve(\n H_shifted, chi_0/chi_0.norm(), taulist, c_ops_shifted,\n [b_op],\n args=_args, ntraj=options.ntraj[1], options=options,\n progress_bar=None\n ).expect[0]\n\n # final correlation vector computed by combining the\n # averages\n corr_mat[t_idx, :] += c_tau/options.ntraj[1]\n else:\n # otherwise, need 
four trial wavefunctions\n # (Ad+C)*psi_t, (Ad+iC)*psi_t, (Ad-C)*psi_t, (Ad-iC)*psi_t\n if isinstance(a_op, Qobj):\n a_op_dag = a_op.dag()\n else:\n # assume this is a number, ex. i.e. a_op = 1\n # if this is not correct, the over-loaded addition\n # operation will raise errors\n a_op_dag = a_op\n chi_0 = [(options.mc_corr_eps + a_op_dag +\n np.exp(1j*x*np.pi/2)*c_op) *\n psi_t_mat[trial_idx, t_idx]\n for x in range(4)]\n\n # evolve these states and calculate expectation value of B\n c_tau = [\n chi.norm()**2 * mcsolve(\n H_shifted, chi/chi.norm(), taulist, c_ops_shifted,\n [b_op],\n args=_args, ntraj=options.ntraj[1], options=options,\n progress_bar=None\n ).expect[0]\n for chi in chi_0\n ]\n\n # final correlation vector computed by combining the averages\n corr_mat_add = np.asarray(\n 1.0 / (4*options.ntraj[0]) *\n (c_tau[0] - c_tau[2] - 1j*c_tau[1] + 1j*c_tau[3]),\n dtype=corr_mat.dtype\n )\n corr_mat[t_idx, :] += corr_mat_add\n\n if t_idx == 1:\n options.rhs_reuse = True\n\n if config.tdname:\n _cython_build_cleanup(config.tdname)\n rhs_clear()\n\n return corr_mat\n\n\n# pseudo-inverse solvers\ndef _spectrum_pi(H, wlist, c_ops, a_op, b_op, use_pinv=False):\n r\"\"\"\n Internal function for calculating the spectrum of the correlation function\n :math:`\\left<A(\\tau)B(0)\\right>`.\n \"\"\"\n\n L = H if issuper(H) else liouvillian(H, c_ops)\n\n tr_mat = tensor([qeye(n) for n in L.dims[0][0]])\n N = np.prod(L.dims[0][0])\n\n A = L.full()\n b = spre(b_op).full()\n a = spre(a_op).full()\n\n tr_vec = np.transpose(mat2vec(tr_mat.full()))\n\n rho_ss = steadystate(L)\n rho = np.transpose(mat2vec(rho_ss.full()))\n\n I = np.identity(N * N)\n P = np.kron(np.transpose(rho), tr_vec)\n Q = I - P\n\n spectrum = np.zeros(len(wlist))\n\n for idx, w in enumerate(wlist):\n if use_pinv:\n MMR = np.linalg.pinv(-1.0j * w * I + A)\n else:\n MMR = np.dot(Q, np.linalg.solve(-1.0j * w * I + A, Q))\n\n s = np.dot(tr_vec,\n np.dot(a, np.dot(MMR, np.dot(b, np.transpose(rho)))))\n 
spectrum[idx] = -2 * np.real(s[0, 0])\n\n return spectrum\n\n\n# auxiliary\ndef _transform_shift_one_coeff(op, args):\n if isinstance(op, types.FunctionType):\n # function-list based time-dependence\n if isinstance(args, dict):\n def fn(t, args_i):\n return op(t + args_i[\"_t0\"], args_i)\n fn = lambda t, args_i: \\\n op(t + args_i[\"_t0\"], args_i)\n else:\n def fn(t, args_i):\n return op(t + args_i[\"_t0\"], args_i[\"_user_args\"])\n else:\n fn = sub(\"(?<=[^0-9a-zA-Z_])t(?=[^0-9a-zA-Z_])\",\n \"(t+_t0)\", \" \" + op + \" \")\n return fn\n\n\ndef _transform_shift_one_op(op, args={}):\n if isinstance(op, Qobj):\n new_op = op\n elif isinstance(op, QobjEvo):\n new_op = op\n new_op._shift\n elif callable(op):\n def new_op(t, args_i):\n return op(t + args_i[\"_t0\"], args_i)\n elif isinstance(op, list):\n new_op = []\n for block in op:\n if isinstance(block, list):\n new_op.append([block[0],\n _transform_shift_one_coeff(block[1], args)])\n else:\n new_op.append(block)\n return new_op\n\n\ndef _transform_L_t_shift_new(H, c_ops, args={}):\n H_shifted = _transform_shift_one_op(H, args)\n c_ops_shifted = [_transform_shift_one_op(op, args) for op in c_ops]\n if args is None:\n _args = {\"_t0\": 0}\n elif isinstance(args, dict):\n _args = args.copy()\n _args[\"_t0\"] = 0\n else:\n _args = {\"_user_args\": args, \"_t0\": 0}\n\n return H_shifted, c_ops_shifted, _args\n" ]
[ [ "numpy.linalg.solve", "numpy.sqrt", "numpy.asarray", "numpy.ones", "numpy.linalg.pinv", "numpy.size", "numpy.real", "numpy.identity", "numpy.diff", "numpy.prod", "numpy.transpose", "numpy.exp", "numpy.array", "numpy.where" ] ]
indy-lab/ProtoTransfer
[ "2e186ffd5bd795244c6dd7192575b84f935c5749" ]
[ "omni-mini/prototransfer/protonet.py" ]
[ "\"\"\"Adapted from https://github.com/jakesnell/prototypical-networks/blob/master/protonets/models/few_shot.py\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom prototransfer.utils_proto import prototypical_loss, get_prototypes\n\nclass Protonet(nn.Module):\n \"\"\"Prototypical network\n\n Parameters:\n - encoder (nn.Module): Embedding function.\n - distance (string): Use euclidean or cosine distance.\n - device (string, torch.device): Use GPU or CPU?\n \n \"\"\"\n def __init__(self, encoder, distance='euclidean', device=\"cpu\"):\n super(Protonet, self).__init__()\n self.encoder = encoder\n self.device = device\n self.distance = distance\n\n def loss(self, sample, ways):\n # Extract support and query data\n # with shape [batch_size x num_samples x img_dims]\n # Labels are dummy labels in [0, ..., ways]\n if \"support\" in sample.keys():\n x_support = sample[\"support\"][0]\n y_support = sample[\"support\"][1]\n else:\n x_support = sample[\"train\"][0]\n y_support = sample[\"train\"][1]\n x_support = x_support.to(self.device)\n y_support = y_support.to(self.device)\n\n if \"query\" in sample.keys():\n x_query = sample[\"query\"][0]\n y_query = sample[\"query\"][1]\n else:\n x_query = sample[\"test\"][0]\n y_query = sample[\"test\"][1]\n x_query = x_query.to(self.device)\n y_query = y_query.to(self.device)\n\n # Extract shots\n shots = int(x_support.size(1) / ways)\n test_shots = int(x_query.size(1) / ways)\n\n # Extract features (first dim is batch dim)\n x = torch.cat([x_support, x_query], 1)\n z = self.encoder.forward(x)\n z_support = z[:,:ways*shots]\n z_query = z[:,ways*shots:]\n\n # Calucalte prototypes\n z_proto = get_prototypes(z_support, y_support, ways)\n\n # Calculate loss and accuracies\n loss, accuracy = prototypical_loss(z_proto, z_query, y_query,\n distance=self.distance)\n\n return loss, accuracy\n\n" ]
[ [ "torch.cat" ] ]