repo_name
stringlengths 6
130
| hexsha
list | file_path
list | code
list | apis
list |
---|---|---|---|---|
Loranet-Technologies/traffic-analysis
|
[
"e1e50b6c36b3da6279678c679500a8cf4e62ccef"
] |
[
"maskrcnn_benchmark/structures/segmentation_mask.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\n\nimport pycocotools.mask as mask_utils\n\n# transpose\nFLIP_LEFT_RIGHT = 0\nFLIP_TOP_BOTTOM = 1\n\n\nclass Mask(object):\n \"\"\"\n This class is unfinished and not meant for use yet\n It is supposed to contain the mask for an object as\n a 2d tensor\n \"\"\"\n\n def __init__(self, masks, size, mode):\n self.masks = masks\n self.size = size\n self.mode = mode\n\n def transpose(self, method):\n if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):\n raise NotImplementedError(\n \"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented\"\n )\n\n width, height = self.size\n if method == FLIP_LEFT_RIGHT:\n dim = width\n idx = 2\n elif method == FLIP_TOP_BOTTOM:\n dim = height\n idx = 1\n\n flip_idx = list(range(dim)[::-1])\n flipped_masks = self.masks.index_select(dim, flip_idx)\n return Mask(flipped_masks, self.size, self.mode)\n\n def crop(self, box):\n w, h = box[2] - box[0], box[3] - box[1]\n\n cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]]\n return Mask(cropped_masks, size=(w, h), mode=self.mode)\n\n def resize(self, size, *args, **kwargs):\n pass\n\n\nclass Polygons(object):\n \"\"\"\n This class holds a set of polygons that represents a single instance\n of an object mask. 
The object can be represented as a set of\n polygons\n \"\"\"\n\n def __init__(self, polygons, size, mode):\n # assert isinstance(polygons, list), '{}'.format(polygons)\n if isinstance(polygons, list):\n polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons]\n elif isinstance(polygons, Polygons):\n polygons = polygons.polygons\n\n self.polygons = polygons\n self.size = size\n self.mode = mode\n\n def transpose(self, method):\n if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):\n raise NotImplementedError(\n \"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented\"\n )\n\n flipped_polygons = []\n width, height = self.size\n if method == FLIP_LEFT_RIGHT:\n dim = width\n idx = 0\n elif method == FLIP_TOP_BOTTOM:\n dim = height\n idx = 1\n\n for poly in self.polygons:\n p = poly.clone()\n TO_REMOVE = 1\n p[idx::2] = dim - poly[idx::2] - TO_REMOVE\n flipped_polygons.append(p)\n\n return Polygons(flipped_polygons, size=self.size, mode=self.mode)\n\n def crop(self, box):\n w, h = box[2] - box[0], box[3] - box[1]\n\n # TODO chck if necessary\n w = max(w, 1)\n h = max(h, 1)\n\n cropped_polygons = []\n for poly in self.polygons:\n p = poly.clone()\n p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w)\n p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h)\n cropped_polygons.append(p)\n\n return Polygons(cropped_polygons, size=(w, h), mode=self.mode)\n\n def resize(self, size, *args, **kwargs):\n ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))\n if ratios[0] == ratios[1]:\n ratio = ratios[0]\n scaled_polys = [p * ratio for p in self.polygons]\n return Polygons(scaled_polys, size, mode=self.mode)\n\n ratio_w, ratio_h = ratios\n scaled_polygons = []\n for poly in self.polygons:\n p = poly.clone()\n p[0::2] *= ratio_w\n p[1::2] *= ratio_h\n scaled_polygons.append(p)\n\n return Polygons(scaled_polygons, size=size, mode=self.mode)\n\n def convert(self, mode):\n width, height = self.size\n if mode == \"mask\":\n rles = 
mask_utils.frPyObjects(\n [p.numpy() for p in self.polygons], height, width\n )\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n mask = torch.from_numpy(mask)\n # TODO add squeeze?\n return mask\n\n def __repr__(self):\n s = self.__class__.__name__ + \"(\"\n s += \"num_polygons={}, \".format(len(self.polygons))\n s += \"image_width={}, \".format(self.size[0])\n s += \"image_height={}, \".format(self.size[1])\n s += \"mode={})\".format(self.mode)\n return s\n\n\nclass SegmentationMask(object):\n \"\"\"\n This class stores the segmentations for all objects in the image\n \"\"\"\n\n def __init__(self, polygons, size, mode=None):\n \"\"\"\n Arguments:\n polygons: a list of list of lists of numbers. The first\n level of the list correspond to individual instances,\n the second level to all the polygons that compose the\n object, and the third level to the polygon coordinates.\n \"\"\"\n assert isinstance(polygons, list)\n\n self.polygons = [Polygons(p, size, mode) for p in polygons]\n self.size = size\n self.mode = mode\n\n def transpose(self, method):\n if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):\n raise NotImplementedError(\n \"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented\"\n )\n\n flipped = []\n for polygon in self.polygons:\n flipped.append(polygon.transpose(method))\n return SegmentationMask(flipped, size=self.size, mode=self.mode)\n\n def crop(self, box):\n w, h = box[2] - box[0], box[3] - box[1]\n cropped = []\n for polygon in self.polygons:\n cropped.append(polygon.crop(box))\n return SegmentationMask(cropped, size=(w, h), mode=self.mode)\n\n def resize(self, size, *args, **kwargs):\n scaled = []\n for polygon in self.polygons:\n scaled.append(polygon.resize(size, *args, **kwargs))\n return SegmentationMask(scaled, size=size, mode=self.mode)\n\n def to(self, *args, **kwargs):\n return self\n\n def __getitem__(self, item):\n if isinstance(item, (int, slice)):\n selected_polygons = [self.polygons[item]]\n else:\n # advanced 
indexing on a single dimension\n selected_polygons = []\n if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:\n item = item.nonzero()\n item = item.squeeze(1) if item.numel() > 0 else item\n item = item.tolist()\n for i in item:\n selected_polygons.append(self.polygons[i])\n return SegmentationMask(selected_polygons, size=self.size, mode=self.mode)\n\n def __len__(self):\n return len(self.polygons)\n\n def __iter__(self):\n return iter(self.polygons)\n\n def __repr__(self):\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self.polygons))\n s += \"image_width={}, \".format(self.size[0])\n s += \"image_height={})\".format(self.size[1])\n return s\n"
] |
[
[
"torch.as_tensor",
"torch.from_numpy"
]
] |
saanikagupta/Computational-Intelligence
|
[
"1ae304c691462a9e69bfb1035053a78653041901"
] |
[
"kNN_IrisDataset/kNN.py"
] |
[
"# K-Nearest Neighbors (K-NN)\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Iris.csv')\nX = dataset.iloc[:, [1, 2, 3, 4]].values\ny = dataset.iloc[:, 5].values\n\n# Encoding the Independent Variable\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_y = LabelEncoder()\ny = labelencoder_y.fit_transform(y)\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# Feature Scalingss\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Fitting K-NN to the Training set\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)\nclassifier.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n# Pairplot\nimport seaborn as sns\nsns.pairplot(dataset, hue = \"Species\");\n"
] |
[
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.confusion_matrix",
"sklearn.preprocessing.StandardScaler",
"sklearn.neighbors.KNeighborsClassifier",
"pandas.read_csv",
"sklearn.cross_validation.train_test_split"
]
] |
acharal/tensorflow
|
[
"c5d99169bea3f5d1e3cef973690d2ec7fdbac80f",
"c5d99169bea3f5d1e3cef973690d2ec7fdbac80f",
"c5d99169bea3f5d1e3cef973690d2ec7fdbac80f"
] |
[
"tensorflow/python/kernel_tests/qr_op_test.py",
"tensorflow/compiler/tests/binary_ops_test.py",
"tensorflow/python/keras/_impl/keras/estimator_test.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.math_ops.matrix_inverse.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.platform import test\n\n\ndef _AddTest(test_class, op_name, testcase_name, fn):\n test_name = \"_\".join([\"test\", op_name, testcase_name])\n if hasattr(test_class, test_name):\n raise RuntimeError(\"Test %s defined more than once\" % test_name)\n setattr(test_class, test_name, fn)\n\n\nclass QrOpTest(test.TestCase):\n\n def testWrongDimensions(self):\n # The input to qr should be a tensor of at least rank 2.\n scalar = constant_op.constant(1.)\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be at least rank 2 but is rank 0\"):\n linalg_ops.qr(scalar)\n vector = constant_op.constant([1., 2.])\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be at least rank 2 but is rank 1\"):\n linalg_ops.qr(vector)\n\n def testConcurrentExecutesWithoutError(self):\n with self.test_session(use_gpu=True) as sess:\n all_ops 
= []\n for full_matrices_ in True, False:\n for rows_ in 4, 5:\n for cols_ in 4, 5:\n matrix1 = random_ops.random_normal([rows_, cols_], seed=42)\n matrix2 = random_ops.random_normal([rows_, cols_], seed=42)\n q1, r1 = linalg_ops.qr(matrix1, full_matrices=full_matrices_)\n q2, r2 = linalg_ops.qr(matrix2, full_matrices=full_matrices_)\n all_ops += [q1, r1, q2, r2]\n val = sess.run(all_ops)\n for i in range(8):\n q = 4 * i\n self.assertAllEqual(val[q], val[q + 2]) # q1 == q2\n self.assertAllEqual(val[q + 1], val[q + 3]) # r1 == r2\n\n\ndef _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):\n\n is_complex = dtype_ in (np.complex64, np.complex128)\n is_single = dtype_ in (np.float32, np.complex64)\n\n def CompareOrthogonal(self, x, y, rank):\n if is_single:\n atol = 5e-4\n else:\n atol = 5e-14\n # We only compare the first 'rank' orthogonal vectors since the\n # remainder form an arbitrary orthonormal basis for the\n # (row- or column-) null space, whose exact value depends on\n # implementation details. 
Notice that since we check that the\n # matrices of singular vectors are unitary elsewhere, we do\n # implicitly test that the trailing vectors of x and y span the\n # same space.\n x = x[..., 0:rank]\n y = y[..., 0:rank]\n # Q is only unique up to sign (complex phase factor for complex matrices),\n # so we normalize the sign first.\n sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)\n phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))\n x *= phases\n self.assertAllClose(x, y, atol=atol)\n\n def CheckApproximation(self, a, q, r):\n if is_single:\n tol = 1e-5\n else:\n tol = 1e-14\n # Tests that a ~= q*r.\n a_recon = math_ops.matmul(q, r)\n self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)\n\n def CheckUnitary(self, x):\n # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.\n xx = math_ops.matmul(math_ops.conj(x), x, transpose_a=True)\n identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)\n if is_single:\n tol = 1e-5\n else:\n tol = 1e-14\n self.assertAllClose(identity.eval(), xx.eval(), atol=tol)\n\n def Test(self):\n np.random.seed(1)\n x_np = np.random.uniform(\n low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)\n if is_complex:\n x_np += 1j * np.random.uniform(\n low=-1.0, high=1.0,\n size=np.prod(shape_)).reshape(shape_).astype(dtype_)\n\n with self.test_session(use_gpu=True) as sess:\n if use_static_shape_:\n x_tf = constant_op.constant(x_np)\n else:\n x_tf = array_ops.placeholder(dtype_)\n q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices_)\n\n if use_static_shape_:\n q_tf_val, r_tf_val = sess.run([q_tf, r_tf])\n else:\n q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})\n\n q_dims = q_tf_val.shape\n np_q = np.ndarray(q_dims, dtype_)\n np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))\n new_first_dim = np_q_reshape.shape[0]\n\n x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))\n for i in range(new_first_dim):\n if 
full_matrices_:\n np_q_reshape[i,:,:], _ = \\\n np.linalg.qr(x_reshape[i,:,:], mode=\"complete\")\n else:\n np_q_reshape[i,:,:], _ = \\\n np.linalg.qr(x_reshape[i,:,:], mode=\"reduced\")\n np_q = np.reshape(np_q_reshape, q_dims)\n CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))\n CheckApproximation(self, x_np, q_tf_val, r_tf_val)\n CheckUnitary(self, q_tf_val)\n\n return Test\n\n\nif __name__ == \"__main__\":\n for dtype in np.float32, np.float64, np.complex64, np.complex128:\n for rows in 1, 2, 5, 10, 32, 100:\n for cols in 1, 2, 5, 10, 32, 100:\n for full_matrices in False, True:\n for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):\n for use_static_shape in True, False:\n shape = batch_dims + (rows, cols)\n name = \"%s_%s_full_%s_static_%s\" % (dtype.__name__,\n \"_\".join(map(str, shape)),\n full_matrices,\n use_static_shape)\n _AddTest(QrOpTest, \"Qr\", name,\n _GetQrOpTest(dtype, shape, full_matrices,\n use_static_shape))\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test cases for binary operators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests.xla_test import XLATestCase\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import googletest\n\n\nclass BinaryOpsTest(XLATestCase):\n \"\"\"Test cases for binary operators.\"\"\"\n\n def _testBinary(self, op, a, b, expected, equality_test=None):\n with self.test_session() as session:\n with self.test_scope():\n pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name=\"a\")\n pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name=\"b\")\n output = op(pa, pb)\n result = session.run(output, {pa: a, pb: b})\n if equality_test is None:\n equality_test = self.assertAllClose\n equality_test(result, expected, rtol=1e-3)\n\n def ListsAreClose(self, result, expected, rtol):\n \"\"\"Tests closeness of two lists of floats.\"\"\"\n 
self.assertEqual(len(result), len(expected))\n for i in range(len(result)):\n self.assertAllClose(result[i], expected[i], rtol)\n\n def testFloatOps(self):\n for dtype in self.float_types:\n self._testBinary(\n lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),\n np.array([[[[-1, 2.00009999], [-3, 4.01]]]], dtype=dtype),\n np.array([[[[-1.001, 2], [-3.00009, 4]]]], dtype=dtype),\n expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))\n\n self._testBinary(\n gen_math_ops._real_div,\n np.array([3, 3, -1.5, -8, 44], dtype=dtype),\n np.array([2, -2, 7, -4, 0], dtype=dtype),\n expected=np.array(\n [1.5, -1.5, -0.2142857, 2, float(\"inf\")], dtype=dtype))\n\n self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))\n\n self._testBinary(\n math_ops.pow,\n np.array([1, 2], dtype=dtype),\n np.zeros(shape=[0, 2], dtype=dtype),\n expected=np.zeros(shape=[0, 2], dtype=dtype))\n self._testBinary(\n math_ops.pow,\n np.array([10, 4], dtype=dtype),\n np.array([2, 3], dtype=dtype),\n expected=np.array([100, 64], dtype=dtype))\n self._testBinary(\n math_ops.pow,\n dtype(2),\n np.array([3, 4], dtype=dtype),\n expected=np.array([8, 16], dtype=dtype))\n self._testBinary(\n math_ops.pow,\n np.array([[2], [3]], dtype=dtype),\n dtype(4),\n expected=np.array([[16], [81]], dtype=dtype))\n\n self._testBinary(\n gen_math_ops._reciprocal_grad,\n np.array([4, -3, -2, 1], dtype=dtype),\n np.array([5, -6, 7, -8], dtype=dtype),\n expected=np.array([-80, 54, -28, 8], dtype=dtype))\n\n self._testBinary(\n gen_math_ops._sigmoid_grad,\n np.array([4, 3, 2, 1], dtype=dtype),\n np.array([5, 6, 7, 8], dtype=dtype),\n expected=np.array([-60, -36, -14, 0], dtype=dtype))\n\n self._testBinary(\n gen_math_ops._rsqrt_grad,\n np.array([4, 3, 2, 1], dtype=dtype),\n np.array([5, 6, 7, 8], dtype=dtype),\n expected=np.array([-160, -81, -28, -4], dtype=dtype))\n\n self._testBinary(\n gen_math_ops._sqrt_grad,\n np.array([4, 3, 2, 1], dtype=dtype),\n np.array([5, 6, 7, 8], 
dtype=dtype),\n expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))\n\n self._testBinary(\n gen_nn_ops._softplus_grad,\n np.array([4, 3, 2, 1], dtype=dtype),\n np.array([5, 6, 7, 8], dtype=dtype),\n expected=np.array(\n [3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))\n\n self._testBinary(\n gen_nn_ops._softsign_grad,\n np.array([4, 3, 2, 1], dtype=dtype),\n np.array([5, 6, 7, 8], dtype=dtype),\n expected=np.array(\n [0.11111111, 0.06122449, 0.03125, 0.01234568], dtype=dtype))\n\n self._testBinary(\n gen_math_ops._tanh_grad,\n np.array([4, 3, 2, 1], dtype=dtype),\n np.array([5, 6, 7, 8], dtype=dtype),\n expected=np.array([-75, -48, -21, 0], dtype=dtype))\n\n self._testBinary(\n gen_nn_ops._elu_grad,\n np.array([1, 2, 3, 4, 5, 6], dtype=dtype),\n np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),\n expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))\n\n self._testBinary(\n gen_nn_ops._selu_grad,\n np.array([1, 2, 3, 4, 5, 6], dtype=dtype),\n np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),\n expected=np.array(\n [1.158099340847, 2.7161986816948, 4.67429802254,\n 4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))\n\n self._testBinary(\n gen_nn_ops._relu_grad,\n np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),\n np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),\n expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))\n\n self._testBinary(\n gen_nn_ops._relu6_grad,\n np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),\n np.array(\n [0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),\n expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))\n\n self._testBinary(\n gen_nn_ops._softmax_cross_entropy_with_logits,\n np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),\n np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),\n expected=[\n np.array([1.44019, 2.44019], dtype=dtype),\n np.array([[-0.067941, -0.112856, -0.063117, 0.243914],\n [-0.367941, 
-0.212856, 0.036883, 0.543914]],\n dtype=dtype),\n ],\n equality_test=self.ListsAreClose)\n\n self._testBinary(\n gen_nn_ops._sparse_softmax_cross_entropy_with_logits,\n np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],\n [0.9, 1.0, 1.1, 1.2]], dtype=dtype),\n np.array([2, 1, 7], dtype=np.int32),\n expected=[\n np.array([1.342536, 1.442536, np.nan], dtype=dtype),\n np.array([[0.213838, 0.236328, -0.738817, 0.288651],\n [0.213838, -0.763672, 0.261183, 0.288651],\n [np.nan, np.nan, np.nan, np.nan]],\n dtype=dtype),\n ],\n equality_test=self.ListsAreClose)\n\n def testIntOps(self):\n for dtype in self.int_types:\n self._testBinary(\n gen_math_ops._truncate_div,\n np.array([3, 3, -1, -9, -8], dtype=dtype),\n np.array([2, -2, 7, 2, -4], dtype=dtype),\n expected=np.array([1, -1, 0, -4, 2], dtype=dtype))\n\n def testNumericOps(self):\n for dtype in self.numeric_types:\n self._testBinary(\n math_ops.add,\n np.array([1, 2], dtype=dtype),\n np.array([10, 20], dtype=dtype),\n expected=np.array([11, 22], dtype=dtype))\n self._testBinary(\n math_ops.add,\n dtype(5),\n np.array([1, 2], dtype=dtype),\n expected=np.array([6, 7], dtype=dtype))\n self._testBinary(\n math_ops.add,\n np.array([[1], [2]], dtype=dtype),\n dtype(7),\n expected=np.array([[8], [9]], dtype=dtype))\n\n self._testBinary(\n math_ops.subtract,\n np.array([1, 2], dtype=dtype),\n np.array([10, 20], dtype=dtype),\n expected=np.array([-9, -18], dtype=dtype))\n self._testBinary(\n math_ops.subtract,\n dtype(5),\n np.array([1, 2], dtype=dtype),\n expected=np.array([4, 3], dtype=dtype))\n self._testBinary(\n math_ops.subtract,\n np.array([[1], [2]], dtype=dtype),\n dtype(7),\n expected=np.array([[-6], [-5]], dtype=dtype))\n\n self._testBinary(\n math_ops.maximum,\n np.array([1, 2], dtype=dtype),\n np.array([10, 20], dtype=dtype),\n expected=np.array([10, 20], dtype=dtype))\n self._testBinary(\n math_ops.maximum,\n dtype(5),\n np.array([1, 20], dtype=dtype),\n expected=np.array([5, 20], dtype=dtype))\n 
self._testBinary(\n math_ops.maximum,\n np.array([[10], [2]], dtype=dtype),\n dtype(7),\n expected=np.array([[10], [7]], dtype=dtype))\n\n self._testBinary(\n math_ops.minimum,\n np.array([1, 20], dtype=dtype),\n np.array([10, 2], dtype=dtype),\n expected=np.array([1, 2], dtype=dtype))\n self._testBinary(\n math_ops.minimum,\n dtype(5),\n np.array([1, 20], dtype=dtype),\n expected=np.array([1, 5], dtype=dtype))\n self._testBinary(\n math_ops.minimum,\n np.array([[10], [2]], dtype=dtype),\n dtype(7),\n expected=np.array([[7], [2]], dtype=dtype))\n\n self._testBinary(\n math_ops.multiply,\n np.array([1, 20], dtype=dtype),\n np.array([10, 2], dtype=dtype),\n expected=np.array([10, 40], dtype=dtype))\n self._testBinary(\n math_ops.multiply,\n dtype(5),\n np.array([1, 20], dtype=dtype),\n expected=np.array([5, 100], dtype=dtype))\n self._testBinary(\n math_ops.multiply,\n np.array([[10], [2]], dtype=dtype),\n dtype(7),\n expected=np.array([[70], [14]], dtype=dtype))\n\n self._testBinary(\n math_ops.squared_difference,\n np.array([1, 2], dtype=dtype),\n np.array([10, 20], dtype=dtype),\n expected=np.array([81, 324], dtype=dtype))\n self._testBinary(\n math_ops.squared_difference,\n dtype(5),\n np.array([1, 2], dtype=dtype),\n expected=np.array([16, 9], dtype=dtype))\n self._testBinary(\n math_ops.squared_difference,\n np.array([[1], [2]], dtype=dtype),\n dtype(7),\n expected=np.array([[36], [25]], dtype=dtype))\n\n self._testBinary(\n nn_ops.bias_add,\n np.array([[1, 2], [3, 4]], dtype=dtype),\n np.array([2, -1], dtype=dtype),\n expected=np.array([[3, 1], [5, 3]], dtype=dtype))\n self._testBinary(\n nn_ops.bias_add,\n np.array([[[[1, 2], [3, 4]]]], dtype=dtype),\n np.array([2, -1], dtype=dtype),\n expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))\n\n def _testDivision(self, dtype):\n \"\"\"Test cases for division operators.\"\"\"\n self._testBinary(\n math_ops.div,\n np.array([10, 20], dtype=dtype),\n np.array([10, 2], dtype=dtype),\n expected=np.array([1, 10], 
dtype=dtype))\n self._testBinary(\n math_ops.div,\n dtype(40),\n np.array([2, 20], dtype=dtype),\n expected=np.array([20, 2], dtype=dtype))\n self._testBinary(\n math_ops.div,\n np.array([[10], [4]], dtype=dtype),\n dtype(2),\n expected=np.array([[5], [2]], dtype=dtype))\n\n self._testBinary(\n gen_math_ops._floor_div,\n np.array([3, 3, -1, -9, -8], dtype=dtype),\n np.array([2, -2, 7, 2, -4], dtype=dtype),\n expected=np.array([1, -2, -1, -5, 2], dtype=dtype))\n\n def testIntDivision(self):\n for dtype in self.int_types:\n self._testDivision(dtype)\n\n def testFloatDivision(self):\n for dtype in self.float_types:\n self._testDivision(dtype)\n\n def _testRemainder(self, dtype):\n \"\"\"Test cases for remainder operators.\"\"\"\n self._testBinary(\n gen_math_ops._floor_mod,\n np.array([3, 3, -1, -8], dtype=dtype),\n np.array([2, -2, 7, -4], dtype=dtype),\n expected=np.array([1, -1, 6, 0], dtype=dtype))\n self._testBinary(\n gen_math_ops._truncate_mod,\n np.array([3, 3, -1, -8], dtype=dtype),\n np.array([2, -2, 7, -4], dtype=dtype),\n expected=np.array([1, 1, -1, 0], dtype=dtype))\n\n def testIntRemainder(self):\n for dtype in self.int_types:\n self._testRemainder(dtype)\n\n def testFloatRemainder(self):\n for dtype in self.float_types:\n self._testRemainder(dtype)\n\n def testLogicalOps(self):\n self._testBinary(\n math_ops.logical_and,\n np.array([[True, False], [False, True]], dtype=np.bool),\n np.array([[False, True], [False, True]], dtype=np.bool),\n expected=np.array([[False, False], [False, True]], dtype=np.bool))\n\n self._testBinary(\n math_ops.logical_or,\n np.array([[True, False], [False, True]], dtype=np.bool),\n np.array([[False, True], [False, True]], dtype=np.bool),\n expected=np.array([[True, True], [False, True]], dtype=np.bool))\n\n def testComparisons(self):\n self._testBinary(\n math_ops.equal,\n np.array([1, 5, 20], dtype=np.float32),\n np.array([10, 5, 2], dtype=np.float32),\n expected=np.array([False, True, False], dtype=np.bool))\n 
self._testBinary(\n math_ops.equal,\n np.float32(5),\n np.array([1, 5, 20], dtype=np.float32),\n expected=np.array([False, True, False], dtype=np.bool))\n self._testBinary(\n math_ops.equal,\n np.array([[10], [7], [2]], dtype=np.float32),\n np.float32(7),\n expected=np.array([[False], [True], [False]], dtype=np.bool))\n\n self._testBinary(\n math_ops.not_equal,\n np.array([1, 5, 20], dtype=np.float32),\n np.array([10, 5, 2], dtype=np.float32),\n expected=np.array([True, False, True], dtype=np.bool))\n self._testBinary(\n math_ops.not_equal,\n np.float32(5),\n np.array([1, 5, 20], dtype=np.float32),\n expected=np.array([True, False, True], dtype=np.bool))\n self._testBinary(\n math_ops.not_equal,\n np.array([[10], [7], [2]], dtype=np.float32),\n np.float32(7),\n expected=np.array([[True], [False], [True]], dtype=np.bool))\n\n for greater_op in [math_ops.greater, (lambda x, y: x > y)]:\n self._testBinary(\n greater_op,\n np.array([1, 5, 20], dtype=np.float32),\n np.array([10, 5, 2], dtype=np.float32),\n expected=np.array([False, False, True], dtype=np.bool))\n self._testBinary(\n greater_op,\n np.float32(5),\n np.array([1, 5, 20], dtype=np.float32),\n expected=np.array([True, False, False], dtype=np.bool))\n self._testBinary(\n greater_op,\n np.array([[10], [7], [2]], dtype=np.float32),\n np.float32(7),\n expected=np.array([[True], [False], [False]], dtype=np.bool))\n\n for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:\n self._testBinary(\n greater_equal_op,\n np.array([1, 5, 20], dtype=np.float32),\n np.array([10, 5, 2], dtype=np.float32),\n expected=np.array([False, True, True], dtype=np.bool))\n self._testBinary(\n greater_equal_op,\n np.float32(5),\n np.array([1, 5, 20], dtype=np.float32),\n expected=np.array([True, True, False], dtype=np.bool))\n self._testBinary(\n greater_equal_op,\n np.array([[10], [7], [2]], dtype=np.float32),\n np.float32(7),\n expected=np.array([[True], [True], [False]], dtype=np.bool))\n\n for less_op in 
[math_ops.less, (lambda x, y: x < y)]:\n self._testBinary(\n less_op,\n np.array([1, 5, 20], dtype=np.float32),\n np.array([10, 5, 2], dtype=np.float32),\n expected=np.array([True, False, False], dtype=np.bool))\n self._testBinary(\n less_op,\n np.float32(5),\n np.array([1, 5, 20], dtype=np.float32),\n expected=np.array([False, False, True], dtype=np.bool))\n self._testBinary(\n less_op,\n np.array([[10], [7], [2]], dtype=np.float32),\n np.float32(7),\n expected=np.array([[False], [False], [True]], dtype=np.bool))\n\n for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:\n self._testBinary(\n less_equal_op,\n np.array([1, 5, 20], dtype=np.float32),\n np.array([10, 5, 2], dtype=np.float32),\n expected=np.array([True, True, False], dtype=np.bool))\n self._testBinary(\n less_equal_op,\n np.float32(5),\n np.array([1, 5, 20], dtype=np.float32),\n expected=np.array([False, True, True], dtype=np.bool))\n self._testBinary(\n less_equal_op,\n np.array([[10], [7], [2]], dtype=np.float32),\n np.float32(7),\n expected=np.array([[False], [True], [True]], dtype=np.bool))\n\n def testBroadcasting(self):\n \"\"\"Tests broadcasting behavior of an operator.\"\"\"\n\n for dtype in self.numeric_types:\n self._testBinary(\n math_ops.add,\n np.array(3, dtype=dtype),\n np.array([10, 20], dtype=dtype),\n expected=np.array([13, 23], dtype=dtype))\n self._testBinary(\n math_ops.add,\n np.array([10, 20], dtype=dtype),\n np.array(4, dtype=dtype),\n expected=np.array([14, 24], dtype=dtype))\n\n # [1,3] x [4,1] => [4,3]\n self._testBinary(\n math_ops.add,\n np.array([[10, 20, 30]], dtype=dtype),\n np.array([[1], [2], [3], [4]], dtype=dtype),\n expected=np.array(\n [[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],\n dtype=dtype))\n\n # [3] * [4,1] => [4,3]\n self._testBinary(\n math_ops.add,\n np.array([10, 20, 30], dtype=dtype),\n np.array([[1], [2], [3], [4]], dtype=dtype),\n expected=np.array(\n [[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],\n dtype=dtype))\n\n 
def testFill(self):\n for dtype in self.numeric_types:\n self._testBinary(\n array_ops.fill,\n np.array([], dtype=np.int32),\n dtype(-42),\n expected=dtype(-42))\n self._testBinary(\n array_ops.fill,\n np.array([1, 2], dtype=np.int32),\n dtype(7),\n expected=np.array([[7, 7]], dtype=dtype))\n self._testBinary(\n array_ops.fill,\n np.array([3, 2], dtype=np.int32),\n dtype(50),\n expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))\n\n # Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.\n def _testMatMul(self, op):\n for dtype in self.float_types:\n self._testBinary(\n op,\n np.array([[-0.25]], dtype=dtype),\n np.array([[8]], dtype=dtype),\n expected=np.array([[-2]], dtype=dtype))\n self._testBinary(\n op,\n np.array([[100, 10, 0.5]], dtype=dtype),\n np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),\n expected=np.array([[123, 354]], dtype=dtype))\n self._testBinary(\n op,\n np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),\n np.array([[100], [10]], dtype=dtype),\n expected=np.array([[130], [250], [680]], dtype=dtype))\n self._testBinary(\n op,\n np.array([[1000, 100], [10, 1]], dtype=dtype),\n np.array([[1, 2], [3, 4]], dtype=dtype),\n expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))\n\n self._testBinary(\n op,\n np.array([], dtype=dtype).reshape((2, 0)),\n np.array([], dtype=dtype).reshape((0, 3)),\n expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))\n\n def testMatMul(self):\n self._testMatMul(math_ops.matmul)\n\n # TODO(phawkins): failing on GPU, no registered kernel.\n def DISABLED_testSparseMatMul(self):\n # Binary wrappers for sparse_matmul with different hints\n def SparseMatmulWrapperTF(a, b):\n return tf.sparse_matmul(a, b, a_is_sparse=True)\n\n def SparseMatmulWrapperFT(a, b):\n return tf.sparse_matmul(a, b, b_is_sparse=True)\n\n def SparseMatmulWrapperTT(a, b):\n return tf.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)\n\n self._testMatMul(tf.sparse_matmul)\n self._testMatMul(SparseMatmulWrapperTF)\n 
self._testMatMul(SparseMatmulWrapperFT)\n self._testMatMul(SparseMatmulWrapperTT)\n\n def testBatchMatMul(self):\n # Same tests as for tf.matmul above.\n self._testMatMul(math_ops.matmul)\n\n # Tests with batches of matrices.\n self._testBinary(\n math_ops.matmul,\n np.array([[[-0.25]]], dtype=np.float32),\n np.array([[[8]]], dtype=np.float32),\n expected=np.array([[[-2]]], dtype=np.float32))\n self._testBinary(\n math_ops.matmul,\n np.array([[[-0.25]], [[4]]], dtype=np.float32),\n np.array([[[8]], [[2]]], dtype=np.float32),\n expected=np.array([[[-2]], [[8]]], dtype=np.float32))\n self._testBinary(\n math_ops.matmul,\n np.array(\n [[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],\n [[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],\n dtype=np.float32),\n np.array(\n [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],\n [[55, 66], [77, 88]]]],\n dtype=np.float32),\n expected=np.array(\n [[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],\n [[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],\n dtype=np.float32))\n\n self._testBinary(\n math_ops.matmul,\n np.array([], dtype=np.float32).reshape((2, 2, 0)),\n np.array([], dtype=np.float32).reshape((2, 0, 3)),\n expected=np.array(\n [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],\n dtype=np.float32))\n self._testBinary(\n math_ops.matmul,\n np.array([], dtype=np.float32).reshape((0, 2, 4)),\n np.array([], dtype=np.float32).reshape((0, 4, 3)),\n expected=np.array([], dtype=np.float32).reshape(0, 2, 3))\n\n # Regression test for b/31472796.\n if hasattr(np, \"matmul\"):\n x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))\n self._testBinary(\n lambda x, y: math_ops.matmul(x, y, adjoint_b=True),\n x, x,\n expected=np.matmul(x, x.transpose([0, 1, 3, 2])))\n\n def testExpandDims(self):\n for dtype in self.numeric_types:\n self._testBinary(\n array_ops.expand_dims,\n dtype(7),\n np.int32(0),\n expected=np.array([7], dtype=dtype))\n self._testBinary(\n array_ops.expand_dims,\n np.array([42], 
dtype=dtype),\n np.int32(0),\n expected=np.array([[42]], dtype=dtype))\n self._testBinary(\n array_ops.expand_dims,\n np.array([], dtype=dtype),\n np.int32(0),\n expected=np.array([[]], dtype=dtype))\n self._testBinary(\n array_ops.expand_dims,\n np.array([[[1, 2], [3, 4]]], dtype=dtype),\n np.int32(0),\n expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))\n self._testBinary(\n array_ops.expand_dims,\n np.array([[[1, 2], [3, 4]]], dtype=dtype),\n np.int32(1),\n expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))\n self._testBinary(\n array_ops.expand_dims,\n np.array([[[1, 2], [3, 4]]], dtype=dtype),\n np.int32(2),\n expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))\n self._testBinary(\n array_ops.expand_dims,\n np.array([[[1, 2], [3, 4]]], dtype=dtype),\n np.int32(3),\n expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))\n\n def testPad(self):\n for dtype in self.numeric_types:\n self._testBinary(\n array_ops.pad,\n np.array(\n [[1, 2, 3], [4, 5, 6]], dtype=dtype),\n np.array(\n [[1, 2], [2, 1]], dtype=np.int32),\n expected=np.array(\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 1, 2, 3, 0],\n [0, 0, 4, 5, 6, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n dtype=dtype))\n\n def testMirrorPad(self):\n mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, \"REFLECT\")\n for dtype in self.numeric_types:\n self._testBinary(\n mirror_pad,\n np.array(\n [\n [1, 2, 3], #\n [4, 5, 6], #\n ],\n dtype=dtype),\n np.array([[\n 1,\n 1,\n ], [2, 2]], dtype=np.int32),\n expected=np.array(\n [\n [6, 5, 4, 5, 6, 5, 4], #\n [3, 2, 1, 2, 3, 2, 1], #\n [6, 5, 4, 5, 6, 5, 4], #\n [3, 2, 1, 2, 3, 2, 1]\n ],\n dtype=dtype))\n self._testBinary(\n mirror_pad,\n np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),\n np.array([[0, 0], [0, 0]], dtype=np.int32),\n expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))\n self._testBinary(\n mirror_pad,\n np.array(\n [\n [1, 2, 3], #\n [4, 5, 6], #\n [7, 8, 9]\n ],\n dtype=dtype),\n np.array([[2, 2], [0, 0]], dtype=np.int32),\n 
expected=np.array(\n [\n [7, 8, 9], #\n [4, 5, 6], #\n [1, 2, 3], #\n [4, 5, 6], #\n [7, 8, 9], #\n [4, 5, 6], #\n [1, 2, 3]\n ],\n dtype=dtype))\n self._testBinary(\n mirror_pad,\n np.array(\n [\n [[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]],\n ], dtype=dtype),\n np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),\n expected=np.array(\n [\n [\n [5, 4, 5, 6, 5], #\n [2, 1, 2, 3, 2], #\n [5, 4, 5, 6, 5], #\n [2, 1, 2, 3, 2], #\n ],\n [\n [11, 10, 11, 12, 11], #\n [8, 7, 8, 9, 8], #\n [11, 10, 11, 12, 11], #\n [8, 7, 8, 9, 8], #\n ]\n ],\n dtype=dtype))\n\n def testReshape(self):\n for dtype in self.numeric_types:\n self._testBinary(\n array_ops.reshape,\n np.array([], dtype=dtype),\n np.array([0, 4], dtype=np.int32),\n expected=np.zeros(shape=[0, 4], dtype=dtype))\n self._testBinary(\n array_ops.reshape,\n np.array([0, 1, 2, 3, 4, 5], dtype=dtype),\n np.array([2, 3], dtype=np.int32),\n expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))\n self._testBinary(\n array_ops.reshape,\n np.array([0, 1, 2, 3, 4, 5], dtype=dtype),\n np.array([3, 2], dtype=np.int32),\n expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))\n self._testBinary(\n array_ops.reshape,\n np.array([0, 1, 2, 3, 4, 5], dtype=dtype),\n np.array([-1, 6], dtype=np.int32),\n expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))\n self._testBinary(\n array_ops.reshape,\n np.array([0, 1, 2, 3, 4, 5], dtype=dtype),\n np.array([6, -1], dtype=np.int32),\n expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))\n self._testBinary(\n array_ops.reshape,\n np.array([0, 1, 2, 3, 4, 5], dtype=dtype),\n np.array([2, -1], dtype=np.int32),\n expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))\n self._testBinary(\n array_ops.reshape,\n np.array([0, 1, 2, 3, 4, 5], dtype=dtype),\n np.array([-1, 3], dtype=np.int32),\n expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))\n\n def testSplit(self):\n for dtype in self.numeric_types:\n for axis in [0, -3]:\n self._testBinary(\n lambda x, y: 
array_ops.split(value=y, num_or_size_splits=3, axis=x),\n np.int32(axis),\n np.array([[[1], [2]], [[3], [4]], [[5], [6]]],\n dtype=dtype),\n expected=[\n np.array([[[1], [2]]], dtype=dtype),\n np.array([[[3], [4]]], dtype=dtype),\n np.array([[[5], [6]]], dtype=dtype),\n ],\n equality_test=self.ListsAreClose)\n\n for axis in [1, -2]:\n self._testBinary(\n lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),\n np.int32(axis),\n np.array([[[1], [2]], [[3], [4]], [[5], [6]]],\n dtype=dtype),\n expected=[\n np.array([[[1]], [[3]], [[5]]], dtype=dtype),\n np.array([[[2]], [[4]], [[6]]], dtype=dtype),\n ],\n equality_test=self.ListsAreClose)\n\n def testTile(self):\n for dtype in self.numeric_types:\n self._testBinary(\n array_ops.tile,\n np.array([[6]], dtype=dtype),\n np.array([1, 2], dtype=np.int32),\n expected=np.array([[6, 6]], dtype=dtype))\n self._testBinary(\n array_ops.tile,\n np.array([[1], [2]], dtype=dtype),\n np.array([1, 2], dtype=np.int32),\n expected=np.array([[1, 1], [2, 2]], dtype=dtype))\n self._testBinary(\n array_ops.tile,\n np.array([[1, 2], [3, 4]], dtype=dtype),\n np.array([3, 2], dtype=np.int32),\n expected=np.array(\n [[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]],\n dtype=dtype))\n self._testBinary(\n array_ops.tile,\n np.array([[1, 2], [3, 4]], dtype=dtype),\n np.array([1, 1], dtype=np.int32),\n expected=np.array(\n [[1, 2],\n [3, 4]],\n dtype=dtype))\n self._testBinary(\n array_ops.tile,\n np.array([[1, 2]], dtype=dtype),\n np.array([3, 1], dtype=np.int32),\n expected=np.array(\n [[1, 2],\n [1, 2],\n [1, 2]],\n dtype=dtype))\n\n def testTranspose(self):\n for dtype in self.numeric_types:\n self._testBinary(\n array_ops.transpose,\n np.zeros(shape=[1, 0, 4], dtype=dtype),\n np.array([1, 2, 0], dtype=np.int32),\n expected=np.zeros(shape=[0, 4, 1], dtype=dtype))\n self._testBinary(\n array_ops.transpose,\n np.array([[1, 2], [3, 4]], dtype=dtype),\n np.array([0, 1], dtype=np.int32),\n 
expected=np.array([[1, 2], [3, 4]], dtype=dtype))\n self._testBinary(\n array_ops.transpose,\n np.array([[1, 2], [3, 4]], dtype=dtype),\n np.array([1, 0], dtype=np.int32),\n expected=np.array([[1, 3], [2, 4]], dtype=dtype))\n\n def testCross(self):\n for dtype in self.float_types:\n self._testBinary(\n gen_math_ops.cross,\n np.zeros((4, 3), dtype=dtype),\n np.zeros((4, 3), dtype=dtype),\n expected=np.zeros((4, 3), dtype=dtype))\n self._testBinary(\n gen_math_ops.cross,\n np.array([1, 2, 3], dtype=dtype),\n np.array([4, 5, 6], dtype=dtype),\n expected=np.array([-3, 6, -3], dtype=dtype))\n self._testBinary(\n gen_math_ops.cross,\n np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),\n np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),\n expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))\n\n def testBroadcastArgs(self):\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([2, 3, 5], dtype=np.int32),\n np.array([1], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([1], dtype=np.int32),\n np.array([2, 3, 5], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([2, 3, 5], dtype=np.int32),\n np.array([5], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([5], dtype=np.int32),\n np.array([2, 3, 5], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([2, 3, 5], dtype=np.int32),\n np.array([3, 5], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([3, 5], dtype=np.int32),\n np.array([2, 3, 5], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([2, 3, 5], dtype=np.int32),\n 
np.array([3, 1], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([3, 1], dtype=np.int32),\n np.array([2, 3, 5], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([2, 1, 5], dtype=np.int32),\n np.array([3, 1], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([3, 1], dtype=np.int32),\n np.array([2, 1, 5], dtype=np.int32),\n expected=np.array([2, 3, 5], dtype=np.int32))\n\n with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,\n \"Incompatible shapes\"):\n self._testBinary(array_ops.broadcast_dynamic_shape,\n np.array([1, 2, 3], dtype=np.int32),\n np.array([4, 5, 6], dtype=np.int32),\n expected=None)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for training routines.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom math import log10\nimport os\nimport tempfile\n\nimport numpy as np\n\nfrom tensorflow.python.estimator.inputs import numpy_io\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras._impl import keras\nfrom tensorflow.python.keras._impl.keras import testing_utils\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\n\ntry:\n import h5py # pylint:disable=g-import-not-at-top\nexcept ImportError:\n h5py = None\n\n\ndef simple_sequential_model():\n model = keras.models.Sequential()\n model.add(\n keras.layers.Conv2D(\n 32, kernel_size=(3, 3), activation='relu', input_shape=(14, 14, 3)))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(16, activation='relu'))\n model.add(keras.layers.Dropout(0.25))\n model.add(keras.layers.Dense(3, activation='softmax'))\n return model\n\n\ndef simple_functional_model():\n a = keras.layers.Input(shape=(14, 14, 3))\n b = keras.layers.Conv2D(32, kernel_size=(3, 3), 
activation='relu')(a)\n b = keras.layers.MaxPooling2D(pool_size=(2, 2))(b)\n b = keras.layers.Dropout(0.25)(b)\n b = keras.layers.Flatten()(b)\n b = keras.layers.Dense(16, activation='relu')(b)\n b = keras.layers.Dropout(0.25)(b)\n b = keras.layers.Dense(3, activation='softmax')(b)\n model = keras.models.Model(inputs=[a], outputs=[b])\n return model\n\n\ndef get_resource_for_simple_model(is_sequential, is_evaluate):\n model = simple_sequential_model(\n ) if is_sequential else simple_functional_model()\n if is_sequential:\n model.build()\n input_name = model.input_names[0]\n\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=200,\n test_samples=100,\n input_shape=(14, 14, 3),\n num_classes=3)\n y_train = keras.utils.to_categorical(y_train)\n y_test = keras.utils.to_categorical(y_test)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={input_name: np.array(x_train, dtype=np.float32)},\n y=np.array(y_train, dtype=np.float32),\n shuffle=False,\n num_epochs=None,\n batch_size=16)\n\n evaluate_input_fn = numpy_io.numpy_input_fn(\n x={input_name: np.array(x_test, dtype=np.float32)},\n y=np.array(y_test, dtype=np.float32),\n num_epochs=1,\n shuffle=False)\n\n predict_input_fn = numpy_io.numpy_input_fn(\n x={input_name: np.array(x_test, dtype=np.float32)},\n num_epochs=1,\n shuffle=False)\n\n inference_input_fn = evaluate_input_fn if is_evaluate else predict_input_fn\n\n return model, (x_train, y_train), (x_test,\n y_test), train_input_fn, inference_input_fn\n\n\ndef multi_inputs_multi_outputs_model():\n # test multi-input layer\n a = keras.layers.Input(shape=(32,), name='input_a')\n b = keras.layers.Input(shape=(32,), name='input_b')\n dense = keras.layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = keras.layers.concatenate([a_2, b_2], name='merge')\n c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)\n d = keras.layers.Dense(2, activation='softmax', 
name='dense_3')(merged)\n model = keras.models.Model(inputs=[a, b], outputs=[c, d])\n model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics={'dense_2': 'accuracy',\n 'dense_3': 'accuracy'})\n return model\n\n\nclass TestKerasEstimator(test.TestCase):\n\n def setUp(self):\n self._base_dir = os.path.join(self.get_temp_dir(), 'keras_estimator_test')\n gfile.MakeDirs(self._base_dir)\n\n def tearDown(self):\n gfile.DeleteRecursively(self._base_dir)\n\n def test_train(self):\n for is_sequential in [True, False]:\n keras_model, (_, _), (\n _, _), train_input_fn, eval_input_fn = get_resource_for_simple_model(\n is_sequential=is_sequential, is_evaluate=True)\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy', 'mse', keras.metrics.categorical_accuracy])\n\n with self.test_session():\n est_keras = keras.estimator.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n est_keras.train(input_fn=train_input_fn, steps=200 * 10 / 16)\n eval_results = est_keras.evaluate(input_fn=eval_input_fn)\n self.assertGreater(eval_results['accuracy'], 0.9)\n self.assertGreater(eval_results['categorical_accuracy'], 0.9)\n self.assertLess(eval_results['mse'], 0.1)\n\n def test_evaluate(self):\n keras_model, (x_train, y_train), (\n x_test, y_test), _, eval_input_fn = get_resource_for_simple_model(\n is_sequential=False, is_evaluate=True)\n\n with self.test_session():\n metrics = [\n 'binary_accuracy', 'binary_crossentropy', 'categorical_accuracy',\n 'categorical_crossentropy', 'cosine_proximity', 'hinge',\n 'kullback_leibler_divergence', 'mean_absolute_error',\n 'mean_absolute_percentage_error', 'mean_squared_error',\n 'mean_squared_logarithmic_error', 'poisson', 'squared_hinge',\n 'top_k_categorical_accuracy'\n ]\n keras_model.compile(\n loss='categorical_crossentropy', optimizer='adam', metrics=metrics)\n keras_model.fit(x_train, y_train, epochs=1)\n keras_eval = 
keras_model.evaluate(x_test, y_test, batch_size=32)\n\n with self.test_session():\n keras_est = keras.estimator.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n est_eval = keras_est.evaluate(input_fn=eval_input_fn)\n\n metrics = ['loss'] + metrics\n\n # Check loss and all metrics match between keras and estimator.\n def shift(val):\n return val / 10**int(log10(abs(val)))\n\n for i, metric_name in enumerate(metrics):\n self.assertAlmostEqual(\n shift(est_eval[metric_name]),\n shift(keras_eval[i]),\n places=4,\n msg='%s mismatch, keras model: %s, estimator: %s' %\n (metric_name, est_eval[metric_name], keras_eval[i]))\n\n def test_predict(self):\n # Check that predict on a pretrained model yield the same result.\n keras_model, (x_train, y_train), (\n x_test, _), _, pred_input_fn = get_resource_for_simple_model(\n is_sequential=True, is_evaluate=False)\n\n with self.test_session():\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n keras_model.fit(x_train, y_train, epochs=1)\n keras_pred = [np.argmax(y) for y in keras_model.predict(x_test)]\n\n with self.test_session():\n keras_est = keras.estimator.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n est_pred = [\n np.argmax(y[keras_model.output_names[0]])\n for y in keras_est.predict(input_fn=pred_input_fn)\n ]\n self.assertAllEqual(est_pred, keras_pred)\n\n def test_multi_inputs_multi_outputs(self):\n np.random.seed(1337)\n (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(\n train_samples=200, test_samples=100, input_shape=(32,), num_classes=3)\n (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(\n train_samples=200, test_samples=100, input_shape=(32,), num_classes=2)\n c_train = keras.utils.to_categorical(c_train)\n c_test = keras.utils.to_categorical(c_test)\n d_train = keras.utils.to_categorical(d_train)\n d_test = 
keras.utils.to_categorical(d_test)\n\n def train_input_fn():\n input_dict = {\n 'input_a':\n ops.convert_to_tensor(\n np.array(a_train, dtype=np.float32), dtype=dtypes.float32),\n 'input_b':\n ops.convert_to_tensor(\n np.array(b_train, dtype=np.float32), dtype=dtypes.float32)\n }\n output_dict = {\n 'dense_2':\n ops.convert_to_tensor(\n np.array(c_train, dtype=np.float32), dtype=dtypes.float32),\n 'dense_3':\n ops.convert_to_tensor(\n np.array(d_train, dtype=np.float32), dtype=dtypes.float32)\n }\n return input_dict, output_dict\n\n def evaluate_input_fn():\n input_dict = {\n 'input_a':\n ops.convert_to_tensor(\n np.array(a_test, dtype=np.float32), dtype=dtypes.float32),\n 'input_b':\n ops.convert_to_tensor(\n np.array(b_test, dtype=np.float32), dtype=dtypes.float32)\n }\n output_dict = {\n 'dense_2':\n ops.convert_to_tensor(\n np.array(c_test, dtype=np.float32), dtype=dtypes.float32),\n 'dense_3':\n ops.convert_to_tensor(\n np.array(d_test, dtype=np.float32), dtype=dtypes.float32)\n }\n return input_dict, output_dict\n\n with self.test_session():\n model = multi_inputs_multi_outputs_model()\n est_keras = keras.estimator.model_to_estimator(\n keras_model=model, model_dir=tempfile.mkdtemp(dir=self._base_dir))\n est_keras.train(input_fn=train_input_fn, steps=200 * 10 / 16)\n eval_results = est_keras.evaluate(input_fn=evaluate_input_fn, steps=1)\n self.assertGreater(eval_results['accuracy_dense_2'], 0.5)\n self.assertGreater(eval_results['accuracy_dense_3'], 0.5)\n\n def test_init_from_file(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n keras_model, (x_train, y_train), (\n x_test, _), _, pred_input_fn = get_resource_for_simple_model(\n is_sequential=False, is_evaluate=False)\n\n with self.test_session():\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n keras_model.fit(x_train, y_train, epochs=1)\n keras_pred = [np.argmax(y) for y in keras_model.predict(x_test)]\n fname = 
os.path.join(self._base_dir, 'keras_model.h5')\n keras.models.save_model(keras_model, fname)\n\n with self.test_session():\n keras_est = keras.estimator.model_to_estimator(\n keras_model_path=fname,\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n est_pred = [\n np.argmax(y[keras_model.output_names[0]])\n for y in keras_est.predict(input_fn=pred_input_fn)\n ]\n self.assertAllEqual(est_pred, keras_pred)\n\n def test_keras_model_init_error(self):\n with self.assertRaisesRegexp(ValueError, 'Either'):\n keras.estimator.model_to_estimator()\n\n with self.test_session():\n keras_model = simple_sequential_model()\n with self.assertRaisesRegexp(ValueError, 'not both'):\n keras.estimator.model_to_estimator(\n keras_model=keras_model,\n keras_model_path=tempfile.mkdtemp(dir=self._base_dir))\n\n with self.test_session():\n keras_model = simple_sequential_model()\n with self.assertRaisesRegexp(ValueError, 'compiled'):\n keras.estimator.model_to_estimator(keras_model=keras_model)\n\n with self.test_session():\n keras_model = simple_sequential_model()\n with self.assertRaisesRegexp(ValueError, 'not a local path'):\n keras.estimator.model_to_estimator(\n keras_model_path='gs://bucket/object')\n\n def test_invalid_ionames_error(self):\n np.random.seed(1337)\n (x_train, y_train), (_, _) = testing_utils.get_test_data(\n train_samples=200, test_samples=100, input_shape=(10,), num_classes=2)\n y_train = keras.utils.to_categorical(y_train)\n\n def invald_input_name_input_fn():\n input_dict = {\n 'invalid_input_name':\n ops.convert_to_tensor(\n np.array(x_train, dtype=np.float32), dtype=dtypes.float32),\n }\n output = ops.convert_to_tensor(\n np.array(y_train, dtype=np.float32), dtype=dtypes.float32)\n return input_dict, output\n\n def invald_output_name_input_fn():\n input_dict = {\n 'input_1':\n ops.convert_to_tensor(\n np.array(x_train, dtype=np.float32), dtype=dtypes.float32),\n }\n output_dict = {\n 'invalid_output_name':\n ops.convert_to_tensor(\n np.array(y_train, 
dtype=np.float32), dtype=dtypes.float32),\n }\n return input_dict, output_dict\n\n model = simple_functional_model()\n model.compile(\n loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n est_keras = keras.estimator.model_to_estimator(\n keras_model=model, model_dir=tempfile.mkdtemp(dir=self._base_dir))\n\n with self.test_session():\n with self.assertRaises(ValueError):\n est_keras.train(input_fn=invald_input_name_input_fn, steps=100)\n\n with self.assertRaises(ValueError):\n est_keras.train(input_fn=invald_output_name_input_fn, steps=100)\n\n def test_custom_objects(self):\n keras_model, (_, _), (\n _, _), train_input_fn, eval_input_fn = get_resource_for_simple_model(\n is_sequential=True, is_evaluate=True)\n\n class CustomOp(keras.optimizers.RMSprop):\n pass\n\n def custom_loss(y_true, y_pred):\n return keras.losses.categorical_crossentropy(y_true, y_pred)\n\n keras_model.compile(\n loss=custom_loss, optimizer=CustomOp(), metrics=['accuracy'])\n\n with self.test_session():\n est_keras = keras.estimator.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n est_keras.train(input_fn=train_input_fn, steps=200 * 10 / 16)\n eval_results = est_keras.evaluate(input_fn=eval_input_fn)\n self.assertGreater(eval_results['accuracy'], 0.9)\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"numpy.divide",
"numpy.reshape",
"numpy.random.seed",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.constant_op.constant",
"numpy.linalg.qr",
"tensorflow.python.ops.linalg_ops.qr",
"tensorflow.python.ops.array_ops.ones_like",
"numpy.ndarray",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.math_ops.conj",
"numpy.abs",
"tensorflow.python.ops.random_ops.random_normal",
"numpy.prod",
"tensorflow.python.platform.test.main"
],
[
"numpy.array",
"tensorflow.python.platform.googletest.main",
"numpy.zeros",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.ops.math_ops.approximate_equal",
"tensorflow.python.ops.array_ops.split",
"numpy.float32",
"numpy.arange",
"numpy.int32",
"tensorflow.python.ops.array_ops.pad"
],
[
"tensorflow.python.keras._impl.keras.utils.to_categorical",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras._impl.keras.models.save_model",
"tensorflow.python.keras._impl.keras.models.Sequential",
"tensorflow.python.keras._impl.keras.layers.Dropout",
"numpy.argmax",
"tensorflow.python.keras._impl.keras.testing_utils.get_test_data",
"tensorflow.python.keras._impl.keras.losses.categorical_crossentropy",
"numpy.array",
"tensorflow.python.platform.gfile.DeleteRecursively",
"tensorflow.python.keras._impl.keras.layers.Flatten",
"tensorflow.python.keras._impl.keras.layers.concatenate",
"tensorflow.python.keras._impl.keras.estimator.model_to_estimator",
"tensorflow.python.keras._impl.keras.models.Model",
"tensorflow.python.keras._impl.keras.layers.MaxPooling2D",
"tensorflow.python.keras._impl.keras.layers.Dense",
"tensorflow.python.keras._impl.keras.layers.Conv2D",
"tensorflow.python.keras._impl.keras.layers.Input",
"numpy.random.seed",
"tensorflow.python.platform.gfile.MakeDirs"
]
] |
guyemerson/sem-func
|
[
"6945d02bc204203a7fb43d70bbf8aa232646158a"
] |
[
"src/core/restart.py"
] |
[
"import argparse, numpy as np, logging\n\nfrom trainer import Trainer\n\nif __name__ == \"__main__\":\n np.set_printoptions(precision=3, suppress=True, threshold=np.nan)\n \n parser = argparse.ArgumentParser(description=\"Train a sem-func model\")\n # File name\n parser.add_argument('fullname', nargs='?', default=None)\n parser.add_argument('-p', '--prefix', default='multicore')\n parser.add_argument('-t', '--threshold', type=int, default=5)\n parser.add_argument('-s', '--suffix', default=None)\n parser.add_argument('-o', '--output', default=None)\n # Practical stuff\n parser.add_argument('-c', '--clear', action='store_true')\n parser.add_argument('-T', '--timeout', type=int, default=0)\n parser.add_argument('-v', '--validation', nargs='+', default=[])\n parser.add_argument('-m', '--maxtasksperchild', type=int, default=None)\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-l', '--logging', default='WARNING')\n \n args = parser.parse_args()\n \n # Allow remote debugging\n if args.debug:\n from pdb_clone import pdbhandler\n pdbhandler.register()\n # To debug, run: `pdb-attach --kill --pid PID`\n # child pid can be found with: `ps x --forest`\n \n # Set input and output names\n if args.fullname:\n name = args.fullname\n else:\n name = '{}-{}-{}'.format(args.prefix, args.threshold, args.suffix)\n \n if args.output:\n output_name = '{}-{}-{}'.format(args.prefix, args.threshold, args.output)\n else:\n output_name = None\n \n # Load the trainer\n trainer = Trainer.load(name, output_name=output_name)\n \n # Clear list of completed files, if desired\n if args.clear:\n while trainer.completed_files:\n trainer.completed_files.pop()\n \n # Start training again\n trainer.start(timeout=args.timeout,\n validation=[x+'.pkl' for x in args.validation],\n debug=args.debug,\n logging_level=logging.getLevelName(args.logging),\n maxtasksperchild=args.maxtasksperchild)\n"
] |
[
[
"numpy.set_printoptions"
]
] |
liud16/direct18project
|
[
"a92a66ea688469b48be7e701d1b6b9baef28883f"
] |
[
"SAFMI/UserImageProcessing/ImagePreprocessing.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 05 12:15:10 2018\n\n@author: sarth\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage import morphology\nimport cv2\n\n\n\ndef savefile(filename, file):\n \"\"\" \n Save an array to a txt file\n \n Args:\n filename: name of the txt file, a string\n file: data to be saved, 1D or 2D array-like\n \n Returns:\n txt file\n \n \"\"\"\n\n output_file = np.savetxt(filename, file, fmt = '%.3f', delimiter = '\\t')\n \n return output_file\n\n\n\ndef bckgrnd_correc_rect(image, row_len, col_len):\n \"\"\"\n Background correction using a rectangular structuring element. \n This function uses white_tophat from \n skimage.morphology to return image minus \n the morphological opening obtained from the structuring element.\n \n Args:\n image: image to be processed, a numpy array\n row_len: side-length of the rectangle filter, int\n col_len: side-length of the rectangle filter, int\n \n Returns:\n numpy array\n \n Raises:\n Errors when input type is wrong\n \n \"\"\"\n \n # Checking the right data type for the input image\n assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')\n \n # Checking the right data type for the row length of the rectangular structuring element\n assert type(row_len) == int, ('Wrong data type', 'row length must be a float')\n \n # Checking the right data type for the column length of the rectangular structuring element\n assert type(col_len) == int, ('Wrong data type', 'column length must be a float')\n \n # background corrrection \n image_bckgrnd_corrected = morphology.white_tophat(image, morphology.rectangle(row_len,col_len))\n \n # plotting image\n plt.figure()\n plt.gray()\n plt.title('Background correction with rectangle')\n plt.imshow(image_bckgrnd_corrected)\n plt.colorbar()\n plt.show()\n \n return image_bckgrnd_corrected\n\n\n\ndef bckgrnd_correc_sq(image, length):\n \"\"\"\n Background correction using a square structuring element. 
\n This function uses white_tophat from \n skimage.morphology to return image minus \n the morphological opening obtained from the structuring element. \n \n Args:\n image: image to be processed, a numpy array\n length: side-length of the square filter, int\n\n \n Returns:\n numpy array\n \n Raises:\n Errors when input type is wrong\n \n \"\"\"\n \n # Checking the right data type for the input image\n assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')\n \n # Checking the right data type for the length of the square structuring element\n assert type(length) == int, ('Wrong data type', 'length of the square structuring element must be a float')\n \n # background correction\n image_bckgrnd_corrected = morphology.white_tophat(image, morphology.square(length))\n \n # plotting image\n plt.figure()\n plt.gray()\n plt.title('Background correction with square')\n plt.imshow(image_bckgrnd_corrected)\n plt.colorbar()\n plt.show()\n \n return image_bckgrnd_corrected\n\n\n\ndef bckgrnd_correc_disk(image, radius):\n \"\"\"\n Background correction using a disk structuring element. \n This function uses white_tophat from \n skimage.morphology to return image minus \n the morphological opening obtained from the structuring element. 
\n \n Args:\n image: image to be processed, a numpy array\n radius: radius of the disk filter, int\n\n \n Returns:\n numpy array\n \n Raises:\n Errors when input type is wrong\n \n \"\"\"\n \n # Checking the right data type for the input image\n assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')\n \n # Checking the right data type for the length of the square structuring element\n assert type(radius) == int, ('Wrong data type', 'radius of the disk structuring element must be a float')\n \n # background correction\n image_bckgrnd_corrected = morphology.white_tophat(image, morphology.disk(radius))\n \n # plotting image\n plt.figure()\n plt.gray()\n plt.title('Background correction with disk')\n plt.imshow(image_bckgrnd_corrected)\n plt.colorbar()\n plt.show()\n \n return image_bckgrnd_corrected\n \n\ndef convert_to_grayscale(image):\n \"\"\"\n Converting the image to grayscale - \n where minimum pixel value is 0.0 and maximum pixel value is 1.0\n \n Args:\n image: image to be processed, numpy array\n \n Returns:\n numpy array\n \n Raises:\n Errors when input type is wrong\n \n \"\"\"\n \n # Checking the right data type for the input image\n assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')\n \n # converting to grayscale\n dst = np.zeros(image.shape)\n image_gray = cv2.normalize(image, dst, 0.0, 1.0, cv2.NORM_MINMAX)\n \n # plotting the image\n plt.figure()\n plt.gray()\n plt.title('After background correction, grey-scale')\n plt.imshow(image_gray)\n plt.colorbar()\n plt.show()\n \n return image_gray\n\n\n\ndef bckgrnd_corr(image):\n \"\"\"\n Wrapper function to perform and optimize background correction\n \n Args:\n image: numpy array\n \n Returns:\n numpy array\n \n \"\"\"\n\n #Optimize background removal results based on user interaction\n #After each run of the function, the user will be asked\n #if background removal results look okay. 
The function will run \n #until user is okay with the result.\n \n bkgnd_ok = 'N'\n while bkgnd_ok == 'N':\n \n #user chooses the filter shape\n print ('Please choose filter: Rectangle, Square or Disk.')\n filter_shape = input('Filter: ')\n\n #perform background correction using a rectangle filter \n if filter_shape == 'Rectangle':\n \n #ask user the dimension of the filter\n #if the user chooses to not to use the default dimension,\n #the program will ask user to enter dimensions\n print ('What are the dimensions of the filter?')\n default_dim = input('Default dimensions? Y/N ')\n \n \n if default_dim == 'Y': \n row_len = 10\n col_len = 200\n image_bckgrnd_corrected = bckgrnd_correc_rect(image, row_len, col_len)\n bkgnd_ok = input('Are you okay with background removal?')\n\n elif default_dim == 'N':\n row_len = int(input('Row length (in pixel): '))\n col_len = int(input('Column length (in pixel): '))\n \n image_bckgrnd_corrected = bckgrnd_correc_rect(image, row_len, col_len)\n bkgnd_ok = input('Are you okay with background removal?')\n \n #if the user did not enter Y or N,\n #a message will show up, and the user will be\n #asked to choose filter shape and dimensions again\n else:\n print ('Please enter Y or N only for \"Default dimension?\".')\n bkgnd_ok = 'N'\n\n \n #perform background correction using a square filter \n elif filter_shape == 'Square':\n print ('What are the dimensions of the filter?')\n default_dim = input('Default dimensions? 
Y/N ')\n\n if default_dim == 'Y': \n length = 10\n image_bckgrnd_corrected = bckgrnd_correc_sq(image, length)\n bkgnd_ok = input('Are you okay with background removal?')\n \n elif default_dim == 'N':\n length = int(input('Side length (in pixel): '))\n image_bckgrnd_corrected = bckgrnd_correc_sq(image, length)\n bkgnd_ok = input('Are you okay with background removal?')\n \n else:\n print ('Please enter Y or N only for \"Default dimension?\".')\n bkgnd_ok = 'N'\n \n\n #perform background correction using a disk filter \n elif filter_shape == 'Disk':\n print ('What are the dimensions of the filter?')\n default_dim = input('Default dimensions? Y/N ')\n \n if default_dim == 'Y': \n radius = 5\n image_bckgrnd_corrected = bckgrnd_correc_disk(image, radius)\n bkgnd_ok = input('Are you okay with background removal?')\n\n elif default_dim == 'N':\n radius = int(input('Radius length (in pixel): '))\n image_bckgrnd_corrected = bckgrnd_correc_disk(image, radius)\n bkgnd_ok = input('Are you okay with background removal?')\n\n else:\n print ('Please enter Y or N only for \"Default dimension?\".')\n bkgnd_ok = 'N'\n \n #if user did not enter the given choices,\n #a message will show up. User will be asked to\n #choose shape again.\n else:\n print ('Please choose from the listed shapes.')\n \n \n return image_bckgrnd_corrected\n \n "
] |
[
[
"matplotlib.pyplot.colorbar",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.pyplot.gray",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow"
]
] |
mjbigdel/baselines
|
[
"ea25b9e8b234e6ee1bca43083f8f3cf974143998"
] |
[
"baselines/deepq/build_graph.py"
] |
[
"\"\"\"Deep Q learning graph\n\nThe functions in this file can are used to create the following functions:\n\n======= act ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon a new value, if negative no update happens\n (default: no update)\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= act (in case of parameter noise) ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon to a new value, if negative no update happens\n (default: no update)\n reset_ph: bool\n reset the perturbed policy by sampling a new perturbation\n update_param_noise_threshold_ph: float\n the desired threshold for the difference between non-perturbed and perturbed policy\n update_param_noise_scale_ph: bool\n whether or not to update the scale of the noise for the next time it is re-perturbed\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= train =======\n\n Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:\n\n td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))\n loss = huber_loss[td_error]\n\n Parameters\n ----------\n obs_t: object\n a batch of observations\n action: np.array\n actions that were selected upon seeing obs_t.\n dtype must be int32 and shape must be (batch_size,)\n reward: np.array\n immediate reward attained after executing those actions\n dtype 
must be float32 and shape must be (batch_size,)\n obs_tp1: object\n observations that followed obs_t\n done: np.array\n 1 if obs_t was the last observation in the episode and 0 otherwise\n obs_tp1 gets ignored, but must be of the valid shape.\n dtype must be float32 and shape must be (batch_size,)\n weight: np.array\n imporance weights for every element of the batch (gradient is multiplied\n by the importance weight) dtype must be float32 and shape must be (batch_size,)\n\n Returns\n -------\n td_error: np.array\n a list of differences between Q(s,a) and the target in Bellman's equation.\n dtype is float32 and shape is (batch_size,)\n\n======= update_target ========\n\n copy the parameters from optimized Q function to the target Q function.\n In Q learning we actually optimize the following error:\n\n Q(s,a) - (r + gamma * max_a' Q'(s', a'))\n\n Where Q' is lagging behind Q to stablize the learning. For example for Atari\n\n Q' is set to Q once every 10000 updates training steps.\n\n\"\"\"\nimport tensorflow as tf\nimport baselines.common.tf_util as U\n\n\ndef scope_vars(scope, trainable_only=False):\n \"\"\"\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables that were marked as trainable.\n Returns\n -------\n vars: [tf.Variable]\n list of variables in `scope`.\n \"\"\"\n return tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,\n scope=scope if isinstance(scope, str) else scope.name\n )\n\n\ndef scope_name():\n \"\"\"Returns the name of current scope as a string, e.g. 
deepq/q_func\"\"\"\n return tf.get_variable_scope().name\n\n\ndef absolute_scope_name(relative_scope_name):\n \"\"\"Appends parent scope name to `relative_scope_name`\"\"\"\n return scope_name() + \"/\" + relative_scope_name\n\n\ndef default_param_noise_filter(var):\n if var not in tf.trainable_variables():\n # We never perturb non-trainable vars.\n return False\n if \"fully_connected\" in var.name:\n # We perturb fully-connected layers.\n return True\n\n # The remaining layers are likely conv or layer norm layers, which we do not wish to\n # perturb (in the former case because they only extract features, in the latter case because\n # we use them for normalization purposes). If you change your network, you will likely want\n # to re-consider which layers to perturb and which to keep untouched.\n return False\n\n\ndef build_act(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None):\n \"\"\"Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. 
To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n with tf.variable_scope(scope, reuse=reuse):\n observations_ph = make_obs_ph(\"observation\")\n stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n\n eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n\n q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n deterministic_actions = tf.argmax(q_values, axis=1)\n\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True},\n updates=[update_eps_expr])\n def act(ob, stochastic=True, update_eps=-1):\n return _act(ob, stochastic, update_eps)\n return act\n\n\ndef build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None, param_noise_filter_func=None):\n \"\"\"Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n 
observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n if param_noise_filter_func is None:\n param_noise_filter_func = default_param_noise_filter\n\n with tf.variable_scope(scope, reuse=reuse):\n observations_ph = make_obs_ph(\"observation\")\n stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name=\"update_param_noise_threshold\")\n update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name=\"update_param_noise_scale\")\n reset_ph = tf.placeholder(tf.bool, (), name=\"reset\")\n\n eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n param_noise_scale = tf.get_variable(\"param_noise_scale\", (), initializer=tf.constant_initializer(0.01), trainable=False)\n param_noise_threshold = tf.get_variable(\"param_noise_threshold\", (), initializer=tf.constant_initializer(0.05), trainable=False)\n\n # Unmodified Q.\n q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n\n # Perturbable Q used for the actual rollout.\n q_values_perturbed = 
q_func(observations_ph.get(), num_actions, scope=\"perturbed_q_func\")\n # We have to wrap this code into a function due to the way tf.cond() works. See\n # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for\n # a more detailed discussion.\n def perturb_vars(original_scope, perturbed_scope):\n all_vars = scope_vars(absolute_scope_name(original_scope))\n all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))\n assert len(all_vars) == len(all_perturbed_vars)\n perturb_ops = []\n for var, perturbed_var in zip(all_vars, all_perturbed_vars):\n if param_noise_filter_func(perturbed_var):\n # Perturb this variable.\n op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))\n else:\n # Do not perturb, just assign.\n op = tf.assign(perturbed_var, var)\n perturb_ops.append(op)\n assert len(perturb_ops) == len(all_vars)\n return tf.group(*perturb_ops)\n\n # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy\n # of the network and measures the effect of that perturbation in action space. 
If the perturbation\n # is too big, reduce scale of perturbation, otherwise increase.\n q_values_adaptive = q_func(observations_ph.get(), num_actions, scope=\"adaptive_q_func\")\n perturb_for_adaption = perturb_vars(original_scope=\"q_func\", perturbed_scope=\"adaptive_q_func\")\n kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)\n mean_kl = tf.reduce_mean(kl)\n def update_scale():\n with tf.control_dependencies([perturb_for_adaption]):\n update_scale_expr = tf.cond(mean_kl < param_noise_threshold,\n lambda: param_noise_scale.assign(param_noise_scale * 1.01),\n lambda: param_noise_scale.assign(param_noise_scale / 1.01),\n )\n return update_scale_expr\n\n # Functionality to update the threshold for parameter space noise.\n update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,\n lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))\n\n # Put everything together.\n deterministic_actions = tf.argmax(q_values_perturbed, axis=1)\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n updates = [\n update_eps_expr,\n tf.cond(reset_ph, lambda: perturb_vars(original_scope=\"q_func\", perturbed_scope=\"perturbed_q_func\"), lambda: tf.group(*[])),\n tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),\n update_param_noise_threshold_expr,\n ]\n _act = U.function(inputs=[observations_ph, 
stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},\n updates=updates)\n def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):\n return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)\n return act\n\n\ndef build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,\n double_q=True, scope=\"deepq\", reuse=None, param_noise=False, param_noise_filter_func=None):\n \"\"\"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. If None no clipping is performed.\n gamma: float\n discount rate.\n double_q: bool\n if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n In general it is a good idea to keep it enabled.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. 
To be able to reuse the scope must be given.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n` See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n` See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.\n \"\"\"\n if param_noise:\n act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,\n param_noise_filter_func=param_noise_filter_func)\n else:\n act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)\n\n with tf.variable_scope(scope, reuse=reuse):\n # set up placeholders\n obs_t_input = make_obs_ph(\"obs_t\")\n act_t_ph = tf.placeholder(tf.int32, [None], name=\"action\")\n rew_t_ph = tf.placeholder(tf.float32, [None], name=\"reward\")\n obs_tp1_input = make_obs_ph(\"obs_tp1\")\n done_mask_ph = tf.placeholder(tf.float32, [None], name=\"done\")\n importance_weights_ph = tf.placeholder(tf.float32, [None], name=\"weight\")\n\n # q network evaluation\n q_t = q_func(obs_t_input.get(), num_actions, scope=\"q_func\", reuse=True) # reuse parameters from act\n q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"/q_func\")\n\n # target q network evalution\n q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope=\"target_q_func\")\n target_q_func_vars = 
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"/target_q_func\")\n\n # q scores for actions which we know were selected in the given state.\n q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)\n\n # compute estimate of best possible value starting from state at t + 1\n if double_q:\n q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope=\"q_func\", reuse=True)\n q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)\n q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)\n else:\n q_tp1_best = tf.reduce_max(q_tp1, 1)\n q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best\n\n # compute RHS of bellman equation\n q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked\n\n # compute the error (potentially clipped)\n td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)\n errors = U.huber_loss(td_error)\n weighted_error = tf.reduce_mean(importance_weights_ph * errors)\n\n # compute optimization op (potentially with gradient clipping)\n if grad_norm_clipping is not None:\n gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)\n optimize_expr = optimizer.apply_gradients(gradients)\n else:\n optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_expr = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_expr.append(var_target.assign(var))\n update_target_expr = tf.group(*update_target_expr)\n\n # Create callable functions\n train = U.function(\n inputs=[\n obs_t_input,\n act_t_ph,\n rew_t_ph,\n obs_tp1_input,\n done_mask_ph,\n importance_weights_ph\n ],\n 
outputs=td_error,\n updates=[optimize_expr]\n )\n update_target = U.function([], [], updates=[update_target_expr])\n\n q_values = U.function([obs_t_input], q_t)\n\n return act_f, train, update_target, {'q_values': q_values}\n"
] |
[
[
"tensorflow.constant_initializer",
"tensorflow.group",
"tensorflow.clip_by_norm",
"tensorflow.stack",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.argmax",
"tensorflow.Variable",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.where",
"tensorflow.placeholder",
"tensorflow.assign",
"tensorflow.cond",
"tensorflow.reduce_max",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient"
]
] |
arunmp2004/PixelLib
|
[
"9e8690b594528a48d983992c55f7ce37029d9f25"
] |
[
"pixellib/instance.py"
] |
[
"import cv2\r\nimport numpy as np\r\nimport random\r\nimport os\r\nimport sys\r\nimport math\r\nfrom pixellib.mask_rcnn import MaskRCNN\r\nfrom pixellib.config import Config\r\nimport colorsys\r\nimport time\r\nfrom datetime import datetime\r\n\r\n\r\nclass configuration(Config):\r\n NAME = \"configuration\"\r\n\r\ncoco_config = configuration(BACKBONE = \"resnet101\", NUM_CLASSES = 81, class_names = [\"BG\"], IMAGES_PER_GPU = 1, \r\nDETECTION_MIN_CONFIDENCE = 0.7,IMAGE_MAX_DIM = 1024, IMAGE_MIN_DIM = 800,IMAGE_RESIZE_MODE =\"square\", GPU_COUNT = 1) \r\n\r\n\r\nclass instance_segmentation():\r\n def __init__(self, infer_speed = None):\r\n if infer_speed == \"average\":\r\n coco_config.IMAGE_MAX_DIM = 512\r\n coco_config.IMAGE_MIN_DIM = 512\r\n coco_config.DETECTION_MIN_CONFIDENCE = 0.45\r\n\r\n elif infer_speed == \"fast\":\r\n coco_config.IMAGE_MAX_DIM = 384\r\n coco_config.IMAGE_MIN_DIM = 384\r\n coco_config.DETECTION_MIN_CONFIDENCE = 0.25\r\n\r\n elif infer_speed == \"rapid\":\r\n coco_config.IMAGE_MAX_DIM = 256\r\n coco_config.IMAGE_MIN_DIM = 256\r\n coco_config.DETECTION_MIN_CONFIDENCE = 0.20 \r\n \r\n\r\n self.model_dir = os.getcwd()\r\n\r\n def load_model(self, model_path):\r\n self.model = MaskRCNN(mode = \"inference\", model_dir = self.model_dir, config = coco_config)\r\n self.model.load_weights(model_path, by_name= True)\r\n\r\n\r\n def segmentImage(self, image_path, show_bboxes = False, output_image_name = None, verbose = None):\r\n \r\n image = cv2.imread(image_path)\r\n new_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n # Run detection\r\n if verbose is not None:\r\n print(\"Processing image...\")\r\n results = self.model.detect([new_img]) \r\n\r\n coco_config.class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\r\n 'bus', 'train', 'truck', 'boat', 'traffic light',\r\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\r\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\r\n 'zebra', 'giraffe', 'backpack', 
'umbrella', 'handbag', 'tie',\r\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\r\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\r\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\r\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\r\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\r\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\r\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\r\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\r\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\r\n 'teddy bear', 'hair drier', 'toothbrush']\r\n r = results[0] \r\n if show_bboxes == False:\r\n \r\n #apply segmentation mask\r\n output = display_instances(image, r['rois'], r['masks'], r['class_ids'], coco_config.class_names)\r\n \r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed image saved successfully in your current working directory.\")\r\n return r, output\r\n\r\n else:\r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(image, r['rois'], r['masks'], r['class_ids'], coco_config.class_names, r['scores'])\r\n\r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed Image saved successfully in your current working directory.\")\r\n return r, output\r\n\r\n \r\n\r\n\r\n\r\n def segmentFrame(self, frame, show_bboxes = False, output_image_name = None, verbose = None):\r\n\r\n new_img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\r\n if verbose is not None:\r\n print(\"Processing frame...\")\r\n # Run detection\r\n results = self.model.detect([new_img])\r\n\r\n coco_config.class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\r\n 'bus', 'train', 'truck', 'boat', 'traffic light',\r\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\r\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\r\n 'zebra', 'giraffe', 
'backpack', 'umbrella', 'handbag', 'tie',\r\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\r\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\r\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\r\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\r\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\r\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\r\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\r\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\r\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\r\n 'teddy bear', 'hair drier', 'toothbrush']\r\n r = results[0] \r\n if show_bboxes == False:\r\n \r\n #apply segmentation mask\r\n output = display_instances(frame, r['rois'], r['masks'], r['class_ids'], coco_config.class_names)\r\n \r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed image saved successfully in your current working directory.\")\r\n return r, output\r\n\r\n else:\r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(frame, r['rois'], r['masks'], r['class_ids'], coco_config.class_names, r['scores'])\r\n\r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed Image saved successfully in your current working directory.\")\r\n return r, output\r\n \r\n\r\n def process_video(self, video_path, show_bboxes = False, output_video_name = None, frames_per_second = None):\r\n capture = cv2.VideoCapture(video_path)\r\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n codec = cv2.VideoWriter_fourcc(*'DIVX')\r\n coco_config.class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\r\n 'bus', 'train', 'truck', 'boat', 'traffic light',\r\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\r\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 
'bear',\r\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\r\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\r\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\r\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\r\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\r\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\r\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\r\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\r\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\r\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\r\n 'teddy bear', 'hair drier', 'toothbrush']\r\n if frames_per_second is not None:\r\n save_video = cv2.VideoWriter(output_video_name, codec, frames_per_second, (width, height))\r\n counter = 0\r\n start = time.time() \r\n \r\n if show_bboxes == False:\r\n while True:\r\n counter +=1\r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame])\r\n print(\"No. of frames:\", counter)\r\n r = results[0] \r\n #apply segmentation mask\r\n output = display_instances(frame, r['rois'], r['masks'], r['class_ids'], coco_config.class_names)\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n\r\n if output_video_name is not None:\r\n save_video.write(output)\r\n \r\n else:\r\n break \r\n \r\n end = time.time() \r\n print(f\"Processed {counter} frames in {end-start:.1f} seconds\") \r\n \r\n \r\n capture.release()\r\n if frames_per_second is not None:\r\n save_video.release() \r\n return r, output \r\n\r\n else:\r\n while True:\r\n counter +=1\r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame])\r\n print(\"No. 
of frames:\", counter)\r\n r = results[0] \r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(frame, r['rois'], r['masks'], r['class_ids'], coco_config.class_names, r['scores'])\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n \r\n if output_video_name is not None:\r\n save_video.write(output)\r\n else:\r\n break\r\n \r\n capture.release()\r\n\r\n end = time.time()\r\n print(f\"Processed {counter} frames in {end-start:.1f} seconds\") \r\n \r\n \r\n if frames_per_second is not None:\r\n save_video.release()\r\n \r\n return r, output \r\n\r\n\r\n def process_camera(self, cam, show_bboxes = False, output_video_name = None, frames_per_second = None, show_frames = None, frame_name = None, verbose = None, check_fps = False):\r\n capture = cam\r\n if output_video_name is not None:\r\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n save_video = cv2.VideoWriter(output_video_name, cv2.VideoWriter_fourcc(*'DIVX'), frames_per_second, (width, height))\r\n \r\n counter = 0\r\n \r\n start = datetime.now() \r\n\r\n coco_config.class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\r\n 'bus', 'train', 'truck', 'boat', 'traffic light',\r\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\r\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\r\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\r\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\r\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\r\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\r\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\r\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\r\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\r\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\r\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\r\n 'sink', 
'refrigerator', 'book', 'clock', 'vase', 'scissors',\r\n 'teddy bear', 'hair drier', 'toothbrush']\r\n \r\n if show_bboxes == False:\r\n while True:\r\n \r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame])\r\n \r\n r = results[0] \r\n #apply segmentation mask\r\n output = display_instances(frame, r['rois'], r['masks'], r['class_ids'], coco_config.class_names)\r\n counter +=1\r\n \r\n\r\n if show_frames == True:\r\n if frame_name is not None:\r\n cv2.imshow(frame_name, output)\r\n \r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break \r\n\r\n \r\n\r\n if output_video_name is not None:\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n save_video.write(output)\r\n \r\n elif counter == 30:\r\n break \r\n \r\n \r\n end = datetime.now()\r\n if check_fps == True:\r\n timetaken = (end-start).total_seconds()\r\n \r\n out = counter / timetaken\r\n print(f\"{out:.3f} frames per second\") \r\n\r\n if verbose is not None: \r\n print(f\"Processed {counter} frames in {timetaken:.1f} seconds\") \r\n \r\n capture.release()\r\n\r\n if output_video_name is not None:\r\n save_video.release() \r\n\r\n \r\n\r\n return r, output \r\n\r\n else:\r\n while True:\r\n \r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame])\r\n \r\n r = results[0] \r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(frame, r['rois'], r['masks'], r['class_ids'], coco_config.class_names, r['scores'])\r\n \r\n counter +=1\r\n if show_frames == True:\r\n if frame_name is not None:\r\n cv2.imshow(frame_name, output)\r\n\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break \r\n \r\n \r\n if output_video_name is not None:\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n save_video.write(output)\r\n\r\n elif counter == 30:\r\n break\r\n\r\n end = datetime.now()\r\n if check_fps == True:\r\n timetaken = 
(end-start).total_seconds()\r\n fps = counter / timetaken\r\n print(f\"{fps:.3f} frames per second\") \r\n\r\n if verbose is not None:\r\n print(f\"Processed {counter} frames in {timetaken:.1f} seconds\") \r\n \r\n \r\n capture.release()\r\n\r\n if output_video_name is not None:\r\n save_video.release() \r\n\r\n return r, output \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#############################################################\r\n#############################################################\r\n\"\"\" CLASS FOR PERFORMING INFERENCE WITH A CUSTOM MODEL \"\"\"\r\n#############################################################\r\n#############################################################\r\n\r\n\r\n\r\n\r\nclass custom_segmentation:\r\n def __init__(self):\r\n self.model_dir = os.getcwd()\r\n\r\n def inferConfig(self,name = None, network_backbone = \"resnet101\", num_classes = 1, class_names = [\"BG\"], batch_size = 1, detection_threshold = 0.7, \r\n image_max_dim = 512, image_min_dim = 512, image_resize_mode =\"square\", gpu_count = 1):\r\n self.config = Config(BACKBONE = network_backbone, NUM_CLASSES = 1 + num_classes, class_names = class_names, \r\n IMAGES_PER_GPU = batch_size, IMAGE_MAX_DIM = image_max_dim, IMAGE_MIN_DIM = image_min_dim, DETECTION_MIN_CONFIDENCE = detection_threshold,\r\n IMAGE_RESIZE_MODE = image_resize_mode,GPU_COUNT = gpu_count)\r\n \r\n def load_model(self, model_path):\r\n #load the weights for COCO\r\n self.model = MaskRCNN(mode=\"inference\", model_dir = self.model_dir, config=self.config)\r\n self.model.load_weights(model_path, by_name=True)\r\n \r\n def segmentImage(self, image_path, show_bboxes = False, output_image_name = None, verbose = None):\r\n image = cv2.imread(image_path)\r\n new_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n # Run detection\r\n if verbose is not None:\r\n print(\"Processing image...\")\r\n \r\n results = self.model.detect([new_img])\r\n\r\n \r\n r = results[0] \r\n if show_bboxes == False:\r\n \r\n #apply 
segmentation mask\r\n output = display_instances(image, r['rois'], r['masks'], r['class_ids'],self.config.class_names)\r\n \r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed image saved successfully in your current working directory.\")\r\n return r, output\r\n\r\n else:\r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(image, r['rois'], r['masks'], r['class_ids'], self.config.class_names, r['scores'])\r\n\r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed Image saved successfully in your current working directory.\")\r\n \r\n return r, output \r\n\r\n\r\n def segmentFrame(self, frame, show_bboxes = False, output_image_name = None, verbose= None):\r\n\r\n new_img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\r\n if verbose is not None:\r\n print(\"Processing frame...\")\r\n # Run detection\r\n results = self.model.detect([new_img])\r\n\r\n r = results[0] \r\n \r\n if show_bboxes == False:\r\n \r\n #apply segmentation mask\r\n output = display_instances(frame, r['rois'], r['masks'], r['class_ids'], self.config.class_names)\r\n \r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed image saved successfully in your current working directory.\")\r\n return r, output\r\n\r\n else:\r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(frame, r['rois'], r['masks'], r['class_ids'], self.config.class_names, r['scores'])\r\n\r\n if output_image_name is not None:\r\n cv2.imwrite(output_image_name, output)\r\n print(\"Processed Image saved successfully in your current working directory.\")\r\n return r, output\r\n \r\n def process_video(self, video_path, show_bboxes = False, output_video_name = None, frames_per_second = None):\r\n capture = cv2.VideoCapture(video_path)\r\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = 
int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n codec = cv2.VideoWriter_fourcc(*'DIVX')\r\n if frames_per_second is not None:\r\n save_video = cv2.VideoWriter(output_video_name, codec, frames_per_second, (width, height))\r\n counter = 0\r\n start = time.time() \r\n \r\n if show_bboxes == False:\r\n while True:\r\n counter +=1\r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame], verbose=0)\r\n print(\"No. of frames:\", counter)\r\n r = results[0] \r\n #apply segmentation mask\r\n output = display_instances(frame, r['rois'], r['masks'], r['class_ids'], self.config.class_names)\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n\r\n if output_video_name is not None:\r\n save_video.write(output)\r\n \r\n else:\r\n break \r\n \r\n end = time.time() \r\n print(f\"Processed {counter} frames in {end-start:.1f} seconds\") \r\n \r\n \r\n capture.release()\r\n if frames_per_second is not None:\r\n save_video.release() \r\n return r, output \r\n\r\n else:\r\n while True:\r\n counter +=1\r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame], verbose=0)\r\n print(\"No. 
of frames:\", counter)\r\n r = results[0] \r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(frame, r['rois'], r['masks'], r['class_ids'], self.config.class_names, r['scores'])\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n \r\n if output_video_name is not None:\r\n save_video.write(output)\r\n else:\r\n break\r\n \r\n capture.release()\r\n\r\n end = time.time()\r\n print(f\"Processed {counter} frames in {end-start:.1f} seconds\") \r\n \r\n \r\n if frames_per_second is not None:\r\n save_video.release()\r\n \r\n return r, output \r\n \r\n \r\n def process_camera(self, cam, show_bboxes = False, output_video_name = None, frames_per_second = None, show_frames = None, frame_name = None, verbose = None, check_fps = False):\r\n capture = cam\r\n \r\n if output_video_name is not None:\r\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n codec = cv2.VideoWriter_fourcc(*'DIVX')\r\n save_video = cv2.VideoWriter(output_video_name, codec, frames_per_second, (width, height))\r\n\r\n counter = 0\r\n start = datetime.now() \r\n\r\n if show_bboxes == False:\r\n while True:\r\n \r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame])\r\n \r\n r = results[0] \r\n #apply segmentation mask\r\n output = display_instances(frame, r['rois'], r['masks'], r['class_ids'], self.config.class_names)\r\n counter +=1\r\n \r\n\r\n if show_frames == True:\r\n if frame_name is not None:\r\n cv2.imshow(frame_name, output)\r\n \r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break \r\n\r\n if output_video_name is not None:\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n save_video.write(output)\r\n \r\n elif counter == 30:\r\n break \r\n \r\n end = datetime.now() \r\n \r\n \r\n if check_fps == True:\r\n timetaken = (end-start).total_seconds()\r\n fps = counter/timetaken\r\n print(f\"{fps} 
frames per seconds\") \r\n\r\n if verbose is not None:\r\n print(f\"Processed {counter} frames in {timetaken:.1f} seconds\") \r\n \r\n \r\n capture.release()\r\n\r\n if output_video_name is not None:\r\n save_video.release() \r\n\r\n \r\n\r\n return r, output \r\n\r\n else:\r\n while True:\r\n \r\n ret, frame = capture.read()\r\n if ret:\r\n # Run detection\r\n results = self.model.detect([frame])\r\n \r\n r = results[0] \r\n #apply segmentation mask with bounding boxes\r\n output = display_box_instances(frame, r['rois'], r['masks'], r['class_ids'], self.config.class_names, r['scores'])\r\n \r\n counter +=1\r\n if show_frames == True:\r\n if frame_name is not None:\r\n cv2.imshow(frame_name, output)\r\n\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break \r\n \r\n if output_video_name is not None:\r\n output = cv2.resize(output, (width,height), interpolation=cv2.INTER_AREA)\r\n save_video.write(output)\r\n\r\n elif counter == 30:\r\n break\r\n\r\n end = datetime.now()\r\n \r\n\r\n if check_fps == True:\r\n timetaken = (end-start).total_seconds()\r\n fps = counter/timetaken\r\n print(f\"{fps} frames per seconds\")\r\n\r\n if verbose is not None:\r\n print(f\"Processed {counter} frames in {timetaken:.1f} seconds\") \r\n \r\n capture.release()\r\n\r\n if output_video_name is not None:\r\n save_video.release() \r\n\r\n return r, output \r\n\r\n\r\n\r\n\r\n\r\n################VISUALIZATION CODE ##################\r\n\r\n\r\n\r\n\r\ndef random_colors(N, bright=True):\r\n \"\"\"\r\n Generate random colors.\r\n To get visually distinct colors, generate them in HSV space then\r\n convert to RGB.\r\n \"\"\"\r\n brightness = 1.0 if bright else 0.7\r\n hsv = [(i / N, 1, brightness) for i in range(N)]\r\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\r\n random.shuffle(colors)\r\n return colors\r\n\r\n\r\ndef apply_mask(image, mask, color, alpha=0.5):\r\n \"\"\"Apply the given mask to the image.\r\n \"\"\"\r\n for c in range(3):\r\n image[:, :, c] = np.where(mask 
== 1,\r\n image[:, :, c] *\r\n (1 - alpha) + alpha * color[c] * 255,\r\n image[:, :, c])\r\n return image\r\n\r\n \r\n\r\n\r\ndef display_instances(image, boxes, masks, class_ids, class_name):\r\n \r\n n_instances = boxes.shape[0]\r\n colors = random_colors(n_instances)\r\n\r\n \r\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\r\n\r\n for i, color in enumerate(colors):\r\n mask = masks[:, :, i]\r\n\r\n image = apply_mask(image, mask, color)\r\n\r\n\r\n return image\r\n\r\n\r\n\r\n\r\n\r\ndef display_box_instances(image, boxes, masks, class_ids, class_name, scores):\r\n \r\n n_instances = boxes.shape[0]\r\n colors = random_colors(n_instances)\r\n\r\n \r\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\r\n\r\n for i, color in enumerate(colors):\r\n if not np.any(boxes[i]):\r\n continue\r\n\r\n y1, x1, y2, x2 = boxes[i]\r\n label = class_name[class_ids[i]]\r\n score = scores[i] if scores is not None else None\r\n caption = '{} {:.2f}'.format(label, score) if score else label\r\n mask = masks[:, :, i]\r\n\r\n image = apply_mask(image, mask, color)\r\n color_rec = [int(c) for c in np.array(colors[i]) * 255]\r\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color_rec, 2)\r\n image = cv2.putText(\r\n image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.5, color = (255, 255, 255))\r\n\r\n return image\r\n\r\n\r\n\r\n"
] |
[
[
"numpy.any",
"numpy.where",
"numpy.array"
]
] |
krkaufma/Electron-Diffraction-CNN
|
[
"157ce788eabed6d8599193d4b826d703ea181c0a"
] |
[
"src/vecchio/make_data.py"
] |
[
"import os\nimport typing as t\n\nimport abc\nimport collections.abc as cabc\nimport imageio\nimport keras\nimport numpy as np\nimport pandas as pd\nfrom skimage.transform import resize\nfrom sklearn.model_selection import train_test_split\n\nfrom vecchio.file_utils import dirfmt\n\n\ndef scale_dims(max_dim, h, w):\n if w > h:\n r = max_dim / w\n else:\n r = max_dim / h\n return int(r * h), int(r * w)\n\n\nMAX_DIM = 299\nHEIGHT, WIDTH = scale_dims(MAX_DIM, 512, 622)\nDEPTH = 1\n\n\nclass EBSDSequence(keras.utils.Sequence):\n def __init__(self, x_set, y_set, batch_size, ):\n self.x, self.y = x_set, y_set\n self.batch_size = batch_size\n\n def __len__(self):\n return int(np.ceil(len(self.x) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]\n batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]\n\n return (np.array([filename_to_img(filename) for filename in batch_x]),\n self.prepare_y(batch_y))\n\n @abc.abstractmethod\n def prepare_y(self, batch_y: cabc.Sequence) -> cabc.Sequence:\n pass\n\n\nclass ClassificationEBSDSequence(EBSDSequence):\n\n def __init__(self, *args, num_classes=None):\n super().__init__(*args)\n self.num_classes = num_classes\n\n def prepare_y(self, batch_y: cabc.Sequence) -> cabc.Sequence:\n return keras.utils.to_categorical(batch_y, num_classes=self.num_classes)\n\n\nclass RegressionEBSDSequence(EBSDSequence):\n def prepare_y(self, batch_y: cabc.Sequence) -> cabc.Sequence:\n return batch_y\n\n\nclass MultiLabelRegressionEBSDSequence(RegressionEBSDSequence):\n def __init__(self, *args, n_labels=None):\n super().__init__(*args)\n\n self.n_labels = 3 if n_labels is None else n_labels\n\n def prepare_y(self, batch_y: cabc.Sequence) -> cabc.Sequence:\n return np.array(batch_y, dtype=np.float32)\n\n\ndef filename_to_img(filename):\n img = imageio.imread(filename)\n\n if len(img.shape) < 3:\n img = img[:, :, np.newaxis]\n\n img_resized = resize(img, (HEIGHT, 
WIDTH), mode='constant', anti_aliasing=True)\n img_float = img_resized.astype(dtype=np.float32)\n\n return img_float\n\n\ndef present_manifest(manifest_path: str, label_columns: t.Union[t.Sequence[str], str]) -> \\\n t.Mapping[str, t.List[pd.Series]]:\n \"\"\"Present pre-split data from the manifest in standard way\n\n Expects split to be `test`, `train`, `validation`, but will work with arbitrary named groups.\n Supports single or multi-label y-values.\n\n Args:\n manifest_path: /path/to/manifest.csv\n label_columns: one or a list of strings corresponding to the column names from the manifest file.\n\n Returns:\n A dictionary whose keys are the split group, and values are X, y tuples.\n \"\"\"\n man_df = pd.read_csv(manifest_path)\n\n splits = {}\n\n split_set = man_df['_split'].unique()\n\n for sp in split_set:\n curr_split = man_df[man_df['_split'] == sp]\n splits[sp] = [curr_split['path'], curr_split[label_columns]]\n\n return splits\n\n\ndef get_filepaths(data_dir, test_size=0.20, val_size=0.20, *, seed=42):\n \"\"\"Return a list of filepaths and classes, split into train, test, and validation sets\n\n Test and train data are split first. Then validation taken, taking a percentage of the train data.\n\n Args:\n data_dir: source directory for data\n test_size: percentage of data to put in test bucket\n val_size: percentage of data to put in validation bucket\n seed: seed for random partition of the data. Needs to be fixed so model_train and model_eval will work on the\n same test/training data\n\n Returns:\n a dictionary of tuples. The left value of each tuple is the data, the right is the label. 
The keys of the\n dictionary are 'train', 'test' and 'validation'.\n \"\"\"\n dir_contents = os.listdir(data_dir)\n class_folders = [folder for folder in dir_contents if os.path.isdir(dirfmt(data_dir) + dirfmt(folder))]\n\n img_files = []\n img_labels = []\n c = 0\n for klass in class_folders:\n path = dirfmt(data_dir) + dirfmt(klass) + dirfmt('Images')\n files = os.listdir(path)\n\n imgs = [path + img for img in files if '.tiff' in img]\n img_files.extend(imgs)\n img_labels.extend([c for _ in range(len(imgs))])\n\n c += 1\n\n x_train, x_test, y_train, y_test = train_test_split(img_files, img_labels, test_size=test_size, random_state=seed)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=val_size, random_state=seed)\n\n # Lists are preferred over tuples in order to prevent copying the data\n return {'train': [x_train, y_train], 'test': [x_test, y_test], 'validation': [x_val, y_val]}\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"numpy.array",
"pandas.read_csv"
]
] |
MartinLidh/ida_ai
|
[
"bbc2f8e1778d77bc704763f94985860d517af23c"
] |
[
"model/ner_model.py"
] |
[
" # -*- coding: utf-8 -*-\nimport numpy as np\nimport os\nimport tensorflow as tf\n\n\nfrom .data_utils import minibatches, pad_sequences, get_chunks\nfrom .general_utils import Progbar\nfrom .base_model import BaseModel\nfrom .gazetteer import Gazetteer\n\n\nclass NERModel(BaseModel):\n \"\"\"Specialized class of Model for NER\"\"\"\n\n def __init__(self, config):\n super(NERModel, self).__init__(config)\n self.idx_to_tag = {idx: tag for tag, idx in\n self.config.vocab_tags.items()}\n self.gazetteer = Gazetteer(\"data/gazetters.txt\")\n\n\n def add_placeholders(self):\n \"\"\"Define placeholders = entries to computational graph\"\"\"\n # shape = (batch size, max length of sentence in batch)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None],\n name=\"word_ids\")\n\n # shape = (batch size)\n self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],\n name=\"sequence_lengths\")\n\n # shape = (batch size, max length of sentence, max length of word)\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None],\n name=\"char_ids\")\n\n # shape = (batch_size, max_length of sentence)\n self.word_lengths = tf.placeholder(tf.int32, shape=[None, None],\n name=\"word_lengths\")\n\n # shape = (batch size, max length of sentence in batch)\n self.labels = tf.placeholder(tf.int32, shape=[None, None],\n name=\"labels\")\n\n # hyper parameters\n self.dropout = tf.placeholder(dtype=tf.float32, shape=[],\n name=\"dropout\")\n self.lr = tf.placeholder(dtype=tf.float32, shape=[],\n name=\"lr\")\n\n\n def get_feed_dict(self, words, labels=None, lr=None, dropout=None):\n \"\"\"Given some data, pad it and build a feed dictionary\n\n Args:\n words: list of sentences. A sentence is a list of ids of a list of\n words. 
A word is a list of ids\n labels: list of ids\n lr: (float) learning rate\n dropout: (float) keep prob\n\n Returns:\n dict {placeholder: value}\n\n \"\"\"\n # perform padding of the given data\n if self.config.use_chars:\n char_ids, word_ids = zip(*words)\n word_ids, sequence_lengths = pad_sequences(word_ids, 0)\n char_ids, word_lengths = pad_sequences(char_ids, pad_tok=0,\n nlevels=2)\n else:\n word_ids, sequence_lengths = pad_sequences(words, 0)\n\n # build feed dictionary\n feed = {\n self.word_ids: word_ids,\n self.sequence_lengths: sequence_lengths\n }\n\n if self.config.use_chars:\n feed[self.char_ids] = char_ids\n feed[self.word_lengths] = word_lengths\n\n if labels is not None:\n labels, _ = pad_sequences(labels, 0)\n feed[self.labels] = labels\n\n if lr is not None:\n feed[self.lr] = lr\n\n if dropout is not None:\n feed[self.dropout] = dropout\n\n return feed, sequence_lengths\n\n\n def add_word_embeddings_op(self):\n \"\"\"Defines self.word_embeddings\n\n If self.config.embeddings is not None and is a np array initialized\n with pre-trained word vectors, the word embeddings is just a look-up\n and we don't train the vectors. 
Otherwise, a random matrix with\n the correct shape is initialized.\n \"\"\"\n with tf.variable_scope(\"words\"):\n if self.config.embeddings is None:\n self.logger.info(\"WARNING: randomly initializing word vectors\")\n _word_embeddings = tf.get_variable(\n name=\"_word_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nwords, self.config.dim_word])\n else:\n _word_embeddings = tf.Variable(\n self.config.embeddings,\n name=\"_word_embeddings\",\n dtype=tf.float32,\n trainable=self.config.train_embeddings)\n\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids, name=\"word_embeddings\")\n\n with tf.variable_scope(\"chars\"):\n if self.config.use_chars:\n # get char embeddings matrix\n _char_embeddings = tf.get_variable(\n name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nchars, self.config.dim_char])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings,\n self.char_ids, name=\"char_embeddings\")\n\n # put the time dimension on axis=1\n s = tf.shape(char_embeddings)\n char_embeddings = tf.reshape(char_embeddings,\n shape=[s[0]*s[1], s[-2], self.config.dim_char])\n word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])\n\n # bi lstm on chars\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n _output = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, char_embeddings,\n sequence_length=word_lengths, dtype=tf.float32)\n\n # read and concat output\n _, ((_, output_fw), (_, output_bw)) = _output\n output = tf.concat([output_fw, output_bw], axis=-1)\n\n # shape = (batch size, max sentence length, char hidden size)\n output = tf.reshape(output,\n shape=[s[0], s[1], 2*self.config.hidden_size_char])\n word_embeddings = tf.concat([word_embeddings, output], axis=-1)\n\n self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)\n\n\n def add_logits_op(self):\n \"\"\"Defines 
self.logits\n\n For each word in each sentence of the batch, it corresponds to a vector\n of scores, of dimension equal to the number of tags.\n \"\"\"\n with tf.variable_scope(\"bi-lstm\"):\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_embeddings,\n sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.nn.dropout(output, self.dropout)\n\n with tf.variable_scope(\"proj\"):\n W = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[2*self.config.hidden_size_lstm, self.config.ntags])\n\n b = tf.get_variable(\"b\", shape=[self.config.ntags],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps = tf.shape(output)[1]\n output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])\n pred = tf.matmul(output, W) + b\n self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])\n\n\n def add_pred_op(self):\n \"\"\"Defines self.labels_pred\n\n This op is defined only in the case where we don't use a CRF since in\n that case we can make the prediction \"in the graph\" (thanks to tf\n functions in other words). 
With theCRF, as the inference is coded\n in python and not in pure tensroflow, we have to make the prediciton\n outside the graph.\n \"\"\"\n if not self.config.use_crf:\n self.labels_pred = tf.cast(tf.argmax(self.logits, axis=-1),\n tf.int32)\n\n\n def add_loss_op(self):\n \"\"\"Defines the loss\"\"\"\n if self.config.use_crf:\n log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(\n self.logits, self.labels, self.sequence_lengths)\n self.trans_params = trans_params # need to evaluate it for decoding\n self.loss = tf.reduce_mean(-log_likelihood)\n else:\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=self.logits, labels=self.labels)\n mask = tf.sequence_mask(self.sequence_lengths)\n losses = tf.boolean_mask(losses, mask)\n self.loss = tf.reduce_mean(losses)\n\n # for tensorboard\n tf.summary.scalar(\"loss\", self.loss)\n\n\n def build(self):\n # NER specific functions\n self.add_placeholders()\n self.add_word_embeddings_op()\n self.add_logits_op()\n self.add_pred_op()\n self.add_loss_op()\n\n # Generic functions that add training op and initialize session\n self.add_train_op(self.config.lr_method, self.lr, self.loss,\n self.config.clip)\n self.initialize_session() # now self.sess is defined and vars are init\n\n\n def predict_batch(self, words):\n \"\"\"\n Args:\n words: list of sentences\n\n Returns:\n labels_pred: list of labels for each sentence\n sequence_length\n\n \"\"\"\n fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)\n\n if self.config.use_crf:\n # get tag scores and transition params of CRF\n viterbi_sequences = []\n logits, trans_params = self.sess.run(\n [self.logits, self.trans_params], feed_dict=fd)\n\n # iterate over the sentences because no batching in vitervi_decode\n for logit, sequence_length in zip(logits, sequence_lengths):\n logit = logit[:sequence_length] # keep only the valid steps\n viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(\n logit, trans_params)\n viterbi_sequences += 
[viterbi_seq]\n return viterbi_sequences, sequence_lengths\n\n else:\n labels_pred = self.sess.run(self.labels_pred, feed_dict=fd)\n return labels_pred, sequence_lengths\n\n\n def run_epoch(self, train, dev, epoch):\n \"\"\"Performs one complete pass over the train set and evaluate on dev\n\n Args:\n train: dataset that yields tuple of sentences, tags\n dev: dataset\n epoch: (int) index of the current epoch\n\n Returns:\n f1: (python float), score to select model on, higher is better\n\n \"\"\"\n # progbar stuff for logging\n batch_size = self.config.batch_size\n nbatches = (len(train) + batch_size - 1) // batch_size\n prog = Progbar(target=nbatches)\n\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss, summary = self.sess.run(\n [self.train_op, self.loss, self.merged], feed_dict=fd)\n\n prog.update(i + 1, [(\"train loss\", train_loss)])\n\n # tensorboard\n if i % 10 == 0:\n self.file_writer.add_summary(summary, epoch*nbatches + i)\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n\n return metrics[\"f1\"]\n\n\n def run_evaluate(self, test):\n \"\"\"Evaluates performance on test set\n\n Args:\n test: dataset that yields tuple of (sentences, tags)\n\n Returns:\n metrics: (dict) metrics[\"acc\"] = 98.4, ...\n\n \"\"\"\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n 
correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}\n\n\n def predict(self, words_raw):\n \"\"\"Returns list of tags\n\n Args:\n words_raw: list of words (string), just one sentence (no batch)\n\n Returns:\n preds: list of tags (string), one for each word in the sentence\n\n \"\"\"\n words = [self.config.processing_word(w) for w in words_raw]\n if type(words[0]) == tuple:\n words = zip(*words)\n pred_ids, _ = self.predict_batch([words])\n preds = [self.idx_to_tag[idx] for idx in list(pred_ids[0])]\n self.gazetteer.lookup_tags(words_raw, preds)\n return preds\n"
] |
[
[
"tensorflow.matmul",
"numpy.mean",
"tensorflow.reshape",
"tensorflow.nn.embedding_lookup",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.argmax",
"tensorflow.Variable",
"tensorflow.variable_scope",
"tensorflow.nn.dropout",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.summary.scalar",
"tensorflow.placeholder",
"tensorflow.get_variable",
"tensorflow.boolean_mask",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.contrib.crf.crf_log_likelihood",
"tensorflow.zeros_initializer",
"tensorflow.sequence_mask",
"tensorflow.contrib.crf.viterbi_decode",
"tensorflow.reduce_mean"
]
] |
zyhazwraith/FigureQA-baseline
|
[
"241a6054b3016793529b1508c1b67507166a2e67"
] |
[
"eval_model_on_figureqa.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nfrom glob import glob\nimport json\nimport os\nimport re\nimport shutil\nimport tarfile\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom data.figureqa import FigureQA\nfrom models.cnn_baseline import CNNBaselineModel\nfrom models.rn import RNModel\nfrom models.textonly_baseline import TextOnlyBaselineModel\n\nDEBUG = False\nTFDBG = False\nif DEBUG and TFDBG:\n from tensorflow.python import debug as tf_debug\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data-path', type=str, required=True,\n help='parent folder of where the data set is located')\nparser.add_argument('--tmp-path', type=str,\n help=('tmp directory where to extract data set (optimally '\n 'on faster storage, if not specified DATA_PATH is '\n 'used)'))\nparser.add_argument('--train-dir', type=str, required=True,\n help='training directory of the trained model.')\nparser.add_argument('--meta-file', type=str,\n help='path to the meta-file from which to load parameters.')\nparser.add_argument('--partition', type=str, default='test2',\n help='name of the partition to evaluate the model on.')\nparser.add_argument('--batchsize', type=int, default=64,\n help='Size of the mini-batches.')\n\nargs = parser.parse_args()\n\ntrain_dir = args.train_dir\n\n# load config file (expects unique config file)\nconfig_files = glob(os.path.join(train_dir, '*_on_figureqa_config.json'))\nif len(config_files) == 0:\n raise IOError('no config file found in TRAIN_DIR')\nelif len(config_files) > 1:\n raise IOError('more than one config file found in TRAIN_DIR')\nconfig_file = config_files[0]\n\nconfig = json.load(open(config_file))\nmodel_str = config['model']['name']\n\nif args.meta_file is None:\n meta_file = os.path.join(train_dir, 'model_val_best.meta')\n assert os.access(meta_file, os.R_OK)\nelse:\n meta_file = args.meta_file\n\n# copy FigureQA to tmp path, if not done before\nfigureqa_src_path 
= os.path.join(args.data_path, 'FigureQA')\nfigureqa_path = os.path.join(args.tmp_path, 'FigureQA')\nassert not os.system('mkdir -p {}'.format(figureqa_path))\ncopy_done_file = os.path.join(figureqa_path, '.done')\nsame_path = os.path.samefile(figureqa_src_path, figureqa_path)\ncopy_done = os.path.isfile(copy_done_file)\nif not (copy_done or same_path):\n for fname in glob(os.path.join(figureqa_src_path, '*.tar.gz')):\n shutil.copy2(fname, figureqa_path)\nif not copy_done:\n for fname in glob(os.path.join(figureqa_path, '*.tar.gz')):\n print('extracting {}...'.format(fname))\n f = tarfile.open(fname)\n f.extractall(path=figureqa_path)\n # if figureqa_path not equal to figureqa_src_path, we delete the\n # copy of the archive\n if not same_path:\n os.remove(fname)\n assert not os.system('touch {}'.format(copy_done_file))\n\n\nno_images = model_str == 'TextOnlyBaseline'\n\nif no_images:\n im_size = None\nelse:\n im_size = config['im_size']\n\nprint('loading {} data...'.format(args.partition))\ndata_object = FigureQA(\n data_path=figureqa_path,\n partition=args.partition,\n shuffle=False,\n load_dict_file='figureqa_dict.json',\n im_size=im_size,\n no_images=no_images\n)\nnsamples = len(data_object._questions)\ndict_size = len(data_object.tok_to_idx)\n\nif 'test' in args.partition:\n # check if answers available. 
If not, just dump predictions...\n try:\n answers_df = pd.read_csv(\n os.path.join(figureqa_path, args.partition, 'answers.csv'), index_col=0)\n answers = answers_df['answer'].as_matrix()\n except:\n answers = None\nelse:\n # not test partition, so true answers are collected as we iterate through\n # the data\n answers = []\n\ndata = data_object.get_data(batch_size=args.batchsize, return_indices=True,\n return_question_strings=True)\n\n# create the session\nsess = tf.Session()\nif DEBUG and TFDBG:\n sess = tf_debug.LocalCLIDebugWrapperSession(\n sess, dump_root='/data/home/vmichals/data/tfdbg')\n sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\n\nprint('restoring model graph...')\nis_training = tf.placeholder(tf.bool, shape=(), name='is_training')\n\niterator = data.make_one_shot_iterator()\nnext_batch = iterator.get_next()\nif answers == []:\n (input_images, input_questions, input_questions_lengths,\n answers_batch, question_indices, image_indices, question_ids,\n question_strings) = next_batch\n iter_answers = True\nelse:\n (input_images, input_questions, input_questions_lengths, question_indices,\n image_indices, question_ids, question_strings) = next_batch\n iter_answers = False\n\n# instantiate the model\nif model_str == 'RN':\n model = RNModel(\n is_training=is_training,\n config=config['model'],\n dictionary_size=dict_size\n )\nelif model_str == 'CNNBaseline':\n model = CNNBaselineModel(\n is_training=is_training,\n config=config['model'],\n dictionary_size=dict_size\n )\nelif model_str == 'TextOnlyBaseline':\n model = TextOnlyBaselineModel(\n is_training=is_training,\n config=config['model'],\n dictionary_size=dict_size\n )\nelse:\n raise ValueError('unsupported model type found in config file')\n\ninput_kwargs = {\n 'q': input_questions,\n 'qlen': input_questions_lengths,\n}\nif model_str != 'TextOnlyBaseline':\n input_kwargs['img'] = input_images\n# NOTE: workaround, during training inference is nested inside\n# the loss variable 
scope\nwith tf.variable_scope('loss'):\n _, predictions = model.inference(**input_kwargs)\n\nprint('create lists of indices for dumping predictions.')\nprediction_list = []\nq_index_list = []\nim_index_list = []\nq_id_list = []\nq_str_list = []\nidx = 0\nbatch_cnt = 0\nnbatches = int(np.ceil(float(nsamples) / args.batchsize))\nprint('nbatches: {0}'.format(nbatches, ))\nprint('nsamples: {0}'.format(nsamples, ))\n\ndir(tf.contrib)\nsaver = tf.train.Saver(max_to_keep=100)\nsaver.restore(sess, os.path.splitext(meta_file)[0])\n\nif answers is not None:\n accuracy = 0.\n preds_correct_list = []\n\n# loop over all batches until tf.errors.OutOfRangeError\nwhile True:\n try:\n print('fetching batch {}/{}'.format(batch_cnt + 1, nbatches))\n\n # fetch predictions\n if iter_answers:\n (preds, answers_batch_np, q_indices, im_indices, q_ids,\n q_strs) = sess.run(\n (predictions, answers_batch, question_indices, image_indices,\n question_ids, question_strings), feed_dict={\n is_training: False,\n }\n )\n\n answers.extend(answers_batch_np)\n else:\n (preds, q_indices, im_indices, q_ids, q_strs) = sess.run(\n (predictions, question_indices, image_indices,\n question_ids, question_strings), feed_dict={\n is_training: False,\n }\n )\n nbatch_samples = len(preds)\n prediction_list.extend(preds.astype(bool).tolist())\n q_index_list.extend(q_indices)\n im_index_list.extend(im_indices)\n q_id_list.extend(q_ids)\n q_str_list.extend(q_strs)\n\n # compare with answers\n if answers is not None:\n preds_correct = answers[idx:idx+nbatch_samples] == preds.astype(bool)\n preds_correct_list.extend(preds_correct)\n accuracy = (accuracy * idx + np.sum(preds_correct)) / (\n idx + nbatch_samples)\n idx += nbatch_samples\n batch_cnt += 1\n except tf.errors.OutOfRangeError:\n break\n\n# if there are ground truth answers, print and save accuracy\nif answers is not None:\n print('\\naccuracy on \"{1}\": {0}'.format(accuracy, args.partition))\n with open('accuracy_{0}_{1}_{2}_{3}.txt'.format(\n 
args.partition,\n config['model']['name'],\n config['dataset']['name'],\n os.path.splitext(os.path.split(meta_file)[1])[0]), 'w') as fid:\n json.dump(accuracy, fid)\n\n# dump predictions\nlines = ['question_index,image_index,question_id,question_string,answer\\n']\n\nfor i in range(len(prediction_list)):\n lines.append('{},{},{},{},{}\\n'.format(q_index_list[i],\n im_index_list[i],\n q_id_list[i],\n q_str_list[i].decode('UTF-8'),\n int(prediction_list[i])))\n\npredictions_fname = 'preds_{0}_{1}_{2}_{3}.csv'.format(\n args.partition,\n config['model']['name'],\n config['dataset']['name'],\n os.path.splitext(os.path.split(meta_file)[1])[0]\n)\nwith open(predictions_fname, 'w') as fid:\n fid.writelines(lines)\n\nif answers is None:\n print(\n ('Dumped {} predictions to {}, to get the accuracy, please send the file'\n 'to figureqa@microsoft.com with a short comment.').format(\n args.partition, predictions_fname)\n )\n\n# vim: set ts=4 sw=4 sts=4 expandtab:\n"
] |
[
[
"numpy.sum",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.variable_scope",
"tensorflow.python.debug.LocalCLIDebugWrapperSession",
"tensorflow.placeholder"
]
] |
albertomarchisio/EfficientSNN
|
[
"783005ccbbb31bd9ce3f4defadc14491c4ca9cf6",
"783005ccbbb31bd9ce3f4defadc14491c4ca9cf6"
] |
[
"DVS_preprocessing/dataset_generator32x32_1ch.py",
"DVS_preprocessing/dataset_generator32x32temporal_6chNoPol.py"
] |
[
"import struct\nimport numpy as np\nimport time\nfrom time import sleep\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\nimport csv\nimport glob\nimport os\nimport math\nimport pickle\nimport time\nimport datetime\n\ntrain_set = True # flag that is used to define if the current file is a part of the train set\n\n# the following list contain the frame and the labels of train and test set\nx_train = list() \ny_train = list()\nx_test = list() \ny_test = list()\n\n\ndef csv_open (csv_gesture, csv_mapping):\n # this function is used to open the csv files and takes the data.\n # returns a list of lists: each inner list contains:\n # ['*gesture_name*', 'start_time(usec)', 'stop_time(usec)']\n \n start_stop_list= list() # list that contains start and stop time of each gesture\n key_list = list() # list that contains the name of each gesture; these names will become the keys of the dictionary \"start_stop_dict\"\n\n with open(csv_gesture) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter = ',')\n line_count = 0\n\n previous = list() # the gesture 8 (arm rolling) is repeated two times in the dataset, so we need to remove the repetition and sum the two time sequence into a single interval \n for row in csv_reader:\n if line_count == 0: # skip the first line of the file\n line_count += 1\n continue\n temp = list (row[1:]) #take start and stop times\n \n if (row[0] == previous): # check if the previous gesture is equal to the present gesture (happens with arm rolling)\n start_stop_list[-1][1] = row[2] # create a single time interval for the repeated gesture\n else: # in this case there is not a repetition, so simply add the time interval to the start_stop list\n start_stop_list.append(temp)\n previous = row[0] # update the previous gesture with the present one\n \n with open(csv_mapping) as csv_file: \n csv_reader = csv.reader(csv_file, delimiter = ',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n continue\n 
temp = row[0]\n key_list.append(temp)\n\n start_stop_dict = {key: None for key in key_list} # dictionary that contains, for each gesture, its start and stop times\n\n for (key, start_stop) in zip(key_list,start_stop_list):\n start_stop_dict[key] = start_stop\n return start_stop_dict, key_list\n\n\ndef skip_header(file_read):\n ''' skip header '''\n line = file_read.readline()\n while line.startswith(b'#'):\n if ( line == b'#!END-HEADER\\r\\n'):\n break\n else:\n line = file_read.readline()\n\n\ndef read_events(file_read):\n \"\"\" A simple function that read events from cAER tcp\"\"\"\n \n #raise Exception\n data = file_read.read(28)\n\n if(len(data) == 0 ):\n return [-1], [-1], [-1], [-1], [-1], [-1]\n\n # read header\n\n # struct.unpack() returns a tuple. I take only the first element of the tuple (by putting the [0] after the unpack)\n eventtype = struct.unpack('H', data[0:2])[0] # 'H' stands for unsigned short, because is 2 Byte\n eventsource = struct.unpack('H', data[2:4])[0]\n eventsize = struct.unpack('I', data[4:8])[0] # 'I' stands for unsigned short, because is 4 Byte\n eventoffset = struct.unpack('I', data[8:12])[0]\n eventtsoverflow = struct.unpack('I', data[12:16])[0]\n eventcapacity = struct.unpack('I', data[16:20])[0]\n eventnumber = struct.unpack('I', data[20:24])[0]\n eventvalid = struct.unpack('I', data[24:28])[0]\n next_read = eventcapacity * eventsize # we now read the full packet\n data = file_read.read(next_read) # I have moved to the [events] block, after the [header]\n counter = 0 # eventnumber[0]\n #return arrays\n x_addr_tot = []\n y_addr_tot = []\n pol_tot = []\n ts_tot =[]\n spec_type_tot =[]\n spec_ts_tot = []\n\n # eventType = 1 is a polarity event, so is ok!\n if(eventtype == 1): # something is wrong as we set in the cAER to send only polarity events\n while(data[counter:counter + eventsize]): # loop over all event packets\n aer_data = struct.unpack('I', data[counter:counter + 4])[0]\n timestamp = struct.unpack('I', data[counter + 
4:counter + 8])[0]\n x_addr = (aer_data >> 17) & 0x00007FFF\n y_addr = (aer_data >> 2) & 0x00007FFF\n x_addr_tot.append(x_addr)\n y_addr_tot.append(y_addr)\n pol = (aer_data >> 1) & 0x00000001\n pol_tot.append(pol)\n ts_tot.append(timestamp)\n #print (timestamp, x_addr, y_addr, pol)\n counter = counter + eventsize\n elif(eventtype == 0): # eventType 0 is a special event\n spec_type_tot =[]\n spec_ts_tot = []\n while(data[counter:counter + eventsize]): # loop over all event packets\n special_data = struct.unpack('I', data[counter:counter + 4])[0]\n timestamp = struct.unpack('I', data[counter + 4:counter + 8])[0]\n spec_type = (special_data >> 1) & 0x0000007F\n spec_type_tot.append(spec_type)\n spec_ts_tot.append(timestamp)\n if(spec_type == 6 or spec_type == 7 or spec_type == 9 or spec_type == 10):\n print (timestamp, spec_type)\n counter = counter + eventsize\n elif(eventtype == 2):\n print(\"Frame Event\")\n\n return np.array(x_addr_tot), np.array(y_addr_tot), np.array(pol_tot), np.array(ts_tot), np.array(spec_type_tot), np.array(spec_ts_tot)\n\n#--- frame_generator:\n# function used to generate all the frames from the collected events of a given hand gesture\ndef frame_generator(events_list, number_of_events, accumulation_set, numeric_label):\n # events_list structure:\n # - list of [x, y, p] for each event\n # x is in the interval [0, 128]\n # y is in the interval [0, 128]\n # p can be 0 (OFF) or 1 (ON)\n # ts_tot is not necessary for this task\n\n # number of frames given the defined accumulation_set\n n_frames = math.floor(number_of_events/accumulation_set)\n \n for i in range(n_frames): \n # create two frame matrix: one for the positive events and one for negative events\n frame = np.full((128,128), 128) \n \n # assign polarity value to the matrix\n \n if i != n_frames-1:\n subset_of_events = events_list[(i*accumulation_set) : ((i+1)*accumulation_set-1)]\n else: \n subset_of_events = events_list[(i*accumulation_set) :]\n\n for event in subset_of_events:\n \n 
if event[2] == 1: \n if frame[event[0],event[1]] < 255: # set a maximum accumulation to 128 positive events\n frame[event[0],event[1]] += 1 #sum event that happens in the same pixel\n \n else: \n if frame[event[0],event[1]] > 0: # set a maximum accumulation to 128 positive events\n frame[event[0],event[1]] -= 1 #sum event that happens in the same pixel\n #frame_PREDATOR[event[0],event[1]] += -1/200 #sum event that happens in the same pixel\n \n\n # dimensionality reduction of the 2 matrices\n # I want to resize the image to 32x32, so I need to create 1 matrix with dimension 32x32, following the rule:\n\n frame_resize = np.full((32,32), 128) \n #frame_PREDATOR_resize = np.zeros(shape=(32,32))\n\n for i in range(0,128,4):\n for j in range(0,128,4):\n p_x = int(i/4) # pixel x\n p_y = int(j/4) # pixel y\n polarity_count = 0\n for k in range(4):\n for h in range(4):\n polarity_count += frame[i+k,j+h]\n \n frame_resize[p_x,p_y] = polarity_count/16 \n \n frame_resize = np.subtract(frame_resize, np.min(frame_resize)) # subtract minimul value to all elements: I want to clip the values in the interval [0, 255]\n\n # Normalization:\n max_ = np.amax(frame_resize)\n frame_resize = np.divide(frame_resize,max_)\n frame_resize = np.multiply(frame_resize,255)\n \n \n # add the resulting frame and its corresponding label to the complete list of frames\n if train_set == True:\n x_train.append(frame_resize)\n y_train.append(numeric_label)\n else: \n x_test.append(frame_resize)\n y_test.append(numeric_label)\n#-- END frame_generator\n\n\n# for DVS dataset\nxdim = 128\nydim = 128\naccumulation_set = 5000 #number of events that will produce a single frame\n\nimport platform\nprint(platform.release())\nif platform.release() == \"5.0.0-37-generic\": # I am on my pc \n base_path = '/home/riccardo/Documents/uni/python/thesis/nxsdk/dnn_models/snntoolbox/DVS_Gesture_dataset/DvsGesture'\nelse: # I am on LOPO1\n base_path = '/srv/data/rmassa/from_my_pc/DVS_Gesture_dataset/DvsGesture'\n\n#--- 
find all the aedat and csv file of the dataset:\naedat_list = list()\nkey_list = list() # list that contains the name of each gesture; these names will become the keys of the dictionary \"start_stop_dict\"\nfor file in os.listdir(base_path):\n if file.endswith(\".aedat\"):\n if file == 'user02_lab.aedat' or file == 'user12_fluorescent_led.aedat': # check if the file is one of the damaged (from errata.txt)\n continue # broken file will not be used\n else:\n aedat_list.append(file)\n\naedat_list.sort()\n\n# open gesture mapping csv\n\nimport platform\nprint(platform.release())\nif platform.release() == \"5.0.0-37-generic\": # I am on my pc \n csv_mapping = '/home/riccardo/Documents/uni/python/thesis/nxsdk/dnn_models/snntoolbox/DVS_Gesture_dataset/DvsGesture/gesture_mapping.csv'\nelse: # I am on LOPO1\n csv_mapping = '/srv/data/rmassa/from_my_pc/DVS_Gesture_dataset/DvsGesture/gesture_mapping.csv'\n\n\naedat_counter = 0 # to keep count of how many aedat file I have analyzed\n\nstart = time.time() # start time for measure conversion time\n\n\nfor aedat in aedat_list:\n # check if the current file is for train or test: (test from user24 up)\n if aedat[4:6] == \"24\":\n train_set = False\n\n aedat_counter += 1\n #--- prepare the necessary variables and dictionary for the collection of events:\n class_counter = 0 # this variable is used to count the classes as they are recognized during the extraction from the aedat file\n collect_enabler = True # flag that is set to True when the hand gesture is finished \n start_collect = False # define if it is time to start collecting the events of a given gesture: needed to get gesture of 1.5s lenght\n end_of_file = False # flag that is set to True when the file is completly scanned\n gesture_counter = 0 # counter that counts which gestures have been extracted yet \n number_of_events = 0 # counts the total number of events of the hand gesture\n events_list = list() # list of all the collected events\n \n #--- open file:\n \n # open 
aedat file\n print('###############################################')\n print(\"FILE {}/{}: {}\".format(aedat_counter,len(aedat_list),aedat))\n print('-----------------------------------------------')\n elapsed = time.time()-start\n elapsed_hms = str(datetime.timedelta(seconds=elapsed))\n print('| time elapsed = ' + elapsed_hms)\n estimated_remaining_time = (len(aedat_list)-aedat_counter)*(elapsed/aedat_counter)\n estimated_remaining_time_hms =str(datetime.timedelta(seconds=estimated_remaining_time))\n print('| estimated remaining time = ' + estimated_remaining_time_hms)\n print('-----------------------------------------------')\n\n aedat = os.path.join(base_path,aedat)\n file_read = open(aedat, \"rb\") \n skip_header(file_read)\n \n #open csv file\n csv_gesture = os.path.join(base_path, aedat[:-6]+'_labels.csv')\n\n # collect info from the csv files\n start_stop_dict, key_list = csv_open(csv_gesture, csv_mapping) \n\n\n # reduce the duration of each each gesture to 1.5 seconds (as in the slayer paper)\n #if train_set == False: # reduce test set to 1.5seconds\n for key in key_list:\n start = int(start_stop_dict[key][0]) # start time\n stop = int(start_stop_dict[key][1]) # stop time\n duration = stop - start # duration\n print(\"{} duration: {}\".format(key, duration/1000000))\n\n #time_gap = duration-1500000 # how much time I have to skip to get the center part of the video of the gesture\n #new_start = int(start + time_gap/2) # new start is half of the time_gap after the initial start --> I get that the part of the gesture that I take is centered\n #new_stop = int(stop - time_gap/2) # new stop is half of the time_gap before the initial stop --> I get that the part of the gesture that I take is centered\n\n #print(\"old start: {} stop:{}\".format(start, stop))\n #print(\"new start: {} stop:{}\".format(new_start, new_stop))\n new_start = int(start + 500000) # new start is half of the time_gap after the initial start --> I get that the part of the gesture that I take 
is centered\n new_stop = int(stop - 500000) # new stop is half of the time_gap before the initial stop --> I get that the part of the gesture that I take is centered\n print(\"new duration: {}\".format((new_stop-new_start)/1000000))\n #print(\"\\n\")\n start_stop_dict[key][0] = str(new_start) # set the new start\n start_stop_dict[key][1] = str(new_stop) # set the new stop\n\n \n\n actual_gesture = key_list[class_counter] # the actual gesture to be recognized\n\n while(end_of_file == False):\n if collect_enabler == True:\n x, y, p, ts_tot, spec_type, spec_type_ts = read_events(file_read)\n for ts in ts_tot: \n if (abs(int(start_stop_dict[actual_gesture][0])>int(ts))):\n start_collect = False \n else: \n start_collect = True\n break\n\n if start_collect == True:\n\n for (x_, y_, p_) in zip(x,y,p):\n events_info = [y_,x_,p_] # store relevant infos, NB: switch x and y is needed because otherwise the image is 90 rotate\n events_list.append(events_info) # create a list of all the events that will be used to make the frames\n \n number_of_events += len(ts_tot) # counts the total number of events of the hand gesture\n\n # check if the gesture events are finished:\n for ts in ts_tot:\n if (abs(int(start_stop_dict[actual_gesture][1])<=int(ts))): #stop collecting events of the gesture\n collect_enabler = False\n print(\"- {}: completed, {} frames collected\".format(actual_gesture, math.floor(number_of_events/accumulation_set))) \n break\n\n if collect_enabler == False:\n\n \n # create the frames:\n frame_generator(events_list, number_of_events, accumulation_set, class_counter)\n \n # move to the next hand gesture\n number_of_events = 0\n class_counter += 1\n if actual_gesture != \"other_gestures\":\n actual_gesture = key_list[class_counter]\n else: \n end_of_file = True\n events_list = list()\n collect_enabler = True \n start_collect = False\n\n \n# Final step: create the dataset from all the accumulated frames \nx_train = np.asarray(x_train)\nx_test = 
np.asarray(x_test)\ny_train = np.asarray(y_train)\ny_test = np.asarray(y_test)\n\n\nA = list()\nB = list()\ntot = list()\n\nA.append(x_train)\nx_train = None # empty to save memory\nA.append(y_train)\ny_train = None # empty to save memory\nB.append(x_test)\nx_test = None # empty to save memory\nB.append(y_test)\ny_test = None # empty to save memory\n\n\ntot.append(A)\nA = None # empty to save memory\ntot.append(B)\nB = None # empty to save memory\n\nprint('###############################################')\nprint(\"saving dataset as: dvs_gesture32x32_1ch.pickle ...\")\nwith open(\"dvs_gesture32x32_1ch.pickle\",'wb') as pickle_file: \n pickle.dump(tot, pickle_file)\n pickle_file.close()\nprint(\"completed\")\n",
"import struct\nimport numpy as np\nimport time\nfrom time import sleep\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\nimport csv\nimport glob\nimport os\nimport math\nimport pickle\nimport time\nimport datetime\n\ntrain_set = True # flag that is used to define if the current file is a part of the train set\n\n# the following list contain the frame and the labels of train and test set\nx_train = list() \ny_train = list()\nx_test = list() \ny_test = list()\n\n\ndef csv_open (csv_gesture, csv_mapping):\n # this function is used to open the csv files and takes the data.\n # returns a list of lists: each inner list contains:\n # ['*gesture_name*', 'start_time(usec)', 'stop_time(usec)']\n \n start_stop_list= list() # list that contains start and stop time of each gesture\n key_list = list() # list that contains the name of each gesture; these names will become the keys of the dictionary \"start_stop_dict\"\n\n with open(csv_gesture) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter = ',')\n line_count = 0\n\n previous = list() # the gesture 8 (arm rolling) is repeated two times in the dataset, so we need to remove the repetition and sum the two time sequence into a single interval \n for row in csv_reader:\n if line_count == 0: # skip the first line of the file\n line_count += 1\n continue\n temp = list (row[1:]) #take start and stop times\n \n if (row[0] == previous): # check if the previous gesture is equal to the present gesture (happens with arm rolling)\n start_stop_list[-1][1] = row[2] # create a single time interval for the repeated gesture\n else: # in this case there is not a repetition, so simply add the time interval to the start_stop list\n start_stop_list.append(temp)\n previous = row[0] # update the previous gesture with the present one\n \n with open(csv_mapping) as csv_file: \n csv_reader = csv.reader(csv_file, delimiter = ',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n continue\n 
temp = row[0]\n key_list.append(temp)\n\n start_stop_dict = {key: None for key in key_list} # dictionary that contains, for each gesture, its start and stop times\n\n for (key, start_stop) in zip(key_list,start_stop_list):\n start_stop_dict[key] = start_stop\n return start_stop_dict, key_list\n\n\ndef skip_header(file_read):\n ''' skip header '''\n line = file_read.readline()\n while line.startswith(b'#'):\n if ( line == b'#!END-HEADER\\r\\n'):\n break\n else:\n line = file_read.readline()\n\n\ndef read_events(file_read):\n \"\"\" A simple function that read events from cAER tcp\"\"\"\n \n #raise Exception\n data = file_read.read(28)\n\n if(len(data) == 0 ):\n return [-1], [-1], [-1], [-1], [-1], [-1]\n\n # read header\n\n # struct.unpack() returns a tuple. I take only the first element of the tuple (by putting the [0] after the unpack)\n eventtype = struct.unpack('H', data[0:2])[0] # 'H' stands for unsigned short, because is 2 Byte\n eventsource = struct.unpack('H', data[2:4])[0]\n eventsize = struct.unpack('I', data[4:8])[0] # 'I' stands for unsigned short, because is 4 Byte\n eventoffset = struct.unpack('I', data[8:12])[0]\n eventtsoverflow = struct.unpack('I', data[12:16])[0]\n eventcapacity = struct.unpack('I', data[16:20])[0]\n eventnumber = struct.unpack('I', data[20:24])[0]\n eventvalid = struct.unpack('I', data[24:28])[0]\n next_read = eventcapacity * eventsize # we now read the full packet\n data = file_read.read(next_read) # I have moved to the [events] block, after the [header]\n counter = 0 # eventnumber[0]\n #return arrays\n x_addr_tot = []\n y_addr_tot = []\n pol_tot = []\n ts_tot =[]\n spec_type_tot =[]\n spec_ts_tot = []\n\n # eventType = 1 is a polarity event, so is ok!\n if(eventtype == 1): # something is wrong as we set in the cAER to send only polarity events\n while(data[counter:counter + eventsize]): # loop over all event packets\n aer_data = struct.unpack('I', data[counter:counter + 4])[0]\n timestamp = struct.unpack('I', data[counter + 
4:counter + 8])[0]\n x_addr = (aer_data >> 17) & 0x00007FFF\n y_addr = (aer_data >> 2) & 0x00007FFF\n x_addr_tot.append(x_addr)\n y_addr_tot.append(y_addr)\n pol = (aer_data >> 1) & 0x00000001\n pol_tot.append(pol)\n ts_tot.append(timestamp)\n #print (timestamp, x_addr, y_addr, pol)\n counter = counter + eventsize\n elif(eventtype == 0): # eventType 0 is a special event\n spec_type_tot =[]\n spec_ts_tot = []\n while(data[counter:counter + eventsize]): # loop over all event packets\n special_data = struct.unpack('I', data[counter:counter + 4])[0]\n timestamp = struct.unpack('I', data[counter + 4:counter + 8])[0]\n spec_type = (special_data >> 1) & 0x0000007F\n spec_type_tot.append(spec_type)\n spec_ts_tot.append(timestamp)\n if(spec_type == 6 or spec_type == 7 or spec_type == 9 or spec_type == 10):\n print (timestamp, spec_type)\n counter = counter + eventsize\n elif(eventtype == 2):\n print(\"Frame Event\")\n\n return np.array(x_addr_tot), np.array(y_addr_tot), np.array(pol_tot), np.array(ts_tot), np.array(spec_type_tot), np.array(spec_ts_tot)\n\n#--- frame_generator:\n# function used to generate all the frames from the collected events of a given hand gesture\ndef frame_generator(events_list, number_of_events, accumulation_set, numeric_label):\n # events_list structure:\n # - list of [x, y, p, ts] for each event\n # x is in the interval [0, 128]\n # y is in the interval [0, 128]\n # ts_tot is not necessary for this task\n\n # EVENTS ACCUMULATION POLICY:--------------------\n # the final tensor will have shape 32x32x6:\n # Each of the 6 ouput channels contains 10ms accumulated events.\n # The final tensor will stack the 6 channels and form a single output frame. 
\n\n # compute the number of frames:\n frame_duration = 60000 #60ms\n channel_duration = 10000\n start = events_list[0][3]\n end = events_list[-1][3]\n lenght = end-start\n n_frames = math.floor(lenght/frame_duration) # the number of frames is given by the division of the gesture duration (4seconds) by 60ms. That should be 66 frames\n n_channels = 6\n # find the accumulation groups: \n # for each frame, I have to accumulate a certain number of events, \n # that depens on the number of events that are withing 60ms. At the same time, each channel will\n # contain a number of events equal to the events that happens within 10 ms.\n # Therefore, I need to find out which are the intervals of accumulation for each channel of each frame of each gesture.\n n_events_per_frame_tot = list() # list that will contain all the indecies of the events that will compose a single frame\n \n for i in range(len(events_list)):\n ts = events_list[i][3]\n\n # collect the number of events to be accumulated in each frame:\n if (ts-start) >= frame_duration:\n n_events_per_frame_tot.append(n_events_per_frame)\n start = ts # update the start value such that the next frame will start counting from the following 60 ms\n else:\n n_events_per_frame = i\n\n for fr_count in range(n_frames):\n if fr_count == 0: # first frame:\n subset_of_events = events_list[0 : n_events_per_frame_tot[fr_count]-1]\n elif fr_count == n_frames-1: # last frame\n subset_of_events = events_list[n_events_per_frame_tot[fr_count-1] : ]\n else: # intermidiate frames\n subset_of_events = events_list[n_events_per_frame_tot[fr_count-1] : n_events_per_frame_tot[fr_count]-1]\n \n # now I compute the indecies of all events that occure in one channel:\n n_events_per_channel_tot = list()\n start = subset_of_events[0][3]\n for i in range(len(subset_of_events)):\n ts = subset_of_events[i][3]\n \n # collect the number of events to be accumulated in each channel:\n if (ts-start) >= channel_duration:\n 
n_events_per_channel_tot.append(n_events_per_channel)\n start = ts # update the start value such that the next frame will start counting from the following 60 ms\n \n else:\n n_events_per_channel = i\n\n frame = np.zeros((32,32,6))\n \n for ch_count in range(n_channels): \n if ch_count == 0: # first frame:\n ch_events = subset_of_events[0 : n_events_per_channel_tot[ch_count]-1]\n\n elif ch_count == n_channels-1: # last frame\n ch_events = subset_of_events[n_events_per_channel_tot[ch_count-1] : ]\n \n else: # intermidiate frames\n ch_events = subset_of_events[n_events_per_channel_tot[ch_count-1] : n_events_per_channel_tot[ch_count]-1]\n\n #create the channel matrix\n channel = np.zeros((128,128)) \n\n for event in ch_events:\n channel[event[0],event[1]] += 1 #sum event that happens in the same pixel\n \n # dimensionality reduction of the 2 matrices\n # I want to resize the image to 32x32, so I need to create 1 matrix with dimension 32x32, following the rule:\n\n resized_channel = np.zeros((32,32)) \n\n for i in range(0,128,4):\n for j in range(0,128,4):\n p_x = int(i/4) # pixel x\n p_y = int(j/4) # pixel y\n polarity_count = 0\n for k in range(4):\n for h in range(4):\n polarity_count += channel[i+k,j+h]\n \n resized_channel[p_x,p_y] = polarity_count/16 \n\n # Normalization:\n max_ = np.amax(resized_channel)\n if max_!=0:\n resized_channel = np.divide(resized_channel,max_)\n \n resized_channel = np.multiply(resized_channel,255)\n\n # add the channel to the output frame:\n for x in range(32):\n for y in range(32):\n frame[x,y,ch_count] = resized_channel[x,y]\n\n \n # add the resulting frame and its corresponding label to the complete list of frames\n if train_set == True:\n x_train.append(frame)\n y_train.append(numeric_label)\n else: \n x_test.append(frame)\n y_test.append(numeric_label)\n\n print(\"gesture {} completed, {} frames\".format(numeric_label, n_frames))\n\n\n#-- END frame_generator\n\n\n# for DVS dataset\nxdim = 128\nydim = 128\naccumulation_set = 6000 
#number of events that will produce 6 channels of the output frame\n\n\nimport platform\nprint(platform.release())\nif platform.release() == \"5.3.0-26-generic\": # I am on my pc \n base_path = '/home/riccardo/Documents/uni/python/thesis/nxsdk/dnn_models/snntoolbox/DVS_Gesture_dataset/DvsGesture'\nelse: # I am on LOPO1\n base_path = '/srv/data/rmassa/from_my_pc/DVS_Gesture_dataset/DvsGesture'\n\n#--- find all the aedat and csv file of the dataset:\naedat_list = list()\nkey_list = list() # list that contains the name of each gesture; these names will become the keys of the dictionary \"start_stop_dict\"\nfor file in os.listdir(base_path):\n if file.endswith(\".aedat\"):\n if file == 'user02_lab.aedat' or file == 'user12_fluorescent_led.aedat': # check if the file is one of the damaged (from errata.txt)\n continue # broken file will not be used\n else:\n aedat_list.append(file)\n\naedat_list.sort()\n\n# open gesture mapping csv\n\nimport platform\nprint(platform.release())\nif platform.release() == \"5.3.0-26-generic\": # I am on my pc \n csv_mapping = '/home/riccardo/Documents/uni/python/thesis/nxsdk/dnn_models/snntoolbox/DVS_Gesture_dataset/DvsGesture/gesture_mapping.csv'\nelse: # I am on LOPO1\n csv_mapping = '/srv/data/rmassa/from_my_pc/DVS_Gesture_dataset/DvsGesture/gesture_mapping.csv'\n\n\naedat_counter = 0 # to keep count of how many aedat file I have analyzed\n\nstart = time.time() # start time for measure conversion time\n\nfor aedat in aedat_list:\n # check if the current file is for train or test: (test from user24 up)\n if aedat[4:6] == \"24\":\n train_set = False\n\n aedat_counter += 1\n #--- prepare the necessary variables and dictionary for the collection of events:\n class_counter = 0 # this variable is used to count the classes as they are recognized during the extraction from the aedat file\n collect_enabler = True # flag that is set to True when the hand gesture is finished \n start_collect = False # define if it is time to start collecting the 
events of a given gesture: needed to get gesture of 1.5s lenght\n end_of_file = False # flag that is set to True when the file is completly scanned\n gesture_counter = 0 # counter that counts which gestures have been extracted yet \n number_of_events = 0 # counts the total number of events of the hand gesture\n events_list = list() # list of all the collected events\n \n #--- open file:\n \n # open aedat file\n print('###############################################')\n print(\"FILE {}/{}: {}\".format(aedat_counter,len(aedat_list),aedat))\n print('-----------------------------------------------')\n elapsed = time.time()-start\n elapsed_hms = str(datetime.timedelta(seconds=elapsed))\n print('| time elapsed = ' + elapsed_hms)\n estimated_remaining_time = (len(aedat_list)-aedat_counter)*(elapsed/aedat_counter)\n estimated_remaining_time_hms =str(datetime.timedelta(seconds=estimated_remaining_time))\n print('| estimated remaining time = ' + estimated_remaining_time_hms)\n print('-----------------------------------------------')\n\n aedat = os.path.join(base_path,aedat)\n file_read = open(aedat, \"rb\") \n skip_header(file_read)\n \n #open csv file\n csv_gesture = os.path.join(base_path, aedat[:-6]+'_labels.csv')\n\n # collect info from the csv files\n start_stop_dict, key_list = csv_open(csv_gesture, csv_mapping) \n\n\n # reduce the duration of each each to a maximum duration of 4 seconds: this is done also in order to have a balanced dataset, and keep its size small\n\n for key in key_list:\n start = int(start_stop_dict[key][0]) # start time\n stop = int(start_stop_dict[key][1]) # stop time\n duration = stop - start # duration\n #print(\"{} duration: {}\".format(key, duration/1000000))\n\n if duration >= 4000001:\n time_gap = duration-4000000 # how much time I have to skip to get the center part of the video of the gesture\n new_start = int(start + time_gap/2) # new start is half of the time_gap after the initial start --> I get that the part of the gesture that I take 
is centered\n new_stop = int(stop - time_gap/2) # new stop is half of the time_gap before the initial stop --> I get that the part of the gesture that I take is centered\n\n #print(\"old start: {} stop:{}\".format(start, stop))\n #print(\"new start: {} stop:{}\".format(new_start, new_stop))\n #new_start = int(start + 500000) # new start is half of the time_gap after the initial start --> I get that the part of the gesture that I take is centered\n #new_stop = int(stop - 500000) # new stop is half of the time_gap before the initial stop --> I get that the part of the gesture that I take is centered\n #print(\"new duration: {}\".format((new_stop-new_start)/1000000))\n #print(\"\\n\")\n start_stop_dict[key][0] = str(new_start) # set the new start\n start_stop_dict[key][1] = str(new_stop) # set the new stop\n \n\n \n\n actual_gesture = key_list[class_counter] # the actual gesture to be recognized\n\n while(end_of_file == False):\n if collect_enabler == True:\n x, y, p, ts_tot, spec_type, spec_type_ts = read_events(file_read)\n for ts in ts_tot: \n if (abs(int(start_stop_dict[actual_gesture][0])>int(ts))):\n start_collect = False \n else: \n start_collect = True\n break\n\n if start_collect == True:\n\n for (ts_, x_, y_, p_) in zip(ts_tot, x,y,p):\n events_info = [y_,x_,p_, ts_] # store relevant infos, NB: switch x and y is needed because otherwise the image is 90 rotate\n events_list.append(events_info) # create a list of all the events that will be used to make the frames\n \n number_of_events += len(ts_tot) # counts the total number of events of the hand gesture\n\n # check if the gesture events are finished:\n for ts in ts_tot:\n if (abs(int(start_stop_dict[actual_gesture][1])<=int(ts))): #stop collecting events of the gesture\n collect_enabler = False \n break\n\n if collect_enabler == False:\n\n \n # create the frames:\n frame_generator(events_list, number_of_events, accumulation_set, class_counter)\n \n # move to the next hand gesture\n number_of_events = 0\n 
class_counter += 1\n if actual_gesture != \"other_gestures\":\n actual_gesture = key_list[class_counter]\n else: \n end_of_file = True\n events_list = list()\n collect_enabler = True \n start_collect = False\n\n \n# Final step: create the dataset from all the accumulated frames \nx_train = np.asarray(x_train)\nx_test = np.asarray(x_test)\ny_train = np.asarray(y_train)\ny_test = np.asarray(y_test)\n\n\nA = list()\nB = list()\ntot = list()\n\nA.append(x_train)\nx_train = None # empty to save memory\nA.append(y_train)\ny_train = None # empty to save memory\nB.append(x_test)\nx_test = None # empty to save memory\nB.append(y_test)\ny_test = None # empty to save memory\n\n\ntot.append(A)\nA = None # empty to save memory\ntot.append(B)\nB = None # empty to save memory\n\nprint('###############################################')\nprint(\"saving dataset as: dvs_gesture32x32temporal_6chNoPol.pickle ...\")\nwith open(\"dvs_gesture32x32temporal_6chNoPol.pickle\",'wb') as pickle_file: \n pickle.dump(tot, pickle_file)\n pickle_file.close()\nprint(\"completed\")\n"
] |
[
[
"numpy.divide",
"numpy.full",
"numpy.array",
"numpy.asarray",
"numpy.min",
"numpy.multiply",
"numpy.amax"
],
[
"numpy.divide",
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.multiply",
"numpy.amax"
]
] |
tonglizi/SfmLearner-Pytorch-master_new
|
[
"b4315b75d47f0bb3662adf50c8db12a82f082675"
] |
[
"point_cloud_processing/prepare_data.py"
] |
[
"# -*- coding: utf-8 -*-\r\n'''\r\n***********点云的处理库*******\r\npcl:c++的开源库,python的接口提供的支持较少(windows环境下超级难装!)\r\nopen3D: intel开源的python点云库,作为python环境的主力库\r\n在读取数据方面,两者都能达到同样的效果\r\n'''\r\nimport os\r\n# import pcl\r\n# import open3d as o3d\r\nimport numpy as np\r\n\r\n\r\n# from pypcd import pypcd\r\n\r\n\r\n# def loadPointCloud(rootdir):\r\n# files=os.listdir(rootdir)\r\n# pointclouds=[]\r\n# for file in files:\r\n# if not os.path.isdir(file):\r\n# p=pcl.load(rootdir+\"\\\\\"+file)\r\n# pointclouds.append(p)\r\n# return pointclouds\r\n\r\n# def loadImage(rootdir):\r\n# files=os.listdir(rootdir)\r\n# images=[]\r\n# for file in files:\r\n# if not os.path.isdir(file):\r\n# img=pcl.load(rootdir+\"\\\\\"+file)\r\n# images.append(img)\r\n# return images\r\n\r\n# def read_pcd(path_file):\r\n# pcd = o3d.io.read_point_cloud(path_file)\r\n# return pcd\r\n\r\n\r\n# def loadPointCloudOpen3D(rootdir):\r\n# files = os.listdir(rootdir)\r\n# pointclouds = []\r\n# for file in files:\r\n# if not os.path.isdir(file):\r\n# p = read_pcd(rootdir + \"/\" + file)\r\n# pointclouds.append(p)\r\n# return pointclouds\r\n\r\n\r\ndef loadPointCloud(rootdir):\r\n files = os.listdir(rootdir)\r\n pointclouds = []\r\n for file in files:\r\n if not os.path.isdir(file):\r\n p = np.fromfile(rootdir + \"/\" + file, dtype=np.float32)\r\n pointclouds.append(p)\r\n return pointclouds\r\n\r\n# *********test code***************\r\n# path=r'C:\\Users\\93121\\Desktop\\dataset\\velodyne_pcd\\0000000000.pcd'\r\n\r\n# test open3D read and convert to ndarray\r\n# p=read_pcd(path)\r\n# print(p)\r\n# import numpy as np\r\n# xyz_load = np.asarray(p.points)\r\n# print(xyz_load)\r\n\r\n\r\n# test PCL read and convert to ndarray\r\n# p=pcl.load(path)\r\n# print(p)\r\n# import numpy as np\r\n# xyz_load=np.asarray(p)\r\n# print(xyz_load)\r\n"
] |
[
[
"numpy.fromfile"
]
] |
tanakatsu/dqn-jan-ken-pon
|
[
"3ae2c928d9ab00edee709f553b16963951859189"
] |
[
"tf/jan_ken_pon.py"
] |
[
"import os\nimport numpy as np\n\n\nclass JanKenPon:\n\n def __init__(self):\n # parameters\n self.name = os.path.splitext(os.path.basename(__file__))[0]\n self.player_hand = None\n self.opponent_hand = None\n self.observation = np.zeros(6)\n self.frame_length = 1\n self.state = np.stack([self.observation for _ in xrange(self.frame_length)], axis=0)\n self.enable_actions = (0, 1, 2, 3, 4, 5)\n self.enable_valid_actions = (0, 2, 5)\n\n # variables\n self.reset()\n\n def execute_action(self, action):\n \"\"\"\n action:\n 0: goo\n 1: undefined\n 2: choki\n 3: undefined\n 4: undefined\n 5: par\n \"\"\"\n # update player state\n self.player_hand = action\n\n # determine win or loose\n self.reward = 0\n self.terminal = False\n\n undefined = (1, 3, 4)\n\n if self.player_hand in undefined:\n self.reward = -3\n elif self.player_hand == self.opponent_hand:\n # self.reward = 0\n self.reward = -1\n elif self.player_hand == 0 and self.opponent_hand == 2:\n self.reward = 1\n elif self.player_hand == 2 and self.opponent_hand == 5:\n self.reward = 1\n elif self.player_hand == 5 and self.opponent_hand == 0:\n self.reward = 1\n else:\n self.reward = -1\n\n if self.reward != 0:\n self.terminal = True\n\n def observe(self):\n # set opponent next hand\n self.observation = np.zeros(6)\n self.opponent_hand = self.enable_valid_actions[np.random.randint(len(self.enable_valid_actions))]\n self.observation[self.opponent_hand] = 1\n\n # update state history\n self.update_state_history()\n\n return self.state, self.reward, self.terminal\n\n def update_state_history(self):\n self.state = np.append(self.state[1:], self.observation.reshape(-1, self.observation.size), axis=0)\n\n def render(self):\n pass\n\n def render_hand_shape(self, action):\n if action == 0:\n return 'goo'\n elif action == 2:\n return 'choki'\n elif action == 5:\n return 'par'\n return 'unknown'\n\n def reset(self):\n # reset player\n self.player_hand = np.random.randint(len(self.enable_actions))\n\n # reset opponent\n 
self.opponent_hand = self.enable_valid_actions[np.random.randint(len(self.enable_valid_actions))]\n\n # reset other variables\n self.reward = 0\n self.terminal = False\n"
] |
[
[
"numpy.zeros"
]
] |
zklgame/CatEyeNets
|
[
"b5cdd9fa6b84f6ebaaf56d03d5beca4a63af6932"
] |
[
"utils/gradient_check.py"
] |
[
"import numpy as np\nfrom random import randrange\n\ndef grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):\n \"\"\"\n sample a few random elements and only return numerical\n in this dimensions\n \"\"\"\n for i in xrange(num_checks):\n ix = tuple([randrange(m) for m in x.shape])\n oldval = x[ix]\n x[ix] = oldval + h\n fxph = f(x) # evaluate f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval\n\n grad_numerical = (fxph - fxmh) / (2 * h)\n grad_analytic = analytic_grad[ix]\n rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))\n print('numerical: %f, analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error))\n\n\ndef eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n \"\"\"\n a naive implementation of numerical gradient of f at x\n - f should be a function that takes a single argument\n - x is the point (numpy array) to evaluate the gradient at\n \"\"\"\n\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # evaluate f(x + h)\n fxph = f(x)\n x[ix] = oldval - h # evaluate f(x - h)\n fxmh = f(x)\n x[ix] = oldval\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h)\n if verbose:\n print(ix, grad[ix])\n it.iternext()\n\n return grad\n\n\ndef eval_numerical_gradient_array(f, x, df, h=1e-5):\n \"\"\"\n Evaluate a numeric gradient for a function that accepts a numpy\n array and returns a numpy array.\n \"\"\"\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # evaluate f(x + h)\n fxph = f(x)\n x[ix] = oldval - h # evaluate f(x - h)\n fxmh = f(x)\n 
x[ix] = oldval\n\n grad[ix] = np.sum((fxph - fxmh) * df) / (2 * h)\n\n it.iternext()\n\n return grad\n\n"
] |
[
[
"numpy.sum",
"numpy.zeros_like",
"numpy.nditer"
]
] |
khaledsaab/NeuralCDE
|
[
"559d9d6fdb137afd14965725ea4845cf31e9235c"
] |
[
"experiments/common.py"
] |
[
"import copy\nimport json\nimport math\nimport numpy as np\nimport os\nimport pathlib\nimport sklearn.metrics\nimport torch\nimport tqdm\n\nimport models\n\n#here = pathlib.Path(__file__).resolve().parent\nhere = pathlib.Path(\"/dfs/scratch1/ksaab/ncde_results\")\n\ndef _add_weight_regularisation(loss_fn, regularise_parameters, scaling=0.03):\n def new_loss_fn(pred_y, true_y):\n total_loss = loss_fn(pred_y, true_y)\n for parameter in regularise_parameters.parameters():\n if parameter.requires_grad:\n total_loss = total_loss + scaling * parameter.norm()\n return total_loss\n return new_loss_fn\n\n\nclass _SqueezeEnd(torch.nn.Module):\n def __init__(self, model):\n super(_SqueezeEnd, self).__init__()\n self.model = model\n\n def forward(self, *args, **kwargs):\n return self.model(*args, **kwargs).squeeze(-1)\n\n\ndef _count_parameters(model):\n \"\"\"Counts the number of parameters in a model.\"\"\"\n return sum(param.numel() for param in model.parameters() if param.requires_grad_)\n\n\nclass _AttrDict(dict):\n def __setattr__(self, key, value):\n self[key] = value\n\n def __getattr__(self, item):\n return self[item]\n\n\ndef _evaluate_metrics(dataloader, model, times, loss_fn, num_classes, device, kwargs):\n with torch.no_grad():\n total_accuracy = 0\n total_confusion = torch.zeros(num_classes, num_classes).numpy() # occurs all too often\n total_dataset_size = 0\n total_loss = 0\n true_y_cpus = []\n pred_y_cpus = []\n\n for batch in dataloader:\n batch = tuple(b.to(device) for b in batch)\n *coeffs, true_y, lengths = batch\n batch_size = true_y.size(0)\n pred_y = model(times, coeffs, lengths, **kwargs)\n\n if num_classes == 2:\n thresholded_y = (pred_y > 0).to(true_y.dtype)\n else:\n thresholded_y = torch.argmax(pred_y, dim=1)\n true_y_cpu = true_y.detach().cpu()\n pred_y_cpu = pred_y.detach().cpu()\n if num_classes == 2:\n # Assume that our datasets aren't so large that this breaks\n true_y_cpus.append(true_y_cpu)\n pred_y_cpus.append(pred_y_cpu)\n 
thresholded_y_cpu = thresholded_y.detach().cpu()\n\n total_accuracy += (thresholded_y == true_y).sum().to(pred_y.dtype)\n total_confusion += sklearn.metrics.confusion_matrix(true_y_cpu, thresholded_y_cpu,\n labels=range(num_classes))\n total_dataset_size += batch_size\n total_loss += loss_fn(pred_y, true_y) * batch_size\n\n total_loss /= total_dataset_size # assume 'mean' reduction in the loss function\n total_accuracy /= total_dataset_size\n metrics = _AttrDict(accuracy=total_accuracy.item(), confusion=total_confusion, dataset_size=total_dataset_size,\n loss=total_loss.item())\n\n if num_classes == 2:\n true_y_cpus = torch.cat(true_y_cpus, dim=0)\n pred_y_cpus = torch.cat(pred_y_cpus, dim=0)\n metrics.auroc = sklearn.metrics.roc_auc_score(true_y_cpus, pred_y_cpus)\n metrics.average_precision = sklearn.metrics.average_precision_score(true_y_cpus, pred_y_cpus)\n return metrics\n\n\nclass _SuppressAssertions:\n def __init__(self, tqdm_range):\n self.tqdm_range = tqdm_range\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is AssertionError:\n self.tqdm_range.write('Caught AssertionError: ' + str(exc_val))\n return True\n\n\ndef _train_loop(train_dataloader, val_dataloader, model, times, optimizer, loss_fn, max_epochs, num_classes, device,\n kwargs, step_mode):\n model.train()\n best_model = model\n best_train_loss = math.inf\n best_train_accuracy = 0\n best_val_accuracy = 0\n best_train_accuracy_epoch = 0\n best_train_loss_epoch = 0\n history = []\n breaking = False\n\n if step_mode:\n epoch_per_metric = 10\n plateau_terminate = 100\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)\n else:\n epoch_per_metric = 10\n plateau_terminate = 50\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1, mode='max')\n\n tqdm_range = tqdm.tqdm(range(max_epochs))\n tqdm_range.write('Starting training for model:\\n\\n' + str(model) + '\\n\\n')\n for epoch in tqdm_range:\n if 
breaking:\n break\n for batch in train_dataloader:\n batch = tuple(b.to(device) for b in batch)\n if breaking:\n break\n with _SuppressAssertions(tqdm_range):\n *train_coeffs, train_y, lengths = batch\n pred_y = model(times, train_coeffs, lengths, **kwargs)\n loss = loss_fn(pred_y, train_y)\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n if epoch % epoch_per_metric == 0 or epoch == max_epochs - 1:\n model.eval()\n train_metrics = _evaluate_metrics(train_dataloader, model, times, loss_fn, num_classes, device, kwargs)\n val_metrics = _evaluate_metrics(val_dataloader, model, times, loss_fn, num_classes, device, kwargs)\n model.train()\n\n if train_metrics.loss * 1.0001 < best_train_loss:\n best_train_loss = train_metrics.loss\n best_train_loss_epoch = epoch\n\n if train_metrics.accuracy > best_train_accuracy * 1.001:\n best_train_accuracy = train_metrics.accuracy\n best_train_accuracy_epoch = epoch\n\n if val_metrics.accuracy > best_val_accuracy:\n best_val_accuracy = val_metrics.accuracy\n del best_model # so that we don't have three copies of a model simultaneously\n best_model = copy.deepcopy(model)\n\n tqdm_range.write('Epoch: {} Train loss: {:.3} Train accuracy: {:.3} Val loss: {:.3} '\n 'Val accuracy: {:.3}'\n ''.format(epoch, train_metrics.loss, train_metrics.accuracy, val_metrics.loss,\n val_metrics.accuracy))\n if step_mode:\n scheduler.step(train_metrics.loss)\n else:\n scheduler.step(val_metrics.accuracy)\n history.append(_AttrDict(epoch=epoch, train_metrics=train_metrics, val_metrics=val_metrics))\n\n if epoch > best_train_loss_epoch + plateau_terminate:\n tqdm_range.write('Breaking because of no improvement in training loss for {} epochs.'\n ''.format(plateau_terminate))\n breaking = True\n if epoch > best_train_accuracy_epoch + plateau_terminate:\n tqdm_range.write('Breaking because of no improvement in training accuracy for {} epochs.'\n ''.format(plateau_terminate))\n breaking = True\n\n for parameter, best_parameter in 
zip(model.parameters(), best_model.parameters()):\n parameter.data = best_parameter.data\n return history\n\n\nclass _TensorEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, (torch.Tensor, np.ndarray)):\n return o.tolist()\n else:\n super(_TensorEncoder, self).default(o)\n\n\ndef _save_results(name, result):\n loc = here / 'results' / name\n if not os.path.exists(loc):\n os.mkdir(loc)\n num = -1\n for filename in os.listdir(loc):\n try:\n num = max(num, int(filename))\n except ValueError:\n pass\n result_to_save = result.copy()\n del result_to_save['train_dataloader']\n del result_to_save['val_dataloader']\n del result_to_save['test_dataloader']\n result_to_save['model'] = str(result_to_save['model'])\n\n num += 1\n with open(loc / str(num), 'w') as f:\n json.dump(result_to_save, f, cls=_TensorEncoder)\n\n\ndef main(name, times, train_dataloader, val_dataloader, test_dataloader, device, make_model, num_classes, max_epochs,\n lr, kwargs, step_mode, pos_weight=torch.tensor(1)):\n times = times.to(device)\n if device != 'cpu':\n torch.cuda.reset_max_memory_allocated(device)\n baseline_memory = torch.cuda.memory_allocated(device)\n else:\n baseline_memory = None\n\n model, regularise_parameters = make_model()\n if num_classes == 2:\n model = _SqueezeEnd(model)\n loss_fn = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)\n else:\n loss_fn = torch.nn.functional.cross_entropy\n loss_fn = _add_weight_regularisation(loss_fn, regularise_parameters)\n model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n history = _train_loop(train_dataloader, val_dataloader, model, times, optimizer, loss_fn, max_epochs,\n num_classes, device, kwargs, step_mode)\n\n model.eval()\n train_metrics = _evaluate_metrics(train_dataloader, model, times, loss_fn, num_classes, device, kwargs)\n val_metrics = _evaluate_metrics(val_dataloader, model, times, loss_fn, num_classes, device, kwargs)\n test_metrics = _evaluate_metrics(test_dataloader, model, 
times, loss_fn, num_classes, device, kwargs)\n\n if device != 'cpu':\n memory_usage = torch.cuda.max_memory_allocated(device) - baseline_memory\n else:\n memory_usage = None\n\n result = _AttrDict(times=times,\n memory_usage=memory_usage,\n baseline_memory=baseline_memory,\n num_classes=num_classes,\n train_dataloader=train_dataloader,\n val_dataloader=val_dataloader,\n test_dataloader=test_dataloader,\n model=model.to('cpu'),\n parameters=_count_parameters(model),\n history=history,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n test_metrics=test_metrics)\n if name is not None:\n _save_results(name, result)\n return result\n\n\ndef make_model(name, input_channels, output_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers,\n use_intensity, initial):\n if name == 'ncde':\n def make_model():\n vector_field = models.FinalTanh(input_channels=input_channels, hidden_channels=hidden_channels,\n hidden_hidden_channels=hidden_hidden_channels,\n num_hidden_layers=num_hidden_layers)\n model = models.NeuralCDE(func=vector_field, input_channels=input_channels, hidden_channels=hidden_channels,\n output_channels=output_channels, initial=initial)\n return model, vector_field\n elif name == 'gruode':\n def make_model():\n vector_field = models.GRU_ODE(input_channels=input_channels, hidden_channels=hidden_channels)\n model = models.NeuralCDE(func=vector_field, input_channels=input_channels,\n hidden_channels=hidden_channels, output_channels=output_channels, initial=initial)\n return model, vector_field\n elif name == 'dt':\n def make_model():\n model = models.GRU_dt(input_channels=input_channels, hidden_channels=hidden_channels,\n output_channels=output_channels, use_intensity=use_intensity)\n return model, model\n elif name == 'decay':\n def make_model():\n model = models.GRU_D(input_channels=input_channels, hidden_channels=hidden_channels,\n output_channels=output_channels, use_intensity=use_intensity)\n return model, model\n elif name == 'odernn':\n 
def make_model():\n model = models.ODERNN(input_channels=input_channels, hidden_channels=hidden_channels,\n hidden_hidden_channels=hidden_hidden_channels, num_hidden_layers=num_hidden_layers,\n output_channels=output_channels, use_intensity=use_intensity)\n return model, model\n else:\n raise ValueError(\"Unrecognised model name {}. Valid names are 'ncde', 'gruode', 'dt', 'decay' and 'odernn'.\"\n \"\".format(name))\n return make_model\n"
] |
[
[
"torch.zeros",
"torch.cat",
"torch.no_grad",
"torch.cuda.reset_max_memory_allocated",
"torch.cuda.memory_allocated",
"torch.cuda.max_memory_allocated",
"torch.tensor",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.BCEWithLogitsLoss",
"torch.argmax"
]
] |
salesforce/UniversalFewShotNLP
|
[
"7a1564a886e5ef71d2f01f60beb123c279f27bfe"
] |
[
"SciTail/0.shot.py"
] |
[
"# Copyright (c) 2018, salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport random\nimport sys\nimport codecs\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import defaultdict\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nfrom scipy.stats import beta\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom scipy.special import softmax\n# from scipy.stats import pearsonr, spearmanr\n# from sklearn.metrics import matthews_corrcoef, f1_score\n\nfrom transformers.tokenization_roberta import RobertaTokenizer\nfrom transformers.optimization import AdamW\nfrom transformers.modeling_roberta import RobertaModel#RobertaForSequenceClassification\n\n# from transformers.modeling_bert import BertModel\n# from transformers.tokenization_bert import BertTokenizer\n# from bert_common_functions import store_transformers_models\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel\n# import torch.nn as nn\n\nbert_hidden_dim = 1024\npretrain_model_dir = 'roberta-large' #'roberta-large' , 'roberta-large-mnli', 'bert-large-uncased'\n\n\nclass RobertaForSequenceClassification(nn.Module):\n def __init__(self, tagset_size):\n super(RobertaForSequenceClassification, self).__init__()\n self.tagset_size = tagset_size\n\n self.roberta_single= RobertaModel.from_pretrained(pretrain_model_dir)\n 
self.single_hidden2tag = RobertaClassificationHead(bert_hidden_dim, tagset_size)\n\n def forward(self, input_ids, input_mask):\n outputs_single = self.roberta_single(input_ids, input_mask, None)\n hidden_states_single = outputs_single[1]#torch.tanh(self.hidden_layer_2(torch.tanh(self.hidden_layer_1(outputs_single[1])))) #(batch, hidden)\n\n score_single = self.single_hidden2tag(hidden_states_single) #(batch, tag_set)\n return score_single\n\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"wenpeng overwrite it so to accept matrix as input\"\"\"\n\n def __init__(self, bert_hidden_dim, num_labels):\n super(RobertaClassificationHead, self).__init__()\n self.dense = nn.Linear(bert_hidden_dim, bert_hidden_dim)\n self.dropout = nn.Dropout(0.1)\n self.out_proj = nn.Linear(bert_hidden_dim, num_labels)\n\n def forward(self, features):\n x = features#[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\nclass RteProcessor(DataProcessor):\n \"\"\"Processor for the RTE data set (GLUE version).\"\"\"\n\n def get_SciTail_dev_and_test(self, train_filename, dev_filename):\n '''\n classes: entails, neutral\n '''\n examples_per_file = []\n for filename in [train_filename, dev_filename]:\n examples=[]\n readfile = codecs.open(filename, 'r', 'utf-8')\n line_co=0\n for row in readfile:\n\n line=row.strip().split('\\t')\n if len(line) == 3:\n guid = \"train-\"+str(line_co-1)\n # text_a = 'SciTail. 
'+line[0].strip()\n text_a = line[0].strip()\n text_b = line[1].strip()\n label = line[2].strip()\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n\n readfile.close()\n print('loaded SciTail size:', len(examples))\n examples_per_file.append(examples)\n return examples_per_file[0], examples_per_file[1] #train, dev\n\n def get_labels(self):\n 'here we keep the three-way in MNLI training '\n return [\"entailment\", \"not_entailment\"]\n # return [\"entailment\", \"neutral\", \"contradiction\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer, output_mode,\n cls_token_at_end=False,\n cls_token='[CLS]',\n cls_token_segment_id=1,\n sep_token='[SEP]',\n sep_token_extra=False,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n sequence_a_segment_id=0,\n sequence_b_segment_id=1,\n mask_padding_with_zero=True):\n \"\"\" Loads a data file into a list of `InputBatch`s\n `cls_token_at_end` define the location of the CLS token:\n - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]\n - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]\n `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)\n \"\"\"\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = 
tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\". \" -4\" for RoBERTa.\n special_tokens_count = 4 if sep_token_extra else 3\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)\n else:\n # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n special_tokens_count = 3 if sep_token_extra else 2\n if len(tokens_a) > max_seq_length - special_tokens_count:\n tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". 
Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = tokens_a + [sep_token]\n if sep_token_extra:\n # roberta uses an extra separator b/w pairs of sentences\n tokens += [sep_token]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [sep_token]\n segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)\n\n if cls_token_at_end:\n tokens = tokens + [cls_token]\n segment_ids = segment_ids + [cls_token_segment_id]\n else:\n tokens = [cls_token] + tokens\n segment_ids = [cls_token_segment_id] + segment_ids\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if output_mode == \"classification\":\n label_id = label_map[example.label]\n elif output_mode == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(output_mode)\n\n # if ex_index < 5:\n # logger.info(\"*** Example ***\")\n # logger.info(\"guid: %s\" % (example.guid))\n # logger.info(\"tokens: %s\" % \" \".join(\n # [str(x) for x in tokens]))\n # logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n # logger.info(\"input_mask: %s\" % \" \".join([str(x) for 
x in input_mask]))\n # logger.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n # logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\n\n\n\n\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n ## Other parameters\n parser.add_argument(\"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_lower_case\",\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=16,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=64,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\",\n default=1e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n\n\n args = parser.parse_args()\n\n\n processors = {\n \"rte\": RteProcessor\n }\n\n output_modes = {\n \"rte\": \"classification\"\n }\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\n task_name = args.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n\n\n processor = processors[task_name]()\n output_mode = output_modes[task_name]\n\n scitail_path = '/export/home/Dataset/SciTailV1/tsv_format/'\n # train_examples = processor.get_SciTail_as_train_k_shot(scitail_path+'scitail_1.0_train.tsv', args.kshot) #train_pu_half_v1.txt\n _, test_examples = processor.get_SciTail_dev_and_test(scitail_path+'scitail_1.0_dev.tsv', 
scitail_path+'scitail_1.0_test.tsv')\n\n # dev_examples = processor.get_RTE_as_dev('/export/home/Dataset/glue_data/RTE/dev.tsv')\n # test_examples = processor.get_RTE_as_test('/export/home/Dataset/RTE/test_RTE_1235.txt')\n label_list = [\"entails\", \"neutral\"]\n # train_examples = get_data_hulu_fewshot('train', 5)\n\n num_labels = len(label_list)\n print('num_labels:', num_labels,'test size:', len(test_examples))\n\n model = RobertaForSequenceClassification(3)\n tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)\n model.load_state_dict(torch.load('/export/home/Dataset/BERT_pretrained_mine/MNLI_pretrained/_acc_0.9040886899918633.pt'))\n model.to(device)\n\n '''load test set'''\n test_features = convert_examples_to_features(\n test_examples, label_list, args.max_seq_length, tokenizer, output_mode,\n cls_token_at_end=False,#bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end\n cls_token=tokenizer.cls_token,\n cls_token_segment_id=0,#2 if args.model_type in ['xlnet'] else 0,\n sep_token=tokenizer.sep_token,\n sep_token_extra=True,#bool(args.model_type in ['roberta']), # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n pad_on_left=False,#bool(args.model_type in ['xlnet']), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=0)#4 if args.model_type in ['xlnet'] else 0,)\n\n eval_all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)\n eval_all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)\n eval_all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)\n eval_all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)\n\n eval_data = TensorDataset(eval_all_input_ids, eval_all_input_mask, eval_all_segment_ids, eval_all_label_ids)\n eval_sampler = SequentialSampler(eval_data)\n test_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n\n\n model.eval()\n\n\n logger.info(\"***** Running test *****\")\n logger.info(\" Num examples = %d\", len(test_examples))\n # logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n eval_loss = 0\n nb_eval_steps = 0\n preds = []\n gold_label_ids = []\n # print('Evaluating...')\n for input_ids, input_mask, segment_ids, label_ids in test_dataloader:\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n gold_label_ids+=list(label_ids.detach().cpu().numpy())\n\n with torch.no_grad():\n logits = model(input_ids, input_mask)\n if len(preds) == 0:\n preds.append(logits.detach().cpu().numpy())\n else:\n preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)\n\n preds = preds[0]\n\n pred_probs = softmax(preds,axis=1)\n pred_label_ids_3way = list(np.argmax(pred_probs, axis=1))\n pred_label_ids = []\n for pred_id in pred_label_ids_3way:\n if pred_id !=0:\n pred_label_ids.append(1)\n else:\n pred_label_ids.append(0)\n\n\n gold_label_ids = 
gold_label_ids\n assert len(pred_label_ids) == len(gold_label_ids)\n hit_co = 0\n for k in range(len(pred_label_ids)):\n if pred_label_ids[k] == gold_label_ids[k]:\n hit_co +=1\n test_acc = hit_co/len(gold_label_ids)\n print('test_acc:', test_acc)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n'''\n\nCUDA_VISIBLE_DEVICES=7 python -u 0.shot.py --task_name rte --do_lower_case --num_train_epochs 20 --train_batch_size 5 --eval_batch_size 128 --learning_rate 1e-6 --max_seq_length 128 --seed 42\n\ntest_acc: 0.8170272812793979\n\n'''\n"
] |
[
[
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.load",
"scipy.special.softmax",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"numpy.argmax",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.utils.data.SequentialSampler",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.utils.data.TensorDataset",
"torch.nn.Dropout",
"numpy.random.seed",
"torch.no_grad",
"torch.tanh"
]
] |
p-christ/Deep-Reinforcement-Learning-PyTorch-Algorithms
|
[
"135d3e2e06bbde2868047d738e3fc2d73fd8cc93",
"135d3e2e06bbde2868047d738e3fc2d73fd8cc93"
] |
[
"agents/HER_Base.py",
"agents/DQN_agents/Dueling_DDQN.py"
] |
[
"import torch\nimport numpy as np\nfrom utilities.data_structures.Replay_Buffer import Replay_Buffer\nfrom utilities.Utility_Functions import abstract\n\n@abstract\nclass HER_Base(object):\n \"\"\"Contains methods needed to turn an algorithm into a hindsight experience replay (HER) algorithm\"\"\"\n def __init__(self, buffer_size, batch_size, HER_sample_proportion):\n self.HER_memory = Replay_Buffer(buffer_size, batch_size, self.config.seed)\n self.ordinary_buffer_batch_size = int(batch_size * (1.0 - HER_sample_proportion))\n self.HER_buffer_batch_size = batch_size - self.ordinary_buffer_batch_size\n\n def reset_game(self):\n \"\"\"Resets the game information so we are ready to play a new episode\"\"\"\n self.state_dict = self.environment.reset()\n self.observation = self.state_dict[\"observation\"]\n self.desired_goal = self.state_dict[\"desired_goal\"]\n self.achieved_goal = self.state_dict[\"achieved_goal\"]\n\n self.state = self.create_state_from_observation_and_desired_goal(self.observation, self.desired_goal)\n self.next_state = None\n self.action = None\n self.reward = None\n self.done = False\n\n self.episode_states = []\n self.episode_rewards = []\n self.episode_actions = []\n self.episode_next_states = []\n self.episode_dones = []\n\n self.episode_desired_goals = []\n self.episode_achieved_goals = []\n self.episode_observations = []\n\n self.episode_next_desired_goals = []\n self.episode_next_achieved_goals = []\n self.episode_next_observations = []\n\n self.total_episode_score_so_far = 0\n\n def track_changeable_goal_episodes_data(self):\n \"\"\"Saves the data from the recent episodes in a way compatible with changeable goal environments\"\"\"\n self.episode_rewards.append(self.reward)\n self.episode_actions.append(self.action)\n self.episode_dones.append(self.done)\n\n self.episode_states.append(self.state)\n self.episode_next_states.append(self.next_state)\n\n self.episode_desired_goals.append(self.state_dict[\"desired_goal\"])\n 
self.episode_achieved_goals.append(self.state_dict[\"achieved_goal\"])\n self.episode_observations.append(self.state_dict[\"observation\"])\n\n self.episode_next_desired_goals.append(self.next_state_dict[\"desired_goal\"])\n self.episode_next_achieved_goals.append(self.next_state_dict[\"achieved_goal\"])\n self.episode_next_observations.append(self.next_state_dict[\"observation\"])\n\n def conduct_action_in_changeable_goal_envs(self, action):\n \"\"\"Adapts conduct_action from base agent so that can handle changeable goal environments\"\"\"\n self.next_state_dict, self.reward, self.done, _ = self.environment.step(action)\n self.total_episode_score_so_far += self.reward\n if self.hyperparameters[\"clip_rewards\"]:\n self.reward = max(min(self.reward, 1.0), -1.0)\n self.observation = self.next_state_dict[\"observation\"]\n self.desired_goal = self.next_state_dict[\"desired_goal\"]\n self.achieved_goal = self.next_state_dict[\"achieved_goal\"]\n self.next_state = self.create_state_from_observation_and_desired_goal(self.observation, self.desired_goal)\n\n\n def create_state_from_observation_and_desired_goal(self, observation, desired_goal):\n return np.concatenate((observation, desired_goal))\n\n def save_alternative_experience(self):\n \"\"\"Saves the experiences as if the final state visited in the episode was the goal state\"\"\"\n new_goal = self.achieved_goal\n new_states = [self.create_state_from_observation_and_desired_goal(observation, new_goal) for observation in self.episode_observations]\n new_next_states = [self.create_state_from_observation_and_desired_goal(observation, new_goal) for observation in\n self.episode_next_observations]\n new_rewards = [self.environment.compute_reward(next_achieved_goal, new_goal, None) for next_achieved_goal in self.episode_next_achieved_goals]\n\n if self.hyperparameters[\"clip_rewards\"]:\n new_rewards = [max(min(reward, 1.0), -1.0) for reward in new_rewards]\n\n self.HER_memory.add_experience(new_states, 
self.episode_actions, new_rewards, new_next_states, self.episode_dones)\n\n def sample_from_HER_and_Ordinary_Buffer(self):\n \"\"\"Samples from the ordinary replay buffer and HER replay buffer according to a proportion specified in config\"\"\"\n states, actions, rewards, next_states, dones = self.memory.sample(self.ordinary_buffer_batch_size)\n HER_states, HER_actions, HER_rewards, HER_next_states, HER_dones = self.HER_memory.sample(self.HER_buffer_batch_size)\n\n states = torch.cat((states, HER_states))\n actions = torch.cat((actions, HER_actions))\n rewards = torch.cat((rewards, HER_rewards))\n next_states = torch.cat((next_states, HER_next_states))\n dones = torch.cat((dones, HER_dones))\n return states, actions, rewards, next_states, dones\n\n\n",
"import torch\nfrom torch import optim\nfrom agents.Base_Agent import Base_Agent\nfrom agents.DQN_agents.DDQN import DDQN\n\nclass Dueling_DDQN(DDQN):\n \"\"\"A dueling double DQN agent as described in the paper http://proceedings.mlr.press/v48/wangf16.pdf\"\"\"\n agent_name = \"Dueling DDQN\"\n\n def __init__(self, config):\n DDQN.__init__(self, config)\n self.q_network_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size + 1)\n self.q_network_optimizer = optim.Adam(self.q_network_local.parameters(), lr=self.hyperparameters[\"learning_rate\"], eps=1e-4)\n self.q_network_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size + 1)\n Base_Agent.copy_model_over(from_model=self.q_network_local, to_model=self.q_network_target)\n\n def pick_action(self, state=None):\n \"\"\"Uses the local Q network and an epsilon greedy policy to pick an action\"\"\"\n # PyTorch only accepts mini-batches and not single observations so we have to use unsqueeze to add\n # a \"fake\" dimension to make it a mini-batch rather than a single observation\n if state is None: state = self.state\n state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)\n if len(state.shape) < 2: state = state.unsqueeze(0)\n self.q_network_local.eval()\n with torch.no_grad():\n action_values = self.q_network_local(state)\n action_values = action_values[:, :-1] #because we treat the last output element as state-value and rest as advantages\n self.q_network_local.train()\n action = self.exploration_strategy.perturb_action_for_exploration_purposes({\"action_values\": action_values,\n \"turn_off_exploration\": self.turn_off_exploration,\n \"episode_number\": self.episode_number})\n return action\n\n def compute_q_values_for_next_states(self, next_states):\n \"\"\"Computes the q_values for next state we will use to create the loss to train the Q network. 
Double DQN\n uses the local index to pick the maximum q_value action and then the target network to calculate the q_value.\n The reasoning behind this is that it will help stop the network from overestimating q values\"\"\"\n max_action_indexes = self.q_network_local(next_states)[:, :-1].detach().argmax(1)\n duelling_network_output = self.q_network_target(next_states)\n q_values = self.calculate_duelling_q_values(duelling_network_output)\n Q_targets_next = q_values.gather(1, max_action_indexes.unsqueeze(1))\n return Q_targets_next\n\n def calculate_duelling_q_values(self, duelling_q_network_output):\n \"\"\"Calculates the q_values using the duelling network architecture. This is equation (9) in the paper\n referenced at the top of the class\"\"\"\n state_value = duelling_q_network_output[:, -1]\n avg_advantage = torch.mean(duelling_q_network_output[:, :-1], dim=1)\n q_values = state_value.unsqueeze(1) + (duelling_q_network_output[:, :-1] - avg_advantage.unsqueeze(1))\n return q_values\n\n def compute_expected_q_values(self, states, actions):\n \"\"\"Computes the expected q_values we will use to create the loss to train the Q network\"\"\"\n duelling_network_output = self.q_network_local(states)\n q_values = self.calculate_duelling_q_values(duelling_network_output)\n Q_expected = q_values.gather(1, actions.long())\n return Q_expected\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.concatenate",
"torch.cat"
],
[
"torch.no_grad",
"torch.mean",
"torch.from_numpy"
]
] |
JamzumSum/yNet
|
[
"78506738e64321cfd26f0af70a62dd2119948e39"
] |
[
"src/toynet/unet.py"
] |
[
"\"\"\"\nA torch implement for U-Net.\n\n* see: U-Net: Convolutional Networks for Biomedical Image Segmentation\n\n* author: JamzumSum\n* create: 2021-1-11\n\"\"\"\nfrom functools import partial\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom common.layers import BlurPool, MaxBlurPool2d, Swish\nfrom common.support import SelfInitialed\nfrom misc import CheckpointSupport\nfrom misc.decorators import autoPropertyClass\n\n\n@autoPropertyClass\nclass ChannelInference(nn.Module):\n ic: int\n oc: int\n\n def __init__(self, ic: int, oc: int):\n super().__init__()\n\n\nclass ChannelNorm(ChannelInference, nn.GroupNorm):\n def __init__(self, ic, channels=16, *args, **kwargs):\n nn.GroupNorm.__init__(self, max(1, ic // channels), ic, *args, **kwargs)\n ChannelInference.__init__(self, ic, ic)\n\n\ndef norm_layer(norm: str, ndim=2):\n return {\n \"batchnorm\": [nn.BatchNorm1d, nn.BatchNorm2d][ndim - 1],\n \"groupnorm\": ChannelNorm,\n \"none\": nn.Identity,\n }[norm]\n\n\nclass ParallelLayers(nn.Module):\n def __init__(self, layers: list, dim=1):\n super().__init__()\n for i, layer in enumerate(layers):\n self.add_module(f\"P{i}\", layer)\n self.layers = layers\n self.dim = dim\n\n def forward(self, X):\n r = [f(X) for f in self.layers]\n if self.dim is None: return r\n return torch.cat(r, dim=self.dim)\n\n\n@autoPropertyClass\nclass ConvStack2(ChannelInference):\n \"\"\"\n [N, ic, H, W] -> [N, oc, H, W]\n \"\"\"\n\n res: bool\n\n def __init__(self, ic, oc, *, res=False, norm=\"batchnorm\", padding_mode='same'):\n super().__init__(ic, oc)\n\n # nonlinear = Swish if ic < oc else nn.ReLU\n nonlinear = nn.PReLU\n bias = norm == \"none\"\n self.pad = {'same': 1, 'none': 0}[padding_mode]\n\n self.CBR = nn.Sequential(\n nn.Conv2d(ic, oc, 3, 1, self.pad, bias=bias),\n norm_layer(norm)(oc),\n nonlinear(),\n )\n self.CB = nn.Sequential(\n nn.Conv2d(oc, oc, 3, 1, self.pad, bias=bias),\n norm_layer(norm)(oc)\n )\n if res:\n self.downsample = (\n 
nn.Sequential(nn.Conv2d(ic, oc, 1, bias=bias),\n norm_layer(norm)(oc)) if ic != oc else nn.Identity()\n )\n\n def forward(self, X):\n r = self.CBR(X)\n if self.res:\n ds = self.downsample(X)\n if self.pad == 0: ds = ds[..., 2:-2, 2:-2]\n r = ds + self.CB(r)\n else:\n r = self.CB(r)\n return torch.relu(r)\n\n\nclass DownConv(ChannelInference):\n \"\"\"\n [N, C, H, W] -> [N, C, H//2, W//2]\n \"\"\"\n def __init__(self, ic, mode=\"maxpool\", blur=False):\n \"\"\"\n Args:\n ic (int): input channel\n mode (str, optional): `maxpool`/`avgpool`. Defaults to \"maxpool\".\n blur (str, optional): `none`. blur kernel before pooling.\n \"\"\"\n super().__init__(ic, ic)\n\n f = {\n (\"maxpool\", False): nn.MaxPool2d,\n (\"avgpool\", False): nn.AvgPool2d,\n ('maxpool', True): partial(MaxBlurPool2d, ic=ic),\n ('avgpool', True): partial(BlurPool, channels=ic),\n }[(mode, blur)]\n self.pool = f(kernel_size=2, stride=2)\n\n def forward(self, X):\n return self.pool(X)\n\n\nclass UpConv(ChannelInference, nn.Sequential):\n \"\"\"\n [N, C, H, W] -> [N, C//2, H*2, W*2]\n \"\"\"\n def __init__(self, ic, norm=\"batchnorm\", transConv=False):\n ChannelInference.__init__(self, ic, ic // 2)\n bias = norm == \"none\"\n\n if transConv:\n layers = [nn.ConvTranspose2d(ic, self.oc, 2, 2, bias=False)]\n else:\n # NOTE: Since 2x2 conv cannot be aligned when the shape is odd,\n # 0318: conv here is mainly object to reduce channel size. 
Hence use a conv1x1 instead.\n layers = [\n nn.Upsample(scale_factor=2, mode=\"bilinear\", align_corners=True),\n nn.Conv2d(ic, ic // 2, 1, bias=bias),\n ]\n layers.append(norm_layer(norm)(self.oc))\n nn.Sequential.__init__(self, *layers)\n\n def forward(self, X):\n return nn.Sequential.forward(self, X)\n\n\n@autoPropertyClass\nclass BareUNet(ChannelInference):\n \"\"\"\n [N, ic, H, W] -> [N, fc * 2^level, H, W], [N, fc, H, W]\n \"\"\"\n\n level: int\n fc: int\n cps: CheckpointSupport\n cat: bool\n backbone_only: bool\n\n def __init__(\n self,\n ic,\n level=4,\n fc=64,\n *,\n cps=None,\n residual=False,\n norm='batchnorm',\n transConv=False,\n padding_mode='none',\n antialias=True,\n backbone_only=False,\n cat=True,\n ):\n super().__init__(ic, fc * 2 ** level)\n uniarg = dict(res=residual, norm=norm, padding_mode=padding_mode)\n\n self.L1 = ConvStack2(ic, fc, **uniarg)\n cc = self.L1.oc\n\n for i in range(level):\n dsample = DownConv(cc, blur=antialias)\n cc = dsample.oc\n conv = ConvStack2(cc, cc * 2, **uniarg)\n cc = conv.oc\n self.add_module(f\"D{i + 1}\", dsample)\n self.add_module(f\"L{i + 2}\", conv)\n\n if backbone_only: return\n\n for i in range(level):\n usample = UpConv(cc, norm=norm, transConv=transConv)\n cc = usample.oc\n conv = ConvStack2(cc * 2 if self.cat else cc, cc, **uniarg)\n cc = conv.oc\n self.add_module(f\"U{i + 1}\", usample)\n self.add_module(f\"L{i + self.level + 2}\", conv)\n\n def add_module(self, name, model):\n return nn.Module.add_module(self, name, self.cps(model))\n\n def catoradd(self, X, Y):\n \"\"\"Crop X. 
Then cat X & Y or add them.\n\n Args:\n X (Tensor): [N, C, H, W]\n Y (Tensor): [N, C, H, W]\n\n Returns:\n Tensor: [N, 2C, H, W] if cat, else [N, C, H, W]\n \"\"\"\n top = (X.size(-2) - Y.size(-2)) // 2\n left = (X.size(-1) - Y.size(-1)) // 2\n\n X = X[..., top:top + Y.size(-2), left:left + Y.size(-1)]\n return torch.cat([X, Y], dim=1) if self.cat else X + Y\n\n def _L(self, i) -> ConvStack2:\n return self._modules[f\"L{i}\"]\n\n def _D(self, i) -> DownConv:\n return self._modules[f\"D{i}\"]\n\n def _U(self, i) -> UpConv:\n return self._modules[f\"U{i}\"]\n\n def forward(self, X, expand=True):\n \"\"\"\n X: [N, C, H, W]\n O: [N, fc, H, W], [N, oc, H, W]\n \"\"\"\n xn = [self.L1(X)]\n L = self.level\n\n for i in range(1, L + 1):\n xn.append(\n self._L(i + 1)(self._D(i)(xn[-1])) # [N, t * fc, H//t, W//t], t = 2^i\n )\n\n if not expand:\n return xn[L], None\n\n for i in range(L):\n xn.append(\n self._L(L + i + 2)(\n self.catoradd(\n xn[L - i - 1],\n self._U(i + 1)(xn[L + i]),\n ) # [N, t*fc, H//t, W//t], t = 2^(level - i - 1)\n )\n )\n\n return xn[L], xn[-1]\n\n\n@autoPropertyClass\nclass UNet(BareUNet):\n \"\"\"Add multiple parallel header along with original segment header.\n\n illustrate:\n finalx ---conv-> seg1 (original header)\n --tanh--conv--> seg2 (additional header 1)\n --tanh--conv--> seg3 (additional header 2)\n ...\n return:\n [bottomx, *header_outputs]\n e.g. 
bottomx, seg\n bottomx, seg1, add_seg1, add_seg2, ...\n \"\"\"\n oc: int\n\n def __init__(self, ic, oc, level=4, fc=64, *, headeroc=None, **kwargs):\n super().__init__(ic, level, fc, **kwargs)\n headers = [nn.Sequential(nn.Conv2d(fc, oc, 1), nn.Sigmoid())]\n if not self.backbone_only and headeroc:\n headers.extend(\n nn.Sequential(nn.Tanh(), nn.Conv2d(fc, oc, 1), nn.Sigmoid())\n for oc in headeroc\n )\n if not self.backbone_only:\n self.headers = ParallelLayers(headers, None)\n\n @staticmethod\n def padback(X, shape):\n top = shape[-2] - X.size(-2)\n left = shape[-1] - X.size(-1)\n\n return F.pad(X, [left // 2, left - left // 2, top // 2, top - top // 2])\n\n def forward(self, X, expand: bool = True) -> dict:\n assert not (expand and self.backbone_only)\n \n bottomx, finalx = super().forward(X, expand)\n d = {\"bottom\": bottomx}\n if not expand:\n return d\n\n d['seg'] = [self.padback(i, X.shape) for i in self.headers(finalx)]\n return d\n"
] |
[
[
"torch.cat",
"torch.nn.Identity",
"torch.relu",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.nn.ConvTranspose2d",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.Sequential.__init__",
"torch.nn.functional.pad",
"torch.nn.Sequential.forward"
]
] |
yesunhuang/QRNNs_Memory
|
[
"69adbdb2b580f38420854422fe9a2afcc7782386"
] |
[
"src/CExpFQC.py"
] |
[
"'''\nName: CExpFQC\nDesriptption: Full power with quantum counterpart sRNN\nEmail: yesunhuang@mail.ustc.edu.cn\nOpenSource: https://github.com/yesunhuang\nMsg: Experiment One\nAuthor: YesunHuang\nDate: 2022-04-17 20:40:50\n'''\n#import all the things we need\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport matplotlib.pyplot as plt\nimport torch\ndef transform(Xs):\n return [torch.squeeze(x) for x in Xs]\n#Some constants\nGENERATE_DATA=False\nTRAIN_NETWORK=True\nSAVE_NETWORK=True\nLOAD_NETWORK=False\nPREDICTION_TEST=True\n\nif __name__=='__main__':\n from DataGenerator.HenonMapDataGen import HenonMapDataGen\n from ClassicalModels.ClassicalSRNNs import ClassicalSRNN,SuportFunction\n from GradientFreeOptimizers.CostFunc import GradFreeMSELoss\n from GradientFreeOptimizers.Optimizers import MCSOptimizer\n import GradientFreeOptimizers.Helpers as hp\n\n #Save path:\nif __name__=='__main__':\n currentPath=os.getcwd()\n dataSavepath=os.path.join(currentPath,'data','HenonMap','Exp')\n netSavepath=os.path.join(currentPath,'TrainedNet','Exp')\n\nif __name__=='__main__':\n # Data Iter\n ## Parameters\n testSetRatio=0.2\n numStep=10\n batchSize=16\n filename='QExp1.csv'\n\n ## Generate Data\nif GENERATE_DATA and __name__=='__main__':\n hmap=HenonMapDataGen(savepath=dataSavepath)\n hmap(1000)\n hmap.save_to_CSV(filename)\n\nif __name__=='__main__': \n ## Read the data\n hmap=HenonMapDataGen(savepath=dataSavepath)\n hmap.read_from_CSV(filename)\n ## Get the Iter\n trainIter,testIter=hmap.get_data_iter(testSetRatio,numStep,batchSize,mask=0,shuffle=False)\n \n ## Print information\nif __name__=='__main__':\n print(hmap)\n X,Y=next(iter(trainIter))\n print('Train Data Size:',len(trainIter))\n X,Y=next(iter(testIter))\n print('Test Data Size:',len(testIter))\n\n # Load the network\nif LOAD_NETWORK and __name__=='__main__':\n filename='CExpFQC.pt'\n netData=torch.load(os.path.join(netSavepath,filename))\n\n inputSize=netData['inputSize']\n 
outputSize=netData['outputSize']\n hiddenSize=netData['hiddenSize']\n \n inputRatio=netData['inputRatio']\n outputRatio=netData['outputRatio']\n initValue=netData['initValue']\n \n inactive=netData['inactive']\n rescale=netData['rescale']\n\n isTypical=netData['isTypical']\n\nelif __name__=='__main__':\n # Model\n ## Parameters\n inputSize=outputSize=1\n hiddenSize=2\n initValue=1.0\n inputRatio=outputRatio=1.0\n rescale=1.0\n inactive=[]\n isTypical=False\n\nif __name__=='__main__':\n ## print parameters\n print('Input Ratio:',inputRatio)\n print('Output Ratio:',outputRatio)\n\nif __name__=='__main__':\n ## Get neccesary functions\n srnnTestSup=SuportFunction()\n #transform=lambda Xs:[torch.squeeze(x) for x in Xs]\n init_rnn_state=srnnTestSup.get_init_state_fun(initStateValue=initValue)\n get_params=srnnTestSup.get_get_params_fun(inputRatio=inputRatio,\\\n outputRatio=outputRatio,\\\n rescale=rescale,\\\n inactive=inactive)\n rnn=srnnTestSup.get_forward_fn_fun(isTypical=isTypical)\n predict_fun=srnnTestSup.get_predict_fun(outputTransoform=transform)\n\n net=ClassicalSRNN(inputSize,hiddenSize,outputSize,get_params,init_rnn_state,rnn)\n\nif LOAD_NETWORK and __name__=='__main__':\n net.params=netData['NetParams']\n net.constants=netData['NetConstants']\n\n## Test prediction\nif __name__=='__main__':\n state=net.begin_state(batchSize)\n Y,newState=net(X,state)\n print(Y.shape, len(newState), newState[0][0].shape)\n\nif not LOAD_NETWORK and not TRAIN_NETWORK:\n print('The network is not trained, are you sure to move on?')\n\n # Train the network\nif TRAIN_NETWORK and __name__=='__main__': \n ## Parameters\n if LOAD_NETWORK:\n print('Are you sure to train the trained network?')\n num_epochs=netData['OptimizerConstant']['num_epochs']\n maxLevyStepSize=netData['OptimizerConstant']['maxLevyStepSize']\n regular=netData['OptimizerConstant']['regular']\n nestNum=netData['OptimizerConstant']['nestNum']\n else:\n num_epochs= 300\n maxLevyStepSize=[0.3]*5\n regular=None\n 
nestNum=40\n step_epochs=5\n\n## Initial loss\nif __name__=='__main__':\n ## Loss function\n lossFunc=GradFreeMSELoss(net)\n if LOAD_NETWORK:\n l_epochs=netData['Loss']\n print(f'Saved Train Loss: {l_epochs[-1][0]:f}')\n print(f'Saved Test Loss: {l_epochs[-1][1]:f}')\n else:\n l_epochs=[]\n timer=hp.Timer()\n train_l=SuportFunction.evaluate_accuracy(net,trainIter,lossFunc,False)\n t1=timer.stop()\n timer.start()\n test_l=SuportFunction.evaluate_accuracy(net,testIter,lossFunc,False)\n t2=timer.stop()\n l_epochs.append([train_l,test_l])\n print(f'Initial Train Loss: {train_l:f}, Time Cost: {t1:f}s')\n print(f'Initial Test Loss: {test_l:f}, Time Cost: {t2:f}s')\n \n ## Training\nif TRAIN_NETWORK and __name__=='__main__':\n ## Optimizer\n mcs=MCSOptimizer(net.params,lossFunc,trainIter,nestNum=nestNum,\\\n maxLevyStepSize=maxLevyStepSize,regular=regular,\\\n randInit=True,epochToGeneration=lambda x:max(int(x/100),1))\n ## prediction\n predict = lambda prefix: predict_fun(prefix,net, numPreds=9)\n ## train and predict\n timer=hp.Timer()\n for epoch in range(num_epochs):\n trainLoss, _=mcs.step()\n testLoss=SuportFunction.evaluate_accuracy(net, testIter, lossFunc, False)\n if (epoch + 1) % step_epochs == 0:\n timeEpoch=timer.stop()\n print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {trainLoss:.4f}, Test Loss: {testLoss:.4f},\\\n Time: {timeEpoch:.4f}s') \n timer.start()\n l_epochs.append([trainLoss,testLoss])\n #scheduler.step()\n testLoss=SuportFunction.evaluate_accuracy(net, testIter, lossFunc, False)\n print(f'TestLoss {testLoss:f}')\n\n ## Save the network\nif SAVE_NETWORK and __name__=='__main__':\n ## Parameters\n filename='CExpFQC.pt'\n OptimizerConstant={'num_epochs':num_epochs,'maxLevyStepSize':maxLevyStepSize,\\\n 'nestNum':nestNum}\n netData={'NetParams':net.params,'NetConstants':net.constants,\\\n 'inputSize':inputSize,'hiddenSize':hiddenSize,'outputSize':outputSize,\\\n 'inputRatio':inputRatio,'outputRatio':outputRatio,'initValue':initValue,\\\n 
'inactive':inactive,'rescale':rescale,'isTypical':isTypical,\\\n 'Loss':l_epochs,'OptimizerConstant':OptimizerConstant}\n torch.save(netData,os.path.join(netSavepath,filename))\n\nif PREDICTION_TEST and __name__=='__main__':\n # Prediction\n ## One-step prediction\n X,Y=next(iter(testIter))\n state=net.begin_state(batchSize)\n Y_hat,newState=net(X,state)\n Y=Y.transpose(0,1).reshape([-1,Y.shape[-1]])\n\n axes,fig=plt.subplots(1,1,figsize=(4,3))\n plt.title('One-Step Prediction')\n plt.plot(torch.linspace(1,Y.numel(),Y.numel()),torch.squeeze(Y),label='Y')\n plt.plot(torch.linspace(1,Y.numel(),Y.numel()),torch.squeeze(Y_hat).detach(),label=r'$\\hat{Y}$')\n plt.legend()\n plt.show()\n\n ## Multi Step Prediction\n prefixSize=10\n totalSize=20\n testShift=int(len(hmap)*(1-testSetRatio))\n preX,preY=hmap.data_as_tensor\n preX,preY=torch.unsqueeze(preX[testShift:testShift+prefixSize],-1),torch.unsqueeze(preY[testShift:testShift+totalSize-1],-1)\n preY=[y for y in torch.cat((preX[:2],preY[1:]),dim=0)]\n preX=torch.unsqueeze(preX,-1)\n YHat=predict_fun(preX,net,numPreds=totalSize-prefixSize)\n\n axes,fig=plt.subplots(1,1,figsize=(4,3))\n plt.title('Multi-Step Prediction')\n fig.set_ylim(-2,2)\n plt.plot(torch.linspace(1,len(preY),len(preY)),preY,label='Y')\n plt.plot(torch.linspace(1,len(preY),len(preY)),YHat,label=r'$\\hat{Y}$')\n plt.vlines([prefixSize-1],ymin=-2,ymax=2,linestyles='dashed',label='Prediction')\n plt.legend()\n plt.show()\n\n\n\n"
] |
[
[
"torch.cat",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.subplots",
"torch.unsqueeze",
"torch.squeeze",
"matplotlib.pyplot.show"
]
] |
nfrumkin/GNN_Workload_Characterization
|
[
"7a329c2e94b5996af35f3a46179a2070e6a43c9b"
] |
[
"hgcn/test.py"
] |
[
"from __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport json\nimport logging\nimport os\nimport pickle\nimport time\n\nimport numpy as np\nimport optimizers\nimport torch\nfrom config import parser\nfrom models.base_models import NCModel, LPModel\nfrom utils.data_utils import load_data\nfrom utils.train_utils import get_dir_name, format_metrics\n\nimport torch.cuda.profiler as profiler\n\n\ndef test(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if int(args.double_precision):\n torch.set_default_dtype(torch.float64)\n if int(args.cuda) >= 0:\n torch.cuda.manual_seed(args.seed)\n args.device = 'cuda:' + str(args.cuda) if int(args.cuda) >= 0 else 'cpu'\n args.patience = args.epochs if not args.patience else int(args.patience)\n logging.getLogger().setLevel(logging.INFO)\n if args.save:\n if not args.save_dir:\n dt = datetime.datetime.now()\n date = f\"{dt.year}_{dt.month}_{dt.day}\"\n models_dir = os.path.join(os.environ['LOG_DIR'], args.task, date)\n save_dir = get_dir_name(models_dir)\n else:\n save_dir = args.save_dir\n logging.basicConfig(level=logging.INFO,\n handlers=[\n logging.FileHandler(os.path.join(save_dir, 'log.txt')),\n logging.StreamHandler()\n ])\n\n logging.info(f'Using: {args.device}')\n logging.info(\"Using seed {}.\".format(args.seed))\n\n # Load data\n data = load_data(args, os.path.join(os.environ['DATAPATH'], args.dataset))\n args.n_nodes, args.feat_dim = data['features'].shape\n if args.task == 'nc':\n Model = NCModel\n args.n_classes = int(data['labels'].max() + 1)\n logging.info(f'Num classes: {args.n_classes}')\n else:\n args.nb_false_edges = len(data['train_edges_false'])\n args.nb_edges = len(data['train_edges'])\n if args.task == 'lp':\n Model = LPModel\n else:\n Model = RECModel\n # No validation for reconstruction task\n args.eval_freq = args.epochs + 1\n\n if not args.lr_reduce_freq:\n args.lr_reduce_freq = args.epochs\n\n # Model and optimizer\n model = Model(args)\n 
checkpoint_path=\"hgcn_chkpt/model.pth\"\n model.load_state_dict(torch.load(checkpoint_path))\n logging.info(str(model))\n optimizer = getattr(optimizers, args.optimizer)(params=model.parameters(), lr=args.lr,\n weight_decay=args.weight_decay)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer,\n step_size=int(args.lr_reduce_freq),\n gamma=float(args.gamma)\n )\n tot_params = sum([np.prod(p.size()) for p in model.parameters()])\n logging.info(f\"Total number of parameters: {tot_params}\")\n if args.cuda is not None and int(args.cuda) >= 0 :\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)\n model = model.to(args.device)\n for x, val in data.items():\n if torch.is_tensor(data[x]):\n data[x] = data[x].to(args.device)\n\n if len(args.time_file) == 0: \n model.eval() # set evaluation mode\n embeddings = model.encode(data['features'], data['adj_train_norm'])\n val_metrics = model.compute_metrics(embeddings, data, 'val')\n else:\n n_warmup = 50\n n_sample = 50\n model.eval() # set evaluation mode\n print(\"=== Running Warmup Passes\")\n for i in range(0,n_warmup):\n embeddings = model.encode(data['features'], data['adj_train_norm'])\n val_metrics = model.compute_metrics(embeddings, data, 'val')\n\n print(\"=== Collecting Runtime over \", str(n_sample), \" Passes\")\n tic = time.perf_counter()\n for i in range(0,n_sample):\n embeddings = model.encode(data['features'], data['adj_train_norm'])\n val_metrics = model.compute_metrics(embeddings, data, 'val')\n toc = time.perf_counter()\n avg_runtime = float(toc - tic)/n_sample\n print(\"average runtime = \", avg_runtime)\n\n # write runtime to file\n f = open(args.time_file, \"w\")\n f.write(str(avg_runtime)+\"\\n\")\n f.close()\n\nif __name__ == '__main__':\n parser.add_argument('--time_file', type=str, default='', help='timing output file')\n args = parser.parse_args()\n profiler.start()\n test(args)\n profiler.stop()\n"
] |
[
[
"torch.cuda.manual_seed",
"torch.cuda.profiler.stop",
"numpy.random.seed",
"torch.is_tensor",
"torch.manual_seed",
"torch.cuda.profiler.start",
"torch.load",
"torch.set_default_dtype"
]
] |
nitram-bot/fhnw_lecture
|
[
"201009fdb695d52db5aa44d8d0e80b52cf9db6c0"
] |
[
"scripts/animated_k_means.py"
] |
[
"#%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn; seaborn.set() # for plot styling\nimport numpy as np\nfrom ipywidgets import interact\nfrom sklearn.metrics import pairwise_distances_argmin\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.datasets.samples_generator import make_blobs\nimport matplotlib.gridspec as gridspec\nfrom sklearn.cluster import KMeans\nfrom itertools import chain\nimport matplotlib.animation as animation \nfrom IPython.display import HTML\nfrom scipy.spatial import distance\n\n# make data\nX, y = make_blobs(n_samples=300, centers=4,\n random_state=0, cluster_std=0.60)\n\n\n# initialize centers\ncenters = np.array([[min(X[:, 0]) + np.random.rand(1)[0], min(X[:, 1]) + np.random.rand(1)[0]],\n [min(X[:, 0]) + np.random.rand(1)[0], min(X[:, 1]) + np.random.rand(1)[0]], \n [min(X[:, 0]) + np.random.rand(1)[0], min(X[:, 1]) + np.random.rand(1)[0]], \n [min(X[:, 0]) + np.random.rand(1)[0], min(X[:, 1]) + np.random.rand(1)[0]]])\n\nclf = NearestCentroid()\nclf.fit(centers, np.array([0, 1, 2, 3]))\nlabels = clf.predict(X)\ninertia = [np.sum([distance.euclidean(x , centers[i]) for i in np.unique(labels) for x in X[labels == i]])]\n\nkm = KMeans(n_clusters = 4,\\\n init = centers,\\\n n_init=1, max_iter=1).fit(X)\n\ncenters = [centers]\nlabels = [labels]\nwhile inertia[-1] != km.inertia_:\n inertia.append(km.inertia_)\n centers.append(km.cluster_centers_)\n labels.append(km.labels_)\n km = KMeans(n_clusters = 4,\\\n init = km.cluster_centers_,\\\n n_init=1, max_iter=1).fit(X)\n \n\nfig = plt.figure(figsize=(12, 9))\nG = gridspec.GridSpec(1, 3) \naxes_1 = plt.subplot(G[0, 0])\naxes_1.set_xlabel('iteration')\naxes_1.set_ylabel('sum of squared dists')\naxes_1.set_title('reduction in within cluster variance')\naxes_1.set_xlim([-0.5, len(labels) + 0.5])\naxes_1.set_ylim([min(inertia) -50, max(inertia) + 50])\n#pl.xticks(np.arange(0, n_estimators, 1.0))\naxes_2 = plt.subplot(G[0, 1:3])\naxes_2.set_xlim([min(X[:,0]) - 0.2, 
max(X[:, 0]) + 0.2])\naxes_2.set_ylim([min(X[:,1])- 0.2, max(X[:, 1]) + 0.2])\nmycmap=plt.cm.Paired\ncolors = [np.array([mycmap(1)]), np.array([mycmap(10)]), np.array([mycmap(2)]), np.array([mycmap(20)])]\n\n\n#plot_step_of_k_means(labels[0], centers[0])\n\n# muss mit 1 starten\ndef run(j):\n idx0 = np.where(labels[j]== 0)[0]\n idx1 = np.where(labels[j]== 1)[0]\n idx2 = np.where(labels[j]== 2)[0]\n idx3 = np.where(labels[j]== 3)[0]\n\n axes_2.scatter(X[idx0, 0], X[idx0,1], marker = 'x', c=colors[0], edgecolors = colors[0])\n axes_2.scatter(X[idx1, 0], X[idx1,1], marker = 'x', c=colors[1], edgecolors = colors[1])\n axes_2.scatter(X[idx2, 0], X[idx2,1], marker = 'x', c=colors[2], edgecolors = colors[2])\n axes_2.scatter(X[idx3, 0], X[idx3,1], marker = 'x', c=colors[3], edgecolors = colors[3])\n\n if j == 0:\n axes_2.scatter(centers[j][:, 0], centers[j][:, 1], marker= 'o',\\\n c = np.array(colors).reshape((4, 4)), edgecolors = 'blue', s=80)\n axes_1.plot([0, inertia[j]], 'o')\n else:\n axes_1.plot([j-1, j], [inertia[j-1], inertia[j]], '-bo')\n for i in range(len(colors)):\n axes_2.plot([centers[j-1][i][0], centers[j][i][0]],\\\n [centers[j-1][i][1], centers[j][i][1]], '-bo',\\\n color = colors[i][0])\n axes_2.scatter(centers[j][:, 0], centers[j][:, 1], marker= 'o',\\\n c = np.array(colors).reshape((4, 4)), edgecolors = 'blue', s=80)\n\n\n\ndef init():\n return[]\n \nani = animation.FuncAnimation(fig, func = run, init_func = init, frames = np.arange(1, len(labels)),\n interval = 200, blit = False)\n\nani.save('/home/martin/k-means.gif', writer = 'imagemagick', fps = 2)\n"
] |
[
[
"numpy.array",
"scipy.spatial.distance.euclidean",
"numpy.random.rand",
"sklearn.cluster.KMeans",
"sklearn.neighbors.NearestCentroid",
"sklearn.datasets.samples_generator.make_blobs",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.unique",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplot"
]
] |
bentotten/gamma-spectra_denoising
|
[
"42cf6791e2399475666f4308526f574fed59763b"
] |
[
"load_data.py"
] |
[
"import sys\nimport time\nimport h5py\nimport argparse\nimport numpy as np\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\n\nfrom spectra_utils import split_radionuclide_name, plot_data\n\n\ndef load_data(datafile, det, show_data=False):\n with h5py.File(datafile, 'r') as h5f:\n assert h5f[det][\"spectrum\"].shape == h5f[det][\"noisy_spectrum\"].shape, f'Mismatch between training examples and target examples'\n dataset = {\"name\": h5f[det][\"name\"][()], \"keV\": h5f[det][\"keV\"][()], \"spectrum\": h5f[det][\"spectrum\"][()], \\\n \"noisy_spectrum\": h5f[det][\"noisy_spectrum\"][()], \"noise\": h5f[det][\"noise\"][()], \\\n \"compton_scale\": h5f[det][\"compton_scale\"][()], \"noise_scale\": h5f[det][\"noise_scale\"][()]}\n if show_data:\n plot_data(dataset)\n\n return dataset\n\ndef dataset_stats(dataset, det):\n print(f'Dataset {det}:')\n print(f'\\tfeatures: {dataset[\"keV\"].shape}')\n print(f'\\tclean spectra: {dataset[\"spectrum\"].shape}')\n print(f'\\tnoisy spectra: {dataset[\"noisy_spectrum\"].shape}')\n print(f'\\tnoise: {dataset[\"noise\"].shape}')\n print(f'\\tmin Compton scale: {np.min(dataset[\"compton_scale\"])}')\n print(f'\\tmax Compton scale: {np.max(dataset[\"compton_scale\"])}')\n print(f'\\tmin Noise scale: {np.min(dataset[\"noise_scale\"])}')\n print(f'\\tmax Noise scale: {np.max(dataset[\"noise_scale\"])}')\n\n noisy_spectra = dataset['noisy_spectrum']\n clean_spectra = dataset['spectrum']\n\n min_psnr = 9999.0\n max_psnr = 0.0\n for clean, noisy in zip(clean_spectra, noisy_spectra):\n noisy_psnr = psnr(clean, noisy)\n if noisy_psnr < min_psnr:\n min_psnr = noisy_psnr\n if noisy_psnr > max_psnr:\n max_psnr = noisy_psnr\n\n print(f'\\tmax PSNR {max_psnr:.2f} dB')\n print(f'\\tmin PSNR {min_psnr:.2f} dB')\n\n\ndef main():\n start = time.time()\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-df\", \"--datafile\", help=\"data file containing templates\", default=\"data/training.h5\")\n parser.add_argument(\"-det\", 
\"--dettype\", help=\"detector type\", default=\"HPGe\")\n parser.add_argument(\"-sf\", \"--showfigs\", help=\"saves plots of data\", default=False, action=\"store_true\")\n arg = parser.parse_args()\n\n print(f'Loading data set from {arg.datafile}')\n dataset = load_data(arg.datafile, arg.dettype.upper(), show_data=arg.showfigs)\n\n print(f'{len(dataset[\"name\"])} examples in dataset.')\n\n dataset_stats(dataset, arg.dettype)\n\n print(f'\\nScript completed in {time.time()-start:.2f} secs')\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n"
] |
[
[
"numpy.max",
"numpy.min"
]
] |
pennucci/enterprise
|
[
"24b46116b63d2ef76e0f4132830d17dec575f8a3"
] |
[
"enterprise/signals/selections.py"
] |
[
"# selections.py\n\"\"\"Contains various selection functions to mask parameters by backend flags,\ntime-intervals, etc.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport functools\nimport inspect\n\nimport numpy as np\n\n\ndef call_me_maybe(obj):\n \"\"\"See `here`_ for description.\n\n .. _here: https://www.youtube.com/watch?v=fWNaR-rxAic\n \"\"\"\n return obj() if hasattr(obj, \"__call__\") else obj\n\n\ndef selection_func(func):\n try:\n funcargs = inspect.getfullargspec(func).args\n except:\n funcargs = inspect.getargspec(func).args\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n targs = list(args)\n\n # check for mask\n mask = kwargs.get(\"mask\", Ellipsis)\n if \"mask\" in kwargs:\n del kwargs[\"mask\"]\n\n if len(targs) < len(funcargs) and \"psr\" in kwargs:\n psr = kwargs[\"psr\"]\n\n for funcarg in funcargs[len(args) :]:\n if funcarg not in kwargs and hasattr(psr, funcarg):\n attr = call_me_maybe(getattr(psr, funcarg))\n if isinstance(attr, np.ndarray) and getattr(mask, \"shape\", [0])[0] == len(attr):\n targs.append(attr[mask])\n else:\n targs.append(attr)\n\n if \"psr\" in kwargs and \"psr\" not in funcargs:\n del kwargs[\"psr\"]\n\n return func(*targs, **kwargs)\n\n return wrapper\n\n\ndef Selection(func):\n \"\"\"Class factory for TOA selection.\"\"\"\n\n class Selection(object):\n def __init__(self, psr):\n self._psr = psr\n\n @property\n def masks(self):\n return selection_func(func)(psr=self._psr)\n\n def _get_masked_array_dict(self, masks, arr):\n return {key: val * arr for key, val in masks.items()}\n\n def __call__(self, parname, parameter, arr=None):\n params, kmasks = {}, {}\n for key, val in self.masks.items():\n kname = \"_\".join([key, parname]) if key else parname\n pname = \"_\".join([self._psr.name, kname])\n params.update({kname: parameter(pname)})\n kmasks.update({kname: val})\n\n if arr is not None:\n ma = self._get_masked_array_dict(kmasks, arr)\n ret = (params, ma)\n 
else:\n ret = params, kmasks\n return ret\n\n return Selection\n\n\n# SELECTION FUNCTIONS\n\n\ndef cut_half(toas):\n \"\"\"Selection function to split by data segment\"\"\"\n midpoint = (toas.max() + toas.min()) / 2\n return dict(zip([\"t1\", \"t2\"], [toas <= midpoint, toas > midpoint]))\n\n\ndef by_band(flags):\n \"\"\"Selection function to split by PPTA frequency band under -B flag\"\"\"\n flagvals = np.unique(flags[\"B\"])\n return {val: flags[\"B\"] == val for val in flagvals}\n\n\ndef by_frontend(flags):\n \"\"\"Selection function to split by frontend under -fe flag\"\"\"\n flagvals = np.unique(flags[\"fe\"])\n return {val: flags[\"fe\"] == val for val in flagvals}\n\n\ndef by_backend(backend_flags):\n \"\"\"Selection function to split by backend flags.\"\"\"\n flagvals = np.unique(backend_flags)\n return {val: backend_flags == val for val in flagvals}\n\n\ndef nanograv_backends(backend_flags):\n \"\"\"Selection function to split by NANOGRav backend flags only.\"\"\"\n flagvals = np.unique(backend_flags)\n ngb = [\"ASP\", \"GASP\", \"GUPPI\", \"PUPPI\"]\n flagvals = [val for val in flagvals if any([b in val for b in ngb])]\n return {val: backend_flags == val for val in flagvals}\n\n\ndef no_selection(toas):\n \"\"\"Default selection with no splitting.\"\"\"\n return {\"\": np.ones_like(toas, dtype=bool)}\n"
] |
[
[
"numpy.ones_like",
"numpy.unique"
]
] |
arashbm/lattice
|
[
"e71587ab037ec417e8f6ef75cfadcb5d0ea11821"
] |
[
"plot_ratios.py"
] |
[
"import fileinput\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nselected_dim = 1\np_crit = 0.644701\n\nsizes = {}\nfor line in fileinput.input():\n pc, dims, seed, size, mass, lt, volume = \\\n [float(i.strip()) for i in line.split()]\n\n if dims == selected_dim:\n nodes = int(size**dims)\n if nodes not in sizes:\n sizes[nodes] = {}\n if pc not in sizes[nodes]:\n sizes[nodes][pc] = {\"mass\": [], \"lt\": [], \"volume\": []}\n\n sizes[nodes][pc][\"mass\"].append(int(mass))\n sizes[nodes][pc][\"volume\"].append(int(volume))\n sizes[nodes][pc][\"lt\"].append(int(lt))\n\n\nmass_fig, mass_ax = plt.subplots()\nlt_fig, lt_ax = plt.subplots()\nvolume_fig, volume_ax = plt.subplots()\n\nn = [(size, size//2) for size in sizes if size//2 in sizes]\nfor s1, s2 in n:\n pcs = sorted(set(sizes[s1]) & set(sizes[s2]))\n mass_ax.plot(pcs,\n [np.mean(sizes[s1][pc][\"mass\"])/np.mean(sizes[s2][pc][\"mass\"])\n for pc in pcs],\n label=f\"N={s1}\")\n lt_ax.plot(pcs,\n [np.mean(sizes[s1][pc][\"lt\"])/np.mean(sizes[s2][pc][\"lt\"])\n for pc in pcs],\n label=f\"N={s1}\")\n volume_ax.plot(pcs,\n [np.mean(sizes[s1][pc][\"volume\"])/np.mean(sizes[s2][pc][\"volume\"])\n for pc in pcs],\n label=f\"N={s1}\")\n\nmass_ax.axvline(p_crit, label=f\"$p_c={p_crit}$\", ls='--')\nmass_ax.set_yscale(\"log\")\nmass_ax.legend()\nmass_fig.savefig(\"figures/ratios-mass.pdf\")\n\nlt_ax.axvline(p_crit, label=f\"$p_c={p_crit}$\", ls='--')\nlt_ax.set_yscale(\"log\")\nlt_ax.legend()\nlt_fig.savefig(\"figures/ratios-lt.pdf\")\n\nvolume_ax.axvline(p_crit, label=f\"$p_c={p_crit}$\", ls='--')\nvolume_ax.set_yscale(\"log\")\nvolume_ax.legend()\nvolume_fig.savefig(\"figures/ratios-volume.pdf\")\n"
] |
[
[
"numpy.mean",
"matplotlib.pyplot.subplots"
]
] |
huzq85/tensorflow-fork
|
[
"92f1dd09bef516a6eb0ad6be6833f28785ef2be8",
"92f1dd09bef516a6eb0ad6be6833f28785ef2be8"
] |
[
"tensorflow/python/checkpoint/checkpoint_view.py",
"tensorflow/cc/saved_model/testdata/generate_saved_models.py"
] |
[
"\"\"\"Manages a Checkpoint View.\"\"\"\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom tensorflow.core.protobuf import trackable_object_graph_pb2\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.trackable import base\nfrom tensorflow.python.training import py_checkpoint_reader\n\n\nclass CheckpointView(object):\n \"\"\"Gathers and serializes a checkpoint view.\"\"\"\n\n def __init__(self, checkpoint_path):\n \"\"\"Configure the trackable view.\n\n Args:\n checkpoint_path: The path to the checkpoint.\n\n Raises:\n ValueError: If an object graph was not found in the checkpoint.\n \"\"\"\n reader = py_checkpoint_reader.NewCheckpointReader(checkpoint_path)\n try:\n object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)\n except errors_impl.NotFoundError as not_found_error:\n raise ValueError(\n f\"The specified checkpoint \\\"{checkpoint_path}\\\" does not appear to be \"\n \"object-based (saved with TF2) since it is missing the key \"\n f\"\\\"{base.OBJECT_GRAPH_PROTO_KEY}\\\". 
Likely it was created with the \"\n \"TF1 name-based saver and does not contain an object dependency graph.\"\n ) from not_found_error\n object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())\n object_graph_proto.ParseFromString(object_graph_string)\n self._object_graph_proto = object_graph_proto\n\n def children(self, node_id):\n \"\"\"Returns all child trackables attached to obj.\n\n Args:\n node_id: Id of the node to return its children.\n\n Returns:\n Dictionary of all children attached to the object with name to node_id.\n \"\"\"\n return {\n child.local_name: child.node_id\n for child in self._object_graph_proto.nodes[node_id].children\n }\n\n def descendants(self):\n \"\"\"Returns a list of all node_ids from ObjectGraphProto.\"\"\"\n all_nodes = []\n for node in list(self._object_graph_proto.nodes):\n for child in list(node.children):\n all_nodes.append(child.node_id)\n return all_nodes\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Standalone utility to generate some test saved models.\"\"\"\n\nimport os\n\nfrom absl import app\n\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.compat import v2_compat\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import save_options\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.trackable import asset\n\n\nclass VarsAndArithmeticObjectGraph(module.Module):\n \"\"\"Three vars (one in a sub-module) and compute method.\"\"\"\n\n def __init__(self):\n self.x = variables.Variable(1.0, name=\"variable_x\")\n self.y = variables.Variable(2.0, name=\"variable_y\")\n self.child = module.Module()\n self.child.z = variables.Variable(3.0, name=\"child_variable\")\n self.child.c = ops.convert_to_tensor(5.0)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec((), dtypes.float32),\n 
tensor_spec.TensorSpec((), dtypes.float32)\n ])\n def compute(self, a, b):\n return (a + self.x) * (b + self.y) / (self.child.z) + self.child.c\n\n\nclass ReferencesParent(module.Module):\n\n def __init__(self, parent):\n super(ReferencesParent, self).__init__()\n self.parent = parent\n self.my_variable = variables.Variable(3., name=\"MyVariable\")\n\n\n# Creates a cyclic object graph.\nclass CyclicModule(module.Module):\n\n def __init__(self):\n super(CyclicModule, self).__init__()\n self.child = ReferencesParent(self)\n\n\nclass AssetModule(module.Module):\n\n def __init__(self):\n self.asset = asset.Asset(\n test.test_src_dir_path(\"cc/saved_model/testdata/test_asset.txt\"))\n\n @def_function.function(input_signature=[])\n def read_file(self):\n return io_ops.read_file(self.asset)\n\n\nclass StaticHashTableModule(module.Module):\n \"\"\"A module with an Asset, StaticHashTable, and a lookup function.\"\"\"\n\n def __init__(self):\n self.asset = asset.Asset(\n test.test_src_dir_path(\n \"cc/saved_model/testdata/static_hashtable_asset.txt\"))\n self.table = lookup_ops.StaticHashTable(\n lookup_ops.TextFileInitializer(self.asset, dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER),\n -1)\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(shape=None, dtype=dtypes.string)])\n def lookup(self, word):\n return self.table.lookup(word)\n\n\ndef get_simple_session():\n ops.disable_eager_execution()\n sess = session_lib.Session()\n variables.Variable(1.)\n sess.run(variables.global_variables_initializer())\n return sess\n\n\nMODULE_CTORS = {\n \"VarsAndArithmeticObjectGraph\": (VarsAndArithmeticObjectGraph, 2),\n \"CyclicModule\": (CyclicModule, 2),\n \"AssetModule\": (AssetModule, 2),\n \"StaticHashTableModule\": (StaticHashTableModule, 2),\n \"SimpleV1Model\": (get_simple_session, 1)\n}\n\n\ndef main(args):\n if len(args) != 3:\n print(\"Expected: {export_path} {ModuleName}\")\n print(\"Allowed 
ModuleNames:\", MODULE_CTORS.keys())\n return 1\n\n _, export_path, module_name = args\n module_ctor, version = MODULE_CTORS.get(module_name)\n if not module_ctor:\n print(\"Expected ModuleName to be one of:\", MODULE_CTORS.keys())\n return 2\n os.makedirs(export_path)\n\n tf_module = module_ctor()\n if version == 2:\n options = save_options.SaveOptions(save_debug_info=True)\n saved_model.save(tf_module, export_path, options=options)\n else:\n builder = saved_model.builder.SavedModelBuilder(export_path)\n builder.add_meta_graph_and_variables(tf_module, [\"serve\"])\n builder.save()\n\n\nif __name__ == \"__main__\":\n v2_compat.enable_v2_behavior()\n app.run(main)\n"
] |
[
[
"tensorflow.core.protobuf.trackable_object_graph_pb2.TrackableObjectGraph",
"tensorflow.python.training.py_checkpoint_reader.NewCheckpointReader"
],
[
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"tensorflow.python.platform.test.test_src_dir_path",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.client.session.Session",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.saved_model.saved_model.builder.SavedModelBuilder",
"tensorflow.python.ops.io_ops.read_file",
"tensorflow.python.saved_model.save_options.SaveOptions",
"tensorflow.python.ops.lookup_ops.TextFileInitializer",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.saved_model.saved_model.save",
"tensorflow.python.module.module.Module",
"tensorflow.python.framework.ops.disable_eager_execution"
]
] |
singhst/web-scraping-tv-movie
|
[
"1942cad5f631424dccc4a0c89fc346497ce9854d"
] |
[
"(useless)combine-and-clean-csv.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 12 06:56:16 2019\n@author: Chris\n\nhttps://github.com/ekapope/Combine-CSV-files-in-the-folder/blob/master/Combine_CSVs.py\n\"\"\"\n#credited:\n#https://stackoverflow.com/questions/9234560/find-all-csv-files-in-a-directory-using-python/12280052\n\nimport os\nimport glob\nfrom datetime import date\nimport pandas as pd\n\n\ndef cleanDf(df: pd.DataFrame):\n \"\"\"\n The following are the instructions of the cleansing process:\n 1. `7th` column ==> `IMDB Score` as column name\n 2. `8th` column ==> `Reelgood Rating Score` as column name\n 3. `1st`, `11th` column ==> Remove, useless\n 4. `2nd`, `4th` & `10th` columns ==> Remove, empty columns\n \"\"\"\n\n # df.columns.values[6] = 'IMDB Score'\n df = df.rename(columns={df.columns[6]: 'IMDB Score'})\n \n # df.columns.values[7] = 'Reelgood Rating Score'\n df = df.rename(columns={df.columns[7]: 'Reelgood Rating Score'})\n \n df.drop(df.columns[[0,10]], axis=1, inplace=True)\n\n df.replace(\"\", float(\"NaN\"), inplace=True)\n df.dropna(how='all', axis=1, inplace=True)\n \n return df\n\n\ndef combineCsv():\n #set working directory\n dir = os.path.join(os.getcwd(), parent_folder)\n dir = os.path.join(dir, folder_name)\n\n os.chdir(dir)\n\n #find all csv files in the folder\n #use glob pattern matching -> extension = 'csv'\n #save result in list -> all_filenames\n extension = 'csv'\n all_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n print(all_filenames)\n\n #combine all files in the list\n combined_csv = pd.concat([cleanDf(pd.read_csv(f)) for f in all_filenames])\n \n #export to csv\n os.chdir(\"..\") #change dir to parent folder `/reelgood-database`\n dir = os.getcwd()\n file_name = f\"all-{movie_or_tv}.csv\"\n combined_csv.to_csv(file_name, index=False, encoding='utf-8-sig')\n print(f\"> export '{dir}/{file_name}'\")\n \n dir = os.path.join(dir, backup_folder) #change dir to folder `/reelgood-database/(backup)`\n os.chdir(dir)\n dir = os.getcwd()\n today = 
date.today()\n file_name = f\"all-{movie_or_tv}-{today}.csv\"\n combined_csv.to_csv(file_name, index=False, encoding='utf-8-sig')\n print(f\"> export '{os.getcwd()}/{file_name}'\")\n\n return combined_csv\n\n\ndef test():\n #set working directory\n dir = os.path.join(os.getcwd(), 'reelgood-database')\n dir = os.path.join(dir, folder_name)\n\n os.chdir(dir)\n\n #find all csv files in the folder\n #use glob pattern matching -> extension = 'csv'\n #save result in list -> all_filenames\n extension = 'csv'\n all_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n print(all_filenames[0])\n\n df = pd.read_csv(all_filenames[0])\n print(df.head())\n\n df = cleanDf(df)\n print(df.head())\n\n\nif __name__ == \"__main__\":\n \n # test()\n \n # folder_name = 'tv'\n folder_name = 'movies'\n\n movie_or_tv = folder_name\n parent_folder = 'reelgood-database'\n backup_folder = '(backup)'\n \n df = combineCsv()\n print(df.head())\n print(df.shape)"
] |
[
[
"pandas.read_csv"
]
] |
JakubMroz4/PythonUiS
|
[
"fc4f8ff15f280e07e21d55df31f9dfb81cf5a6cd"
] |
[
"Oving4/samlingobjekter/demo_numpy_arrays.py"
] |
[
"import numpy as np\n\n# Lager en array med 10 0-ere\narray = np.zeros(10)\n\n# Kan indeksere og tilordne som ei liste, men kan ikke legge til elementer da\n# numpy arrays har en fast størrelse, definert nbår du lager den\narray[5] = 3\n\n# Lager en array med elementene fra og med 0, til men ikke med 10, og med\n# 0.2 mellom hvert element\narray2 = np.arange(0, 10, 0.2)\n\nprint(array)\nprint(array2)\n\n# LAger en 4*3 matrise gjennom å oppgi et tuppel med to elementer i stedet for\n# en enkeltverdi\nmatrise = np.zeros((4, 3))\n\nprint(matrise)\n\n# Kan gjøre matematiske operasjoner på numpy arrays:\nprint(array2 + 10)\nprint(array2 * 5)\n\n# Numpy inneholder matematiske funksjoner som opererer element-for-element\n# på numpy arrays. Eksempel sinus:\nprint(np.sin(array2))\n\n# Kan summere og multiplisere to arrays med samme dimensjoner, dette gjøres\n# element for element.\n\n# Vanlig matrisemultiplikasjon gjøres med @ operatoren på numpy arrays\n"
] |
[
[
"numpy.sin",
"numpy.arange",
"numpy.zeros"
]
] |
noisyoscillator/Patterns-in-Real-World-Time-Series-Data
|
[
"1bf203a238be0cfbde42e9f65b3071ee9fab5466"
] |
[
"brownianMotion.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 13 14:22:57 2022\n\n@author: kamaludindingle\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef BrownianMotion(years=15,record=1):\n #Brownian motion for (10) years, recorded every \"record\" years\n Gaussian = np.random.randn(years)\n Bm = np.cumsum(Gaussian)# Brownian motion\n if record==1:\n SaveBm = Bm\n elif record!=1:\n SaveBm = [Bm[j] for j in range(len(Bm)) if (j%record)==0]\n\n return SaveBm"
] |
[
[
"numpy.random.randn",
"numpy.cumsum"
]
] |
keithmgould/deep-rl
|
[
"ca87f50603b3e1646e2194a9b82cb3a8d6a75fc3"
] |
[
"ddpg/ddpg.py"
] |
[
"\"\"\"\nImplementation of DDPG - Deep Deterministic Policy Gradient\n\nAlgorithm and hyperparameter details can be found here:\n http://arxiv.org/pdf/1509.02971v2.pdf\n\nThe algorithm is tested on the Pendulum-v0 OpenAI gym task\nand developed with tflearn + Tensorflow\n\nAuthor: Patrick Emami\n\"\"\"\nimport tensorflow as tf\nimport roboschool\nimport numpy as np\nimport gym\nfrom gym import wrappers\nimport tflearn\nimport argparse\nimport pprint as pp\n\nfrom replay_buffer import ReplayBuffer\n\n# ===========================\n# Actor and Critic DNNs\n# ===========================\n\nclass ActorNetwork(object):\n \"\"\"\n Input to the network is the state, output is the action\n under a deterministic policy.\n\n The output layer activation is a tanh to keep the action\n between -action_bound and action_bound\n \"\"\"\n\n def __init__(self, sess, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):\n self.sess = sess\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.action_bound = action_bound\n self.learning_rate = learning_rate\n self.tau = tau\n self.batch_size = batch_size\n\n # Actor Network\n self.inputs, self.out, self.scaled_out = self.create_actor_network()\n\n self.network_params = tf.trainable_variables()\n\n # Target Network\n self.target_inputs, self.target_out, self.target_scaled_out = self.create_actor_network()\n\n self.target_network_params = tf.trainable_variables()[\n len(self.network_params):]\n\n # Op for periodically updating target network with online network\n # weights\n self.update_target_network_params = \\\n [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +\n tf.multiply(self.target_network_params[i], 1. 
- self.tau))\n for i in range(len(self.target_network_params))]\n\n # This gradient will be provided by the critic network\n self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])\n\n # Combine the gradients here\n self.unnormalized_actor_gradients = tf.gradients(\n self.scaled_out, self.network_params, -self.action_gradient)\n self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))\n\n # Optimization Op\n self.optimize = tf.train.AdamOptimizer(self.learning_rate).\\\n apply_gradients(zip(self.actor_gradients, self.network_params))\n\n self.num_trainable_vars = len(\n self.network_params) + len(self.target_network_params)\n\n def create_actor_network(self):\n inputs = tflearn.input_data(shape=[None, self.s_dim])\n net = tflearn.fully_connected(inputs, 400)\n net = tflearn.layers.normalization.batch_normalization(net)\n net = tflearn.activations.relu(net)\n net = tflearn.fully_connected(net, 300)\n net = tflearn.layers.normalization.batch_normalization(net)\n net = tflearn.activations.relu(net)\n # Final layer weights are init to Uniform[-3e-3, 3e-3]\n w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)\n out = tflearn.fully_connected(\n net, self.a_dim, activation='tanh', weights_init=w_init)\n # Scale output to -action_bound to action_bound\n scaled_out = tf.multiply(out, self.action_bound)\n return inputs, out, scaled_out\n\n def train(self, inputs, a_gradient):\n self.sess.run(self.optimize, feed_dict={\n self.inputs: inputs,\n self.action_gradient: a_gradient\n })\n\n def predict(self, inputs):\n return self.sess.run(self.scaled_out, feed_dict={\n self.inputs: inputs\n })\n\n def predict_target(self, inputs):\n return self.sess.run(self.target_scaled_out, feed_dict={\n self.target_inputs: inputs\n })\n\n def update_target_network(self):\n self.sess.run(self.update_target_network_params)\n\n def get_num_trainable_vars(self):\n return self.num_trainable_vars\n\n\nclass 
CriticNetwork(object):\n \"\"\"\n Input to the network is the state and action, output is Q(s,a).\n The action must be obtained from the output of the Actor network.\n\n \"\"\"\n\n def __init__(self, sess, state_dim, action_dim, learning_rate, tau, gamma, num_actor_vars):\n self.sess = sess\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.learning_rate = learning_rate\n self.tau = tau\n self.gamma = gamma\n\n # Create the critic network\n self.inputs, self.action, self.out = self.create_critic_network()\n\n self.network_params = tf.trainable_variables()[num_actor_vars:]\n\n # Target Network\n self.target_inputs, self.target_action, self.target_out = self.create_critic_network()\n\n self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]\n\n # Op for periodically updating target network with online network\n # weights with regularization\n self.update_target_network_params = \\\n [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \\\n + tf.multiply(self.target_network_params[i], 1. - self.tau))\n for i in range(len(self.target_network_params))]\n\n # Network target (y_i)\n self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])\n\n # Define loss and optimization Op\n self.loss = tflearn.mean_square(self.predicted_q_value, self.out)\n self.optimize = tf.train.AdamOptimizer(\n self.learning_rate).minimize(self.loss)\n\n # Get the gradient of the net w.r.t. the action.\n # For each action in the minibatch (i.e., for each x in xs),\n # this will sum up the gradients of each critic output in the minibatch\n # w.r.t. that action. 
Each output is independent of all\n # actions except for one.\n self.action_grads = tf.gradients(self.out, self.action)\n\n def create_critic_network(self):\n inputs = tflearn.input_data(shape=[None, self.s_dim])\n action = tflearn.input_data(shape=[None, self.a_dim])\n net = tflearn.fully_connected(inputs, 400)\n net = tflearn.layers.normalization.batch_normalization(net)\n net = tflearn.activations.relu(net)\n\n # Add the action tensor in the 2nd hidden layer\n # Use two temp layers to get the corresponding weights and biases\n t1 = tflearn.fully_connected(net, 300)\n t2 = tflearn.fully_connected(action, 300)\n\n net = tflearn.activation(\n tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')\n\n # linear layer connected to 1 output representing Q(s,a)\n # Weights are init to Uniform[-3e-3, 3e-3]\n w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)\n out = tflearn.fully_connected(net, 1, weights_init=w_init)\n return inputs, action, out\n\n def train(self, inputs, action, predicted_q_value):\n return self.sess.run([self.out, self.optimize], feed_dict={\n self.inputs: inputs,\n self.action: action,\n self.predicted_q_value: predicted_q_value\n })\n\n def predict(self, inputs, action):\n return self.sess.run(self.out, feed_dict={\n self.inputs: inputs,\n self.action: action\n })\n\n def predict_target(self, inputs, action):\n return self.sess.run(self.target_out, feed_dict={\n self.target_inputs: inputs,\n self.target_action: action\n })\n\n def action_gradients(self, inputs, actions):\n return self.sess.run(self.action_grads, feed_dict={\n self.inputs: inputs,\n self.action: actions\n })\n\n def update_target_network(self):\n self.sess.run(self.update_target_network_params)\n\n# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is\n# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\nclass OrnsteinUhlenbeckActionNoise:\n def 
__init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):\n self.theta = theta\n self.mu = mu\n self.sigma = sigma\n self.dt = dt\n self.x0 = x0\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \\\n self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n\n def __repr__(self):\n return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)\n\n# ===========================\n# Tensorflow Summary Ops\n# ===========================\n\ndef build_summaries():\n episode_reward = tf.Variable(0.)\n tf.summary.scalar(\"Reward\", episode_reward)\n episode_ave_max_q = tf.Variable(0.)\n tf.summary.scalar(\"Qmax Value\", episode_ave_max_q)\n\n summary_vars = [episode_reward, episode_ave_max_q]\n summary_ops = tf.summary.merge_all()\n\n return summary_ops, summary_vars\n\n# ===========================\n# Agent Training\n# ===========================\n\ndef train(sess, env, args, actor, critic, actor_noise):\n\n # Set up summary Ops\n summary_ops, summary_vars = build_summaries()\n\n sess.run(tf.global_variables_initializer())\n writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)\n\n # Initialize target network weights\n actor.update_target_network()\n critic.update_target_network()\n\n # Initialize replay memory\n replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))\n\n for i in range(int(args['max_episodes'])):\n\n s = env.reset()\n\n ep_reward = 0\n ep_ave_max_q = 0\n\n for j in range(int(args['max_episode_len'])):\n\n if args['render_env']:\n env.render()\n\n # Added exploration noise\n #a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. 
+ i))\n a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()\n\n s2, r, terminal, info = env.step(a[0])\n\n replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,\n terminal, np.reshape(s2, (actor.s_dim,)))\n\n # Keep adding experience to the memory until\n # there are at least minibatch size samples\n if replay_buffer.size() > int(args['minibatch_size']):\n s_batch, a_batch, r_batch, t_batch, s2_batch = \\\n replay_buffer.sample_batch(int(args['minibatch_size']))\n\n # Calculate targets\n target_q = critic.predict_target(\n s2_batch, actor.predict_target(s2_batch))\n\n y_i = []\n for k in range(int(args['minibatch_size'])):\n if t_batch[k]:\n y_i.append(r_batch[k])\n else:\n y_i.append(r_batch[k] + critic.gamma * target_q[k])\n\n # Update the critic given the targets\n predicted_q_value, _ = critic.train(\n s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))\n\n ep_ave_max_q += np.amax(predicted_q_value)\n\n # Update the actor policy using the sampled gradient\n a_outs = actor.predict(s_batch)\n grads = critic.action_gradients(s_batch, a_outs)\n actor.train(s_batch, grads[0])\n\n # Update target networks\n actor.update_target_network()\n critic.update_target_network()\n\n s = s2\n ep_reward += r\n\n if terminal:\n\n summary_str = sess.run(summary_ops, feed_dict={\n summary_vars[0]: ep_reward,\n summary_vars[1]: ep_ave_max_q / float(j)\n })\n\n writer.add_summary(summary_str, i)\n writer.flush()\n\n print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f}'.format(int(ep_reward), \\\n i, (ep_ave_max_q / float(j))))\n break\n\ndef main(args):\n\n with tf.Session() as sess:\n\n env = gym.make(args['env'])\n np.random.seed(int(args['random_seed']))\n tf.set_random_seed(int(args['random_seed']))\n env.seed(int(args['random_seed']))\n\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n action_bound = env.action_space.high\n # Ensure action bound is symmetric\n assert 
(env.action_space.high == -env.action_space.low)\n\n actor = ActorNetwork(sess, state_dim, action_dim, action_bound,\n float(args['actor_lr']), float(args['tau']),\n int(args['minibatch_size']))\n\n critic = CriticNetwork(sess, state_dim, action_dim,\n float(args['critic_lr']), float(args['tau']),\n float(args['gamma']),\n actor.get_num_trainable_vars())\n\n actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))\n\n if args['use_gym_monitor']:\n if not args['render_env']:\n env = wrappers.Monitor(\n env, args['monitor_dir'], video_callable=False, force=True)\n else:\n env = wrappers.Monitor(env, args['monitor_dir'], force=True)\n\n train(sess, env, args, actor, critic, actor_noise)\n\n if args['use_gym_monitor']:\n env.monitor.close()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='provide arguments for DDPG agent')\n\n # agent parameters\n parser.add_argument('--actor-lr', help='actor network learning rate', default=0.0001)\n parser.add_argument('--critic-lr', help='critic network learning rate', default=0.001)\n parser.add_argument('--gamma', help='discount factor for critic updates', default=0.99)\n parser.add_argument('--tau', help='soft target update parameter', default=0.001)\n parser.add_argument('--buffer-size', help='max size of the replay buffer', default=1000000)\n parser.add_argument('--minibatch-size', help='size of minibatch for minibatch-SGD', default=64)\n\n # run parameters\n parser.add_argument('--env', help='choose the gym env- tested on {Pendulum-v0}', default='RoboschoolInvertedPendulum-v1')\n parser.add_argument('--random-seed', help='random seed for repeatability', default=1234)\n parser.add_argument('--max-episodes', help='max num of episodes to do while training', default=50000)\n parser.add_argument('--max-episode-len', help='max length of 1 episode', default=1000)\n parser.add_argument('--render-env', help='render the gym env', action='store_true')\n parser.add_argument('--use-gym-monitor', 
help='record gym results', action='store_true')\n parser.add_argument('--monitor-dir', help='directory for storing gym results', default='./results/gym_ddpg')\n parser.add_argument('--summary-dir', help='directory for storing tensorboard info', default='./results/tf_ddpg')\n\n parser.set_defaults(render_env=False)\n parser.set_defaults(use_gym_monitor=True)\n\n args = vars(parser.parse_args())\n\n pp.pprint(args)\n\n main(args)\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.gradients",
"tensorflow.global_variables_initializer",
"tensorflow.trainable_variables",
"numpy.zeros_like",
"numpy.random.normal",
"tensorflow.Variable",
"numpy.sqrt",
"tensorflow.train.AdamOptimizer",
"numpy.reshape",
"numpy.zeros",
"tensorflow.summary.scalar",
"tensorflow.Session",
"tensorflow.placeholder",
"numpy.amax",
"tensorflow.summary.merge_all",
"tensorflow.multiply",
"tensorflow.div",
"tensorflow.summary.FileWriter"
]
] |
WangTaoAs/MGN_ReID
|
[
"916244c39b57b8068c34a7bfa1803781193bb554"
] |
[
"test.py"
] |
[
"import os\nimport torch\nimport argparse\nfrom configs import args\nfrom data import make_data_loader\nfrom model import bulid_MGN_resnet\nfrom processor import inference\n\ndef test(args):\n\n print('Start Testing ------')\n train_loader, val_loader, class_num, _, _, num_query = make_data_loader(args)\n\n device = torch.device(args.cuda)\n\n model = bulid_MGN_resnet(args)\n\n model.to(device)\n\n model.load_param(args.test_weight)\n\n\n inference(args, model, val_loader, num_query, device)\n\n\nif __name__ == '__main__':\n \n test(args)"
] |
[
[
"torch.device"
]
] |
kevinLCG/machinelearning-az
|
[
"54e3090275a3fc419aad17caadc6a47a71dcd3d4"
] |
[
"datasets/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/data_preprocessing_template.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 27 19:43:11 2019\n\n@author: juangabriel\n\"\"\"\n\n# Plantilla de Pre Procesado\n\n# Cómo importar las librerías\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importar el data set\ndataset = pd.read_csv('Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n\n# Dividir el data set en conjunto de entrenamiento y conjunto de testing\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n\n\n# Escalado de variables\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\"\"\"\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
Lazea/TensorFlow
|
[
"3237a043a6c43c94e1dabcfa88a3a3afb1bb3646"
] |
[
"util/input_data.py"
] |
[
"\"\"\"Data util script for managing and processing data.\"\"\"\n\nimport os\nimport numpy as np\n\nclass Data:\n\n def __init__(self):\n self.offset = 0\n self.data = None\n self.count = 0\n\n def __init__(self, filepath):\n self.offset = 0\n self.count = 0\n self.data = self.load_data(filepath)\n\n def load_data(self, filepath):\n self.data = np.load(filepath)\n self.count = self.data.shape[0]\n\n return np.load(filepath)\n\n def save_data(self, filepath):\n np.save(filepath, self.data)\n\n def get_data(self, shape=None):\n \"\"\"Returns input data. Can be returned in desired shape.\"\"\"\n data = np.array([row[0] for row in self.data])\n if shape != None:\n return np.array([np.reshape(data_point, shape)for data_point in data])\n else:\n return data\n\n def get_labels(self):\n \"\"\"Returns data labels.\"\"\"\n return np.array([row[1] for row in self.data])\n\n def shuffle_data(self):\n \"\"\"Shuffles the data along axis=0.\"\"\"\n np.random.shuffle(self.data)\n\n def next_batch(self, batch_size):\n \"\"\"Returns the next data batch of size batch_size.\"\"\"\n data_points = []\n labels = []\n for i in range(batch_size):\n idx = i + self.offset\n if idx >= self.data.shape[0]:\n self.offset = i - batch_size\n idx = i + self.offset\n\n data_points.append(self.data[idx][0])\n labels.append(self.data[idx][1])\n\n self.offset += batch_size\n\n return data_points, labels\n\nif __name__ == \"__main__\":\n filepath = 'sample_data.npy'\n if not os.path.isfile(filepath):\n data = []\n for i in range(1000):\n data_pts = np.random.random(28*28)\n labels = np.random.random(10)\n\n data.append([data_pts, labels])\n\n data = np.array(data)\n np.save(filepath, data)\n\n my_data = Data()\n my_data.load_data(filepath)\n\n my_data.shuffle_data()\n print(my_data.get_data().shape)\n print(my_data.get_labels().shape)\n\n my_data.save_data(filepath)\n"
] |
[
[
"numpy.array",
"numpy.reshape",
"numpy.load",
"numpy.random.shuffle",
"numpy.save",
"numpy.random.random"
]
] |
cgranade/Qcodes
|
[
"2d8fd0b8e0fa12d7921a96003318598ad347dd05",
"2d8fd0b8e0fa12d7921a96003318598ad347dd05"
] |
[
"qcodes/tests/test_parameter.py",
"qcodes/dataset/measurements.py"
] |
[
"\"\"\"\nTest suite for parameter\n\"\"\"\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom unittest import TestCase\nfrom typing import Tuple\nimport pytest\n\nimport numpy as np\nfrom hypothesis import given, event, settings\nimport hypothesis.strategies as hst\nfrom qcodes import Function\nfrom qcodes.instrument.parameter import (\n Parameter, ArrayParameter, MultiParameter, ManualParameter,\n InstrumentRefParameter, ScaledParameter)\nimport qcodes.utils.validators as vals\nfrom qcodes.tests.instrument_mocks import DummyInstrument\nfrom qcodes.utils.validators import Numbers\n\n\nclass GettableParam(Parameter):\n \"\"\" Parameter that keeps track of number of get operations\"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._get_count = 0\n\n def get_raw(self):\n self._get_count += 1\n self._save_val(42)\n return 42\n\n\nclass BookkeepingValidator(vals.Validator):\n \"\"\"\n Validator that keeps track of what it validates\n \"\"\"\n def __init__(self, min_value=-float(\"inf\"), max_value=float(\"inf\")):\n self.values_validated = []\n\n def validate(self, value, context=''):\n self.values_validated.append(value)\n\n is_numeric = True\n\n\nblank_instruments = (\n None, # no instrument at all\n namedtuple('noname', '')(), # no .name\n namedtuple('blank', 'name')('') # blank .name\n) # type: Tuple\nnamed_instrument = namedtuple('yesname', 'name')('astro')\n\n\nclass MemoryParameter(Parameter):\n def __init__(self, get_cmd=None, **kwargs):\n self.set_values = []\n self.get_values = []\n super().__init__(set_cmd=self.add_set_value,\n get_cmd=self.create_get_func(get_cmd), **kwargs)\n\n def add_set_value(self, value):\n self.set_values.append(value)\n\n def create_get_func(self, func):\n def get_func():\n if func is not None:\n val = func()\n else:\n val = self._latest['raw_value']\n self.get_values.append(val)\n return val\n return get_func\n\n\nclass TestParameter(TestCase):\n def 
test_no_name(self):\n with self.assertRaises(TypeError):\n Parameter()\n\n def test_default_attributes(self):\n # Test the default attributes, providing only a name\n name = 'repetitions'\n p = GettableParam(name, vals=vals.Numbers())\n self.assertEqual(p.name, name)\n self.assertEqual(p.label, name)\n self.assertEqual(p.unit, '')\n self.assertEqual(str(p), name)\n\n # default validator is all numbers\n p.validate(-1000)\n with self.assertRaises(TypeError):\n p.validate('not a number')\n\n # docstring exists, even without providing one explicitly\n self.assertIn(name, p.__doc__)\n\n # test snapshot_get by looking at _get_count\n self.assertEqual(p._get_count, 0)\n snap = p.snapshot(update=True)\n self.assertEqual(p._get_count, 1)\n snap_expected = {\n 'name': name,\n 'label': name,\n 'unit': '',\n 'value': 42,\n 'vals': repr(vals.Numbers())\n }\n for k, v in snap_expected.items():\n self.assertEqual(snap[k], v)\n\n def test_explicit_attributes(self):\n # Test the explicit attributes, providing everything we can\n name = 'volt'\n label = 'Voltage'\n unit = 'V'\n docstring = 'DOCS!'\n metadata = {'gain': 100}\n p = GettableParam(name, label=label, unit=unit,\n vals=vals.Numbers(5, 10), docstring=docstring,\n snapshot_get=False, metadata=metadata)\n\n self.assertEqual(p.name, name)\n self.assertEqual(p.label, label)\n self.assertEqual(p.unit, unit)\n self.assertEqual(str(p), name)\n\n with self.assertRaises(ValueError):\n p.validate(-1000)\n p.validate(6)\n with self.assertRaises(TypeError):\n p.validate('not a number')\n\n self.assertIn(name, p.__doc__)\n self.assertIn(docstring, p.__doc__)\n\n # test snapshot_get by looking at _get_count\n self.assertEqual(p._get_count, 0)\n # Snapshot should not perform get since snapshot_get is False\n snap = p.snapshot(update=True)\n self.assertEqual(p._get_count, 0)\n snap_expected = {\n 'name': name,\n 'label': label,\n 'unit': unit,\n 'vals': repr(vals.Numbers(5, 10)),\n 'metadata': metadata\n }\n for k, v in 
snap_expected.items():\n self.assertEqual(snap[k], v)\n\n # attributes only available in MultiParameter\n for attr in ['names', 'labels', 'setpoints', 'setpoint_names',\n 'setpoint_labels', 'full_names']:\n self.assertFalse(hasattr(p, attr), attr)\n\n def test_number_of_validations(self):\n\n p = Parameter('p', set_cmd=None, initial_value=0,\n vals=BookkeepingValidator())\n # in the set wrapper the final value is validated\n # and then subsequently each step is validated.\n # in this case there is one step so the final value\n # is validated twice.\n self.assertEqual(p.vals.values_validated, [0, 0])\n\n p.step = 1\n p.set(10)\n self.assertEqual(p.vals.values_validated,\n [0, 0, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n def test_snapshot_value(self):\n p_snapshot = Parameter('no_snapshot', set_cmd=None, get_cmd=None,\n snapshot_value=True)\n p_snapshot(42)\n snap = p_snapshot.snapshot()\n self.assertIn('value', snap)\n p_no_snapshot = Parameter('no_snapshot', set_cmd=None, get_cmd=None,\n snapshot_value=False)\n p_no_snapshot(42)\n snap = p_no_snapshot.snapshot()\n self.assertNotIn('value', snap)\n\n def test_has_set_get(self):\n # Create parameter that has no set_cmd, and get_cmd returns last value\n gettable_parameter = Parameter('one', set_cmd=False, get_cmd=None)\n self.assertTrue(hasattr(gettable_parameter, 'get'))\n self.assertFalse(hasattr(gettable_parameter, 'set'))\n with self.assertRaises(NotImplementedError):\n gettable_parameter(1)\n # Initial value is None if not explicitly set\n self.assertIsNone(gettable_parameter())\n\n # Create parameter that saves value during set, and has no get_cmd\n settable_parameter = Parameter('two', set_cmd=None, get_cmd=False)\n self.assertFalse(hasattr(settable_parameter, 'get'))\n self.assertTrue(hasattr(settable_parameter, 'set'))\n with self.assertRaises(NotImplementedError):\n settable_parameter()\n settable_parameter(42)\n\n settable_gettable_parameter = Parameter('three', set_cmd=None, get_cmd=None)\n 
self.assertTrue(hasattr(settable_gettable_parameter, 'set'))\n self.assertTrue(hasattr(settable_gettable_parameter, 'get'))\n self.assertIsNone(settable_gettable_parameter())\n settable_gettable_parameter(22)\n self.assertEqual(settable_gettable_parameter(), 22)\n\n def test_str_representation(self):\n # three cases where only name gets used for full_name\n for instrument in blank_instruments:\n p = Parameter(name='fred')\n p._instrument = instrument\n self.assertEqual(str(p), 'fred')\n\n # and finally an instrument that really has a name\n p = Parameter(name='wilma')\n p._instrument = named_instrument\n self.assertEqual(str(p), 'astro_wilma')\n\n def test_bad_validator(self):\n with self.assertRaises(TypeError):\n Parameter('p', vals=[1, 2, 3])\n\n def test_bad_name(self):\n with self.assertRaises(ValueError):\n Parameter('p with space')\n with self.assertRaises(ValueError):\n Parameter('⛄')\n with self.assertRaises(ValueError):\n Parameter('1')\n\n\n def test_step_ramp(self):\n p = MemoryParameter(name='test_step')\n p(42)\n self.assertListEqual(p.set_values, [42])\n p.step = 1\n\n self.assertListEqual(p.get_ramp_values(44.5, 1), [43, 44, 44.5])\n\n p(44.5)\n self.assertListEqual(p.set_values, [42, 43, 44, 44.5])\n\n def test_scale_raw_value(self):\n p = Parameter(name='test_scale_raw_value', set_cmd=None)\n p(42)\n self.assertEqual(p.raw_value, 42)\n\n p.scale = 2\n self.assertEqual(p.raw_value, 42) # No set/get cmd performed\n self.assertEqual(p(), 21)\n\n p(10)\n self.assertEqual(p.raw_value, 20)\n self.assertEqual(p(), 10)\n\n # There are a number different scenarios for testing a parameter with scale\n # and offset. Therefore a custom strategy for generating test parameters\n # is implemented here. 
The possible cases are:\n # for getting and setting a parameter: values can be\n # scalar:\n # offset and scale can be scalars\n # for getting only:\n # array:\n # offset and scale can be scalars or arrays(of same legnth as values)\n # independently\n\n # define shorthands for strategies\n TestFloats = hst.floats(min_value=-1e40, max_value=1e40).filter(lambda x: x != 0)\n SharedSize = hst.shared(hst.integers(min_value=1, max_value=100), key='shared_size')\n ValuesScalar = hst.shared(hst.booleans(), key='values_scalar')\n\n # the following test stra\n @hst.composite\n def iterable_or_number(draw, values, size, values_scalar, is_values):\n if draw(values_scalar):\n # if parameter values are scalar, return scalar for values and scale/offset\n return draw(values)\n elif is_values:\n # if parameter values are not scalar and parameter values are requested\n # return a list of values of the given size\n return draw(hst.lists(values, min_size=draw(size), max_size=draw(size)))\n else:\n # if parameter values are not scalar and scale/offset are requested\n # make a random choice whether to return a list of the same size as the values\n # or a simple scalar\n if draw(hst.booleans()):\n return draw(hst.lists(values, min_size=draw(size), max_size=draw(size)))\n else:\n return draw(values)\n\n @settings(max_examples=500) # default:100 increased\n @given(values=iterable_or_number(TestFloats, SharedSize, ValuesScalar, True),\n offsets=iterable_or_number(TestFloats, SharedSize, ValuesScalar, False),\n scales=iterable_or_number(TestFloats, SharedSize, ValuesScalar, False))\n def test_scale_and_offset_raw_value_iterable(self, values, offsets, scales):\n p = Parameter(name='test_scale_and_offset_raw_value', set_cmd=None)\n\n # test that scale and offset does not change the default behaviour\n p(values)\n self.assertEqual(p.raw_value, values)\n\n # test setting scale and offset does not change anything\n p.scale = scales\n p.offset = offsets\n self.assertEqual(p.raw_value, 
values)\n\n\n np_values = np.array(values)\n np_offsets = np.array(offsets)\n np_scales = np.array(scales)\n np_get_values = np.array(p())\n np.testing.assert_allclose(np_get_values, (np_values-np_offsets)/np_scales) # No set/get cmd performed\n\n # test set, only for scalar values\n if not isinstance(values, Iterable):\n p(values)\n np.testing.assert_allclose(np.array(p.raw_value), np_values*np_scales + np_offsets) # No set/get cmd performed\n\n # testing conversion back and forth\n p(values)\n np_get_values = np.array(p())\n np.testing.assert_allclose(np_get_values, np_values) # No set/get cmd performed\n\n # adding statistics\n if isinstance(offsets, Iterable):\n event('Offset is array')\n if isinstance(scales, Iterable):\n event('Scale is array')\n if isinstance(values, Iterable):\n event('Value is array')\n if isinstance(scales, Iterable) and isinstance(offsets, Iterable):\n event('Scale is array and also offset')\n if isinstance(scales, Iterable) and not isinstance(offsets, Iterable):\n event('Scale is array but not offset')\n\n\n @given(scale=hst.integers(1, 100),\n value=hst.floats(min_value=1e-9, max_value=10))\n def test_ramp_scaled(self, scale, value):\n start_point = 0.0\n p = MemoryParameter(name='p', scale=scale,\n initial_value=start_point)\n assert p() == start_point\n # first set a step size\n p.step = 0.1\n # and a wait time\n p.inter_delay = 1e-9 # in seconds\n first_step = 1.0\n second_step = 10.0\n # do a step to start from a non zero starting point where\n # scale matters\n p.set(first_step)\n np.testing.assert_allclose(np.array([p.get()]),\n np.array([first_step]))\n\n expected_raw_steps = np.linspace(start_point*scale, first_step*scale, 11)\n # getting the raw values that are actually send to the instrument.\n # these are scaled in the set_wrapper\n np.testing.assert_allclose(np.array(p.set_values), expected_raw_steps)\n assert p.raw_value == first_step*scale\n # then check the generated steps. 
They should not be scaled as the\n # scaling happens when setting them\n expected_steps = np.linspace(first_step+p.step,\n second_step,90)\n np.testing.assert_allclose(p.get_ramp_values(second_step, p.step),\n expected_steps)\n p.set(10)\n np.testing.assert_allclose(np.array(p.set_values),\n np.linspace(0.0*scale, 10*scale, 101))\n p.set(value)\n np.testing.assert_allclose(p.get(), value)\n assert p.raw_value == value * scale\n\n @given(value=hst.floats(min_value=1e-9, max_value=10))\n def test_ramp_parser(self, value):\n start_point = 0.0\n p = MemoryParameter(name='p',\n set_parser=lambda x: -x,\n get_parser=lambda x: -x,\n initial_value=start_point)\n assert p() == start_point\n # first set a step size\n p.step = 0.1\n # and a wait time\n p.inter_delay = 1e-9 # in seconds\n first_step = 1.0\n second_step = 10.0\n # do a step to start from a non zero starting point where\n # scale matters\n p.set(first_step)\n assert p.get() == first_step\n assert p.raw_value == - first_step\n np.testing.assert_allclose(np.array([p.get()]),\n np.array([first_step]))\n\n expected_raw_steps = np.linspace(-start_point, -first_step, 11)\n # getting the raw values that are actually send to the instrument.\n # these are parsed in the set_wrapper\n np.testing.assert_allclose(np.array(p.set_values), expected_raw_steps)\n assert p.raw_value == -first_step\n # then check the generated steps. 
They should not be parsed as the\n # scaling happens when setting them\n expected_steps = np.linspace((first_step+p.step),\n second_step,90)\n np.testing.assert_allclose(p.get_ramp_values(second_step, p.step),\n expected_steps)\n p.set(second_step)\n np.testing.assert_allclose(np.array(p.set_values),\n np.linspace(-start_point, -second_step, 101))\n p.set(value)\n np.testing.assert_allclose(p.get(), value)\n assert p.raw_value == - value\n\n\n\n @given(scale=hst.integers(1, 100),\n value=hst.floats(min_value=1e-9, max_value=10))\n def test_ramp_parsed_scaled(self, scale, value):\n start_point = 0.0\n p = MemoryParameter(name='p',\n scale = scale,\n set_parser=lambda x: -x,\n get_parser=lambda x: -x,\n initial_value=start_point)\n assert p() == start_point\n # first set a step size\n p.step = 0.1\n # and a wait time\n p.inter_delay = 1e-9 # in seconds\n first_step = 1.0\n second_step = 10.0\n p.set(first_step)\n assert p.get() == first_step\n assert p.raw_value == - first_step * scale\n expected_raw_steps = np.linspace(-start_point*scale, -first_step*scale, 11)\n # getting the raw values that are actually send to the instrument.\n # these are parsed in the set_wrapper\n np.testing.assert_allclose(np.array(p.set_values), expected_raw_steps)\n assert p.raw_value == - scale * first_step\n expected_steps = np.linspace(first_step+p.step,second_step,90)\n np.testing.assert_allclose(p.get_ramp_values(10, p.step),\n expected_steps)\n p.set(second_step)\n np.testing.assert_allclose(np.array(p.set_values),\n np.linspace(-start_point*scale, -second_step*scale, 101))\n p.set(value)\n np.testing.assert_allclose(p.get(), value)\n assert p.raw_value == -scale * value\n\n def test_steppeing_from_invalid_starting_point(self):\n\n the_value = -10\n\n def set_function(value):\n nonlocal the_value\n the_value = value\n\n def get_function():\n return the_value\n\n a = Parameter('test', set_cmd=set_function, get_cmd=get_function,\n vals=Numbers(0, 100), step=5)\n # We start out by 
setting the parameter to an\n # invalid value. This is not possible using initial_value\n # as the validator will catch that but perhaps this may happen\n # if the instrument can return out of range values.\n assert a.get() == -10\n with pytest.raises(ValueError):\n # trying to set to 10 should raise even with 10 valid\n # as the steps demand that we first step to -5 which is not\n a.set(10)\n # afterwards the value should still be the same\n assert a.get() == -10\n\n\nclass TestValsandParseParameter(TestCase):\n\n def setUp(self):\n self.parameter = Parameter(name='foobar',\n set_cmd=None, get_cmd=None,\n set_parser=lambda x: int(round(x)),\n vals=vals.PermissiveInts(0))\n\n def test_setting_int_with_float(self):\n\n a = 0\n b = 10\n values = np.linspace(a, b, b-a+1)\n for i in values:\n self.parameter(i)\n a = self.parameter()\n assert isinstance(a, int)\n\n def test_setting_int_with_float_not_close(self):\n\n a = 0\n b = 10\n values = np.linspace(a, b, b-a+2)\n for i in values[1:-2]:\n with self.assertRaises(TypeError):\n self.parameter(i)\n\n\nclass SimpleArrayParam(ArrayParameter):\n def __init__(self, return_val, *args, **kwargs):\n self._return_val = return_val\n self._get_count = 0\n super().__init__(*args, **kwargs)\n\n def get_raw(self):\n self._get_count += 1\n self._save_val(self._return_val)\n return self._return_val\n\n\nclass SettableArray(SimpleArrayParam):\n # this is not allowed - just created to raise an error in the test below\n def set_raw(self, v):\n self.v = v\n\n\nclass TestArrayParameter(TestCase):\n def test_default_attributes(self):\n name = 'array_param'\n shape = (2, 3)\n p = SimpleArrayParam([[1, 2, 3], [4, 5, 6]], name, shape)\n\n self.assertEqual(p.name, name)\n self.assertEqual(p.shape, shape)\n\n self.assertEqual(p.label, name)\n self.assertEqual(p.unit, '')\n self.assertIsNone(p.setpoints)\n self.assertIsNone(p.setpoint_names)\n self.assertIsNone(p.setpoint_labels)\n\n self.assertEqual(str(p), name)\n\n 
self.assertEqual(p._get_count, 0)\n snap = p.snapshot(update=True)\n self.assertEqual(p._get_count, 0)\n snap_expected = {\n 'name': name,\n 'label': name,\n 'unit': ''\n }\n for k, v in snap_expected.items():\n self.assertEqual(snap[k], v)\n\n self.assertIn(name, p.__doc__)\n\n def test_explicit_attributes(self):\n name = 'tiny_array'\n shape = (2,)\n label = 'it takes two to tango'\n unit = 'steps'\n setpoints = [(0, 1)]\n setpoint_names = ['sp_index']\n setpoint_labels = ['Setpoint Label']\n docstring = 'Whats up Doc?'\n metadata = {'size': 2}\n p = SimpleArrayParam([6, 7], name, shape, label=label, unit=unit,\n setpoints=setpoints,\n setpoint_names=setpoint_names,\n setpoint_labels=setpoint_labels,\n docstring=docstring, snapshot_value=True,\n metadata=metadata)\n\n self.assertEqual(p.name, name)\n self.assertEqual(p.shape, shape)\n self.assertEqual(p.label, label)\n self.assertEqual(p.unit, unit)\n self.assertEqual(p.setpoints, setpoints)\n self.assertEqual(p.setpoint_names, setpoint_names)\n self.assertEqual(p.setpoint_full_names, setpoint_names)\n self.assertEqual(p.setpoint_labels, setpoint_labels)\n\n self.assertEqual(p._get_count, 0)\n snap = p.snapshot(update=True)\n self.assertEqual(p._get_count, 1)\n snap_expected = {\n 'name': name,\n 'label': label,\n 'unit': unit,\n 'setpoint_names': setpoint_names,\n 'setpoint_labels': setpoint_labels,\n 'metadata': metadata,\n 'value': [6, 7]\n }\n for k, v in snap_expected.items():\n self.assertEqual(snap[k], v)\n\n self.assertIn(name, p.__doc__)\n self.assertIn(docstring, p.__doc__)\n\n def test_has_set_get(self):\n name = 'array_param'\n shape = (3,)\n with self.assertRaises(AttributeError):\n ArrayParameter(name, shape)\n\n p = SimpleArrayParam([1, 2, 3], name, shape)\n\n self.assertTrue(hasattr(p, 'get'))\n self.assertFalse(hasattr(p, 'set'))\n\n with self.assertRaises(AttributeError):\n SettableArray([1, 2, 3], name, shape)\n\n def test_full_name(self):\n # three cases where only name gets used for 
full_name\n for instrument in blank_instruments:\n p = SimpleArrayParam([6, 7], 'fred', (2,),\n setpoint_names=('barney',))\n p._instrument = instrument\n self.assertEqual(str(p), 'fred')\n self.assertEqual(p.setpoint_full_names, ('barney',))\n\n # and then an instrument that really has a name\n p = SimpleArrayParam([6, 7], 'wilma', (2,),\n setpoint_names=('betty',))\n p._instrument = named_instrument\n self.assertEqual(str(p), 'astro_wilma')\n self.assertEqual(p.setpoint_full_names, ('astro_betty',))\n\n # and with a 2d parameter to test mixed setpoint_names\n p = SimpleArrayParam([[6, 7, 8], [1, 2, 3]], 'wilma', (3, 2),\n setpoint_names=('betty', None))\n p._instrument = named_instrument\n self.assertEqual(p.setpoint_full_names, ('astro_betty', None))\n\n\n def test_constructor_errors(self):\n bad_constructors = [\n {'shape': [[3]]}, # not a depth-1 sequence\n {'shape': [3], 'setpoints': [1, 2, 3]}, # should be [[1, 2, 3]]\n {'shape': [3], 'setpoint_names': 'index'}, # should be ['index']\n {'shape': [3], 'setpoint_labels': 'the index'}, # ['the index']\n {'shape': [3], 'setpoint_names': [None, 'index2']}\n ]\n for kwargs in bad_constructors:\n with self.subTest(**kwargs):\n with self.assertRaises(ValueError):\n SimpleArrayParam([1, 2, 3], 'p', **kwargs)\n\n\nclass SimpleMultiParam(MultiParameter):\n def __init__(self, return_val, *args, **kwargs):\n self._return_val = return_val\n self._get_count = 0\n super().__init__(*args, **kwargs)\n\n def get_raw(self):\n self._get_count += 1\n self._save_val(self._return_val)\n return self._return_val\n\n\nclass SettableMulti(SimpleMultiParam):\n # this is not fully suported - just created to raise a warning in the test below.\n # We test that the warning is raised\n def set_raw(self, v):\n print(\"Calling set\")\n self.v = v\n\n\nclass TestMultiParameter(TestCase):\n def test_default_attributes(self):\n name = 'mixed_dimensions'\n names = ['0D', '1D', '2D']\n shapes = ((), (3,), (2, 2))\n p = SimpleMultiParam([0, [1, 2, 
3], [[4, 5], [6, 7]]],\n name, names, shapes)\n\n self.assertEqual(p.name, name)\n self.assertEqual(p.names, names)\n self.assertEqual(p.shapes, shapes)\n\n self.assertEqual(p.labels, names)\n self.assertEqual(p.units, [''] * 3)\n self.assertIsNone(p.setpoints)\n self.assertIsNone(p.setpoint_names)\n self.assertIsNone(p.setpoint_labels)\n\n self.assertEqual(str(p), name)\n\n self.assertEqual(p._get_count, 0)\n snap = p.snapshot(update=True)\n self.assertEqual(p._get_count, 0)\n snap_expected = {\n 'name': name,\n 'names': names,\n 'labels': names,\n 'units': [''] * 3\n }\n for k, v in snap_expected.items():\n self.assertEqual(snap[k], v)\n\n self.assertIn(name, p.__doc__)\n\n # only in simple parameters\n self.assertFalse(hasattr(p, 'label'))\n self.assertFalse(hasattr(p, 'unit'))\n\n def test_explicit_attributes(self):\n name = 'mixed_dimensions'\n names = ['0D', '1D', '2D']\n shapes = ((), (3,), (2, 2))\n labels = ['scalar', 'vector', 'matrix']\n units = ['V', 'A', 'W']\n setpoints = [(), ((4, 5, 6),), ((7, 8), None)]\n setpoint_names = [(), ('sp1',), ('sp2', None)]\n setpoint_labels = [(), ('setpoint1',), ('setpoint2', None)]\n docstring = 'DOCS??'\n metadata = {'sizes': [1, 3, 4]}\n p = SimpleMultiParam([0, [1, 2, 3], [[4, 5], [6, 7]]],\n name, names, shapes, labels=labels, units=units,\n setpoints=setpoints,\n setpoint_names=setpoint_names,\n setpoint_labels=setpoint_labels,\n docstring=docstring, snapshot_value=True,\n metadata=metadata)\n\n self.assertEqual(p.name, name)\n self.assertEqual(p.names, names)\n self.assertEqual(p.shapes, shapes)\n\n self.assertEqual(p.labels, labels)\n self.assertEqual(p.units, units)\n self.assertEqual(p.setpoints, setpoints)\n self.assertEqual(p.setpoint_names, setpoint_names)\n # as the parameter is not attached to an instrument the full names are\n # equivalent to the setpoint_names\n self.assertEqual(p.setpoint_full_names, setpoint_names)\n self.assertEqual(p.setpoint_labels, setpoint_labels)\n\n 
self.assertEqual(p._get_count, 0)\n snap = p.snapshot(update=True)\n self.assertEqual(p._get_count, 1)\n snap_expected = {\n 'name': name,\n 'names': names,\n 'labels': labels,\n 'units': units,\n 'setpoint_names': setpoint_names,\n 'setpoint_labels': setpoint_labels,\n 'metadata': metadata,\n 'value': [0, [1, 2, 3], [[4, 5], [6, 7]]]\n }\n for k, v in snap_expected.items():\n self.assertEqual(snap[k], v)\n\n self.assertIn(name, p.__doc__)\n self.assertIn(docstring, p.__doc__)\n\n def test_has_set_get(self):\n name = 'mixed_dimensions'\n names = ['0D', '1D', '2D']\n shapes = ((), (3,), (2, 2))\n with self.assertRaises(AttributeError):\n MultiParameter(name, names, shapes)\n\n p = SimpleMultiParam([0, [1, 2, 3], [[4, 5], [6, 7]]],\n name, names, shapes)\n\n self.assertTrue(hasattr(p, 'get'))\n self.assertFalse(hasattr(p, 'set'))\n # We allow creation of Multiparameters with set to support\n # instruments that already make use of them.\n with self.assertWarns(UserWarning):\n SettableMulti([0, [1, 2, 3], [[4, 5], [6, 7]]],\n name, names, shapes)\n\n def test_full_name_s(self):\n name = 'mixed_dimensions'\n names = ['0D', '1D', '2D']\n setpoint_names = ((),\n ('setpoints_1D',),\n ('setpoints_2D_1',\n None))\n shapes = ((), (3,), (2, 2))\n\n # three cases where only name gets used for full_name\n for instrument in blank_instruments:\n p = SimpleMultiParam([0, [1, 2, 3], [[4, 5], [6, 7]]],\n name, names, shapes,\n setpoint_names=setpoint_names)\n p._instrument = instrument\n self.assertEqual(str(p), name)\n self.assertEqual(p.full_names, names)\n self.assertEqual(p.setpoint_full_names,\n ((), ('setpoints_1D',), ('setpoints_2D_1', None)))\n\n # and finally an instrument that really has a name\n p = SimpleMultiParam([0, [1, 2, 3], [[4, 5], [6, 7]]],\n name, names, shapes, setpoint_names=setpoint_names)\n p._instrument = named_instrument\n self.assertEqual(str(p), 'astro_mixed_dimensions')\n\n self.assertEqual(p.full_names, ['astro_0D', 'astro_1D', 'astro_2D'])\n 
self.assertEqual(p.setpoint_full_names,\n ((), ('astro_setpoints_1D',),\n ('astro_setpoints_2D_1', None)))\n\n def test_constructor_errors(self):\n bad_constructors = [\n {'names': 'a', 'shapes': ((3,), ())},\n {'names': ('a', 'b'), 'shapes': (3, 2)},\n {'names': ('a', 'b'), 'shapes': ((3,), ()),\n 'setpoints': [(1, 2, 3), ()]},\n {'names': ('a', 'b'), 'shapes': ((3,), ()),\n 'setpoint_names': (None, ('index',))},\n {'names': ('a', 'b'), 'shapes': ((3,), ()),\n 'setpoint_labels': (None, None, None)}\n ]\n for kwargs in bad_constructors:\n with self.subTest(**kwargs):\n with self.assertRaises(ValueError):\n SimpleMultiParam([1, 2, 3], 'p', **kwargs)\n\n\nclass TestManualParameter(TestCase):\n\n def test_bare_function(self):\n # not a use case we want to promote, but it's there...\n p = Parameter('test', get_cmd=None, set_cmd=None)\n\n def doubler(x):\n p.set(x * 2)\n\n f = Function('f', call_cmd=doubler, args=[vals.Numbers(-10, 10)])\n\n f(4)\n self.assertEqual(p.get(), 8)\n with self.assertRaises(ValueError):\n f(20)\n\n\nclass TestStandardParam(TestCase):\n def set_p(self, val):\n self._p = val\n\n def set_p_prefixed(self, val):\n self._p = 'PVAL: {:d}'.format(val)\n\n def strip_prefix(self, val):\n return int(val[6:])\n\n def get_p(self):\n return self._p\n\n def parse_set_p(self, val):\n return '{:d}'.format(val)\n\n def test_param_cmd_with_parsing(self):\n p = Parameter('p_int', get_cmd=self.get_p, get_parser=int,\n set_cmd=self.set_p, set_parser=self.parse_set_p)\n\n p(5)\n self.assertEqual(self._p, '5')\n self.assertEqual(p(), 5)\n\n def test_settable(self):\n p = Parameter('p', set_cmd=self.set_p, get_cmd=False)\n\n p(10)\n self.assertEqual(self._p, 10)\n with self.assertRaises(NotImplementedError):\n p()\n\n self.assertTrue(hasattr(p, 'set'))\n self.assertFalse(hasattr(p, 'get'))\n\n def test_gettable(self):\n p = Parameter('p', get_cmd=self.get_p)\n self._p = 21\n\n self.assertEqual(p(), 21)\n self.assertEqual(p.get(), 21)\n\n with 
self.assertRaises(NotImplementedError):\n p(10)\n\n self.assertTrue(hasattr(p, 'get'))\n self.assertFalse(hasattr(p, 'set'))\n\n def test_val_mapping_basic(self):\n p = Parameter('p', set_cmd=self.set_p, get_cmd=self.get_p,\n val_mapping={'off': 0, 'on': 1},\n vals=vals.Enum('off', 'on'))\n\n p('off')\n self.assertEqual(self._p, 0)\n self.assertEqual(p(), 'off')\n\n self._p = 1\n self.assertEqual(p(), 'on')\n\n # implicit mapping to ints\n self._p = '0'\n self.assertEqual(p(), 'off')\n\n # unrecognized response\n self._p = 2\n with self.assertRaises(KeyError):\n p()\n\n def test_val_mapping_with_parsers(self):\n # set_parser with val_mapping\n Parameter('p', set_cmd=self.set_p, get_cmd=self.get_p,\n val_mapping={'off': 0, 'on': 1},\n set_parser=self.parse_set_p)\n\n # get_parser with val_mapping\n p = Parameter('p', set_cmd=self.set_p_prefixed,\n get_cmd=self.get_p, get_parser=self.strip_prefix,\n val_mapping={'off': 0, 'on': 1},\n vals=vals.Enum('off', 'on'))\n\n p('off')\n self.assertEqual(self._p, 'PVAL: 0')\n self.assertEqual(p(), 'off')\n\n self._p = 'PVAL: 1'\n self.assertEqual(p(), 'on')\n\n\nclass TestManualParameterValMapping(TestCase):\n def setUp(self):\n self.instrument = DummyInstrument('dummy_holder')\n\n def tearDown(self):\n self.instrument.close()\n del self.instrument\n\n\n def test_val_mapping(self):\n self.instrument.add_parameter('myparameter', set_cmd=None, get_cmd=None, val_mapping={'A': 0, 'B': 1})\n self.instrument.myparameter('A')\n assert self.instrument.myparameter() == 'A'\n assert self.instrument.myparameter() == 'A'\n assert self.instrument.myparameter.raw_value == 0\n\n\n\nclass TestInstrumentRefParameter(TestCase):\n\n def setUp(self):\n self.a = DummyInstrument('dummy_holder')\n self.d = DummyInstrument('dummy')\n\n def test_get_instr(self):\n self.a.add_parameter('test', parameter_class=InstrumentRefParameter)\n\n self.a.test.set(self.d.name)\n\n self.assertEqual(self.a.test.get(), self.d.name)\n 
self.assertEqual(self.a.test.get_instr(), self.d)\n\n def tearDown(self):\n self.a.close()\n self.d.close()\n del self.a\n del self.d\n\n\nclass TestScaledParameter(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.parent_instrument = DummyInstrument('dummy')\n\n def setUp(self):\n self.target_name = 'target_parameter'\n self.target_label = 'Target Parameter'\n self.target_unit = 'V'\n\n self.target = ManualParameter(name=self.target_name, label=self.target_label,\n unit=self.target_unit, initial_value=1.0,\n instrument=self.parent_instrument)\n self.parent_instrument.add_parameter(self.target)\n self.scaler = ScaledParameter(self.target, division=1)\n\n @classmethod\n def tearDownClass(cls):\n cls.parent_instrument.close()\n del cls.parent_instrument\n\n def test_constructor(self):\n #Test the behaviour of the constructor\n\n # Require a wrapped parameter\n with self.assertRaises(TypeError):\n ScaledParameter()\n\n # Require a scaling factor\n with self.assertRaises(ValueError):\n ScaledParameter(self.target)\n\n # Require only one scaling factor\n with self.assertRaises(ValueError):\n ScaledParameter(self.target, division=1, gain=1)\n\n def test_namelabel(self):\n #Test handling of name and label\n\n # Test correct inheritance\n assert self.scaler.name == self.target_name + '_scaled'\n assert self.scaler.label == self.target_label + '_scaled'\n\n # Test correct name/label handling by the constructor\n scaled_name = 'scaled'\n scaled_label = \"Scaled parameter\"\n scaler2 = ScaledParameter(self.target, division=1, name=scaled_name, label=scaled_label)\n assert scaler2.name == scaled_name\n assert scaler2.label == scaled_label\n\n def test_unit(self):\n # Test handling of the units\n\n # Check if the unit is correctly inherited\n assert self.scaler.unit == 'V'\n\n # Check if we can change succesfully the unit\n self.scaler.unit = 'A'\n assert self.scaler.unit == 'A'\n\n # Check if unit is correctly set in the constructor\n scaler2 = 
ScaledParameter(self.target, name='scaled_value', division=1, unit='K')\n assert scaler2.unit == 'K'\n\n def test_metadata(self):\n #Test the metadata\n\n test_gain = 3\n test_unit = 'V'\n self.scaler.gain = test_gain\n self.scaler.unit = test_unit\n\n # Check if relevant fields are present in the snapshot\n snap = self.scaler.snapshot()\n snap_keys = snap.keys()\n metadata_keys = snap['metadata'].keys()\n assert 'division' in snap_keys\n assert 'gain' in snap_keys\n assert 'role' in snap_keys\n assert 'unit' in snap_keys\n assert 'variable_multiplier' in metadata_keys\n assert 'wrapped_parameter' in metadata_keys\n assert 'wrapped_instrument' in metadata_keys\n\n # Check if the fields are correct\n assert snap['gain'] == test_gain\n assert snap['division'] == 1/test_gain\n assert snap['role'] == ScaledParameter.Role.GAIN\n assert snap['unit'] == test_unit\n assert snap['metadata']['variable_multiplier'] == False\n assert snap['metadata']['wrapped_parameter'] == self.target.name\n\n def test_wrapped_parameter(self):\n #Test if the target parameter is correctly inherited\n assert self.scaler.wrapped_parameter == self.target\n\n def test_divider(self):\n test_division = 10\n test_value = 5\n\n self.scaler.division = test_division\n self.scaler(test_value)\n assert self.scaler() == test_value\n assert self.target() == test_division * test_value\n assert self.scaler.gain == 1/test_division\n assert self.scaler.role == ScaledParameter.Role.DIVISION\n\n def test_multiplier(self):\n test_multiplier= 10\n test_value = 5\n\n self.scaler.gain = test_multiplier\n self.scaler(test_value)\n assert self.scaler() == test_value\n assert self.target() == test_value / test_multiplier\n assert self.scaler.division == 1/test_multiplier\n assert self.scaler.role == ScaledParameter.Role.GAIN\n\n def test_variable_gain(self):\n test_value = 5\n\n initial_gain = 2\n variable_gain_name = 'gain'\n gain = ManualParameter(name=variable_gain_name, initial_value=initial_gain)\n self.scaler.gain 
= gain\n self.scaler(test_value)\n\n assert self.scaler() == test_value\n assert self.target() == test_value / initial_gain\n assert self.scaler.division == 1/initial_gain\n\n second_gain = 7\n gain(second_gain)\n assert self.target() == test_value / initial_gain #target value must change on scaler value change, not on gain/division\n self.scaler(test_value)\n assert self.target() == test_value / second_gain\n assert self.scaler.division == 1 / second_gain\n\n assert self.scaler.metadata['variable_multiplier'] == variable_gain_name\n\n\nclass TestSetContextManager(TestCase):\n\n def setUp(self):\n self.instrument = DummyInstrument('dummy_holder')\n self.instrument.add_parameter(\n \"a\",\n set_cmd=None,\n get_cmd=None\n )\n\n def tearDown(self):\n self.instrument.close()\n del self.instrument\n\n def test_none_value(self):\n with self.instrument.a.set_to(3):\n assert self.instrument.a.get() == 3\n assert self.instrument.a.get() is None\n\n def test_context(self):\n self.instrument.a.set(2)\n\n with self.instrument.a.set_to(3):\n assert self.instrument.a.get() == 3\n assert self.instrument.a.get() == 2\n",
"import json\nimport logging\nfrom time import monotonic\nfrom collections import OrderedDict\nfrom typing import (Callable, Union, Dict, Tuple, List, Sequence, cast,\n MutableMapping, MutableSequence, Optional, Any)\nfrom inspect import signature\nfrom numbers import Number\n\nimport numpy as np\n\nimport qcodes as qc\nfrom qcodes import Station\nfrom qcodes.instrument.parameter import ArrayParameter, _BaseParameter, \\\n Parameter, MultiParameter\nfrom qcodes.dataset.experiment_container import Experiment\nfrom qcodes.dataset.param_spec import ParamSpec\nfrom qcodes.dataset.data_set import DataSet\nfrom qcodes.utils.helpers import NumpyJSONEncoder\n\nlog = logging.getLogger(__name__)\n\narray_like_types = (tuple, list, np.ndarray)\nres_type = Tuple[Union[_BaseParameter, str],\n Union[str, int, float, np.dtype, np.ndarray]]\nsetpoints_type = Sequence[Union[str, _BaseParameter]]\nnumeric_types = Union[float, int]\n\n\nclass ParameterTypeError(Exception):\n pass\n\n\ndef is_number(thing: Any) -> bool:\n \"\"\"\n Test if an object can be converted to a number UNLESS it is a string\n \"\"\"\n if isinstance(thing, str):\n return False\n try:\n float(thing)\n return True\n except (ValueError, TypeError):\n return False\n\n\nclass DataSaver:\n \"\"\"\n The class used by the Runner context manager to handle the datasaving to\n the database.\n \"\"\"\n\n default_callback: Optional[dict] = None\n\n def __init__(self, dataset: DataSet, write_period: numeric_types,\n parameters: Dict[str, ParamSpec]) -> None:\n self._dataset = dataset\n if DataSaver.default_callback is not None \\\n and 'run_tables_subscription_callback' \\\n in DataSaver.default_callback:\n callback = DataSaver.default_callback[\n 'run_tables_subscription_callback']\n min_wait = DataSaver.default_callback[\n 'run_tables_subscription_min_wait']\n min_count = DataSaver.default_callback[\n 'run_tables_subscription_min_count']\n snapshot = dataset.get_metadata('snapshot')\n self._dataset.subscribe(callback,\n 
min_wait=min_wait,\n min_count=min_count,\n state={},\n callback_kwargs={'run_id':\n self._dataset.run_id,\n 'snapshot': snapshot})\n\n self.write_period = float(write_period)\n self.parameters = parameters\n self._known_parameters = list(parameters.keys())\n self._results: List[dict] = [] # will be filled by addResult\n self._last_save_time = monotonic()\n self._known_dependencies: Dict[str, List[str]] = {}\n for param, parspec in parameters.items():\n if parspec.depends_on != '':\n self._known_dependencies.update(\n {str(param): parspec.depends_on.split(', ')})\n\n def add_result(self, *res_tuple: res_type) -> None:\n \"\"\"\n Add a result to the measurement results. Represents a measurement\n point in the space of measurement parameters, e.g. in an experiment\n varying two voltages and measuring two currents, a measurement point\n is the four dimensional (v1, v2, c1, c2). The corresponding call\n to this function would be (e.g.)\n >> datasaver.add_result((v1, 0.1), (v2, 0.2), (c1, 5), (c2, -2.1))\n\n For better performance, this function does not immediately write to\n the database, but keeps the results in memory. Writing happens every\n `write_period` seconds and during the __exit__ method if this class.\n\n Regarding arrays: since arrays as binary blobs are (almost) worthless\n in a relational database, this function \"unravels\" arrays passed to it.\n That, in turn, forces us to impose rules on what can be saved in one\n go. Any number of scalars and any number of arrays OF THE SAME LENGTH\n can be passed to add_result. The scalars are duplicated to match the\n arrays.\n\n However, if the parameter is registered as array type the numpy arrays\n are not unraveled but stored directly for improved performance.\n\n Args:\n res_tuple: a tuple with the first element being the parameter name\n and the second element is the corresponding value(s) at this\n measurement point. 
The function takes as many tuples as there\n are results.\n\n Raises:\n ValueError: if a parameter name not registered in the parent\n Measurement object is encountered.\n ParameterTypeError: if a parameter is given a value not matching\n its type.\n \"\"\"\n res: List[res_type] = []\n\n # we iterate through the input twice. First we find any array and\n # multiparameters that needs to be unbundled and collect the names\n # of all parameters. This also allows users to call\n # add_result with the arguments in any particular order, i.e. NOT\n # enforcing that setpoints come before dependent variables.\n input_size = 1\n found_parameters: List[str] = []\n inserting_as_arrays = False\n inserting_unrolled_array = False\n\n for partial_result in res_tuple:\n parameter = partial_result[0]\n if isinstance(parameter, MultiParameter):\n # unpack parameters and potential setpoints from MultiParameter\n # unlike regular Parameters and ArrayParameters we don't want\n # to add the parameter it self only its components.\n data = partial_result[1]\n self._unbundle_multiparameter(parameter,\n data,\n res,\n found_parameters)\n else:\n res.append(partial_result)\n paramstr = str(parameter)\n found_parameters.append(paramstr)\n # unpack setpoints from array parameters and add them\n # to the res list\n if isinstance(parameter, ArrayParameter):\n self._unbundle_arrayparameter(parameter,\n res,\n found_parameters)\n\n for partial_result in res:\n parameter = partial_result[0]\n paramstr = str(parameter)\n value = partial_result[1]\n found_parameters.append(paramstr)\n inserting_this_as_array = False\n if paramstr not in self._known_parameters:\n raise ValueError(f'Can not add a result for {paramstr}, no '\n 'such parameter registered in this '\n 'measurement.')\n param_spec = self.parameters[paramstr]\n if param_spec.type == 'array':\n inserting_as_arrays = True\n inserting_this_as_array = True\n if any(isinstance(value, typ) for typ in array_like_types):\n\n value = cast(np.ndarray, 
partial_result[1])\n value = np.atleast_1d(value)\n array_size = len(value.ravel())\n if param_spec.type != 'array' and array_size > 1:\n inserting_unrolled_array = True\n if input_size > 1 and input_size != array_size:\n raise ValueError('Incompatible array dimensions. Trying to'\n f' add arrays of dimension {input_size} '\n f'and {array_size}')\n else:\n input_size = array_size\n elif is_number(value):\n if inserting_this_as_array:\n raise ValueError(\"Trying to insert into an ArrayType with \"\n \"a scalar value\")\n if param_spec.type == 'text':\n raise ValueError(f\"It is not possible to save a numeric \"\n f\"value for parameter {paramstr!r} \"\n f\"because its type class is \"\n f\"'text', not 'numeric'.\")\n elif isinstance(value, str):\n if param_spec.type != 'text':\n raise ValueError(f\"It is not possible to save a string \"\n f\"value for parameter {paramstr!r} \"\n f\"because its type class is \"\n f\"{param_spec.type!r}, not 'text'.\")\n else:\n raise ValueError('Wrong value type received. '\n f'Got {type(value)}, but only int, float, '\n 'str, tuple, list, and np.ndarray is '\n 'allowed.')\n\n # Now check for missing setpoints\n if paramstr in self._known_dependencies.keys():\n stuffweneed = set(self._known_dependencies[paramstr])\n stuffwehave = set(found_parameters)\n if not stuffweneed.issubset(stuffwehave):\n raise ValueError('Can not add this result; missing '\n f'setpoint values for {paramstr}:'\n f' {stuffweneed}.'\n f' Values only given for'\n f' {found_parameters}.')\n\n if inserting_unrolled_array and inserting_as_arrays:\n raise RuntimeError(\"Trying to insert multiple data values both \"\n \"in array from and as numeric. 
This is not \"\n \"possible.\")\n elif inserting_as_arrays:\n input_size = 1\n\n self._append_results(res, input_size)\n\n if monotonic() - self._last_save_time > self.write_period:\n self.flush_data_to_database()\n self._last_save_time = monotonic()\n\n def _append_results(self, res: Sequence[res_type],\n input_size: int) -> None:\n \"\"\"\n A private method to add the data to actual queue of data to be written.\n\n Args:\n res: A sequence of the data to be added\n input_size: The length of the data to be added. 1 if its\n to be inserted as arrays.\n \"\"\"\n for index in range(input_size):\n res_dict = {}\n for partial_result in res:\n param = str(partial_result[0])\n value = partial_result[1]\n param_spec = self.parameters[param]\n if param_spec.type == 'array' and index == 0:\n res_dict[param] = value\n elif param_spec.type != 'array':\n # For compatibility with the old Loop, setpoints are\n # tuples of numbers (usually tuple(np.linspace(...))\n if hasattr(value, '__len__') and not isinstance(value, str):\n value = cast(Union[Sequence, np.ndarray], value)\n if isinstance(value, np.ndarray):\n # this is significantly faster than atleast_1d\n # espcially for non 0D arrays\n # because we already know that this is a numpy\n # array and just one numpy array. atleast_1d\n # performs additional checks.\n if value.ndim == 0:\n value = value.reshape(1)\n value = value.ravel()\n res_dict[param] = value[index]\n else:\n res_dict[param] = value\n if len(res_dict) > 0:\n self._results.append(res_dict)\n\n def _unbundle_arrayparameter(self,\n parameter: ArrayParameter,\n res: List[res_type],\n found_parameters: List[str]) -> None:\n \"\"\"\n Extract the setpoints from an ArrayParameter and add them to results\n as a regular parameter tuple.\n\n Args:\n parameter: The ArrayParameter to extract setpoints from.\n res: The result list to add to. 
Note that this is modified inplace\n found_parameters: The list of all parameters that we know of by now\n Note that this is modified in place.\n \"\"\"\n sp_names = parameter.setpoint_full_names\n fallback_sp_name = f\"{parameter.full_name}_setpoint\"\n self._unbundle_setpoints_from_param(parameter, sp_names,\n fallback_sp_name,\n parameter.setpoints,\n res, found_parameters)\n\n def _unbundle_setpoints_from_param(self, parameter: _BaseParameter,\n sp_names: Sequence[str],\n fallback_sp_name: str,\n setpoints: Sequence,\n res: List[res_type],\n found_parameters: List[str]):\n \"\"\"\n Private function to unbundle setpoints from an ArrayParameter or\n a subset of a MultiParameter.\n\n Args:\n parameter:\n sp_names: Names of the setpoint axes\n fallback_sp_name: Fallback name for setpoints in case sp_names\n is None. The axis num is appended to this name to ensure all\n setpoint axes names are unique.\n setpoints: The actual setpoints i.e. `parameter.setpoints` for an\n ArrayParameter and `parameter.setpoints[i]` for a MultiParameter\n res: The result list the unpacked setpoints are added too.\n Note that this will be modified in place.\n found_parameters: The list of all parameters that we know of by now\n This is modified in place with new parameters found here.\n \"\"\"\n setpoint_axes = []\n setpoint_meta = []\n if setpoints is None:\n raise RuntimeError(f\"{parameter.full_name} is an {type(parameter)} \"\n f\"without setpoints. Cannot handle this.\")\n\n for i, sps in enumerate(setpoints):\n if sp_names is not None:\n spname = sp_names[i]\n else:\n spname = f'{fallback_sp_name}_{i}'\n\n if spname not in self.parameters.keys():\n raise RuntimeError('No setpoints registered for '\n f'{type(parameter)} {parameter.full_name}!')\n sps = np.array(sps)\n while sps.ndim > 1:\n # The outermost setpoint axis or an nD param is nD\n # but the innermost is 1D. 
In all cases we just need\n # the axis along one dim, the innermost one.\n sps = sps[0]\n\n setpoint_meta.append(spname)\n found_parameters.append(spname)\n setpoint_axes.append(sps)\n\n output_grids = np.meshgrid(*setpoint_axes, indexing='ij')\n for grid, meta in zip(output_grids, setpoint_meta):\n res.append((meta, grid))\n\n def _unbundle_multiparameter(self,\n parameter: MultiParameter,\n data: Union[tuple, list, np.ndarray],\n res: List[res_type],\n found_parameters: List[str]) -> None:\n \"\"\"\n Extract the subarrays and setpoints from an MultiParameter and\n add them to res as a regular parameter tuple.\n\n Args:\n parameter: The MultiParameter to extract from\n data: The acquired data for this parameter\n res: The result list that the unpacked data and setpoints\n is added too. Note that this will be modified in place.\n found_parameters: The list of all parameters that we know of by now\n This is modified in place with new parameters found here.\n \"\"\"\n for i in range(len(parameter.shapes)):\n shape = parameter.shapes[i]\n res.append((parameter.names[i], data[i]))\n if shape != ():\n # array parameter like part of the multiparameter\n # need to find setpoints too\n fallback_sp_name = f'{parameter.full_names[i]}_setpoint'\n\n if parameter.setpoint_full_names[i] is not None:\n sp_names = parameter.setpoint_full_names[i]\n else:\n sp_names = None\n\n self._unbundle_setpoints_from_param(parameter, sp_names,\n fallback_sp_name,\n parameter.setpoints[i],\n res, found_parameters)\n\n def flush_data_to_database(self) -> None:\n \"\"\"\n Write the in-memory results to the database.\n \"\"\"\n log.debug('Flushing to database')\n if self._results != []:\n try:\n write_point = self._dataset.add_results(self._results)\n log.debug(f'Successfully wrote from index {write_point}')\n self._results = []\n except Exception as e:\n log.warning(f'Could not commit to database; {e}')\n else:\n log.debug('No results to flush')\n\n @property\n def run_id(self) -> int:\n return 
self._dataset.run_id\n\n @property\n def points_written(self) -> int:\n return self._dataset.number_of_results\n\n @property\n def dataset(self):\n return self._dataset\n\n\nclass Runner:\n \"\"\"\n Context manager for the measurement.\n\n Lives inside a Measurement and should never be instantiated\n outside a Measurement.\n\n This context manager handles all the dirty business of writing data\n to the database. Additionally, it may perform experiment bootstrapping\n and clean-up after the measurement.\n \"\"\"\n\n def __init__(\n self, enteractions: List, exitactions: List,\n experiment: Experiment = None, station: Station = None,\n write_period: numeric_types = None,\n parameters: Dict[str, ParamSpec] = None,\n name: str = '',\n subscribers: Sequence[Tuple[Callable,\n Union[MutableSequence,\n MutableMapping]]] = None) -> None:\n\n self.enteractions = enteractions\n self.exitactions = exitactions\n self.subscribers: Sequence[Tuple[Callable,\n Union[MutableSequence,\n MutableMapping]]]\n if subscribers is None:\n self.subscribers = []\n else:\n self.subscribers = subscribers\n self.experiment = experiment\n self.station = station\n self.parameters = parameters\n # here we use 5 s as a sane default, but that value should perhaps\n # be read from some config file\n self.write_period = float(write_period) \\\n if write_period is not None else 5.0\n self.name = name if name else 'results'\n\n def __enter__(self) -> DataSaver:\n # TODO: should user actions really precede the dataset?\n # first do whatever bootstrapping the user specified\n for func, args in self.enteractions:\n func(*args)\n\n # next set up the \"datasaver\"\n if self.experiment is not None:\n self.ds = qc.new_data_set(\n self.name, self.experiment.exp_id, conn=self.experiment.conn\n )\n else:\n self.ds = qc.new_data_set(self.name)\n\n # .. 
and give the dataset a snapshot as metadata\n if self.station is None:\n station = qc.Station.default\n else:\n station = self.station\n\n if station:\n self.ds.add_metadata('snapshot',\n json.dumps({'station': station.snapshot()},\n cls=NumpyJSONEncoder)\n )\n\n if self.parameters is not None:\n for paramspec in self.parameters.values():\n self.ds.add_parameter(paramspec)\n else:\n raise RuntimeError(\"No parameters supplied\")\n\n # register all subscribers\n for (callble, state) in self.subscribers:\n # We register with minimal waiting time.\n # That should make all subscribers be called when data is flushed\n # to the database\n log.debug(f'Subscribing callable {callble} with state {state}')\n self.ds.subscribe(callble, min_wait=0, min_count=1, state=state)\n\n print(f'Starting experimental run with id: {self.ds.run_id}')\n\n self.datasaver = DataSaver(dataset=self.ds,\n write_period=self.write_period,\n parameters=self.parameters)\n\n return self.datasaver\n\n def __exit__(self, exception_type, exception_value, traceback) -> None:\n\n self.datasaver.flush_data_to_database()\n\n # perform the \"teardown\" events\n for func, args in self.exitactions:\n func(*args)\n\n # and finally mark the dataset as closed, thus\n # finishing the measurement\n self.ds.mark_complete()\n\n self.ds.unsubscribe_all()\n\n\nclass Measurement:\n \"\"\"\n Measurement procedure container\n\n Args:\n exp: Specify the experiment to use. If not given\n the default one is used.\n station: The QCoDeS station to snapshot. 
If not given, the\n default one is used.\n \"\"\"\n\n def __init__(self, exp: Optional[Experiment] = None,\n station: Optional[qc.Station] = None) -> None:\n self.exitactions: List[Tuple[Callable, Sequence]] = []\n self.enteractions: List[Tuple[Callable, Sequence]] = []\n self.subscribers: List[Tuple[Callable, Union[MutableSequence,\n MutableMapping]]] = []\n self.experiment = exp\n self.station = station\n self.parameters: Dict[str, ParamSpec] = OrderedDict()\n self._write_period: Optional[float] = None\n self.name = ''\n\n @property\n def write_period(self) -> float:\n return self._write_period\n\n @write_period.setter\n def write_period(self, wp: numeric_types) -> None:\n if not isinstance(wp, Number):\n raise ValueError('The write period must be a number (of seconds).')\n wp_float = float(wp)\n if wp_float < 1e-3:\n raise ValueError('The write period must be at least 1 ms.')\n self._write_period = wp_float\n\n def _registration_validation(\n self, name: str, setpoints: Sequence[str] = None,\n basis: Sequence[str] = None) -> Tuple[List[str], List[str]]:\n \"\"\"\n Helper function to do all the validation in terms of dependencies\n when adding parameters, e.g. that no setpoints have setpoints etc.\n\n Called by register_parameter and register_custom_parameter\n\n Args:\n name: Name of the parameter to register\n setpoints: name(s) of the setpoint parameter(s)\n basis: name(s) of the parameter(s) that this parameter is\n inferred from\n \"\"\"\n\n # now handle setpoints\n depends_on = []\n if setpoints:\n for sp in setpoints:\n if sp not in list(self.parameters.keys()):\n raise ValueError(f'Unknown setpoint: {sp}.'\n ' Please register that parameter first.')\n elif sp == name:\n raise ValueError('A parameter can not have itself as '\n 'setpoint.')\n elif self.parameters[sp].depends_on != '':\n raise ValueError(\"A parameter's setpoints can not have \"\n f\"setpoints themselves. 
{sp} depends on\"\n f\" {self.parameters[sp].depends_on}\")\n else:\n depends_on.append(sp)\n\n # now handle inferred parameters\n inf_from = []\n if basis:\n for inff in basis:\n if inff not in list(self.parameters.keys()):\n raise ValueError(f'Unknown basis parameter: {inff}.'\n ' Please register that parameter first.')\n elif inff == name:\n raise ValueError('A parameter can not be inferred from'\n 'itself.')\n else:\n inf_from.append(inff)\n\n return depends_on, inf_from\n\n def register_parameter(\n self, parameter: _BaseParameter,\n setpoints: setpoints_type = None,\n basis: setpoints_type = None,\n paramtype: str = 'numeric') -> None:\n \"\"\"\n Add QCoDeS Parameter to the dataset produced by running this\n measurement.\n\n Args:\n parameter: The parameter to add\n setpoints: The Parameter representing the setpoints for this\n parameter. If this parameter is a setpoint,\n it should be left blank\n basis: The parameters that this parameter is inferred from. If\n this parameter is not inferred from any other parameters,\n this should be left blank.\n paramtype: type of the parameter, i.e. the SQL storage class\n \"\"\"\n # input validation\n if paramtype not in ParamSpec.allowed_types:\n raise RuntimeError(\"Trying to register a parameter with type \"\n f\"{paramtype}. However, only \"\n f\"{ParamSpec.allowed_types} are supported.\")\n if not isinstance(parameter, _BaseParameter):\n raise ValueError('Can not register object of type {}. Can only '\n 'register a QCoDeS Parameter.'\n ''.format(type(parameter)))\n # perhaps users will want a different name? 
But the name must be unique\n # on a per-run basis\n # we also use the name below, but perhaps is is better to have\n # a more robust Parameter2String function?\n name = str(parameter)\n if isinstance(parameter, ArrayParameter):\n self._register_arrayparameter(parameter,\n setpoints,\n basis,\n paramtype)\n elif isinstance(parameter, MultiParameter):\n self._register_multiparameter(parameter,\n setpoints,\n basis,\n paramtype,\n )\n elif isinstance(parameter, Parameter):\n self._register_parameter(name,\n parameter.label,\n parameter.unit,\n setpoints,\n basis, paramtype)\n else:\n raise RuntimeError(\"Does not know how to register a parameter\"\n f\"of type {type(parameter)}\")\n\n def _register_parameter(self, name: str,\n label: str,\n unit: str,\n setpoints: setpoints_type,\n basis: setpoints_type,\n paramtype: str) -> None:\n \"\"\"\n Generate ParamSpecs and register them for an individual parameter\n \"\"\"\n\n if setpoints is not None:\n sp_strings = [str(sp) for sp in setpoints]\n else:\n sp_strings = []\n\n if basis is not None:\n bs_strings = [str(bs) for bs in basis]\n else:\n bs_strings = []\n\n # validate all dependencies\n depends_on, inf_from = self._registration_validation(name, sp_strings,\n bs_strings)\n paramspec = ParamSpec(name=name,\n paramtype=paramtype,\n label=label,\n unit=unit,\n inferred_from=inf_from,\n depends_on=depends_on)\n # ensure the correct order\n if name in self.parameters.keys():\n self.parameters.pop(name)\n self.parameters[name] = paramspec\n log.info(f'Registered {name} in the Measurement.')\n\n def _register_arrayparameter(self,\n parameter: ArrayParameter,\n setpoints: setpoints_type,\n basis: setpoints_type,\n paramtype: str, ) -> None:\n \"\"\"\n Register an Array paramter and the setpoints belonging to the\n ArrayParameter\n \"\"\"\n name = str(parameter)\n my_setpoints = list(setpoints) if setpoints else []\n for i in range(len(parameter.shape)):\n if parameter.setpoint_full_names is not None and \\\n 
parameter.setpoint_full_names[i] is not None:\n spname = parameter.setpoint_full_names[i]\n else:\n spname = f'{name}_setpoint_{i}'\n if parameter.setpoint_labels:\n splabel = parameter.setpoint_labels[i]\n else:\n splabel = ''\n if parameter.setpoint_units:\n spunit = parameter.setpoint_units[i]\n else:\n spunit = ''\n\n sp = ParamSpec(name=spname, paramtype=paramtype,\n label=splabel, unit=spunit)\n\n self.parameters[spname] = sp\n\n my_setpoints += [spname]\n\n self._register_parameter(name,\n parameter.label,\n parameter.unit,\n my_setpoints,\n basis,\n paramtype)\n\n def _register_multiparameter(self,\n multiparameter: MultiParameter,\n setpoints: setpoints_type,\n basis: setpoints_type,\n paramtype: str) -> None:\n \"\"\"\n Find the individual multiparameter components and their setpoints\n and register these\n \"\"\"\n setpoints_lists = []\n for i in range(len(multiparameter.shapes)):\n shape = multiparameter.shapes[i]\n name = multiparameter.full_names[i]\n if shape is ():\n my_setpoints = setpoints\n else:\n my_setpoints = list(setpoints) if setpoints else []\n for j in range(len(shape)):\n if multiparameter.setpoint_full_names is not None and \\\n multiparameter.setpoint_full_names[i] is not None:\n spname = multiparameter.setpoint_full_names[i][j]\n else:\n spname = f'{name}_setpoint_{j}'\n if multiparameter.setpoint_labels is not None and \\\n multiparameter.setpoint_labels[i] is not None:\n splabel = multiparameter.setpoint_labels[i][j]\n else:\n splabel = ''\n if multiparameter.setpoint_units is not None and \\\n multiparameter.setpoint_units[i] is not None:\n spunit = multiparameter.setpoint_units[i][j]\n else:\n spunit = ''\n\n sp = ParamSpec(name=spname, paramtype=paramtype,\n label=splabel, unit=spunit)\n\n self.parameters[spname] = sp\n my_setpoints += [spname]\n\n setpoints_lists.append(my_setpoints)\n\n for i, setpoints in enumerate(setpoints_lists):\n self._register_parameter(multiparameter.names[i],\n multiparameter.labels[i],\n 
multiparameter.units[i],\n setpoints,\n basis,\n paramtype)\n\n def register_custom_parameter(\n self, name: str,\n label: str = None, unit: str = None,\n basis: setpoints_type = None,\n setpoints: setpoints_type = None,\n paramtype: str = 'numeric') -> None:\n \"\"\"\n Register a custom parameter with this measurement\n\n Args:\n name: The name that this parameter will have in the dataset. Must\n be unique (will overwrite an existing parameter with the same\n name!)\n label: The label\n unit: The unit\n basis: A list of either QCoDeS Parameters or the names\n of parameters already registered in the measurement that\n this parameter is inferred from\n setpoints: A list of either QCoDeS Parameters or the names of\n of parameters already registered in the measurement that\n are the setpoints of this parameter\n paramtype: type of the parameter, i.e. the SQL storage class\n \"\"\"\n self._register_parameter(name,\n label,\n unit,\n setpoints,\n basis,\n paramtype)\n\n def unregister_parameter(self,\n parameter: setpoints_type) -> None:\n \"\"\"\n Remove a custom/QCoDeS parameter from the dataset produced by\n running this measurement\n \"\"\"\n if isinstance(parameter, _BaseParameter):\n param = str(parameter)\n elif isinstance(parameter, str):\n param = parameter\n else:\n raise ValueError('Wrong input type. 
Must be a QCoDeS parameter or'\n ' the name (a string) of a parameter.')\n\n if param not in self.parameters:\n log.info(f'Tried to unregister {param}, but it was not'\n 'registered.')\n return\n\n for name, paramspec in self.parameters.items():\n if param in paramspec.depends_on:\n raise ValueError(f'Can not unregister {param}, it is a '\n f'setpoint for {name}')\n if param in paramspec.inferred_from:\n raise ValueError(f'Can not unregister {param}, it is a '\n f'basis for {name}')\n\n self.parameters.pop(param)\n log.info(f'Removed {param} from Measurement.')\n\n def add_before_run(self, func: Callable, args: tuple) -> None:\n \"\"\"\n Add an action to be performed before the measurement.\n\n Args:\n func: Function to be performed\n args: The arguments to said function\n \"\"\"\n # some tentative cheap checking\n nargs = len(signature(func).parameters)\n if len(args) != nargs:\n raise ValueError('Mismatch between function call signature and '\n 'the provided arguments.')\n\n self.enteractions.append((func, args))\n\n def add_after_run(self, func: Callable, args: tuple) -> None:\n \"\"\"\n Add an action to be performed after the measurement.\n\n Args:\n func: Function to be performed\n args: The arguments to said function\n \"\"\"\n # some tentative cheap checking\n nargs = len(signature(func).parameters)\n if len(args) != nargs:\n raise ValueError('Mismatch between function call signature and '\n 'the provided arguments.')\n\n self.exitactions.append((func, args))\n\n def add_subscriber(self,\n func: Callable,\n state: Union[MutableSequence, MutableMapping]) -> None:\n \"\"\"\n Add a subscriber to the dataset of the measurement.\n\n Args:\n func: A function taking three positional arguments: a list of\n tuples of parameter values, an integer, a mutable variable\n (list or dict) to hold state/writes updates to.\n state: The variable to hold the state.\n \"\"\"\n self.subscribers.append((func, state))\n\n def run(self) -> Runner:\n \"\"\"\n Returns the context 
manager for the experimental run\n \"\"\"\n return Runner(self.enteractions, self.exitactions,\n self.experiment, station=self.station,\n write_period=self._write_period,\n parameters=self.parameters,\n name=self.name,\n subscribers=self.subscribers)\n"
] |
[
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.linspace"
],
[
"numpy.atleast_1d",
"numpy.array",
"numpy.meshgrid"
]
] |
countywest/progressive_growing_of_points
|
[
"17a7641cdf73f22e60eaf2ca378ac74e9a4d4572"
] |
[
"models/auto_encoder.py"
] |
[
"import torch\nimport torch.nn as nn\nimport os\nimport shutil\nfrom layers.pointnet import pointnet\nfrom layers.srtdecoder import SRTDecoder\nfrom layers.srtdecoder_pg import SRTDecoderPG\nfrom layers.mrtdecoder import MRTDecoder\nfrom layers.mrtdecoder_pg import MRTDecoderPG\nfrom layers.mrtdecoder_pg2 import MRTDecoderPGV2\nfrom layers.topnet import topnet\nfrom layers.topnet_pg import topnetPG\nfrom layers.treegcn import TreeGCNGenerator\nfrom layers.treegcn_pg import TreeGCNGeneratorPG\n\nclass AE(nn.Module):\n def __init__(self):\n super(AE, self).__init__()\n\n def get_encoder(self, config):\n if config['model']['encoder']['type'] == \"pointnet\":\n GFV_dim = config['model']['encoder']['GFV_dim']\n last_feat_dim = config['model']['encoder']['pointnet_hp']['feat_dims_list'][-1]\n assert GFV_dim == last_feat_dim, 'GFV_dim MUST be equal to last feature dimension.'\n\n kwargs = {\n 'feat_dims_list': config['model']['encoder']['pointnet_hp']['feat_dims_list']\n }\n return pointnet(**kwargs)\n else:\n raise NotImplementedError\n\n def get_decoder(self, config):\n pass\n\n def load_ae(self, ckpt_path):\n data = torch.load(ckpt_path)\n self.encoder.load_state_dict(data['encoder_weights'])\n self.decoder.load_state_dict(data['decoder_weights'])\n global_step = data['global_step']\n return global_step\n\n def save_ae(self, global_step, best_valid_loss, is_best, ckpt_dir):\n saving_contents = {\n 'global_step': global_step,\n 'encoder_weights': self.encoder.state_dict(),\n 'decoder_weights': self.decoder.state_dict(),\n 'best_valid_loss': best_valid_loss\n }\n torch.save(saving_contents, os.path.join(ckpt_dir, '%06d' % global_step + '.pth'))\n if is_best:\n src_path = os.path.join(ckpt_dir, '%06d' % global_step + '.pth')\n target_path = os.path.join(ckpt_dir, 'best.pth')\n shutil.copy(src_path, target_path)\n\n\nclass AENormal(AE):\n def __init__(self, config):\n super(AENormal, self).__init__()\n self.encoder = self.get_encoder(config)\n self.decoder = 
self.get_decoder(config)\n self.encoder_type = config['model']['encoder']['type']\n self.decoder_type = config['model']['decoder']['type']\n\n def get_decoder(self, config):\n if config['model']['decoder']['type'] == 'srtdecoder':\n kwargs = {\n 'z_dim': config['model']['encoder']['GFV_dim'],\n 'nlevels': config['model']['decoder']['srtdecoder_hp']['nlevels'],\n 'feat_dims': config['model']['decoder']['srtdecoder_hp']['feat_dims'],\n 'num_output_points': config['model']['decoder']['srtdecoder_hp']['num_output_points']\n }\n return SRTDecoder(**kwargs)\n elif config['model']['decoder']['type'] == 'mrtdecoder':\n kwargs = {\n 'z_dim': config['model']['encoder']['GFV_dim'],\n 'nlevels': config['model']['decoder']['mrtdecoder_hp']['nlevels'],\n 'feat_dims': config['model']['decoder']['mrtdecoder_hp']['feat_dims'],\n 'num_output_points': config['model']['decoder']['mrtdecoder_hp']['num_output_points']\n }\n return MRTDecoder(**kwargs)\n elif config['model']['decoder']['type'] == 'topnet':\n kwargs = {\n 'z_dim': config['model']['encoder']['GFV_dim'],\n 'nlevels': config['model']['decoder']['topnet_hp']['nlevels'],\n 'node_feat_dim': config['model']['decoder']['topnet_hp']['node_feat_dim'],\n 'num_output_points': config['model']['decoder']['topnet_hp']['num_output_points']\n }\n return topnet(**kwargs)\n elif config['model']['decoder']['type'] == 'treegcn':\n kwargs = {\n 'features': config['model']['decoder']['treegcn_hp']['G_FEAT'],\n 'degrees': config['model']['decoder']['treegcn_hp']['DEGREE'],\n 'support': config['model']['decoder']['treegcn_hp']['support']\n }\n return TreeGCNGenerator(**kwargs)\n else:\n raise NotImplementedError\n\n def forward(self, x): # x: (B, N, 3)\n batch_size, num_points = x.shape[0], x.shape[1]\n if self.encoder_type == 'pointnet':\n # encoding x\n gfv = self.encoder(x)\n else:\n raise NotImplementedError\n\n out = self.decoder(gfv)\n return out\n\nclass AEPG(AE):\n def __init__(self, config):\n super(AEPG, self).__init__()\n self.encoder 
= self.get_encoder(config)\n self.decoder = self.get_decoder(config)\n self.encoder_type = config['model']['encoder']['type']\n self.decoder_type = config['model']['decoder']['type']\n\n def get_decoder(self, config):\n if config['model']['decoder']['type'] == 'srtdecoder':\n kwargs = {\n 'z_dim': config['model']['encoder']['GFV_dim'],\n 'nlevels': config['model']['decoder']['srtdecoder_hp']['nlevels'],\n 'feat_dims': config['model']['decoder']['srtdecoder_hp']['feat_dims'],\n 'num_output_points': config['model']['decoder']['srtdecoder_hp']['num_output_points']\n }\n return SRTDecoderPG(**kwargs)\n elif config['model']['decoder']['type'] == 'mrtdecoder':\n kwargs = {\n 'z_dim': config['model']['encoder']['GFV_dim'],\n 'nlevels': config['model']['decoder']['mrtdecoder_hp']['nlevels'],\n 'feat_dims': config['model']['decoder']['mrtdecoder_hp']['feat_dims'],\n 'num_output_points': config['model']['decoder']['mrtdecoder_hp']['num_output_points']\n }\n return MRTDecoderPG(**kwargs)\n elif config['model']['decoder']['type'] == 'mrtdecoder_pgv2':\n kwargs = {\n 'z_dim': config['model']['encoder']['GFV_dim'],\n 'nlevels': config['model']['decoder']['mrtdecoder_hp']['nlevels'],\n 'feat_dims': config['model']['decoder']['mrtdecoder_hp']['feat_dims'],\n 'num_output_points': config['model']['decoder']['mrtdecoder_hp']['num_output_points']\n }\n return MRTDecoderPGV2(**kwargs)\n elif config['model']['decoder']['type'] == 'topnet':\n kwargs = {\n 'z_dim': config['model']['encoder']['GFV_dim'],\n 'nlevels': config['model']['decoder']['topnet_hp']['nlevels'],\n 'node_feat_dim': config['model']['decoder']['topnet_hp']['node_feat_dim'],\n 'num_output_points': config['model']['decoder']['topnet_hp']['num_output_points']\n }\n return topnetPG(**kwargs)\n elif config['model']['decoder']['type'] == 'treegcn':\n kwargs = {\n 'features': config['model']['decoder']['treegcn_hp']['G_FEAT'],\n 'degrees': config['model']['decoder']['treegcn_hp']['DEGREE'],\n 'support': 
config['model']['decoder']['treegcn_hp']['support']\n }\n return TreeGCNGeneratorPG(**kwargs)\n else:\n raise NotImplementedError\n\n def forward(self, x, phase, alpha): # x: (B, N, 3)\n batch_size, num_points = x.shape[0], x.shape[1]\n if self.encoder_type == 'pointnet':\n # encoding x\n gfv = self.encoder(x)\n else:\n raise NotImplementedError\n\n # decoding\n out = self.decoder(gfv, phase, alpha)\n\n return out\n\ndef get_autoencoder(config):\n if config['train_setting']['pg_on']:\n return AEPG(config)\n else:\n return AENormal(config)"
] |
[
[
"torch.load"
]
] |
dohmatob/POT
|
[
"42d0aa94e7cb49711a646fe9b263a86cdb817161",
"42d0aa94e7cb49711a646fe9b263a86cdb817161"
] |
[
"test/test_plot.py",
"docs/source/auto_examples/plot_otda_semi_supervised.py"
] |
[
"\"\"\"Tests for module plot for visualization \"\"\"\n\n# Author: Remi Flamary <remi.flamary@unice.fr>\n#\n# License: MIT License\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\n\n\ndef test_plot1D_mat():\n\n import ot\n\n n_bins = 100 # nb bins\n\n # bin positions\n x = np.arange(n_bins, dtype=np.float64)\n\n # Gaussian distributions\n a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std\n b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)\n\n # loss matrix\n M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))\n M /= M.max()\n\n ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')\n\n\ndef test_plot2D_samples_mat():\n\n import ot\n\n n_bins = 50 # nb samples\n\n mu_s = np.array([0, 0])\n cov_s = np.array([[1, 0], [0, 1]])\n\n mu_t = np.array([4, 4])\n cov_t = np.array([[1, -.8], [-.8, 1]])\n\n xs = ot.datasets.get_2D_samples_gauss(n_bins, mu_s, cov_s)\n xt = ot.datasets.get_2D_samples_gauss(n_bins, mu_t, cov_t)\n\n G = 1.0 * (np.random.rand(n_bins, n_bins) < 0.01)\n\n ot.plot.plot2D_samples_mat(xs, xt, G, thr=1e-5)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n============================================\nOTDA unsupervised vs semi-supervised setting\n============================================\n\nThis example introduces a semi supervised domain adaptation in a 2D setting.\nIt explicits the problem of semi supervised domain adaptation and introduces\nsome optimal transport approaches to solve it.\n\nQuantities such as optimal couplings, greater coupling coefficients and\ntransported samples are represented in order to give a visual understanding\nof what the transport methods are doing.\n\"\"\"\n\n# Authors: Remi Flamary <remi.flamary@unice.fr>\n# Stanislas Chambon <stan.chambon@gmail.com>\n#\n# License: MIT License\n\nimport matplotlib.pylab as pl\nimport ot\n\n\n##############################################################################\n# Generate data\n# -------------\n\nn_samples_source = 150\nn_samples_target = 150\n\nXs, ys = ot.datasets.get_data_classif('3gauss', n_samples_source)\nXt, yt = ot.datasets.get_data_classif('3gauss2', n_samples_target)\n\n\n##############################################################################\n# Transport source samples onto target samples\n# --------------------------------------------\n\n\n# unsupervised domain adaptation\not_sinkhorn_un = ot.da.SinkhornTransport(reg_e=1e-1)\not_sinkhorn_un.fit(Xs=Xs, Xt=Xt)\ntransp_Xs_sinkhorn_un = ot_sinkhorn_un.transform(Xs=Xs)\n\n# semi-supervised domain adaptation\not_sinkhorn_semi = ot.da.SinkhornTransport(reg_e=1e-1)\not_sinkhorn_semi.fit(Xs=Xs, Xt=Xt, ys=ys, yt=yt)\ntransp_Xs_sinkhorn_semi = ot_sinkhorn_semi.transform(Xs=Xs)\n\n# semi supervised DA uses available labaled target samples to modify the cost\n# matrix involved in the OT problem. The cost of transporting a source sample\n# of class A onto a target sample of class B != A is set to infinite, or a\n# very large value\n\n# note that in the present case we consider that all the target samples are\n# labeled. 
For daily applications, some target sample might not have labels,\n# in this case the element of yt corresponding to these samples should be\n# filled with -1.\n\n# Warning: we recall that -1 cannot be used as a class label\n\n\n##############################################################################\n# Fig 1 : plots source and target samples + matrix of pairwise distance\n# ---------------------------------------------------------------------\n\npl.figure(1, figsize=(10, 10))\npl.subplot(2, 2, 1)\npl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')\npl.xticks([])\npl.yticks([])\npl.legend(loc=0)\npl.title('Source samples')\n\npl.subplot(2, 2, 2)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')\npl.xticks([])\npl.yticks([])\npl.legend(loc=0)\npl.title('Target samples')\n\npl.subplot(2, 2, 3)\npl.imshow(ot_sinkhorn_un.cost_, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Cost matrix - unsupervised DA')\n\npl.subplot(2, 2, 4)\npl.imshow(ot_sinkhorn_semi.cost_, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Cost matrix - semisupervised DA')\n\npl.tight_layout()\n\n# the optimal coupling in the semi-supervised DA case will exhibit \" shape\n# similar\" to the cost matrix, (block diagonal matrix)\n\n\n##############################################################################\n# Fig 2 : plots optimal couplings for the different methods\n# ---------------------------------------------------------\n\npl.figure(2, figsize=(8, 4))\n\npl.subplot(1, 2, 1)\npl.imshow(ot_sinkhorn_un.coupling_, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nUnsupervised DA')\n\npl.subplot(1, 2, 2)\npl.imshow(ot_sinkhorn_semi.coupling_, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nSemi-supervised DA')\n\npl.tight_layout()\n\n\n##############################################################################\n# Fig 3 : plot 
transported samples\n# --------------------------------\n\n# display transported samples\npl.figure(4, figsize=(8, 4))\npl.subplot(1, 2, 1)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.5)\npl.scatter(transp_Xs_sinkhorn_un[:, 0], transp_Xs_sinkhorn_un[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.title('Transported samples\\nEmdTransport')\npl.legend(loc=0)\npl.xticks([])\npl.yticks([])\n\npl.subplot(1, 2, 2)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.5)\npl.scatter(transp_Xs_sinkhorn_semi[:, 0], transp_Xs_sinkhorn_semi[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.title('Transported samples\\nSinkhornTransport')\npl.xticks([])\npl.yticks([])\n\npl.tight_layout()\npl.show()\n"
] |
[
[
"matplotlib.use",
"numpy.array",
"numpy.arange",
"numpy.random.rand"
],
[
"matplotlib.pylab.legend",
"matplotlib.pylab.show",
"matplotlib.pylab.figure",
"matplotlib.pylab.subplot",
"matplotlib.pylab.yticks",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.title",
"matplotlib.pylab.xticks",
"matplotlib.pylab.imshow",
"matplotlib.pylab.scatter"
]
] |
theo-brown/ahrs
|
[
"cd9c9e0bbf9db7fd67a297e1aafa8518bf17050d"
] |
[
"test_data/barometer_kalman.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nfrom kalman_filter import KalmanFilter\n\nraw_data = np.loadtxt(\"barometer_data.txt\")\n# Truncate raw data (it's super long)\nraw_data = raw_data[:raw_data.size//4]\nraw_data_step = np.loadtxt(\"barometer_data_step.txt\")\nt1 = np.arange(0, raw_data.size/12.5, 1/12.5)\nt2 = np.arange(0, raw_data_step.size/12.5, 1/12.5)\n\nfig1 = plt.figure(\"Data\")\nax1 = fig1.add_subplot(121)\nax2 = fig1.add_subplot(122)\nfig1.subplots_adjust(bottom=0.25)\n\n[unfiltered_raw_line] = ax1.plot(t1, raw_data)\n[unfiltered__step_line] = ax2.plot(t2, raw_data_step)\n\ndef filter_data(data, x0, P, Q, R):\n filter1 = KalmanFilter(x0, P, 1, 0, 1, Q, R)\n \n x_out = np.zeros(data.size)\n P_out = np.zeros(data.size)\n \n for k in np.arange(1, data.size):\n x_out[k], P_out[k] = filter1.update(0, data[k])\n\n return x_out, P_out\n\nP0 = 2\nQ0 = 1e-4\n\n[filtered_raw_line] = ax1.plot(t1, filter_data(raw_data, 0, P0, Q0, R=raw_data.var())[0])\n[filtered_step_line] = ax2.plot(t2, filter_data(raw_data_step, 0, P0, Q0, R=raw_data.var())[0])\n\nP_slider_ax = fig1.add_axes([0.25, 0.15, 0.65, 0.03])\nQ_slider_ax = fig1.add_axes([0.25, 0.1, 0.65, 0.03])\n\nP_slider = Slider(P_slider_ax, 'P', 0.5, 5, valinit=P0)\nQ_slider = Slider(Q_slider_ax, 'Q', 1e-4, 1e-3, valinit=Q0)\n\ndef sliders_on_changed(val):\n P = P_slider.val\n Q = Q_slider.val\n x_raw_new, P_raw_new = filter_data(raw_data, 0, P, Q, R=raw_data.var())\n filtered_raw_line.set_ydata(x_raw_new)\n x_step_new, P_step_new = filter_data(raw_data_step, 0, P, Q, R=raw_data.var())\n filtered_step_line.set_ydata(x_step_new)\n\nP_slider.on_changed(sliders_on_changed)\nQ_slider.on_changed(sliders_on_changed)\n\nplt.show()\n"
] |
[
[
"matplotlib.widgets.Slider",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"numpy.arange",
"matplotlib.pyplot.show"
]
] |
BartlomiejOlber/od-test
|
[
"fb7adb8fe003e405ce94fa3887553c7af55d3fd3",
"fb7adb8fe003e405ce94fa3887553c7af55d3fd3"
] |
[
"methods/nearest_neighbor.py",
"methods/energy/integrated.py"
] |
[
"from __future__ import print_function\nfrom os import path\nfrom termcolor import colored\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nimport global_vars as Global\nimport models as Models\nfrom datasets import MirroredDataset\nfrom methods.score_svm import ScoreSVM\n\nfrom tqdm import tqdm\n\nclass KNNModel(nn.Module):\n \"\"\"\n This is our Nearest Neighbour \"neural network\".\n \"\"\"\n\n def __init__(self, base_data, k=1):\n super(KNNModel, self).__init__()\n self.base_data = base_data.half().cuda() # We probably have to rewrite this part of the code \n # as larger datasets may not entirely fit into the GPU memory.\n n_data = self.base_data.size(0)\n self.base_data = self.base_data.view(n_data, -1) # Flatten the train data.\n self.base_data_norm = (self.base_data*self.base_data).sum(dim=1)\n self.K = k\n self.norm = 2\n \n def forward(self, x, **kwargs):\n n_samples = x.size(0)\n x = x.data.view(n_samples, -1).half() # flatten to vectors.\n base_data = self.base_data\n base_norm = self.base_data_norm\n ref_size = base_data.size(0)\n\n x_norm = (x*x).sum(dim=1)\n diffs = base_norm.unsqueeze(0).expand(n_samples, ref_size) + x_norm.unsqueeze(1).expand(n_samples, ref_size) - 2*x.matmul(base_data.t())\n diffs.sqrt_().detach_()\n\n output, _ = torch.topk(diffs, self.K, dim=1, largest=False, sorted=True)\n\n return output.float()\n \n def preferred_name(self):\n return '%d-NN'%self.K\n\n def output_size(self):\n return torch.LongTensor([1, self.K])\n\nclass KNNSVM(ScoreSVM):\n def __init__(self, args):\n super(KNNSVM, self).__init__(args)\n self.base_data = None\n self.default_model = 1\n\n def method_identifier(self):\n output = \"KNNSVM%d\"%self.default_model\n return output\n\n def propose_H(self, dataset):\n assert self.default_model > 0, 'KNN needs K>0'\n if self.base_model is not None:\n self.base_model.base_data = None\n self.base_model = None\n\n self.train_dataset_name = dataset.name\n if dataset.name in 
Global.mirror_augment:\n print(colored(\"Mirror augmenting %s\"%dataset.name, 'green'))\n new_train_ds = dataset + MirroredDataset(dataset)\n dataset = new_train_ds\n \n n_data = len(dataset)\n n_dim = dataset[0][0].numel()\n self.base_data = torch.zeros(n_data, n_dim, dtype=torch.float32)\n\n with tqdm(total=n_data) as pbar:\n pbar.set_description('Caching X_train for %d-nn'%self.default_model)\n for i, (x, _) in enumerate(dataset):\n self.base_data[i].copy_(x.view(-1))\n pbar.update()\n # self.base_data = torch.cat([x.view(1, -1) for x,_ in dataset])\n self.base_model = KNNModel(self.base_data, k=self.default_model).to(self.args.device)\n self.base_model.eval()\n self.model_name = \"VGG\" if self.add_identifier.find(\"VGG\") >= 0 else (\"Resnet\" if self.add_identifier.find(\"Resnet\") >= 0 else \"\")\n self.add_identifier = \"\"\n\nclass AEKNNModel(nn.Module):\n \"\"\"\n This is our Nearest Neighbour \"neural network\" with AE latent representations.\n \"\"\"\n\n def __init__(self, subnetwork, base_data, k=1):\n super(AEKNNModel, self).__init__()\n self.base_data = base_data.cuda()\n n_data = self.base_data.size(0)\n self.base_data = self.base_data.view(n_data, -1) # Flatten the train data.\n self.base_data_norm = (self.base_data*self.base_data).sum(dim=1)\n self.K = k\n self.norm = 2\n self.subnetwork = subnetwork\n \n def forward(self, x, **kwargs):\n n_samples = x.size(0)\n self.subnetwork.eval()\n x = self.subnetwork.encode(x).data\n base_data = self.base_data\n base_norm = self.base_data_norm\n ref_size = base_data.size(0)\n\n x_norm = (x*x).sum(dim=1)\n diffs = base_norm.unsqueeze(0).expand(n_samples, ref_size) + x_norm.unsqueeze(1).expand(n_samples, ref_size) - 2*x.matmul(base_data.t())\n diffs.sqrt_().detach_()\n\n output, _ = torch.topk(diffs, self.K, dim=1, largest=False, sorted=True)\n\n return output.float()\n \n def preferred_name(self):\n return '%d-AENN'%self.K\n\n def output_size(self):\n return torch.LongTensor([1, self.K])\n\nclass 
AEKNNSVM(ScoreSVM):\n def __init__(self, args):\n super(AEKNNSVM, self).__init__(args)\n self.base_data = None\n self.default_model = 1\n\n def method_identifier(self):\n output = \"AEKNNSVM%d\"%self.default_model\n return output\n\n def propose_H(self, dataset):\n assert self.default_model > 0, 'KNN needs K>0'\n if self.base_model is not None:\n self.base_model.base_data = None\n self.base_model = None\n\n # Set up the base-model\n if isinstance(self, BCEKNNSVM) or isinstance(self, MSEKNNSVM):\n base_model = Global.get_ref_autoencoder(dataset.name)[0]().to(self.args.device)\n if isinstance(self, BCEKNNSVM):\n base_model.netid = \"BCE.\" + base_model.netid\n else:\n base_model.netid = \"MSE.\" + base_model.netid\n home_path = Models.get_ref_model_path(self.args, base_model.__class__.__name__, dataset.name, suffix_str=base_model.netid)\n elif isinstance(self, VAEKNNSVM):\n base_model = Global.get_ref_vae(dataset.name)[0]().to(self.args.device)\n home_path = Models.get_ref_model_path(self.args, base_model.__class__.__name__, dataset.name, suffix_str=base_model.netid)\n else:\n raise NotImplementedError()\n\n hbest_path = path.join(home_path, 'model.best.pth')\n best_h_path = hbest_path\n print(colored('Loading H1 model from %s'%best_h_path, 'red'))\n base_model.load_state_dict(torch.load(best_h_path))\n base_model.eval()\n self.train_dataset_name = self.args.D1\n if dataset.name in Global.mirror_augment:\n print(colored(\"Mirror augmenting %s\"%dataset.name, 'green'))\n new_train_ds = dataset + MirroredDataset(dataset)\n dataset = new_train_ds\n\n # Initialize the multi-threaded loaders.\n all_loader = DataLoader(dataset, batch_size=self.args.batch_size, num_workers=1, pin_memory=True)\n\n n_data = len(dataset)\n n_dim = base_model.encode(dataset[0][0].to(self.args.device).unsqueeze(0)).numel()\n print('nHidden %d'%(n_dim))\n self.base_data = torch.zeros(n_data, n_dim, dtype=torch.float32)\n base_ind = 0\n with torch.set_grad_enabled(False):\n with 
tqdm(total=len(all_loader)) as pbar:\n pbar.set_description('Caching X_train for %d-nn'%self.default_model)\n for i, (x, _) in enumerate(all_loader):\n n_data = x.size(0)\n output = base_model.encode(x.to(self.args.device)).data\n self.base_data[base_ind:base_ind+n_data].copy_(output)\n base_ind = base_ind + n_data\n pbar.update()\n # self.base_data = torch.cat([x.view(1, -1) for x,_ in dataset])\n self.base_model = AEKNNModel(base_model, self.base_data, k=self.default_model).to(self.args.device)\n self.base_model.eval()\n self.model_name = \"VGG\" if self.add_identifier.find(\"VGG\") >= 0 else (\"Resnet\" if self.add_identifier.find(\"Resnet\") >= 0 else \"\")\n self.add_identifier = \"\"\n\n\"\"\"\n Actual implementation in AEKNNSVM.\n\"\"\"\nclass BCEKNNSVM(AEKNNSVM):\n def method_identifier(self):\n # output = \"BCEKNNSVM/%d\"%self.default_model\n output = \"BCEKNNSVM%d\"%self.default_model\n return output\nclass MSEKNNSVM(AEKNNSVM):\n def method_identifier(self):\n # output = \"MSEKNNSVM/%d\"%self.default_model\n output = \"MSEKNNSVM%d\"%self.default_model\n return output\nclass VAEKNNSVM(AEKNNSVM):\n def method_identifier(self):\n # output = \"VAEKNNSVM/%d\"%self.default_model\n output = \"VAEKNNSVM%d\"%self.default_model\n return output",
"from __future__ import print_function\n\nimport time\n\nimport numpy as np\nimport tqdm\n\nimport global_vars as Global\nfrom datasets import MirroredDataset\nfrom utils.iterative_trainer import IterativeTrainerConfig\nfrom utils.logger import Logger\nfrom termcolor import colored\nfrom torch.utils.data.dataloader import DataLoader\nimport torch\nimport os\n\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as trn\nimport torchvision.datasets as dset\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom sklearn.metrics import roc_auc_score, auc, precision_recall_curve\n\nfrom methods import AbstractMethodInterface\n\n\nclass Energy(AbstractMethodInterface):\n def __init__(self, args):\n super(Energy, self).__init__()\n self.base_model = None\n self.H_class = None\n self.args = args\n self.class_count = 0\n self.default_model = 0\n self.add_identifier = \"\"\n self.known_loader = None\n self.unknown_loader = None\n self.train_loader = None\n self.train_dataset_name = \"\"\n self.valid_dataset_name = \"\"\n self.test_dataset_name = \"\"\n self.train_dataset_length = 0\n self.seed = 1\n self.model_name = \"\"\n self.workspace_dir = \"workspace/energy\"\n\n def propose_H(self, dataset, mirror=True):\n config = self.get_H_config(dataset, mirror)\n\n from models import get_ref_model_path\n h_path = get_ref_model_path(self.args, config.model.__class__.__name__, dataset.name)\n self.best_h_path = os.path.join(h_path, 'model.best.pth')\n\n # trainer = IterativeTrainer(config, self.args)\n\n if not os.path.isfile(self.best_h_path):\n raise NotImplementedError(\"Please use model_setup to pretrain the networks first!\")\n else:\n print(colored('Loading H1 model from %s' % self.best_h_path, 'red'))\n config.model.load_state_dict(torch.load(self.best_h_path))\n\n self.base_model = config.model\n self.base_model.eval()\n self.class_count = self.base_model.output_size()[1].item()\n self.add_identifier = 
self.base_model.__class__.__name__\n self.train_dataset_name = dataset.name\n self.model_name = \"VGG\" if self.add_identifier.find(\"VGG\") >= 0 else \"Resnet\"\n if hasattr(self.base_model, 'preferred_name'):\n self.add_identifier = self.base_model.preferred_name()\n\n def method_identifier(self):\n output = \"Energy\"\n # if len(self.add_identifier) > 0:\n # output = output + \"/\" + self.add_identifier\n return output\n\n def get_H_config(self, dataset, mirror):\n if self.args.D1 in Global.mirror_augment and mirror:\n print(colored(\"Mirror augmenting %s\" % self.args.D1, 'green'))\n new_train_ds = dataset + MirroredDataset(dataset)\n dataset = new_train_ds\n\n self.train_loader = DataLoader(dataset, batch_size=self.args.batch_size, num_workers=self.args.workers,\n pin_memory=True, shuffle=True)\n self.train_dataset_length = len(dataset)\n self.input_shape = iter(dataset).__next__()[0].shape\n # Set up the model\n model = Global.get_ref_classifier(self.args.D1)[self.default_model]().to(self.args.device)\n # model.forward()\n\n # Set up the config\n config = IterativeTrainerConfig()\n\n base_model_name = self.base_model.__class__.__name__\n if hasattr(self.base_model, 'preferred_name'):\n base_model_name = self.base_model.preferred_name()\n\n config.name = '_%s[%s](%s->%s)' % (self.__class__.__name__, base_model_name, self.args.D1, self.args.D2)\n config.train_loader = self.train_loader\n config.visualize = not self.args.no_visualize\n config.model = model\n config.logger = Logger()\n return config\n\n def train_H(self, dataset):\n self.known_loader = DataLoader(dataset.datasets[0], batch_size=self.args.batch_size, shuffle=True,\n num_workers=self.args.workers,\n pin_memory=True)\n self.unknown_loader = DataLoader(dataset.datasets[1], batch_size=self.args.batch_size, shuffle=False,\n num_workers=self.args.workers,\n pin_memory=True)\n\n self.valid_dataset_name = dataset.datasets[1].name\n self.valid_dataset_length = len(dataset.datasets[0])\n best_acc = 0\n 
epochs = 10\n for m_in in [-23]:\n for m_out in [-5]:\n self._fine_tune_model(epochs=epochs, m_in=m_in, m_out=m_out)\n acc = self._find_threshold()\n self.base_model.load_state_dict(torch.load(self.best_h_path))\n if acc > best_acc:\n best_acc = acc\n self.m_in = m_in\n self.m_out = m_out\n\n model_path = os.path.join(os.path.join(self.workspace_dir,\n self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(\n self.seed) + '_min' + str(self.m_in) + '_mout' + str(self.m_out) +\n '_epoch_' + str(epochs - 1) + '.pt'))\n if os.path.exists(model_path):\n self.base_model.load_state_dict(torch.load(model_path))\n return\n\n def test_H(self, dataset):\n self.base_model.eval()\n\n with tqdm.tqdm(total=len(dataset)) as pbar:\n with torch.no_grad():\n for t in [1]:\n correct = 0.0\n all_probs = np.array([])\n labels = np.array([])\n dataset_iter = DataLoader(dataset, batch_size=self.args.batch_size, shuffle=False,\n num_workers=self.args.workers, pin_memory=True)\n\n self._generate_execution_times(dataset_iter)\n return 0, 0, 0\n counter = 0\n for i, (image, label) in enumerate(dataset_iter):\n pbar.update()\n counter += 1\n # Get and prepare data.\n input, target = image.to(self.args.device), label.to(self.args.device)\n logits = self.base_model(input, softmax=False)\n scores = self._get_energy_score(logits, temperature=t)\n\n classification = np.where(scores > self.threshold, 1, 0)\n correct += (classification == label.numpy()).sum()\n if all_probs.size:\n labels = np.concatenate((labels, label))\n all_probs = np.concatenate((all_probs, scores))\n else:\n labels = label\n all_probs = scores\n\n auroc = roc_auc_score(labels, all_probs)\n p, r, _ = precision_recall_curve(labels, all_probs)\n aupr = auc(r, p)\n print(\"Final Test average accuracy %s\" % (\n colored('%.4f%%' % (correct / labels.shape[0] * 100), 'red')))\n print(f\"Auroc: {auroc} aupr: {aupr}\")\n print(counter)\n return correct / labels.shape[0], auroc, aupr\n\n def 
_cosine_annealing(self, step, total_steps, lr_max, lr_min):\n return lr_min + (lr_max - lr_min) * 0.5 * (\n 1 + np.cos(step / total_steps * np.pi))\n\n def _fine_tune_model(self, epochs, m_in, m_out):\n model_path = os.path.join(os.path.join(self.workspace_dir,\n self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(\n self.seed) + '_min' + str(m_in) + '_mout' + str(\n m_out) + '_epoch_' + str(epochs - 1) + '.pt'))\n if os.path.exists(model_path):\n self.base_model.load_state_dict(torch.load(model_path))\n return\n if not os.path.exists(self.workspace_dir):\n os.makedirs(self.workspace_dir)\n if not os.path.isdir(self.workspace_dir):\n raise Exception('%s is not a dir' % self.workspace_dir)\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n\n with open(os.path.join(self.workspace_dir,\n self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(\n self.seed) + '_min' + str(m_in) + '_mout' + str(\n m_out) +\n '_training_results.csv'), 'w') as f:\n f.write('epoch,time(s),train_loss,test_loss,test_error(%)\\n')\n\n print('Beginning Training\\n')\n self.optimizer = torch.optim.SGD(\n self.base_model.parameters(), 0.001, momentum=0.9,\n weight_decay=0.0005, nesterov=True)\n\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer,\n lr_lambda=lambda step: self._cosine_annealing(step,\n 10 * self.valid_dataset_length,\n 1,\n 1e-6 / 0.001))\n # Main loop\n for epoch in range(0, epochs):\n self.epoch = epoch\n\n begin_epoch = time.time()\n\n self._train_epoch(m_in=m_in, m_out=m_out)\n self._eval_model()\n\n # Save model\n torch.save(self.base_model.state_dict(),\n os.path.join(os.path.join(self.workspace_dir,\n self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(\n self.seed) + '_min' + str(m_in) + '_mout' + str(\n m_out) +\n '_epoch_' + str(epoch) + '.pt')))\n\n # Let us not waste space and delete the previous model\n prev_path = 
os.path.join(os.path.join(self.workspace_dir,\n self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(\n self.seed) + '_min' + str(m_in) + '_mout' + str(\n m_out) +\n '_epoch_' + str(epoch - 1) + '.pt'))\n if os.path.exists(prev_path): os.remove(prev_path)\n\n # Show results\n with open(\n os.path.join(self.workspace_dir,\n self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(\n self.seed) + '_min' + str(m_in) + '_mout' + str(\n m_out) +\n '_training_results.csv'), 'a') as f:\n f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\\n' % (\n (epoch + 1),\n time.time() - begin_epoch,\n self._train_loss,\n self._test_loss,\n 100 - 100. * self._test_accuracy,\n ))\n\n # # print state with rounded decimals\n # print({k: round(v, 4) if isinstance(v, float) else v for k, v in state.items()})\n\n print('Epoch {0:3d} | Time {1:5d} | Train Loss {2:.4f} | Test Loss {3:.3f} | Test Error {4:.2f}'.format(\n (epoch + 1),\n int(time.time() - begin_epoch),\n self._train_loss,\n self._test_loss,\n 100 - 100. 
* self._test_accuracy,\n ))\n\n def _train_epoch(self, m_in, m_out):\n self.base_model.train() # enter train mode\n loss_avg = 0.0\n\n # start at a random point of the outlier dataset; this induces more randomness without obliterating locality\n self.unknown_loader.dataset.offset = np.random.randint(self.valid_dataset_length)\n for in_set, out_set in zip(self.train_loader, self.unknown_loader):\n data = torch.cat((in_set[0], out_set[0]), 0)\n target = in_set[1]\n\n data, target = data.cuda(), target.cuda()\n\n # forward\n x = self.base_model(data, softmax=False)\n\n # backward\n self.scheduler.step()\n self.optimizer.zero_grad()\n\n loss = F.cross_entropy(x[:len(in_set[0])], target)\n # cross-entropy from softmax distribution to uniform distribution\n\n Ec_out = -torch.logsumexp(x[len(in_set[0]):], dim=1)\n Ec_in = -torch.logsumexp(x[:len(in_set[0])], dim=1)\n loss += 0.1 * (torch.pow(F.relu(Ec_in - m_in), 2).mean() + torch.pow(F.relu(m_out - Ec_out),\n 2).mean())\n\n loss.backward()\n self.optimizer.step()\n\n # exponential moving average\n loss_avg = loss_avg * 0.8 + float(loss) * 0.2\n self._train_loss = loss_avg\n\n def _eval_model(self):\n self.base_model.eval()\n loss_avg = 0.0\n correct = 0\n with torch.no_grad():\n for data, target in self.train_loader:\n data, target = data.cuda(), target.cuda()\n\n # forward\n output = self.base_model(data, softmax=False)\n loss = F.cross_entropy(output, target)\n\n # accuracy\n pred = output.data.max(1)[1]\n # print(f\"data {data.shape} output: {output.shape} pred: {pred.shape} targetL {target.shape} f {target.data} f {pred.eq(target.data).sum()}\")\n correct += pred.eq(target.data).sum().item()\n\n # test loss average\n loss_avg += float(loss.data)\n\n self._test_loss = loss_avg / self.train_dataset_length\n print(f\"correct {correct} len: {self.train_dataset_length}\")\n self._test_accuracy = correct / self.train_dataset_length\n\n def _find_threshold(self):\n scores_known = np.array([])\n scores_unknown = 
np.array([])\n with torch.no_grad():\n for i, (image, label) in enumerate(self.known_loader):\n\n # Get and prepare data.\n input, target = image.to(self.args.device), label.to(self.args.device)\n logits = self.base_model(input, softmax=False)\n scores = self._get_energy_score(logits, temperature=1)\n if scores_known.size:\n scores_known = np.concatenate((scores_known, scores))\n else:\n scores_known = scores\n\n for i, (image, label) in enumerate(self.unknown_loader):\n # Get and prepare data.\n input, target = image.to(self.args.device), label.to(self.args.device)\n logits = self.base_model(input, softmax=False)\n scores = self._get_energy_score(logits, temperature=1)\n if scores_unknown.size:\n scores_unknown = np.concatenate((scores_unknown, scores))\n else:\n scores_unknown = scores\n\n min = np.max([scores_unknown.min(), scores_known.min()])\n max = np.min([scores_unknown.max(), scores_known.max()])\n cut_threshold = np.quantile(scores_known, .95)\n cut_correct_count = (scores_unknown > cut_threshold).sum()\n cut_correct_count += (scores_known <= cut_threshold).sum()\n best_correct_count = 0\n best_threshold = 0\n for i in np.linspace(min, max, num=1000):\n correct_count = 0\n correct_count += (scores_unknown > i).sum()\n correct_count += (scores_known <= i).sum()\n if best_correct_count < correct_count:\n best_correct_count = correct_count\n best_threshold = i\n if best_threshold > cut_threshold:\n best_correct_count = cut_correct_count\n best_threshold = cut_threshold\n self.threshold = best_threshold\n acc = best_correct_count / (scores_known.shape[0] * 2)\n print(f\"Best th: {best_threshold} acc: {acc}\")\n return acc\n\n def _get_energy_score(self, logits, temperature=1):\n scores = -(temperature * torch.logsumexp(logits.data.cpu() / temperature, dim=1).numpy())\n return scores\n\n def _generate_execution_times(self, loader):\n import time\n import numpy as np\n n_times = 1000\n exec_times = np.ones(n_times)\n\n trainiter = iter(loader)\n x = 
trainiter.__next__()[0][0].unsqueeze(0).to(self.args.device)\n with torch.no_grad():\n for i in range(n_times):\n start_time = time.time()\n logits = self.base_model(x, softmax=False)\n scores = self._get_energy_score(logits, temperature=1)\n\n _ = np.where(scores > self.threshold, 1, 0)\n exec_times[i] = time.time() - start_time\n\n exec_times = exec_times.mean()\n np.savez(\"results/article_plots/execution_times/\" + self.method_identifier() + \"_\" + self.model_name + \"_\" + self.train_dataset_name, exec_times=exec_times)\n"
] |
[
[
"torch.zeros",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.set_grad_enabled",
"torch.topk"
],
[
"numpy.quantile",
"torch.cat",
"numpy.where",
"torch.nn.functional.cross_entropy",
"torch.load",
"numpy.cos",
"numpy.concatenate",
"sklearn.metrics.precision_recall_curve",
"torch.manual_seed",
"numpy.random.randint",
"torch.nn.functional.relu",
"numpy.array",
"sklearn.metrics.roc_auc_score",
"torch.utils.data.dataloader.DataLoader",
"numpy.random.seed",
"torch.no_grad",
"numpy.ones",
"sklearn.metrics.auc",
"numpy.linspace"
]
] |
Yujun-Shi/BLIP
|
[
"87bc558aec17feb6c65ed3c37ab9317fd444b254"
] |
[
"miniImageNetClassification/src/run_baselines.py"
] |
[
"import math\nimport sys,os,argparse,time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport utils\nfrom datetime import datetime\n\ndef main():\n tstart=time.time()\n\n parser=argparse.ArgumentParser(description='xxx')\n\n # Data parameters\n parser.add_argument('--seed', default=0, type=int, help='(default=%(default)d)')\n parser.add_argument('--device', default='cuda:0', type=str, help='gpu id')\n parser.add_argument('--approach', default='lwf', type=str, help='approach used')\n parser.add_argument('--experiment', default='MI', type=str)\n parser.add_argument('--data_dir', default='data', type=str, help='data directory')\n parser.add_argument('--ntasks', default=10, type=int)\n parser.add_argument('--pc-valid', default=0.02, type=float)\n parser.add_argument('--workers', default=4, type=int)\n\n # Training parameters\n parser.add_argument('--output', default='', type=str, help='')\n parser.add_argument('--checkpoint_dir', default='checkpoints/', type=str, help='')\n parser.add_argument('--nepochs', default=200, type=int, help='')\n parser.add_argument('--sbatch', default=64, type=int, help='')\n parser.add_argument('--lr', default=0.05, type=float, help='')\n parser.add_argument('--momentum', default=0.9, type=float)\n parser.add_argument('--weight-decay', default=0.0, type=float)\n parser.add_argument('--resume', default='no', type=str, help='resume?')\n parser.add_argument('--sti', default=0, type=int, help='starting task?')\n parser.add_argument('--mul', default=2, type=int)\n\n args=parser.parse_args()\n utils.print_arguments(args)\n\n #####################################################################################\n\n # Seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n print('Using device:', args.device)\n checkpoint = utils.make_directories(args)\n args.checkpoint = 
checkpoint\n print()\n\n # Args -- Experiment\n from dataloaders.miniimagenet import DatasetGen\n\n # Args -- Approach\n if args.approach == 'ewc':\n from approaches import ewc as approach\n elif args.approach == 'sgd':\n from approaches import sgd as approach\n elif args.approach == 'sgd-frozen':\n from approaches import sgd_frozen as approach\n elif args.approach == 'imm-mode':\n from approaches import imm_mode as approach\n elif args.approach == 'lwf':\n from approaches import lwf as approach\n else:\n raise NotImplementedError(\"approach currently not implemented\")\n\n # Args -- Network\n if args.approach != 'hat':\n from networks import alexnet as network\n else:\n from networks import alexnet_hat as network\n\n ########################################################################################\n print()\n print(\"Starting this run on :\")\n print(datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n\n # Load\n print('Load data...')\n # prepare data for each task\n datagen = DatasetGen(args)\n for task_id in range(args.ntasks):\n datagen.get(task_id)\n print('\\nTask info =',datagen.taskcla)\n\n args.num_tasks = len(datagen.taskcla)\n args.inputsize, args.taskcla = datagen.inputsize, datagen.taskcla\n\n # Inits\n print('Inits...')\n model=network.Net(args).to(args.device)\n\n # print number of parameters\n count = 0\n for p in model.parameters():\n count+=np.prod(p.size())\n print('model size in MB: ', count*4/(1024*1024))\n\n print('-'*100)\n appr=approach.Appr(model,args=args)\n print('-'*100)\n\n if args.resume == 'yes':\n checkpoint = torch.load(os.path.join(args.checkpoint, 'model_{}.pth.tar'.format(args.sti)))\n model.load_state_dict(checkpoint['model_state_dict'])\n model = model.to(device=args.device)\n else:\n args.sti = 0\n\n # Loop tasks\n acc=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)\n lss=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)\n for task, ncla in args.taskcla[args.sti:]:\n data_t = 
datagen.dataloaders[task]\n print('*'*100)\n print('Task {:2d} ({:s})'.format(task, data_t['name']))\n print('*'*100)\n\n # Train\n appr.train(task, data_t['train'], data_t['valid'])\n print('-'*100)\n\n appr.save_model(task)\n # Test\n for u in range(task+1):\n data_u = datagen.dataloaders[u]\n test_loss, test_acc=appr.eval(u, data_u['test'])\n print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.3f}% <<<'.format(u,data_u['name'],test_loss,100*test_acc))\n acc[task,u]=test_acc\n lss[task,u]=test_loss\n\n # Save\n print('Save at '+args.checkpoint)\n np.savetxt(os.path.join(args.checkpoint,'{}_{}.txt'.format(args.approach,args.seed)), acc, '%.5f')\n\n utils.print_log_acc_bwt(args, acc, lss)\n print('[Elapsed time = {:.1f} h]'.format((time.time()-tstart)/(60*60)))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.manual_seed"
]
] |
1995subhankar1995/Machine_Learning_codes
|
[
"5170f4880798184b257d7e64577dc9dd8d0e1e81"
] |
[
"SVM.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt \nimport pandas as pd\nfrom qpsolvers import solve_qp\nfrom sklearn.metrics import confusion_matrix\n\n########### Importing Dataset #######################\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\nfor i in range(len(X)):\n if(y[i] == 0):\n y[i] = -1\n########## Splitting Data into Train and Test data ##########\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size = 0.2, random_state = 1)\n\n######### Train data scaling ######################\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train[:, 0:] = sc.fit_transform(X_train[:, 0:])\nX_test[:, 0:] = sc.transform(X_test[:, 0:])\n\ndef Kernel(X, y):\n K = np.zeros((len(X), len(X)))\n for i in range(len(X)):\n for j in range(len(X)):\n K[i, j] = y[i]*np.dot(X[i, :], X[j, :])*y[j]\n return K\ndef SVM_learner(X, y, C):\n P = Kernel(X, y)\n p1 = 1e-5*np.identity(len(X))\n P = P + p1\n q = -1*np.ones(len(X))\n A = y;\n A = A.reshape(1, len(X))\n u = np.zeros(1)\n h1 = C*np.ones(len(X))\n h2 = np.zeros(len(X))\n h = np.concatenate((h1, h2))\n G1 = np.identity(len(X_train))\n G2 = -1*np.identity(len(X_train))\n G = np.concatenate((G1, G2))\n alphas = solve_qp(P, q, G, h, A, u)\n SV_indices = []\n for i in range(len(X)):\n if(alphas[i] >= 0.001):\n SV_indices.append(i)\n SV = X[SV_indices]\n SV_labels = y[SV_indices]\n SV_alphas = alphas[SV_indices]\n W = np.zeros(X.shape[1])\n for i in range(len(SV_alphas)):\n W += SV_alphas[i]*SV_labels[i]*SV[i]\n b = SV_labels[25] - np.dot(W, SV[25])\n class model_struct:\n pass\n model = model_struct()\n model.W = W\n model.b = b\n model.SV = SV\n model.SV_labels = SV_labels\n model.SV_alphas = SV_alphas\n return model\ndef Prediction(X, model):\n return np.sign(np.dot(X, model.W) + model.b)\ndef Accuracy(X, y_actual, y_pred):\n matrix = 
confusion_matrix(y_actual, y_pred)\n return (1/len(X))*sum(y_actual == y_pred)*100, matrix\nC = 1\nmodel = SVM_learner(X_train, y_train, C)\ny_predict_train = Prediction(X_train, model)\ny_predict_test = Prediction(X_test, model)\naccuracy1, confuse1 = Accuracy(X_train, y_train, y_predict_train)\naccuracy2, test_confuse = Accuracy(X_test, y_test, y_predict_test)\nprint(\"Train accuracy:\", accuracy1,\"%\\n\",\"Test accuracy:\",\n accuracy2, \"%\\n\")\nprint(\"Train confusion matrix:\\n\",confuse1, \"\\n\",\n \"Test confusion matrix:\\n\", test_confuse)\n"
] |
[
[
"numpy.concatenate",
"sklearn.metrics.confusion_matrix",
"numpy.dot",
"numpy.zeros",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
itisianlee/hawk-facedet
|
[
"55774ac5619f9a4c76a3a872ff11940a874b32d1"
] |
[
"hawkdet/dataset/widerface.py"
] |
[
"import cv2\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import Dataset\n\n\nclass WiderFace(Dataset):\n def __init__(self, txt_path, transforms=None):\n super().__init__()\n self.transforms = transforms\n self.imgs_path = []\n self.annos = []\n with open(txt_path) as f:\n lines = f.readlines()\n\n isFirst = True\n labels = []\n for line in lines:\n line = line.rstrip()\n if line.startswith('#'):\n if isFirst is True:\n isFirst = False\n else:\n labels_copy = labels.copy()\n self.annos.append(np.array(labels_copy))\n labels.clear()\n path = line[2:]\n path = txt_path.replace('label.txt','images/') + path\n self.imgs_path.append(path)\n else:\n label = [float(x) for x in line.split(' ')]\n # format: xyxy\n anno = [\n label[0], label[1], label[0] + label[2], label[1] + label[3],\n label[4], label[5], label[7], label[8], label[10], label[11],\n label[13], label[14], label[16], label[17], -1 if label[4]<0 else 1\n ]\n labels.append(anno)\n\n self.annos.append(np.array(labels))\n\n def __len__(self):\n return len(self.imgs_path)\n\n def __getitem__(self, index):\n img = cv2.imread(self.imgs_path[index])\n anno = self.annos[index]\n assert anno.shape[0] > 0, 'length of annotation must be greater than 0'\n \n item = {\n 'image': img,\n 'bboxes': anno[:, :4],\n 'labels': anno[:, -1],\n 'landmarks': anno[:, 4:-1]\n }\n\n if self.transforms is not None:\n item = self.transforms(item)\n\n return item\n\n\ndef collater(batch_samples):\n bboxes = []\n labels = []\n lmks = []\n imgs = []\n for sample in batch_samples:\n single_img = sample['image']\n single_bboxes = sample['bboxes']\n single_labels = sample['labels']\n single_lmks = sample['landmarks']\n\n imgs.append(single_img)\n\n bboxes.append(torch.from_numpy(single_bboxes).float())\n labels.append(torch.from_numpy(single_lmks).float())\n lmks.append(torch.from_numpy(single_labels).float())\n \n\n return {'images': torch.from_numpy(np.array(imgs)), 'bboxes': bboxes, 'landmarks': lmks, 'labels': labels}"
] |
[
[
"numpy.array",
"torch.from_numpy"
]
] |
parkerwray/smuthi-1
|
[
"a5ced07461b8fd223dc37d28259261ceed78aed5"
] |
[
"tests/unit_tests/test_layerresponse.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Test the layerresponse functions defined in layers.py.\"\"\"\nimport unittest\n\nimport numpy as np\nimport smuthi.layers as lay\nimport smuthi.fields.expansions as fldex\n\n\nlayer_d = [0, 300, 400, 0]\nlayer_n = [1, 2 + 0.1j, 3, 1 + 5j]\nomega = 2 * 3.15 / 550\nkpar = omega * 1.7\nprecision = 15\n\n\nclass TestLayerResponse(unittest.TestCase):\n def test_layerresponse_mpmath_equals_numpy(self):\n \"\"\"Are the results with multiple precision consistent with numpy equivalent?\"\"\"\n for pol in [0, 1]:\n for fromlayer in range(len(layer_d)):\n for tolayer in range(len(layer_d)):\n lay.set_precision(None)\n lmat1 = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar, omega, fromlayer, tolayer)\n lay.set_precision(precision)\n lmat2 = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar, omega, fromlayer, tolayer)\n np.testing.assert_almost_equal(lmat1, lmat2)\n lay.set_precision(None)\n\n def test_scattering_matrix_equals_transfer_matrix(self):\n \"\"\"Are the results from the transfer matrix algorithm and from the scattering matrix algorithm consistent?\"\"\"\n for pol in [0, 1]:\n tmat = lay.layersystem_transfer_matrix(pol, layer_d, layer_n, kpar, omega)\n smat = lay.layersystem_scattering_matrix(pol, layer_d, layer_n, kpar, omega)\n np.testing.assert_almost_equal(tmat[1, 0] / tmat[0, 0], smat[1, 0])\n\n def test_layerresponse_against_prototype(self):\n \"\"\"Are the results from layers.py and consistent with the MATLAB prototype code TSPL?\"\"\"\n pol = 0\n fromlayer = 2\n tolayer = 1\n lmat = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar, omega, fromlayer, tolayer)\n lmat_TSPL = np.array([[-0.392979481352895 - 0.376963315605839j, -0.455367266697897 + 0.426065579868901j],\n [0.545168303416962 - 0.345873455516963j, -0.361796569025878 - 0.644799225334747j]])\n np.testing.assert_almost_equal(lmat, lmat_TSPL)\n\n pol = 1\n fromlayer = 1\n tolayer = 2\n lmat = lay.layersystem_response_matrix(pol, layer_d, 
layer_n, kpar, omega, fromlayer, tolayer)\n lmat_TSPL = np.array([[-0.240373686730040 - 0.148769054113797j, 0.161922209423045 + 0.222085165907288j],\n [-0.182951011363592 + 0.138158890222525j, 0.215395950986834 - 0.057346289106977j]])\n np.testing.assert_almost_equal(lmat, lmat_TSPL)\n\n def test_layerresponse_for_kpar_arrays(self):\n pol = 1\n fromlayer = 2\n tolayer = 1\n kpar_array = np.linspace(0, kpar)\n lmat_vec = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar_array, omega, fromlayer, tolayer)\n lmat_end = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar_array[-1], omega, fromlayer, tolayer)\n lmat0 = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar_array[0], omega, fromlayer, tolayer)\n np.testing.assert_almost_equal(lmat_end, lmat_vec[:, :, -1])\n np.testing.assert_almost_equal(lmat0, lmat_vec[:, :, 0])\n\n def test_layerresponse_method(self):\n fromlayer=2\n tolayer=1\n kp = np.linspace(0, 2) * omega\n a = np.linspace(0, 2*np.pi)\n layer_system = lay.LayerSystem(thicknesses=layer_d, refractive_indices=layer_n)\n ref = [0, 0, layer_system.reference_z(fromlayer)]\n pwe_up = fldex.PlaneWaveExpansion(k=omega*1.2, k_parallel=kp, azimuthal_angles=a, kind='upgoing',\n reference_point=ref)\n pwe_up.coefficients[0,:, :] = np.exp(-pwe_up.k_parallel_grid()/omega)\n pwe_down = fldex.PlaneWaveExpansion(k=omega * 1.2, k_parallel=kp, azimuthal_angles=a, kind='downgoing',\n reference_point=ref)\n pwe_down.coefficients[0, :, :] = 2j * np.exp(-pwe_up.k_parallel_grid() / omega * 3)\n\n pwe_r_up, pwe_r_down = layer_system.response(pwe_up, fromlayer, tolayer)\n pwe_r_up2, pwe_r_down2 = layer_system.response(pwe_down, fromlayer, tolayer)\n pwe_r_up3, pwe_r_down3 = layer_system.response((pwe_up, pwe_down), fromlayer, tolayer)\n\n # print(pwe_r_up.coefficients[0, 0, 0] + pwe_r_up2.coefficients[0, 0, 0])\n # print(pwe_r_up3.coefficients[0, 0, 0])\n\n # FIXME: no assert in this test\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.linspace"
]
] |
avasalya/singleshot6Dpose
|
[
"a39eae0191dce42d4aca08eaff5a1e5646d592df"
] |
[
"utils.py"
] |
[
"import sys\nimport os\nimport time\nimport math\nimport torch\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport cv2\nfrom scipy import spatial\n\nimport struct\nimport imghdr\n\n# Create new directory\ndef makedirs(path):\n if not os.path.exists( path ):\n os.makedirs( path )\n\ndef get_all_files(directory):\n files = []\n\n for f in os.listdir(directory):\n if os.path.isfile(os.path.join(directory, f)):\n files.append(os.path.join(directory, f))\n else:\n files.extend(get_all_files(os.path.join(directory, f)))\n return files\n\ndef calcAngularDistance(gt_rot, pr_rot):\n\n rotDiff = np.dot(gt_rot, np.transpose(pr_rot))\n trace = np.trace(rotDiff)\n return np.rad2deg(np.arccos((trace-1.0)/2.0))\n\ndef get_camera_intrinsic(u0, v0, fx, fy):\n return np.array([[fx, 0.0, u0], [0.0, fy, v0], [0.0, 0.0, 1.0]])\n\ndef compute_projection(points_3D, transformation, internal_calibration):\n projections_2d = np.zeros((2, points_3D.shape[1]), dtype='float32')\n camera_projection = (internal_calibration.dot(transformation)).dot(points_3D)\n projections_2d[0, :] = camera_projection[0, :]/camera_projection[2, :]\n projections_2d[1, :] = camera_projection[1, :]/camera_projection[2, :]\n return projections_2d\n\ndef compute_transformation(points_3D, transformation):\n return transformation.dot(points_3D)\n\ndef calc_pts_diameter(pts):\n diameter = -1\n for pt_id in range(pts.shape[0]):\n pt_dup = np.tile(np.array([pts[pt_id, :]]), [pts.shape[0] - pt_id, 1])\n pts_diff = pt_dup - pts[pt_id:, :]\n max_dist = math.sqrt((pts_diff * pts_diff).sum(axis=1).max())\n if max_dist > diameter:\n diameter = max_dist\n return diameter\n\ndef adi(pts_est, pts_gt):\n nn_index = spatial.cKDTree(pts_est)\n nn_dists, _ = nn_index.query(pts_gt, k=1)\n e = nn_dists.mean()\n return e\n\ndef get_3D_corners(vertices):\n\n min_x = np.min(vertices[0,:])\n max_x = np.max(vertices[0,:])\n min_y = 
np.min(vertices[1,:])\n max_y = np.max(vertices[1,:])\n min_z = np.min(vertices[2,:])\n max_z = np.max(vertices[2,:])\n corners = np.array([[min_x, min_y, min_z],\n [min_x, min_y, max_z],\n [min_x, max_y, min_z],\n [min_x, max_y, max_z],\n [max_x, min_y, min_z],\n [max_x, min_y, max_z],\n [max_x, max_y, min_z],\n [max_x, max_y, max_z]])\n\n corners = np.concatenate((np.transpose(corners), np.ones((1,8)) ), axis=0)\n return corners\n\ndef pnp(points_3D, points_2D, cameraMatrix):\n try:\n distCoeffs = pnp.distCoeffs\n except:\n distCoeffs = np.zeros((8, 1), dtype='float32')\n\n assert points_2D.shape[0] == points_2D.shape[0], 'points 3D and points 2D must have same number of vertices'\n\n _, R_exp, t = cv2.solvePnP(points_3D,\n np.ascontiguousarray(points_2D[:,:2]).reshape((-1,1,2)),\n cameraMatrix,\n distCoeffs)\n\n R, _ = cv2.Rodrigues(R_exp)\n return R, t\n\ndef get_2d_bb(box, size):\n x = box[0]\n y = box[1]\n min_x = np.min(np.reshape(box, [-1,2])[:,0])\n max_x = np.max(np.reshape(box, [-1,2])[:,0])\n min_y = np.min(np.reshape(box, [-1,2])[:,1])\n max_y = np.max(np.reshape(box, [-1,2])[:,1])\n w = max_x - min_x\n h = max_y - min_y\n new_box = [x*size, y*size, w*size, h*size]\n return new_box\n\ndef compute_2d_bb(pts):\n min_x = np.min(pts[0,:])\n max_x = np.max(pts[0,:])\n min_y = np.min(pts[1,:])\n max_y = np.max(pts[1,:])\n w = max_x - min_x\n h = max_y - min_y\n cx = (max_x + min_x) / 2.0\n cy = (max_y + min_y) / 2.0\n new_box = [cx, cy, w, h]\n return new_box\n\ndef compute_2d_bb_from_orig_pix(pts, size):\n min_x = np.min(pts[0,:]) / 640.0\n max_x = np.max(pts[0,:]) / 640.0\n min_y = np.min(pts[1,:]) / 480.0\n max_y = np.max(pts[1,:]) / 480.0\n w = max_x - min_x\n h = max_y - min_y\n cx = (max_x + min_x) / 2.0\n cy = (max_y + min_y) / 2.0\n new_box = [cx*size, cy*size, w*size, h*size]\n return new_box\n\ndef corner_confidences(gt_corners, pr_corners, th=80, sharpness=2, im_width=640, im_height=480):\n ''' gt_corners: Ground-truth 2D projections of the 3D 
bounding box corners, shape: (16 x nA), type: torch.FloatTensor\n pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor\n th : distance threshold, type: int\n sharpness : sharpness of the exponential that assigns a confidence value to the distance\n -----------\n return : a torch.FloatTensor of shape (nA,) with 9 confidence values\n '''\n shape = gt_corners.size()\n nA = shape[1]\n dist = gt_corners - pr_corners\n num_el = dist.numel()\n num_keypoints = num_el//(nA*2)\n dist = dist.t().contiguous().view(nA, num_keypoints, 2)\n dist[:, :, 0] = dist[:, :, 0] * im_width\n dist[:, :, 1] = dist[:, :, 1] * im_height\n\n eps = 1e-5\n distthresh = torch.FloatTensor([th]).repeat(nA, num_keypoints)\n dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 9\n mask = (dist < distthresh).type(torch.FloatTensor)\n conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)\n conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1\n conf = conf / conf0.repeat(1, num_keypoints)\n # conf = 1 - dist/distthresh\n conf = mask * conf # nA x 9\n mean_conf = torch.mean(conf, dim=1)\n return mean_conf\n\ndef corner_confidence(gt_corners, pr_corners, th=80, sharpness=2, im_width=640, im_height=480):\n ''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (18,) type: list\n pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (18,), type: list\n th : distance threshold, type: int\n sharpness : sharpness of the exponential that assigns a confidence value to the distance\n -----------\n return : a list of shape (9,) with 9 confidence values\n '''\n dist = torch.FloatTensor(gt_corners) - pr_corners\n num_keypoints = dist.numel()//2\n dist = dist.view(num_keypoints, 2)\n dist[:, 0] = dist[:, 0] * im_width\n dist[:, 1] = dist[:, 1] * im_height\n eps = 1e-5\n dist = torch.sqrt(torch.sum((dist)**2, 
dim=1))\n mask = (dist < th).type(torch.FloatTensor)\n conf = torch.exp(sharpness * (1.0 - dist/th)) - 1\n conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps\n conf = conf / conf0.repeat(num_keypoints, 1)\n conf = mask * conf\n return torch.mean(conf)\n\ndef sigmoid(x):\n return 1.0/(math.exp(-x)+1.)\n\ndef softmax(x):\n x = torch.exp(x - torch.max(x))\n x = x/x.sum()\n return x\n\ndef fix_corner_order(corners2D_gt):\n corners2D_gt_corrected = np.zeros((9, 2), dtype='float32')\n corners2D_gt_corrected[0, :] = corners2D_gt[0, :]\n corners2D_gt_corrected[1, :] = corners2D_gt[1, :]\n corners2D_gt_corrected[2, :] = corners2D_gt[3, :]\n corners2D_gt_corrected[3, :] = corners2D_gt[5, :]\n corners2D_gt_corrected[4, :] = corners2D_gt[7, :]\n corners2D_gt_corrected[5, :] = corners2D_gt[2, :]\n corners2D_gt_corrected[6, :] = corners2D_gt[4, :]\n corners2D_gt_corrected[7, :] = corners2D_gt[6, :]\n corners2D_gt_corrected[8, :] = corners2D_gt[8, :]\n return corners2D_gt_corrected\n\ndef convert2cpu(gpu_matrix):\n return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)\n\ndef convert2cpu_long(gpu_matrix):\n return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)\n\ndef get_region_boxes(output, num_classes, num_keypoints, only_objectness=1, validation=True):\n\n # Parameters\n anchor_dim = 1\n if output.dim() == 3:\n output = output.unsqueeze(0)\n batch = output.size(0)\n # print('output.size(1)', output.size(1))\n # print('(2*num_keypoints+1+num_classes)*anchor_dim', (2*num_keypoints+1+num_classes)*anchor_dim)\n assert(output.size(1) == (2*num_keypoints+1+num_classes)*anchor_dim)\n h = output.size(2)\n w = output.size(3)\n\n # Activation\n t0 = time.time()\n max_conf = -sys.maxsize\n output = output.view(batch*anchor_dim, 2*num_keypoints+1+num_classes, h*w).transpose(0,1).contiguous().view(2*num_keypoints+1+num_classes, batch*anchor_dim*h*w)\n grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()\n 
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()\n\n xs = list()\n ys = list()\n xs.append(torch.sigmoid(output[0]) + grid_x)\n ys.append(torch.sigmoid(output[1]) + grid_y)\n for j in range(1,num_keypoints):\n xs.append(output[2*j + 0] + grid_x)\n ys.append(output[2*j + 1] + grid_y)\n det_confs = torch.sigmoid(output[2*num_keypoints])\n cls_confs = torch.nn.Softmax()(Variable(output[2*num_keypoints+1:2*num_keypoints+1+num_classes].transpose(0,1))).data\n cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)\n cls_max_confs = cls_max_confs.view(-1)\n cls_max_ids = cls_max_ids.view(-1)\n t1 = time.time()\n\n # GPU to CPU\n sz_hw = h*w\n sz_hwa = sz_hw*anchor_dim\n det_confs = convert2cpu(det_confs)\n cls_max_confs = convert2cpu(cls_max_confs)\n cls_max_ids = convert2cpu_long(cls_max_ids)\n for j in range(num_keypoints):\n xs[j] = convert2cpu(xs[j])\n ys[j] = convert2cpu(ys[j])\n if validation:\n cls_confs = convert2cpu(cls_confs.view(-1, num_classes))\n t2 = time.time()\n\n # Boxes filter\n for b in range(batch):\n for cy in range(h):\n for cx in range(w):\n for i in range(anchor_dim):\n ind = b*sz_hwa + i*sz_hw + cy*w + cx\n det_conf = det_confs[ind]\n if only_objectness:\n conf = det_confs[ind]\n else:\n conf = det_confs[ind] * cls_max_confs[ind]\n\n if conf > max_conf:\n max_conf = conf\n bcx = list()\n bcy = list()\n for j in range(num_keypoints):\n bcx.append(xs[j][ind])\n bcy.append(ys[j][ind])\n cls_max_conf = cls_max_confs[ind]\n cls_max_id = cls_max_ids[ind]\n box = list()\n for j in range(num_keypoints):\n box.append(bcx[j]/w)\n box.append(bcy[j]/h)\n box.append(det_conf)\n box.append(cls_max_conf)\n box.append(cls_max_id)\n t3 = time.time()\n if False:\n print('---------------------------------')\n print('matrix computation : %f' % (t1-t0))\n print(' gpu to cpu : %f' % (t2-t1))\n print(' boxes filter : %f' % (t3-t2))\n print('---------------------------------')\n return box\n\n\ndef 
get_region_boxes2(output, conf_thresh, num_classes, only_objectness=1, validation=False):\n\n # Parameters\n anchor_dim = 1\n if output.dim() == 3:\n output = output.unsqueeze(0)\n batch = output.size(0)\n assert(output.size(1) == (19+num_classes)*anchor_dim)\n h = output.size(2)\n w = output.size(3)\n\n # Activation\n t0 = time.time()\n all_boxes = []\n max_conf = -100000\n output = output.view(batch*anchor_dim, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*anchor_dim*h*w)\n grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()\n grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()\n xs0 = torch.sigmoid(output[0]) + grid_x\n ys0 = torch.sigmoid(output[1]) + grid_y\n xs1 = output[2] + grid_x\n ys1 = output[3] + grid_y\n xs2 = output[4] + grid_x\n ys2 = output[5] + grid_y\n xs3 = output[6] + grid_x\n ys3 = output[7] + grid_y\n xs4 = output[8] + grid_x\n ys4 = output[9] + grid_y\n xs5 = output[10] + grid_x\n ys5 = output[11] + grid_y\n xs6 = output[12] + grid_x\n ys6 = output[13] + grid_y\n xs7 = output[14] + grid_x\n ys7 = output[15] + grid_y\n xs8 = output[16] + grid_x\n ys8 = output[17] + grid_y\n det_confs = torch.sigmoid(output[18])\n cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data\n cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)\n cls_max_confs = cls_max_confs.view(-1)\n cls_max_ids = cls_max_ids.view(-1)\n t1 = time.time()\n\n # GPU to CPU\n sz_hw = h*w\n sz_hwa = sz_hw*anchor_dim\n det_confs = convert2cpu(det_confs)\n cls_max_confs = convert2cpu(cls_max_confs)\n cls_max_ids = convert2cpu_long(cls_max_ids)\n xs0 = convert2cpu(xs0)\n ys0 = convert2cpu(ys0)\n xs1 = convert2cpu(xs1)\n ys1 = convert2cpu(ys1)\n xs2 = convert2cpu(xs2)\n ys2 = convert2cpu(ys2)\n xs3 = convert2cpu(xs3)\n ys3 = convert2cpu(ys3)\n xs4 = convert2cpu(xs4)\n ys4 = convert2cpu(ys4)\n xs5 = 
convert2cpu(xs5)\n ys5 = convert2cpu(ys5)\n xs6 = convert2cpu(xs6)\n ys6 = convert2cpu(ys6)\n xs7 = convert2cpu(xs7)\n ys7 = convert2cpu(ys7)\n xs8 = convert2cpu(xs8)\n ys8 = convert2cpu(ys8)\n if validation:\n cls_confs = convert2cpu(cls_confs.view(-1, num_classes))\n t2 = time.time()\n\n # Boxes filter\n for b in range(batch):\n boxes = []\n max_conf = -1\n for cy in range(h):\n for cx in range(w):\n for i in range(anchor_dim):\n ind = b*sz_hwa + i*sz_hw + cy*w + cx\n det_conf = det_confs[ind]\n if only_objectness:\n conf = det_confs[ind]\n else:\n conf = det_confs[ind] * cls_max_confs[ind]\n\n if conf > max_conf:\n max_conf = conf\n max_ind = ind\n\n if conf > conf_thresh:\n bcx0 = xs0[ind]\n bcy0 = ys0[ind]\n bcx1 = xs1[ind]\n bcy1 = ys1[ind]\n bcx2 = xs2[ind]\n bcy2 = ys2[ind]\n bcx3 = xs3[ind]\n bcy3 = ys3[ind]\n bcx4 = xs4[ind]\n bcy4 = ys4[ind]\n bcx5 = xs5[ind]\n bcy5 = ys5[ind]\n bcx6 = xs6[ind]\n bcy6 = ys6[ind]\n bcx7 = xs7[ind]\n bcy7 = ys7[ind]\n bcx8 = xs8[ind]\n bcy8 = ys8[ind]\n cls_max_conf = cls_max_confs[ind]\n cls_max_id = cls_max_ids[ind]\n box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]\n if (not only_objectness) and validation:\n for c in range(num_classes):\n tmp_conf = cls_confs[ind][c]\n if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:\n box.append(tmp_conf)\n box.append(c)\n boxes.append(box)\n if len(boxes) == 0:\n bcx0 = xs0[max_ind]\n bcy0 = ys0[max_ind]\n bcx1 = xs1[max_ind]\n bcy1 = ys1[max_ind]\n bcx2 = xs2[max_ind]\n bcy2 = ys2[max_ind]\n bcx3 = xs3[max_ind]\n bcy3 = ys3[max_ind]\n bcx4 = xs4[max_ind]\n bcy4 = ys4[max_ind]\n bcx5 = xs5[max_ind]\n bcy5 = ys5[max_ind]\n bcx6 = xs6[max_ind]\n bcy6 = ys6[max_ind]\n bcx7 = xs7[max_ind]\n bcy7 = ys7[max_ind]\n bcx8 = xs8[max_ind]\n bcy8 = ys8[max_ind]\n cls_max_conf = cls_max_confs[max_ind]\n cls_max_id = cls_max_ids[max_ind]\n det_conf 
= det_confs[max_ind]\n box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]\n boxes.append(box)\n all_boxes.append(boxes)\n else:\n all_boxes.append(boxes)\n\n all_boxes.append(boxes)\n t3 = time.time()\n if False:\n print('---------------------------------')\n print('matrix computation : %f' % (t1-t0))\n print(' gpu to cpu : %f' % (t2-t1))\n print(' boxes filter : %f' % (t3-t2))\n print('---------------------------------')\n return all_boxes\n\n\n\ndef read_truths(lab_path, num_keypoints=9):\n num_labels = 2*num_keypoints+3 # +2 for width, height, +1 for class label\n if os.path.getsize(lab_path):\n truths = np.loadtxt(lab_path)\n truths = truths.reshape(truths.size//num_labels, num_labels) # to avoid single truth problem\n return truths\n else:\n return np.array([])\n\ndef read_truths_args(lab_path, num_keypoints=9):\n num_labels = 2 * num_keypoints + 1\n truths = read_truths(lab_path)\n new_truths = []\n for i in range(truths.shape[0]):\n for j in range(num_labels):\n new_truths.append(truths[i][j])\n return np.array(new_truths)\n\ndef read_pose(lab_path):\n if os.path.getsize(lab_path):\n truths = np.loadtxt(lab_path)\n # truths = truths.reshape(truths.size/21, 21) # to avoid single truth problem\n return truths\n else:\n return np.array([])\n\ndef load_class_names(namesfile):\n class_names = []\n with open(namesfile, 'r') as fp:\n lines = fp.readlines()\n for line in lines:\n line = line.rstrip()\n class_names.append(line)\n return class_names\n\ndef image2torch(img):\n width = img.width\n height = img.height\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))\n img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()\n img = img.view(1, 3, height, width)\n img = img.float().div(255.0)\n return img\n\ndef read_data_cfg(datacfg):\n options = dict()\n options['gpus'] = '0'\n 
options['num_workers'] = '10'\n with open(datacfg, 'r') as fp:\n lines = fp.readlines()\n\n for line in lines:\n line = line.strip()\n if line == '':\n continue\n key,value = line.split('=')\n key = key.strip()\n value = value.strip()\n options[key] = value\n return options\n\ndef scale_bboxes(bboxes, width, height):\n import copy\n dets = copy.deepcopy(bboxes)\n for i in range(len(dets)):\n dets[i][0] = dets[i][0] * width\n dets[i][1] = dets[i][1] * height\n dets[i][2] = dets[i][2] * width\n dets[i][3] = dets[i][3] * height\n return dets\n\ndef file_lines(thefilepath):\n count = 0\n thefile = open(thefilepath, 'rb')\n while True:\n buffer = thefile.read(8192*1024)\n if not buffer:\n break\n count += buffer.count(b'\\n')\n thefile.close( )\n return count\n\ndef get_image_size(fname):\n '''Determine the image type of fhandle and return its size.\n from draco'''\n with open(fname, 'rb') as fhandle:\n head = fhandle.read(24)\n if len(head) != 24:\n return\n if imghdr.what(fname) == 'png':\n check = struct.unpack('>i', head[4:8])[0]\n if check != 0x0d0a1a0a:\n return\n width, height = struct.unpack('>ii', head[16:24])\n elif imghdr.what(fname) == 'gif':\n width, height = struct.unpack('<HH', head[6:10])\n elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':\n try:\n fhandle.seek(0) # Read 0xff next\n size = 2\n ftype = 0\n while not 0xc0 <= ftype <= 0xcf:\n fhandle.seek(size, 1)\n byte = fhandle.read(1)\n while ord(byte) == 0xff:\n byte = fhandle.read(1)\n ftype = ord(byte)\n size = struct.unpack('>H', fhandle.read(2))[0] - 2\n # We are at a SOFn block\n fhandle.seek(1, 1) # Skip `precision' byte.\n height, width = struct.unpack('>HH', fhandle.read(4))\n except Exception: #IGNORE:W0703\n return\n else:\n return\n return width, height\n\ndef logging(message):\n print('%s %s' % (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), message))\n\n# def read_pose(lab_path):\n# if os.path.getsize(lab_path):\n# truths = np.loadtxt(lab_path)\n# return truths\n# 
else:\n# return np.array([])\n\n\n\n\ndef do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):\n model.eval()\n t0 = time.time()\n\n if isinstance(img, Image.Image):\n width = img.width\n height = img.height\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))\n img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()\n img = img.view(1, 3, height, width)\n img = img.float().div(255.0)\n elif type(img) == np.ndarray: # cv2 image\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)\n else:\n print(\"unknow image type\")\n exit(-1)\n\n t1 = time.time()\n\n if use_cuda:\n img = img.cuda()\n img = torch.autograd.Variable(img)\n t2 = time.time()\n\n output = model(img)\n output = output.data\n #for j in range(100):\n # sys.stdout.write('%f ' % (output.storage()[j]))\n #print('')\n t3 = time.time()\n\n # boxes = get_region_boxes(output, conf_thresh, model.num_classes, model.anchors, model.num_anchors)[0]\n boxes = get_region_boxes(output, model.num_classes, model.num_keypoints )\n for j in range(len(boxes)):\n print(\"boxes at\", j, boxes[j])\n t4 = time.time()\n\n boxes = nms(boxes, nms_thresh)\n t5 = time.time()\n\n if False:\n print('-----------------------------------')\n print(' image to tensor : %f' % (t1 - t0))\n print(' tensor to cuda : %f' % (t2 - t1))\n print(' predict : %f' % (t3 - t2))\n print('get_region_boxes : %f' % (t4 - t3))\n print(' nms : %f' % (t5 - t4))\n print(' total : %f' % (t5 - t0))\n print('-----------------------------------')\n return boxes\n\n\ndef nms(boxes, nms_thresh):\n if len(boxes) == 0:\n return boxes\n\n det_confs = torch.zeros(len(boxes))\n for i in range(len(boxes)):\n # det_confs[i] = 1-boxes[i][4]\n det_confs[i] = 1-boxes[i]\n\n _,sortIds = torch.sort(det_confs)\n out_boxes = []\n for i in range(len(boxes)):\n box_i = boxes[sortIds[i]]\n if box_i > 0:\n out_boxes.append(box_i)\n for j in range(i+1, len(boxes)):\n 
box_j = boxes[sortIds[j]]\n if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:\n print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))\n box_j = 0\n return out_boxes\n\ndef bbox_iou(box1, box2, x1y1x2y2=False):\n if x1y1x2y2:\n mx = min(box1[0], box2[0])\n Mx = max(box1[2], box2[2])\n my = min(box1[1], box2[1])\n My = max(box1[3], box2[3])\n w1 = box1[2] - box1[0]\n h1 = box1[3] - box1[1]\n w2 = box2[2] - box2[0]\n h2 = box2[3] - box2[1]\n else:\n mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)\n Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)\n my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)\n My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)\n w1 = box1[2]\n h1 = box1[3]\n w2 = box2[2]\n h2 = box2[3]\n uw = Mx - mx\n uh = My - my\n cw = w1 + w2 - uw\n ch = h1 + h2 - uh\n carea = 0\n if cw <= 0 or ch <= 0:\n return 0.0\n\n area1 = w1 * h1\n area2 = w2 * h2\n carea = cw * ch\n uarea = area1 + area2 - carea\n return carea/uarea\n"
] |
[
[
"scipy.spatial.cKDTree",
"numpy.arccos",
"numpy.min",
"torch.exp",
"torch.sum",
"numpy.max",
"torch.sigmoid",
"torch.nn.Softmax",
"torch.autograd.Variable",
"torch.FloatTensor",
"numpy.transpose",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"torch.max",
"torch.linspace",
"numpy.loadtxt",
"torch.sort",
"numpy.trace",
"numpy.ascontiguousarray",
"numpy.ones",
"torch.mean"
]
] |
scuervo91/resimpy
|
[
"b291621b373c61081304ad47231d027913204fe0"
] |
[
"resimpy/grid.py"
] |
[
"#########################################################################\n# Most of the Code has been taken from the next Github Repository: #\n# https://github.com/BinWang0213/PyGRDECL #\n# Code is used to load and manipulate Eclipse Data Grid\n#########################################################################\n\nimport numpy as np \nimport pyvista as pv \nimport vtk\nfrom shapely.geometry import Point\nimport math\nimport os\nimport pandas as pd \nfrom pydantic import BaseModel, Field, Extra, validator\nfrom enum import Enum\nfrom typing import List, Optional, Union, Dict\n\npetrophysical_properties = ['PORO','PERMX','PERMY','PERMZ','SW','RT']\n\nSupportKeyWords=[\n 'SPECGRID', #Dimenion of the corner point grid\n 'DIMENS', #Define the dimension of the cartesian grid\n 'TOPS','DX','DY','DZ',\n 'COORD','ZCORN',\n 'PORO',\n 'PERMX' , 'PERMXY', 'PERMXZ', \n 'PERMYX', 'PERMY' , 'PERMYZ', \n 'PERMZX', 'PERMZY', 'PERMZ',\n 'ACTNUM',\n 'SATNUM', 'NTG',\n 'INCLUDE',\n \n]\n\nKeyWordsDatatypes=[#Corrsponding data types\n int,\n int,\n int,int,int,int,\n float,float,\n float,\n float,float,float,\n float,float,float,\n float,float,float,\n int,\n int,float\n]\n\ndef parseDataArray(DataArray):\n \"\"\"Parse special dataArray format in GRDECL \n example:\n 5*3.0=[3.0 3.0 3.0 3.0 3.0]\n 1.0 2*3.0 5.0=[1.0 3.0 3.0 5.0]\n \n Author:Bin Wang(binwang.0213@gmail.com)\n Date: Sep. 
2018\n \"\"\"\n\n data=[]\n error_count=0\n for value in DataArray:\n if(is_number(value)==2):\n num,val=value.split('*')\n for i in range(int(num)): data.append(val)\n elif(is_number(value)==1):\n data.append(value)\n else:\n error_count+=1\n \n if(error_count>0):\n print(DataArray)\n \n assert error_count==0, '[Error] Can not find any numeric value!'\n \n return data\n \ndef is_number(s):\n #Determine a string is a number or not\n #Used in [read_GRDECL] [getBlkdata]\n try:\n float(s)\n return True\n except ValueError:\n pass\n \n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n \n try: #Special format N*val= [val val val ....]\n num, val = s.split('*')\n return 2\n except ValueError:\n pass\n \n return False\n\n## Auxilary functions\ndef cell_id(i,j,k,nx,ny):\n \"\"\"\n Get the cell Id given i,j,k indexes. \n * --- * --- * --- *\n | 0,2 | 1,2 | 2,2 | <- Cell id 6,7,8\n * --- * --- * --- *\n | 0,1 | 1,1 | 2,1 | <- Cell id 3,4,5\n * --- * --- * --- *\n | 0,0 | 1,0 | 2,0 | <- Cell id 0,1,2\n * --- * --- * --- *\n \"\"\"\n cell = (nx*j+i)+k*nx*ny\n\n return cell\n\ndef cell_ijk(cell_id,nx,ny):\n \"\"\"\n Get the cell indexes i,j,k given the cell id\n * --- * --- * --- *\n | 0,2 | 1,2 | 2,2 | <- Cell id 6,7,8\n * --- * --- * --- *\n | 0,1 | 1,1 | 2,1 | <- Cell id 3,4,5\n * --- * --- * --- *\n | 0,0 | 1,0 | 2,0 | <- Cell id 0,1,2\n * --- * --- * --- *\n \"\"\"\n if cell_id==0:\n return 0,0,0\n k=math.ceil(cell_id/(nx*ny))-1\n j=math.ceil((cell_id-(nx*ny)*k)/nx)-1\n i=math.ceil(cell_id-(nx*ny*k)-nx*j)\n return i,j,k\n\n#Interpolate z on pillars\ndef interpolate_z_pillar(z,p):\n \"\"\"\n Obtain the eight coords for a cell\n X,Y coords has to be interpolated from Z\n xy1=xy0+k*z\n Pillar=np.array([[x0 y0 z0],[x1 y1 z1]])\n \"\"\"\n x = ((p[0,0]-p[1,0])/(p[0,2]-p[1,2]))*(z-p[0,2])+p[0,0]\n y = ((p[0,1]-p[1,1])/(p[0,2]-p[1,2]))*(z-p[0,2])+p[0,1]\n\n xyz = np.array([x,y,z])\n return xyz\n\n#3D Rotation funtions\ndef 
rotation(points,azimuth,dip,plunge):\n assert points.ndim == 2\n\n azi_rad = np.radians(azimuth)\n dip_rad = np.radians(dip)\n plg_rad = np.radians(plunge)\n\n ry = np.array([\n [np.cos(plg_rad),0,-np.sin(plg_rad)],\n [0,1,0],\n [np.sin(plg_rad),0,np.cos(plg_rad)],\n ])\n rx = np.array([\n [1,0,0],\n [0,np.cos(dip_rad),np.sin(dip_rad)],\n [0,-np.sin(dip_rad),np.cos(dip_rad)]\n ])\n rz = np.array([\n [np.cos(azi_rad),-np.sin(azi_rad),0],\n [np.sin(azi_rad),np.cos(azi_rad),0],\n [0,0,1]\n ])\n\n rot = np.matmul(np.matmul(ry,rx),rz)\n\n rot_points = np.matmul(points,rot)\n\n return rot_points\n\ndef RemoveCommentLines(data,commenter='--'):\n #Remove comment and empty lines\n data_lines=data.strip().split('\\n')\n newdata=[]\n for line in data_lines:\n if line.startswith(commenter) or not line.strip():\n # skip comments and blank lines\n continue \n newdata.append(line)\n return '\\n'.join(newdata)\n\n\ndef scanKeyword(data):\n #scan and find the keyword\n #e.g. ['INIT','DX','2500*30.0'] -> ['DX','2500*30.0']\n for key in SupportKeyWords:\n if (key in data) and (data.find(key)!=0):\n return data[data.find(key):-1]\n return data\n## Grid Class\nclass GridTypeEnum(str, Enum): \n cartesian = 'cartesian'\n corner_point = 'corner_point'\n \nclass Grid(BaseModel):\n \"\"\"\n Class for Reservoir Simulation Grid \n * Cartesian\n * Corner-Point\n \"\"\"\n grid_type: GridTypeEnum = Field(None)\n nx: int = Field(None, gt=0)\n ny: int = Field(None, gt=0)\n nz: int = Field(None, gt=0)\n tops: Optional[Union[List[float],float]] = Field(None)\n dx: Optional[Union[List[float],float]] = Field(None)\n dy: Optional[Union[List[float],float]] = Field(None)\n dz: Optional[Union[List[float],float]] = Field(None)\n origin: Point = Field(Point(0,0,0))\n azimuth: float = Field(0, ge=0, le=360)\n dip: float = Field(0, ge=0, le=90)\n plunge: float = Field(0, ge=0, le=90)\n coord: Optional[List[float]] = Field(None)\n zcorn: Optional[List[float]] = Field(None)\n spatial_data: 
Optional[Dict[str,Union[List[float],float]]] = Field(None)\n skiped_keywords: int = Field(0)\n class Config:\n arbitrary_types_allowed = True\n validate_assignment = True\n extra = Extra.forbid\n \n @validator('tops')\n def match_tops(cls,v,values):\n if values['grid_type'] != GridTypeEnum.cartesian:\n raise ValueError('tops must be set on cartesian grid')\n n = values['nx'] * values['ny'] * values['nz']\n ntop = values['nx'] * values['ny']\n if isinstance(v,list):\n assert any([len(v) == n, len(v) == ntop])\n return v \n else: \n return np.full(ntop,v).tolist()\n \n @validator('dx','dy','dz')\n def match_deltas(cls,v,values):\n if values['grid_type'] != GridTypeEnum.cartesian:\n raise ValueError('Deltas must be set on cartesian grid')\n n = values['nx'] * values['ny'] * values['nz']\n if isinstance(v,list):\n assert len(v) == n \n return v \n else: \n return np.full(n,v).tolist()\n \n \n @validator('origin')\n def check_origin_coord(cls,v,values):\n if values['grid_type'] != GridTypeEnum.cartesian:\n raise ValueError('Origin must be set only on castesian grid')\n assert v.has_z\n return v\n \n \n @validator('coord')\n def check_coord(cls,v,values):\n if values['grid_type'] == GridTypeEnum.cartesian:\n raise ValueError('Coord must be set on corner point grid')\n length = 6*(values['nx']+1)*(values['ny']+1)\n assert len(v) == length, f'list must be of length{length}'\n return v\n \n @validator('zcorn')\n def check_zcorn(cls,v,values):\n if values['grid_type'] == GridTypeEnum.cartesian:\n raise ValueError('Coord must be set on corner point grid')\n length = 8 * values['nx'] * values['ny'] * values['nz']\n assert len(v) == length, f'list must be of length{length}'\n return v\n \n @validator('spatial_data')\n def check_spatial_data(cls,v,values):\n n = values['nx'] * values['ny'] * values['nz']\n for i in v:\n if isinstance(v[i],list):\n assert len(v[i]) == n\n return v \n else: \n return np.full(n,v[i]).tolist()\n \n @property\n def n(self):\n return self.nx * self.ny 
* self.nz\n\n def cartesian_vertices_coord(self):\n #Vertices coordinates starting at 0,0,0\n x_vert_cord = np.concatenate((np.zeros(1),np.array(self.dx).reshape((self.nx,self.ny,self.nz),order='f')[:,0,0]),axis=0).cumsum()\n y_vert_cord = np.concatenate((np.zeros(1),np.array(self.dy).reshape((self.nx,self.ny,self.nz),order='f')[0,:,0]),axis=0).cumsum()\n z_vert_cord = np.concatenate((np.zeros(1),np.array(self.dz).reshape((self.nx,self.ny,self.nz),order='f')[0,0,:]),axis=0).cumsum()\n\n points = np.zeros(((self.nx+1)*(self.ny+1)*(self.nz+1),3))\n for k in range(self.nz+1):\n for j in range(self.ny+1):\n for i in range(self.nx+1):\n l = cell_id(i,j,k,self.nx+1,self.ny+1)\n points[l,0] = x_vert_cord[i]\n points[l,1] = y_vert_cord[j]\n points[l,2] = z_vert_cord[k]\n\n #Get rotated points with respect 0,0,0\n rot_points = rotation(points,self.azimuth,self.dip,self.plunge)\n \n #Adjust the coordinates according with Origin Point\n origin = np.array([self.origin.x,self.origin.y,self.tops[0]])\n\n _vertices_coord = rot_points + origin\n return _vertices_coord\n\n def cartesian_center_point_coord(self):\n #Vertices coordinates starting at 0,0,0\n x_vert_cord = np.concatenate((np.zeros(1),np.array(self.dx).reshape((self.nx,self.ny,self.nz),order='f')[:,0,0]),axis=0).cumsum()\n y_vert_cord = np.concatenate((np.zeros(1),np.array(self.dy).reshape((self.nx,self.ny,self.nz),order='f')[0,:,0]),axis=0).cumsum()\n z_vert_cord = -np.concatenate((np.zeros(1),np.array(self.dz).reshape((self.nx,self.ny,self.nz),order='f')[0,0,:]),axis=0).cumsum()\n\n center = np.zeros(((self.nx)*(self.ny)*(self.nz),3))\n for k in range(self.nz):\n for j in range(self.ny):\n for i in range(self.nx):\n l = cell_id(i,j,k,self.nx,self.ny)\n center[l,0] = np.mean((x_vert_cord[i], x_vert_cord[i+1]))\n center[l,1] = np.mean((y_vert_cord[j], y_vert_cord[j+1]))\n center[l,2] = np.mean((z_vert_cord[k], z_vert_cord[k+1]))\n\n #Get rotated points with respect 0,0,0\n rot_points = 
rotation(center,self.azimuth,self.dip,self.plunge)\n \n #Adjust the coordinates according with Origin Point\n origin = np.array([self.origin.x,self.origin.y,self.origin.z])\n\n _center_coord = rot_points + origin\n return _center_coord\n\n\n#####################################################\n############## Methods ###########################\n\n def add_spatial_data(self,key,array):\n array = np.atleast_1d(array).flatten(order='F')\n assert self.n == array.shape[0]\n spatial_dict = {key:array.tolist()}\n if self.spatial_data is None:\n self.spatial_data = spatial_dict\n else:\n self.spatial_data.update(spatial_dict)\n\n def read_IncludeFile(self,filename_include,NumData):\n \"\"\"Read Include data file\n this data file just a series of values\n e.g. 0.2 0.3 12.23 ....\n \n Author:Bin Wang(binwang.0213@gmail.com)\n Date: Aug. 2018\n \"\"\"\n\n f=open(filename_include)\n contents=f.read()\n block_dataset=contents.strip().split() #Sepeart input file by slash /\n block_dataset=np.array(block_dataset,dtype=float)\n if(len(block_dataset)!=NumData):\n print('Data size %s is not equal to defined block dimension (NX*NY*NZ) %s'%(len(block_dataset),NumData))\n return block_dataset\n \n def LoadVar(self,Keyword,DataArray,DataSize):\n \"\"\"Load varables into class\n example:\n \n Author:Bin Wang(binwang.0213@gmail.com)\n Date: Sep. 2018\n \"\"\"\n if(Keyword in SupportKeyWords):#KeyWords Check\n assert len(DataArray)==DataSize,'\\n [Error-%s] Incompatible data size! 
%d-%d' %(Keyword,len(DataArray),DataSize)\n KeywordID=SupportKeyWords.index(Keyword)\n print(' [%s] '%(Keyword),end='')\n self.add_spatial_data(Keyword,np.array(DataArray,dtype=KeyWordsDatatypes[KeywordID]))\n else:\n print(' [Warnning] Unsupport keywords[%s]' % (Keyword))\n self.skiped_keywords+=1\n \n def read_GRDECL(self,file):\n \"\"\"Read input file(GRDECL) of Reservoir Simulator- Petrel (Eclipse) \n file format:http://petrofaq.org/wiki/Eclipse_Input_Data\n \n Arguments\n ---------\n NX, NY, NZ -- Grid dimension.\n blockData_raw -- [0] Keywords [1] values\n \n Author:Bin Wang(binwang.0213@gmail.com)\n Date: Sep. 2017\n \"\"\"\n debug=0\n\n print('[Input] Reading ECLIPSE/PETREL file \\\"%s\\\" ....'%(file))\n\n #Read whole file into list\n f=open(file)\n contents=f.read()\n contents=RemoveCommentLines(contents,commenter='--')\n contents_in_block=contents.strip().split('/') #Sepeart input file by slash /\n contents_in_block = [x for x in contents_in_block if x]#Remove empty block at the end\n NumKeywords=len(contents_in_block)\n print(f'Num Keywords {NumKeywords}')\n GoodFlag=0\n for i,block in enumerate(contents_in_block):#Keyword, Block-wise\n #Clean the data where no spliter \\ provided\n block=scanKeyword(block)\n\n blockData_raw=block.strip().split()\n Keyword=''\n DataArray=[]\n if(len(blockData_raw)>1):\n if(blockData_raw[0]=='ECHO'): #This keyword may next to real keyword\n Keyword,DataArray=blockData_raw[1],blockData_raw[2:] \n else:\n Keyword,DataArray=blockData_raw[0],blockData_raw[1:]\n\n #Read Grid Dimension [SPECGRID] or [DIMENS] \n print(Keyword)\n if(Keyword=='DIMENS'):\n DataArray=np.array(DataArray[:3],dtype=int)\n self.grid_type='cartesian'\n self.nx,self.ny,self.nz=DataArray[0],DataArray[1],DataArray[2]\n print(\" Grid Type=%s Grid\" %(self.grid_type))\n print(\" Grid Dimension(NX,NY,NZ): (%s x %s x %s)\"%(self.nx,self.ny,self.nz))\n print(\" NumOfGrids=%s\"%(self.n))\n print(' NumOfKeywords=%s'%(NumKeywords))\n print(\" Reading Keyword %d 
[%s] \" %(i+1,Keyword),end='')\n GoodFlag=1\n continue\n elif(Keyword=='SPECGRID'):\n DataArray=np.array(DataArray[:3],dtype=int)\n self.grid_type='corner_point'\n self.nx,self.ny,self.nz=DataArray[0],DataArray[1],DataArray[2]\n print(\" Grid Type=%s\" %(self.grid_type))\n print(\" Grid Dimension(NX,NY,NZ): (%s x %s x %s)\"%(self.nx,self.ny,self.nz))\n print(\" NumOfGrids=%s\"%(self.n))\n print(' NumOfKeywords=%s'%(NumKeywords))\n print(\" Reading Keywords [%s] \" %(Keyword),end='')\n GoodFlag=1\n continue\n \n if(self.grid_type is None):#Skip unnecessary keywords\n continue\n\n if(Keyword in SupportKeyWords): #We need parse the special format in \n if Keyword == 'INCLUDE':\n #if(len(DataArray)==1 and '.' in DataArray[0]):\n folder_name=os.path.dirname(file)\n self.read_GRDECL(os.path.join(folder_name,DataArray[0].replace(\"'\",\"\")))\n continue\n #DataArray=self.read_IncludeFile(os.path.join(folder_name,DataArray[0]),self.n)\n print(f'------{Keyword}------')\n\n try:\n DataArray=parseDataArray(DataArray)\n except Exception as e:\n print(e)\n continue\n \n\n #Read Grid spatial information, x,y,z ordering\n #Corner point cell\n if(Keyword=='COORD'):# Pillar coords\n assert len(DataArray)==6*(self.nx+1)*(self.ny+1),'[Error] Incompatible COORD data size!'\n self.coord=np.array(DataArray,dtype=float).tolist() \n elif(Keyword=='ZCORN'):# Depth coords\n assert len(DataArray)==8*self.n, '[Error] Incompatible ZCORN data size!'\n self.zcorn=np.array(DataArray,dtype=float)\n \n #Cartesian cell\n elif(Keyword=='DX'):# Grid size in X dir\n assert len(DataArray)==self.n, '[Error] Incompatible DX data size!'\n self.dx=np.array(DataArray,dtype=float).tolist() \n elif(Keyword=='DY'):# Grid size in Y dir\n assert len(DataArray)==self.n, '[Error] Incompatible DY data size!'\n self.dy=np.array(DataArray,dtype=float).tolist() \n elif(Keyword=='DZ'):# Grid size in Z dir\n assert len(DataArray)==self.n, '[Error] Incompatible DZ data size!'\n 
self.dz=np.array(DataArray,dtype=float).tolist() \n elif(Keyword=='TOPS'):# TOP position\n assert any([len(DataArray)==self.n,len(DataArray)==self.nx*self.ny]), '[Error] Incompatible TOPS data size!'\n self.tops=np.array(DataArray,dtype=float).tolist() \n\n #Read Grid Properties information\n else:\n try:\n self.LoadVar(Keyword,DataArray,DataSize=self.n)\n except Exception as e:\n print(e)\n continue\n\n f.close()\n #assert GoodFlag==1,'Can not find grid dimension info, [SPECGRID] or [DIMENS]!'\n print('.....Done!')\n\n\n #Genetrate TOPS for cartesian grid if TOPS if not given\n if(self.grid_type=='cartesian' and self.tops is None):\n tops=np.zeros(self.n)\n for k in range(self.nz-1):\n for j in range(self.ny):\n for i in range(self.nx):\n ijk=cell_id(i,j,k,self.nx,self.ny)\n ijk_next=cell_id(i,j,k+1,self.nx,self.ny)\n tops[ijk_next] = tops[ijk] + self.dz[ijk]\n self.tops=tops.tolist()\n\n\n def to_ecl(self, filename=None, keywords=None, one_file=False, return_string=False, save_file=True):\n \n \n list_str =[]\n key_added = []\n \n if self.grid_type == 'cartesian':\n\n if keywords is None:\n keywords = ['TOPS','DX','DY','DZ']\n else:\n assert isinstance(keywords,list)\n keywords_type = [i for i in keywords if i in ['TOPS','DX','DY','DZ']]\n else: \n if keywords is None:\n keywords = ['COORD','ZCORN','SPECGRID']\n else:\n assert isinstance(keywords,list)\n keywords_type = [i for i in keywords if i in ['COORD','ZCORN']]\n \n if 'SPECGRID' in keywords:\n print('SPECGRID')\n list_str.append(f'SPECGRID\\n {self.nx} {self.ny} {self.nz} 1 F /\\n') \n key_added.append('SPECGRID')\n \n if len(keywords_type)>0:\n for k in keywords_type:\n print(k)\n key_str = \"\"\n key_str += f'{k.upper()}\\n'\n key_str += ' ' + ' '.join([str(v) + '\\n' if (i+1)%10==0 else str(v) for i,v in enumerate(getattr(self,k.lower()))]) + '/\\n'\n list_str.append(key_str)\n key_added.append(k)\n \n \n keywords_spatial = [i for i in keywords if i not in 
['SPECGRID','COORD','ZCORN','TOPS','DX','DY','DZ']]\n\n if all([bool(self.spatial_data),len(keywords_spatial)>0]):\n for key in keywords_spatial:\n print(key)\n key_str =\"\"\n try:\n key_str += key + '\\n'\n key_str += ' ' + ' '.join([str(v) + '\\n' if (i+1)%10==0 else str(v) for i,v in enumerate(getattr(self,'spatial_data')[key])]) + '/\\n'\n list_str.append(key_str) \n key_added.append(key) \n except:\n pass\n \n if save_file: \n if one_file==True:\n if filename is None:\n filename ='grid.GRDECL'\n try:\n string = \"\".join(list_str)\n with open(filename,'w') as text_file:\n text_file.write(string)\n except Exception as e:\n print(e)\n pass\n \n else:\n if filename is None:\n filename = '.' \n filename = os.path.abspath(filename)\n print(filename)\n for i, key in enumerate(list_str):\n \n try:\n with open(os.path.join(filename,key_added[i]+'.prop'),'w') as text_file:\n text_file.write(key)\n except Exception as e:\n print(e)\n pass\n \n if return_string:\n return \"\".join(list_str)\n \n \n \n def get_cell_id(self,i,j,k):\n \"\"\"\n Get the cell Id given i,j,k indexes. 
\n * --- * --- * --- *\n | 0,2 | 1,2 | 2,2 | <- Cell id 6,7,8\n * --- * --- * --- *\n | 0,1 | 1,1 | 2,1 | <- Cell id 3,4,5\n * --- * --- * --- *\n | 0,0 | 1,0 | 2,0 | <- Cell id 0,1,2\n * --- * --- * --- *\n \"\"\"\n c_id = cell_id(i,j,k,self.nx,self.ny)\n return c_id\n\n def get_cell_ijk(self,cell_id):\n \"\"\"\n Get the cell indexes i,j,k given the cell id\n * --- * --- * --- *\n | 0,2 | 1,2 | 2,2 | <- Cell id 6,7,8\n * --- * --- * --- *\n | 0,1 | 1,1 | 2,1 | <- Cell id 3,4,5\n * --- * --- * --- *\n | 0,0 | 1,0 | 2,0 | <- Cell id 0,1,2\n * --- * --- * --- *\n \"\"\"\n i,j,k=cell_ijk(cell_id,self.nx,self.ny)\n return i,j,k\n\n def get_pillar(self,pillar_id:int):\n \"\"\"\n Get the Top and Bottom coordinates of a pillar id\n \"\"\"\n if self.grid_type == 'corner_point':\n id_top=[6*pillar_id+0,6*pillar_id+1,6*pillar_id+2]\n id_bottom=[6*pillar_id+3,6*pillar_id+4,6*pillar_id+5]\n top_point=np.array([self.coord[i] for i in id_top])\n bottom_point=np.array([self.coord[i] for i in id_bottom])\n else:\n raise ValueError('Pillar are only set in a Corner Point Grid')\n return np.array([top_point,bottom_point])\n\n def get_cell_pillars(self,i,j):\n \"\"\"Obtain the four pillars (p0,p1,p2,p3) of a corner point cell\n The index of pillar\n \n 3x3x1 system (2D X-Y plane)\n 12--- 13 --- 14 ---15\n | | | | <- Cell 6,7,8\n 8 --- 9 --- 10 ---11\n | | | | <- Cell 3,4,5\n 4 --- 5 --- 6 --- 7\n | | | | <- Cell 0,1,2\n 0 --- 1 --- 2 --- 3\n \n The pillars index for a grid follows below ordering (XY Plane)\n p2 p3\n *------*\n | |\n | |\n *------*\n p0 p1\n\n \"\"\"\n if self.grid_type == 'corner_point':\n p0 = cell_id(i,j,0,self.nx+1,self.ny+1)\n p1 = cell_id(i+1,j,0,self.nx+1,self.ny+1)\n p2 = cell_id(i,j+1,0,self.nx+1,self.ny+1)\n p3 = cell_id(i+1,j+1,0,self.nx+1,self.ny+1)\n\n pls = [self.get_pillar(p0),self.get_pillar(p1),self.get_pillar(p2),self.get_pillar(p3)]\n else:\n raise ValueError('Pillar are only set in a Corner Point Grid')\n return np.array(pls)\n\n def 
get_vertices_id(self,i,j,k,order='GRD'):\n \"\"\"\n Cartesian Grid\n\n Get the cell Id given i,j,k indexes. \n 13 --- 14 --- 15 --- 16\n | 0,2 | 1,2 | 2,2 | <- Cell id 6,7,8\n 9 --- 10 --- 11 --- 12\n | 0,1 | 1,1 | 2,1 | <- Cell id 3,4,5\n 5 --- 6 --- 7 --- 8\n | 0,0 | 1,0 | 2,0 | <- Cell id 0,1,2\n 1 --- 2 --- 3 --- 4\n\n Corner Point Grid\n\n 3x3x1 system (2D X-Y plane)\n 30---31,32---33,34---35\n | | | | <- Cell 6,7,8\n 24---25,26---27,28---29\n 18---19,20---21,22---23\n | | | | <- Cell 3,4,5\n 12---13,14---15,16---17\n 6 --- 7,8 --- 9,10---11\n | | | | <- Cell 0,1,2\n 0 --- 1,2 --- 3,4 --- 5\n Node order convention for a 3D cell\n 6----7\n - - <-Bottom Face\n 4----5\n 2----3\n - - <-Top Face\n 0----1\n \"\"\" \n if self.grid_type == 'cartesian':\n nx,ny=self.nx+1,self.ny+1\n p0=cell_id(i,j,k,nx,ny)\n p1=cell_id(i+1,j,k,nx,ny)\n p2=cell_id(i,j+1,k,nx,ny)\n p3=cell_id(i+1,j+1,k,nx,ny)\n\n p4=cell_id(i,j,k+1,nx,ny)\n p5=cell_id(i+1,j,k+1,nx,ny)\n p6=cell_id(i,j+1,k+1,nx,ny)\n p7=cell_id(i+1,j+1,k+1,nx,ny)\n\n if order == 'GRD':\n points = [p0,p1,p2,p3,p4,p5,p6,p7]\n elif order == 'VTK':\n points = [p4,p5,p7,p6,p0,p1,p3,p2]\n\n return np.array(points)\n\n if self.grid_type == 'corner_point':\n nx,ny=2*self.nx,2*self.ny\n p0=cell_id(2*i,2*j,2*k,nx,ny)\n p1=cell_id(2*i+1,2*j,2*k,nx,ny)\n p2=cell_id(2*i,2*j+1,2*k,nx,ny)\n p3=cell_id(2*i+1,2*j+1,2*k,nx,ny)\n\n p4=cell_id(2*i,2*j,2*k+1,nx,ny)\n p5=cell_id(2*i+1,2*j,2*k+1,nx,ny)\n p6=cell_id(2*i,2*j+1,2*k+1,nx,ny)\n p7=cell_id(2*i+1,2*j+1,2*k+1,nx,ny)\n\n if order == 'GRD':\n points = [p0,p1,p2,p3,p4,p5,p6,p7]\n elif order == 'VTK':\n points = [p4,p5,p7,p6,p0,p1,p3,p2]\n\n return np.array(points)\n\n\n def get_vertices_z(self,i,j,k):\n \"\"\"\n Node order convention for a 3D cell\n 6----7\n - - <-Bottom Face\n 4----5\n 2----3\n - - <-Top Face\n 0----1\n \"\"\"\n # Get the z coord for a cell\n if self.grid_type == 'corner_point':\n p = self.get_vertices_id(i,j,k)\n z = [self.zcorn[i] for i in p]\n return np.array(z)\n 
elif self.grid_type == 'cartesian':\n p= self.get_vertices_id(i,j,k)\n z = [self.cartesian_vertices_coord()[i,2] for i in p]\n return np.array(z)\n\n # Pending for cartessian grid\n\n def get_vertices_coords(self,i,j,k,order='GRD'):\n if self.grid_type == 'corner_point':\n coords=[]\n pillars = self.get_cell_pillars(i,j)\n cell_z =self.get_vertices_z(i,j,k)\n\n for i in range(8):\n p_id = i%4\n coords.append(interpolate_z_pillar(cell_z[i],pillars[p_id]))\n \n if order == 'GRD':\n v_coord = np.array(coords)\n elif order == 'VTK':\n v_coord = np.array(coords)[[4,5,7,6,0,1,3,2],:]\n\n return v_coord\n\n elif self.grid_type == 'cartesian':\n p= self.get_vertices_id(i,j,k)\n coords = [[self.cartesian_vertices_coord()[pi,0],self.cartesian_vertices_coord()[pi,1],self.cartesian_vertices_coord()[pi,2]] for pi in p]\n if order == 'GRD':\n v_coord = np.array(coords)\n elif order == 'VTK':\n v_coord = np.array(coords)[[4,5,7,6,0,1,3,2],:]\n\n return v_coord\n\n\n def get_vertices_face_z(self,i,j,k, face=None):\n \"\"\"\n Get the Z coords for a cell\n \n 6----7\n - - <-Bottom Face\n 4----5\n 2----3\n - - <-Top Face\n 0----1 \n Follow getCornerPointCellIdx convention:\n X-, [0,2,4,6]\n X+, [1,3,5,7]\n Y-, [0,1,4,5]\n Y+, [2,3,6,7]\n Z+, [0,1,2,3]\n Z-, [4,5,6,7]\n \"\"\"\n assert face is not None, 'A face must be choosen'\n points_id = self.get_vertices_id(i,j,k)\n if(face==\"X-\"): face_id=[points_id[0],points_id[2],points_id[4],points_id[6]]\n if(face==\"X+\"): face_id=[points_id[1],points_id[3],points_id[5],points_id[7]]\n if(face==\"Y-\"): face_id=[points_id[0],points_id[1],points_id[4],points_id[5]]\n if(face==\"Y+\"): face_id=[points_id[2],points_id[3],points_id[6],points_id[7]]\n if(face==\"Z-\"): face_id=[points_id[4],points_id[5],points_id[6],points_id[7]]\n if(face==\"Z+\"): face_id=[points_id[0],points_id[1],points_id[2],points_id[3]]\n\n if self.grid_type == 'cartesian':\n z_face = [self.cartesian_vertices_coord()[i,2] for i in face_id]\n elif self.grid_type == 
'corner_point':\n z_face = [self.zcorn[i] for i in face_id]\n\n return np.array(z_face)\n\n def get_vertices_face_coords(self,i,j,k, face=None):\n \"\"\"\n Get the Z coords for a cell\n \n 6----7\n - - <-Bottom Face\n 4----5\n 2----3\n - - <-Top Face\n 0----1 \n Follow getCornerPointCellIdx convention:\n X-, [0,2,4,6]\n X+, [1,3,5,7]\n Y-, [0,1,4,5]\n Y+, [2,3,6,7]\n Z+, [0,1,2,3]\n Z-, [4,5,6,7]\n \"\"\"\n assert face is not None, 'A face must be choosen'\n points_id = self.get_vertices_id(i,j,k)\n if (face==\"X-\"): \n face_id=[points_id[0],points_id[2],points_id[4],points_id[6]]\n ind = [0,2,4,6]\n elif (face==\"X+\"): \n face_id=[points_id[1],points_id[3],points_id[5],points_id[7]]\n ind = [1,3,5,7]\n elif (face==\"Y-\"): \n face_id=[points_id[0],points_id[1],points_id[4],points_id[5]]\n ind = [0,1,4,5]\n elif (face==\"Y+\"): \n face_id=[points_id[2],points_id[3],points_id[6],points_id[7]]\n ind = [2,3,6,7]\n elif (face==\"Z-\"): \n face_id=[points_id[4],points_id[5],points_id[6],points_id[7]]\n ind = [4,5,6,7]\n elif (face==\"Z+\"): \n face_id=[points_id[0],points_id[1],points_id[2],points_id[3]]\n ind = [0,1,2,3]\n\n if self.grid_type == 'cartesian':\n z_face = [self.cartesian_vertices_coord()[i,:] for i in face_id]\n elif self.grid_type == 'corner_point':\n v_cord = self.get_vertices_coords(i,j,k)\n z_face = v_cord[ind,:]\n\n return np.array(z_face)\n\n\n def get_center_coord(self,i,j,k):\n \"\"\"\n Get the cell Id given i,j,k indexes. 
\n * --- * --- * --- *\n | 0,2 | 1,2 | 2,2 | <- Cell id 6,7,8\n * --- * --- * --- *\n | 0,1 | 1,1 | 2,1 | <- Cell id 3,4,5\n * --- * --- * --- *\n | 0,0 | 1,0 | 2,0 | <- Cell id 0,1,2\n * --- * --- * --- *\n \"\"\"\n cid = self.get_cell_id(i,j,k)\n if self.grid_type == 'cartesian':\n center = self.cartesian_center_point_coord()[cid,:]\n elif self.grid_type == 'corner_point':\n points = self.get_vertices_coords(i,j,k)\n center = points.mean(axis=0)\n \n return center\n\n def get_vtk(self):\n \"\"\"\n Get the pyvista Object\n https://docs.pyvista.org/examples/00-load/create-unstructured-surface.html#sphx-glr-examples-00-load-create-unstructured-surface-py\n \"\"\"\n \n #Identify the cell data connections\n offset = np.arange(0,9*self.n,step=9)\n\n points = np.zeros((self.n*8,3))\n #Cells\n for k in range(self.nz):\n for j in range(self.ny):\n for i in range(self.nx):\n c_idx = self.get_cell_id(i,j,k)\n #cells_array[c_idx,:] = self.get_vertices_id(i,j,k, order='VTK')\n\n ind_from = 8*c_idx\n ind_to = 8*(c_idx+1)\n points[ind_from:ind_to,:] = self.get_vertices_coords(i,j,k, order = 'VTK')\n\n # Make a vector of shape self.n, make 2D and append to cell array then flatten C order\n cell_array = np.arange(self.n*8).reshape((self.n,8))\n cells = np.append(np.full(self.n,8).reshape((self.n,1)),cell_array,1).flatten()\n\n # cell type array. Contains the cell type of each cell\n cell_type = np.array([vtk.VTK_HEXAHEDRON]*self.n)\n\n grid = pv.UnstructuredGrid(offset, cells, cell_type, points)\n\n if self.spatial_data is not None:\n for i in self.spatial_data.items():\n grid.cell_arrays[i[0]] = i[1]\n\n return grid\n\n\n\n\n\n\n\n \n\n\n \n\n\n\n\n\n\n \n \n\n \n\n \n\n\n\n\n"
] |
[
[
"numpy.full",
"numpy.array",
"numpy.sin",
"numpy.matmul",
"numpy.zeros",
"numpy.mean",
"numpy.radians",
"numpy.arange",
"numpy.atleast_1d",
"numpy.cos"
]
] |
krikyn/Strong-Paraphrase-Generation-2020
|
[
"3d5b6f4fd0d4b4f96ed6bdd91b7000d3d80fc901"
] |
[
"paraphrase/vae/utils/batch_loader.py"
] |
[
"import collections\nimport os\nimport re\n\nimport numpy as np\nfrom six.moves import cPickle\n\nfrom .functional import *\n\n\nclass BatchLoader:\n def __init__(self, data_files, idx_files, tensor_files, path='../../'):\n self.data_files = data_files\n self.idx_files = idx_files\n self.tensor_files = tensor_files\n\n self.blind_symbol = ''\n self.pad_token = '_'\n self.go_token = '>'\n self.end_token = '|'\n self.a_token = '?'\n\n idx_exists = fold(f_and,\n [os.path.exists(file) for file in self.idx_files],\n True)\n\n tensors_exists = fold(f_and,\n [os.path.exists(file) for target in self.tensor_files\n for file in target],\n True)\n\n if idx_exists and tensors_exists:\n self.load_preprocessed(self.data_files,\n self.idx_files,\n self.tensor_files)\n print('preprocessed data was found and loaded')\n else:\n self.preprocess(self.data_files,\n self.idx_files,\n self.tensor_files)\n print('data have preprocessed')\n\n self.word_embedding_index = 0\n\n def clean_whole_data(self, string):\n string = re.sub(r'^[\\d\\:]+ ', '', string, 0, re.M)\n string = re.sub(r'\\n\\s{11}', ' ', string, 0, re.M)\n string = re.sub(r'\\n{2}', '\\n', string, 0, re.M)\n\n return string.lower()\n\n def clean_str(self, string):\n\n string = re.sub(r\"[^가-힣A-Za-z0-9(),!?:;.\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\"\\.\", \" . \", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\":\", \" : \", string)\n string = re.sub(r\";\", \" ; \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" ( \", string)\n string = re.sub(r\"\\)\", \" ) \", string)\n string = re.sub(r\"\\?\", \" ? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip()\n\n def build_character_vocab(self, data):\n chars = list(set(data)) + [self.blind_symbol, self.pad_token, self.go_token, self.end_token]\n chars_vocab_size = len(chars)\n idx_to_char = chars\n char_to_idx = {x: i for i, x in enumerate(idx_to_char)}\n\n return chars_vocab_size, idx_to_char, char_to_idx\n\n def build_word_vocab(self, sentences):\n word_counts = collections.Counter(sentences)\n idx_to_word = [x[0] for x in word_counts.most_common()]\n idx_to_word = list(sorted(idx_to_word)) + [self.pad_token, self.go_token, self.end_token]\n\n words_vocab_size = len(idx_to_word)\n word_to_idx = {x: i for i, x in enumerate(idx_to_word)}\n\n return words_vocab_size, idx_to_word, word_to_idx\n\n def preprocess(self, data_files, idx_files, tensor_files):\n\n data = [open(file, \"r\").read() for file in data_files]\n merged_data = data[0] + '\\n' + data[1]\n\n self.chars_vocab_size, self.idx_to_char, self.char_to_idx = self.build_character_vocab(merged_data)\n\n with open(idx_files[1], 'wb') as f:\n cPickle.dump(self.idx_to_char, f)\n\n data_words = [[line.split() for line in target.split('\\n')] for target in data]\n merged_data_words = merged_data.split()\n\n self.words_vocab_size, self.idx_to_word, self.word_to_idx = self.build_word_vocab(merged_data_words)\n self.max_word_len = np.amax([len(word) for word in self.idx_to_word])\n self.max_seq_len = np.amax([len(line) for target in data_words for line in target])\n self.num_lines = [len(target) for target in data_words]\n\n with open(idx_files[0], 'wb') as f:\n cPickle.dump(self.idx_to_word, f)\n\n self.word_tensor = np.array(\n [[list(map(self.word_to_idx.get, line)) for line in target] for target in data_words])\n print(self.word_tensor.shape)\n for i, path in enumerate(tensor_files[0]):\n np.save(path, self.word_tensor[i])\n\n self.character_tensor = np.array(\n [[list(map(self.encode_characters, line)) for line in target] for target 
in data_words])\n for i, path in enumerate(tensor_files[1]):\n np.save(path, self.character_tensor[i])\n\n self.just_words = [word for line in self.word_tensor[0] for word in line]\n\n def load_preprocessed(self, data_files, idx_files, tensor_files):\n\n data = [open(file, \"r\", encoding='utf-8').read() for file in data_files]\n data_words = [[line.split() for line in target.split('\\n')] for target in data]\n self.max_seq_len = np.amax([len(line) for target in data_words for line in target])\n self.num_lines = [len(target) for target in data_words]\n\n [self.idx_to_word, self.idx_to_char] = [cPickle.load(open(file, \"rb\")) for file in idx_files]\n\n [self.words_vocab_size, self.chars_vocab_size] = [len(idx) for idx in [self.idx_to_word, self.idx_to_char]]\n\n [self.word_to_idx, self.char_to_idx] = [dict(zip(idx, range(len(idx)))) for idx in\n [self.idx_to_word, self.idx_to_char]]\n\n self.max_word_len = np.amax([len(word) for word in self.idx_to_word])\n\n [self.word_tensor, self.character_tensor] = [\n np.array([np.load(target, allow_pickle=True) for target in input_type])\n for input_type in tensor_files]\n\n self.just_words = [word for line in self.word_tensor[0] for word in line]\n\n def next_batch(self, batch_size, target_str, start_index):\n target = 0\n\n indexes = np.array(range(start_index, start_index + batch_size))\n\n encoder_word_input = [self.word_tensor[target][index] for index in indexes]\n\n encoder_character_input = [self.character_tensor[target][index] for index in indexes]\n input_seq_len = [len(line) for line in encoder_word_input]\n max_input_seq_len = np.amax(input_seq_len)\n\n encoded_words = [[idx for idx in line] for line in encoder_word_input]\n decoder_word_input = [[self.word_to_idx[self.go_token]] + line for line in encoder_word_input]\n decoder_character_input = [[self.encode_characters(self.go_token)] + line for line in encoder_character_input]\n decoder_output = [line + [self.word_to_idx[self.end_token]] for line in 
encoded_words]\n\n for i, line in enumerate(decoder_word_input):\n line_len = input_seq_len[i]\n to_add = max_input_seq_len - line_len\n decoder_word_input[i] = line + [self.word_to_idx[self.pad_token]] * to_add\n\n for i, line in enumerate(decoder_character_input):\n line_len = input_seq_len[i]\n to_add = max_input_seq_len - line_len\n decoder_character_input[i] = line + [self.encode_characters(self.pad_token)] * to_add\n\n for i, line in enumerate(decoder_output):\n line_len = input_seq_len[i]\n to_add = max_input_seq_len - line_len\n decoder_output[i] = line + [self.word_to_idx[self.pad_token]] * to_add\n\n for i, line in enumerate(encoder_word_input):\n line_len = input_seq_len[i]\n to_add = max_input_seq_len - line_len\n encoder_word_input[i] = [self.word_to_idx[self.pad_token]] * to_add + line[::-1]\n\n for i, line in enumerate(encoder_character_input):\n line_len = input_seq_len[i]\n to_add = max_input_seq_len - line_len\n encoder_character_input[i] = [self.encode_characters(self.pad_token)] * to_add + line[::-1]\n\n return np.array(encoder_word_input), np.array(encoder_character_input), \\\n np.array(decoder_word_input), np.array(decoder_character_input), np.array(decoder_output)\n\n def next_embedding_seq(self, seq_len):\n\n words_len = len(self.just_words)\n seq = [self.just_words[i % words_len]\n for i in np.arange(self.word_embedding_index, self.word_embedding_index + seq_len)]\n\n result = []\n for i in range(seq_len - 2):\n result.append([seq[i + 1], seq[i]])\n result.append([seq[i + 1], seq[i + 2]])\n\n self.word_embedding_index = (self.word_embedding_index + seq_len) % words_len - 2\n\n result = np.array(result)\n return result[:, 0], result[:, 1]\n\n def go_input(self, batch_size):\n go_word_input = [[self.word_to_idx[self.go_token]] for _ in range(batch_size)]\n go_character_input = [[self.encode_characters(self.go_token)] for _ in range(batch_size)]\n\n return np.array(go_word_input), np.array(go_character_input)\n\n def encode_word(self, idx):\n 
result = np.zeros(self.words_vocab_size)\n result[idx] = 1\n return result\n\n def decode_word(self, word_idx):\n word = self.idx_to_word[word_idx]\n return word\n\n def sample_word_from_distribution(self, distribution):\n ix = np.random.choice(range(self.words_vocab_size), p=distribution.ravel())\n x = np.zeros((self.words_vocab_size, 1))\n x[ix] = 1\n return self.idx_to_word[np.argmax(x)]\n\n def encode_characters(self, characters):\n word_len = len(characters)\n to_add = self.max_word_len - word_len\n characters_idx = [self.char_to_idx[i] for i in characters] + to_add * [self.char_to_idx['']]\n return characters_idx\n\n def decode_characters(self, characters_idx):\n characters = [self.idx_to_char[i] for i in characters_idx]\n return ''.join(characters)\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.load",
"numpy.save",
"numpy.amax",
"numpy.arange",
"numpy.argmax"
]
] |
jeremiedbb/scipy
|
[
"2bea64c334b18fd445a7945b350d7ace2dc22913",
"2bea64c334b18fd445a7945b350d7ace2dc22913"
] |
[
"scipy/spatial/kdtree.py",
"scipy/io/wavfile.py"
] |
[
"# Copyright Anne M. Archibald 2008\n# Released under the scipy license\nfrom __future__ import division, print_function, absolute_import\n\nimport sys\nimport numpy as np\nfrom heapq import heappush, heappop\nimport scipy.sparse\n\n__all__ = ['minkowski_distance_p', 'minkowski_distance',\n 'distance_matrix',\n 'Rectangle', 'KDTree']\n\n\ndef minkowski_distance_p(x, y, p=2):\n \"\"\"\n Compute the pth power of the L**p distance between two arrays.\n\n For efficiency, this function computes the L**p distance but does\n not extract the pth root. If `p` is 1 or infinity, this is equal to\n the actual L**p distance.\n\n Parameters\n ----------\n x : (M, K) array_like\n Input array.\n y : (N, K) array_like\n Input array.\n p : float, 1 <= p <= infinity\n Which Minkowski p-norm to use.\n\n Examples\n --------\n >>> from scipy.spatial import minkowski_distance_p\n >>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])\n array([2, 1])\n\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n # Find smallest common datatype with float64 (return type of this function) - addresses #10262.\n # Don't just cast to float64 for complex input case.\n common_datatype = np.promote_types(np.promote_types(x.dtype, y.dtype), 'float64')\n\n # Make sure x and y are NumPy arrays of correct datatype.\n x = x.astype(common_datatype)\n y = y.astype(common_datatype)\n\n if p == np.inf:\n return np.amax(np.abs(y-x), axis=-1)\n elif p == 1:\n return np.sum(np.abs(y-x), axis=-1)\n else:\n return np.sum(np.abs(y-x)**p, axis=-1)\n\n\ndef minkowski_distance(x, y, p=2):\n \"\"\"\n Compute the L**p distance between two arrays.\n\n Parameters\n ----------\n x : (M, K) array_like\n Input array.\n y : (N, K) array_like\n Input array.\n p : float, 1 <= p <= infinity\n Which Minkowski p-norm to use.\n\n Examples\n --------\n >>> from scipy.spatial import minkowski_distance\n >>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])\n array([ 1.41421356, 1. 
])\n\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n if p == np.inf or p == 1:\n return minkowski_distance_p(x, y, p)\n else:\n return minkowski_distance_p(x, y, p)**(1./p)\n\n\nclass Rectangle(object):\n \"\"\"Hyperrectangle class.\n\n Represents a Cartesian product of intervals.\n \"\"\"\n def __init__(self, maxes, mins):\n \"\"\"Construct a hyperrectangle.\"\"\"\n self.maxes = np.maximum(maxes,mins).astype(float)\n self.mins = np.minimum(maxes,mins).astype(float)\n self.m, = self.maxes.shape\n\n def __repr__(self):\n return \"<Rectangle %s>\" % list(zip(self.mins, self.maxes))\n\n def volume(self):\n \"\"\"Total volume.\"\"\"\n return np.prod(self.maxes-self.mins)\n\n def split(self, d, split):\n \"\"\"\n Produce two hyperrectangles by splitting.\n\n In general, if you need to compute maximum and minimum\n distances to the children, it can be done more efficiently\n by updating the maximum and minimum distances to the parent.\n\n Parameters\n ----------\n d : int\n Axis to split hyperrectangle along.\n split : float\n Position along axis `d` to split at.\n\n \"\"\"\n mid = np.copy(self.maxes)\n mid[d] = split\n less = Rectangle(self.mins, mid)\n mid = np.copy(self.mins)\n mid[d] = split\n greater = Rectangle(mid, self.maxes)\n return less, greater\n\n def min_distance_point(self, x, p=2.):\n \"\"\"\n Return the minimum distance between input and points in the hyperrectangle.\n\n Parameters\n ----------\n x : array_like\n Input.\n p : float, optional\n Input.\n\n \"\"\"\n return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)\n\n def max_distance_point(self, x, p=2.):\n \"\"\"\n Return the maximum distance between input and points in the hyperrectangle.\n\n Parameters\n ----------\n x : array_like\n Input array.\n p : float, optional\n Input.\n\n \"\"\"\n return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)\n\n def min_distance_rectangle(self, other, p=2.):\n \"\"\"\n Compute the minimum distance between points in 
the two hyperrectangles.\n\n Parameters\n ----------\n other : hyperrectangle\n Input.\n p : float\n Input.\n\n \"\"\"\n return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)\n\n def max_distance_rectangle(self, other, p=2.):\n \"\"\"\n Compute the maximum distance between points in the two hyperrectangles.\n\n Parameters\n ----------\n other : hyperrectangle\n Input.\n p : float, optional\n Input.\n\n \"\"\"\n return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)\n\n\nclass KDTree(object):\n \"\"\"\n kd-tree for quick nearest-neighbor lookup\n\n This class provides an index into a set of k-D points which\n can be used to rapidly look up the nearest neighbors of any point.\n\n Parameters\n ----------\n data : (N,K) array_like\n The data points to be indexed. This array is not copied, and\n so modifying this data will result in bogus results.\n leafsize : int, optional\n The number of points at which the algorithm switches over to\n brute-force. Has to be positive.\n\n Raises\n ------\n RuntimeError\n The maximum recursion limit can be exceeded for large data\n sets. If this happens, either increase the value for the `leafsize`\n parameter or increase the recursion limit by::\n\n >>> import sys\n >>> sys.setrecursionlimit(10000)\n\n See Also\n --------\n cKDTree : Implementation of `KDTree` in Cython\n\n Notes\n -----\n The algorithm used is described in Maneewongvatana and Mount 1999.\n The general idea is that the kd-tree is a binary tree, each of whose\n nodes represents an axis-aligned hyperrectangle. 
Each node specifies\n an axis and splits the set of points based on whether their coordinate\n along that axis is greater than or less than a particular value.\n\n During construction, the axis and splitting point are chosen by the\n \"sliding midpoint\" rule, which ensures that the cells do not all\n become long and thin.\n\n The tree can be queried for the r closest neighbors of any given point\n (optionally returning only those within some maximum distance of the\n point). It can also be queried, with a substantial gain in efficiency,\n for the r approximate closest neighbors.\n\n For large dimensions (20 is already large) do not expect this to run\n significantly faster than brute force. High-dimensional nearest-neighbor\n queries are a substantial open problem in computer science.\n\n The tree also supports all-neighbors queries, both with arrays of points\n and with other kd-trees. These do use a reasonably efficient algorithm,\n but the kd-tree is not necessarily the best data structure for this\n sort of calculation.\n\n \"\"\"\n def __init__(self, data, leafsize=10):\n self.data = np.asarray(data)\n self.n, self.m = np.shape(self.data)\n self.leafsize = int(leafsize)\n if self.leafsize < 1:\n raise ValueError(\"leafsize must be at least 1\")\n self.maxes = np.amax(self.data,axis=0)\n self.mins = np.amin(self.data,axis=0)\n\n self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)\n\n class node(object):\n if sys.version_info[0] >= 3:\n def __lt__(self, other):\n return id(self) < id(other)\n\n def __gt__(self, other):\n return id(self) > id(other)\n\n def __le__(self, other):\n return id(self) <= id(other)\n\n def __ge__(self, other):\n return id(self) >= id(other)\n\n def __eq__(self, other):\n return id(self) == id(other)\n\n class leafnode(node):\n def __init__(self, idx):\n self.idx = idx\n self.children = len(idx)\n\n class innernode(node):\n def __init__(self, split_dim, split, less, greater):\n self.split_dim = split_dim\n self.split = 
split\n self.less = less\n self.greater = greater\n self.children = less.children+greater.children\n\n def __build(self, idx, maxes, mins):\n if len(idx) <= self.leafsize:\n return KDTree.leafnode(idx)\n else:\n data = self.data[idx]\n # maxes = np.amax(data,axis=0)\n # mins = np.amin(data,axis=0)\n d = np.argmax(maxes-mins)\n maxval = maxes[d]\n minval = mins[d]\n if maxval == minval:\n # all points are identical; warn user?\n return KDTree.leafnode(idx)\n data = data[:,d]\n\n # sliding midpoint rule; see Maneewongvatana and Mount 1999\n # for arguments that this is a good idea.\n split = (maxval+minval)/2\n less_idx = np.nonzero(data <= split)[0]\n greater_idx = np.nonzero(data > split)[0]\n if len(less_idx) == 0:\n split = np.amin(data)\n less_idx = np.nonzero(data <= split)[0]\n greater_idx = np.nonzero(data > split)[0]\n if len(greater_idx) == 0:\n split = np.amax(data)\n less_idx = np.nonzero(data < split)[0]\n greater_idx = np.nonzero(data >= split)[0]\n if len(less_idx) == 0:\n # _still_ zero? 
all must have the same value\n if not np.all(data == data[0]):\n raise ValueError(\"Troublesome data array: %s\" % data)\n split = data[0]\n less_idx = np.arange(len(data)-1)\n greater_idx = np.array([len(data)-1])\n\n lessmaxes = np.copy(maxes)\n lessmaxes[d] = split\n greatermins = np.copy(mins)\n greatermins[d] = split\n return KDTree.innernode(d, split,\n self.__build(idx[less_idx],lessmaxes,mins),\n self.__build(idx[greater_idx],maxes,greatermins))\n\n def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):\n\n side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))\n if p != np.inf:\n side_distances **= p\n min_distance = np.sum(side_distances)\n else:\n min_distance = np.amax(side_distances)\n\n # priority queue for chasing nodes\n # entries are:\n # minimum distance between the cell and the target\n # distances between the nearest side of the cell and the target\n # the head node of the cell\n q = [(min_distance,\n tuple(side_distances),\n self.tree)]\n # priority queue for the nearest neighbors\n # furthest known neighbor first\n # entries are (-distance**p, i)\n neighbors = []\n\n if eps == 0:\n epsfac = 1\n elif p == np.inf:\n epsfac = 1/(1+eps)\n else:\n epsfac = 1/(1+eps)**p\n\n if p != np.inf and distance_upper_bound != np.inf:\n distance_upper_bound = distance_upper_bound**p\n\n while q:\n min_distance, side_distances, node = heappop(q)\n if isinstance(node, KDTree.leafnode):\n # brute-force\n data = self.data[node.idx]\n ds = minkowski_distance_p(data,x[np.newaxis,:],p)\n for i in range(len(ds)):\n if ds[i] < distance_upper_bound:\n if len(neighbors) == k:\n heappop(neighbors)\n heappush(neighbors, (-ds[i], node.idx[i]))\n if len(neighbors) == k:\n distance_upper_bound = -neighbors[0][0]\n else:\n # we don't push cells that are too far onto the queue at all,\n # but since the distance_upper_bound decreases, we might get\n # here even if the cell's too far\n if min_distance > distance_upper_bound*epsfac:\n # since this is the 
nearest cell, we're done, bail out\n break\n # compute minimum distances to the children and push them on\n if x[node.split_dim] < node.split:\n near, far = node.less, node.greater\n else:\n near, far = node.greater, node.less\n\n # near child is at the same distance as the current node\n heappush(q,(min_distance, side_distances, near))\n\n # far child is further by an amount depending only\n # on the split value\n sd = list(side_distances)\n if p == np.inf:\n min_distance = max(min_distance, abs(node.split-x[node.split_dim]))\n elif p == 1:\n sd[node.split_dim] = np.abs(node.split-x[node.split_dim])\n min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]\n else:\n sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p\n min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]\n\n # far child might be too far, if so, don't bother pushing it\n if min_distance <= distance_upper_bound*epsfac:\n heappush(q,(min_distance, tuple(sd), far))\n\n if p == np.inf:\n return sorted([(-d,i) for (d,i) in neighbors])\n else:\n return sorted([((-d)**(1./p),i) for (d,i) in neighbors])\n\n def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):\n \"\"\"\n Query the kd-tree for nearest neighbors\n\n Parameters\n ----------\n x : array_like, last dimension self.m\n An array of points to query.\n k : int, optional\n The number of nearest neighbors to return.\n eps : nonnegative float, optional\n Return approximate nearest neighbors; the kth returned value\n is guaranteed to be no further than (1+eps) times the\n distance to the real kth nearest neighbor.\n p : float, 1<=p<=infinity, optional\n Which Minkowski p-norm to use.\n 1 is the sum-of-absolute-values \"Manhattan\" distance\n 2 is the usual Euclidean distance\n infinity is the maximum-coordinate-difference distance\n distance_upper_bound : nonnegative float, optional\n Return only neighbors within this distance. 
This is used to prune\n tree searches, so if you are doing a series of nearest-neighbor\n queries, it may help to supply the distance to the nearest neighbor\n of the most recent point.\n\n Returns\n -------\n d : float or array of floats\n The distances to the nearest neighbors.\n If x has shape tuple+(self.m,), then d has shape tuple if\n k is one, or tuple+(k,) if k is larger than one. Missing\n neighbors (e.g. when k > n or distance_upper_bound is\n given) are indicated with infinite distances. If k is None,\n then d is an object array of shape tuple, containing lists\n of distances. In either case the hits are sorted by distance\n (nearest first).\n i : integer or array of integers\n The locations of the neighbors in self.data. i is the same\n shape as d.\n\n Examples\n --------\n >>> from scipy import spatial\n >>> x, y = np.mgrid[0:5, 2:8]\n >>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))\n >>> tree.data\n array([[0, 2],\n [0, 3],\n [0, 4],\n [0, 5],\n [0, 6],\n [0, 7],\n [1, 2],\n [1, 3],\n [1, 4],\n [1, 5],\n [1, 6],\n [1, 7],\n [2, 2],\n [2, 3],\n [2, 4],\n [2, 5],\n [2, 6],\n [2, 7],\n [3, 2],\n [3, 3],\n [3, 4],\n [3, 5],\n [3, 6],\n [3, 7],\n [4, 2],\n [4, 3],\n [4, 4],\n [4, 5],\n [4, 6],\n [4, 7]])\n >>> pts = np.array([[0, 0], [2.1, 2.9]])\n >>> tree.query(pts)\n (array([ 2. 
, 0.14142136]), array([ 0, 13]))\n >>> tree.query(pts[0])\n (2.0, 0)\n\n \"\"\"\n x = np.asarray(x)\n if np.shape(x)[-1] != self.m:\n raise ValueError(\"x must consist of vectors of length %d but has shape %s\" % (self.m, np.shape(x)))\n if p < 1:\n raise ValueError(\"Only p-norms with 1<=p<=infinity permitted\")\n retshape = np.shape(x)[:-1]\n if retshape != ():\n if k is None:\n dd = np.empty(retshape,dtype=object)\n ii = np.empty(retshape,dtype=object)\n elif k > 1:\n dd = np.empty(retshape+(k,),dtype=float)\n dd.fill(np.inf)\n ii = np.empty(retshape+(k,),dtype=int)\n ii.fill(self.n)\n elif k == 1:\n dd = np.empty(retshape,dtype=float)\n dd.fill(np.inf)\n ii = np.empty(retshape,dtype=int)\n ii.fill(self.n)\n else:\n raise ValueError(\"Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None\")\n for c in np.ndindex(retshape):\n hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)\n if k is None:\n dd[c] = [d for (d,i) in hits]\n ii[c] = [i for (d,i) in hits]\n elif k > 1:\n for j in range(len(hits)):\n dd[c+(j,)], ii[c+(j,)] = hits[j]\n elif k == 1:\n if len(hits) > 0:\n dd[c], ii[c] = hits[0]\n else:\n dd[c] = np.inf\n ii[c] = self.n\n return dd, ii\n else:\n hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)\n if k is None:\n return [d for (d,i) in hits], [i for (d,i) in hits]\n elif k == 1:\n if len(hits) > 0:\n return hits[0]\n else:\n return np.inf, self.n\n elif k > 1:\n dd = np.empty(k,dtype=float)\n dd.fill(np.inf)\n ii = np.empty(k,dtype=int)\n ii.fill(self.n)\n for j in range(len(hits)):\n dd[j], ii[j] = hits[j]\n return dd, ii\n else:\n raise ValueError(\"Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None\")\n\n def __query_ball_point(self, x, r, p=2., eps=0):\n R = Rectangle(self.maxes, self.mins)\n\n def traverse_checking(node, rect):\n if rect.min_distance_point(x, p) > r / (1. 
+ eps):\n return []\n elif rect.max_distance_point(x, p) < r * (1. + eps):\n return traverse_no_checking(node)\n elif isinstance(node, KDTree.leafnode):\n d = self.data[node.idx]\n return node.idx[minkowski_distance(d, x, p) <= r].tolist()\n else:\n less, greater = rect.split(node.split_dim, node.split)\n return traverse_checking(node.less, less) + \\\n traverse_checking(node.greater, greater)\n\n def traverse_no_checking(node):\n if isinstance(node, KDTree.leafnode):\n return node.idx.tolist()\n else:\n return traverse_no_checking(node.less) + \\\n traverse_no_checking(node.greater)\n\n return traverse_checking(self.tree, R)\n\n def query_ball_point(self, x, r, p=2., eps=0):\n \"\"\"Find all points within distance r of point(s) x.\n\n Parameters\n ----------\n x : array_like, shape tuple + (self.m,)\n The point or points to search for neighbors of.\n r : positive float\n The radius of points to return.\n p : float, optional\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n eps : nonnegative float, optional\n Approximate search. Branches of the tree are not explored if their\n nearest points are further than ``r / (1 + eps)``, and branches are\n added in bulk if their furthest points are nearer than\n ``r * (1 + eps)``.\n\n Returns\n -------\n results : list or array of lists\n If `x` is a single point, returns a list of the indices of the\n neighbors of `x`. 
If `x` is an array of points, returns an object\n array of shape tuple containing lists of neighbors.\n\n Notes\n -----\n If you have many points whose neighbors you want to find, you may save\n substantial amounts of time by putting them in a KDTree and using\n query_ball_tree.\n\n Examples\n --------\n >>> from scipy import spatial\n >>> x, y = np.mgrid[0:5, 0:5]\n >>> points = np.c_[x.ravel(), y.ravel()]\n >>> tree = spatial.KDTree(points)\n >>> tree.query_ball_point([2, 0], 1)\n [5, 10, 11, 15]\n\n Query multiple points and plot the results:\n\n >>> import matplotlib.pyplot as plt\n >>> points = np.asarray(points)\n >>> plt.plot(points[:,0], points[:,1], '.')\n >>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):\n ... nearby_points = points[results]\n ... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')\n >>> plt.margins(0.1, 0.1)\n >>> plt.show()\n\n \"\"\"\n x = np.asarray(x)\n if x.shape[-1] != self.m:\n raise ValueError(\"Searching for a %d-dimensional point in a \"\n \"%d-dimensional KDTree\" % (x.shape[-1], self.m))\n if len(x.shape) == 1:\n return self.__query_ball_point(x, r, p, eps)\n else:\n retshape = x.shape[:-1]\n result = np.empty(retshape, dtype=object)\n for c in np.ndindex(retshape):\n result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)\n return result\n\n def query_ball_tree(self, other, r, p=2., eps=0):\n \"\"\"Find all pairs of points whose distance is at most r\n\n Parameters\n ----------\n other : KDTree instance\n The tree containing points to search against.\n r : float\n The maximum distance, has to be positive.\n p : float, optional\n Which Minkowski norm to use. `p` has to meet the condition\n ``1 <= p <= infinity``.\n eps : float, optional\n Approximate search. Branches of the tree are not explored\n if their nearest points are further than ``r/(1+eps)``, and\n branches are added in bulk if their furthest points are nearer\n than ``r * (1+eps)``. 
`eps` has to be non-negative.\n\n Returns\n -------\n results : list of lists\n For each element ``self.data[i]`` of this tree, ``results[i]`` is a\n list of the indices of its neighbors in ``other.data``.\n\n \"\"\"\n results = [[] for i in range(self.n)]\n\n def traverse_checking(node1, rect1, node2, rect2):\n if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):\n return\n elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):\n traverse_no_checking(node1, node2)\n elif isinstance(node1, KDTree.leafnode):\n if isinstance(node2, KDTree.leafnode):\n d = other.data[node2.idx]\n for i in node1.idx:\n results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist()\n else:\n less, greater = rect2.split(node2.split_dim, node2.split)\n traverse_checking(node1,rect1,node2.less,less)\n traverse_checking(node1,rect1,node2.greater,greater)\n elif isinstance(node2, KDTree.leafnode):\n less, greater = rect1.split(node1.split_dim, node1.split)\n traverse_checking(node1.less,less,node2,rect2)\n traverse_checking(node1.greater,greater,node2,rect2)\n else:\n less1, greater1 = rect1.split(node1.split_dim, node1.split)\n less2, greater2 = rect2.split(node2.split_dim, node2.split)\n traverse_checking(node1.less,less1,node2.less,less2)\n traverse_checking(node1.less,less1,node2.greater,greater2)\n traverse_checking(node1.greater,greater1,node2.less,less2)\n traverse_checking(node1.greater,greater1,node2.greater,greater2)\n\n def traverse_no_checking(node1, node2):\n if isinstance(node1, KDTree.leafnode):\n if isinstance(node2, KDTree.leafnode):\n for i in node1.idx:\n results[i] += node2.idx.tolist()\n else:\n traverse_no_checking(node1, node2.less)\n traverse_no_checking(node1, node2.greater)\n else:\n traverse_no_checking(node1.less, node2)\n traverse_no_checking(node1.greater, node2)\n\n traverse_checking(self.tree, Rectangle(self.maxes, self.mins),\n other.tree, Rectangle(other.maxes, other.mins))\n return results\n\n def query_pairs(self, r, p=2., eps=0):\n 
\"\"\"\n Find all pairs of points within a distance.\n\n Parameters\n ----------\n r : positive float\n The maximum distance.\n p : float, optional\n Which Minkowski norm to use. `p` has to meet the condition\n ``1 <= p <= infinity``.\n eps : float, optional\n Approximate search. Branches of the tree are not explored\n if their nearest points are further than ``r/(1+eps)``, and\n branches are added in bulk if their furthest points are nearer\n than ``r * (1+eps)``. `eps` has to be non-negative.\n\n Returns\n -------\n results : set\n Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding\n positions are close.\n\n \"\"\"\n results = set()\n\n def traverse_checking(node1, rect1, node2, rect2):\n if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):\n return\n elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):\n traverse_no_checking(node1, node2)\n elif isinstance(node1, KDTree.leafnode):\n if isinstance(node2, KDTree.leafnode):\n # Special care to avoid duplicate pairs\n if id(node1) == id(node2):\n d = self.data[node2.idx]\n for i in node1.idx:\n for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:\n if i < j:\n results.add((i,j))\n else:\n d = self.data[node2.idx]\n for i in node1.idx:\n for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:\n if i < j:\n results.add((i,j))\n elif j < i:\n results.add((j,i))\n else:\n less, greater = rect2.split(node2.split_dim, node2.split)\n traverse_checking(node1,rect1,node2.less,less)\n traverse_checking(node1,rect1,node2.greater,greater)\n elif isinstance(node2, KDTree.leafnode):\n less, greater = rect1.split(node1.split_dim, node1.split)\n traverse_checking(node1.less,less,node2,rect2)\n traverse_checking(node1.greater,greater,node2,rect2)\n else:\n less1, greater1 = rect1.split(node1.split_dim, node1.split)\n less2, greater2 = rect2.split(node2.split_dim, node2.split)\n traverse_checking(node1.less,less1,node2.less,less2)\n 
traverse_checking(node1.less,less1,node2.greater,greater2)\n\n # Avoid traversing (node1.less, node2.greater) and\n # (node1.greater, node2.less) (it's the same node pair twice\n # over, which is the source of the complication in the\n # original KDTree.query_pairs)\n if id(node1) != id(node2):\n traverse_checking(node1.greater,greater1,node2.less,less2)\n\n traverse_checking(node1.greater,greater1,node2.greater,greater2)\n\n def traverse_no_checking(node1, node2):\n if isinstance(node1, KDTree.leafnode):\n if isinstance(node2, KDTree.leafnode):\n # Special care to avoid duplicate pairs\n if id(node1) == id(node2):\n for i in node1.idx:\n for j in node2.idx:\n if i < j:\n results.add((i,j))\n else:\n for i in node1.idx:\n for j in node2.idx:\n if i < j:\n results.add((i,j))\n elif j < i:\n results.add((j,i))\n else:\n traverse_no_checking(node1, node2.less)\n traverse_no_checking(node1, node2.greater)\n else:\n # Avoid traversing (node1.less, node2.greater) and\n # (node1.greater, node2.less) (it's the same node pair twice\n # over, which is the source of the complication in the\n # original KDTree.query_pairs)\n if id(node1) == id(node2):\n traverse_no_checking(node1.less, node2.less)\n traverse_no_checking(node1.less, node2.greater)\n traverse_no_checking(node1.greater, node2.greater)\n else:\n traverse_no_checking(node1.less, node2)\n traverse_no_checking(node1.greater, node2)\n\n traverse_checking(self.tree, Rectangle(self.maxes, self.mins),\n self.tree, Rectangle(self.maxes, self.mins))\n return results\n\n def count_neighbors(self, other, r, p=2.):\n \"\"\"\n Count how many nearby pairs can be formed.\n\n Count the number of pairs (x1,x2) can be formed, with x1 drawn\n from self and x2 drawn from ``other``, and where\n ``distance(x1, x2, p) <= r``.\n This is the \"two-point correlation\" described in Gray and Moore 2000,\n \"N-body problems in statistical learning\", and the code here is based\n on their algorithm.\n\n Parameters\n ----------\n other : KDTree 
instance\n The other tree to draw points from.\n r : float or one-dimensional array of floats\n The radius to produce a count for. Multiple radii are searched with\n a single tree traversal.\n p : float, 1<=p<=infinity, optional\n Which Minkowski p-norm to use\n\n Returns\n -------\n result : int or 1-D array of ints\n The number of pairs. Note that this is internally stored in a numpy\n int, and so may overflow if very large (2e9).\n\n \"\"\"\n def traverse(node1, rect1, node2, rect2, idx):\n min_r = rect1.min_distance_rectangle(rect2,p)\n max_r = rect1.max_distance_rectangle(rect2,p)\n c_greater = r[idx] > max_r\n result[idx[c_greater]] += node1.children*node2.children\n idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]\n if len(idx) == 0:\n return\n\n if isinstance(node1,KDTree.leafnode):\n if isinstance(node2,KDTree.leafnode):\n ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],\n other.data[node2.idx][np.newaxis,:,:],\n p).ravel()\n ds.sort()\n result[idx] += np.searchsorted(ds,r[idx],side='right')\n else:\n less, greater = rect2.split(node2.split_dim, node2.split)\n traverse(node1, rect1, node2.less, less, idx)\n traverse(node1, rect1, node2.greater, greater, idx)\n else:\n if isinstance(node2,KDTree.leafnode):\n less, greater = rect1.split(node1.split_dim, node1.split)\n traverse(node1.less, less, node2, rect2, idx)\n traverse(node1.greater, greater, node2, rect2, idx)\n else:\n less1, greater1 = rect1.split(node1.split_dim, node1.split)\n less2, greater2 = rect2.split(node2.split_dim, node2.split)\n traverse(node1.less,less1,node2.less,less2,idx)\n traverse(node1.less,less1,node2.greater,greater2,idx)\n traverse(node1.greater,greater1,node2.less,less2,idx)\n traverse(node1.greater,greater1,node2.greater,greater2,idx)\n\n R1 = Rectangle(self.maxes, self.mins)\n R2 = Rectangle(other.maxes, other.mins)\n if np.shape(r) == ():\n r = np.array([r])\n result = np.zeros(1,dtype=int)\n traverse(self.tree, R1, other.tree, R2, np.arange(1))\n return 
result[0]\n elif len(np.shape(r)) == 1:\n r = np.asarray(r)\n n, = r.shape\n result = np.zeros(n,dtype=int)\n traverse(self.tree, R1, other.tree, R2, np.arange(n))\n return result\n else:\n raise ValueError(\"r must be either a single value or a one-dimensional array of values\")\n\n def sparse_distance_matrix(self, other, max_distance, p=2.):\n \"\"\"\n Compute a sparse distance matrix\n\n Computes a distance matrix between two KDTrees, leaving as zero\n any distance greater than max_distance.\n\n Parameters\n ----------\n other : KDTree\n\n max_distance : positive float\n\n p : float, optional\n\n Returns\n -------\n result : dok_matrix\n Sparse matrix representing the results in \"dictionary of keys\" format.\n\n \"\"\"\n result = scipy.sparse.dok_matrix((self.n,other.n))\n\n def traverse(node1, rect1, node2, rect2):\n if rect1.min_distance_rectangle(rect2, p) > max_distance:\n return\n elif isinstance(node1, KDTree.leafnode):\n if isinstance(node2, KDTree.leafnode):\n for i in node1.idx:\n for j in node2.idx:\n d = minkowski_distance(self.data[i],other.data[j],p)\n if d <= max_distance:\n result[i,j] = d\n else:\n less, greater = rect2.split(node2.split_dim, node2.split)\n traverse(node1,rect1,node2.less,less)\n traverse(node1,rect1,node2.greater,greater)\n elif isinstance(node2, KDTree.leafnode):\n less, greater = rect1.split(node1.split_dim, node1.split)\n traverse(node1.less,less,node2,rect2)\n traverse(node1.greater,greater,node2,rect2)\n else:\n less1, greater1 = rect1.split(node1.split_dim, node1.split)\n less2, greater2 = rect2.split(node2.split_dim, node2.split)\n traverse(node1.less,less1,node2.less,less2)\n traverse(node1.less,less1,node2.greater,greater2)\n traverse(node1.greater,greater1,node2.less,less2)\n traverse(node1.greater,greater1,node2.greater,greater2)\n traverse(self.tree, Rectangle(self.maxes, self.mins),\n other.tree, Rectangle(other.maxes, other.mins))\n\n return result\n\n\ndef distance_matrix(x, y, p=2, threshold=1000000):\n \"\"\"\n 
Compute the distance matrix.\n\n Returns the matrix of all pair-wise distances.\n\n Parameters\n ----------\n x : (M, K) array_like\n Matrix of M vectors in K dimensions.\n y : (N, K) array_like\n Matrix of N vectors in K dimensions.\n p : float, 1 <= p <= infinity\n Which Minkowski p-norm to use.\n threshold : positive int\n If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead\n of large temporary arrays.\n\n Returns\n -------\n result : (M, N) ndarray\n Matrix containing the distance from every vector in `x` to every vector\n in `y`.\n\n Examples\n --------\n >>> from scipy.spatial import distance_matrix\n >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])\n array([[ 1. , 1.41421356],\n [ 1.41421356, 1. ]])\n\n \"\"\"\n\n x = np.asarray(x)\n m, k = x.shape\n y = np.asarray(y)\n n, kk = y.shape\n\n if k != kk:\n raise ValueError(\"x contains %d-dimensional vectors but y contains %d-dimensional vectors\" % (k, kk))\n\n if m*n*k <= threshold:\n return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)\n else:\n result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype\n if m < n:\n for i in range(m):\n result[i,:] = minkowski_distance(x[i],y,p)\n else:\n for j in range(n):\n result[:,j] = minkowski_distance(x,y[j],p)\n return result\n",
"\"\"\"\nModule to read / write wav files using NumPy arrays\n\nFunctions\n---------\n`read`: Return the sample rate (in samples/sec) and data from a WAV file.\n\n`write`: Write a NumPy array as a WAV file.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport sys\nimport numpy\nimport struct\nimport warnings\n\n\n__all__ = [\n 'WavFileWarning',\n 'read',\n 'write'\n]\n\n\nclass WavFileWarning(UserWarning):\n pass\n\n\nWAVE_FORMAT_PCM = 0x0001\nWAVE_FORMAT_IEEE_FLOAT = 0x0003\nWAVE_FORMAT_EXTENSIBLE = 0xfffe\nKNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)\n\n# assumes file pointer is immediately\n# after the 'fmt ' id\n\n\ndef _read_fmt_chunk(fid, is_big_endian):\n \"\"\"\n Returns\n -------\n size : int\n size of format subchunk in bytes (minus 8 for \"fmt \" and itself)\n format_tag : int\n PCM, float, or compressed format\n channels : int\n number of channels\n fs : int\n sampling frequency in samples per second\n bytes_per_second : int\n overall byte rate for the file\n block_align : int\n bytes per sample, including all channels\n bit_depth : int\n bits per sample\n \"\"\"\n if is_big_endian:\n fmt = '>'\n else:\n fmt = '<'\n\n size = res = struct.unpack(fmt+'I', fid.read(4))[0]\n bytes_read = 0\n\n if size < 16:\n raise ValueError(\"Binary structure of wave file is not compliant\")\n\n res = struct.unpack(fmt+'HHIIHH', fid.read(16))\n bytes_read += 16\n\n format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res\n\n if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2):\n ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]\n bytes_read += 2\n if ext_chunk_size >= 22:\n extensible_chunk_data = fid.read(22)\n bytes_read += 22\n raw_guid = extensible_chunk_data[2+4:2+4+16]\n # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)\n # MS GUID byte order: first three groups are native byte order,\n # rest is Big Endian\n if is_big_endian:\n tail = 
b'\\x00\\x00\\x00\\x10\\x80\\x00\\x00\\xAA\\x00\\x38\\x9B\\x71'\n else:\n tail = b'\\x00\\x00\\x10\\x00\\x80\\x00\\x00\\xAA\\x00\\x38\\x9B\\x71'\n if raw_guid.endswith(tail):\n format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]\n else:\n raise ValueError(\"Binary structure of wave file is not compliant\")\n\n if format_tag not in KNOWN_WAVE_FORMATS:\n raise ValueError(\"Unknown wave file format\")\n\n # move file pointer to next chunk\n if size > (bytes_read):\n fid.read(size - bytes_read)\n\n return (size, format_tag, channels, fs, bytes_per_second, block_align,\n bit_depth)\n\n\n# assumes file pointer is immediately after the 'data' id\ndef _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,\n mmap=False):\n if is_big_endian:\n fmt = '>I'\n else:\n fmt = '<I'\n\n # Size of the data subchunk in bytes\n size = struct.unpack(fmt, fid.read(4))[0]\n\n # Number of bytes per sample\n bytes_per_sample = bit_depth//8\n if bit_depth == 8:\n dtype = 'u1'\n else:\n if is_big_endian:\n dtype = '>'\n else:\n dtype = '<'\n if format_tag == WAVE_FORMAT_PCM:\n dtype += 'i%d' % bytes_per_sample\n else:\n dtype += 'f%d' % bytes_per_sample\n if not mmap:\n data = numpy.frombuffer(fid.read(size), dtype=dtype)\n else:\n start = fid.tell()\n data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,\n shape=(size//bytes_per_sample,))\n fid.seek(start + size)\n\n if channels > 1:\n data = data.reshape(-1, channels)\n return data\n\n\ndef _skip_unknown_chunk(fid, is_big_endian):\n if is_big_endian:\n fmt = '>I'\n else:\n fmt = '<I'\n\n data = fid.read(4)\n # call unpack() and seek() only if we have really read data from file\n # otherwise empty read at the end of the file would trigger\n # unnecessary exception at unpack() call\n # in case data equals somehow to 0, there is no need for seek() anyway\n if data:\n size = struct.unpack(fmt, data)[0]\n fid.seek(size, 1)\n\n\ndef _read_riff_chunk(fid):\n str1 = fid.read(4) # File signature\n if str1 == b'RIFF':\n 
is_big_endian = False\n fmt = '<I'\n elif str1 == b'RIFX':\n is_big_endian = True\n fmt = '>I'\n else:\n # There are also .wav files with \"FFIR\" or \"XFIR\" signatures?\n raise ValueError(\"File format {}... not \"\n \"understood.\".format(repr(str1)))\n\n # Size of entire file\n file_size = struct.unpack(fmt, fid.read(4))[0] + 8\n\n str2 = fid.read(4)\n if str2 != b'WAVE':\n raise ValueError(\"Not a WAV file.\")\n\n return file_size, is_big_endian\n\n\ndef read(filename, mmap=False):\n \"\"\"\n Open a WAV file\n\n Return the sample rate (in samples/sec) and data from a WAV file.\n\n Parameters\n ----------\n filename : string or open file handle\n Input wav file.\n mmap : bool, optional\n Whether to read data as memory-mapped.\n Only to be used on real files (Default: False).\n\n .. versionadded:: 0.12.0\n\n Returns\n -------\n rate : int\n Sample rate of wav file.\n data : numpy array\n Data read from wav file. Data-type is determined from the file;\n see Notes.\n\n Notes\n -----\n This function cannot read wav files with 24-bit data.\n\n Common data types: [1]_\n\n ===================== =========== =========== =============\n WAV format Min Max NumPy dtype\n ===================== =========== =========== =============\n 32-bit floating-point -1.0 +1.0 float32\n 32-bit PCM -2147483648 +2147483647 int32\n 16-bit PCM -32768 +32767 int16\n 8-bit PCM 0 255 uint8\n ===================== =========== =========== =============\n\n Note that 8-bit PCM is unsigned.\n\n References\n ----------\n .. 
[1] IBM Corporation and Microsoft Corporation, \"Multimedia Programming\n Interface and Data Specifications 1.0\", section \"Data Format of the\n Samples\", August 1991\n http://www.tactilemedia.com/info/MCI_Control_Info.html\n\n Examples\n --------\n >>> from os.path import dirname, join as pjoin\n >>> import scipy.io as sio\n\n Get the filename for an example .wav file from the tests/data directory.\n\n >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')\n >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')\n\n Load the .wav file contents.\n\n >>> samplerate, data = sio.wavfile.read(wav_fname)\n >>> print(f\"number of channels = {data.shape[1]}\")\n number of channels = 2\n >>> length = data.shape[0] / samplerate\n >>> print(f\"length = {length}s\")\n length = 0.01s\n\n Plot the waveform.\n\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>> time = np.linspace(0., length, data.shape[0])\n >>> plt.plot(time, data[:, 0], label=\"Left channel\")\n >>> plt.plot(time, data[:, 1], label=\"Right channel\")\n >>> plt.legend()\n >>> plt.xlabel(\"Time [s]\")\n >>> plt.ylabel(\"Amplitude\")\n >>> plt.show()\n\n \"\"\"\n if hasattr(filename, 'read'):\n fid = filename\n mmap = False\n else:\n fid = open(filename, 'rb')\n\n try:\n file_size, is_big_endian = _read_riff_chunk(fid)\n fmt_chunk_received = False\n data_chunk_received = False\n channels = 1\n bit_depth = 8\n format_tag = WAVE_FORMAT_PCM\n while fid.tell() < file_size:\n # read the next chunk\n chunk_id = fid.read(4)\n\n if not chunk_id:\n if data_chunk_received:\n # End of file but data successfully read\n warnings.warn(\n \"Reached EOF prematurely; finished at {:d} bytes, \"\n \"expected {:d} bytes from header.\"\n .format(fid.tell(), file_size),\n WavFileWarning, stacklevel=2)\n break\n else:\n raise ValueError(\"Unexpected end of file.\")\n elif len(chunk_id) < 4:\n raise ValueError(\"Incomplete wav chunk.\")\n\n if chunk_id == b'fmt ':\n fmt_chunk_received = True\n 
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)\n format_tag, channels, fs = fmt_chunk[1:4]\n bit_depth = fmt_chunk[6]\n if bit_depth not in (8, 16, 32, 64, 96, 128):\n raise ValueError(\"Unsupported bit depth: the wav file \"\n \"has {}-bit data.\".format(bit_depth))\n elif chunk_id == b'fact':\n _skip_unknown_chunk(fid, is_big_endian)\n elif chunk_id == b'data':\n data_chunk_received = True\n if not fmt_chunk_received:\n raise ValueError(\"No fmt chunk before data\")\n data = _read_data_chunk(fid, format_tag, channels, bit_depth,\n is_big_endian, mmap)\n elif chunk_id == b'LIST':\n # Someday this could be handled properly but for now skip it\n _skip_unknown_chunk(fid, is_big_endian)\n elif chunk_id in (b'JUNK', b'Fake'):\n # Skip alignment chunks without warning\n _skip_unknown_chunk(fid, is_big_endian)\n else:\n warnings.warn(\"Chunk (non-data) not understood, skipping it.\",\n WavFileWarning, stacklevel=2)\n _skip_unknown_chunk(fid, is_big_endian)\n finally:\n if not hasattr(filename, 'read'):\n fid.close()\n else:\n fid.seek(0)\n\n return fs, data\n\n\ndef write(filename, rate, data):\n \"\"\"\n Write a NumPy array as a WAV file.\n\n Parameters\n ----------\n filename : string or open file handle\n Output wav file.\n rate : int\n The sample rate (in samples/sec).\n data : ndarray\n A 1-D or 2-D NumPy array of either integer or float data-type.\n\n Notes\n -----\n * Writes a simple uncompressed WAV file.\n * To write multiple-channels, use a 2-D array of shape\n (Nsamples, Nchannels).\n * The bits-per-sample and PCM/float will be determined by the data-type.\n\n Common data types: [1]_\n\n ===================== =========== =========== =============\n WAV format Min Max NumPy dtype\n ===================== =========== =========== =============\n 32-bit floating-point -1.0 +1.0 float32\n 32-bit PCM -2147483648 +2147483647 int32\n 16-bit PCM -32768 +32767 int16\n 8-bit PCM 0 255 uint8\n ===================== =========== =========== =============\n\n Note that 8-bit 
PCM is unsigned.\n\n References\n ----------\n .. [1] IBM Corporation and Microsoft Corporation, \"Multimedia Programming\n Interface and Data Specifications 1.0\", section \"Data Format of the\n Samples\", August 1991\n http://www.tactilemedia.com/info/MCI_Control_Info.html\n\n Examples\n --------\n Create a 100Hz sine wave, sampled at 44100Hz.\n Write to 16-bit PCM, Mono.\n\n >>> from scipy.io.wavfile import write\n >>> samplerate = 44100; fs = 100\n >>> t = np.linspace(0., 1., samplerate)\n >>> amplitude = np.iinfo(np.int16).max\n >>> data = amplitude * np.sin(2. * np.pi * fs * t)\n >>> write(\"example.wav\", samplerate, data)\n\n \"\"\"\n if hasattr(filename, 'write'):\n fid = filename\n else:\n fid = open(filename, 'wb')\n\n fs = rate\n\n try:\n dkind = data.dtype.kind\n if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and\n data.dtype.itemsize == 1)):\n raise ValueError(\"Unsupported data type '%s'\" % data.dtype)\n\n header_data = b''\n\n header_data += b'RIFF'\n header_data += b'\\x00\\x00\\x00\\x00'\n header_data += b'WAVE'\n\n # fmt chunk\n header_data += b'fmt '\n if dkind == 'f':\n format_tag = WAVE_FORMAT_IEEE_FLOAT\n else:\n format_tag = WAVE_FORMAT_PCM\n if data.ndim == 1:\n channels = 1\n else:\n channels = data.shape[1]\n bit_depth = data.dtype.itemsize * 8\n bytes_per_second = fs*(bit_depth // 8)*channels\n block_align = channels * (bit_depth // 8)\n\n fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,\n bytes_per_second, block_align, bit_depth)\n if not (dkind == 'i' or dkind == 'u'):\n # add cbSize field for non-PCM files\n fmt_chunk_data += b'\\x00\\x00'\n\n header_data += struct.pack('<I', len(fmt_chunk_data))\n header_data += fmt_chunk_data\n\n # fact chunk (non-PCM files)\n if not (dkind == 'i' or dkind == 'u'):\n header_data += b'fact'\n header_data += struct.pack('<II', 4, data.shape[0])\n\n # check data size (needs to be immediately before the data chunk)\n if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 
0xFFFFFFFF:\n raise ValueError(\"Data exceeds wave file size limit\")\n\n fid.write(header_data)\n\n # data chunk\n fid.write(b'data')\n fid.write(struct.pack('<I', data.nbytes))\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and\n sys.byteorder == 'big'):\n data = data.byteswap()\n _array_tofile(fid, data)\n\n # Determine file size and place it in correct\n # position at start of the file.\n size = fid.tell()\n fid.seek(4)\n fid.write(struct.pack('<I', size-8))\n\n finally:\n if not hasattr(filename, 'write'):\n fid.close()\n else:\n fid.seek(0)\n\n\nif sys.version_info[0] >= 3:\n def _array_tofile(fid, data):\n # ravel gives a c-contiguous buffer\n fid.write(data.ravel().view('b').data)\nelse:\n def _array_tofile(fid, data):\n fid.write(data.tostring())\n"
] |
[
[
"numpy.promote_types",
"numpy.minimum",
"numpy.copy",
"numpy.ndindex",
"numpy.empty",
"numpy.nonzero",
"numpy.prod",
"numpy.arange",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.shape",
"numpy.amax",
"numpy.amin",
"numpy.searchsorted",
"numpy.asarray",
"numpy.sum",
"numpy.abs",
"numpy.all",
"numpy.maximum"
],
[
"numpy.memmap"
]
] |
XDong18/yolact
|
[
"e602d89904a674fc25dfc5db09a87baf0a04bde6"
] |
[
"train.py"
] |
[
"from data import *\nfrom utils.augmentations import SSDAugmentation, BaseTransform\nfrom utils.functions import MovingAverage, SavePath\nfrom utils.logger import Log\nfrom utils import timer\nfrom layers.modules import MultiBoxLoss\nfrom yolact import Yolact\nimport os\nimport sys\nimport time\nimport math, random\nfrom pathlib import Path\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\nimport datetime\n\n# Oof\nimport eval as eval_script\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Yolact Training Script')\nparser.add_argument('--batch_size', default=8, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from. If this is \"interrupt\"'\\\n ', the model will resume training from the interrupt file.')\nparser.add_argument('--start_iter', default=-1, type=int,\n help='Resume training at this iter. If this is -1, the iteration will be'\\\n 'determined from the file name.')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning_rate', default=None, type=float,\n help='Initial learning rate. Leave as None to read this from the config.')\nparser.add_argument('--momentum', default=None, type=float,\n help='Momentum for SGD. Leave as None to read this from the config.')\nparser.add_argument('--decay', '--weight_decay', default=None, type=float,\n help='Weight decay for SGD. 
Leave as None to read this from the config.')\nparser.add_argument('--gamma', default=None, type=float,\n help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models.')\nparser.add_argument('--log_folder', default='logs/',\n help='Directory for saving logs.')\nparser.add_argument('--config', default=None,\n help='The config object to use.')\nparser.add_argument('--save_interval', default=10000, type=int,\n help='The number of iterations between saving the model.')\nparser.add_argument('--validation_size', default=5000, type=int,\n help='The number of images to use for validation.')\nparser.add_argument('--validation_epoch', default=2, type=int,\n help='Output validation information every n iterations. If -1, do no validation.')\nparser.add_argument('--keep_latest', dest='keep_latest', action='store_true',\n help='Only keep the latest checkpoint instead of each one.')\nparser.add_argument('--keep_latest_interval', default=100000, type=int,\n help='When --keep_latest is on, don\\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')\nparser.add_argument('--dataset', default=None, type=str,\n help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')\nparser.add_argument('--no_log', dest='log', action='store_false',\n help='Don\\'t log per iteration information into log_folder.')\nparser.add_argument('--log_gpu', dest='log_gpu', action='store_true',\n help='Include GPU information in the logs. 
Nvidia-smi tends to be slow, so set this with caution.')\nparser.add_argument('--no_interrupt', dest='interrupt', action='store_false',\n help='Don\\'t save an interrupt when KeyboardInterrupt is caught.')\nparser.add_argument('--batch_alloc', default=None, type=str,\n help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')\nparser.add_argument('--no_autoscale', dest='autoscale', action='store_false',\n help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')\n\nparser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)\nargs = parser.parse_args()\n\nif args.config is not None:\n set_cfg(args.config)\n\nif args.dataset is not None:\n set_dataset(args.dataset)\n\nif args.autoscale and args.batch_size != 8:\n factor = args.batch_size / 8\n if __name__ == '__main__':\n print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))\n\n cfg.lr *= factor\n cfg.max_iter //= factor\n cfg.lr_steps = [x // factor for x in cfg.lr_steps]\n\n# Update training parameters from the config if necessary\ndef replace(name):\n if getattr(args, name) == None: setattr(args, name, getattr(cfg, name))\nreplace('lr')\nreplace('decay')\nreplace('gamma')\nreplace('momentum')\n\n# This is managed by set_lr\ncur_lr = args.lr\n\nif torch.cuda.device_count() == 0:\n print('No GPUs detected. Exiting...')\n exit(-1)\n\nif args.batch_size // torch.cuda.device_count() < 6:\n if __name__ == '__main__':\n print('Per-GPU batch size is less than the recommended limit for batch norm. 
Disabling batch norm.')\n cfg.freeze_bn = True\n\nloss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nclass NetLoss(nn.Module):\n \"\"\"\n A wrapper for running the network and computing the loss\n This is so we can more efficiently use DataParallel.\n \"\"\"\n \n def __init__(self, net:Yolact, criterion:MultiBoxLoss):\n super().__init__()\n\n self.net = net\n self.criterion = criterion\n \n def forward(self, images, targets, masks, num_crowds):\n preds = self.net(images)\n losses = self.criterion(self.net, preds, targets, masks, num_crowds)\n return losses\n\nclass CustomDataParallel(nn.DataParallel):\n \"\"\"\n This is a custom version of DataParallel that works better with our training data.\n It should also be faster than the general case.\n \"\"\"\n\n def scatter(self, inputs, kwargs, device_ids):\n # More like scatter and data prep at the same time. 
The point is we prep the data in such a way\n # that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.\n devices = ['cuda:' + str(x) for x in device_ids]\n splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)\n\n return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \\\n [kwargs] * len(devices)\n\n def gather(self, outputs, output_device):\n out = {}\n\n for k in outputs[0]:\n out[k] = torch.stack([output[k].to(output_device) for output in outputs])\n \n return out\n\ndef train():\n if not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n dataset = COCODetection(image_path=cfg.dataset.train_images,\n info_file=cfg.dataset.train_info,\n transform=SSDAugmentation(MEANS))\n \n if args.validation_epoch > 0:\n setup_eval()\n val_dataset = COCODetection(image_path=cfg.dataset.valid_images,\n info_file=cfg.dataset.valid_info,\n transform=BaseTransform(MEANS))\n\n # Parallel wraps the underlying module, but when saving and loading we don't want that\n yolact_net = Yolact()\n net = yolact_net\n net.train()\n\n if args.log:\n log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),\n overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)\n\n # I don't use the timer during training (I use a different timing method).\n # Apparently there's a race condition with multiple GPUs, so disable it just to be safe.\n timer.disable_all()\n\n # Both of these can set args.resume to None, so do them before the check \n if args.resume == 'interrupt':\n args.resume = SavePath.get_interrupt(args.save_folder)\n elif args.resume == 'latest':\n args.resume = SavePath.get_latest(args.save_folder, cfg.name)\n\n if args.resume is not None:\n print('Resuming training, loading {}...'.format(args.resume))\n yolact_net.load_weights(args.resume)\n\n if args.start_iter == -1:\n args.start_iter = SavePath.from_str(args.resume).iteration\n else:\n print('Initializing weights...')\n 
yolact_net.init_weights(backbone_path='./weights/' + cfg.backbone.path)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.decay)\n criterion = MultiBoxLoss(num_classes=cfg.num_classes,\n pos_threshold=cfg.positive_iou_threshold,\n neg_threshold=cfg.negative_iou_threshold,\n negpos_ratio=cfg.ohem_negpos_ratio)\n\n if args.batch_alloc is not None:\n args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]\n if sum(args.batch_alloc) != args.batch_size:\n print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))\n exit(-1)\n\n net = CustomDataParallel(NetLoss(net, criterion))\n if args.cuda:\n net = net.cuda()\n \n # Initialize everything\n if not cfg.freeze_bn: yolact_net.freeze_bn() # Freeze bn so we don't kill our means\n yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())\n if not cfg.freeze_bn: yolact_net.freeze_bn(True)\n\n # loss counters\n loc_loss = 0\n conf_loss = 0\n iteration = max(args.start_iter, 0)\n last_time = time.time()\n\n epoch_size = len(dataset) // args.batch_size\n num_epochs = math.ceil(cfg.max_iter / epoch_size)\n \n # Which learning rate adjustment step are we on? 
lr' = lr * gamma ^ step_index\n step_index = 0\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n \n \n save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)\n time_avg = MovingAverage()\n\n global loss_types # Forms the print order\n loss_avgs = { k: MovingAverage(100) for k in loss_types }\n\n print('Begin training!')\n print()\n # try-except so you can use ctrl+c to save early and stop training\n try:\n for epoch in range(num_epochs):\n # Resume from start_iter\n if (epoch+1)*epoch_size < iteration:\n continue\n \n for datum in data_loader:\n # Stop if we've reached an epoch if we're resuming from start_iter\n if iteration == (epoch+1)*epoch_size:\n break\n\n # Stop at the configured number of iterations even if mid-epoch\n if iteration == cfg.max_iter:\n break\n\n # Change a config setting if we've reached the specified iteration\n changed = False\n for change in cfg.delayed_settings:\n if iteration >= change[0]:\n changed = True\n cfg.replace(change[1])\n\n # Reset the loss averages because things might have changed\n for avg in loss_avgs:\n avg.reset()\n \n # If a config setting was changed, remove it from the list so we don't keep checking\n if changed:\n cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]\n\n # Warm up by linearly interpolating the learning rate from some smaller value\n if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:\n set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)\n\n # Adjust the learning rate at the given iterations, but also if we resume from past that iteration\n while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:\n step_index += 1\n set_lr(optimizer, args.lr * (args.gamma ** step_index))\n \n # Zero the grad to get ready to compute gradients\n 
optimizer.zero_grad()\n\n # Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)\n losses = net(datum)\n \n losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel\n loss = sum([losses[k] for k in losses])\n \n # no_inf_mean removes some components from the loss, so make sure to backward through all of it\n # all_loss = sum([v.mean() for v in losses.values()])\n\n # Backprop\n loss.backward() # Do this to free up vram even if loss is not finite\n if torch.isfinite(loss).item():\n optimizer.step()\n \n # Add the loss to the moving average for bookkeeping\n for k in losses:\n loss_avgs[k].add(losses[k].item())\n\n cur_time = time.time()\n elapsed = cur_time - last_time\n last_time = cur_time\n\n # Exclude graph setup from the timing information\n if iteration != args.start_iter:\n time_avg.add(elapsed)\n\n if iteration % 10 == 0:\n eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]\n \n total = sum([loss_avgs[k].get_avg() for k in losses])\n loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])\n \n print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')\n % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)\n\n if args.log:\n precision = 5\n loss_info = {k: round(losses[k].item(), precision) for k in losses}\n loss_info['T'] = round(loss.item(), precision)\n\n if args.log_gpu:\n log.log_gpu_stats = (iteration % 10 == 0) # nvidia-smi is sloooow\n \n log.log('train', loss=loss_info, epoch=epoch, iter=iteration,\n lr=round(cur_lr, 10), elapsed=elapsed)\n\n log.log_gpu_stats = args.log_gpu\n \n iteration += 1\n\n if iteration % args.save_interval == 0 and iteration != args.start_iter:\n if args.keep_latest:\n latest = SavePath.get_latest(args.save_folder, cfg.name)\n\n print('Saving state, iter:', iteration)\n yolact_net.save_weights(save_path(epoch, 
iteration))\n\n if args.keep_latest and latest is not None:\n if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:\n print('Deleting old save...')\n os.remove(latest)\n \n # This is done per epoch\n if args.validation_epoch > 0:\n if epoch % args.validation_epoch == 0 and epoch > 0:\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n \n # Compute validation mAP after training is finished\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n except KeyboardInterrupt:\n if args.interrupt:\n print('Stopping early. Saving network...')\n \n # Delete previous copy of the interrupted network so we don't spam the weights folder\n SavePath.remove_interrupt(args.save_folder)\n \n yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))\n exit()\n\n yolact_net.save_weights(save_path(epoch, iteration))\n\n\ndef set_lr(optimizer, new_lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n \n global cur_lr\n cur_lr = new_lr\n\ndef gradinator(x):\n x.requires_grad = False\n return x\n\ndef prepare_data(datum, devices:list=None, allocation:list=None):\n with torch.no_grad():\n if devices is None:\n devices = ['cuda:0'] if args.cuda else ['cpu']\n if allocation is None:\n allocation = [args.batch_size // len(devices)] * (len(devices) - 1)\n allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less\n \n images, (targets, masks, num_crowds) = datum\n\n cur_idx = 0\n for device, alloc in zip(devices, allocation):\n for _ in range(alloc):\n images[cur_idx] = gradinator(images[cur_idx].to(device))\n targets[cur_idx] = gradinator(targets[cur_idx].to(device))\n masks[cur_idx] = gradinator(masks[cur_idx].to(device))\n cur_idx += 1\n\n if cfg.preserve_aspect_ratio:\n # Choose a random size from the batch\n _, h, w = images[random.randint(0, len(images)-1)].size()\n\n for idx, (image, target, 
mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):\n images[idx], targets[idx], masks[idx], num_crowds[idx] \\\n = enforce_size(image, target, mask, num_crowd, w, h)\n \n cur_idx = 0\n split_images, split_targets, split_masks, split_numcrowds \\\n = [[None for alloc in allocation] for _ in range(4)]\n\n for device_idx, alloc in enumerate(allocation):\n split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)\n split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]\n split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]\n split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]\n\n cur_idx += alloc\n\n return split_images, split_targets, split_masks, split_numcrowds\n\ndef no_inf_mean(x:torch.Tensor):\n \"\"\"\n Computes the mean of a vector, throwing out all inf values.\n If there are no non-inf values, this will return inf (i.e., just the normal mean).\n \"\"\"\n\n no_inf = [a for a in x if torch.isfinite(a)]\n\n if len(no_inf) > 0:\n return sum(no_inf) / len(no_inf)\n else:\n return x.mean()\n\ndef compute_validation_loss(net, data_loader, criterion):\n global loss_types\n\n with torch.no_grad():\n losses = {}\n \n # Don't switch to eval mode because we want to get losses\n iterations = 0\n for datum in data_loader:\n images, targets, masks, num_crowds = prepare_data(datum)\n out = net(images)\n\n wrapper = ScatterWrapper(targets, masks, num_crowds)\n _losses = criterion(out, wrapper, wrapper.make_mask())\n \n for k, v in _losses.items():\n v = v.mean().item()\n if k in losses:\n losses[k] += v\n else:\n losses[k] = v\n\n iterations += 1\n if args.validation_size <= iterations * args.batch_size:\n break\n \n for k in losses:\n losses[k] /= iterations\n \n \n loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])\n print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)\n\ndef compute_validation_map(epoch, iteration, yolact_net, dataset, 
log:Log=None):\n with torch.no_grad():\n yolact_net.eval()\n \n start = time.time()\n print()\n print(\"Computing validation mAP (this may take a while)...\", flush=True)\n val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)\n end = time.time()\n\n if log is not None:\n log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)\n\n yolact_net.train()\n\ndef setup_eval():\n eval_script.parse_args(['--no_bar', '--max_images='+str(args.validation_size)])\n\nif __name__ == '__main__':\n train()\n"
] |
[
[
"torch.zeros",
"torch.stack",
"torch.set_default_tensor_type",
"torch.no_grad",
"torch.isfinite",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
]
] |
abhishirk/Capstone
|
[
"84149fc56bb90303f5fa5a40a8ace7e569eae918"
] |
[
"ros/src/waypoint_updater/waypoint_updater.py"
] |
[
"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\n\nimport tf\nfrom std_msgs.msg import Int32, Bool\nimport math\nimport numpy as np\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number\n\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n\n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n # TODO: Add other member variables you need below\n # self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n\n self.loop()\n\n def loop(self):\n rate = rospy.Rate(50)\n #time.sleep(3000)\n while not rospy.is_shutdown():\n #rospy.loginfo('inside self.loop')\n #rospy.loginfo(self.pose)\n if self.pose and self.base_waypoints :\n rospy.loginfo('inside self.loop')\n closest_waypoint_idx = self.get_closest_waypoint_id()\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep()\n\n def get_closest_waypoint_id(self):\n x = 
self.pose.pose.position.x\n y = self.pose.pose.position.y\n rospy.loginfo('get closest waypoint')\n closest_idx = self.waypoint_tree.query([x,y],1)[1]\n rospy.loginfo(closest_idx)\n\n # check closest is ahead or behind the car\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect= np.array(prev_coord)\n pos_vect = np.array([x,y])\n\n val = np.dot(cl_vect-prev_vect,pos_vect-cl_vect)\n\n if val>0:\n closest_idx = (closest_idx+1)%len(self.waypoints_2d)\n \n return closest_idx\n\n def publish_waypoints(self,closest_idx):\n lane = Lane()\n rospy.loginfo('inside publish waypoints')\n lane.header = self.base_waypoints.header\n rospy.loginfo(' header set')\n lane.waypoints = self.base_waypoints.waypoints[closest_idx : closest_idx + LOOKAHEAD_WPS]\n rospy.loginfo(' waypoints set, size :')\n rospy.loginfo( len(lane.waypoints))\n self.final_waypoints_pub.publish(lane)\n rospy.loginfo(' published')\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n rospy.loginfo('In funnction waypoints_cb') \n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n rospy.loginfo('setting kd tree ')\n self.waypoints_2d = [[waypoint.pose.pose.position.x,waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n #rospy.loginfo(self.waypoints_2d)\n self.waypoint_tree = KDTree(self.waypoints_2d)\n rospy.loginfo(len(self.waypoint_tree.data))\n\n def traffic_cb(self, msg):\n # TODO: Callback for /traffic_waypoint message. Implement\n pass\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. 
We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n"
] |
[
[
"numpy.array",
"numpy.dot"
]
] |
hzm2016/dm_control
|
[
"c24ec9f5f3cb3c25c6571c89c9f60bf3350f5711"
] |
[
"dm_control/rl/control_test.py"
] |
[
"# Copyright 2017 The dm_control Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Control Environment tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Internal dependencies.\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nfrom dm_control.rl import control\n\nimport mock\nimport numpy as np\n\nfrom dm_control.rl import specs\n\n_CONSTANT_REWARD_VALUE = 1.0\n_CONSTANT_OBSERVATION = {'observations': np.asarray(_CONSTANT_REWARD_VALUE)}\n\n_ACTION_SPEC = specs.BoundedArraySpec(\n shape=(1,), dtype=np.float, minimum=0.0, maximum=1.0)\n_OBSERVATION_SPEC = {'observations': specs.ArraySpec(shape=(), dtype=np.float)}\n\n\nclass EnvironmentTest(parameterized.TestCase):\n\n def setUp(self):\n self._task = mock.Mock(spec=control.Task)\n self._task.initialize_episode = mock.Mock()\n self._task.get_observation = mock.Mock(return_value=_CONSTANT_OBSERVATION)\n self._task.get_reward = mock.Mock(return_value=_CONSTANT_REWARD_VALUE)\n self._task.get_termination = mock.Mock(return_value=None)\n self._task.action_spec = mock.Mock(return_value=_ACTION_SPEC)\n self._task.observation_spec.side_effect = NotImplementedError()\n\n self._physics = mock.Mock(spec=control.Physics)\n self._physics.time = mock.Mock(return_value=0.0)\n\n self._physics.reset_context = mock.MagicMock()\n\n 
self._env = control.Environment(physics=self._physics, task=self._task)\n\n def test_environment_calls(self):\n self._env.action_spec()\n self._task.action_spec.assert_called_with(self._physics)\n\n self._env.reset()\n self._task.initialize_episode.assert_called_with(self._physics)\n self._task.get_observation.assert_called_with(self._physics)\n\n action = [1]\n time_step = self._env.step(action)\n\n self._task.before_step.assert_called()\n self._task.after_step.assert_called_with(self._physics)\n self._task.get_termination.assert_called_with(self._physics)\n\n self.assertEquals(_CONSTANT_REWARD_VALUE, time_step.reward)\n\n def test_timeout(self):\n self._physics.time = mock.Mock(return_value=2.)\n env = control.Environment(\n physics=self._physics, task=self._task, time_limit=1.)\n env.reset()\n time_step = env.step([1])\n self.assertTrue(time_step.last())\n\n time_step = env.step([1])\n self.assertTrue(time_step.first())\n\n def test_observation_spec(self):\n observation_spec = self._env.observation_spec()\n self.assertEqual(_OBSERVATION_SPEC, observation_spec)\n\n def test_redundant_args_error(self):\n with self.assertRaises(ValueError):\n control.Environment(physics=self._physics, task=self._task,\n n_sub_steps=2, control_timestep=0.1)\n\n def test_control_timestep(self):\n self._physics.timestep.return_value = .002\n env = control.Environment(\n physics=self._physics, task=self._task, n_sub_steps=5)\n self.assertEqual(.01, env.control_timestep())\n\n def test_flatten_observations(self):\n multimodal_obs = dict(_CONSTANT_OBSERVATION)\n multimodal_obs['sensor'] = np.zeros(7, dtype=np.bool)\n self._task.get_observation = mock.Mock(return_value=multimodal_obs)\n env = control.Environment(\n physics=self._physics, task=self._task, flat_observation=True)\n timestep = env.reset()\n self.assertEqual(len(timestep.observation), 1)\n self.assertEqual(timestep.observation[control.FLAT_OBSERVATION_KEY].size,\n 1 + 7)\n\n\nclass 
ComputeNStepsTest(parameterized.TestCase):\n\n @parameterized.parameters((0.2, 0.1, 2), (.111, .111, 1), (100, 5, 20),\n (0.03, 0.005, 6))\n def testComputeNSteps(self, control_timestep, physics_timestep, expected):\n steps = control.compute_n_steps(control_timestep, physics_timestep)\n self.assertEquals(expected, steps)\n\n @parameterized.parameters((3, 2), (.003, .00101))\n def testComputeNStepsFailures(self, control_timestep, physics_timestep):\n with self.assertRaises(ValueError):\n control.compute_n_steps(control_timestep, physics_timestep)\n\nif __name__ == '__main__':\n absltest.main()\n"
] |
[
[
"numpy.asarray",
"numpy.zeros"
]
] |
adrische/actuary
|
[
"1b446c3a66ef831a0727ff4d3ea1e1cc3b838af9"
] |
[
"selected-topics-in-life-insurance/basic-interest-rate-models.py"
] |
[
"# Simple interest rate processes - some examples\n#\n# - Brownian motion\n# - AR(1)-model\n# - Vasiček-model\n# - Cox-Ingersoll-Ross-model\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import sqrt\n\n\n# Brownian motion / Random walk\ndef sim_brownian(steps, start=0, sigma=1, discretization=1):\n steps_mod = int(steps / discretization)\n noise = sqrt(discretization) * sigma * np.random.standard_normal(steps_mod)\n return(start + np.cumsum(noise))\n\n\n# AR(1)-model\ndef sim_ar1(length_out, start=0, sigma=1, phi=1):\n noise = sigma * np.random.standard_normal(length_out)\n ar1 = np.zeros(length_out)\n for i in range(length_out - 1):\n ar1[i + 1] = phi * ar1[i] + noise[i]\n return(start + ar1)\n\n\n# Vasiček-model\ndef sim_vasicek(steps, start=0, sigma=1,\n reversion_level=None, reversion_strength=1,\n discretization=1):\n if reversion_level is None:\n reversion_level = start\n steps_mod = int(steps / discretization)\n v = np.zeros(steps_mod)\n v[0] = start\n for i in range(steps_mod - 1):\n noise = sqrt(discretization) * sigma * np.random.standard_normal()\n dv = reversion_strength * (reversion_level - v[i]) * discretization + noise\n v[i + 1] = v[i] + dv\n return(v)\n\n\n# Cox-Ingersoll-Ross-model\ndef sim_cir(steps, start=0, sigma=1,\n reversion_level=None, reversion_strength=1,\n discretization=1):\n if reversion_level is None:\n reversion_level = start\n steps_mod = int(steps / discretization)\n v = np.zeros(steps_mod)\n v[0] = start\n for i in range(steps_mod - 1):\n # It is not ensured that v[i] is positive. 
This true in the \n # continuous setting, but not necessarily in this discrete setting\n # (especially when 'discretization' is large compared to 'sigma').\n noise = sqrt(discretization * v[i]) * sigma * np.random.standard_normal()\n dv = reversion_strength * (reversion_level - v[i]) * discretization + noise\n v[i + 1] = v[i] + dv\n return(v)\n\n\n# Parameters\nstart = 0.03\nsigma = 0.05\nphi = 0.99\nsteps = 20\nreversion_level = 0.01\nreversion_strength = 1.5\ndiscretization = 0.01\n\nsteps_mod = int(steps / discretization)\n\n# Simulate\nI_Brownian = sim_brownian(steps, start, sigma, discretization)\nI_ar1 = sim_ar1(steps_mod, start, sqrt(discretization) * sigma, phi)\nI_V = sim_vasicek(steps,\n start,\n sigma,\n reversion_level,\n reversion_strength,\n discretization)\nI_cir = sim_cir(steps,\n start,\n sigma,\n reversion_level,\n reversion_strength,\n discretization)\n\n# Plot\nplt.plot(np.column_stack((I_Brownian, I_ar1, I_V, I_cir)))\nplt.legend(['Brownian motion', 'AR(1)-model',\n 'Vasiček-model', 'Cox-Ingersoll-Ross-model'])\nplt.title('Basic interest rate models')\nplt.xlabel('Time')\nplt.ylabel('Interest rate')\nplt.xticks(range(0, steps_mod, int(1 / discretization)),\n range(0, steps))\nplt.show()\n"
] |
[
[
"numpy.random.standard_normal",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.cumsum",
"matplotlib.pyplot.show",
"numpy.column_stack"
]
] |
Cospel/age_prediction_cnn
|
[
"de3f293d70faa791f7c7076e049daf96b47c650b"
] |
[
"facematch/age_prediction/utils/utils.py"
] |
[
"import numpy as np\nfrom scipy.special import softmax\nCLASSES_NUMBER = 100\nMAX_AGE = 100\nRANGE_LENGTH = 5\nAGE_RANGES_UPPER_THRESH = 80\n\n\ndef build_age_vector(age, deviation):\n \"\"\"Build AGE vector as a normal probability histogram\"\"\"\n # Sample from a normal distribution using numpy's random number generator\n mean, std = age, deviation - 2\n bins_number = deviation * 2 + 2\n samples = np.random.normal(mean, 3, size=10000)\n\n age_vec = np.zeros(shape=(MAX_AGE))\n\n # Compute a histogram of the sample\n bins = np.linspace(mean - deviation, mean + deviation, bins_number)\n histogram, bins = np.histogram(samples, bins=bins)\n\n # Get index of mean in histogram and start / end of histogram in AGE vector\n mean_ind = np.where(histogram == np.amax(histogram))[0][0]\n start_ind = mean - mean_ind\n end_ind = start_ind + histogram.shape[0]\n\n # handle borders of the probability distribution range falling outside the main range [0..100]\n if start_ind < 0:\n histogram = histogram[abs(start_ind) :]\n\n if end_ind > MAX_AGE:\n histogram = histogram[: (MAX_AGE - (end_ind))]\n\n end_ind = min(MAX_AGE, end_ind)\n start_ind = max(0, start_ind)\n age_vec[start_ind:end_ind] = histogram\n\n # Normalize age histogram\n age_vec = age_vec / age_vec.sum()\n return age_vec\n\n\ndef age_ranges_number():\n \"\"\"Calculates total number of classes for age range mode\"\"\"\n return int(AGE_RANGES_UPPER_THRESH / RANGE_LENGTH) + 1\n\n\ndef get_age_range_index(age):\n \"\"\"Calculates index of 5-year age range for specified age value\"\"\"\n age = min(age, AGE_RANGES_UPPER_THRESH)\n\n return int(age / RANGE_LENGTH)\n\n\ndef get_range(index):\n \"\"\"Returns range for given index\"\"\"\n if index == age_ranges_number() - 1:\n return (RANGE_LENGTH * index, None)\n\n return (RANGE_LENGTH * index, RANGE_LENGTH * (index + 1))\n\n\nif __name__ == \"__main__\":\n vector = build_age_vector(20, 5)\n print(vector, vector[15], vector[25], vector.sum())\n import matplotlib.pyplot as plt\n 
plt.bar(np.arange(0,100, 1), vector) \n plt.ylabel('prob')\n plt.xlabel('age')\n plt.title('age vector')\n plt.show()\n"
] |
[
[
"numpy.random.normal",
"numpy.histogram",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.amax",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
ssloy/least-squares-course
|
[
"e51206d795bd8385779f13fd611ed91624095d04",
"e51206d795bd8385779f13fd611ed91624095d04"
] |
[
"src/ch6/3-caricature/silhouette-better.py",
"presentation/listings/poisson-2d.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef amplify(x):\n n = len(x)\n A = np.matrix(np.zeros((2*n,n)))\n b = np.matrix(np.zeros((2*n,1)))\n for i in range(n):\n A[i, i] = 1. # amplify the curvature\n A[i, (i+1)%n] = -1.\n b[i, 0] = (x[i] - x[(i+1)%n])*1.9\n\n A[n+i, i] = 1*.3 # light data fitting term\n b[n+i, 0] = x[i]*.3\n return (np.linalg.inv(A.T*A)*A.T*b).tolist()\n\nx = [100,100,97,93,91,87,84,83,85,87,88,89,90,90,90,88,87,86,84,82,80,\n 77,75,72,69,66,62,58,54,47,42,38,34,32,28,24,22,20,17,15,13,12,9,\n 7,8,9,8,6,0,0,2,0,0,2,3,2,0,0,1,4,8,11,14,19,24,27,25,23,21,19]\ny = [0,25,27,28,30,34,37,41,44,47,51,54,59,64,66,70,74,78,80,83,86,90,93,\n 95,96,98,99,99,100,99,99,99,98,98,96,94,93,91,90,87,85,79,75,70,65,\n 62,60,58,52,49,46,44,41,37,34,30,27,20,17,15,16,17,17,19,18,14,11,6,4,1]\n\nplt.plot(x+[x[0]], y+[y[0]], 'g--')\n\nx = amplify(x)\ny = amplify(y)\n\nplt.plot(x+[x[0]], y+[y[0]], 'k-', linewidth=3)\nplt.axis('off')\nplt.gca().set_aspect('equal', adjustable='box')\nplt.show()\n\n",
"import matplotlib.image as mpimg\nimport scipy.sparse\nfrom scipy.sparse.linalg import lsmr\nbase = mpimg.imread('baseball.png')\nfoot = mpimg.imread('football.png')\nox,oy, w,h = 100,60, len(foot[0]),len(foot) # working rectangle\n\nA = scipy.sparse.lil_matrix((2*w+2*h + 2*(w-1)*(h-1), w*h))\nfor i in range(0,w):\n A[ i, i ] = 1 # top data fitting\n A[w+i, i+(h-1)*w] = 1 # bottom data fitting\nfor j in range(0,h):\n A[2*w +j, j*w] = 1 # left data fitting\n A[2*w+h+j, w-1+j*w] = 1 # right data fitting\ncnt = 2*w+2*h\nfor j in range(0,h-1): # gradient matrix\n for i in range(0,w-1):\n A[cnt, i + j*w] = -1\n A[cnt, i+1 + j*w] = 1\n A[cnt+1, i + j *w] = -1\n A[cnt+1, i + (j+1)*w] = 1\n cnt += 2\nA = A.tocsc() # sparse row matrix for fast matrix-vector multiplication\n\nfor channel in range(3):\n b = A.dot(foot[:,:,channel].flatten()) # fill the gradient part of the r.h.s.\n b[0:w] = base[oy,ox:ox+w,channel] # top data fitting\n b[w:2*w] = base[oy+h,ox:ox+w,channel] # bottom data fitting\n b[2*w :2*w+h] = base[oy:oy+h, ox, channel] # left data fitting\n b[2*w+h:2*w+2*h] = base[oy:oy+h, ox+w, channel] # right data fitting\n\n x = lsmr(A, b)[0] # call the least squares solver\n base[oy:oy+h,ox:ox+h, channel] = x.reshape((h, w)) # glue the football\nmpimg.imsave('result.png', base)\n"
] |
[
[
"numpy.zeros",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"numpy.linalg.inv",
"matplotlib.pyplot.axis"
],
[
"scipy.sparse.linalg.lsmr",
"matplotlib.image.imread",
"matplotlib.image.imsave"
]
] |
Chan1998/MAAC
|
[
"8eabafe346b7b238663de4ad12f8771d7db6071a"
] |
[
"multiagent/environment.py"
] |
[
"import gym\nfrom gym import spaces\nfrom gym.envs.registration import EnvSpec\nimport numpy as np\n\n# environment for all agents in the multiagent world\n# currently code assumes that no agents will be created/destroyed at runtime!\nclass MultiAgentEnv(gym.Env):\n metadata = {\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, world, reset_callback=None, reward_callback=None,\n observation_callback=None, info_callback=None,\n done_callback=None, post_step_callback=None,\n shared_viewer=True, discrete_action=False):\n\n self.world = world\n self.agents = self.world.policy_agents\n # set required vectorized gym env property\n self.n = len(world.policy_agents)\n # scenario callbacks\n self.reset_callback = reset_callback\n self.reward_callback = reward_callback\n self.observation_callback = observation_callback\n self.info_callback = info_callback\n self.done_callback = done_callback\n self.post_step_callback = post_step_callback\n # environment parameters\n self.discrete_action_space = discrete_action\n # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector\n self.discrete_action_input = False\n # if true, even the action is continuous, action will be performed discretely\n self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False\n # if true, every agent has the same reward\n self.shared_reward = False\n self.time = 0\n\n # configure spaces\n self.action_space = []\n self.observation_space = []\n for agent in self.agents:\n total_action_space = []\n # physical action space\n if self.discrete_action_space:\n u_action_space = spaces.Discrete(world.dim_p * 2 + 1)\n else:\n u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,))\n if agent.movable:\n total_action_space.append(u_action_space)\n # communication action space\n c_action_space = spaces.Discrete(world.dim_c)\n if not agent.silent:\n total_action_space.append(c_action_space)\n # 
total action space\n if len(total_action_space) > 1:\n # all action spaces are discrete, so simplify to MultiDiscrete action space\n if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):\n act_space = spaces.MultiDiscrete([[0,act_space.n-1] for act_space in total_action_space])\n else:\n act_space = spaces.Tuple(total_action_space)\n self.action_space.append(act_space)\n else:\n self.action_space.append(total_action_space[0])\n # observation space\n obs_dim = len(observation_callback(agent, self.world))\n self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,)))\n agent.action.c = np.zeros(self.world.dim_c)\n\n # rendering\n self.shared_viewer = shared_viewer\n if self.shared_viewer:\n self.viewers = [None]\n else:\n self.viewers = [None] * self.n\n self._reset_render()\n\n def _seed(self, seed=None):\n if seed is None:\n np.random.seed(1)\n else:\n np.random.seed(seed)\n\n\n def step(self, action_n):\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n self.agents = self.world.policy_agents\n # set action for each agent\n for i, agent in enumerate(self.agents):\n self._set_action(action_n[i], agent, self.action_space[i])\n # advance world state\n self.world.step()\n # record observation for each agent\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n reward_n.append(self._get_reward(agent))\n done_n.append(self._get_done(agent))\n\n info_n['n'].append(self._get_info(agent))\n\n # all agents get total reward in cooperative case\n reward = np.sum(reward_n)\n if self.shared_reward:\n reward_n = [reward] * self.n\n if self.post_step_callback is not None:\n self.post_step_callback(self.world)\n return obs_n, reward_n, done_n, info_n\n\n def reset(self):\n # reset world\n self.reset_callback(self.world)\n # reset renderer\n self._reset_render()\n # record observations for each agent\n obs_n = []\n self.agents = self.world.policy_agents\n for agent in self.agents:\n 
obs_n.append(self._get_obs(agent))\n return obs_n\n\n # get info used for benchmarking\n def _get_info(self, agent):\n if self.info_callback is None:\n return {}\n return self.info_callback(agent, self.world)\n\n # get observation for a particular agent\n def _get_obs(self, agent):\n if self.observation_callback is None:\n return np.zeros(0)\n return self.observation_callback(agent, self.world)\n\n # get dones for a particular agent\n # unused right now -- agents are allowed to go beyond the viewing screen\n def _get_done(self, agent):\n if self.done_callback is None:\n return False\n return self.done_callback(agent, self.world)\n\n # get reward for a particular agent\n def _get_reward(self, agent):\n if self.reward_callback is None:\n return 0.0\n return self.reward_callback(agent, self.world)\n\n # set env action for a particular agent\n def _set_action(self, action, agent, action_space, time=None):\n agent.action.u = np.zeros(self.world.dim_p)\n agent.action.c = np.zeros(self.world.dim_c)\n # process action\n if isinstance(action_space, spaces.MultiDiscrete):\n act = []\n size = action_space.high - action_space.low + 1\n index = 0\n for s in size:\n act.append(action[index:(index+s)])\n index += s\n action = act\n else:\n action = [action]\n\n if agent.movable:\n # physical action\n if self.discrete_action_input:\n agent.action.u = np.zeros(self.world.dim_p)\n # process discrete action\n if action[0] == 1: agent.action.u[0] = -1.0\n if action[0] == 2: agent.action.u[0] = +1.0\n if action[0] == 3: agent.action.u[1] = -1.0\n if action[0] == 4: agent.action.u[1] = +1.0\n else:\n if self.force_discrete_action:\n d = np.argmax(action[0])\n action[0][:] = 0.0\n action[0][d] = 1.0\n if self.discrete_action_space:\n agent.action.u[0] += action[0][1] - action[0][2]\n agent.action.u[1] += action[0][3] - action[0][4]\n else:\n agent.action.u = action[0]\n sensitivity = 5.0\n if agent.accel is not None:\n sensitivity = agent.accel\n agent.action.u *= sensitivity\n action = 
action[1:]\n if not agent.silent:\n # communication action\n if self.discrete_action_input:\n agent.action.c = np.zeros(self.world.dim_c)\n agent.action.c[action[0]] = 1.0\n else:\n agent.action.c = action[0]\n action = action[1:]\n # make sure we used all elements of action\n assert len(action) == 0\n\n # reset rendering assets\n def _reset_render(self):\n self.render_geoms = None\n self.render_geoms_xform = None\n\n # render environment\n def _render(self, mode='human', close=True):\n if close:\n # close any existic renderers\n for i,viewer in enumerate(self.viewers):\n if viewer is not None:\n viewer.close()\n self.viewers[i] = None\n return []\n\n if mode == 'human':\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n message = ''\n for agent in self.world.agents:\n comm = []\n for other in self.world.agents:\n if other is agent: continue\n if np.all(other.state.c == 0):\n word = '_'\n else:\n word = alphabet[np.argmax(other.state.c)]\n message += (other.name + ' to ' + agent.name + ': ' + word + ' ')\n # print(message)\n\n for i in range(len(self.viewers)):\n # create viewers (if necessary)\n if self.viewers[i] is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from multiagent import rendering\n self.viewers[i] = rendering.Viewer(700,700)\n\n # create rendering geometry\n if self.render_geoms is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from multiagent import rendering\n self.render_geoms = []\n self.render_geoms_xform = []\n self.comm_geoms = []\n for entity in self.world.entities:\n geom = rendering.make_circle(entity.size)\n xform = rendering.Transform()\n entity_comm_geoms = []\n if 'agent' in entity.name:\n geom.set_color(*entity.color, alpha=0.5)\n if not entity.silent:\n dim_c = self.world.dim_c\n # make circles to represent communication\n for ci in range(dim_c):\n comm = 
rendering.make_circle(entity.size / dim_c)\n comm.set_color(1, 1, 1)\n comm.add_attr(xform)\n offset = rendering.Transform()\n comm_size = (entity.size / dim_c)\n offset.set_translation(ci * comm_size * 2 -\n entity.size + comm_size, 0)\n comm.add_attr(offset)\n entity_comm_geoms.append(comm)\n else:\n geom.set_color(*entity.color)\n geom.add_attr(xform)\n self.render_geoms.append(geom)\n self.render_geoms_xform.append(xform)\n self.comm_geoms.append(entity_comm_geoms)\n for wall in self.world.walls:\n corners = ((wall.axis_pos - 0.5 * wall.width, wall.endpoints[0]),\n (wall.axis_pos - 0.5 * wall.width, wall.endpoints[1]),\n (wall.axis_pos + 0.5 * wall.width, wall.endpoints[1]),\n (wall.axis_pos + 0.5 * wall.width, wall.endpoints[0]))\n if wall.orient == 'H':\n corners = tuple(c[::-1] for c in corners)\n geom = rendering.make_polygon(corners)\n if wall.hard:\n geom.set_color(*wall.color)\n else:\n geom.set_color(*wall.color, alpha=0.5)\n self.render_geoms.append(geom)\n\n # add geoms to viewer\n for viewer in self.viewers:\n viewer.geoms = []\n for geom in self.render_geoms:\n viewer.add_geom(geom)\n for entity_comm_geoms in self.comm_geoms:\n for geom in entity_comm_geoms:\n viewer.add_geom(geom)\n\n results = []\n for i in range(len(self.viewers)):\n from multiagent import rendering\n # update bounds to center around agent\n cam_range = 1\n if self.shared_viewer:\n pos = np.zeros(self.world.dim_p)\n else:\n pos = self.agents[i].state.p_pos\n self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)\n # update geometry positions\n for e, entity in enumerate(self.world.entities):\n self.render_geoms_xform[e].set_translation(*entity.state.p_pos)\n if 'agent' in entity.name:\n self.render_geoms[e].set_color(*entity.color, alpha=0.5)\n if not entity.silent:\n for ci in range(self.world.dim_c):\n color = 1 - entity.state.c[ci]\n self.comm_geoms[e][ci].set_color(color, color, color)\n else:\n 
self.render_geoms[e].set_color(*entity.color)\n # render to display or array\n results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))\n\n return results\n\n # create receptor field locations in local coordinate frame\n def _make_receptor_locations(self, agent):\n receptor_type = 'polar'\n range_min = 0.05 * 2.0\n range_max = 1.00\n dx = []\n # circular receptive field\n if receptor_type == 'polar':\n for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):\n for distance in np.linspace(range_min, range_max, 3):\n dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))\n # add origin\n dx.append(np.array([0.0, 0.0]))\n # grid receptive field\n if receptor_type == 'grid':\n for x in np.linspace(-range_max, +range_max, 5):\n for y in np.linspace(-range_max, +range_max, 5):\n dx.append(np.array([x,y]))\n return dx\n\n\n# vectorized wrapper for a batch of multi-agent environments\n# assumes all environments have the same observation and action space\nclass BatchMultiAgentEnv(gym.Env):\n metadata = {\n 'runtime.vectorized': True,\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, env_batch):\n self.env_batch = env_batch\n\n @property\n def n(self):\n return np.sum([env.n for env in self.env_batch])\n\n @property\n def action_space(self):\n return self.env_batch[0].action_space\n\n @property\n def observation_space(self):\n return self.env_batch[0].observation_space\n\n def _step(self, action_n, time):\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n i = 0\n for env in self.env_batch:\n obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)\n i += env.n\n obs_n += obs\n # reward = [r / len(self.env_batch) for r in reward]\n reward_n += reward\n done_n += done\n return obs_n, reward_n, done_n, info_n\n\n def _reset(self):\n obs_n = []\n for env in self.env_batch:\n obs_n += env.reset()\n return obs_n\n\n # render environment\n def _render(self, mode='human', close=True):\n results_n = []\n for env in 
self.env_batch:\n results_n += env.render(mode, close)\n return results_n\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.zeros",
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"numpy.cos",
"numpy.all",
"numpy.linspace"
]
] |
eitrheim/Resume-Screening-and-Selection
|
[
"4ee2dd0d6ba917bcf244c704ef5042fe7596e600"
] |
[
"khc_home_edited/method.py"
] |
[
"import pandas as pd\r\nimport numpy as np\r\nimport csv\r\nimport pickle\r\n\r\nJobs_path = \"TestDescriptions.csv\"\r\nJobs = pd.read_csv(Jobs_path, delimiter=',')\r\n\r\ndef get_JobID():\r\n IDs = np.array(Jobs.index.values.tolist())\r\n IDs = np.unique(IDs)\r\n IDs = IDs.tolist()\r\n return(IDs)\r\n\r\ndef get_Info(ID):\r\n return Jobs[Jobs.index == ID]\r\n\r\npickle.dump(Jobs, open('data.pkl','wb'))"
] |
[
[
"pandas.read_csv",
"numpy.unique"
]
] |
SOOIN-KIM/lab-python
|
[
"4b85dc11c76e2d4f89be0d01864f9f61f3c6e2cc"
] |
[
"scratch14/ex03.py"
] |
[
"from sklearn.datasets import load_iris\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\ndef logistic(x):\n '''Logistic Sigmoid 함수'''\n return 1 / (1 + math.exp(-x))\n\n\ndef predict(row, betas):\n '''row의 x1, x2값과 betos의 b0, b1, b2를 사용해서\n 회귀식 y = b0 _ b1 * x2 + b2* x2를 만들고,\n 회귀식을 로지스틱 함수의 파라미터에 전달해서 예측값을 알아냄'''\n y_hat = betas[0]\n for i in range(len(betas) - 1):\n y_hat += betas[i+1] * row[i]\n return logistic(y_hat)\n\ndef coefficioent_sgd(dataset, learning_rate, epochs):\n '''회귀식 y =b0 +b1* x1 + b2 * x2의 계수들(b0, b1, b2)을\n stochastic gradient descent 방법으로 추정(estimate)'''\n # 회귀식에서 가장 처음에 사용할 betas 초깃값을 0으로 시작\n betas = [0 for _ in range(len(dataset[0]))]\n for epoch in range(epochs): # epochs 회수만큼 반복\n sse = 0\n for sample in dataset: # 데이터 세트에서 row 개수만큼 반복\n prediction = predict(sample,betas)\n error = sample[-1] - prediction\n sse += error ** 2\n # 계수들(b0, b1, b2)를 아래와 같은 방법으로 업데이트\n # b_new = b + learning_rate * error * prediction * (1-prediction)\n betas[0] = betas[0] + learning_rate * error * prediction * (1 - prediction)\n for i in range(len(sample) - 1):\n betas[i + 1] = betas[i + 1] + learning_rate * error * prediction * (1 - prediction) * sample[i]\n\n\n\n\n\nif __name__ == '__main__':\n iris = load_iris()\n print(iris.DESCR)\n X = iris.data # iris['data']\n y = iris.target # iris['target]\n features = iris.feature_names\n\n for i in range(len(features)):\n plt.scatter(X[:,i],y, label=features[i])\n plt.legend()\n plt.show()\n\n # petal -length, petal-width가 calss(품종)을 분류할 떄 상관관계가 높아 보임.\n X = X[: ,2:4] # pl, pw만 선택\n print(X[:5])\n\n # setosa 5개, setosa가 아닌 품종 5개를 샘플링\n indices = [x for x in range(0,100,10)]\n sample_data = np.c_[X[indices, :], y[indices]]\n print(sample_data)\n\n np.random.seed(1218)\n betas = np.random.random(3)\n for sample in sample_data:\n prediction = predict(sample, betas)\n # 오류 = 실제값 - 예측값\n error = sample[-1] - prediction\n print(f'실제값={sample[-1]}, 예측값={prediction}, 오차={error}')\n"
] |
[
[
"numpy.random.seed",
"matplotlib.pyplot.legend",
"numpy.random.random",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"sklearn.datasets.load_iris"
]
] |
Phillistan16/fastestimator
|
[
"54c9254098aee89520814ed54b6e6016b821424f",
"54c9254098aee89520814ed54b6e6016b821424f",
"54c9254098aee89520814ed54b6e6016b821424f"
] |
[
"apphub/NLP/language_modeling/ptb_tf.py",
"test/PR_test/integration_test/backend/test_set_lr.py",
"test/PR_test/integration_test/schedule/test_epoch_scheduler.py"
] |
[
"# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tempfile\n\nimport fastestimator as fe\nimport numpy as np\nimport tensorflow as tf\nfrom fastestimator.dataset.data.penn_treebank import load_data\nfrom fastestimator.op.numpyop import NumpyOp\nfrom fastestimator.op.tensorop.loss import CrossEntropy\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.trace import Trace\nfrom fastestimator.trace.adapt import EarlyStopping, LRScheduler\nfrom fastestimator.trace.io import BestModelSaver\n\n\nclass CreateInputAndTarget(NumpyOp):\n def forward(self, data, state):\n return data[:-1], data[1:]\n\n\nclass Perplexity(Trace):\n def on_epoch_end(self, data):\n ce = data[\"ce\"]\n data.write_with_log(self.outputs[0], np.exp(ce))\n\n\ndef build_model(vocab_size, embedding_dim, rnn_units, seq_length):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[None, seq_length]),\n tf.keras.layers.LSTM(rnn_units, return_sequences=True, recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model\n\n\ndef lr_schedule(step, init_lr):\n if step <= 1725:\n lr = init_lr + init_lr * (step - 1) / 1725\n else:\n lr = max(2 * init_lr * ((6900 - step + 1725) / 6900), 1.0)\n return lr\n\n\ndef 
get_estimator(epochs=30,\n batch_size=128,\n seq_length=20,\n vocab_size=10000,\n data_dir=None,\n max_train_steps_per_epoch=None,\n save_dir=tempfile.mkdtemp()):\n train_data, eval_data, _, _ = load_data(root_dir=data_dir, seq_length=seq_length + 1)\n pipeline = fe.Pipeline(train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=CreateInputAndTarget(inputs=\"x\", outputs=(\"x\", \"y\")),\n drop_last=True)\n # step 2\n model = fe.build(model_fn=lambda: build_model(vocab_size, embedding_dim=300, rnn_units=600, seq_length=seq_length),\n optimizer_fn=lambda: tf.optimizers.SGD(1.0, momentum=0.9))\n\n network = fe.Network(ops=[\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\", mode=None),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", form=\"sparse\", from_logits=True),\n UpdateOp(model=model, loss_name=\"ce\")\n ])\n # step 3\n traces = [\n Perplexity(inputs=\"ce\", outputs=\"perplexity\", mode=\"eval\"),\n LRScheduler(model=model, lr_fn=lambda step: lr_schedule(step, init_lr=1.0)),\n BestModelSaver(model=model, save_dir=save_dir, metric='perplexity', save_best_mode='min', load_best_final=True),\n EarlyStopping(monitor=\"perplexity\", patience=5)\n ]\n estimator = fe.Estimator(pipeline=pipeline,\n network=network,\n epochs=epochs,\n traces=traces,\n max_train_steps_per_epoch=max_train_steps_per_epoch)\n return estimator\n\n\nif __name__ == \"__main__\":\n est = get_estimator()\n est.fit()\n",
"# Copyright 2021 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport torch\n\nimport fastestimator as fe\nfrom fastestimator.test.unittest_util import is_equal\n\n\nclass TestSetLr(unittest.TestCase):\n def test_set_lr_tf(self):\n m = fe.build(fe.architecture.tensorflow.LeNet, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))\n fe.backend.set_lr(m, 2e-4)\n self.assertTrue(np.allclose(fe.backend.get_lr(model=m), 2e-4))\n\n def test_set_lr_tf_weight_decay(self):\n m = fe.build(fe.architecture.tensorflow.LeNet,\n optimizer_fn=lambda: tfa.optimizers.SGDW(weight_decay=1e-5, learning_rate=1e-4))\n fe.backend.set_lr(m, 2e-4)\n self.assertTrue(np.allclose(tf.keras.backend.get_value(m.current_optimizer.weight_decay), 2e-5))\n\n def test_set_lr_torch(self):\n m = fe.build(fe.architecture.pytorch.LeNet, optimizer_fn=lambda x: torch.optim.Adam(params=x, lr=1e-4))\n fe.backend.set_lr(m, 2e-4)\n self.assertTrue(np.allclose(fe.backend.get_lr(model=m), 2e-4))\n",
"# Copyright 2020 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tempfile\nimport unittest\n\nimport numpy as np\nfrom torch.utils.data.dataset import Dataset as TorchDS\n\nimport fastestimator as fe\nfrom fastestimator.dataset import NumpyDataset\nfrom fastestimator.op.numpyop import NumpyOp\nfrom fastestimator.op.tensorop import TensorOp\nfrom fastestimator.op.tensorop.model import ModelOp\nfrom fastestimator.schedule import EpochScheduler\nfrom fastestimator.test.unittest_util import sample_system_object, sample_system_object_torch\nfrom fastestimator.trace import Trace\n\n\nclass TestNonTraceableDataset(TorchDS):\n def __init__(self, data, var):\n super().__init__()\n self.data = data\n self.var = var\n\n def __getitem__(self, idx):\n return self.data[idx]\n\n def __len__(self):\n return len(self.data)\n\n\nclass TestDataset(NumpyDataset):\n def __init__(self, data, var):\n super().__init__(data)\n self.var = var\n\n\nclass TestNumpyOp(NumpyOp):\n def __init__(self, inputs, outputs, mode, var):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.var = var\n\n\nclass TestTensorOp(TensorOp):\n def __init__(self, inputs, outputs, mode, var):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.var = var\n\n\nclass TestTrace(Trace):\n def __init__(self, var1):\n super().__init__()\n self.var1 = 
var1\n\n\nclass TestEpochScheduler(unittest.TestCase):\n def test_save_and_load_state_with_ds_scheduler_tf(self):\n def instantiate_system():\n system = sample_system_object()\n x_train = np.ones((2, 28, 28, 3))\n y_train = np.ones((2, ))\n train_data = EpochScheduler(epoch_dict={1: TestDataset(data={'x': x_train, 'y': y_train}, var=1)})\n system.pipeline = fe.Pipeline(train_data=train_data, batch_size=1)\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.pipeline.data[\"train\"].get_current_value(1).var = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_dir=save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n\n loaded_var = system.pipeline.data[\"train\"].get_current_value(1).var\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_ds_scheduler_torch(self):\n def instantiate_system():\n system = sample_system_object_torch()\n x_train = np.ones((2, 3, 28, 28))\n y_train = np.ones((2, ))\n train_data = EpochScheduler(epoch_dict={1: TestDataset(data={'x': x_train, 'y': y_train}, var=1)})\n system.pipeline = fe.Pipeline(train_data=train_data, batch_size=1)\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.pipeline.data[\"train\"].get_current_value(1).var = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_dir=save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n\n loaded_var = system.pipeline.data[\"train\"].get_current_value(1).var\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_top_scheduler_tf(self):\n def instantiate_system():\n system = sample_system_object()\n model = fe.build(model_fn=fe.architecture.tensorflow.LeNet, optimizer_fn='adam', model_name='tf')\n system.network = fe.Network(ops=[\n 
EpochScheduler(epoch_dict={1: TestTensorOp(inputs=\"x_out\", outputs=\"x_out\", mode=\"train\", var=1)}),\n ModelOp(model=model, inputs=\"x_out\", outputs=\"y_pred\")\n ])\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.network.ops[0].get_current_value(1).var = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_dir=save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n loaded_var = system.network.ops[0].get_current_value(1).var\n\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_top_scheduler_torch(self):\n def instantiate_system():\n system = sample_system_object_torch()\n model = fe.build(model_fn=fe.architecture.pytorch.LeNet, optimizer_fn='adam', model_name='torch')\n system.network = fe.Network(ops=[\n EpochScheduler(epoch_dict={1: TestTensorOp(inputs=\"x_out\", outputs=\"x_out\", mode=\"train\", var=1)}),\n ModelOp(model=model, inputs=\"x_out\", outputs=\"y_pred\")\n ])\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.network.ops[0].get_current_value(1).var = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_dir=save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n loaded_var = system.network.ops[0].get_current_value(1).var\n\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_nop_scheduler_tf(self):\n def instantiate_system():\n system = sample_system_object()\n system.pipeline.ops = [\n EpochScheduler(epoch_dict={1: TestNumpyOp(inputs=\"x\", outputs=\"x\", mode=\"train\", var=1)})\n ]\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.pipeline.ops[0].get_current_value(1).var = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n 
system.save_state(save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n loaded_var = system.pipeline.ops[0].get_current_value(1).var\n\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_nop_scheduler_torch(self):\n def instantiate_system():\n system = sample_system_object_torch()\n system.pipeline.ops = [\n EpochScheduler(epoch_dict={1: TestNumpyOp(inputs=\"x\", outputs=\"x\", mode=\"train\", var=1)})\n ]\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.pipeline.ops[0].get_current_value(1).var = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n loaded_var = system.pipeline.ops[0].get_current_value(1).var\n\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_trace_scheduler_tf(self):\n def instantiate_system():\n system = sample_system_object()\n system.traces.append(EpochScheduler(epoch_dict={1: TestTrace(var1=1)}))\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.traces[0].get_current_value(1).var1 = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n loaded_var = system.traces[0].get_current_value(1).var1\n\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_trace_scheduler_torch(self):\n def instantiate_system():\n system = sample_system_object_torch()\n system.traces.append(EpochScheduler(epoch_dict={1: TestTrace(var1=1)}))\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var = 2\n system.traces[0].get_current_value(1).var1 = new_var\n\n # save the state\n save_path = tempfile.mkdtemp()\n 
system.save_state(save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n loaded_var = system.traces[0].get_current_value(1).var1\n\n self.assertEqual(loaded_var, new_var)\n\n def test_save_and_load_state_with_non_traceable_entries(self):\n def instantiate_system():\n system = sample_system_object()\n x_train = np.ones((2, 28, 28, 3))\n y_train = np.ones((2, ))\n data = {0: {'x': x_train[0], 'y': y_train[0]}, 1: {'x': x_train[1], 'y': y_train[1]}}\n train_data = EpochScheduler(epoch_dict={\n 1: TestNonTraceableDataset(data=data, var=3), 2: TestNonTraceableDataset(data=data, var=7), 3: None\n })\n system.pipeline = fe.Pipeline(train_data=train_data, batch_size=1)\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var1 = 4\n system.pipeline.data[\"train\"].get_current_value(1).var = new_var1\n new_var2 = 99\n system.pipeline.data[\"train\"].get_current_value(2).var = new_var2\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_dir=save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n\n with self.subTest('Check that epoch dict is still populated'):\n self.assertEqual(3, len(system.pipeline.data['train'].epoch_dict))\n with self.subTest('Check that classes are still intact'):\n self.assertTrue(isinstance(system.pipeline.data['train'].get_current_value(1), TestNonTraceableDataset))\n self.assertTrue(isinstance(system.pipeline.data['train'].get_current_value(2), TestNonTraceableDataset))\n self.assertTrue(system.pipeline.data['train'].get_current_value(3) is None)\n with self.subTest('Check that the 1st epoch dict entry was not restored'):\n # Since the dataset is not traceable changes shouldn't get restored\n loaded_var = system.pipeline.data[\"train\"].get_current_value(1).var\n self.assertEqual(loaded_var, 3)\n with self.subTest('Check that the 2nd epoch dict entry was not 
restored'):\n loaded_var = system.pipeline.data[\"train\"].get_current_value(2).var\n self.assertEqual(loaded_var, 7)\n\n def test_save_and_load_state_with_hybrid_entries(self):\n def instantiate_system():\n system = sample_system_object()\n x_train = np.ones((2, 28, 28, 3))\n y_train = np.ones((2, ))\n data = {0: {'x': x_train[0], 'y': y_train[0]}, 1: {'x': x_train[1], 'y': y_train[1]}}\n train_data = EpochScheduler(epoch_dict={\n 1: TestNonTraceableDataset(data=data, var=3), 2: TestDataset(data={\n 'x': x_train, 'y': y_train\n }, var=7)\n })\n system.pipeline = fe.Pipeline(train_data=train_data, batch_size=1)\n return system\n\n system = instantiate_system()\n\n # make some changes\n new_var1 = 4\n system.pipeline.data[\"train\"].get_current_value(1).var = new_var1\n new_var2 = 99\n system.pipeline.data[\"train\"].get_current_value(2).var = new_var2\n\n # save the state\n save_path = tempfile.mkdtemp()\n system.save_state(save_dir=save_path)\n\n # reinstantiate system and load the state\n system = instantiate_system()\n system.load_state(save_path)\n\n with self.subTest('Check that epoch dict is still populated'):\n self.assertEqual(2, len(system.pipeline.data['train'].epoch_dict))\n with self.subTest('Check that classes are still intact'):\n self.assertTrue(isinstance(system.pipeline.data['train'].get_current_value(1), TestNonTraceableDataset))\n self.assertTrue(isinstance(system.pipeline.data['train'].get_current_value(2), TestDataset))\n with self.subTest('Check that the 1st epoch dict entry was not restored'):\n # Since the dataset is not traceable changes shouldn't get restored\n loaded_var = system.pipeline.data[\"train\"].get_current_value(1).var\n self.assertEqual(loaded_var, 3)\n with self.subTest('Check that the 2nd epoch dict entry was restored'):\n loaded_var = system.pipeline.data[\"train\"].get_current_value(2).var\n self.assertEqual(loaded_var, new_var2)\n"
] |
[
[
"numpy.exp",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.LSTM",
"tensorflow.optimizers.SGD"
],
[
"torch.optim.Adam",
"tensorflow.optimizers.Adam",
"tensorflow.keras.backend.get_value"
],
[
"numpy.ones"
]
] |
mingruimingrui/torch-collections
|
[
"f7c20b28b63de76c763983338aa4c825904ef4cd"
] |
[
"torch_collections/modules/_l2_normalization.py"
] |
[
"\"\"\" Copied from \"\"\"\n\nimport torch\n\n\nclass L2Normalization(torch.nn.Module):\n def __init__(self, alpha=1.0):\n super(L2Normalization, self).__init__()\n self.alpha = alpha\n\n def forward(self, v):\n # v_ = v / norm * alpha\n # norm = sqrt(norm_sq)\n # norm_sq = sum(v ** 2)\n v_sq= torch.pow(v, 2)\n\n norm_sq = torch.sum(v_sq, dim=1).add(1e-5)\n norm = torch.sqrt(norm_sq).view(-1,1).expand_as(v)\n\n v_ = v.div(norm).mul(self.alpha)\n\n return v_\n"
] |
[
[
"torch.sum",
"torch.sqrt",
"torch.pow"
]
] |
TomaszOdrzygozdz/gym-splendor
|
[
"aeb00605e105628188143a4bbd6280e9eb41c4f9"
] |
[
"monte_carlo_tree_search/mcts_algorithms/multi_process/multi_mcts.py"
] |
[
"# from mpi4py import MPI\n\nimport numpy as np\n\nfrom gym_splendor_code.envs.mechanics.game_settings import USE_TQDM\nfrom monte_carlo_tree_search.evaluation_policies.abstract_evaluation_policy import EvaluationPolicy\nfrom monte_carlo_tree_search.rollout_policies.random_rollout import RandomRollout\nfrom monte_carlo_tree_search.trees.deterministic_tree import DeterministicTreeNode\n\nif USE_TQDM:\n from tqdm import tqdm\n\nfrom gym_splendor_code.envs.mechanics.abstract_observation import DeterministicObservation\nfrom gym_splendor_code.envs.mechanics.action import Action\nfrom monte_carlo_tree_search.mcts_algorithms.single_process.single_mcts import SingleMCTS\nfrom monte_carlo_tree_search.rollout_policies.abstract_rolluot_policy import RolloutPolicy\n\n# comm = MPI.COMM_WORLD\n# my_rank = MPI.COMM_WORLD.Get_rank()\n# main_thread = my_rank==0\n\nclass MultiMCTS:\n def __init__(self,\n mpi_communicator,\n iteration_limit: int = 1000,\n rollout_policy: RolloutPolicy = RandomRollout(distribution='uniform_on_types'),\n evaluation_policy: EvaluationPolicy = None,\n rollout_repetition: int = 0,\n exploration_ceofficient: int = 0.4) -> None:\n\n self.mpi_communicator = mpi_communicator\n self.my_rank = self.mpi_communicator.Get_rank()\n self.my_comm_size = mpi_communicator.Get_size()\n self.main_process = self.my_rank == 0\n self.mcts = SingleMCTS(iteration_limit=iteration_limit, exploration_parameter=exploration_ceofficient,\n evaluation_policy=evaluation_policy, )\n self.iterations_done_so_far = 0\n\n #method needed only for main thread:\n def create_root(self, observation : DeterministicObservation):\n if self.main_process:\n self.mcts.create_root(observation)\n else:\n pass\n\n\n def prepare_list_of_states_to_rollout(self, leaf: DeterministicTreeNode, iteration_limit: int, choose_best=None):\n assert leaf.expanded(), 'Leaf is not yet expanded'\n\n children = leaf.children\n not_terminal_children = [child for child in children if child.terminal == False]\n 
terminal_children = [child for child in children if child.terminal == True]\n\n n_child_to_rollout = min(len(not_terminal_children), iteration_limit)\n childs_per_process = int(n_child_to_rollout/ self.my_comm_size)\n\n states_to_rollout = []\n\n if choose_best is not None and iteration_limit > 1:\n k_max = max(int(n_child_to_rollout*choose_best),1)\n if k_max > 1:\n #read nodes evaluations:\n nodes_list = []\n nodes_values_list = []\n\n for node in not_terminal_children:\n if node.value_acc.get() is not None:\n nodes_list.append(node)\n nodes_values_list.append(node.value_acc.get())\n\n max_ind = list(np.argpartition(nodes_values_list, k_max)[-k_max:])\n max_nodes_list = []\n max_values = []\n for idx in max_ind:\n max_nodes_list.append(nodes_list[idx])\n max_values.append(nodes_values_list[idx])\n not_terminal_children = max_nodes_list\n n_child_to_rollout = len(max_nodes_list)\n childs_per_process = int(n_child_to_rollout / self.my_comm_size)\n\n remaining = n_child_to_rollout % self.my_comm_size\n\n for process_number in range(self.my_comm_size):\n if process_number < remaining:\n states_for_i_th_process = {i*self.my_comm_size + process_number: not_terminal_children[i*self.my_comm_size + process_number].observation for i in range(0,childs_per_process + 1)}\n states_to_rollout.append(states_for_i_th_process)\n if process_number >= remaining and process_number < n_child_to_rollout:\n states_for_i_th_process = {i * self.my_comm_size + process_number: not_terminal_children[i * self.my_comm_size + process_number].observation for i in\n range(0, childs_per_process)}\n states_to_rollout.append(states_for_i_th_process)\n if process_number >= n_child_to_rollout:\n states_to_rollout.append({})\n\n\n return terminal_children, states_to_rollout, n_child_to_rollout\n\n\n def run_mcts_pass(self, iteration_limit, rollout_repetition, choose_best, best_backprop=True, totally_expand=True):\n\n if self.main_process:\n leaf, search_path = self.mcts._tree_traversal()\n 
self.mcts._expand_leaf(leaf)\n\n if not totally_expand:\n iteration_limit_for_expand = iteration_limit - self.iterations_done_so_far\n if totally_expand:\n iteration_limit_for_expand = 10000\n\n states_to_rollout = None\n jobs_to_do = None\n if self.main_process:\n terminal_children, states_to_rollout, jobs_to_do = self.prepare_list_of_states_to_rollout(leaf, iteration_limit_for_expand, choose_best=None)\n\n jobs_done= self.mpi_communicator.bcast(jobs_to_do, root=0)\n my_nodes_to_rollout = self.mpi_communicator.scatter(states_to_rollout, root=0)\n\n #first eval nodes\n if self.mcts.tree_mode == 'evaluation' or self.mcts.tree_mode == 'combined':\n my_results = self._evaluate_many_nodes(my_nodes_to_rollout)\n combined_results = self.mpi_communicator.gather(my_results, root=0)\n if self.main_process:\n flattened_results = self.flatten_list_of_dicts(combined_results)\n if self.mcts.tree_mode == 'evaluation' or self.mcts.tree_mode == 'combined':\n self._backpropagate_many_results('evaluation', search_path, flattened_results)\n #print('MULTI MCTS PASS backrop evaluations')\n\n #now rollout nodes:\n if self.mcts.tree_mode == 'rollout':\n for _ in range(rollout_repetition):\n my_results = self._rollout_many_nodes(my_nodes_to_rollout)\n combined_results = self.mpi_communicator.gather(my_results, root=0)\n if self.main_process:\n flattened_results = self.flatten_list_of_dicts(combined_results)\n self._backpropagate_many_results('rollout', search_path, flattened_results)\n\n\n\n if self.mcts.tree_mode == 'combined':\n if self.main_process:\n _, states_to_rollout, jobs_to_do = self.prepare_list_of_states_to_rollout(leaf,\n iteration_limit_for_expand,\n choose_best=choose_best)\n jobs_done = self.mpi_communicator.bcast(jobs_to_do, root=0)\n my_nodes_to_rollout = self.mpi_communicator.scatter(states_to_rollout, root=0)\n for _ in range(rollout_repetition):\n my_results = self._rollout_many_nodes(my_nodes_to_rollout)\n combined_results = self.mpi_communicator.gather(my_results, 
root=0)\n #if self.main_process:\n if self.main_process:\n flattened_results = self.flatten_list_of_dicts(combined_results)\n if self.mcts.tree_mode == 'combined':\n self._backpropagate_many_results('rollout', search_path, flattened_results)\n\n #colloect values for terminal children:\n if self.main_process:\n for terminal_child in terminal_children:\n for _ in range(rollout_repetition):\n value = 0\n winner_id = leaf.winner_id\n if leaf.perfect_value is not None:\n value = leaf.perfect_value\n local_search_path = search_path + [terminal_child]\n self.mcts._backpropagate('rollout', local_search_path, winner_id, value)\n\n return jobs_done\n\n def _rollout_many_nodes(self, dict_of_states):\n rollout_results_dict = {}\n if len(dict_of_states) > 0:\n for i in dict_of_states:\n winner_id, value = self.mcts._rollout(dict_of_states[i])\n rollout_results_dict[i] = (winner_id, value)\n return rollout_results_dict\n\n def _evaluate_many_nodes(self, dict_of_states):\n evaluation_results_dict = {}\n if len(dict_of_states) > 0:\n for i in dict_of_states:\n evaluated_player_id, value = self.mcts._evaluate(dict_of_states[i])\n evaluation_results_dict[i] = (evaluated_player_id, value)\n return evaluation_results_dict\n\n def _backpropagate_many_results(self, backprop_mode, search_path, rollout_results, best_backprop=0.5):\n if len(rollout_results) > 0:\n threshold_for_backprop = -1\n if best_backprop is not None:\n rollout_results_copy = rollout_results.copy()\n #print('Rollout results len = {}'.format(len(rollout_results_copy)))\n top_index = max(int(best_backprop*len(rollout_results_copy)),1)\n #print('Top index = {}'.format(top_index))\n values_list = []\n for i in rollout_results_copy.keys():\n _, local_value = rollout_results_copy[i]\n values_list.append(local_value)\n\n threshold_for_backprop = values_list[np.argsort(values_list)[-top_index]]\n\n for i in rollout_results:\n _, value = rollout_results[i]\n if value >= threshold_for_backprop:\n this_child = 
search_path[-1].children[i]\n this_particular_search_path = search_path + [this_child]\n if backprop_mode == 'rollout':\n winner_id, value = rollout_results[i]\n self.mcts._backpropagate(this_particular_search_path, winner_id, value)\n if backprop_mode == 'evaluation':\n evaluated_player_id, value = rollout_results[i]\n self.mcts._backpropagate_evaluation(this_particular_search_path, evaluated_player_id, value)\n if value < threshold_for_backprop:\n this_child = search_path[-1].children[i]\n this_child.value_acc.add(value)\n\n def flatten_list_of_dicts(self, list_of_dicts):\n combined_dict = {}\n for rollout_dict in list_of_dicts:\n combined_dict.update(rollout_dict)\n return combined_dict\n\n def move_root(self, action : Action):\n if self.main_process:\n self.mcts.move_root(action)\n else:\n pass\n\n def original_root(self):\n return self.mcts.original_root\n\n def choose_action(self):\n if self.main_process:\n return self.mcts.choose_action()\n else:\n return None\n\n def create_progress_bar(self, lenght):\n if self.main_process:\n if USE_TQDM:\n self.progress_bar = tqdm(total=lenght, postfix=None)\n\n def set_progress_bar(self, value):\n if self.main_process:\n self.progress_bar.n = min(value, self.progress_bar.total-1)\n self.progress_bar.update()\n\n def run_simulation(self, iteration_limit, rollout_repetition, only_best):\n\n iterations_done_so_far = 0\n while iterations_done_so_far < iteration_limit:\n limit_for_this_pass = iteration_limit - iterations_done_so_far\n jobs_done = self.run_mcts_pass(limit_for_this_pass, rollout_repetition, only_best)\n if jobs_done == 0:\n break\n iterations_done_so_far += jobs_done\n\n def return_root(self):\n return self.mcts.root\n\n\n"
] |
[
[
"numpy.argsort",
"numpy.argpartition"
]
] |
AWNystrom/SparseInteraction
|
[
"68ac222d7a826a344675d0e5196d82cb1711a69a"
] |
[
"sparse_polynomial_features/test.py"
] |
[
"import pyximport; pyximport.install()\nfrom sparse_polynomial_features import SparsePolynomialFeatures\nfrom sklearn.preprocessing import PolynomialFeatures\nimport unittest\nfrom scipy.sparse import random\nfrom code import interact\nimport cPickle\nfrom numpy import array\nfrom numpy.random import choice\nfrom scipy.sparse import csr_matrix\n\nprimes = cPickle.load(open('primes.pickle'))\n\n\"\"\"\ndef test_this(rows, cols, density, interaction_only, degree):\n print(rows, cols, density, interaction_only, degree)\n X_sparse = random(rows, cols, density).tocsr()\n X_dense = X_sparse.toarray()\n \n poly_d = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=False).fit_transform(X_dense)\n \n try:\n poly_s = SparsePolynomialFeatures(degree=degree, interaction_only=interaction_only).fit_transform(X_sparse).toarray()\n except:\n interact(local=locals())\n\n f1 = get_val_dist(poly_d)\n f2 = get_val_dist(poly_s)\n \n all_vals = set()\n all_vals.update(f1.keys())\n all_vals.update(f2.keys())\n \n for v in all_vals:\n assert f1[v] == f2[v]\n\ndef get_val_dist(X):\n f = {}\n for v in X.flatten():\n f[v] = f.get(v, 0) + 1\n return f\n\nfor interaction_only in [True]:#[True, False]:\n for degree in [2, 3]:\n for density in [0., 0.5, 1.]:\n for rows in [1, 10]:\n for cols in [1, 10]:\n test_this(rows, cols, density, interaction_only, degree)\n\"\"\"\n\ndef via_primes(N, D, degree, interaction_only):\n\n X_dense = array(choice(primes, N*D, replace=False), dtype=float).reshape((N, D))\n X_sparse = csr_matrix(X_dense)\n poly_d = PolynomialFeatures(degree=degree, interaction_only=interaction_only,\n include_bias=False).fit_transform(X_dense)\n poly_s = SparsePolynomialFeatures(degree=degree, \n interaction_only=interaction_only).fit_transform(X_sparse).toarray()\n\n #Figure out the mapping on the first one, then ensure that it holds for all the others.\n #row_d and row_s should always agree in the same spot. 
\n sparse_inds_vals = sorted(enumerate(poly_s[0, :]), key=lambda item: item[1])\n dense_inds_vals = sorted(enumerate(poly_d[0, :]), key=lambda item: item[1])\n sparse_inds = zip(*sparse_inds_vals)[0]\n dense_inds = zip(*dense_inds_vals)[0]\n s_to_d_ind = dict(zip(sparse_inds, dense_inds))\n \n for row in xrange(1, N):\n row_d = poly_d[row, :].flatten()\n row_s = poly_s[row, :].flatten()\n assert all(row_s[j] == row_d[s_to_d_ind[j]] for j in range(D))\n print(len(row_s.flatten()))\n\nif __name__ == '__main__':\n via_primes(10, 50, 2, False)"
] |
[
[
"scipy.sparse.csr_matrix",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.random.choice"
]
] |
faustomorales/tensorflow
|
[
"63b84e3b732f050e53902481fa8cb02791a5d789"
] |
[
"tensorflow/compiler/xla/python/xla_client.py"
] |
[
"# Lint as: python3\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An XLA client in Python.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport enum # pylint: disable=g-bad-import-order\nimport inspect\nimport itertools\nimport os\n\nfrom absl import logging\nimport numpy as np\n\n# Note this module does *not* depend on any Python protocol buffers. The XLA\n# Python bindings are currently packaged both as part of jaxlib and as part\n# of TensorFlow. 
If we use protocol buffers here, then importing both jaxlib\n# and TensorFlow may fail with duplicate protocol buffer message definitions.\n\nfrom tensorflow.compiler.xla.python import xla_extension as _xla\nfrom tensorflow.compiler.xla.python.xla_extension import ops\n\n# Most functions are snake_case for consistency with other modules, whereas\n# method names of ComputationBuilder and Computation are CamelCase for\n# consistency with XLA.\n# pylint: disable=invalid-name\n\n\nclass Backend(object, metaclass=abc.ABCMeta):\n \"\"\"Abstract base class for XLA backends.\"\"\"\n\n def __init__(self, platform):\n \"\"\"Creates a new Backend.\n\n Args:\n platform: A string naming the platform; for example 'gpu'.\n \"\"\"\n self.platform = platform\n\n @abc.abstractmethod\n def device_count(self):\n \"\"\"Returns the number of devices known to the backend.\"\"\"\n\n @abc.abstractmethod\n def local_device_count(self):\n \"\"\"Returns the number of devices local to this host.\"\"\"\n\n @abc.abstractmethod\n def devices(self):\n \"\"\"Returns a list of `device_count()` Device subclasses.\"\"\"\n\n @abc.abstractmethod\n def host_id(self):\n \"\"\"Returns the integer ID of this host.\"\"\"\n\n @abc.abstractmethod\n def buffer_from_pyval(self, pyval, device=None):\n \"\"\"Allocates a fresh buffer and populates it with `pyval`.\"\"\"\n\n @abc.abstractmethod\n def make_tuple(self, c_buffers, device):\n \"\"\"Makes a tuple from a sequence of backend buffer objects.\"\"\"\n\n @abc.abstractmethod\n def compile(self, computation, compile_options):\n \"\"\"Compiles a computation. Returns an executable.\"\"\"\n\n @abc.abstractmethod\n def get_default_device_assignment(self, num_replicas):\n \"\"\"Returns the default device assignment that `compile` would use.\n\n If `compile_options.device_assignment` isn't set, `compile` will pick a\n deterministic device assignment based on the number of replicas, possibly\n optimizing for device locality. 
This method returns that assignment, which\n is useful for e.g. manually replicating a value before passing it to a\n compiled executable.\n\n Args:\n num_replicas: the number of replicas needed.\n\n Returns:\n A list of Devices of length `num_replicas` indexed by replica ID.\n \"\"\"\n\n\nclass LocalBackend(Backend):\n \"\"\"XLA backend implemented using the in-process xla::LocalClient API.\"\"\"\n\n def __init__(self, platform, client):\n \"\"\"Creates a new LocalBackend.\n\n Args:\n platform: A string; the user-visible platform name, e.g. 'gpu'.\n client: An _xla.PyLocalClient object.\n \"\"\"\n super(LocalBackend, self).__init__(platform)\n self.client = client\n\n def device_count(self):\n return self.client.device_count()\n\n def local_device_count(self):\n return self.client.local_device_count()\n\n def devices(self):\n return self.client.devices()\n\n def local_devices(self):\n return self.client.local_devices()\n\n def host_id(self):\n return self.client.host_id()\n\n def buffer_from_pyval(self, pyval, device=None):\n if device is None:\n device = self.local_devices()[0]\n return _xla.PyLocalBuffer.from_python(pyval, self.client, device)\n\n def make_tuple(self, c_buffers, device):\n return _xla.PyLocalBuffer.make_tuple(c_buffers, self.client, device)\n\n def compile(self, c_computation, compile_options):\n options = _xla.ExecutableBuildOptions()\n options.num_replicas = compile_options.num_replicas\n if compile_options.result_layout:\n options.result_layout = compile_options.result_layout\n options.debug_options.xla_cpu_fast_math_honor_infs = True\n options.debug_options.xla_cpu_fast_math_honor_nans = True\n options.debug_options.xla_cpu_fast_math_honor_division = True\n options.debug_options.xla_cpu_fast_math_honor_functions = True\n options.debug_options.xla_gpu_enable_fast_min_max = False\n return _xla.LocalExecutable.Compile(c_computation,\n compile_options.argument_layouts,\n options, self.client,\n compile_options.device_assignment)\n\n def 
get_default_device_assignment(self, num_replicas):\n return self.client.GetDefaultDeviceAssignment(num_replicas)\n\n def serialize(self, executable):\n return self.client.SerializeExecutable(executable)\n\n def deserialize(self, serialized_executable):\n return self.client.DeserializeExecutable(serialized_executable, self.client)\n\n\nxla_platform_names = {\n 'cpu': 'Host',\n 'gpu': 'CUDA',\n}\n\n\ndef _cpu_backend_factory():\n client = _xla.LocalClient.Get(\n platform='cpu',\n xla_platform_id=xla_platform_names['cpu'],\n asynchronous=True)\n return LocalBackend(platform='cpu', client=client)\n\n\ndef _gpu_backend_factory():\n \"\"\"Returns a GPU backend. BFC allocator is used by default.\"\"\"\n allocator = os.getenv('XLA_PYTHON_CLIENT_ALLOCATOR', 'default').lower()\n memory_fraction = os.getenv('XLA_PYTHON_CLIENT_MEM_FRACTION')\n preallocate = os.getenv('XLA_PYTHON_CLIENT_PREALLOCATE')\n if allocator not in ('default', 'platform', 'bfc'):\n raise ValueError(\n 'XLA_PYTHON_CLIENT_ALLOCATOR env var must be \"default\", \"platform\", or '\n '\"bfc\", got \"%s\"' % allocator)\n config = _xla.AllocatorConfig()\n if allocator == 'default':\n config.kind = _xla.AllocatorConfig.Kind.DEFAULT\n if allocator == 'platform':\n config.kind = _xla.AllocatorConfig.Kind.PLATFORM\n if allocator == 'bfc':\n config.kind = _xla.AllocatorConfig.Kind.BFC\n if memory_fraction:\n config.memory_fraction = float(memory_fraction)\n config.preallocate = preallocate not in ('0', 'false', 'False')\n\n client = _xla.LocalClient.Get(\n platform='gpu',\n xla_platform_id=xla_platform_names['gpu'],\n asynchronous=True,\n allocator_config=config)\n return LocalBackend(platform='gpu', client=client)\n\n\n# Backend factories, keyed by user-visible name, in increasing priority order.\n_local_backend_factories = collections.OrderedDict([\n ('cpu', _cpu_backend_factory),\n ('gpu', _gpu_backend_factory),\n])\n\n\ndef register_local_backend_factory(name, factory):\n _local_backend_factories[name] = 
factory\n\n\n_local_backends = None\n\n\ndef _get_local_backends():\n \"\"\"Instantiates all known local backends.\"\"\"\n global _local_backends\n if _local_backends is not None:\n return _local_backends\n\n _local_backends = collections.OrderedDict()\n for name, factory in _local_backend_factories.items():\n logging.vlog(2, \"Initializing backend '%s'\" % name)\n try:\n backend = factory()\n except RuntimeError:\n if name == 'cpu':\n # We always expect CPU to initialize successfully.\n raise\n else:\n # If the backend isn't built into the binary, or if it has no devices,\n # we expect a RuntimeError.\n continue\n _local_backends[name] = backend\n return _local_backends\n\n\ndef get_local_backend(name=None):\n \"\"\"Returns a local backend.\n\n Args:\n name: the backend name. If `None`, a default local backend is returned,\n typically `gpu` if one is present, or `cpu` if not. If a string, the named\n backend is returned or an exception raised.\n\n Returns:\n A LocalBackend object.\n \"\"\"\n backends = _get_local_backends()\n if name is not None:\n try:\n return backends[name]\n except KeyError:\n raise RuntimeError('Unknown backend {}'.format(name))\n\n return list(backends.values())[-1]\n\n\nclass OpMetadata(object):\n \"\"\"Python representation of a xla.OpMetadata protobuf.\"\"\"\n __slots__ = ('op_type', 'op_name', 'source_file', 'source_line')\n\n def __init__(self, op_type='', op_name='', source_file='', source_line=0):\n self.op_type = op_type\n self.op_name = op_name\n self.source_file = source_file\n self.source_line = source_line\n\n\ndef CurrentSourceInfoMetadata(op_type=None, op_name=None, skip_frames=1):\n \"\"\"Helper for use in source mapping that returns an OpMetadata object.\"\"\"\n full_filename, lineno = inspect.stack()[skip_frames][1:3]\n filename = os.path.basename(full_filename)\n return OpMetadata(\n op_type=op_type,\n op_name=op_name,\n source_file=filename,\n source_line=lineno)\n\n\nPrimitiveType = _xla.PrimitiveType\n\nbfloat16 = 
_xla.bfloat16_dtype()\n\nXLA_ELEMENT_TYPE_TO_DTYPE = {\n PrimitiveType.PRED: np.dtype('bool'),\n PrimitiveType.S8: np.dtype('int8'),\n PrimitiveType.S16: np.dtype('int16'),\n PrimitiveType.S32: np.dtype('int32'),\n PrimitiveType.S64: np.dtype('int64'),\n PrimitiveType.U8: np.dtype('uint8'),\n PrimitiveType.U16: np.dtype('uint16'),\n PrimitiveType.U32: np.dtype('uint32'),\n PrimitiveType.U64: np.dtype('uint64'),\n PrimitiveType.BF16: np.dtype(bfloat16),\n PrimitiveType.F16: np.dtype('float16'),\n PrimitiveType.F32: np.dtype('float32'),\n PrimitiveType.F64: np.dtype('float64'),\n PrimitiveType.C64: np.dtype('complex64'),\n PrimitiveType.C128: np.dtype('complex128'),\n PrimitiveType.TUPLE: np.dtype(np.object),\n PrimitiveType.TOKEN: np.dtype(np.object),\n}\n\n# Note the conversion on the key. Numpy has a known issue wherein dtype hashing\n# doesn't work as expected (https://github.com/numpy/numpy/issues/7242). Thus,\n# when keying by dtype in this dict, we use the string form of dtypes.\nDTYPE_TO_XLA_ELEMENT_TYPE = {\n str(dt): et for et, dt in XLA_ELEMENT_TYPE_TO_DTYPE.items()\n}\n\n\ndef dtype_to_etype(dtype):\n \"\"\"Convenience function for reading DTYPE_TO_XLA_ELEMENT_TYPE.\"\"\"\n return DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))]\n\n\nShape = _xla.Shape\nShape.__doc__ = \"\"\"\nA Shape is an object defined in C++ that duck types like the following class:\n\nclass Shape(object):\n '''Represents an XLA shape.\n\n A shape is either an array shape, having rank-many integer\n dimensions and an element type (represented by a Numpy dtype), or it\n is a tuple shape, having a shape for every tuple component:\n\n type shape =\n TupleShape of shape list\n | ArrayShape of { dimensions: int list; element_type: dtype }\n '''\n\n @staticmethod\n def tuple_shape(tuple_shapes) -> Shape:\n \"Construct a tuple shape.\"\n\n @staticmethod\n def array_shape(element_type, dimensions, minor_to_major=None) -> Shape:\n\n @staticmethod\n def from_pyval(pyval) -> Shape:\n \"Returns a 
Shape that describes a tuple-tree of Numpy arrays.\"\n\n def __init__(self, str) -> Shape:\n \"Parses a shape string.\"\n def __eq__(self, other: Shape) -> bool:\n def __ne__(self, other: Shape) -> bool:\n def __hash__(self):\n def __repr__(self):\n def is_tuple(self) -> bool:\n def is_array(self) -> bool:\n def tuple_shapes(self) -> [Shape]:\n def numpy_dtype(self) -> np.dtype:\n \"Like element_type(), but returns dtype('O') for a tuple shape.\"\n def xla_element_type(self) -> PrimitiveType:\n def element_type(self) -> np.dtype:\n def dimensions(self) -> (int, int, ...):\n def rank(self) -> int:\n def with_major_to_minor_layout_if_absent(self) -> Shape:\n \"Returns a copy with missing layouts set to major-to-minor.\"\n\n def to_serialized_proto(self) -> bytes:\n \"Returns 'shape' as a serialized proto.\"\n\"\"\"\n\nProgramShape = _xla.ProgramShape\nProgramShape.__doc__ = \"\"\"\nA ProgramShape is a C++ object that duck types like the following class.\n\nclass ProgramShape(object):\n def __init__(self, parameter_shapes, result_shape):\n def parameter_shapes(self) -> [Shape]:\n def result_shape(self) -> Shape:\n def __repr__(self):\n\"\"\"\n\n\nclass Buffer(object):\n \"\"\"Represents a handle to data owned by XLA.\n\n The referent is ready for use in executing a local, compiled\n Computation. On XLA platforms involving a device (e.g. 
GPU), this\n means the referent is in device memory.\n \"\"\"\n\n @staticmethod\n def from_pyval(pyval, device=None, backend=None):\n \"\"\"Copies the `pyval` to a freshly allocated on-device buffer.\"\"\"\n backend = backend or get_local_backend()\n return backend.buffer_from_pyval(pyval, device)\n\n @staticmethod\n def make_tuple(buffers, device, backend=None):\n backend = backend or get_local_backend()\n return backend.make_tuple(buffers, device)\n\n # Buffer is not an instantiable type and exists only for its static methods.\n # The underlying buffer objects are C++ object with the following\n # API:\n # def shape(self) -> Shape:\n # def device(self) -> int:\n # def delete(self):\n # def destructure(self) -> [Buffer]\n # def is_deleted(self) -> bool:\n # def block_host_until_ready(self):\n # \"\"\"Blocks the calling thread until the buffer is ready on device.\"\"\"\n # def copy_to_host_async(self):\n # \"\"\"Requests a copy of the buffer to the host.\n #\n # Does not block waiting for the copy. Values fetched are available via\n # `to_py()`; the purpose of `copy_to_host_async` is to prefetch values\n # for subsequent `to_py()` calls, especially when requesting many values\n # at once.\n # \"\"\"\n # def to_py(self):\n # \"\"\"Returns the value of the buffer as a Python tuple tree of ndarrays.\"\"\"\n #\n # TODO(phawkins): remove Buffer and its static methods completely, have\n # clients call methods on Backend to create buffers.\n\n\n# TODO(phawkins): Alias for backward compatibility. 
Remove after JAX drops\n# compatibility with Jaxlib versions older than 0.1.13.\nLocalBuffer = Buffer\n\n\ndef shape_from_pyval(pyval):\n \"\"\"Returns a Shape that describes a tuple-tree of Numpy arrays.\"\"\"\n\n def convert(pyval):\n if isinstance(pyval, tuple):\n return Shape.tuple_shape(tuple(convert(elt) for elt in pyval))\n else:\n return Shape.array_shape(pyval.dtype, np.shape(pyval))\n\n return convert(pyval)\n\n\ndef transfer_to_infeed(value, device=None):\n \"\"\"Transfers the given value into the XLA infeed queue.\n\n XLA's infeed queue is a single queue that feeds the \"XLA virtual machine\" with\n a totally ordered stream of values. This is dequeued from XLA computations via\n the Infeed() operation.\n\n Args:\n value: the value that the caller would like to enqueue into the XLA infeed\n queue\n device: the device to infeed the value to. Each device has a\n distinct infeed queue.\n \"\"\"\n # TODO(phawkins): support non-default backends.\n backend = get_local_backend()\n device = device or backend.local_devices()[0]\n backend.client.TransferToInfeed(value, device)\n\n\ndef transfer_from_outfeed(shape, device=None):\n \"\"\"Transfers a literal of the given shape from `device`'s outfeed.\n\n Args:\n shape: The shape of the value to transfer from outfeed.\n device: The device from which to transfer the outfeed value. 
Each device has\n a distinct outfeed queue..\n\n Returns:\n The literal value that is produced from the outfeed queue.\n \"\"\"\n # TODO(phawkins): support non-default backends.\n backend = get_local_backend()\n device = device or backend.local_devices()[0]\n return backend.client.TransferFromOutfeed(\n shape.with_major_to_minor_layout_if_absent(), device)\n\n\nDeviceAssignment = _xla.DeviceAssignment\nDeviceAssignment.__doc__ = \"\"\"\nA DeviceAssignment is a C++ object with the following signature.\n\ndef create(assignment):\n '''Builds a device assignment.\n\n Args:\n assignment: a 2D numpy array of device ordinal integers, indexed by\n [replica][computation_in_replica].\n Returns:\n A device assignment.\n '''\n\ndef replica_count():\n '''Returns the number of replicas.'''\ndef computation_count():\n '''Returns the number of computations per replica.'''\n\"\"\"\n\n\nDevice = _xla.Device\n\n\nclass CompileOptions(object):\n \"\"\"Python object for XLA compile options.\n\n These options can be passed to the 'compile' step when using a local XLA\n client.\n \"\"\"\n\n def __init__(self):\n self.xla_dump_to = None\n self.dump_hlo_pass_re = None\n self.dump_hlo_module_re = None\n self.dump_hlo_as_text = None\n self.dump_hlo_as_proto = None\n self.hlo_profile = None\n self.num_replicas = 1\n self.argument_layouts = None\n self.result_layout = None\n self.device_assignment = None\n\n\nclass Computation(object):\n \"\"\"Python wrapper for an XLA Computation.\n\n A Computation can be compiled to form an Executable, or used as a\n subcomputation in ComputationBuilder methods.\n \"\"\"\n\n def __init__(self, c_computation, backend=None):\n self._c_computation = c_computation\n # The backend argument is deprecated. 
Pass a backend to Compile() instead.\n self._backend = backend\n\n @property\n def computation(self):\n return self._c_computation\n\n def GetSerializedProto(self):\n \"\"\"Gets the serialized HloModuleProto proto object in this computation.\n\n Returns:\n A string containing a serialized HloModuleProto proto containing the\n computation and its dependencies.\n \"\"\"\n return self.computation.GetSerializedProto()\n\n def GetHloText(self):\n \"\"\"Get the textual HLO representation of this computation.\n\n Returns:\n A string containing the textual HLO.\n \"\"\"\n return self.computation.GetHloText()\n\n def GetHloDotGraph(self):\n \"\"\"Get a Graphviz Dot representation of this computation.\n\n Returns:\n A string containing the graphviz dot graph.\n \"\"\"\n return self.computation.GetHloDotGraph()\n\n def Compile(self, argument_shapes=None, compile_options=None, backend=None):\n \"\"\"Compiles a computation.\n\n Computations are the result of a \"ComputationBuild'ing\" process.\n\n Arguments:\n argument_shapes: Deprecated. 
Use compile_options.argument_layouts instead.\n compile_options: options to use for compilation, includes an optional laid\n out result shape for the computation.\n backend: a `Backend` for which an executable should be generated.\n\n Returns:\n A Executable instance.\n \"\"\"\n backend = backend or self._backend or get_local_backend()\n\n compile_options = compile_options or CompileOptions()\n if argument_shapes:\n compile_options.argument_layouts = argument_shapes\n return backend.compile(self.computation, compile_options)\n\n def GetProgramShape(self):\n return self._c_computation.GetProgramShape()\n\n def GetReturnValueShape(self):\n return self._c_computation.GetProgramShape().result_shape()\n\n def Hash(self):\n return self._c_computation.Hash()\n\n\n# An Executable is a C++ class that duck types with the following API:\n# class Executable(object):\n# def local_devices(self) -> [Device]:\n# def Execute(self, arguments : [Buffer]) -> Buffer:\n# \"\"\"Execute on one replica with Buffer arguments and return value.\"\"\"\n#\n# def SizeOfGeneratedCodeInBytes(self) -> int:\n# \"\"\"Return generated binary size, or -1 if not known.\"\"\"\n#\n# def ExecutePerReplica(self, arguments: [[Buffer]]) -> [Buffer]:\n# \"\"\"Execute on many replicas with Buffer arguments and return value.\n#\n# Args:\n# arguments: A sequence of sequences of Buffers. The i'th inner sequence\n# comprises the arguments for execution on the i'th replica.\n#\n# Returns:\n# A list of the computation's outputs for each replica, as a Buffer. 
If\n# a shallow sequence of arguments was passed in for `arguments`, then the\n# sole, zero'th replica's output is returned instead, as a Buffer.\n# \"\"\"\n#\n# There are different implementations of Executable for different backends.\n\n\ndef execute_with_python_values(executable, arguments=(), backend=None):\n \"\"\"Execute on one replica with Python values as arguments and output.\"\"\"\n\n backend = backend or get_local_backend()\n\n def put(arg):\n return Buffer.from_pyval(\n arg, device=executable.local_devices()[0], backend=backend)\n\n arguments = [put(arg) for arg in arguments]\n return executable.Execute(arguments).to_py()\n\n\ndef execute_with_python_values_replicated(executable, arguments, backend=None):\n \"\"\"Execute on many replicas with Python values as arguments and output.\n\n Arguments:\n executable: the program to run.\n arguments: a list of lists of Python values indexed by\n `[replica][arg_num]` to pass as inputs.\n backend: the backend we are targeting.\n\n Returns:\n A list of python values, one per replica.\n \"\"\"\n backend = backend or get_local_backend()\n devices = executable.local_devices()\n # pylint: disable=g-complex-comprehension\n flat_args = [(arg, devices[replica])\n for replica, replica_args in enumerate(arguments)\n for arg in replica_args]\n flat_arg_buffers = [\n backend.buffer_from_pyval(pyval, device) for pyval, device in flat_args\n ]\n arg_buffers = []\n for replica_args in arguments:\n arg_buffers.append(flat_arg_buffers[:len(replica_args)])\n flat_arg_buffers = flat_arg_buffers[len(replica_args):]\n return [out.to_py() for out in executable.ExecutePerReplica(arg_buffers)]\n\n\nclass PaddingType(enum.Enum):\n VALID = 1\n SAME = 2\n\n\ndef _convert_padding_type_to_pad_values(padding_type, lhs_dims, rhs_dims,\n window_strides):\n \"\"\"Maps PaddingType or string to pad values (list of pairs of ints).\"\"\"\n if not isinstance(padding_type, (str, PaddingType)):\n msg = 'padding_type must be str or PaddingType, got 
{}.'\n raise TypeError(msg.format(type(padding_type)))\n\n if isinstance(padding_type, str):\n if padding_type.upper() == 'VALID':\n padding_type = PaddingType.VALID\n elif padding_type.upper() == 'SAME':\n padding_type = PaddingType.SAME\n else:\n msg = 'Unknown padding type string: expected \"VALID\" or \"SAME\", got {}.'\n raise ValueError(msg.format(padding_type))\n\n if padding_type == PaddingType.VALID:\n return [(0, 0)] * len(window_strides)\n elif padding_type == PaddingType.SAME:\n out_shape = np.ceil(np.true_divide(lhs_dims, window_strides)).astype(int)\n pad_sizes = [\n max((out_size - 1) * stride + filter_size - in_size, 0)\n for out_size, stride, filter_size, in_size in zip(\n out_shape, window_strides, rhs_dims, lhs_dims)\n ]\n return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]\n else:\n msg = 'Unexpected PaddingType value: {}'\n raise ValueError(msg.format(padding_type))\n\n\nclass ComputationBuilder(object):\n \"\"\"XLA computation builder.\n\n Enqueues XLA ops in sequence and in order to build a\n Computation, which in turn can be compiled into a\n LocalExecutable, which in turn can be locally executed.\n \"\"\"\n\n # The methods of this class map 1-to-1 onto the XLA C++\n # computation builder API. 
Therefore, there's no need to laboriously list\n # arguments and return values for every method, especially where it's obvious.\n #\n # pylint: disable=g-doc-return-or-yield\n # pylint: disable=g-doc-args\n\n def __init__(self, name):\n self._builder = _xla.XlaBuilder(name)\n self._parameter_numbering = itertools.count()\n\n def Build(self, root=None, backend=None):\n \"\"\"Builds a `Computation` from the contents of the builder.\n\n Args:\n root: if not None, the operator containing the return value of the\n computation.\n\n Returns:\n A `Computation`.\n \"\"\"\n if root is not None:\n return Computation(self._builder.Build(root), backend=backend)\n else:\n return Computation(self._builder.Build(), backend=backend)\n\n def GetShape(self, operand):\n return self._builder.GetShape(operand)\n\n def SetOpMetadata(self, op_metadata):\n \"\"\"Set metadata for operations that are about to be enqueued.\"\"\"\n self._builder.SetOpMetadata(op_metadata)\n\n def ClearOpMetadata(self):\n \"\"\"Clear metadata for operations that are about to be enqueued.\"\"\"\n self._builder.ClearOpMetadata()\n\n def SetSharding(self, sharding):\n \"\"\"Set sharding that will be attached to all instructions until cleared.\"\"\"\n self._builder.SetSharding(sharding)\n\n def ClearSharding(self):\n \"\"\"Clears the sharding.\n\n Ops will be shared according to the default placement policy.\n \"\"\"\n self._builder.ClearSharding()\n\n def CreateToken(self):\n \"\"\"Enqueues a CreateToken op onto the computation.\n\n Returns:\n An XlaOp, representing a fresh token.\n \"\"\"\n return ops.CreateToken(self._builder)\n\n def AfterAll(self, tokens):\n \"\"\"Enqueues a after-all op onto the computation.\n\n `AfterAll` takes a variadic number of tokens and produces a single token.\n\n Args:\n tokens: a list of `XlaOp` values representing predecessor tokens.\n\n Returns:\n An `XlaOp`.\n \"\"\"\n return ops.AfterAll(self._builder, tokens)\n\n def Infeed(self, shape, token=None):\n \"\"\"Enqueues an infeed 
op onto the computation.\n\n Infeed operations dequeue data of the given shape from the device's infeed\n queue for subsequent use in the computation.\n\n Args:\n shape: a `Shape` describing the shape of the infed value.\n token: an optional `XlaOp` representing a token after which the infeed\n effect should be sequenced.\n Returns:\n An XlaOp, representing a (value, token) pair.\n \"\"\"\n if token is None:\n token = ops.CreateToken(self._builder)\n return ops.InfeedWithToken(token,\n shape.with_major_to_minor_layout_if_absent())\n\n def Outfeed(self, operand, token=None):\n \"\"\"Enqueues an outfeed op onto the computation.\n\n Outfeed operations enqueue data, using the given operand, onto the XLA\n outfeed queue for subsequent dequeue via the client API.\n\n Args:\n operand: an `XlaOp` representing the data to outfeed.\n token: an `XlaOp` representing a token after which the outfeed should be\n sequenced.\n Returns:\n An `XlaOp` representing a token.\n \"\"\"\n if token is None:\n token = ops.CreateToken(self._builder)\n return ops.OutfeedWithToken(operand, token, self._builder.GetShape(operand),\n '')\n\n def Constant(self, value):\n \"\"\"Enqueues a constant op onto the computation.\n\n Args:\n value: value for the constant, as a np.array with an explicit dtype set to\n one of the supported types.\n\n Returns:\n An XlaOp.\n \"\"\"\n return ops.ConstantLiteral(self._builder, value)\n\n def ConstantF32Scalar(self, value):\n \"\"\"Convenience method to enqueue a scalar F32 constant op.\n\n Args:\n value: a floating-point number.\n\n Returns:\n An XlaOp.\n \"\"\"\n return self.Constant(np.array(value, dtype=np.float32))\n\n def ConstantF64Scalar(self, value):\n \"\"\"Convenience method to enqueue a scalar F32 constant op.\n\n Args:\n value: a floating-point number.\n\n Returns:\n An XlaOp.\n \"\"\"\n return self.Constant(np.array(value, dtype=np.float64))\n\n def ConstantS32Scalar(self, value):\n \"\"\"Convenience method to enqueue a scalar S32 constant op.\n\n 
Args:\n value: a floating-point number.\n\n Returns:\n An XlaOp.\n \"\"\"\n return self.Constant(np.array(value, dtype=np.int32))\n\n def ConstantS64Scalar(self, value):\n \"\"\"Convenience method to enqueue a scalar S64 constant op.\n\n Args:\n value: a floating-point number.\n\n Returns:\n An XlaOp.\n \"\"\"\n return self.Constant(np.array(value, dtype=np.int64))\n\n def ConstantPredScalar(self, value):\n \"\"\"Convenience method to enqueue a scalar PRED constant op.\n\n Args:\n value: a boolean value.\n\n Returns:\n An XlaOp.\n \"\"\"\n return self.Constant(np.array(value, dtype=np.bool))\n\n def ParameterWithShape(self, shape, name=None, parameter_num=None):\n \"\"\"Enqueues a Parameter op onto the computation, given a shape.\n\n Args:\n shape: the parameter's shape as a Shape object.\n name: optional string name for the parameter.\n parameter_num: parameter number in the computation function. If None, the\n next linear parameter number is used. The default value capability can\n be used for auto-numbering. 
If you're using auto-numbering for some\n parameters, use it for *all* parameters to avoid clashes.\n\n Returns:\n An XlaOp.\n \"\"\"\n if name is None:\n name = ''\n if parameter_num is None:\n parameter_num = next(self._parameter_numbering)\n\n return ops.Parameter(self._builder, parameter_num,\n shape.with_major_to_minor_layout_if_absent(),\n name.encode('utf8'))\n\n def ParameterFromNumpy(self, value, name=None, parameter_num=None):\n \"\"\"Enqueues a Parameter op onto the computation.\n\n Args:\n value: a Numpy array, or a nested tuple thereof, from which the shape is\n inferred.\n name: as in ParameterWithShape.\n parameter_num: as in ParameterWithShape.\n\n Returns:\n An XlaOp.\n \"\"\"\n return self.ParameterWithShape(\n shape_from_pyval(value), name=name, parameter_num=parameter_num)\n\n def Iota(self, dtype, size):\n \"\"\"Enqueues an iota constant onto the computation.\n\n Args:\n dtype: expected numpy dtype of the output.\n size: integer, the number of elements in the array.\n\n Returns:\n An XlaOp representing the added iota constant.\n \"\"\"\n element_type = DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))]\n return ops.Iota(self._builder, element_type, size)\n\n def BroadcastedIota(self, dtype, shape, dimension):\n \"\"\"Enqueues a broadcasted iota constant onto the computation.\n\n Args:\n dtype: expected numpy dtype of the output.\n shape: tuple of integers, the expected output shape (dimensions).\n dimension: positive integer, dimension along which to increment values.\n\n Returns:\n An XlaOp representing the added broadcasted iota constant.\n \"\"\"\n element_type = DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))]\n xla_shape = _xla.Shape.array_shape(element_type, shape, None)\n return ops.Iota(self._builder, xla_shape, dimension)\n\n def Concatenate(self, operands, dimension):\n \"\"\"Enqueues a concatenate operation onto the computation.\n\n Args:\n operands: the operands to concatenate.\n dimension: the dimension in which to perform the 
concatenation.\n\n Returns:\n An XlaOp representing the added concatenate op.\n \"\"\"\n return ops.ConcatInDim(self._builder, list(operands), dimension)\n\n def ReplicaId(self):\n \"\"\"Enqueues a ReplicaId operation onto the computation.\n\n Returns:\n A LocalOp representing the replica id.\n \"\"\"\n return _xla.ops.ReplicaId(self._builder)\n\n def Pad(self, operand, padding_value, padding_config):\n \"\"\"Enqueues a Pad operation onto the computation.\n\n Args:\n operand: XlaOp representing the array to pad.\n padding_value: XlaOp representing the scalar pad value.\n padding_config: either a PaddingConfig or a list of integer triples\n (edge_padding_low, edge_padding_high, interior_padding) representing the\n configuration of the padding operation.\n\n Returns:\n An XlaOp representing the added Pad op.\n \"\"\"\n if isinstance(padding_config, tuple) or isinstance(padding_config, list):\n padding_config = GetPaddingConfigFromTriples(padding_config)\n return ops.Pad(operand, padding_value, padding_config)\n\n def Reshape(self, operand, dimensions, new_sizes):\n \"\"\"Enqueues a reshape op onto the computation.\n\n Args:\n operand: XlaOp representing the array to be reshaped.\n dimensions: sequence of integers encoding the order in which dimensions\n are collapsed or None, in which case dimensions are flattened in order.\n new_sizes: sequence of integers encoding the new dimension sizes (shape).\n\n Returns:\n An XlaOp representing the added Reshape op.\n \"\"\"\n if dimensions is None:\n ndim = len(self.GetShape(operand).dimensions())\n dimensions = tuple(range(ndim))\n return ops.Reshape(operand, dimensions, new_sizes)\n\n def AllReduce(self, operand, computation, replica_groups=None):\n \"\"\"AllReduce op.\n\n Args:\n operand: XlaOp representing the input array\n computation: a Computation object - binary reduction function.\n replica_groups: optional, list of lists of ints encoding a partition of\n the set {0, 1, ..., num_replicas} into equally-sized replica 
groups\n within which the all-to-all is performed. If not supplied or None (the\n default), all replicas belong to the same group.\n\n Returns:\n An XlaOp that represents the all-reduced result.\n \"\"\"\n replica_groups_protos = _get_replica_groups_protos(replica_groups)\n return ops.AllReduce(operand, computation.computation,\n replica_groups_protos, None, None)\n\n def AllToAll(self,\n operand,\n split_dimension,\n concat_dimension,\n replica_groups=None):\n \"\"\"AllToAll op.\n\n Args:\n operand: XlaOp representing the input array\n split_dimension: the dimension along which the operand is split\n concat_dimension: the dimension along which the split blocks are\n concatenated\n replica_groups: optional, list of lists of ints encoding a partition of\n the set {0, 1, ..., num_replicas} into equally-sized replica groups\n within which the all-to-all is performed. If not supplied or None (the\n default), all replicas belong to the same group.\n\n Returns:\n An XlaOp that represents the all-to-all concatenation.\n \"\"\"\n replica_groups_protos = _get_replica_groups_protos(replica_groups)\n if not replica_groups:\n split_count = 1\n else:\n split_count = len(replica_groups[0])\n if not all(split_count == len(g) for g in replica_groups):\n raise ValueError('Replica groups must be equally sized')\n return ops.AllToAll(operand, split_dimension, concat_dimension, split_count,\n replica_groups_protos)\n\n def CrossReplicaSum(self, operand, replica_groups=None):\n \"\"\"CrossReplicaSum op.\n\n Args:\n operand: the operand to sum across replica instances.\n replica_groups: optional, list of lists of ints encoding a partition of\n the set {0, 1, ..., num_replicas} into equally-sized replica groups\n within which the cross-replica sum is performed. 
If not supplied or None\n (the default), all replicas belong to the same group.\n\n Returns:\n An XlaOp that represents on each replica the sum of its group's values.\n \"\"\"\n replica_groups_protos = _get_replica_groups_protos(replica_groups)\n return ops.CrossReplicaSum(operand, replica_groups_protos)\n\n def Trans(self, operand):\n \"\"\"Specialized matrix transpose op.\"\"\"\n return ops.Transpose(operand, [1, 0])\n\n def Transpose(self, operand, permutation):\n \"\"\"Transpose op.\"\"\"\n return ops.Transpose(operand, permutation)\n\n def SelectAndScatter(self, operand, select, window_dimensions, window_strides,\n padding, source, init_value, scatter):\n \"\"\"Select and scatter op, used by the gradient of ReduceWindow.\n\n Args:\n operand: XlaOp for array of dimension N and type T over which the windows\n slide.\n select: Computation of type (T, T) -> Pred to apply to the elements of\n each window to indicate which element is selected.\n window_dimensions: sequence of N integers for dimensions of the window.\n window_strides: sequence of N integers for the strides of the window.\n padding: PaddingType representing either 'SAME' or 'VALID ' padding.\n source: XlaOp for array of type T with values to scatter.\n init_value: XlaOp of scalar type T for initial out value.\n scatter: Computation of type (T, T) -> T to apply to each scatter source\n element with its destination element.\n\n Returns:\n An XlaOp representing the added SelectAndScatter op.\n \"\"\"\n pads = _convert_padding_type_to_pad_values(\n padding,\n self.GetShape(operand).dimensions(), window_dimensions, window_strides)\n return ops.SelectAndScatterWithGeneralPadding(operand, select.computation,\n window_dimensions,\n window_strides, pads, source,\n init_value,\n scatter.computation)\n\n def Slice(self, operand, start_indices, limit_indices, strides=None):\n \"\"\"Enqueues a slice operation onto the computation.\n\n Args:\n operand: XlaOp for the N dimensional array to be sliced.\n 
start_indices: iterable of N integers containing the starting indices of\n the slice for each dimension.\n limit_indices: iterable of N integers containing the ending indices\n (exclusive) of the slice for each dimension.\n strides: optional iterable of N integers containing the stride sizes for\n each dimension.\n\n Returns:\n An XlaOp representing the added Slice op.\n \"\"\"\n if strides is None:\n start_indices = list(start_indices)\n strides = [1] * len(start_indices)\n return ops.Slice(operand, start_indices, limit_indices, strides)\n\n def DynamicSlice(self, operand, start_indices, slice_sizes):\n \"\"\"Enqueues a slice op with dynamic start indices onto the computation.\n\n Args:\n operand: XlaOp for the N dimensional array to be sliced.\n start_indices: XlaOp for the 1D array of N integers containing the\n starting indices of the slice.\n slice_sizes: iterable of N integers containing the slice sizes in each\n dimension.\n\n Returns:\n An XlaOp representing the added DynamicSlice op.\n \"\"\"\n slice_sizes = list(slice_sizes)\n if isinstance(start_indices, _xla.XlaOp):\n start_indices = [\n ops.Reshape(ops.Slice(start_indices, [i], [i + 1], [1]), [])\n for i in range(len(slice_sizes))\n ]\n return ops.DynamicSlice(operand, list(start_indices), slice_sizes)\n\n def DynamicUpdateSlice(self, operand, update, start_indices):\n \"\"\"Enqueues a dynamic update slice operation onto the computation.\n\n Args:\n operand: XlaOp for the N dimensional array to be updated.\n update: N dimensional array comprising the slice update.\n start_indices: Rank-1 array of N integers comprising the starting indices\n of the slice along each dimension.\n\n Returns:\n An XlaOp representing the added DynamicUpdateSlice op.\n \"\"\"\n if isinstance(start_indices, _xla.XlaOp):\n ndims = self._builder.GetShape(start_indices).dimensions()[0]\n start_indices = [\n ops.Reshape(ops.Slice(start_indices, [i], [i + 1], [1]), [])\n for i in range(ndims)\n ]\n return 
ops.DynamicUpdateSlice(operand, update, list(start_indices))\n\n def Tuple(self, *elems):\n \"\"\"Enqueues a tuple operation onto the computation.\n\n Args:\n elems: a sequence of tuple operands (each a XlaOp).\n\n Returns:\n An XlaOp representing the added Tuple op.\n \"\"\"\n return ops.Tuple(self._builder, list(elems))\n\n def Call(self, computation_to_apply, operands):\n \"\"\"Enqueues a call operation onto the computation.\n\n Args:\n computation_to_apply: a Computation object.\n operands: an iterable of XlaOp. The number and types of operands must\n match the arity of computation_to_apply.\n\n Returns:\n An XlaOp representing the added call op.\n \"\"\"\n return ops.Call(self._builder, computation_to_apply.computation,\n list(operands))\n\n def CustomCall(self,\n call_target_name,\n operands,\n shape_with_layout,\n operand_shapes_with_layout,\n opaque=None):\n \"\"\"Enqueues a custom call operation onto the computation.\n\n Args:\n call_target_name: the name of the function to call.\n operands: an iterable of XlaOp. 
The number and types of operands must\n match the arity of `operand_shapes_with_layout`.\n shape_with_layout: the shape of the operator's output, with layout.\n operand_shapes_with_layout: the shapes of `operands`, including the\n expected layouts.\n opaque: an opaque string passed to the backend.\n\n Returns:\n An XlaOp representing the added custom call op.\n \"\"\"\n opaque = opaque or b''\n return ops.CustomCall(self._builder, call_target_name,\n list(operands), shape_with_layout,\n list(operand_shapes_with_layout), opaque)\n\n def Map(self, operands, computation_to_apply, dimensions):\n \"\"\"Enqueues a map operation onto the computation.\n\n Args:\n operands: an iterable of XlaOp.\n computation_to_apply: a Computation object.\n dimensions: dimensions over which to apply map the function.\n\n Returns:\n An XlaOp representing the added Map op.\n \"\"\"\n return ops.Map(self._builder, list(operands),\n computation_to_apply.computation, dimensions, [])\n\n def Reduce(self, operand, init_value, computation_to_apply, dimensions):\n \"\"\"Enqueues a reduction operation onto the computation.\n\n Args:\n operand: reduction operand (XlaOp).\n init_value: reduction initial value (XlaOp).\n computation_to_apply: a Computation object - binary reduction function.\n dimensions: sequence of dimensions (integers) to reduce on.\n\n Returns:\n An XlaOp representing the added Reduce op.\n \"\"\"\n return ops.Reduce(self._builder, [operand], [init_value],\n computation_to_apply.computation, dimensions)\n\n def ReduceWindow(self, operand, init_value, computation_to_apply,\n window_dimensions, window_strides, padding):\n \"\"\"Enqueues a windowed reduction operation onto the computation.\n\n Args:\n operand: reduction operand (XlaOp).\n init_value: reduction initial value (XlaOp).\n computation_to_apply: a binary reduction function (Computation).\n window_dimensions: dimensions of window (sequence of integers).\n window_strides: strides for window (sequence of integers).\n padding: 
PaddingType representing either 'SAME' or 'VALID' padding.\n\n Returns:\n An XlaOp representing the added ReduceWindow op.\n \"\"\"\n pads = _convert_padding_type_to_pad_values(\n padding,\n self.GetShape(operand).dimensions(), window_dimensions, window_strides)\n return ops.ReduceWindowWithGeneralPadding(operand, init_value,\n computation_to_apply.computation,\n window_dimensions, window_strides,\n (), (), pads)\n\n def ReduceWindowWithGeneralPadding(self, operand, init_value,\n computation_to_apply, window_dimensions,\n window_strides, base_dilations,\n window_dilations, padding):\n \"\"\"Enqueues a windowed reduction operation onto the computation.\n\n Args:\n operand: reduction operand (XlaOp).\n init_value: reduction initial value (XlaOp).\n computation_to_apply: a binary reduction function (Computation).\n window_dimensions: dimensions of window (sequence of integers).\n window_strides: strides for window (sequence of integers).\n base_dilations: dilations for the base (sequence of integers).\n window_dilations: dilations for window (sequence of integers).\n padding: length-N array-like of pairs of integers of (low, high) padding.\n\n Returns:\n An XlaOp representing the added ReduceWindow op.\n \"\"\"\n return ops.ReduceWindowWithGeneralPadding(operand, init_value,\n computation_to_apply.computation,\n window_dimensions, window_strides,\n base_dilations, window_dilations,\n padding)\n\n def RngNormal(self, mu, sigma, dims):\n \"\"\"Enqueues an RngNormal operation onto the computation.\n\n Args:\n mu: An XlaOp to an F32 scalar specifying the mean.\n sigma: An XlaOp to an F32 scalar specifying the standard deviation.\n dims: A 1D array-like of nonnegative integers specifying the dimensions.\n Returns: a XlaOp to the generated array of F32 values.\n \"\"\"\n shape = _xla.Shape.array_shape(self.GetShape(mu).xla_element_type(), dims)\n return ops.RngNormal(mu, sigma, shape)\n\n def RngUniform(self, a, b, dims):\n \"\"\"Enqueues an RngUniform operation onto the 
computation.\n\n Args:\n a: a XlaOp to an F32, S32, or U32 scalar (consistent with the type of b)\n specifying the low end of the interval [a, b) over which values are\n generated.\n b: a XlaOp to an F32, S32, or U32 scalar (consistent with the type of a)\n specifying the high end of the interval [a, b) over which values are\n generated.\n dims: A 1D array-like of nonnegative integers specifying the dimensions.\n Returns: a XlaOp to the generated array of values with the same numeric type\n (F32, S32, or U32) as the arguments a and b.\n \"\"\"\n shape = _xla.Shape.array_shape(self.GetShape(a).xla_element_type(), dims)\n return ops.RngUniform(a, b, shape)\n\n def While(self, cond, body, init):\n \"\"\"Enqueues a While operation onto the computation.\n\n Args:\n cond: a Computation for the loop condition, which has type T -> PRED\n body: a Computation for the loop body, which has type T -> T\n init: a XlaOp for the initial parameter, which has type T\n Returns: a XlaOp representing the While operation.\n \"\"\"\n return ops.While(cond.computation, body.computation, init)\n\n def Conditional(self, pred, true_operand, true_computation, false_operand,\n false_computation):\n \"\"\"Enqueues a Conditional operation onto the computation.\n\n Args:\n predicate: a XlaOp to test, which has scalar type PRED\n true_operand: a XlaOp of type T_0\n true_computation: a Computation to apply to true_operand, type T_0 -> S\n false_operand: a ComputationDatahandle of type T_1\n false_computation: a Computation to apply to false_operand, type T_1 -> S\n Returns: a XlaOp representing the Conditional operation.\n \"\"\"\n return ops.Conditional(pred, true_operand, true_computation.computation,\n false_operand, false_computation.computation)\n\n def IsConstant(self, operand):\n \"\"\"Checks whether the given operand is a compile-time constant.\n\n Args:\n operand: a ComputationDataHandle to test.\n Returns: bool indicating whether `operand` is a compile-time constant,\n meaning its value 
does not depend on any parametersor, or on stateful\n operators such as `RngNormal` or `Infeed`.\n \"\"\"\n return self._builder.IsConstant(operand)\n\n def BuildConstantSubGraph(self, operand):\n \"\"\"Builds a constant sub graph.\n\n Args:\n operand: a XlaOp to test.\n Returns: a Computation that is rooted on the given `operand` which is a\n compile-time constant.\n \"\"\"\n return ops.BuildConstantSubGraph(operand)\n\n def DotGeneral(self, lhs, rhs, dimension_numbers, precision_config=None):\n \"\"\"Enqueues a general dot operation onto the computation.\n\n Args:\n lhs: XlaOp for the left-hand-side array.\n rhs: XlaOp for the right-hand-side array.\n dimension_numbers: either a DotDimensionNumbers or a nested tuple\n ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)) of lists of\n integers representing the dimensions to treat as contracting dimensions\n and batch dimensions on each input operand.\n Returns: a XlaOp representing the DotGeneral operation.\n \"\"\"\n if isinstance(dimension_numbers, tuple):\n dimension_numbers = GetDotDimensionsFromLists(dimension_numbers)\n return ops.DotGeneral(\n lhs, rhs, dimension_numbers, precision_config=precision_config)\n\n def Conv(self,\n lhs,\n rhs,\n window_strides,\n padding,\n feature_group_count=1,\n batch_group_count=1,\n precision_config=None):\n \"\"\"Enqueues a Conv operation onto the computation.\n\n Args:\n lhs: XlaOp for the rank N+2 array of inputs.\n rhs: XlaOp for the rank N+2 array of kernel weights.\n window_strides: length-N array-like of integer kernel strides.\n padding: PaddingType representing either 'SAME' or 'VALID' padding.\n feature_group_count: number of feature groups for grouped convolution.\n batch_group_count: number of batch groups for grouped convolution.\n Returns: a XlaOp representing the Conv operation.\n \"\"\"\n pads = _convert_padding_type_to_pad_values(\n padding,\n self.GetShape(lhs).dimensions()[2:],\n self.GetShape(rhs).dimensions()[2:], window_strides)\n return 
self.ConvGeneralDilated(\n lhs,\n rhs,\n window_strides,\n pads, [], [],\n dimension_numbers=None,\n feature_group_count=feature_group_count,\n batch_group_count=batch_group_count,\n precision_config=precision_config)\n\n def ConvWithGeneralPadding(self,\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation,\n rhs_dilation,\n feature_group_count=1,\n batch_group_count=1,\n precision_config=None):\n \"\"\"Enqueues a ConvWithGeneralPadding operation onto the computation.\n\n Args:\n lhs: XlaOp for the rank N+2 array of inputs.\n rhs: XlaOp for the rank N+2 array of kernel weights.\n window_strides: length-N array-like of kernel strides.\n padding: length-N array-like of pairs of integers of (low, high) padding.\n lhs_dilation: length-N array-like of dilation factors.\n rhs_dilation: length-N array-like of dilation factors.\n feature_group_count: number of feature groups for grouped convolution.\n batch_group_count: number of batch groups for grouped convolution.\n\n Returns:\n A ComputationdataHandle representing the added ConvWithGeneralPadding op.\n \"\"\"\n return self.ConvGeneralDilated(\n lhs,\n rhs,\n list(window_strides),\n list(padding),\n list(lhs_dilation),\n list(rhs_dilation),\n dimension_numbers=None,\n feature_group_count=feature_group_count,\n batch_group_count=batch_group_count,\n precision_config=precision_config)\n\n def _GetConvDimensionNumbers(self, num_spatial_dims):\n \"\"\"Create ConvolutionDimensionNumbers proto for convolutions.\"\"\"\n nd = num_spatial_dims\n dimension_numbers = ConvolutionDimensionNumbers()\n dimension_numbers.input_batch_dimension = 0\n dimension_numbers.input_feature_dimension = 1\n dimension_numbers.output_batch_dimension = 0\n dimension_numbers.output_feature_dimension = 1\n dimension_numbers.kernel_output_feature_dimension = 0\n dimension_numbers.kernel_input_feature_dimension = 1\n dimension_numbers.input_spatial_dimensions.extend(range(2, 2 + nd))\n dimension_numbers.kernel_spatial_dimensions.extend(range(2, 2 + 
nd))\n dimension_numbers.output_spatial_dimensions.extend(range(2, 2 + nd))\n return dimension_numbers\n\n def ConvGeneralDilated(self,\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation,\n rhs_dilation,\n dimension_numbers=None,\n feature_group_count=1,\n batch_group_count=1,\n precision_config=None):\n \"\"\"Enqueues a ConvGeneralDilated operation onto the computation.\n\n Args:\n lhs: XlaOp for the rank N+2 array of inputs.\n rhs: XlaOp for the rank N+2 array of kernel weights.\n window_strides: length-N array-like of integer kernel strides.\n padding: length-N array-like of pairs of integers of (low, high) padding.\n lhs_dilation: length-N array-like of integer dilation factors.\n rhs_dilation: length-N array-like of integer dilation factors.\n dimension_numbers: optional, either a ConvolutionDimensionNumbers object\n or a tuple (lhs_spec, rhs_spec, out_spec). Each element is a string of\n length N+2 identifying by position: (1) batch dimensions in lhs, rhs,\n and the output with the character 'N', (2) feature dimensions in lhs\n and the output with the character 'C', (3) input and output feature\n dimensions in rhs with the characters 'I' and 'O' respectively, and\n (4) spatial dimension correspondences between lhs, rhs, and the output\n using any distinct characters. For example, to indicate dimension\n numbers consistent with the Conv operation with two spatial\n dimensions, one could use ('NCHW', 'OIHW', 'NCHW'). As another\n example, to indicate dimension numbers consistent with the TensorFlow\n Conv2D operation, one could use ('NHWC', 'HWIO', 'NHWC'). When using\n the latter form of convolution dimension specification, window strides\n are associated with spatial dimension character labels according to\n the order in which the labels appear in the rhs_spec string, so that\n window_strides[0] is matched with the dimension corresponding to the\n first character appearing in rhs_spec that is not 'I' or 'O'. 
By\n default, use the same dimension numbering as Conv and\n ConvWithGeneralPadding.\n feature_group_count: number of feature groups for grouped convolution.\n batch_group_count: number of batch groups for grouped convolution.\n Returns: a XlaOp representing the ConvGeneralDilated operation.\n \"\"\"\n if dimension_numbers is None:\n dimension_numbers = self._GetConvDimensionNumbers(len(window_strides))\n elif isinstance(dimension_numbers, tuple):\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n dimension_numbers = ConvolutionDimensionNumbers()\n\n dimension_numbers.input_batch_dimension = lhs_spec.index('N')\n dimension_numbers.input_feature_dimension = lhs_spec.index('C')\n dimension_numbers.output_batch_dimension = out_spec.index('N')\n dimension_numbers.output_feature_dimension = out_spec.index('C')\n dimension_numbers.kernel_output_feature_dimension = rhs_spec.index('O')\n dimension_numbers.kernel_input_feature_dimension = rhs_spec.index('I')\n\n dimension_numbers.kernel_spatial_dimensions.extend(\n i for i, c in enumerate(rhs_spec) if c not in {'I', 'O'})\n dimension_numbers.input_spatial_dimensions.extend(\n sorted((i for i, c in enumerate(lhs_spec) if c not in {'N', 'C'}),\n key=lambda i: rhs_spec.index(lhs_spec[i])))\n dimension_numbers.output_spatial_dimensions.extend(\n sorted((i for i, c in enumerate(out_spec) if c not in {'N', 'C'}),\n key=lambda i: rhs_spec.index(out_spec[i])))\n return ops.ConvGeneralDilated(\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation,\n rhs_dilation,\n dimension_numbers,\n feature_group_count,\n batch_group_count,\n precision_config=precision_config)\n\n def Sort(self, operands, dimension=-1, comparator=None):\n \"\"\"Enqueues a sort operation onto the computation.\n\n Args:\n operands: either an XlaOp or a sequence of XlaOps to sort. All operands\n must be arrays with the same dimensions.\n dimension: the array dimension over which to sort.\n comparator: a comparator XlaComputation. 
See the XLA operation semantics\n for details.\n\n Returns:\n Either an XlaOp or a tuple of XlaOps (if `operands` was an XlaOp or\n a tuple of XlaOps, respectively.)\n \"\"\"\n operands = (\n list(operands)\n if isinstance(operands, collections.Sequence) else [operands])\n return ops.Sort(self._builder, operands, dimension,\n comparator.computation if comparator else None)\n\n def SortKeyVal(self, keys, values, dimension=-1):\n \"\"\"Enqueues a key-value sort operation onto the computation.\n\n Deprecated. Use `Sort` instead.\n \"\"\"\n return ops.Sort(self._builder, [keys, values], dimension)\n\n def QR(self, a, full_matrices=True):\n \"\"\"Enqueues a QR decomposition onto the computation.\"\"\"\n return self.Tuple(*ops.QR(a, full_matrices))\n\n def TriangularSolve(self,\n a,\n b,\n left_side=False,\n lower=False,\n transpose_a=False,\n conjugate_a=False,\n unit_diagonal=False):\n \"\"\"Enqueues a triangular-solve operation onto the computation.\"\"\"\n if not transpose_a:\n transpose = _xla.TriangularSolveOptions_Transpose.NO_TRANSPOSE\n if conjugate_a:\n a = self.Conj(a)\n else:\n transpose = (\n _xla.TriangularSolveOptions_Transpose.ADJOINT\n if conjugate_a else _xla.TriangularSolveOptions_Transpose.TRANSPOSE)\n return ops.TriangularSolve(a, b, left_side, lower, unit_diagonal, transpose)\n\n def Eigh(self, a, full_matrices=True):\n \"\"\"Enqueues a symmetric/Hermitian eigendecomposition.\"\"\"\n return self.Tuple(*ops.Eigh(a, full_matrices))\n\n def SVD(self, a):\n \"\"\"Enqueues a singular value decomposition.\"\"\"\n return self.Tuple(*ops.SVD(a))\n\n def Gather(self,\n a,\n start_indices,\n dimension_numbers,\n slice_sizes,\n indices_are_sorted=False):\n \"\"\"Enqueues a Gather operation onto the computation.\"\"\"\n return ops.Gather(a, start_indices, dimension_numbers, slice_sizes,\n indices_are_sorted)\n\n def Scatter(self,\n a,\n scatter_indices,\n updates,\n update_computation,\n dimension_numbers,\n indices_are_sorted=False,\n unique_indices=False):\n 
\"\"\"Enqueues a Scatter operation onto the computation.\"\"\"\n return ops.Scatter(a, scatter_indices, updates,\n update_computation.computation, dimension_numbers,\n indices_are_sorted, unique_indices)\n\n def Fft(self, operand, fft_type, fft_lengths):\n \"\"\"Enqueues a FFT operation onto the computation.\"\"\"\n return ops.Fft(operand, fft_type, fft_lengths)\n\n\nFftType = _xla.FftType\n\n_UNARY_OPS = [\n 'Not',\n 'Clz',\n 'Abs',\n 'Exp',\n 'Expm1',\n 'Floor',\n 'Round',\n 'Ceil',\n 'Log',\n 'Log1p',\n 'Sign',\n 'Cos',\n 'Sin',\n 'Tanh',\n 'IsFinite',\n 'Sqrt',\n 'Rsqrt',\n 'Square',\n 'Reciprocal',\n 'Neg',\n 'Erf',\n 'Erfc',\n 'ErfInv',\n 'Lgamma',\n 'Digamma',\n 'BesselI0e',\n 'BesselI1e',\n 'Acos',\n 'Asin',\n 'Atan',\n 'Tan',\n 'Acosh',\n 'Asinh',\n 'Atanh',\n 'Cosh',\n 'Sinh',\n 'Real',\n 'Imag',\n 'Conj',\n]\n\n_BINARY_OPS = [\n 'Eq',\n 'Ne',\n 'Ge',\n 'Gt',\n 'Lt',\n 'Le',\n 'Add',\n 'Sub',\n 'Mul',\n 'Div',\n 'Rem',\n 'Max',\n 'Min',\n 'And',\n 'Or',\n 'Xor',\n 'Pow',\n 'ShiftLeft',\n 'ShiftRightArithmetic',\n 'ShiftRightLogical',\n 'Atan2',\n 'Complex',\n 'NextAfter',\n]\n\n_OTHER_OPS = [\n 'BitcastConvertType',\n 'Broadcast',\n 'BroadcastInDim',\n 'Cholesky',\n 'Clamp',\n 'Collapse',\n 'CollectivePermute',\n 'ConvertElementType',\n 'Dot',\n 'GetTupleElement',\n 'ReducePrecision',\n 'RegularizedIncompleteBeta',\n 'Rev',\n 'Select',\n 'SliceInDim',\n]\n\n\ndef _forward_methods_to_local_builder():\n \"\"\"Forward remaining ComputationBuilder methods to the C API.\n\n Set up methods, corresponding to XLA operations,\n whose calls are forwarded in a boilerplate manner to the underlying\n _xla.ops API.\n \"\"\"\n\n def forward_op(target_method):\n\n def forward(builder, *args, **kwargs):\n del builder\n return target_method(*args, **kwargs)\n\n return forward\n\n for method_name in itertools.chain(_UNARY_OPS, _BINARY_OPS, _OTHER_OPS):\n forward = forward_op(getattr(ops, method_name))\n forward.__name__ = method_name\n setattr(ComputationBuilder, 
method_name, forward)\n\n\n_forward_methods_to_local_builder()\n\n\ndef register_custom_call_target(name, fn, platform='cpu'):\n \"\"\"Registers a custom call target.\n\n Args:\n name: bytes containing the name of the function.\n fn: a PyCapsule object containing the function pointer.\n platform: the target platform.\n \"\"\"\n _xla.RegisterCustomCallTarget(name, fn, xla_platform_names[platform])\n\n# Deprecated. Use register_custom_call_target instead.\nregister_cpu_custom_call_target = register_custom_call_target\n\n\nclass PaddingConfigDimension(object):\n \"\"\"Python representation of a xla.PaddingConfigDimension protobuf.\"\"\"\n __slots__ = ('edge_padding_low', 'edge_padding_high', 'interior_padding')\n\n def __init__(self):\n self.edge_padding_low = 0\n self.edge_padding_high = 0\n self.interior_padding = 0\n\n\nclass PaddingConfig(object):\n \"\"\"Python representation of a xla.PaddingConfig protobuf.\"\"\"\n __slots__ = ('dimensions',)\n\n def __init__(self):\n self.dimensions = []\n\n\ndef GetPaddingConfigFromTriples(triples):\n \"\"\"Create PaddingConfig proto from list of triples of integers.\"\"\"\n padding_config = PaddingConfig()\n for lo, hi, interior in triples:\n dimension = PaddingConfigDimension()\n dimension.edge_padding_low = lo\n dimension.edge_padding_high = hi\n dimension.interior_padding = interior\n padding_config.dimensions.append(dimension)\n return padding_config\n\n\nclass DotDimensionNumbers(object):\n \"\"\"Python representation of a xla.DotDimensionNumbers protobuf.\"\"\"\n __slots__ = ('lhs_contracting_dimensions', 'rhs_contracting_dimensions',\n 'lhs_batch_dimensions', 'rhs_batch_dimensions')\n\n def __init__(self):\n self.lhs_contracting_dimensions = []\n self.rhs_contracting_dimensions = []\n self.lhs_batch_dimensions = []\n self.rhs_batch_dimensions = []\n\n\ndef GetDotDimensionsFromLists(dimension_numbers):\n (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers\n dot_dims_proto = DotDimensionNumbers()\n 
dot_dims_proto.lhs_contracting_dimensions.extend(lhs_contract)\n dot_dims_proto.rhs_contracting_dimensions.extend(rhs_contract)\n dot_dims_proto.lhs_batch_dimensions.extend(lhs_batch)\n dot_dims_proto.rhs_batch_dimensions.extend(rhs_batch)\n return dot_dims_proto\n\n\nclass ConvolutionDimensionNumbers(object):\n \"\"\"Python representation of a xla.ConvolutionDimensionNumbers protobuf.\"\"\"\n __slots__ = ('input_batch_dimension', 'input_feature_dimension',\n 'input_spatial_dimensions', 'kernel_input_feature_dimension',\n 'kernel_output_feature_dimension', 'kernel_spatial_dimensions',\n 'output_batch_dimension', 'output_feature_dimension',\n 'output_spatial_dimensions')\n\n def __init__(self):\n self.input_batch_dimension = 0\n self.input_feature_dimension = 0\n self.input_spatial_dimensions = []\n self.kernel_input_feature_dimension = 0\n self.kernel_output_feature_dimension = 0\n self.kernel_spatial_dimensions = []\n self.output_batch_dimension = 0\n self.output_feature_dimension = 0\n self.output_spatial_dimensions = []\n\n\nclass OpSharding(object):\n \"\"\"Python representation of a xla.OpSharding protobuf.\"\"\"\n __slots__ = ('type', 'tile_assignment_dimensions', 'tile_assignment_devices',\n 'tuple_shardings')\n\n Type = _xla.OpSharding_Type\n\n def __init__(self):\n self.type = self.Type.REPLICATED\n self.tile_assignment_dimensions = []\n self.tile_assignment_devices = []\n self.tuple_shardings = []\n\n\nclass PrecisionConfig(object):\n \"\"\"Python representation of a xla.PrecisionConfig protobuf.\"\"\"\n __slots__ = ('operand_precision',)\n\n Precision = _xla.PrecisionConfig_Precision\n\n def __init__(self):\n self.operand_precision = []\n\n\nclass GatherDimensionNumbers(object):\n \"\"\"Python representation of a xla.GatherDimensionNumbers protobuf.\"\"\"\n __slots__ = ('offset_dims', 'collapsed_slice_dims', 'start_index_map',\n 'index_vector_dim')\n\n def __init__(self):\n self.offset_dims = []\n self.collapsed_slice_dims = []\n self.start_index_map = 
[]\n self.index_vector_dim = 0\n\n\nclass ScatterDimensionNumbers(object):\n \"\"\"Python representation of a xla.ScatterDimensionNumbers protobuf.\"\"\"\n __slots__ = ('update_window_dims', 'inserted_window_dims',\n 'scatter_dims_to_operand_dims', 'index_vector_dim')\n\n def __init__(self):\n self.update_window_dims = []\n self.inserted_window_dims = []\n self.scatter_dims_to_operand_dims = []\n self.index_vector_dim = 0\n\n\nclass ReplicaGroup(object):\n \"\"\"Python representation of a xla.ReplicaGroup protobuf.\"\"\"\n __slots__ = ('replica_ids',)\n\n def __init__(self):\n self.replica_ids = []\n\n\ndef _make_replica_group_proto(replica_group):\n replica_group_proto = ReplicaGroup()\n replica_group_proto.replica_ids.extend(replica_group)\n return replica_group_proto\n\n\ndef _get_replica_groups_protos(replica_groups):\n if replica_groups is None:\n replica_groups_protos = [] # special value for XLA API\n else:\n replica_groups = list(replica_groups)\n replica_groups_protos = [\n _make_replica_group_proto(group) for group in replica_groups\n ]\n return replica_groups_protos\n"
] |
[
[
"tensorflow.compiler.xla.python.xla_extension.ops.Scatter",
"tensorflow.compiler.xla.python.xla_extension.ops.Fft",
"tensorflow.compiler.xla.python.xla_extension.AllocatorConfig",
"tensorflow.compiler.xla.python.xla_extension.ops.Reshape",
"numpy.true_divide",
"tensorflow.compiler.xla.python.xla_extension.ExecutableBuildOptions",
"tensorflow.compiler.xla.python.xla_extension.ops.CrossReplicaSum",
"tensorflow.compiler.xla.python.xla_extension.ops.Gather",
"tensorflow.compiler.xla.python.xla_extension.ops.AfterAll",
"tensorflow.compiler.xla.python.xla_extension.ops.BuildConstantSubGraph",
"numpy.dtype",
"tensorflow.compiler.xla.python.xla_extension.ops.TriangularSolve",
"tensorflow.compiler.xla.python.xla_extension.PyLocalBuffer.from_python",
"tensorflow.compiler.xla.python.xla_extension.ops.AllReduce",
"tensorflow.compiler.xla.python.xla_extension.LocalClient.Get",
"tensorflow.compiler.xla.python.xla_extension.ops.Conditional",
"tensorflow.compiler.xla.python.xla_extension.ops.SVD",
"tensorflow.compiler.xla.python.xla_extension.ops.SelectAndScatterWithGeneralPadding",
"tensorflow.compiler.xla.python.xla_extension.ops.AllToAll",
"tensorflow.compiler.xla.python.xla_extension.RegisterCustomCallTarget",
"tensorflow.compiler.xla.python.xla_extension.ops.Transpose",
"tensorflow.compiler.xla.python.xla_extension.ops.ConvGeneralDilated",
"tensorflow.compiler.xla.python.xla_extension.LocalExecutable.Compile",
"tensorflow.compiler.xla.python.xla_extension.XlaBuilder",
"tensorflow.compiler.xla.python.xla_extension.ops.Pad",
"tensorflow.compiler.xla.python.xla_extension.PyLocalBuffer.make_tuple",
"numpy.array",
"tensorflow.compiler.xla.python.xla_extension.ops.QR",
"tensorflow.compiler.xla.python.xla_extension.Shape.array_shape",
"tensorflow.compiler.xla.python.xla_extension.ops.Iota",
"tensorflow.compiler.xla.python.xla_extension.ops.Slice",
"numpy.shape",
"tensorflow.compiler.xla.python.xla_extension.ops.ReplicaId",
"tensorflow.compiler.xla.python.xla_extension.ops.Reduce",
"tensorflow.compiler.xla.python.xla_extension.ops.While",
"tensorflow.compiler.xla.python.xla_extension.ops.DotGeneral",
"tensorflow.compiler.xla.python.xla_extension.ops.Eigh",
"tensorflow.compiler.xla.python.xla_extension.ops.RngNormal",
"tensorflow.compiler.xla.python.xla_extension.ops.ReduceWindowWithGeneralPadding",
"tensorflow.compiler.xla.python.xla_extension.ops.CreateToken",
"tensorflow.compiler.xla.python.xla_extension.ops.ConstantLiteral",
"tensorflow.compiler.xla.python.xla_extension.ops.RngUniform",
"tensorflow.compiler.xla.python.xla_extension.bfloat16_dtype",
"tensorflow.compiler.xla.python.xla_extension.ops.Sort"
]
] |
fastyangmh/AudioGANomaly
|
[
"d877f050606765b17bb6755bd70277857326b5e1"
] |
[
"src/predict.py"
] |
[
"# import\nfrom src.project_parameters import ProjectParameters\nfrom src.model import create_model\nimport torch\nfrom DeepLearningTemplate.data_preparation import parse_transforms, AudioLoader\nfrom DeepLearningTemplate.predict import AudioPredictDataset\nfrom typing import TypeVar, Any\nT_co = TypeVar('T_co', covariant=True)\nfrom os.path import isfile\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport numpy as np\n\n\n# class\nclass AudioPredictDataset(AudioPredictDataset):\n def __init__(self, root, loader, transform) -> None:\n super().__init__(root, loader, transform)\n\n def __getitem__(self, index) -> T_co:\n sample = super().__getitem__(index)\n # convert the range of the sample to 0~1\n sample = (sample - sample.min()) / (sample.max() - sample.min())\n return sample\n\n\nclass Predict:\n def __init__(self, project_parameters) -> None:\n self.model = create_model(project_parameters=project_parameters).eval()\n if project_parameters.device == 'cuda' and torch.cuda.is_available():\n self.model = self.model.cuda()\n self.transform = parse_transforms(\n transforms_config=project_parameters.transforms_config)['predict']\n self.device = project_parameters.device\n self.batch_size = project_parameters.batch_size\n self.num_workers = project_parameters.num_workers\n self.classes = project_parameters.classes\n self.loader = AudioLoader(sample_rate=project_parameters.sample_rate)\n self.in_chans=project_parameters.in_chans\n\n def predict(self, inputs) -> Any:\n result = []\n fake_samples = []\n if isfile(path=inputs):\n # predict the file\n sample = self.loader(path=inputs)\n in_chans, _ = sample.shape\n if in_chans != self.in_chans:\n sample = sample.mean(0)\n sample = torch.cat(\n [sample[None] for idx in range(self.in_chans)])\n # the transformed sample dimension is (1, in_chans, freq, time)\n sample = self.transform(sample)[None]\n # convert the range of the sample to 0~1\n sample = (sample - sample.min()) / (sample.max() - sample.min())\n 
if self.device == 'cuda' and torch.cuda.is_available():\n sample = sample.cuda()\n with torch.no_grad():\n score, sample_hat = self.model(sample)\n result.append([score.item()])\n fake_samples.append(sample_hat.cpu().data.numpy())\n else:\n # predict the file from folder\n dataset = AudioPredictDataset(root=inputs,\n loader=self.loader,\n transform=self.transform)\n pin_memory = True if self.device == 'cuda' and torch.cuda.is_available(\n ) else False\n data_loader = DataLoader(dataset=dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n pin_memory=pin_memory)\n with torch.no_grad():\n for sample in tqdm(data_loader):\n if self.device == 'cuda' and torch.cuda.is_available():\n sample = sample.cuda()\n score, sample_hat = self.model(sample)\n result.append(score.tolist())\n fake_samples.append(sample_hat.cpu().data.numpy())\n result = np.concatenate(result, 0).reshape(-1, 1)\n fake_samples = np.concatenate(fake_samples, 0)\n print(', '.join(self.classes))\n print(result)\n return result, fake_samples\n\n\nif __name__ == '__main__':\n # project parameters\n project_parameters = ProjectParameters().parse()\n\n # predict file\n result = Predict(project_parameters=project_parameters).predict(\n inputs=project_parameters.root)\n"
] |
[
[
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
]
] |
CrtomirJuren/npTDMS
|
[
"cbebbf31d3977af06dbfa7f9602c89627134621d"
] |
[
"study/matplotlib-datatime.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 27 13:22:05 2020\n\n@author: crtjur\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport numpy as np\nimport datetime as dt\n\nnp.random.seed(1)\n\nN = 100\ny = np.random.rand(N)\n\nstart = dt.datetime.now()\nduration = 10\ndelta = dt.timedelta(days=1)\nprint(delta.total_seconds())\n\nend = start + dt.timedelta(days=100)\ndays = mdates.drange(start, end, delta)\nprint(days)\n\nprint(\"type(start): \")\nprint(type(start))\nprint(\"type(end): \")\nprint(type(end))\nprint(\"type(delta): \")\nprint(type(delta))\n\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\nplt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))\nplt.plot(days,y)\nplt.gcf().autofmt_xdate()\nplt.show()"
] |
[
[
"numpy.random.rand",
"numpy.random.seed",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.plot",
"matplotlib.dates.drange",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"matplotlib.dates.DayLocator"
]
] |
zouguojian/Traffic-speed-prediction
|
[
"4b9917a9e1147c37b64e51be3c060af4bdb9544d",
"4b9917a9e1147c37b64e51be3c060af4bdb9544d"
] |
[
"MT-STFLN /model/normalization.py",
"MT-STFLN /comparison_model/fi_gru.py"
] |
[
"# -- coding: utf-8 --\nimport tensorflow as tf\n\nclass Normalization(object):\n def __init__(self,inputs,out_size,is_training=False):\n self.inputs=inputs\n self.out_size=out_size\n self.is_training=is_training\n\n def feed_forward(self):\n '''\n the feed forward layer\n :return: [batch,time_size,filed_size,new_features]\n '''\n inputs_shape = self.inputs.get_shape().as_list() # list type\n with tf.variable_scope('w',reuse=tf.AUTO_REUSE):\n w=tf.Variable(tf.random_normal(shape=[inputs_shape[-1], self.out_size], stddev=0.1, dtype=tf.float32))\n self.inputs=tf.reshape(self.inputs,shape=[-1,inputs_shape[-1]]) #改变数据维度,为了后序的tf.matmul(self.inputs,w)的计算\n self.inputs=tf.nn.tanh(tf.matmul(self.inputs,w)) #这里可以添加激活函数,如:tanh\n inputs_shape[-1]=self.out_size\n self.inputs=tf.reshape(self.inputs,shape=[i if i!=None else -1 for i in inputs_shape]) #变成为原来输出时候的维度\n return self.inputs\n\n def normal(self):\n '''\n if is_training is true ,use the normalization function\n else: do not\n :return:\n '''\n if self.is_training:\n # self.inputs=tf.sparse_to_dense(self.inputs,)\n self.inputs=tf.layers.batch_normalization(self.inputs, training=True)",
"# -- coding: utf-8 --\n\nimport tensorflow as tf\n\nclass rnns(object):\n def __init__(self, batch_size, predict_time=1, layer_num=1, nodes=128, placeholders=None):\n '''\n\n :param batch_size:\n :param layer_num:\n :param nodes:\n :param is_training:\n '''\n self.batch_size=batch_size\n self.layer_num=layer_num\n self.nodes=nodes\n self.predict_time=predict_time\n self.placeholders=placeholders\n self.encoder()\n self.decoder()\n\n def encoder(self):\n '''\n :return: shape is [batch size, time size, hidden size]\n '''\n\n def cell():\n lstm_cell=tf.nn.rnn_cell.GRUCell(num_units=self.nodes)\n lstm_cell_=tf.nn.rnn_cell.DropoutWrapper(cell=lstm_cell,output_keep_prob=1.0)\n return lstm_cell_\n self.e_mlstm=tf.nn.rnn_cell.MultiRNNCell([cell() for _ in range(self.layer_num)])\n self.e_initial_state = self.e_mlstm.zero_state(self.batch_size, tf.float32)\n\n def decoder(self):\n def cell():\n lstm_cell=tf.nn.rnn_cell.GRUCell(num_units=self.nodes)\n lstm_cell_=tf.nn.rnn_cell.DropoutWrapper(cell=lstm_cell,output_keep_prob=1-self.placeholders['dropout'])\n return lstm_cell_\n self.d_mlstm=tf.nn.rnn_cell.MultiRNNCell([cell() for _ in range(self.layer_num)])\n self.d_initial_state = self.d_mlstm.zero_state(self.batch_size, tf.float32)\n\n def encoding(self, inputs):\n '''\n :param inputs:\n :return: shape is [batch size, time size, hidden size]\n '''\n # out put the store data\n with tf.variable_scope('encoder_lstm'):\n self.ouputs, self.state = tf.nn.dynamic_rnn(cell=self.e_mlstm, inputs=inputs,initial_state=self.e_initial_state,dtype=tf.float32)\n return self.ouputs\n\n def decoding(self, encoder_hs, site_num):\n '''\n :param encoder_hs:\n :return: shape is [batch size, prediction size]\n '''\n\n pres = []\n h_state = encoder_hs[:, -1, :]\n initial_state=self.d_initial_state\n\n for i in range(self.predict_time):\n h_state = tf.expand_dims(input=h_state, axis=1)\n\n with tf.variable_scope('decoder_lstm'):\n h_state, state = tf.nn.dynamic_rnn(cell=self.d_mlstm, 
inputs=h_state,\n initial_state=initial_state, dtype=tf.float32)\n initial_state = state\n\n h_state=tf.reshape(h_state,shape=[-1,self.nodes])\n\n results = tf.layers.dense(inputs=h_state, units=1, name='layer', reuse=tf.AUTO_REUSE)\n pre=tf.reshape(results,shape=[-1,site_num])\n # to store the prediction results for road nodes on each time\n pres.append(tf.expand_dims(pre, axis=-1))\n\n return tf.concat(pres, axis=-1,name='output_y')\n\nimport numpy as np\nif __name__ == '__main__':\n train_data=np.random.random(size=[32,3,16])\n x=tf.placeholder(tf.float32, shape=[32, 3, 16])\n r=lstm(32,10,2,128)\n hs=r.encoding(x)\n\n print(hs.shape)\n\n pre=r.decoding(hs)\n print(pre.shape)"
] |
[
[
"tensorflow.matmul",
"tensorflow.layers.batch_normalization",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.random_normal"
],
[
"tensorflow.concat",
"tensorflow.expand_dims",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.layers.dense",
"numpy.random.random",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.nn.dynamic_rnn"
]
] |
nickrobinson251/hypersearch
|
[
"152be98f16d2a9e19b691d394386beab1a00615a"
] |
[
"examples/optimize_hyperparameters.py"
] |
[
"\"\"\"Search for Hyperparameters.\"\"\"\nimport argparse\nimport logging\nimport os\nimport dask_ml.joblib # think this is needed to regiaster dask joblib context\nfrom dask.distributed import Client, progress\nfrom sklearn.datasets import load_digits\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\n\nfrom hypersearch import search, dump, launch_cluster, parse_params\n\n\nCURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Optimize model hyperparameters.\")\n parser.add_argument(\"filepath\", type=str,\n help=\"Path to file where model will be saved.\")\n parser.add_argument(\"--method\", choices=['bayes', 'grid', 'randomized'],\n default=\"randomized\",\n help=\"Hyperparameter search method.\")\n parser.add_argument(\"--params\", type=str,\n help=\"Path to yaml file defining search space.\")\n parser.add_argument(\"--cluster\",\n choices=['local', 'LSF', 'Moab', 'PBS', 'SGE', 'Slurm'],\n default=\"local\",\n help=\"Type of cluster to launch.\")\n parser.add_argument(\"--scale\", type=int,\n help=\"Number of cluster workers to requesat.\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n # replace with your data\n digits = load_digits()\n X, y = digits.data, digits.target\n\n # replace with your model\n model = RandomForestClassifier(n_estimators=20)\n\n # replace with your hyperparameter search config\n config = os.path.join(CURRENT_DIR, \"params.yaml\")\n params = parse_params(config, args.method)\n\n # set up context manager for distributing computations\n LOGGER = logging.getLogger(__name__)\n CLUSTER = launch_cluster(args.cluster, args.scale)\n CLIENT = Client(CLUSTER)\n LOGGER.warn(\"Web dashboard now running at http://localhost:8787/status\")\n with joblib.parallel_backend('dask.distributed'):\n result = search(model, X, y, params, method=args.method)\n # try to get progress bar in terminal; prefer 
web daskboad\n progress(result)\n dump(result, args.filepath)\n"
] |
[
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.datasets.load_digits",
"sklearn.externals.joblib.parallel_backend"
]
] |
jeanoliveira92/metaheuristicas-mosp
|
[
"96be6ca4c45603a16c8c7b83b2ee34fd0c79b146"
] |
[
"HeuristicaPopulacional/graspPathRelinkMixed.py"
] |
[
"import random\nimport numpy as np\nimport globals as g\nimport math\nfrom HeuristicaRefinamento import heuristicaRefinamento as hr\nfrom HeuristicaConstrutiva import heuristicaConstrutiva as hc\nfrom HeuristicaPopulacional import grasp\n\n# HEURISTICA POPULACIONAL GRASP PATH RELINK MIXED - FIRST IMPROVEMEMENT\ndef graspPathRelinkMixedFim(ordemDasPilhas):\n resultadoBom = np.max(hc.PilhasAbertas(ordemDasPilhas))\n # ORDENA A MATRIZ DE FORMA CRESCENTE\n matOrd = grasp.gerarMatrizOrdenada()\n # LISTA D CANDIDATOS\n ls = []\n i = 0\n while i < 150:\n ordemDasPilhasAtual = grasp.construtivaGrasp(matOrd)\n ordemDasPilhasAtual = hr.FirstImprovementMethod(ordemDasPilhasAtual)\n #ADICIONA A LISTA DE CANDIDATOS O RESULTADO ATUAL\n if(len(ls) < 2):\n # VERIFICA REPETICAO\n tem = [np.all(ordemDasPilhasAtual == x) for x in ls]\n # ADICIONA SE NAO TIVER REPETIDO\n if (not np.any(tem)):\n ls.append(list(ordemDasPilhasAtual))\n else:\n # ESCOLHER UM VETOR\n orderPilhasCandidata = random.choice(ls)\n ordemDasPilhasAtual = MixedPathRelink(list(ordemDasPilhasAtual), list(orderPilhasCandidata))\n\n if(len(ls) < 20):\n # VERIFICA REPETICAO\n tem = [np.all(ordemDasPilhasAtual == x) for x in ls]\n\n # ADICIONA SE NAO TIVER REPETIDO\n if (not np.any(tem)):\n ls.append(list(ordemDasPilhasAtual))\n else:\n # indices = list(range(0, len(ordemDasPilhasAtual)))\n # matPilhasAbertas = [ [np.max(hc.PilhasAbertas(i)), i] for i in ls]\n removeIten = 21\n\n last = np.max(hc.PilhasAbertas(ordemDasPilhasAtual))\n\n atual = last\n k = 0\n for j in ls:\n temp = np.max(hc.PilhasAbertas(j))\n if (temp > last and temp > atual):\n removeIten = k\n last = temp\n k = k + 1\n\n if (removeIten < 20):\n ls.pop(removeIten)\n\n tem = [np.all(ordemDasPilhasAtual == x) for x in ls]\n # ADICIONA SE NAO TIVER REPETIDO\n if (not np.any(tem)):\n ls.append(list(ordemDasPilhasAtual))\n\n # matPilhasAbertas = np.array(matPilhasAbertas)\n # print(matPilhasAbertas[matPilhasAbertas[:,0].argsort()])\n\n resultadoMelhor = 
np.max(hc.PilhasAbertas(ordemDasPilhasAtual))\n if resultadoMelhor < resultadoBom :\n ordemDasPilhas = ordemDasPilhasAtual\n resultadoBom = resultadoMelhor\n i = -1\n\n i = i+1\n return ordemDasPilhas\n\ndef MixedPathRelink(OPA, OPC):\n piorSolucao = []\n melhorSolucao = []\n melhorCusto = 0\n solucaoSaida = []\n # VERIFICA QUAL DAS DUAS PILHAS É A MAIOR OU MENOR\n pilhaA = np.max(hc.PilhasAbertas(OPA))\n pilhaC = np.max(hc.PilhasAbertas(OPC))\n if(pilhaA < pilhaC):\n melhorSolucao = OPA\n piorSolucao = OPC\n melhorCusto = pilhaA\n else:\n melhorSolucao = OPC\n piorSolucao = OPA\n melhorCusto = pilhaC\n\n solucaoSaida = melhorSolucao\n\n # VAI REALIZAR A TROCA ENTRE MOVER PARA DIREITA, OU PARA A ESQUERDA\n switch = True\n custoAtual = 0\n while (list(piorSolucao) != list(melhorSolucao)):\n # MENOR PARA O MAIOR\n if(switch):\n # CRIA-SE UM VETOR COM INDICE DOS ELEMENTOS DIFERENTES\n vetNaoSimetricos = [i for i in range(len(piorSolucao)) if piorSolucao[i] != melhorSolucao[i]]\n # REALIZA A TROCAs\n for i in range(len(vetNaoSimetricos) - 1):\n piorSolucao[vetNaoSimetricos[i]], piorSolucao[vetNaoSimetricos[i + 1]] = piorSolucao[ vetNaoSimetricos[i + 1]], piorSolucao[vetNaoSimetricos[i]]\n\n custoAtual = np.max(hc.PilhasAbertas(piorSolucao))\n\n switch = False\n\n # MAIOR PARA O MENOR\n else:\n # CRIA-SE UM VETOR COM INDICE DOS ELEMENTOS DIFERENTES\n vetNaoSimetricos = [i for i in range(len(melhorSolucao)) if melhorSolucao[i] != piorSolucao[i]]\n # REALIZA A TROCA\n for i in reversed(range(len(vetNaoSimetricos) - 1)):\n melhorSolucao[vetNaoSimetricos[i]], melhorSolucao[vetNaoSimetricos[i + 1]] = melhorSolucao[vetNaoSimetricos[i + 1]], melhorSolucao[vetNaoSimetricos[i]]\n\n custoAtual = np.max(hc.PilhasAbertas(melhorSolucao))\n\n switch = True\n\n if custoAtual <= melhorCusto:\n solucaoSaida = piorSolucao\n melhorCusto = custoAtual\n\n return solucaoSaida"
] |
[
[
"numpy.all",
"numpy.any"
]
] |
chrismile/cuMat
|
[
"8bfe48393cc93aa4555c7b81b5b4f44c142b4ebb"
] |
[
"benchmarks/batched_reduction/MakePlotsLinear.py"
] |
[
"import sys\nimport os\nimport json\nimport matplotlib.pyplot as plt\nimport math\nimport seaborn as sns\n\nsetPath = sys.argv[1]\nsetName = setPath[setPath.rfind('/')+1:]\n\nresultFile = setPath + \".json\"\nwith open(resultFile, 'r') as f:\n results = json.load(f)\n\nsize = results[\"Size\"]\nsets = [\"Row\", \"Column\", \"Batch\"]\nmethods = [\"CUB\", \n \"Thread\", \n \"Warp\", \n \"Block64\", \"Block128\", \"Block256\", \"Block512\", \n \"Device1\", \"Device2\", \"Device4\", \"Device8\", \"Device16\", \"Device32\"]\nxlabel = \"2^N entries along reduced axis\"\nylabel = \"Time (ms)\"\nxdata = [math.log2(vx[0]) for vx in results[sets[0]]]\nxscale = 'linear'\nyscale = 'log'\ncolors = \\\n sns.color_palette(\"muted\", 3)+ \\\n sns.color_palette(\"ch:4.5,-.2,dark=.3\", 4)+ \\\n sns.color_palette(\"ch:3.5,-.2,dark=.3\", 6)\n\nfor set in sets:\n # now create the plot\n plt.figure(dpi=500)\n for (i,m),col in zip(enumerate(methods), colors):\n plt.plot(xdata, [vx[i+1] for vx in results[set]], \n '-o', label=m, color=col)\n plt.xscale(xscale)\n plt.yscale(yscale)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(\"Reduction axis: \" + set + \"\\nTotal vector size: \" + str(size))\n plt.legend()\n plt.xticks(xdata)\n plt.savefig(setPath+\"_\"+set+'.png', bbox_inches='tight', dpi=500)\n"
] |
[
[
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.xticks"
]
] |
j-bac/GSPCA
|
[
"81521d768d1e96f6101e46412b67e24266e681d6"
] |
[
"GSPCA.py"
] |
[
"import scipy\nimport numpy as np\n\ndef GSPCA( data, labels, nComp, param ):\n #GSPCA calculates generalised advanced supervised PCA with respect to [1].\n # [ V, D ] = GSPCA( data, labels, nComp, kind ) return n-by-nComp\n # matrix V with PCs as columns and diagonal nComp-by-nComp\n # matrix D with eigenvalues corresponding to PCs. \n # data is n-by-m matrix of data (covariance matrix is unacceptable). Data\n # MUST be centred before.\n # labels is numeric vector with n elements. The same labels corresponds\n # to points of the same class. Number of unique values in labels is\n # L. Classes are numerated in the order of increasing value of labels.\n # nComp is number of required component.\n # param is parameter of method:\n # scalar numeric value is parameter of intraclass attraction: the\n # functional to maximise is mean squared distances between points\n # of different classes minus param multiplied to sum of mean\n # squared distances between points of each class\n # numeric vector with L elements is vector of attractions in each\n # class: the functional to maximise is mean squared distances\n # between points of different classes minus sum of sum of mean\n # squared distances between points of each class multiplied by\n # corresponding element of vector param.\n # numeric matrix L-by-L is matrix of repulsion coefficients. The\n # elements upper than main diagonal are coefficients of repulsion\n # between corresponding clusses. The diagonal elements are\n # attraction coefficients for corresponding classes.\n #\n #References\n #1. Mirkes, Evgeny M., Gorban, Alexander N., Zinovyev, Andrei Y.,\n # Supervised PCA, Available online in https://github.com/Mirkes/SupervisedPCA/wiki\n #2. Gorban, Alexander N., Zinovyev, Andrei Y. “Principal Graphs and Manifolds”, \n # Chapter 2 in: Handbook of Research on Machine Learning Applications and Trends: \n # Algorithms, Methods, and Techniques, Emilio Soria Olivas et al. (eds), \n # IGI Global, Hershey, PA, USA, 2009, pp. 
28-59.\n #3. Zinovyev, Andrei Y. \"Visualisation of multidimensional data\" Krasnoyarsk: KGTU,\n # p. 180 (2000) (In Russian).\n #4. Koren, Yehuda, and Liran Carmel. \"Robust linear dimensionality\n # reduction.\" Visualization and Computer Graphics, IEEE Transactions on\n # 10.4 (2004): 459-470.\n #\n #Licensed from CC0 1.0 Universal - Author Evgeny Mirkes https://github.com/Mirkes/SupervisedPCA/blob/master/\n\n #Get sizes of data\n n, m = data.shape\n data = data.astype(float)\n labels = labels.astype(float)\n # List of classes\n labs = np.unique(labels)\n # Number of classes\n L = len(labs)\n # Check the type of nComp\n if nComp > m or nComp < 1:\n raise ValueError('Incorrect value of nComp: it must be positive integer equal to or less than m')\n\n # Form matrix of coefficients\n if type(param) in [int,float]:\n coef = np.ones((L,L))\n coef = coef + np.diag((param - 1) * np.diag(coef))\n elif len(param.shape) == 1:\n if len(param) != L:\n raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\\n where L is number of classes (unique values in labels)'])\n coef = np.ones((L,L))\n coef = coef + np.diag(np.diag(param - 1))\n elif len(param.shape) == 2:\n [a, b] = param.shape\n if a != L or b != L:\n raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\\n where L is number of classes (unique values in labels)'])\n else:\n raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\\n where L is number of classes (unique values in labels)'])\n\n # Symmetrize coef matrix\n coef = coef - np.tril(coef, -1) + np.triu(coef, 1).T\n\n # Calculate diagonal terms of Laplacian matrix without devision by\n # number of elements in class\n diagV = np.diag(coef)\n diagC = np.sum(coef,axis=0) - diagV\n\n # Calculate transformed covariance matrix\n M = np.zeros((m,m))\n means = np.zeros((L, m))\n # Loop to form the diagonal terms and calculate means\n for c in range(L):\n 
# Get index of class\n ind = labels == labs[c]\n # Calculate mean\n means[c, :] = np.mean(data[ind, :],axis=0)\n # Calculate coefficient for Identity term\n nc = np.sum(ind,axis=0)\n coefD = diagC[c] / nc - 2 * diagV[c] / (nc - 1)\n # Add the diagonal term\n M = (M + 2 * diagV[c] * nc / (nc - 1) * (means[[c], :].T @ means[[c], :])\n + coefD * data[ind, :].T @ data[ind, :])\n\n # Loop for off diagonal parts\n for c in range(L - 1):\n for cc in range(c + 1, L):\n tmp = means[[c], :].T @ means[[cc], :]\n M = M - coef[c, cc] * (tmp + tmp.T)\n\n #Request calculations from eigs\n if nComp<m-1:\n D, V = scipy.sparse.linalg.eigs(M, nComp)\n else:\n D, V = scipy.linalg.eig(M)\n ind = np.argsort(D)[::-1]\n V = V[:,ind]\n D = D[ind]\n\n return V, D"
] |
[
[
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"numpy.mean",
"numpy.triu",
"scipy.sparse.linalg.eigs",
"scipy.linalg.eig",
"numpy.tril",
"numpy.argsort",
"numpy.diag",
"numpy.unique"
]
] |
VicFic2006/StravaActivityAnalyser
|
[
"c09f98bde4af79dd10a113240a38e10aee807cae"
] |
[
"fetch/fetch.py"
] |
[
"import pandas as pd\nimport requests\nimport json\nimport time\n\ndef thething(strava_tokens, ind):\n idofathlete = strava_tokens['athlete']['id']\n\n ## If access_token has expired then use the refresh_token to get the new access_token\n if strava_tokens['expires_at'] < time.time():\n print(\"🔐-access_token expired. Requesting new access_token-🔐.\")\n #Make Strava auth API call with current refresh token\n response = requests.post(\n url = 'https://www.strava.com/oauth/token',\n data = {\n 'client_id': INSERT_ID_HERE,\n 'client_secret': 'INSERT CLIENT SECRET HERE',\n 'grant_type': 'refresh_token',\n 'refresh_token': strava_tokens['refresh_token']\n }\n )\n\n #Save response as json in new variable\n new_strava_tokens = response.json()\n # Save new tokens to file\n # with open('strava_tokens.json', 'w') as outfile:\n # json.dump(new_strava_tokens, outfile)\n #Use new Strava tokens from now\n strava_tokens = new_strava_tokens\n\n # set start_date_local as yesterday.\n nowtime = time.time();\n cutoffday_midnight = (int(nowtime // 86400)) * 86400 - (10*86400) - 19800;\n # 19800 to deduct 5:30 Hrs to\n\n # Loop through all activities\n page = 1\n url = \"https://www.strava.com/api/v3/activities\"\n access_token = strava_tokens['access_token']\n print(access_token)\n \n ## Create the dataframe ready for the API call to store your activity data\n activities = pd.DataFrame(\n columns = [\n \"athlete_id\",\n \"id\",\n \"name\",\n \"start_date_local\",\n \"distance\",\n \"moving_time\",\n \"elapsed_time\",\n \"total_elevation_gain\"\n ]\n )\n\n while True:\n\n # get page of activities from Strava\n r = requests.get(url + '?access_token=' + access_token + '&per_page=10'+'&after='+ str(cutoffday_midnight) + '&page=' + str(page))\n r = r.json()\n # if no results then exit loop\n if (not r) and (page != 1):\n break\n elif (not r) and (page == 1):\n print(\"❌-This person didn't do any activites-❌\")\n activities.loc[0,'athlete_id'] = idofathlete\n break\n \n 
#print(json.dumps(r))\n # otherwise add new data to dataframe\n for x in range(len(r)):\n if (r[x]['type'] == 'Ride'):\n activities.loc[x + (page-1)*30,'athlete_id'] = r[x]['athlete']['id']\n activities.loc[x + (page-1)*30,'id'] = r[x]['id']\n activities.loc[x + (page-1)*30,'name'] = r[x]['name']\n activities.loc[x + (page-1)*30,'start_date_local'] = r[x]['start_date_local']\n activities.loc[x + (page-1)*30,'distance'] = r[x]['distance']\n activities.loc[x + (page-1)*30,'moving_time'] = r[x]['moving_time']\n activities.loc[x + (page-1)*30,'elapsed_time'] = r[x]['elapsed_time']\n activities.loc[x + (page-1)*30,'total_elevation_gain'] = r[x]['total_elevation_gain']\n # increment page\n page += 1\n \n print(\"👆-------\"+str(ind)+\"-------👆\")\n activities.to_csv('day_activity.csv', mode='a', index=False, header=False)\n\n# Main code\noct_kcc = pd.read_csv('octkcc.csv')\n\nfor index, row in oct_kcc.iterrows():\n #print(row['athlet_id'])\n print(row['profile_json'])\n jaison = json.loads(row['profile_json'])\n #print(jaison['access_token'])\n thething(jaison, int(index))\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
eort/nipype
|
[
"04d0159686a8d656905e9e06110287c6c60c1523"
] |
[
"nipype/interfaces/spm/model.py"
] |
[
"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"The spm module provides basic functions for interfacing with matlab\nand spm to access spm tools.\n\n Change directory to provide relative paths for doctests\n >>> import os\n >>> filepath = os.path.dirname( os.path.realpath( __file__ ) )\n >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))\n >>> os.chdir(datadir)\n\n\"\"\"\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\nfrom builtins import str, bytes\n\n# Standard library imports\nimport os\nfrom glob import glob\n\n# Third-party imports\nimport numpy as np\nimport scipy.io as sio\n\n# Local imports\nfrom ... import logging\nfrom ...utils.filemanip import (filename_to_list, list_to_filename,\n split_filename)\nfrom ..base import (Bunch, traits, TraitedSpec, File, Directory,\n OutputMultiPath, InputMultiPath, isdefined)\nfrom .base import (SPMCommand, SPMCommandInputSpec, scans_for_fnames,\n ImageFileSPM)\n\n__docformat__ = 'restructuredtext'\niflogger = logging.getLogger('interface')\n\n\nclass Level1DesignInputSpec(SPMCommandInputSpec):\n spm_mat_dir = Directory(\n exists=True, field='dir', desc='directory to store SPM.mat file (opt)')\n timing_units = traits.Enum(\n 'secs',\n 'scans',\n field='timing.units',\n desc='units for specification of onsets',\n mandatory=True)\n interscan_interval = traits.Float(\n field='timing.RT', desc='Interscan interval in secs', mandatory=True)\n microtime_resolution = traits.Int(\n field='timing.fmri_t',\n desc=('Number of time-bins per scan '\n 'in secs (opt)'))\n microtime_onset = traits.Float(\n field='timing.fmri_t0',\n desc=('The onset/time-bin in seconds for '\n 'alignment (opt)'))\n session_info = traits.Any(\n field='sess',\n desc=('Session specific information generated '\n 'by ``modelgen.SpecifyModel``'),\n mandatory=True)\n factor_info = traits.List(\n 
traits.Dict(traits.Enum('name', 'levels')),\n field='fact',\n desc=('Factor specific information '\n 'file (opt)'))\n bases = traits.Dict(\n traits.Enum('hrf', 'fourier', 'fourier_han', 'gamma', 'fir'),\n field='bases',\n desc=\"\"\"\n dict {'name':{'basesparam1':val,...}}\n name : string\n Name of basis function (hrf, fourier, fourier_han,\n gamma, fir)\n\n hrf :\n derivs : 2-element list\n Model HRF Derivatives. No derivatives: [0,0],\n Time derivatives : [1,0], Time and Dispersion\n derivatives: [1,1]\n fourier, fourier_han, gamma, fir:\n length : int\n Post-stimulus window length (in seconds)\n order : int\n Number of basis functions\n\"\"\",\n mandatory=True)\n volterra_expansion_order = traits.Enum(\n 1, 2, field='volt', desc=('Model interactions - '\n 'yes:1, no:2'))\n global_intensity_normalization = traits.Enum(\n 'none',\n 'scaling',\n field='global',\n desc=('Global intensity '\n 'normalization - '\n 'scaling or none'))\n mask_image = File(\n exists=True,\n field='mask',\n desc='Image for explicitly masking the analysis')\n mask_threshold = traits.Either(\n traits.Enum('-Inf'),\n traits.Float(),\n desc=\"Thresholding for the mask\",\n default='-Inf',\n usedefault=True)\n model_serial_correlations = traits.Enum(\n 'AR(1)',\n 'FAST',\n 'none',\n field='cvi',\n desc=('Model serial correlations '\n 'AR(1), FAST or none. 
FAST '\n 'is available in SPM12'))\n\n\nclass Level1DesignOutputSpec(TraitedSpec):\n spm_mat_file = File(exists=True, desc='SPM mat file')\n\n\nclass Level1Design(SPMCommand):\n \"\"\"Generate an SPM design matrix\n\n http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=59\n\n Examples\n --------\n\n >>> level1design = Level1Design()\n >>> level1design.inputs.timing_units = 'secs'\n >>> level1design.inputs.interscan_interval = 2.5\n >>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}}\n >>> level1design.inputs.session_info = 'session_info.npz'\n >>> level1design.run() # doctest: +SKIP\n\n \"\"\"\n\n input_spec = Level1DesignInputSpec\n output_spec = Level1DesignOutputSpec\n\n _jobtype = 'stats'\n _jobname = 'fmri_spec'\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for spm\n \"\"\"\n if opt in ['spm_mat_dir', 'mask_image']:\n return np.array([str(val)], dtype=object)\n if opt in ['session_info']: # , 'factor_info']:\n if isinstance(val, dict):\n return [val]\n else:\n return val\n return super(Level1Design, self)._format_arg(opt, spec, val)\n\n def _parse_inputs(self):\n \"\"\"validate spm realign options if set to None ignore\n \"\"\"\n einputs = super(Level1Design,\n self)._parse_inputs(skip=('mask_threshold'))\n for sessinfo in einputs[0]['sess']:\n sessinfo['scans'] = scans_for_fnames(\n filename_to_list(sessinfo['scans']), keep4d=False)\n if not isdefined(self.inputs.spm_mat_dir):\n einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)\n return einputs\n\n def _make_matlab_command(self, content):\n \"\"\"validates spm options and generates job structure\n if mfile is True uses matlab .m file\n else generates a job structure and saves in .mat\n \"\"\"\n if isdefined(self.inputs.mask_image):\n # SPM doesn't handle explicit masking properly, especially\n # when you want to use the entire mask image\n postscript = \"load SPM;\\n\"\n postscript += (\"SPM.xM.VM = spm_vol('%s');\\n\" % list_to_filename(\n 
self.inputs.mask_image))\n postscript += \"SPM.xM.I = 0;\\n\"\n postscript += \"SPM.xM.T = [];\\n\"\n postscript += (\"SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\\n\" %\n self.inputs.mask_threshold)\n postscript += (\"SPM.xM.xs = struct('Masking', \"\n \"'explicit masking only');\\n\")\n postscript += \"save SPM SPM;\\n\"\n else:\n postscript = None\n return super(Level1Design, self)._make_matlab_command(\n content, postscript=postscript)\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n spm = os.path.join(os.getcwd(), 'SPM.mat')\n outputs['spm_mat_file'] = spm\n return outputs\n\n\nclass EstimateModelInputSpec(SPMCommandInputSpec):\n spm_mat_file = File(\n exists=True,\n field='spmmat',\n copyfile=True,\n mandatory=True,\n desc='Absolute path to SPM.mat')\n estimation_method = traits.Dict(\n traits.Enum('Classical', 'Bayesian2', 'Bayesian'),\n field='method',\n mandatory=True,\n desc=('Dictionary of either Classical: 1, Bayesian: 1, '\n 'or Bayesian2: 1 (dict)'))\n write_residuals = traits.Bool(\n field='write_residuals', desc=\"Write individual residual images\")\n flags = traits.Dict(desc='Additional arguments')\n\n\nclass EstimateModelOutputSpec(TraitedSpec):\n mask_image = ImageFileSPM(\n exists=True, desc='binary mask to constrain estimation')\n beta_images = OutputMultiPath(\n ImageFileSPM(exists=True), desc='design parameter estimates')\n residual_image = ImageFileSPM(\n exists=True, desc='Mean-squared image of the residuals')\n residual_images = OutputMultiPath(\n ImageFileSPM(exists=True),\n desc=\"individual residual images (requires `write_residuals`\")\n RPVimage = ImageFileSPM(exists=True, desc='Resels per voxel image')\n spm_mat_file = File(exists=True, desc='Updated SPM mat file')\n labels = ImageFileSPM(exists=True, desc=\"label file\")\n SDerror = OutputMultiPath(\n ImageFileSPM(exists=True),\n desc=\"Images of the standard deviation of the error\")\n ARcoef = OutputMultiPath(\n ImageFileSPM(exists=True), desc=\"Images of the AR 
coefficient\")\n Cbetas = OutputMultiPath(\n ImageFileSPM(exists=True), desc=\"Images of the parameter posteriors\")\n SDbetas = OutputMultiPath(\n ImageFileSPM(exists=True),\n desc=\"Images of the standard deviation of parameter posteriors\")\n\n\nclass EstimateModel(SPMCommand):\n \"\"\"Use spm_spm to estimate the parameters of a model\n\n http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=69\n\n Examples\n --------\n >>> est = EstimateModel()\n >>> est.inputs.spm_mat_file = 'SPM.mat'\n >>> est.inputs.estimation_method = {'Classical': 1}\n >>> est.run() # doctest: +SKIP\n \"\"\"\n input_spec = EstimateModelInputSpec\n output_spec = EstimateModelOutputSpec\n _jobtype = 'stats'\n _jobname = 'fmri_est'\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for spm\n \"\"\"\n if opt == 'spm_mat_file':\n return np.array([str(val)], dtype=object)\n if opt == 'estimation_method':\n if isinstance(val, (str, bytes)):\n return {'{}'.format(val): 1}\n else:\n return val\n return super(EstimateModel, self)._format_arg(opt, spec, val)\n\n def _parse_inputs(self):\n \"\"\"validate spm realign options if set to None ignore\n \"\"\"\n einputs = super(EstimateModel, self)._parse_inputs(skip=('flags'))\n if isdefined(self.inputs.flags):\n einputs[0].update(\n {flag: val\n for (flag, val) in self.inputs.flags.items()})\n return einputs\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n pth = os.path.dirname(self.inputs.spm_mat_file)\n outtype = 'nii' if '12' in self.version.split('.')[0] else 'img'\n spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)\n\n betas = [vbeta.fname[0] for vbeta in spm['SPM'][0, 0].Vbeta[0]]\n if ('Bayesian' in self.inputs.estimation_method.keys()\n or 'Bayesian2' in self.inputs.estimation_method.keys()):\n outputs['labels'] = os.path.join(pth, 'labels.{}'.format(outtype))\n outputs['SDerror'] = glob(os.path.join(pth, 'Sess*_SDerror*'))\n outputs['ARcoef'] = glob(os.path.join(pth, 
'Sess*_AR_*'))\n if betas:\n outputs['Cbetas'] = [\n os.path.join(pth, 'C{}'.format(beta)) for beta in betas\n ]\n outputs['SDbetas'] = [\n os.path.join(pth, 'SD{}'.format(beta)) for beta in betas\n ]\n\n if 'Classical' in self.inputs.estimation_method.keys():\n outputs['residual_image'] = os.path.join(\n pth, 'ResMS.{}'.format(outtype))\n outputs['RPVimage'] = os.path.join(pth, 'RPV.{}'.format(outtype))\n if self.inputs.write_residuals:\n outputs['residual_images'] = glob(os.path.join(pth, 'Res_*'))\n if betas:\n outputs['beta_images'] = [\n os.path.join(pth, beta) for beta in betas\n ]\n\n outputs['mask_image'] = os.path.join(pth, 'mask.{}'.format(outtype))\n outputs['spm_mat_file'] = os.path.join(pth, 'SPM.mat')\n return outputs\n\n\nclass EstimateContrastInputSpec(SPMCommandInputSpec):\n spm_mat_file = File(\n exists=True,\n field='spmmat',\n desc='Absolute path to SPM.mat',\n copyfile=True,\n mandatory=True)\n contrasts = traits.List(\n traits.Either(\n traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str),\n traits.List(traits.Float)),\n traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str),\n traits.List(traits.Float), traits.List(traits.Float)),\n traits.Tuple(traits.Str, traits.Enum('F'),\n traits.List(\n traits.Either(\n traits.Tuple(traits.Str, traits.Enum('T'),\n traits.List(traits.Str),\n traits.List(traits.Float)),\n traits.Tuple(traits.Str, traits.Enum('T'),\n traits.List(traits.Str),\n traits.List(traits.Float),\n traits.List(traits.Float)))))),\n desc=\"\"\"List of contrasts with each contrast being a list of the form:\n [('name', 'stat', [condition list], [weight list], [session list])]\n If session list is None or not provided, all sessions are used. 
For\n F contrasts, the condition list should contain previously defined\n T-contrasts.\"\"\",\n mandatory=True)\n beta_images = InputMultiPath(\n File(exists=True),\n desc=('Parameter estimates of the '\n 'design matrix'),\n copyfile=False,\n mandatory=True)\n residual_image = File(\n exists=True,\n desc='Mean-squared image of the residuals',\n copyfile=False,\n mandatory=True)\n use_derivs = traits.Bool(\n desc='use derivatives for estimation', xor=['group_contrast'])\n group_contrast = traits.Bool(\n desc='higher level contrast', xor=['use_derivs'])\n\n\nclass EstimateContrastOutputSpec(TraitedSpec):\n con_images = OutputMultiPath(\n File(exists=True), desc='contrast images from a t-contrast')\n spmT_images = OutputMultiPath(\n File(exists=True), desc='stat images from a t-contrast')\n ess_images = OutputMultiPath(\n File(exists=True), desc='contrast images from an F-contrast')\n spmF_images = OutputMultiPath(\n File(exists=True), desc='stat images from an F-contrast')\n spm_mat_file = File(exists=True, desc='Updated SPM mat file')\n\n\nclass EstimateContrast(SPMCommand):\n \"\"\"Use spm_contrasts to estimate contrasts of interest\n\n Examples\n --------\n >>> import nipype.interfaces.spm as spm\n >>> est = spm.EstimateContrast()\n >>> est.inputs.spm_mat_file = 'SPM.mat'\n >>> cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5])\n >>> cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1])\n >>> contrasts = [cont1,cont2]\n >>> est.inputs.contrasts = contrasts\n >>> est.run() # doctest: +SKIP\n\n \"\"\"\n\n input_spec = EstimateContrastInputSpec\n output_spec = EstimateContrastOutputSpec\n _jobtype = 'stats'\n _jobname = 'con'\n\n def _make_matlab_command(self, _):\n \"\"\"validates spm options and generates job structure\n \"\"\"\n contrasts = []\n cname = []\n for i, cont in enumerate(self.inputs.contrasts):\n cname.insert(i, cont[0])\n contrasts.insert(i,\n Bunch(\n name=cont[0],\n stat=cont[1],\n conditions=cont[2],\n weights=None,\n 
sessions=None))\n if len(cont) >= 4:\n contrasts[i].weights = cont[3]\n if len(cont) >= 5:\n contrasts[i].sessions = cont[4]\n script = \"% generated by nipype.interfaces.spm\\n\"\n script += \"spm_defaults;\\n\"\n script += (\"jobs{1}.stats{1}.con.spmmat = {'%s'};\\n\" %\n self.inputs.spm_mat_file)\n script += \"load(jobs{1}.stats{1}.con.spmmat{:});\\n\"\n script += \"SPM.swd = '%s';\\n\" % os.getcwd()\n script += \"save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\\n\"\n script += \"names = SPM.xX.name;\\n\"\n # get names for columns\n if (isdefined(self.inputs.group_contrast)\n and self.inputs.group_contrast):\n script += \"condnames=names;\\n\"\n else:\n if self.inputs.use_derivs:\n script += \"pat = 'Sn\\([0-9]*\\) (.*)';\\n\"\n else:\n script += (\"pat = 'Sn\\([0-9]*\\) (.*)\\*bf\\(1\\)|Sn\\([0-9]*\\) \"\n \".*\\*bf\\([2-9]\\)|Sn\\([0-9]*\\) (.*)';\\n\")\n script += \"t = regexp(names,pat,'tokens');\\n\"\n # get sessidx for columns\n script += \"pat1 = 'Sn\\(([0-9].*)\\)\\s.*';\\n\"\n script += \"t1 = regexp(names,pat1,'tokens');\\n\"\n script += (\"for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if \"\n \"~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};\"\n \"condsess(i0)=str2num(t1{i0}{1}{1});end;end;\\n\")\n # BUILD CONTRAST SESSION STRUCTURE\n for i, contrast in enumerate(contrasts):\n if contrast.stat == 'T':\n script += (\"consess{%d}.tcon.name = '%s';\\n\" %\n (i + 1, contrast.name))\n script += (\n \"consess{%d}.tcon.convec = zeros(1,numel(names));\\n\" %\n (i + 1))\n for c0, cond in enumerate(contrast.conditions):\n script += (\"idx = strmatch('%s',condnames,'exact');\\n\" %\n (cond))\n script += ((\"if isempty(idx), throw(MException(\"\n \"'CondName:Chk', sprintf('Condition %%s not \"\n \"found in design','%s'))); end;\\n\") % cond)\n if contrast.sessions:\n for sno, sw in enumerate(contrast.sessions):\n script += (\"sidx = find(condsess(idx)==%d);\\n\" %\n (sno + 1))\n script += ((\"consess{%d}.tcon.convec(idx(sidx)) \"\n \"= %f;\\n\") %\n (i + 1, sw * 
contrast.weights[c0]))\n else:\n script += (\"consess{%d}.tcon.convec(idx) = %f;\\n\" %\n (i + 1, contrast.weights[c0]))\n for i, contrast in enumerate(contrasts):\n if contrast.stat == 'F':\n script += (\"consess{%d}.fcon.name = '%s';\\n\" %\n (i + 1, contrast.name))\n for cl0, fcont in enumerate(contrast.conditions):\n try:\n tidx = cname.index(fcont[0])\n except:\n Exception(\"Contrast Estimate: could not get index of\"\n \" T contrast. probably not defined prior \"\n \"to the F contrasts\")\n script += ((\"consess{%d}.fcon.convec{%d} = \"\n \"consess{%d}.tcon.convec;\\n\") %\n (i + 1, cl0 + 1, tidx + 1))\n script += \"jobs{1}.stats{1}.con.consess = consess;\\n\"\n script += (\"if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');\"\n \"jobs=spm_jobman('spm5tospm8',{jobs});end\\n\")\n script += \"spm_jobman('run',jobs);\"\n return script\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n pth, _ = os.path.split(self.inputs.spm_mat_file)\n spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)\n con_images = []\n spmT_images = []\n for con in spm['SPM'][0, 0].xCon[0]:\n con_images.append(str(os.path.join(pth, con.Vcon[0, 0].fname[0])))\n spmT_images.append(str(os.path.join(pth, con.Vspm[0, 0].fname[0])))\n if con_images:\n outputs['con_images'] = con_images\n outputs['spmT_images'] = spmT_images\n spm12 = '12' in self.version.split('.')[0]\n if spm12:\n ess = glob(os.path.join(pth, 'ess*.nii'))\n else:\n ess = glob(os.path.join(pth, 'ess*.img'))\n if len(ess) > 0:\n outputs['ess_images'] = sorted(ess)\n if spm12:\n spmf = glob(os.path.join(pth, 'spmF*.nii'))\n else:\n spmf = glob(os.path.join(pth, 'spmF*.img'))\n if len(spmf) > 0:\n outputs['spmF_images'] = sorted(spmf)\n outputs['spm_mat_file'] = self.inputs.spm_mat_file\n return outputs\n\n\nclass ThresholdInputSpec(SPMCommandInputSpec):\n spm_mat_file = File(\n exists=True,\n desc='absolute path to SPM.mat',\n copyfile=True,\n mandatory=True)\n stat_image = File(\n exists=True, 
desc='stat image', copyfile=False, mandatory=True)\n contrast_index = traits.Int(\n mandatory=True, desc='which contrast in the SPM.mat to use')\n use_fwe_correction = traits.Bool(\n True,\n usedefault=True,\n desc=('whether to use FWE (Bonferroni) '\n 'correction for initial threshold '\n '(height_threshold_type has to be '\n 'set to p-value)'))\n use_topo_fdr = traits.Bool(\n True,\n usedefault=True,\n desc=('whether to use FDR over cluster extent '\n 'probabilities'))\n height_threshold = traits.Float(\n 0.05,\n usedefault=True,\n desc=('value for initial thresholding '\n '(defining clusters)'))\n height_threshold_type = traits.Enum(\n 'p-value',\n 'stat',\n usedefault=True,\n desc=('Is the cluster forming '\n 'threshold a stat value or '\n 'p-value?'))\n extent_fdr_p_threshold = traits.Float(\n 0.05,\n usedefault=True,\n desc=('p threshold on FDR corrected '\n 'cluster size probabilities'))\n extent_threshold = traits.Int(\n 0, usedefault=True, desc='Minimum cluster size in voxels')\n force_activation = traits.Bool(\n False,\n usedefault=True,\n desc=('In case no clusters survive the '\n 'topological inference step this '\n 'will pick a culster with the highes '\n 'sum of t-values. Use with care.'))\n\n\nclass ThresholdOutputSpec(TraitedSpec):\n thresholded_map = File(exists=True)\n n_clusters = traits.Int()\n pre_topo_fdr_map = File(exists=True)\n pre_topo_n_clusters = traits.Int()\n activation_forced = traits.Bool()\n cluster_forming_thr = traits.Float()\n\n\nclass Threshold(SPMCommand):\n \"\"\"Topological FDR thresholding based on cluster extent/size. 
Smoothness is\n estimated from GLM residuals but is assumed to be the same for all of the\n voxels.\n\n Examples\n --------\n\n >>> thresh = Threshold()\n >>> thresh.inputs.spm_mat_file = 'SPM.mat'\n >>> thresh.inputs.stat_image = 'spmT_0001.img'\n >>> thresh.inputs.contrast_index = 1\n >>> thresh.inputs.extent_fdr_p_threshold = 0.05\n >>> thresh.run() # doctest: +SKIP\n \"\"\"\n input_spec = ThresholdInputSpec\n output_spec = ThresholdOutputSpec\n\n def _gen_thresholded_map_filename(self):\n _, fname, ext = split_filename(self.inputs.stat_image)\n return os.path.abspath(fname + \"_thr\" + ext)\n\n def _gen_pre_topo_map_filename(self):\n _, fname, ext = split_filename(self.inputs.stat_image)\n return os.path.abspath(fname + \"_pre_topo_thr\" + ext)\n\n def _make_matlab_command(self, _):\n script = \"con_index = %d;\\n\" % self.inputs.contrast_index\n script += \"cluster_forming_thr = %f;\\n\" % self.inputs.height_threshold\n if self.inputs.use_fwe_correction:\n script += \"thresDesc = 'FWE';\\n\"\n else:\n script += \"thresDesc = 'none';\\n\"\n\n if self.inputs.use_topo_fdr:\n script += \"use_topo_fdr = 1;\\n\"\n else:\n script += \"use_topo_fdr = 0;\\n\"\n\n if self.inputs.force_activation:\n script += \"force_activation = 1;\\n\"\n else:\n script += \"force_activation = 0;\\n\"\n script += (\"cluster_extent_p_fdr_thr = %f;\\n\" %\n self.inputs.extent_fdr_p_threshold)\n script += \"stat_filename = '%s';\\n\" % self.inputs.stat_image\n script += (\"height_threshold_type = '%s';\\n\" %\n self.inputs.height_threshold_type)\n script += \"extent_threshold = %d;\\n\" % self.inputs.extent_threshold\n\n script += \"load %s;\\n\" % self.inputs.spm_mat_file\n script += \"\"\"\nFWHM = SPM.xVol.FWHM;\ndf = [SPM.xCon(con_index).eidf SPM.xX.erdf];\nSTAT = SPM.xCon(con_index).STAT;\nR = SPM.xVol.R;\nS = SPM.xVol.S;\nn = 1;\n\nswitch thresDesc\n case 'FWE'\n cluster_forming_thr = spm_uc(cluster_forming_thr,df,STAT,R,n,S);\n\n case 'none'\n if strcmp(height_threshold_type, 
'p-value')\n cluster_forming_thr = spm_u(cluster_forming_thr^(1/n),df,STAT);\n end\nend\n\nstat_map_vol = spm_vol(stat_filename);\n[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);\n\nZ = stat_map_data(:)';\n[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');\nXYZ = cat(1, x', y', z');\n\nXYZth = XYZ(:, Z >= cluster_forming_thr);\nZth = Z(Z >= cluster_forming_thr);\n\n\"\"\"\n script += ((\"spm_write_filtered(Zth,XYZth,stat_map_vol.dim',\"\n \"stat_map_vol.mat,'thresholded map', '%s');\\n\") %\n self._gen_pre_topo_map_filename())\n script += \"\"\"\nmax_size = 0;\nmax_size_index = 0;\nth_nclusters = 0;\nnclusters = 0;\nif isempty(XYZth)\n thresholded_XYZ = [];\n thresholded_Z = [];\nelse\n if use_topo_fdr\n V2R = 1/prod(FWHM(stat_map_vol.dim > 1));\n [uc,Pc,ue] = spm_uc_clusterFDR(cluster_extent_p_fdr_thr,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);\n end\n\n voxel_labels = spm_clusters(XYZth);\n nclusters = max(voxel_labels);\n\n thresholded_XYZ = [];\n thresholded_Z = [];\n\n for i = 1:nclusters\n cluster_size = sum(voxel_labels==i);\n if cluster_size > extent_threshold && (~use_topo_fdr || (cluster_size - uc) > -1)\n thresholded_XYZ = cat(2, thresholded_XYZ, XYZth(:,voxel_labels == i));\n thresholded_Z = cat(2, thresholded_Z, Zth(voxel_labels == i));\n th_nclusters = th_nclusters + 1;\n end\n if force_activation\n cluster_sum = sum(Zth(voxel_labels == i));\n if cluster_sum > max_size\n max_size = cluster_sum;\n max_size_index = i;\n end\n end\n end\nend\n\nactivation_forced = 0;\nif isempty(thresholded_XYZ)\n if force_activation && max_size ~= 0\n thresholded_XYZ = XYZth(:,voxel_labels == max_size_index);\n thresholded_Z = Zth(voxel_labels == max_size_index);\n th_nclusters = 1;\n activation_forced = 1;\n else\n thresholded_Z = [0];\n thresholded_XYZ = [1 1 1]';\n th_nclusters = 0;\n end\nend\n\nfprintf('activation_forced = %d\\\\n',activation_forced);\nfprintf('pre_topo_n_clusters = %d\\\\n',nclusters);\nfprintf('n_clusters = 
%d\\\\n',th_nclusters);\nfprintf('cluster_forming_thr = %f\\\\n',cluster_forming_thr);\n\n\"\"\"\n script += ((\"spm_write_filtered(thresholded_Z,thresholded_XYZ,\"\n \"stat_map_vol.dim',stat_map_vol.mat,'thresholded map',\"\n \" '%s');\\n\") % self._gen_thresholded_map_filename())\n\n return script\n\n def aggregate_outputs(self, runtime=None):\n outputs = self._outputs()\n setattr(outputs, 'thresholded_map',\n self._gen_thresholded_map_filename())\n setattr(outputs, 'pre_topo_fdr_map', self._gen_pre_topo_map_filename())\n for line in runtime.stdout.split('\\n'):\n if line.startswith(\"activation_forced = \"):\n setattr(outputs, 'activation_forced',\n line[len(\"activation_forced = \"):].strip() == \"1\")\n elif line.startswith(\"n_clusters = \"):\n setattr(outputs, 'n_clusters',\n int(line[len(\"n_clusters = \"):].strip()))\n elif line.startswith(\"pre_topo_n_clusters = \"):\n setattr(outputs, 'pre_topo_n_clusters',\n int(line[len(\"pre_topo_n_clusters = \"):].strip()))\n elif line.startswith(\"cluster_forming_thr = \"):\n setattr(outputs, 'cluster_forming_thr',\n float(line[len(\"cluster_forming_thr = \"):].strip()))\n return outputs\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n outputs['thresholded_map'] = self._gen_thresholded_map_filename()\n outputs['pre_topo_fdr_map'] = self._gen_pre_topo_map_filename()\n return outputs\n\n\nclass ThresholdStatisticsInputSpec(SPMCommandInputSpec):\n spm_mat_file = File(\n exists=True,\n desc='absolute path to SPM.mat',\n copyfile=True,\n mandatory=True)\n stat_image = File(\n exists=True, desc='stat image', copyfile=False, mandatory=True)\n contrast_index = traits.Int(\n mandatory=True, desc='which contrast in the SPM.mat to use')\n height_threshold = traits.Float(\n desc=('stat value for initial '\n 'thresholding (defining clusters)'),\n mandatory=True)\n extent_threshold = traits.Int(\n 0, usedefault=True, desc=\"Minimum cluster size in voxels\")\n\n\nclass ThresholdStatisticsOutputSpec(TraitedSpec):\n 
voxelwise_P_Bonf = traits.Float()\n voxelwise_P_RF = traits.Float()\n voxelwise_P_uncor = traits.Float()\n voxelwise_P_FDR = traits.Float()\n clusterwise_P_RF = traits.Float()\n clusterwise_P_FDR = traits.Float()\n\n\nclass ThresholdStatistics(SPMCommand):\n \"\"\"Given height and cluster size threshold calculate theoretical\n probabilities concerning false positives\n\n Examples\n --------\n\n >>> thresh = ThresholdStatistics()\n >>> thresh.inputs.spm_mat_file = 'SPM.mat'\n >>> thresh.inputs.stat_image = 'spmT_0001.img'\n >>> thresh.inputs.contrast_index = 1\n >>> thresh.inputs.height_threshold = 4.56\n >>> thresh.run() # doctest: +SKIP\n \"\"\"\n input_spec = ThresholdStatisticsInputSpec\n output_spec = ThresholdStatisticsOutputSpec\n\n def _make_matlab_command(self, _):\n script = \"con_index = %d;\\n\" % self.inputs.contrast_index\n script += \"cluster_forming_thr = %f;\\n\" % self.inputs.height_threshold\n script += \"stat_filename = '%s';\\n\" % self.inputs.stat_image\n script += \"extent_threshold = %d;\\n\" % self.inputs.extent_threshold\n script += \"load '%s'\\n\" % self.inputs.spm_mat_file\n script += \"\"\"\nFWHM = SPM.xVol.FWHM;\ndf = [SPM.xCon(con_index).eidf SPM.xX.erdf];\nSTAT = SPM.xCon(con_index).STAT;\nR = SPM.xVol.R;\nS = SPM.xVol.S;\nn = 1;\n\nvoxelwise_P_Bonf = spm_P_Bonf(cluster_forming_thr,df,STAT,S,n)\nvoxelwise_P_RF = spm_P_RF(1,0,cluster_forming_thr,df,STAT,R,n)\n\nstat_map_vol = spm_vol(stat_filename);\n[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);\n\nZ = stat_map_data(:);\nZum = Z;\n\n switch STAT\n case 'Z'\n VPs = (1-spm_Ncdf(Zum)).^n;\n voxelwise_P_uncor = (1-spm_Ncdf(cluster_forming_thr)).^n\n case 'T'\n VPs = (1 - spm_Tcdf(Zum,df(2))).^n;\n voxelwise_P_uncor = (1 - spm_Tcdf(cluster_forming_thr,df(2))).^n\n case 'X'\n VPs = (1-spm_Xcdf(Zum,df(2))).^n;\n voxelwise_P_uncor = (1-spm_Xcdf(cluster_forming_thr,df(2))).^n\n case 'F'\n VPs = (1 - spm_Fcdf(Zum,df)).^n;\n voxelwise_P_uncor = (1 - 
spm_Fcdf(cluster_forming_thr,df)).^n\n end\n VPs = sort(VPs);\n\nvoxelwise_P_FDR = spm_P_FDR(cluster_forming_thr,df,STAT,n,VPs)\n\nV2R = 1/prod(FWHM(stat_map_vol.dim > 1));\n\nclusterwise_P_RF = spm_P_RF(1,extent_threshold*V2R,cluster_forming_thr,df,STAT,R,n)\n\n[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');\nXYZ = cat(1, x', y', z');\n\n[u, CPs, ue] = spm_uc_clusterFDR(0.05,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);\n\nclusterwise_P_FDR = spm_P_clusterFDR(extent_threshold*V2R,df,STAT,R,n,cluster_forming_thr,CPs')\n\"\"\"\n return script\n\n def aggregate_outputs(self, runtime=None, needed_outputs=None):\n outputs = self._outputs()\n cur_output = \"\"\n for line in runtime.stdout.split('\\n'):\n if cur_output != \"\" and len(line.split()) != 0:\n setattr(outputs, cur_output, float(line))\n cur_output = \"\"\n continue\n if (len(line.split()) != 0 and line.split()[0] in [\n \"clusterwise_P_FDR\", \"clusterwise_P_RF\",\n \"voxelwise_P_Bonf\", \"voxelwise_P_FDR\", \"voxelwise_P_RF\",\n \"voxelwise_P_uncor\"\n ]):\n cur_output = line.split()[0]\n continue\n\n return outputs\n\n\nclass FactorialDesignInputSpec(SPMCommandInputSpec):\n spm_mat_dir = Directory(\n exists=True, field='dir', desc='directory to store SPM.mat file (opt)')\n # Need to make an alias of InputMultiPath; the inputs below are not Path\n covariates = InputMultiPath(\n traits.Dict(\n key_trait=traits.Enum('vector', 'name', 'interaction',\n 'centering')),\n field='cov',\n desc=('covariate dictionary {vector, name, '\n 'interaction, centering}'))\n threshold_mask_none = traits.Bool(\n field='masking.tm.tm_none',\n xor=['threshold_mask_absolute', 'threshold_mask_relative'],\n desc='do not use threshold masking')\n threshold_mask_absolute = traits.Float(\n field='masking.tm.tma.athresh',\n xor=['threshold_mask_none', 'threshold_mask_relative'],\n desc='use an absolute threshold')\n threshold_mask_relative = traits.Float(\n field='masking.tm.tmr.rthresh',\n 
xor=['threshold_mask_absolute', 'threshold_mask_none'],\n desc=('threshold using a '\n 'proportion of the global '\n 'value'))\n use_implicit_threshold = traits.Bool(\n field='masking.im',\n desc=('use implicit mask NaNs or '\n 'zeros to threshold'))\n explicit_mask_file = File(\n field='masking.em', # requires cell\n desc='use an implicit mask file to threshold')\n global_calc_omit = traits.Bool(\n field='globalc.g_omit',\n xor=['global_calc_mean', 'global_calc_values'],\n desc='omit global calculation')\n global_calc_mean = traits.Bool(\n field='globalc.g_mean',\n xor=['global_calc_omit', 'global_calc_values'],\n desc='use mean for global calculation')\n global_calc_values = traits.List(\n traits.Float,\n field='globalc.g_user.global_uval',\n xor=['global_calc_mean', 'global_calc_omit'],\n desc='omit global calculation')\n no_grand_mean_scaling = traits.Bool(\n field='globalm.gmsca.gmsca_no',\n desc=('do not perform grand mean '\n 'scaling'))\n global_normalization = traits.Enum(\n 1,\n 2,\n 3,\n field='globalm.glonorm',\n desc=('global normalization None-1, '\n 'Proportional-2, ANCOVA-3'))\n\n\nclass FactorialDesignOutputSpec(TraitedSpec):\n spm_mat_file = File(exists=True, desc='SPM mat file')\n\n\nclass FactorialDesign(SPMCommand):\n \"\"\"Base class for factorial designs\n\n http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=77\n\n \"\"\"\n\n input_spec = FactorialDesignInputSpec\n output_spec = FactorialDesignOutputSpec\n _jobtype = 'stats'\n _jobname = 'factorial_design'\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for spm\n \"\"\"\n if opt in ['spm_mat_dir', 'explicit_mask_file']:\n return np.array([str(val)], dtype=object)\n if opt in ['covariates']:\n outlist = []\n mapping = {\n 'name': 'cname',\n 'vector': 'c',\n 'interaction': 'iCFI',\n 'centering': 'iCC'\n }\n for dictitem in val:\n outdict = {}\n for key, keyval in list(dictitem.items()):\n outdict[mapping[key]] = keyval\n outlist.append(outdict)\n return 
outlist\n return super(FactorialDesign, self)._format_arg(opt, spec, val)\n\n def _parse_inputs(self):\n \"\"\"validate spm realign options if set to None ignore\n \"\"\"\n einputs = super(FactorialDesign, self)._parse_inputs()\n if not isdefined(self.inputs.spm_mat_dir):\n einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)\n return einputs\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n spm = os.path.join(os.getcwd(), 'SPM.mat')\n outputs['spm_mat_file'] = spm\n return outputs\n\n\nclass OneSampleTTestDesignInputSpec(FactorialDesignInputSpec):\n in_files = traits.List(\n File(exists=True),\n field='des.t1.scans',\n mandatory=True,\n minlen=2,\n desc='input files')\n\n\nclass OneSampleTTestDesign(FactorialDesign):\n \"\"\"Create SPM design for one sample t-test\n\n Examples\n --------\n\n >>> ttest = OneSampleTTestDesign()\n >>> ttest.inputs.in_files = ['cont1.nii', 'cont2.nii']\n >>> ttest.run() # doctest: +SKIP\n \"\"\"\n\n input_spec = OneSampleTTestDesignInputSpec\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for spm\n \"\"\"\n if opt in ['in_files']:\n return np.array(val, dtype=object)\n return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val)\n\n\nclass TwoSampleTTestDesignInputSpec(FactorialDesignInputSpec):\n # very unlikely that you will have a single image in one group, so setting\n # parameters to require at least two files in each group [SG]\n group1_files = traits.List(\n File(exists=True),\n field='des.t2.scans1',\n mandatory=True,\n minlen=2,\n desc='Group 1 input files')\n group2_files = traits.List(\n File(exists=True),\n field='des.t2.scans2',\n mandatory=True,\n minlen=2,\n desc='Group 2 input files')\n dependent = traits.Bool(\n field='des.t2.dept',\n desc=('Are the measurements dependent between '\n 'levels'))\n unequal_variance = traits.Bool(\n field='des.t2.variance',\n desc=('Are the variances equal or unequal '\n 'between groups'))\n\n\nclass 
TwoSampleTTestDesign(FactorialDesign):\n \"\"\"Create SPM design for two sample t-test\n\n Examples\n --------\n\n >>> ttest = TwoSampleTTestDesign()\n >>> ttest.inputs.group1_files = ['cont1.nii', 'cont2.nii']\n >>> ttest.inputs.group2_files = ['cont1a.nii', 'cont2a.nii']\n >>> ttest.run() # doctest: +SKIP\n \"\"\"\n\n input_spec = TwoSampleTTestDesignInputSpec\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for spm\n \"\"\"\n if opt in ['group1_files', 'group2_files']:\n return np.array(val, dtype=object)\n return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val)\n\n\nclass PairedTTestDesignInputSpec(FactorialDesignInputSpec):\n paired_files = traits.List(\n traits.List(File(exists=True), minlen=2, maxlen=2),\n field='des.pt.pair',\n mandatory=True,\n minlen=2,\n desc='List of paired files')\n grand_mean_scaling = traits.Bool(\n field='des.pt.gmsca', desc='Perform grand mean scaling')\n ancova = traits.Bool(\n field='des.pt.ancova', desc='Specify ancova-by-factor regressors')\n\n\nclass PairedTTestDesign(FactorialDesign):\n \"\"\"Create SPM design for paired t-test\n\n Examples\n --------\n\n >>> pttest = PairedTTestDesign()\n >>> pttest.inputs.paired_files = [['cont1.nii','cont1a.nii'],['cont2.nii','cont2a.nii']]\n >>> pttest.run() # doctest: +SKIP\n \"\"\"\n\n input_spec = PairedTTestDesignInputSpec\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for spm\n \"\"\"\n if opt in ['paired_files']:\n return [dict(scans=np.array(files, dtype=object)) for files in val]\n return super(PairedTTestDesign, self)._format_arg(opt, spec, val)\n\n\nclass MultipleRegressionDesignInputSpec(FactorialDesignInputSpec):\n in_files = traits.List(\n File(exists=True),\n field='des.mreg.scans',\n mandatory=True,\n minlen=2,\n desc='List of files')\n include_intercept = traits.Bool(\n True,\n field='des.mreg.incint',\n usedefault=True,\n desc='Include intercept in design')\n user_covariates = 
InputMultiPath(\n traits.Dict(key_trait=traits.Enum('vector', 'name', 'centering')),\n field='des.mreg.mcov',\n desc=('covariate dictionary {vector, '\n 'name, centering}'))\n\n\nclass MultipleRegressionDesign(FactorialDesign):\n \"\"\"Create SPM design for multiple regression\n\n Examples\n --------\n\n >>> mreg = MultipleRegressionDesign()\n >>> mreg.inputs.in_files = ['cont1.nii','cont2.nii']\n >>> mreg.run() # doctest: +SKIP\n \"\"\"\n\n input_spec = MultipleRegressionDesignInputSpec\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for spm\n \"\"\"\n if opt in ['in_files']:\n return np.array(val, dtype=object)\n if opt in ['user_covariates']:\n outlist = []\n mapping = {'name': 'cname', 'vector': 'c', 'centering': 'iCC'}\n for dictitem in val:\n outdict = {}\n for key, keyval in list(dictitem.items()):\n outdict[mapping[key]] = keyval\n outlist.append(outdict)\n return outlist\n return (super(MultipleRegressionDesign, self)._format_arg(\n opt, spec, val))\n"
] |
[
[
"numpy.array",
"scipy.io.loadmat"
]
] |
lucateo/lenstronomy
|
[
"3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab",
"3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab",
"3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab",
"3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab"
] |
[
"test/test_LensModel/test_Profiles/test_sersic_lens.py",
"lenstronomy/LensModel/Profiles/chameleon.py",
"lenstronomy/LensModel/Profiles/spemd.py",
"test/test_GalKin/test_numeric_kinematics.py"
] |
[
"__author__ = 'sibirrer'\n\nimport lenstronomy.Util.derivative_util as calc_util\nfrom lenstronomy.LensModel.Profiles.sersic import Sersic\nfrom lenstronomy.LightModel.Profiles.sersic import Sersic as Sersic_light\nfrom lenstronomy.LensModel.Profiles.sersic_ellipse_kappa import SersicEllipseKappa\nfrom lenstronomy.Util.param_util import ellipticity2phi_q\n\nimport numpy as np\nimport pytest\nimport numpy.testing as npt\n\n\nclass TestSersic(object):\n \"\"\"\n tests the Gaussian methods\n \"\"\"\n def setup(self):\n\n self.sersic_2 = SersicEllipseKappa()\n self.sersic = Sersic()\n self.sersic_light = Sersic_light()\n\n def test_function(self):\n\n x = 1\n y = 2\n n_sersic = 2.\n R_sersic = 1.\n k_eff = 0.2\n values = self.sersic.function(x, y, n_sersic, R_sersic, k_eff)\n npt.assert_almost_equal(values, 1.0272982586319199, decimal=10)\n\n x = np.array([0])\n y = np.array([0])\n values = self.sersic.function(x, y, n_sersic, R_sersic, k_eff)\n npt.assert_almost_equal(values[0], 0., decimal=9)\n\n x = np.array([2,3,4])\n y = np.array([1,1,1])\n values = self.sersic.function(x, y, n_sersic, R_sersic, k_eff)\n\n npt.assert_almost_equal(values[0], 1.0272982586319199, decimal=10)\n npt.assert_almost_equal(values[1], 1.3318743892966658, decimal=10)\n npt.assert_almost_equal(values[2], 1.584299393114988, decimal=10)\n\n def test_derivatives(self):\n x = np.array([1])\n y = np.array([2])\n n_sersic = 2.\n R_sersic = 1.\n k_eff = 0.2\n f_x, f_y = self.sersic.derivatives(x, y, n_sersic, R_sersic, k_eff)\n f_x2, f_y2 = self.sersic_2.derivatives(x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001)\n\n assert f_x[0] == 0.16556078301997193\n assert f_y[0] == 0.33112156603994386\n npt.assert_almost_equal(f_x2[0], f_x[0])\n npt.assert_almost_equal(f_y2[0], f_y[0])\n\n x = np.array([0])\n y = np.array([0])\n f_x, f_y = self.sersic.derivatives(x, y, n_sersic, R_sersic, k_eff)\n f_x2, f_y2 = self.sersic_2.derivatives(x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001)\n assert f_x[0] == 0\n 
assert f_y[0] == 0\n npt.assert_almost_equal(f_x2[0], f_x[0])\n npt.assert_almost_equal(f_y2[0], f_y[0])\n\n x = np.array([1,3,4])\n y = np.array([2,1,1])\n values = self.sersic.derivatives(x, y, n_sersic, R_sersic, k_eff)\n values2 = self.sersic_2.derivatives(x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001)\n assert values[0][0] == 0.16556078301997193\n assert values[1][0] == 0.33112156603994386\n assert values[0][1] == 0.2772992378623737\n assert values[1][1] == 0.092433079287457892\n npt.assert_almost_equal(values2[0][0], values[0][0])\n npt.assert_almost_equal(values2[1][0], values[1][0])\n npt.assert_almost_equal(values2[0][1], values[0][1])\n npt.assert_almost_equal(values2[1][1], values[1][1])\n\n values2 = self.sersic_2.derivatives(0.3, -0.2, n_sersic, R_sersic, k_eff, 0, 0.00000001)\n values = self.sersic.derivatives(0.3, -0.2, n_sersic, R_sersic, k_eff, 0, 0.00000001)\n npt.assert_almost_equal(values2[0], values[0])\n npt.assert_almost_equal(values2[1], values[1])\n\n def test_differentails(self):\n x_, y_ = 1., 1\n n_sersic = 2.\n R_sersic = 1.\n k_eff = 0.2\n r = np.sqrt(x_**2 + y_**2)\n\n d_alpha_dr = self.sersic.d_alpha_dr(x_, y_, n_sersic, R_sersic, k_eff)\n alpha = self.sersic.alpha_abs(x_, y_, n_sersic, R_sersic, k_eff)\n\n f_xx_ = d_alpha_dr * calc_util.d_r_dx(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dx(x_, y_)\n f_yy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * y_/r + alpha * calc_util.d_y_diffr_dy(x_, y_)\n f_xy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dy(x_, y_)\n\n f_xx = (d_alpha_dr/r - alpha/r**2) * y_**2/r + alpha/r\n f_yy = (d_alpha_dr/r - alpha/r**2) * x_**2/r + alpha/r\n f_xy = (d_alpha_dr/r - alpha/r**2) * x_*y_/r\n npt.assert_almost_equal(f_xx, f_xx_, decimal=10)\n npt.assert_almost_equal(f_yy, f_yy_, decimal=10)\n npt.assert_almost_equal(f_xy, f_xy_, decimal=10)\n\n def test_hessian(self):\n x = np.array([1])\n y = np.array([2])\n n_sersic = 2.\n R_sersic = 1.\n k_eff = 0.2\n f_xx, f_yy,f_xy = 
self.sersic.hessian(x, y, n_sersic, R_sersic, k_eff)\n assert f_xx[0] == 0.1123170666045793\n npt.assert_almost_equal(f_yy[0], -0.047414082641598576, decimal=10)\n npt.assert_almost_equal(f_xy[0], -0.10648743283078525 , decimal=10)\n x = np.array([1,3,4])\n y = np.array([2,1,1])\n values = self.sersic.hessian(x, y, n_sersic, R_sersic, k_eff)\n assert values[0][0] == 0.1123170666045793\n npt.assert_almost_equal(values[1][0], -0.047414082641598576, decimal=10)\n npt.assert_almost_equal(values[2][0], -0.10648743283078525 , decimal=10)\n npt.assert_almost_equal(values[0][1], -0.053273787681591328, decimal=10)\n npt.assert_almost_equal(values[1][1], 0.076243427402007985, decimal=10)\n npt.assert_almost_equal(values[2][1], -0.048568955656349749, decimal=10)\n\n f_xx2, f_yy2, f_xy2 = self.sersic_2.hessian(x, y, n_sersic, R_sersic, k_eff, 0.0000001, 0)\n npt.assert_almost_equal(f_xx2, values[0])\n npt.assert_almost_equal(f_yy2, values[1], decimal=6)\n npt.assert_almost_equal(f_xy2, values[2], decimal=6)\n\n def test_alpha_abs(self):\n x = 1.\n dr = 0.0000001\n n_sersic = 2.5\n R_sersic = .5\n k_eff = 0.2\n alpha_abs = self.sersic.alpha_abs(x, 0, n_sersic, R_sersic, k_eff)\n f_dr = self.sersic.function(x + dr, 0, n_sersic, R_sersic, k_eff)\n f_ = self.sersic.function(x, 0, n_sersic, R_sersic, k_eff)\n alpha_abs_num = -(f_dr - f_)/dr\n npt.assert_almost_equal(alpha_abs_num, alpha_abs, decimal=3)\n\n def test_dalpha_dr(self):\n x = 1.\n dr = 0.0000001\n n_sersic = 1.\n R_sersic = .5\n k_eff = 0.2\n d_alpha_dr = self.sersic.d_alpha_dr(x, 0, n_sersic, R_sersic, k_eff)\n alpha_dr = self.sersic.alpha_abs(x + dr, 0, n_sersic, R_sersic, k_eff)\n alpha = self.sersic.alpha_abs(x, 0, n_sersic, R_sersic, k_eff)\n d_alpha_dr_num = (alpha_dr - alpha)/dr\n npt.assert_almost_equal(d_alpha_dr, d_alpha_dr_num, decimal=3)\n\n def test_mag_sym(self):\n \"\"\"\n\n :return:\n \"\"\"\n r = 2.\n angle1 = 0.\n angle2 = 1.5\n x1 = r * np.cos(angle1)\n y1 = r * np.sin(angle1)\n\n x2 = r * 
np.cos(angle2)\n y2 = r * np.sin(angle2)\n n_sersic = 4.5\n R_sersic = 2.5\n k_eff = 0.8\n f_xx1, f_yy1, f_xy1 = self.sersic.hessian(x1, y1, n_sersic, R_sersic, k_eff)\n f_xx2, f_yy2, f_xy2 = self.sersic.hessian(x2, y2, n_sersic, R_sersic, k_eff)\n kappa_1 = (f_xx1 + f_yy1) / 2\n kappa_2 = (f_xx2 + f_yy2) / 2\n npt.assert_almost_equal(kappa_1, kappa_2, decimal=10)\n A_1 = (1 - f_xx1) * (1 - f_yy1) - f_xy1**2\n A_2 = (1 - f_xx2) * (1 - f_yy2) - f_xy2 ** 2\n npt.assert_almost_equal(A_1, A_2, decimal=10)\n\n def test_convergernce(self):\n \"\"\"\n test the convergence and compares it with the original Sersic profile\n :return:\n \"\"\"\n x = np.array([0, 0, 0, 0, 0])\n y = np.array([0.5, 1, 1.5, 2, 2.5])\n n_sersic = 4.5\n R_sersic = 2.5\n k_eff = 0.2\n f_xx, f_yy, f_xy = self.sersic.hessian(x, y, n_sersic, R_sersic, k_eff)\n kappa = (f_xx + f_yy) / 2.\n assert kappa[0] > 0\n flux = self.sersic_light.function(x, y, amp=1., R_sersic=R_sersic, n_sersic=n_sersic)\n flux /= flux[0]\n kappa /= kappa[0]\n npt.assert_almost_equal(flux[1], kappa[1], decimal=5)\n\n xvalues = np.linspace(0.5, 3., 100)\n\n e1, e2 = 0.4, 0.\n q = ellipticity2phi_q(e1, e2)[1]\n kappa_ellipse = self.sersic_2.projected_mass(xvalues, 0, q, n_sersic, R_sersic, k_eff)\n fxx, fyy, _ = self.sersic_2.hessian(xvalues, 0, n_sersic, R_sersic, k_eff, e1, e2)\n\n npt.assert_almost_equal(kappa_ellipse, 0.5*(fxx + fyy), decimal=5)\n\n def test_sersic_util(self):\n n = 1.\n Re = 2.\n k, bn = self.sersic.k_bn(n, Re)\n Re_new = self.sersic.k_Re(n, k)\n assert Re == Re_new\n\nif __name__ == '__main__':\n pytest.main()",
"from lenstronomy.LensModel.Profiles.nie import NIE\nfrom lenstronomy.LensModel.Profiles.point_mass import PointMass\nfrom lenstronomy.LensModel.Profiles.base_profile import LensProfileBase\nimport lenstronomy.Util.param_util as param_util\nimport numpy as np\n\nfrom lenstronomy.Util.package_util import exporter\nexport, __all__ = exporter()\n\n\n@export\nclass Chameleon(LensProfileBase):\n \"\"\"\n class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile\n\n \"\"\"\n param_names = ['alpha_1', 'w_c', 'w_t', 'e1', 'e2', 'center_x', 'center_y']\n lower_limit_default = {'alpha_1': 0, 'w_c': 0, 'w_t': 0, 'e1': -0.8, 'e2': -0.8, 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'alpha_1': 100, 'w_c': 100, 'w_t': 100, 'e1': 0.8, 'e2': 0.8, 'center_x': 100, 'center_y': 100}\n\n def __init__(self, static=False):\n self._nie_1 = NIE()\n self._nie_2 = NIE()\n super(Chameleon, self).__init__()\n self._static = static\n\n def function(self, x, y, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0):\n \"\"\"\n\n :param x: ra-coordinate\n :param y: dec-coordinate\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param w_c: see Suyu+2014\n :param w_t: see Suyu+2014\n :param e1: ellipticity parameter\n :param e2: ellipticity parameter\n :param center_x: ra center\n :param center_y: dec center\n :return: lensing potential\n \"\"\"\n\n theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2)\n f_1 = self._nie_1.function(x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y)\n f_2 = self._nie_2.function(x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y)\n f_ = f_1 - f_2\n return f_\n\n def derivatives(self, x, y, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0):\n \"\"\"\n\n :param x: ra-coordinate\n :param y: dec-coordinate\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param w_c: see Suyu+2014\n :param w_t: see Suyu+2014\n :param e1: 
ellipticity parameter\n :param e2: ellipticity parameter\n :param center_x: ra center\n :param center_y: dec center\n :return: deflection angles (RA, DEC)\n \"\"\"\n theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2)\n f_x_1, f_y_1 = self._nie_1.derivatives(x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y)\n f_x_2, f_y_2 = self._nie_2.derivatives(x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y)\n f_x = f_x_1 - f_x_2\n f_y = f_y_1 - f_y_2\n return f_x, f_y\n\n def hessian(self, x, y, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0):\n \"\"\"\n\n :param x: ra-coordinate\n :param y: dec-coordinate\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param w_c: see Suyu+2014\n :param w_t: see Suyu+2014\n :param e1: ellipticity parameter\n :param e2: ellipticity parameter\n :param center_x: ra center\n :param center_y: dec center\n :return: second derivatives of the lensing potential (Hessian: f_xx, f_yy, f_xy)\n \"\"\"\n theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2)\n f_xx_1, f_yy_1, f_xy_1 = self._nie_1.hessian(x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y)\n f_xx_2, f_yy_2, f_xy_2 = self._nie_2.hessian(x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y)\n f_xx = f_xx_1 - f_xx_2\n f_yy = f_yy_1 - f_yy_2\n f_xy = f_xy_1 - f_xy_2\n return f_xx, f_yy, f_xy\n\n def param_convert(self, alpha_1, w_c, w_t, e1, e2):\n \"\"\"\n convert the parameter alpha_1 (deflection angle one arcsecond from the center) into the\n \"Einstein radius\" scale parameter of the two NIE profiles\n\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param w_c: see Suyu+2014\n :param w_t: see Suyu+2014\n :return:\n \"\"\"\n if self._static is True:\n return self._theta_convert_static, self._w_c_static, self._w_t_stactic, self._s_scale_1_static, self._s_scale_2_static\n return self._param_convert(alpha_1, w_c, w_t, e1, e2)\n\n def 
_param_convert(self, alpha_1, w_c, w_t, e1, e2):\n if not w_t >= w_c:\n return 0, w_t, w_c, 1, 1\n s_scale_1 = w_c\n s_scale_2 = w_t\n f_x_1, f_y_1 = self._nie_1.derivatives(1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_1)\n f_x_2, f_y_2 = self._nie_2.derivatives(1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_2)\n f_x = f_x_1 - f_x_2\n theta_E_convert = alpha_1 / f_x\n phi_G, q = param_util.ellipticity2phi_q(e1, e2)\n s_scale_1 = np.sqrt(4 * w_c ** 2 / (1. + q) ** 2)\n s_scale_2 = np.sqrt(4 * w_t ** 2 / (1. + q) ** 2)\n return theta_E_convert, w_c, w_t, s_scale_1, s_scale_2\n\n def set_static(self, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0):\n \"\"\"\n\n :param logM:\n :param concentration:\n :param center_x:\n :param center_y:\n :return:\n \"\"\"\n self._static = True\n self._theta_convert_static, self._w_c_static, self._w_t_stactic, self._s_scale_1_static, self._s_scale_2_static = self._param_convert(alpha_1, w_c, w_t, e1, e2)\n self._nie_1.set_static(self._theta_convert_static, e1, e2, self._s_scale_1_static, center_x, center_y)\n self._nie_2.set_static(self._theta_convert_static, e1, e2, self._s_scale_2_static, center_x, center_y)\n\n def set_dynamic(self):\n \"\"\"\n\n :return:\n \"\"\"\n self._static = False\n if hasattr(self, '_theta_convert_static'):\n del self._theta_convert_static\n if hasattr(self, '_w_c_static'):\n del self._w_c_static\n if hasattr(self, '_w_t_stactic'):\n del self._w_t_stactic\n if hasattr(self, '_s_scale_1_static'):\n del self._s_scale_1_static\n if hasattr(self, '_s_scale_2_static'):\n del self._s_scale_2_static\n self._nie_1.set_dynamic()\n self._nie_2.set_dynamic()\n\n\n@export\nclass DoubleChameleon(LensProfileBase):\n \"\"\"\n class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile\n\n \"\"\"\n param_names = ['alpha_1', 'ratio', 'w_c1', 'w_t1', 'e11', 'e21', 'w_c2', 'w_t2', 'e12', 'e22', 'center_x', 'center_y']\n lower_limit_default = {'alpha_1': 0, 'ratio': 0, 'w_c1': 0, 'w_t1': 0, 
'e11': -0.8, 'e21': -0.8,\n 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8,\n 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'alpha_1': 100, 'ratio': 100, 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8,\n 'w_c2': 100, 'w_t2': 100, 'e12': 0.8, 'e22': 0.8,\n 'center_x': 100, 'center_y': 100}\n\n def __init__(self):\n self._chameleon_1 = Chameleon()\n self._chameleon_2 = Chameleon()\n super(DoubleChameleon, self).__init__()\n\n def function(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0):\n \"\"\"\n :param x: ra-coordinate\n :param y: dec-coordinate\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param ratio: ratio of deflection amplitude at radius = 1 of the first to second Chameleon profile\n :param w_c1: Suyu+2014 for first profile\n :param w_t1: Suyu+2014 for first profile\n :param e11: ellipticity parameter for first profile\n :param e21: ellipticity parameter for first profile\n :param w_c2: Suyu+2014 for second profile\n :param w_t2: Suyu+2014 for second profile\n :param e12: ellipticity parameter for second profile\n :param e22: ellipticity parameter for second profile\n :param center_x: ra center\n :param center_y: dec center\n :return: lensing potential\n \"\"\"\n\n f_1 = self._chameleon_1.function(x, y, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y)\n f_2 = self._chameleon_2.function(x, y, alpha_1 / (1. 
+ ratio), w_c2, w_t2, e12, e22, center_x, center_y)\n return f_1 + f_2\n\n def derivatives(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0):\n \"\"\"\n :param x: ra-coordinate\n :param y: dec-coordinate\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param ratio: ratio of deflection amplitude at radius = 1 of the first to second Chameleon profile\n :param w_c1: Suyu+2014 for first profile\n :param w_t1: Suyu+2014 for first profile\n :param e11: ellipticity parameter for first profile\n :param e21: ellipticity parameter for first profile\n :param w_c2: Suyu+2014 for second profile\n :param w_t2: Suyu+2014 for second profile\n :param e12: ellipticity parameter for second profile\n :param e22: ellipticity parameter for second profile^V\n :param center_x: ra center\n :param center_y: dec center\n :return: deflection angles (RA, DEC)\n \"\"\"\n f_x1, f_y1 = self._chameleon_1.derivatives(x, y, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y)\n f_x2, f_y2 = self._chameleon_2.derivatives(x, y, alpha_1 / (1. 
+ ratio), w_c2, w_t2, e12, e22, center_x, center_y)\n return f_x1 + f_x2, f_y1 + f_y2\n\n def hessian(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0):\n \"\"\"\n :param x: ra-coordinate\n :param y: dec-coordinate\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param ratio: ratio of deflection amplitude at radius = 1 of the first to second Chameleon profile\n :param w_c1: Suyu+2014 for first profile\n :param w_t1: Suyu+2014 for first profile\n :param e11: ellipticity parameter for first profile\n :param e21: ellipticity parameter for first profile\n :param w_c2: Suyu+2014 for second profile\n :param w_t2: Suyu+2014 for second profile\n :param e12: ellipticity parameter for second profile\n :param e22: ellipticity parameter for second profile\n :param center_x: ra center\n :param center_y: dec center\n :return: second derivatives of the lensing potential (Hessian: f_xx, f_yy, f_xy)\n \"\"\"\n f_xx1, f_yy1, f_xy1 = self._chameleon_1.hessian(x, y, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y)\n f_xx2, f_yy2, f_xy2 = self._chameleon_2.hessian(x, y, alpha_1 / (1. + ratio), w_c2, w_t2, e12, e22, center_x, center_y)\n return f_xx1 + f_xx2, f_yy1 + f_yy2, f_xy1 + f_xy2\n\n def set_static(self, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0):\n self._chameleon_1.set_static(alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y)\n self._chameleon_2.set_static(alpha_1 / (1. 
+ ratio), w_c2, w_t2, e12, e22, center_x, center_y)\n\n def set_dynamic(self):\n self._chameleon_1.set_dynamic()\n self._chameleon_2.set_dynamic()\n\n\n@export\nclass TripleChameleon(LensProfileBase):\n \"\"\"\n class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile\n\n \"\"\"\n param_names = ['alpha_1', 'ratio12', 'ratio13', 'w_c1', 'w_t1', 'e11', 'e21', 'w_c2', 'w_t2', 'e12', 'e22', 'w_c3', 'w_t3', 'e13',\n 'e23', 'center_x', 'center_y']\n lower_limit_default = {'alpha_1': 0, 'ratio12': 0, 'ratio13': 0, 'w_c1': 0, 'w_t1': 0, 'e11': -0.8, 'e21': -0.8,\n 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8,\n 'w_c3': 0, 'w_t3': 0, 'e13': -0.8, 'e23': -0.8,\n 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'alpha_1': 100, 'ratio12': 100, 'ratio13': 100, 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8,\n 'w_c2': 100, 'w_t2': 100, 'e12': 0.8, 'e22': 0.8,\n 'w_c3': 100, 'w_t3': 100, 'e13': 0.8, 'e23': 0.8,\n 'center_x': 100, 'center_y': 100}\n\n def __init__(self):\n self._chameleon_1 = Chameleon()\n self._chameleon_2 = Chameleon()\n self._chameleon_3 = Chameleon()\n super(TripleChameleon, self).__init__()\n\n def _ratio_definition(self, alpha_1, ratio12, ratio13):\n \"\"\"\n\n :param alpha_1: deflection angle at 1 arcsecond\n :param ratio12: ratio of first to second amplitude\n :param ratio13: ratio of first to third amplitude\n :return: amplitudes of individual chameleon profiles\n \"\"\"\n amp1 = alpha_1 / (1. + 1. / ratio12 + 1. 
/ ratio13)\n amp2 = amp1 / ratio12\n amp3 = amp1 / ratio13\n return amp1, amp2, amp3\n\n def function(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23,\n center_x=0, center_y=0):\n \"\"\"\n\n :param alpha_1:\n :param ratio12: ratio of first to second amplitude\n :param ratio13: ratio of first to third amplitude\n :param w_c1:\n :param w_t1:\n :param e11:\n :param e21:\n :param w_c2:\n :param w_t2:\n :param e12:\n :param e22:\n :param center_x:\n :param center_y:\n :return:\n \"\"\"\n amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13)\n f_1 = self._chameleon_1.function(x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y)\n f_2 = self._chameleon_2.function(x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y)\n f_3 = self._chameleon_3.function(x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y)\n return f_1 + f_2 + f_3\n\n def derivatives(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23,\n center_x=0, center_y=0):\n \"\"\"\n\n :param alpha_1:\n :param ratio12: ratio of first to second amplitude\n :param ratio13: ratio of first to third amplidute\n :param w_c1:\n :param w_t1:\n :param e11:\n :param e21:\n :param w_c2:\n :param w_t2:\n :param e12:\n :param e22:\n :param center_x:\n :param center_y:\n :return:\n \"\"\"\n amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13)\n f_x1, f_y1 = self._chameleon_1.derivatives(x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y)\n f_x2, f_y2 = self._chameleon_2.derivatives(x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y)\n f_x3, f_y3 = self._chameleon_3.derivatives(x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y)\n return f_x1 + f_x2 + f_x3, f_y1 + f_y2 + f_y3\n\n def hessian(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23,\n center_x=0, center_y=0):\n \"\"\"\n\n :param alpha_1:\n :param ratio12: ratio of first to 
second amplitude\n :param ratio13: ratio of first to third amplidute\n :param w_c1:\n :param w_t1:\n :param e11:\n :param e21:\n :param w_c2:\n :param w_t2:\n :param e12:\n :param e22:\n :param center_x:\n :param center_y:\n :return:\n \"\"\"\n amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13)\n f_xx1, f_yy1, f_xy1 = self._chameleon_1.hessian(x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y)\n f_xx2, f_yy2, f_xy2 = self._chameleon_2.hessian(x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y)\n f_xx3, f_yy3, f_xy3 = self._chameleon_3.hessian(x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y)\n return f_xx1 + f_xx2 + f_xx3, f_yy1 + f_yy2 + f_yy3, f_xy1 + f_xy2 + f_xy3\n\n def set_static(self, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23,\n center_x=0, center_y=0):\n amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13)\n self._chameleon_1.set_static(amp1, w_c1, w_t1, e11, e21, center_x, center_y)\n self._chameleon_2.set_static(amp2, w_c2, w_t2, e12, e22, center_x, center_y)\n self._chameleon_3.set_static(amp3, w_c3, w_t3, e13, e23, center_x, center_y)\n\n def set_dynamic(self):\n self._chameleon_1.set_dynamic()\n self._chameleon_2.set_dynamic()\n self._chameleon_3.set_dynamic()\n\n\n@export\nclass DoubleChameleonPointMass(LensProfileBase):\n \"\"\"\n class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile\n\n \"\"\"\n param_names = ['alpha_1', 'ratio_chameleon', 'ratio_pointmass', 'w_c1', 'w_t1', 'e11', 'e21', 'w_c2', 'w_t2',\n 'e12', 'e22', 'center_x', 'center_y']\n lower_limit_default = {'alpha_1': 0, 'ratio_chameleon': 0, 'ratio_pointmass': 0, 'w_c1': 0, 'w_t1': 0, 'e11': -0.8,\n 'e21': -0.8, 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8,\n 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'alpha_1': 100, 'ratio_chameleon': 100, 'ratio_pointmass': 100, 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8,\n 'w_c2': 100, 'w_t2': 
100, 'e12': 0.8, 'e22': 0.8,\n 'center_x': 100, 'center_y': 100}\n\n def __init__(self):\n self.chameleon = DoubleChameleon()\n self.pointMass = PointMass()\n super(DoubleChameleonPointMass, self).__init__()\n\n def function(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22,\n center_x=0, center_y=0):\n \"\"\"\n #TODO chose better parameterization for combining point mass and Chameleon profiles\n :param x: ra-coordinate\n :param y: dec-coordinate\n :param alpha_1: deflection angle at 1 (arcseconds) from the center\n :param ratio_pointmass: ratio of point source Einstein radius to combined Chameleon deflection angle at r=1\n :param ratio_chameleon: ratio in deflection angles at r=1 for the two Chameleon profiles\n :param w_c1: Suyu+2014 for first profile\n :param w_t1: Suyu+2014 for first profile\n :param e11: ellipticity parameter for first profile\n :param e21: ellipticity parameter for first profile\n :param w_c2: Suyu+2014 for second profile\n :param w_t2: Suyu+2014 for second profile\n :param e12: ellipticity parameter for second profile\n :param e22: ellipticity parameter for second profile\n :param center_x: ra center\n :param center_y: dec center\n :return:\n \"\"\"\n f_1 = self.pointMass.function(x, y, alpha_1 / (1. + 1. / ratio_pointmass), center_x, center_y)\n f_2 = self.chameleon.function(x, y, alpha_1 / (1. 
+ ratio_pointmass), ratio_chameleon, w_c1, w_t1, e11, e21,\n w_c2, w_t2, e12, e22, center_x, center_y)\n return f_1 + f_2\n\n def derivatives(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22,\n center_x=0, center_y=0):\n \"\"\"\n\n :param x:\n :param y:\n :param alpha_1:\n :param ratio_pointmass: ratio of point source Einstein radius to combined Chameleon deflection angle at r=1\n :param ratio_chameleon: ratio in deflection angles at r=1 for the two Chameleon profiles\n :param w_c1: Suyu+2014 for first profile\n :param w_t1: Suyu+2014 for first profile\n :param e11: ellipticity parameter for first profile\n :param e21: ellipticity parameter for first profile\n :param w_c2: Suyu+2014 for second profile\n :param w_t2: Suyu+2014 for second profile\n :param e12: ellipticity parameter for second profile\n :param e22: ellipticity parameter for second profile\n :param center_x: ra center\n :param center_y: dec center\n :return:\n \"\"\"\n f_x1, f_y1 = self.pointMass.derivatives(x, y, alpha_1 / (1. + 1. / ratio_pointmass), center_x, center_y)\n f_x2, f_y2 = self.chameleon.derivatives(x, y, alpha_1 / (1. 
+ ratio_pointmass), ratio_chameleon, w_c1, w_t1,\n e11, e21, w_c2, w_t2, e12, e22, center_x, center_y)\n return f_x1 + f_x2, f_y1 + f_y2\n\n def hessian(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22,\n center_x=0, center_y=0):\n \"\"\"\n\n :param x:\n :param y:\n :param alpha_1:\n :param ratio_pointmass: ratio of point source Einstein radius to combined Chameleon deflection angle at r=1\n :param ratio_chameleon: ratio in deflection angles at r=1 for the two Chameleon profiles\n :param w_c1: Suyu+2014 for first profile\n :param w_t1: Suyu+2014 for first profile\n :param e11: ellipticity parameter for first profile\n :param e21: ellipticity parameter for first profile\n :param w_c2: Suyu+2014 for second profile\n :param w_t2: Suyu+2014 for second profile\n :param e12: ellipticity parameter for second profile\n :param e22: ellipticity parameter for second profile\n :param center_x: ra center\n :param center_y: dec center\n :return:\n \"\"\"\n f_xx1, f_yy1, f_xy1 = self.pointMass.hessian(x, y, alpha_1 / (1. + 1. / ratio_pointmass), center_x, center_y)\n f_xx2, f_yy2, f_xy2 = self.chameleon.hessian(x, y, alpha_1 / (1. + ratio_pointmass), ratio_chameleon, w_c1, w_t1,\n e11, e21, w_c2, w_t2, e12, e22, center_x, center_y)\n return f_xx1 + f_xx2, f_yy1 + f_yy2, f_xy1 + f_xy2\n",
"__author__ = 'sibirrer'\n\nimport numpy as np\nimport lenstronomy.Util.param_util as param_util\nfrom lenstronomy.LensModel.Profiles.base_profile import LensProfileBase\n\n__all__ = ['SPEMD']\n\n\nclass SPEMD(LensProfileBase):\n \"\"\"\n class for smooth power law ellipse mass density profile. This class effectively performs the FASTELL calculations\n by Renan Barkana. The parameters are changed and represent a spherically averaged Einstein radius an a logarithmic\n 3D mass profile slope.\n\n The Einstein ring parameter converts to the definition used by GRAVLENS as follow:\n (theta_E / theta_E_gravlens) = sqrt[ (1+q^2) / (2 q) ]\n\n\n FASTELL has the following defintions:\n The parameters are position (x1,x2), overall factor\n (q), power (gam), axis ratio (arat) which is <=1, core radius\n squared (s2), and the output potential (phi).\n The projected mass density distribution, in units of the\n critical density, is kappa(x1,x2)=q [u2+s2]^(-gam), where\n u2=[x1^2+x2^2/(arat^2)].\n \"\"\"\n param_names = ['theta_E', 'gamma', 'e1', 'e2', 's_scale', 'center_x', 'center_y']\n lower_limit_default = {'theta_E': 0, 'gamma': 0, 'e1': -0.5, 'e2': -0.5, 's_scale': 0, 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'theta_E': 100, 'gamma': 100, 'e1': 0.5, 'e2': 0.5, 's_scale': 100, 'center_x': 100, 'center_y': 100}\n\n def __init__(self, suppress_fastell=False):\n \"\"\"\n\n \"\"\"\n try:\n from fastell4py import fastell4py\n self._fastell4py_bool = True\n self.fastell4py = fastell4py\n except:\n self._fastell4py_bool = False\n if suppress_fastell:\n ImportWarning(\"module fastell4py not installed. You can get it from here: \"\n \"https://github.com/sibirrer/fastell4py \"\n \"Make sure you have a fortran compiler such that the installation works properly.\")\n Warning(\"SPEMD model outputs are replaced by zeros as fastell4py package is not installed!\")\n else:\n raise ImportError(\"module fastell4py not installed. 
You can get it from here: \"\n \"https://github.com/sibirrer/fastell4py \"\n \"Make sure you have a fortran compiler such that the installation works properly.\")\n super(SPEMD, self).__init__()\n\n def function(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y-coordinate (angle)\n :param theta_E: Einstein radius (angle), pay attention to specific definition!\n :param gamma: logarithmic slope of the power-law profile. gamma=2 corresponds to isothermal\n :param e1: eccentricity component\n :param e2: eccentricity component\n :param s_scale: smoothing scale in the center of the profile (angle)\n :param center_x: x-position of lens center\n :param center_y: y-position of lens center\n :return: lensing potential\n \"\"\"\n x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform(x, y, theta_E, gamma, e1, e2, s_scale, center_x,\n center_y)\n compute_bool = self._parameter_constraints(q_fastell, gam, s2, q)\n if self._fastell4py_bool and self.is_not_empty(x1, x2) and compute_bool:\n potential = self.fastell4py.ellipphi(x1, x2, q_fastell, gam, arat=q, s2=s2)\n n = len(np.atleast_1d(x))\n if n <= 1:\n if np.shape(x) == ():\n return np.array(potential[0])\n else:\n potential = np.zeros_like(x1)\n return potential\n\n def derivatives(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y-coordinate (angle)\n :param theta_E: Einstein radius (angle), pay attention to specific definition!\n :param gamma: logarithmic slope of the power-law profile. 
gamma=2 corresponds to isothermal\n :param e1: eccentricity component\n :param e2: eccentricity component\n :param s_scale: smoothing scale in the center of the profile\n :param center_x: x-position of lens center\n :param center_y: y-position of lens center\n :return: deflection angles alpha_x, alpha_y\n \"\"\"\n x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform(x, y, theta_E, gamma, e1, e2, s_scale, center_x,\n center_y)\n compute_bool = self._parameter_constraints(q_fastell, gam, s2, q)\n if self._fastell4py_bool and self.is_not_empty(x1, x2) and compute_bool:\n f_x_prim, f_y_prim = self.fastell4py.fastelldefl(x1, x2, q_fastell, gam, arat=q, s2=s2)\n else:\n f_x_prim, f_y_prim = np.zeros_like(x1), np.zeros_like(x1)\n cos_phi = np.cos(phi_G)\n sin_phi = np.sin(phi_G)\n\n f_x = cos_phi*f_x_prim - sin_phi*f_y_prim\n f_y = sin_phi*f_x_prim + cos_phi*f_y_prim\n return f_x, f_y\n\n def hessian(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0):\n \"\"\"\n\n :param x: x-coordinate (angle)\n :param y: y-coordinate (angle)\n :param theta_E: Einstein radius (angle), pay attention to specific definition!\n :param gamma: logarithmic slope of the power-law profile. 
gamma=2 corresponds to isothermal\n :param e1: eccentricity component\n :param e2: eccentricity component\n :param s_scale: smoothing scale in the center of the profile\n :param center_x: x-position of lens center\n :param center_y: y-position of lens center\n :return: Hessian components f_xx, f_yy, f_xy\n \"\"\"\n x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform(x, y, theta_E, gamma, e1, e2, s_scale, center_x, center_y)\n compute_bool = self._parameter_constraints(q_fastell, gam, s2, q)\n if self._fastell4py_bool and self.is_not_empty(x1, x2) and compute_bool:\n f_x_prim, f_y_prim, f_xx_prim, f_yy_prim, f_xy_prim = self.fastell4py.fastellmag(x1, x2, q_fastell, gam,\n arat=q, s2=s2)\n n = len(np.atleast_1d(x))\n if n <= 1:\n if np.shape(x) == ():\n f_xx_prim, f_yy_prim, f_xy_prim = np.array(f_xx_prim[0]), np.array(f_yy_prim[0]), np.array(\n f_xy_prim[0])\n else:\n f_xx_prim, f_yy_prim, f_xy_prim = np.zeros_like(x1), np.zeros_like(x1), np.zeros_like(x1)\n kappa = (f_xx_prim + f_yy_prim)/2\n gamma1_value = (f_xx_prim - f_yy_prim)/2\n gamma2_value = f_xy_prim\n\n gamma1 = np.cos(2*phi_G)*gamma1_value-np.sin(2*phi_G)*gamma2_value\n gamma2 = +np.sin(2*phi_G)*gamma1_value+np.cos(2*phi_G)*gamma2_value\n\n f_xx = kappa + gamma1\n f_yy = kappa - gamma1\n f_xy = gamma2\n return f_xx, f_yy, f_xy\n\n def param_transform(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0):\n \"\"\"\n transforms parameters in the format of fastell4py\n\n :param x: x-coordinate (angle)\n :param y: y-coordinate (angle)\n :param theta_E: Einstein radius (angle), pay attention to specific definition!\n :param gamma: logarithmic slope of the power-law profile. 
gamma=2 corresponds to isothermal\n :param e1: eccentricity component\n :param e2: eccentricity component\n :param s_scale: smoothing scale in the center of the profile\n :param center_x: x-position of lens center\n :param center_y: y-position of lens center\n :return: x-rotated, y-rotated, q_fastell, gam, s2, q, phi_G\n \"\"\"\n phi_G, q = param_util.ellipticity2phi_q(e1, e2)\n x = np.array(x)\n y = np.array(y)\n x_shift = x - center_x\n y_shift = y - center_y\n q_fastell, gam, s2 = self.convert_params(theta_E, gamma, q, s_scale)\n cos_phi = np.cos(phi_G)\n sin_phi = np.sin(phi_G)\n\n x1 = cos_phi * x_shift + sin_phi * y_shift\n x2 = -sin_phi * x_shift + cos_phi * y_shift\n return x1, x2, q_fastell, gam, s2, q, phi_G\n\n @staticmethod\n def convert_params(theta_E, gamma, q, s_scale):\n \"\"\"\n converts parameter defintions into quantities used by the FASTELL fortran library\n\n :param theta_E: Einstein radius\n :param gamma: 3D power-law slope of mass profile\n :param q: axis ratio minor/major\n :param s_scale: float, smoothing scale in the core\n :return: pre-factors to SPEMP profile for FASTELL\n \"\"\"\n gam = (gamma-1)/2.\n q_fastell = (3-gamma)/2. * (theta_E ** 2 / q) ** gam\n s2 = s_scale ** 2\n return q_fastell, gam, s2\n\n @staticmethod\n def is_not_empty(x1, x2):\n \"\"\"\n Check if float or not an empty array\n :return: True if x1 and x2 are either floats/ints or an non-empty array, False if e.g. 
objects are []\n :rtype: bool\n \"\"\"\n assert type(x1) == type(x2)\n\n if isinstance(x1, (list, tuple, np.ndarray)):\n if len(x1) != 0 and len(x2) != 0:\n return True\n else:\n return False\n else:\n return True\n\n @staticmethod\n def _parameter_constraints(q_fastell, gam, s2, q):\n \"\"\"\n sets bounds to parameters due to numerical stability\n\n FASTELL has the following defintions:\n The parameters are position (x1,x2), overall factor\n (q), power (gam), axis ratio (arat) which is <=1, core radius\n squared (s2), and the output potential (phi).\n The projected mass density distribution, in units of the\n critical density, is kappa(x1,x2)=q [u2+s2]^(-gam), where\n u2=[x1^2+x2^2/(arat^2)].\n\n :param q_fastell: float, normalization of lens model, q_fastell = (3-gamma)/2. * (theta_E ** 2 / q) ** gam\n :param gam: float, slope parameter, gam = (gamma-1)/2.\n :param q: axis ratio\n :param s2: square of smoothing scale of the core\n :return: bool of whether or not to let the fastell provide to be evaluated or instead return zero(s)\n \"\"\"\n if q_fastell < 0 or s2 < 0.0000000000001 or q > 1 or q < 0.01 or gam > 0.999 or gam < 0.001 or \\\n not np.isfinite(q_fastell):\n return False\n return True\n",
"\"\"\"\nTests for `Galkin` module.\n\"\"\"\nimport pytest\nimport numpy as np\nimport numpy.testing as npt\n\nfrom lenstronomy.GalKin.numeric_kinematics import NumericKinematics\nfrom lenstronomy.GalKin.analytic_kinematics import AnalyticKinematics\n\n\nclass TestMassProfile(object):\n\n def setup(self):\n pass\n\n def test_mass_3d(self):\n kwargs_model = {'mass_profile_list': ['HERNQUIST'], 'light_profile_list': ['HERNQUIST'],\n 'anisotropy_model': 'isotropic'}\n massProfile = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo={'d_d': 1., 'd_s': 2., 'd_ds': 1.})\n r = 0.3\n kwargs_profile = [{'sigma0': 1., 'Rs': 0.5}]\n mass_3d = massProfile._mass_3d_interp(r, kwargs_profile)\n mass_3d_exact = massProfile.mass_3d(r, kwargs_profile)\n npt.assert_almost_equal(mass_3d/mass_3d_exact, 1., decimal=3)\n\n def test_sigma_r2(self):\n \"\"\"\n tests the solution of the Jeans equation for sigma**2(r), where r is the 3d radius.\n Test is compared to analytic OM solution with power-law and Hernquist light profile\n\n :return:\n \"\"\"\n light_profile_list = ['HERNQUIST']\n r_eff = 1.5\n Rs = 0.551 * r_eff\n kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec\n # 0.551 *\n # mass profile\n mass_profile_list = ['SPP']\n theta_E = 1.2\n gamma = 2.\n kwargs_mass = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope\n\n # anisotropy profile\n anisotropy_type = 'OM'\n r_ani = 2.\n kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec]\n\n kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800}\n kwargs_numerics = {'interpol_grid_num': 500, 'log_integration': True,\n 'max_integrate': 100}\n\n kwargs_model = {'mass_profile_list': mass_profile_list,\n 'light_profile_list': light_profile_list,\n 'anisotropy_model': anisotropy_type}\n analytic_kin = AnalyticKinematics(kwargs_cosmo)\n numeric_kin = NumericKinematics(kwargs_model, kwargs_cosmo, **kwargs_numerics)\n rho0_r0_gamma = 
analytic_kin._rho0_r0_gamma(theta_E, gamma)\n r_array = np.logspace(-2, 0.5, 10)\n sigma_r_analytic_array = []\n sigma_r_num_array = []\n for r in r_array:\n sigma_r2_analytic = analytic_kin._sigma_r2(r=r, a=Rs, gamma=gamma, r_ani=r_ani, rho0_r0_gamma=rho0_r0_gamma)\n sigma_r2_num = numeric_kin.sigma_r2(r, kwargs_mass, kwargs_light, kwargs_anisotropy)\n sigma_r_analytic = np.sqrt(sigma_r2_analytic) / 1000\n sigma_r_num = np.sqrt(sigma_r2_num) / 1000\n sigma_r_num_array.append(sigma_r_num)\n sigma_r_analytic_array.append(sigma_r_analytic)\n #import matplotlib.pyplot as plt\n #plt.semilogx(r_array, np.array(sigma_r_analytic_array)/np.array(sigma_r_num_array), label='analytic')\n #plt.semilogx(r_array, sigma_r_num_array, label='numeric')\n #plt.legend()\n #plt.show()\n npt.assert_almost_equal(sigma_r_num_array, sigma_r_analytic_array, decimal=-1)\n\n def test_delete_cache(self):\n kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800}\n kwargs_numerics = {'interpol_grid_num': 500, 'log_integration': True,\n 'max_integrate': 100}\n\n kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': 1}\n kwargs_model = {'mass_profile_list': [],\n 'light_profile_list': [],\n 'anisotropy_model': 'const'}\n numeric_kin = NumericKinematics(kwargs_model, kwargs_cosmo, **kwargs_numerics)\n numeric_kin._interp_jeans_integral = 1\n numeric_kin._log_mass_3d = 2\n numeric_kin.delete_cache()\n assert hasattr(numeric_kin, '_log_mass_3d') is False\n assert hasattr(numeric_kin, '_interp_jeans_integral') is False\n\n\nif __name__ == '__main__':\n pytest.main()\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.testing.assert_almost_equal",
"numpy.sqrt",
"numpy.cos",
"numpy.linspace"
],
[
"numpy.sqrt"
],
[
"numpy.zeros_like",
"numpy.array",
"numpy.sin",
"numpy.shape",
"numpy.atleast_1d",
"numpy.cos",
"numpy.isfinite"
],
[
"numpy.testing.assert_almost_equal",
"numpy.logspace",
"numpy.sqrt"
]
] |
Manas02/ScaffoldGPT
|
[
"44641a7ed9afcf94f3b2c338935d9fcae3a5d64e"
] |
[
"svg.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# Part of Scaffold Generative Pretraining Project\n# Author : Manas Mahale <manas.mahale@bcp.edu.in>\n\nimport re\nimport logging\n\n\nfrom rdkit import Chem\nfrom rdkit.Chem import rdDepictor\nfrom rdkit.Chem.Draw import rdMolDraw2D\nfrom rdkit import RDLogger \n\nfrom model.utils import set_seed\nfrom model.model import GPT, GPTConfig\nfrom model.utils import sample\n\nimport torch\nfrom torch.utils.data import Dataset\n\n# set up logging\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO)\n\n# make deterministic\nset_seed(42)\n\n\nclass CharDataset(Dataset):\n def __init__(self, data, block_size):\n chars = []\n for i in text:\n for j in set(i):\n chars.append(j) \n chars = sorted(list(set(chars))) + ['<pad>']\n data_size, vocab_size = len(text), len(chars)\n print('Data has %d SMILES \\n%d unique characters.' % (data_size, vocab_size)) \n self.stoi = {ch:i for i,ch in enumerate(chars)}\n self.itos = {i:ch for i,ch in enumerate(chars)}\n self.vocab_size = vocab_size\n self.data = data\n self.block_size = block_size\n \n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n chunk = self.data[idx:idx + 1][0]\n dix = [self.stoi[s] for s in chunk] + [self.stoi['<pad>']] * (self.block_size - len(chunk))\n x = torch.tensor(dix[:-1], dtype=torch.long)\n y = torch.tensor(dix[1:], dtype=torch.long)\n return x, y\n\n\nblock_size = 64\n\ntext = [i.strip() for i in open('./data/final/train.txt', 'r').readlines()]\ntrain_dataset = CharDataset(text, block_size)\n\n\nmconf = GPTConfig(train_dataset.vocab_size, train_dataset.block_size,\n n_layer=2, n_head=2, n_embd=16)\nmodel = GPT(mconf)\n\nmodel.load_state_dict(torch.load('./ckpt/big_model.bin'))\n\nRDLogger.DisableLog('rdApp.*') \nprint('\\n**Generating Scaffold SMILES**\\n')\nvalid = []\nfor n in range(1, 501):\n context = \"C\"\n x = 
torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...]\n y = sample(model, x, 25, temperature=1.0, sample=True, top_k=10)[0]\n completion = ''.join([train_dataset.itos[int(i)] for i in y])\n\n smiles = re.sub(\"<pad>\",\"\",completion)\n m = Chem.MolFromSmiles(smiles, sanitize=False)\n if m is not None:\n print(n, smiles)\n valid.append(smiles)\nprint('\\n', len(valid)/5,'% Valid')\n\ndef plot_rdkit_svg_grid(mols, mols_per_row=2, filename=\"generated\"):\n svg = Chem.Draw.MolsToGridImage(mols, molsPerRow=mols_per_row, useSVG=True)\n if filename is not None:\n if not filename.endswith('.svg'):\n filename += '.svg'\n with open(filename, 'w') as f:\n f.write(svg)\n return svg \n\nplot_rdkit_svg_grid([Chem.MolFromSmiles(i) for i in valid])\n"
] |
[
[
"torch.tensor",
"torch.load"
]
] |
hi-ogawa/magnum-bindings
|
[
"5f324bdcde828d9ffc3bcd8e562480875586c54b"
] |
[
"src/python/magnum/test/test_scenegraph_numpy.py"
] |
[
"#\n# This file is part of Magnum.\n#\n# Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019\n# Vladimír Vondruš <mosra@centrum.cz>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n#\n\nimport unittest\n\nfrom magnum import *\nfrom magnum import scenegraph\nfrom magnum.scenegraph.matrix import Object3D, Scene3D\n\ntry:\n import numpy as np\nexcept ModuleNotFoundError:\n raise unittest.SkipTest(\"numpy not installed\")\n\nclass Object(unittest.TestCase):\n def test_transformation(self):\n scene = Scene3D()\n\n a = Object3D(scene)\n\n # like a.rotate_local(Deg(35.0), Vector3.x_axis()), but way uglier,\n # another could be scipy.spatial.transform.Rotation but that's meh as\n # well\n a.transform_local(np.array(\n [[1.0, 0.0, 0.0, 0.0],\n [0.0, 0.819152, -0.573576, 0.0],\n [0.0, 0.573576, 0.819152, 0.0],\n [0.0, 0.0, 0.0, 1.0]]))\n self.assertEqual(a.transformation, Matrix4.rotation_x(Deg(35.0)))\n 
self.assertEqual(a.absolute_transformation(), Matrix4.rotation_x(Deg(35.0)))\n\n b = Object3D(a)\n b.translate(np.array([3.0, 4.0, 5.0], dtype='float32'))\n self.assertEqual(b.transformation, Matrix4.translation((3.0, 4.0, 5.0)))\n self.assertEqual(b.absolute_transformation(),\n Matrix4.rotation_x(Deg(35.0))@\n Matrix4.translation((3.0, 4.0, 5.0)))\n\n c = Object3D(scene)\n self.assertEqual(c.transformation, Matrix4.identity_init())\n self.assertEqual(c.absolute_transformation(), Matrix4.identity_init())\n"
] |
[
[
"numpy.array"
]
] |
PaulTran47/xlnet
|
[
"ebf4f69a296d299eaeaa4160259ae213e2ddc0a7"
] |
[
"train_gpu.py"
] |
[
"\"\"\"Pretraining on GPUs.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\nimport math\nimport json\nimport time\nimport numpy as np\n\nfrom absl import flags\nimport absl.logging as _logging # pylint: disable=unused-import\n\nimport tensorflow as tf\n\nimport data_utils\nimport model_utils\nfrom gpu_utils import assign_to_gpu, average_grads_and_vars\nimport function_builder\n\n\n# GPU config\nflags.DEFINE_integer(\"num_hosts\", default=1,\n help=\"Number of hosts\")\nflags.DEFINE_integer(\"num_core_per_host\", default=8,\n help=\"Number of cores per host\")\nflags.DEFINE_bool(\"use_tpu\", default=False,\n help=\"Whether to use TPUs for training.\")\n\n# Experiment (data/checkpoint/directory) config\nflags.DEFINE_integer(\"num_passes\", default=1,\n help=\"Number of passed used for training.\")\nflags.DEFINE_string(\"record_info_dir\", default=None,\n help=\"Path to local directory containing `record_info-lm.json`.\")\nflags.DEFINE_string(\"model_dir\", default=None,\n help=\"Estimator model_dir.\")\nflags.DEFINE_string(\"init_checkpoint\", default=None,\n help=\"checkpoint path for initializing the model.\")\n\n# Optimization config\nflags.DEFINE_float(\"learning_rate\", default=1e-4,\n help=\"Maximum learning rate.\")\nflags.DEFINE_float(\"clip\", default=1.0,\n help=\"Gradient clipping value.\")\n# for cosine decay\nflags.DEFINE_float(\"min_lr_ratio\", default=0.001,\n help=\"Minimum ratio learning rate.\")\nflags.DEFINE_integer(\"warmup_steps\", default=0,\n help=\"Number of steps for linear lr warmup.\")\nflags.DEFINE_float(\"adam_epsilon\", default=1e-8,\n help=\"Adam epsilon\")\nflags.DEFINE_string(\"decay_method\", default=\"poly\",\n help=\"poly or cos\")\nflags.DEFINE_float(\"weight_decay\", default=0.0,\n help=\"weight decay\")\n\n# Training config\nflags.DEFINE_integer(\"train_batch_size\", default=16,\n help=\"Size of train 
batch.\")\nflags.DEFINE_integer(\"train_steps\", default=100000,\n help=\"Total number of training steps.\")\nflags.DEFINE_integer(\"iterations\", default=1000,\n help=\"Number of iterations per repeat loop.\")\nflags.DEFINE_integer(\"save_steps\", default=None,\n help=\"number of steps for model checkpointing.\")\n\n# Data config\nflags.DEFINE_integer('seq_len', default=0,\n help='Sequence length for pretraining.')\nflags.DEFINE_integer('reuse_len', default=0,\n help=\"How many tokens to be reused in the next batch. \"\n \"Could be half of seq_len\")\nflags.DEFINE_bool(\"bi_data\", default=True,\n help=\"Use bidirectional data streams, i.e., forward & backward.\")\nflags.DEFINE_integer(\"mask_alpha\", default=6,\n help=\"How many tokens to form a group.\")\nflags.DEFINE_integer(\"mask_beta\", default=1,\n help=\"How many tokens to mask within each group.\")\nflags.DEFINE_integer(\"num_predict\", default=None,\n help=\"Number of tokens to predict in partial prediction.\")\nflags.DEFINE_integer('perm_size', default=None,\n help='perm size.')\nflags.DEFINE_bool(\"uncased\", False,\n help=\"Use uncased inputs or not.\")\nflags.DEFINE_integer(\"n_token\", 32000, help=\"Vocab size\")\n\n# Model config\nflags.DEFINE_integer(\"mem_len\", default=0,\n help=\"Number of steps to cache\")\nflags.DEFINE_bool(\"same_length\", default=False,\n help=\"Same length attention\")\nflags.DEFINE_integer(\"clamp_len\", default=-1,\n help=\"Clamp length\")\n\nflags.DEFINE_integer(\"n_layer\", default=6,\n help=\"Number of layers.\")\nflags.DEFINE_integer(\"d_model\", default=32,\n help=\"Dimension of the model.\")\nflags.DEFINE_integer(\"d_embed\", default=32,\n help=\"Dimension of the embeddings.\")\nflags.DEFINE_integer(\"n_head\", default=4,\n help=\"Number of attention heads.\")\nflags.DEFINE_integer(\"d_head\", default=8,\n help=\"Dimension of each attention head.\")\nflags.DEFINE_integer(\"d_inner\", default=32,\n help=\"Dimension of inner hidden size in positionwise 
feed-forward.\")\nflags.DEFINE_float(\"dropout\", default=0.0,\n help=\"Dropout rate.\")\nflags.DEFINE_float(\"dropatt\", default=0.0,\n help=\"Attention dropout rate.\")\nflags.DEFINE_bool(\"untie_r\", default=False,\n help=\"Untie r_w_bias and r_r_bias\")\nflags.DEFINE_string(\"summary_type\", default=\"last\",\n help=\"Method used to summarize a sequence into a compact vector.\")\nflags.DEFINE_string(\"ff_activation\", default=\"relu\",\n help=\"Activation type used in position-wise feed-forward.\")\nflags.DEFINE_bool(\"use_bfloat16\", False,\n help=\"Whether to use bfloat16.\")\n\n# Parameter initialization\nflags.DEFINE_enum(\"init\", default=\"normal\",\n enum_values=[\"normal\", \"uniform\"],\n help=\"Initialization method.\")\nflags.DEFINE_float(\"init_std\", default=0.02,\n help=\"Initialization std when init is normal.\")\nflags.DEFINE_float(\"init_range\", default=0.1,\n help=\"Initialization std when init is uniform.\")\n\n# TFRecord Path\nflags.DEFINE_integer(\"pass_id\", 0, help=\"ID of the current pass.\"\n \"Different passes sample different negative segment.\")\nflags.DEFINE_integer(\"task\", 0, help=\"The Task ID. 
This value is used when \"\n \"using multiple workers to identify each worker.\")\n\nFLAGS = flags.FLAGS\n\n\ndef get_model_fn():\n def model_fn(features, labels, mems, is_training):\n #### Get loss from inputs\n total_loss, new_mems, monitor_dict = function_builder.get_loss(\n FLAGS, features, labels, mems, is_training)\n\n #### Check model parameters\n num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])\n tf.logging.info('#params: {}'.format(num_params))\n\n # GPU\n assert is_training\n all_vars = tf.trainable_variables()\n grads = tf.gradients(total_loss, all_vars)\n grads_and_vars = list(zip(grads, all_vars))\n\n return total_loss, new_mems, grads_and_vars\n\n return model_fn\n\n\ndef single_core_graph(is_training, features, mems):\n model_fn = get_model_fn()\n\n model_ret = model_fn(\n features=features,\n labels=None,\n mems=mems,\n is_training=is_training)\n\n return model_ret\n\n\ndef create_mems_tf(bsz_per_core):\n mems = [tf.placeholder(dtype=tf.float32,\n shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model])\n for layer in range(FLAGS.n_layer)]\n\n return mems\n\n\ndef initialize_mems_np(bsz_per_core):\n mems_np = [np.zeros(shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model],\n dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n\n return mems_np\n\n\ndef train(ps_device):\n ##### Get input function and model function\n\n train_input_fn, record_info_dict = data_utils.get_input_fn(\n tfrecord_dir=FLAGS.record_info_dir,\n split=\"train\",\n task=FLAGS.task,\n pass_id=FLAGS.pass_id,\n bsz_per_host=FLAGS.train_batch_size,\n seq_len=FLAGS.seq_len,\n reuse_len=FLAGS.reuse_len,\n bi_data=FLAGS.bi_data,\n num_hosts=1,\n num_core_per_host=1, # set to one no matter how many GPUs\n perm_size=FLAGS.perm_size,\n mask_alpha=FLAGS.mask_alpha,\n mask_beta=FLAGS.mask_beta,\n uncased=FLAGS.uncased,\n num_passes=FLAGS.num_passes,\n use_bfloat16=FLAGS.use_bfloat16,\n num_predict=FLAGS.num_predict)\n\n # for key, info in record_info_dict.items():\n 
tf.logging.info(\"num of batches {}\".format(record_info_dict[\"num_batch\"]))\n\n ##### Create input tensors / placeholders\n bsz_per_core = FLAGS.train_batch_size // FLAGS.num_core_per_host\n\n params = {\n \"batch_size\": FLAGS.train_batch_size # the whole batch\n }\n train_set = train_input_fn(params)\n\n example = train_set.make_one_shot_iterator().get_next()\n\n if FLAGS.num_core_per_host > 1:\n examples = [{} for _ in range(FLAGS.num_core_per_host)]\n for key in example.keys():\n vals = tf.split(example[key], FLAGS.num_core_per_host, 0)\n for device_id in range(FLAGS.num_core_per_host):\n examples[device_id][key] = vals[device_id]\n else:\n examples = [example]\n\n ##### Create computational graph\n tower_mems, tower_losses, tower_new_mems, tower_grads_and_vars = [], [], [], []\n\n for i in range(FLAGS.num_core_per_host):\n reuse = True if i > 0 else None\n with tf.device(assign_to_gpu(i, ps_device)), \\\n tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n\n # The mems for each tower is a dictionary\n mems_i = {}\n if FLAGS.mem_len:\n mems_i[\"mems\"] = create_mems_tf(bsz_per_core)\n\n loss_i, new_mems_i, grads_and_vars_i = single_core_graph(\n is_training=True,\n features=examples[i],\n mems=mems_i)\n\n tower_mems.append(mems_i)\n tower_losses.append(loss_i)\n tower_new_mems.append(new_mems_i)\n tower_grads_and_vars.append(grads_and_vars_i)\n\n ## average losses and gradients across towers\n if len(tower_losses) > 1:\n loss = tf.add_n(tower_losses) / len(tower_losses)\n grads_and_vars = average_grads_and_vars(tower_grads_and_vars)\n else:\n loss = tower_losses[0]\n grads_and_vars = tower_grads_and_vars[0]\n\n ## get train op\n train_op, learning_rate, gnorm = model_utils.get_train_op(FLAGS, None,\n grads_and_vars=grads_and_vars)\n global_step = tf.train.get_global_step()\n\n ##### Training loop\n # initialize mems\n tower_mems_np = []\n for i in range(FLAGS.num_core_per_host):\n mems_i_np = {}\n for key in tower_mems[i].keys():\n mems_i_np[key] = 
initialize_mems_np(bsz_per_core)\n tower_mems_np.append(mems_i_np)\n\n saver = tf.train.Saver()\n\n gpu_options = tf.GPUOptions(allow_growth=True)\n\n model_utils.init_from_checkpoint(FLAGS, global_vars=True)\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n gpu_options=gpu_options)) as sess:\n sess.run(tf.global_variables_initializer())\n\n fetches = [loss, tower_new_mems, global_step, gnorm, learning_rate, train_op]\n\n total_loss, prev_step = 0., -1\n while True:\n feed_dict = {}\n for i in range(FLAGS.num_core_per_host):\n for key in tower_mems_np[i].keys():\n for m, m_np in zip(tower_mems[i][key], tower_mems_np[i][key]):\n feed_dict[m] = m_np\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n loss_np, tower_mems_np, curr_step = fetched[:3]\n total_loss += loss_np\n\n if curr_step > 0 and curr_step % FLAGS.iterations == 0:\n curr_loss = total_loss / FLAGS.iterations\n tf.logging.info(\"[{}] | gnorm {:.2f} lr {:8.6f} \"\n \"| loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}\".format(\n curr_step, fetched[-3], fetched[-2],\n curr_loss, math.exp(curr_loss), curr_loss / math.log(2)))\n total_loss, prev_step = 0., curr_step\n\n if curr_step > 0 and curr_step % FLAGS.save_steps == 0:\n save_path = os.path.join(FLAGS.model_dir, \"model.ckpt\")\n saver.save(sess, save_path)\n tf.logging.info(\"Model saved in path: {}\".format(save_path))\n\n if curr_step >= FLAGS.train_steps:\n break\n\n\ndef main(unused_argv):\n del unused_argv # Unused\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n tf.logging.info(\"n_token {}\".format(FLAGS.n_token))\n\n if not tf.gfile.Exists(FLAGS.model_dir):\n tf.gfile.MakeDirs(FLAGS.model_dir)\n\n train(\"/gpu:0\")\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] |
[
[
"tensorflow.trainable_variables",
"tensorflow.logging.set_verbosity",
"numpy.zeros",
"tensorflow.gfile.Exists",
"tensorflow.train.Saver",
"tensorflow.gradients",
"tensorflow.add_n",
"tensorflow.ConfigProto",
"tensorflow.gfile.MakeDirs",
"numpy.prod",
"tensorflow.placeholder",
"tensorflow.get_variable_scope",
"tensorflow.split",
"tensorflow.app.run",
"tensorflow.train.get_global_step",
"tensorflow.global_variables_initializer",
"tensorflow.GPUOptions"
]
] |
nothingbutpassion/dldiy
|
[
"53c6365fb5689b47ec62cf3bb4c3d5bde621e8f4"
] |
[
"datasets/widerface.py"
] |
[
"# coding: utf-8\r\nimport os\r\nimport zipfile\r\nimport pickle\r\nimport numpy as np\r\nimport PIL.Image as Image\r\n\r\ndataset_dir = os.path.dirname(os.path.abspath(__file__)) + \"/widerface\"\r\nsave_file = dataset_dir + \"/winderface.pkl\"\r\n\r\ndef _extract_files(root):\r\n for name in [\"wider_face_split\", \"WIDER_train\", \"WIDER_val\", \"WIDER_test\"]:\r\n if not os.path.exists(root + \"/\" + name):\r\n zip_name = root + \"/\" + name + \".zip\"\r\n assert(os.path.exists(zip_name))\r\n with zipfile.ZipFile(zip_name) as f:\r\n print(\"extracting %s ...\" % zip_name)\r\n f.extractall(root)\r\n print(\"saved as %s\" % root + \"/\" + name)\r\n\r\ndef _parse_bbx(root, image_dir, bbox_file):\r\n data = []\r\n with open(root + \"/\" + bbox_file, \"r\") as f:\r\n lines = f.readlines()\r\n i = 0\r\n while i < len(lines):\r\n sample = {}\r\n sample[\"image\"] = root + \"/\" + image_dir +\"/\" + lines[i].strip()\r\n sample[\"boxes\"] = []\r\n boxes_num = int(lines[i+1])\r\n for j in range(i+2, i+2+boxes_num):\r\n box = lines[j].split()\r\n sample[\"boxes\"].append(box)\r\n if len(sample[\"boxes\"]) > 0:\r\n data.append(sample)\r\n i = i + 2 + boxes_num\r\n return data\r\n\r\ndef _parse_filelist(root, image_dir, list_file):\r\n data = []\r\n with open(root + \"/\" + list_file, \"r\") as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n path = root + \"/\" + image_dir + \"/\" + line.strip()\r\n data.append(path)\r\n return data\r\n\r\ndef init_data(root):\r\n _extract_files(root)\r\n train_data = _parse_bbx(root, \"WIDER_train/images\",\"/wider_face_split/wider_face_train_bbx_gt.txt\")\r\n val_data = _parse_bbx(root, \"WIDER_val/images\", \"/wider_face_split/wider_face_val_bbx_gt.txt\")\r\n test_data = _parse_filelist(root, \"WIDER_test/images\", \"wider_face_split/wider_face_test_filelist.txt\")\r\n dataset = (train_data, val_data, test_data)\r\n print(\"creating pickle file ...\")\r\n with open(save_file, \"wb\") as f:\r\n pickle.dump(dataset, f, -1)\r\n 
print(\"saved as \" + save_file) \r\n\r\ndef select(data, blur=None, expression=None, illumination=None, invalid=None, occlusion=None, pose=None, min_size=12):\r\n \"\"\"Attached the mappings between attribute names and label values.\r\n blur:\r\n clear->0\r\n normal blur->1\r\n heavy blur->2\r\n\r\n expression:\r\n typical expression->0\r\n exaggerate expression->1\r\n\r\n illumination:\r\n normal illumination->0\r\n extreme illumination->1\r\n\r\n occlusion:\r\n no occlusion->0\r\n partial occlusion->1\r\n heavy occlusion->2\r\n\r\n pose:\r\n typical pose->0\r\n atypical pose->1\r\n\r\n invalid:\r\n false->0(valid image)\r\n true->1(invalid image)\r\n\r\n The format of txt ground truth.\r\n File name\r\n Number of bounding box\r\n x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose\r\n \"\"\"\r\n result = []\r\n for sample in data:\r\n image = sample[\"image\"]\r\n bboxes = []\r\n zeros = 0\r\n for box in sample[\"boxes\"]:\r\n b = [float(s) for s in box[:4]]\r\n attributes = box[4:]\r\n requirements = [blur, expression, illumination, invalid, occlusion, pose]\r\n passed = True\r\n for i in range(len(attributes)):\r\n if requirements[i] and not (attributes[i] in requirements[i]):\r\n passed = False\r\n break\r\n if not passed:\r\n continue\r\n # NOTES:\r\n # some box' w, h is 0 (or too small), should exclude\r\n if b[2] < 1 or b[3] < 1:\r\n zeros += 1\r\n if b[2] >= min_size and b[3] >= min_size:\r\n bboxes.append(b)\r\n if len(bboxes) > 0 and len(bboxes) == len(sample[\"boxes\"]) - zeros:\r\n result.append({\"image\": image, \"boxes\": bboxes})\r\n return result\r\n\r\ndef transform(data, num_sample, crop_size, output_size, resize_rate=0.5, flip_rate=0.5):\r\n result = []\r\n index = int(np.random.rand()*len(data))\r\n while (len(result) < num_sample):\r\n index = index+1 if index < len(data)-1 else 0 \r\n sample = data[index]\r\n image = sample[\"image\"]\r\n iw, ih = Image.open(image).size\r\n for i in range(11):\r\n resize = True if 
np.random.rand() < resize_rate else False\r\n if resize:\r\n cw, ch = crop_size\r\n else:\r\n cw, ch = output_size\r\n if iw < cw or ih < ch:\r\n continue\r\n x = int((iw - cw)*np.random.rand())\r\n y = int((ih - ch)*np.random.rand())\r\n candidates = [b for b in sample[\"boxes\"] if x < b[0]+b[2]/2 and b[0]+b[2]/2 < x+cw and y < b[1]+b[3]/2 and b[1]+b[3]/2 < y+ch]\r\n boxes = [[b[0]-x, b[1]-y, b[2], b[3]] for b in candidates if b[0] > x and b[1] > y and b[0]+b[2] < x+cw and b[1]+b[3] < y+ch]\r\n if len(candidates) == 0 or len(candidates) != len(boxes):\r\n continue\r\n flip = True if np.random.rand() < flip_rate else False\r\n result.append({\"image\": image, \"crop\": [x, y, x+cw, y+ch], \"boxes\": boxes, \"resize\": resize, \"flip\": flip})\r\n if len(result) % 100 == 0:\r\n print(\"croped %d samples\" % len(result))\r\n break\r\n return result\r\n\r\n\r\ndef load_data(root=dataset_dir):\r\n \"\"\"WIDERFace: http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/\r\n \"\"\"\r\n assert(os.path.exists(root))\r\n if not os.path.exists(save_file):\r\n init_data(root)\r\n with open(save_file, \"rb\") as f:\r\n dataset = pickle.load(f)\r\n return dataset\r\n\r\n"
] |
[
[
"numpy.random.rand"
]
] |
BlueAnon/BD-BNN
|
[
"89b71510167333a5e83b072fe0aebac55263cc66"
] |
[
"train.py"
] |
[
"import argparse\nimport os\nimport sys\nimport random\nimport shutil\nimport time\nimport warnings\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nimport datetime\n\nfrom tensorboardX import SummaryWriter\nimport numpy as np\nfrom loader import dataloader_cifar10, dataloader_cifar100, dataloader_imagenet\nfrom utils import utils\nfrom models import cifar10 as cifar_models\nfrom models import imagenet as imagenet_models\n\nfrom models.imagenet.resnet_bi_imagenet_set_2 import HardBinaryConv_react\nfrom models.imagenet.resnet_bi_imagenet_set_2_2 import HardBinaryConv\nfrom models.bin_module.binarized_modules import HardBinaryConv_cifar\nfrom kurtosis import KurtosisWeight, RidgeRegularization, WeightRegularization\nfrom functools import reduce\nfrom utils import KD_loss\n\n# import models as cifar10_models\nimport matplotlib\n# matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nos.environ['TORCH_HOME'] = 'models'\n# writer = None\n# Logger handle\nimport torchvision.models as models\n\nTORCH_MODEL_NAMES = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nCIFAR10_MODEL_NAMES = sorted(name for name in cifar_models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(cifar_models.__dict__[name]))\n\nIMAGENET_MODEL_NAMES = sorted(name for name in imagenet_models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(imagenet_models.__dict__[name]))\n\nCOSTUME_MODEL_NAMES = sorted(map(lambda s: s.lower(),\n set(IMAGENET_MODEL_NAMES + CIFAR10_MODEL_NAMES)))\n\nALL_MODEL_NAMES = sorted(map(lambda s: s.lower(),\n set(COSTUME_MODEL_NAMES + 
TORCH_MODEL_NAMES)))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=ALL_MODEL_NAMES,\n help='model architecture: ' +\n ' | '.join(ALL_MODEL_NAMES) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('-lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('-wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 50<-10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=0, type=int,\n help='node rank for 
distributed training')\nparser.add_argument('--dist-url', default=\"tcp://127.0.0.1:23456\", type=str,\n help='url used to set up distributed training')\nparser.add_argument('--master-addr', default=\"132.68.39.200\", type=str,\n help='address used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nparser.add_argument('--log_path', default='log', type=str,\n help='log for tensorboardX.')\n\nparser.add_argument('--custom_resnet', dest='custom_resnet', action='store_true',\n help='use custom_resnet model')\nparser.add_argument('--reset_resume', dest='reset_resume', action='store_true',\n help='reset resume parameters')\nparser.add_argument('--ede', action='store_true',\n help='use ede backprop')\nparser.add_argument('--w-kurtosis-target', type=float, default=1.8,\n help='weight kurtosis value')\nparser.add_argument('--w-lambda-kurtosis', type=float, default=1.0,\n help='lambda for kurtosis regularization in the Loss')\nparser.add_argument('--w-kurtosis', action='store_true',\n help='use kurtosis for weights regularization')\nparser.add_argument('--weight-name', nargs='+', type=str,\n help='param name to add kurtosis loss')\nparser.add_argument('--remove-weight-name', nargs='+', type=str,\n help='layer name to remove from kurtosis loss')\nparser.add_argument('--kurtosis-mode', dest='kurtosis_mode', default='avg', choices=['max', 'sum', 'avg'],\n type=lambda s: s.lower(), help='kurtosis regularization 
mode')\nparser.add_argument('--diffkurt', action='store_true', default=False,\n help='train with different kurtosis target for each layer')\nparser.add_argument('--kurtepoch', type=int, metavar='N', default=0,\n help='train with kurtosis starting at epoch ')\nparser.add_argument('--twoblock', action='store_true', default=False,\n help='2 different type of blocks')\nparser.add_argument('--dataset', dest='dataset', default='cifar10', choices=['cifar10', 'cifar100', 'imagenet'],\n type=lambda s: s.lower(), help='dataset')\nparser.add_argument('--imagenet_setting', action='store_true',\n help='use imagenet setting_step_1')\nparser.add_argument('--imagenet_setting_step_1', action='store_true',\n help='use imagenet setting')\nparser.add_argument('--imagenet_setting_step_2', action='store_true',\n help='use imagenet setting')\nparser.add_argument('--imagenet_setting_step_2_ts', action='store_true',\n help='use imagenet setting')\nparser.add_argument('-a_teacher', '--arch_teacher', metavar='ARCH_T', default='resnet18',\n choices=ALL_MODEL_NAMES,\n help='model architecture: ' +\n ' | '.join(ALL_MODEL_NAMES) +\n ' (default: resnet18)')\nparser.add_argument('--custom_resnet_teacher', dest='custom_resnet_teacher', action='store_true',\n help='use custom_resnet model')\nparser.add_argument('--resume_teacher', default='', type=str, metavar='PATH',\n help='path to teacher (default: none)')\n\n# knoeledge distilation\nparser.add_argument('--kd', action='store_true',help='use kd')\nparser.add_argument('--react', action='store_true',help='use react training')\nparser.add_argument('--alpha', default=0.9, type=float, help='weight for KD (Hinton)')\nparser.add_argument('--temperature', default=4, type=float)\nparser.add_argument('--beta', default=200, type=float)\nparser.add_argument('--qk_dim', default=128, type=int)\nbest_acc1 = 0\nbest_epoch = 0\nmsglogger = logging.getLogger()\n\ndef main():\n args = parser.parse_args()\n if args.seed is not None:\n random.seed(args.seed)\n 
torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n # log\n datatime_str = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n args.log_path = os.path.join(args.log_path, str(args.w_kurtosis_target),datatime_str)\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n global best_epoch\n global writer\n # global msglogger\n writer = SummaryWriter(args.log_path)\n\n log_format = '%(asctime)s %(message)s'\n logging.basicConfig(level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\n fh = logging.FileHandler(os.path.join(args.log_path + '/log.txt'))\n fh.setFormatter(logging.Formatter(log_format))\n logging.getLogger().addHandler(fh)\n msglogger = logging.getLogger()\n args.gpu = gpu\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n\n msglogger.info('log dir is:{} '.format(args.log_path))\n msglogger.info(args)\n if args.gpu is not None:\n print(\"Use GPU: {} for 
training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n os.environ['MASTER_PORT'] = '29500'\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = gpu\n os.environ['MASTER_PORT'] = '2950' + str(gpu)\n os.environ['MASTER_ADDR'] = args.master_addr\n\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n if args.imagenet_setting_step_2_ts:\n if args.dataset == 'imagenet':\n if args.custom_resnet_teacher:\n model_teacher = imagenet_models.__dict__[args.arch_teacher](pretrained=True)\n else:\n model_teacher = models.__dict__[args.arch_teacher](pretrained=True)\n else:\n model_teacher = cifar_models.__dict__[args.arch_teacher]()\n model_teacher = nn.DataParallel(model_teacher, device_ids=[args.gpu]).cuda()\n if args.resume_teacher:\n if os.path.isfile(args.resume):\n msglogger.info(\"=> loading checkpoint for teacher'{}'\".format(args.resume_teacher))\n if args.gpu is None:\n checkpoint = torch.load(args.resume_teacher)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume_teacher, map_location=loc)\n\n model_teacher.load_state_dict(checkpoint['state_dict'])\n msglogger.info(\"=> loaded checkpoint teacher'{}' (epoch {}) acc {}\"\n .format(args.resume_teacher, checkpoint['epoch'], best_acc1))\n else:\n msglogger.info(\"=> no checkpoint found at '{}'\".format(args.resume_teacher))\n\n for p in model_teacher.parameters():\n p.requires_grad = False\n model_teacher.eval()\n\n # create model\n if args.custom_resnet:\n msglogger.info(\"=> using resnet18 custom model '{}'\".format(args.arch))\n if args.dataset == 'cifar10':\n model = cifar_models.__dict__[args.arch]()\n else:\n model = 
imagenet_models.__dict__[args.arch](args.pretrained)\n else:\n msglogger.info(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=args.pretrained)\n\n msglogger.info(model)\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = torch.nn.DataParallel(model, device_ids=[args.gpu]).cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n\n criterion = nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0)\n if args.dataset == 'imagenet':\n all_parameters = model.parameters()\n weight_parameters = []\n for pname, p in model.named_parameters():\n if p.ndimension() == 4 or 'conv' in pname:\n weight_parameters.append(p)\n weight_parameters_id = list(map(id, weight_parameters))\n other_parameters = list(filter(lambda p: id(p) not in weight_parameters_id, all_parameters))\n\n optimizer = torch.optim.Adam(\n 
[{'params': other_parameters},\n {'params': weight_parameters, 'weight_decay': args.weight_decay}],\n lr=args.lr, )\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: (1.0 - step / args.epochs), last_epoch=-1)\n\n\n if args.imagenet_setting_step_2_ts:\n criterion_kl = KD_loss.DistributionLoss_layer().cuda(args.gpu)\n if args.react:\n criterion_kl = None\n criterion_kl_c = KD_loss.DistributionLoss().cuda(args.gpu)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n msglogger.info(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n if not args.reset_resume:\n args.start_epoch = checkpoint['epoch']\n best_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n best_acc1 = best_acc1.to(args.gpu)\n\n model.load_state_dict(checkpoint['state_dict'])\n msglogger.info(\"=> loaded checkpoint '{}' (epoch {}) acc {}\"\n .format(args.resume, args.start_epoch, best_acc1))\n else:\n msglogger.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n if args.dataset == \"imagenet\":\n train_loader = dataloader_imagenet(split='train', batch_size=args.batch_size, data_path=args.data, distributed=False)\n val_loader = dataloader_imagenet(split='test', batch_size=args.batch_size, data_path=args.data, distributed=False)\n elif args.dataset == \"cifar10\":\n train_loader = dataloader_cifar10(split='train', batch_size=args.batch_size)\n val_loader = dataloader_cifar10(split='test', batch_size=args.batch_size)\n else:\n train_loader = dataloader_cifar100(split='train', batch_size=args.batch_size)\n val_loader = dataloader_cifar100(split='test', batch_size=args.batch_size)\n\n\n if args.evaluate:\n acc = 
validate(val_loader, model, criterion, args, 0)\n msglogger.info('test acc : {}'.format(acc))\n return\n\n # Kurtosis regularization on weights tensors\n weight_to_hook = {}\n if args.w_kurtosis:\n if args.weight_name[0] == 'all':\n all_convs = [n + '.weight' for n, m in model.named_modules() if\n (isinstance(m, nn.Conv2d) or isinstance(m,HardBinaryConv_react) or isinstance(m,HardBinaryConv) or isinstance(m,HardBinaryConv_cifar))]\n weight_name = all_convs[1:]\n if args.remove_weight_name:\n for name in weight_name:\n if args.remove_weight_name[0] in name:\n weight_name.remove(name)\n msglogger.info(\"weight_name : {}, remove {}\".format(weight_name, args.remove_weight_name))\n else:\n weight_name = args.weight_name\n for name in weight_name:\n curr_param = utils.find_weight_tensor_by_name(model, name)\n if curr_param is None:\n name = name.replace(\"weight\", 'float_weight') # QAT name\n curr_param = utils.find_weight_tensor_by_name(model, name)\n weight_to_hook[name] = curr_param\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.ede:\n # * compute t/k in back-propagation\n t, k = utils.cpt_tk(epoch,args.epochs)\n for name, module in model.named_modules():\n if isinstance(module, nn.Conv2d) :\n module.k = k.cuda()\n module.t = t.cuda()\n\n if args.imagenet_setting_step_2_ts:\n train_teacher_student(train_loader, model, model_teacher, criterion, criterion_kl, criterion_kl_c, optimizer, epoch, args, weight_to_hook)\n else:\n train(train_loader, model, criterion, optimizer, epoch, args, weight_to_hook)\n\n acc1 = validate(val_loader, model, criterion, args, epoch)\n scheduler.step()\n msglogger.info('----LR----- {}'.format(scheduler.get_lr()))\n is_best = acc1 > best_acc1\n if is_best:\n best_epoch = epoch\n best_acc1 = max(acc1, best_acc1)\n writer.add_scalar(\"Best val Acc1\", best_acc1, epoch)\n msglogger.info(' ***** Best acc is Acc@1 {}, epoch {}, log {}'.format(best_acc1, best_epoch, args.log_path))\n if not args.multiprocessing_distributed or 
(args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n utils.save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, save_path=args.log_path)\n\ndef train(train_loader, model, criterion, optimizer, epoch, args, weight_to_hook=None):\n batch_time = utils.AverageMeter('Time', ':6.3f')\n data_time = utils.AverageMeter('Data', ':6.3f')\n losses = utils.AverageMeter('Loss', ':.4e')\n losses_kurt = utils.AverageMeter('Loss_kurt', ':.4e')\n losses_ce = utils.AverageMeter('Loss_ce', ':.4e')\n top1 = utils.AverageMeter('Acc@1', ':6.2f')\n top5 = utils.AverageMeter('Acc@5', ':6.2f')\n\n progress = utils.ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, losses_kurt, losses_ce,top1, top5],\n msglogger, prefix=\"Epoch: [{}]\".format(epoch))\n\n model.train()\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n ############ kurt ###########\n hookF_weights = {}\n hookF_weights_L2 = {}\n hookF_weights_wr = {}\n idx = 0\n if args.diffkurt:\n if args.dataset == 'imagenet':\n args.w_kurtosis_target = [1.8, 1.4, 1.4, 1.4,\n 1.4, 1.2, 1.4, 1.2, 1.2,\n 1.4, 1.4, 1.4, 1.2, 1.2,\n 1.2, 1.2, 1.4, 1, 1]\n else:\n args.w_kurtosis_target = [1.4, 1.4, 1.4, 1.4,\n 1.4, 1.4, 1.4, 1.4, 1.4,\n 1.4, 1.4, 1.4, 1.4, 1.4,\n 1.8, 1.8, 1.8, 1.8, 2.2]\n else:\n args.w_kurtosis_target = [args.w_kurtosis_target] * len(weight_to_hook)\n for name, w_tensor in weight_to_hook.items():\n hookF_weights[name] = KurtosisWeight(w_tensor, name, kurtosis_target=args.w_kurtosis_target[idx], k_mode=args.kurtosis_mode)\n if args.w_l2_reg:\n hookF_weights_L2[name] = RidgeRegularization(w_tensor, name)\n if args.w_wr_reg:\n hookF_weights_wr[name] = WeightRegularization(w_tensor, name)\n idx=idx+1\n ############ kurt ###########\n\n if args.gpu is not None:\n images = 
images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n images = images.cuda()\n output = model(images)\n orig_loss = criterion(output, target)\n ############ kurt loss ###########\n w_k_scale = 0\n w_kurtosis_regularization = 0\n if args.w_kurtosis and args.kurtepoch <= epoch:\n w_temp_values = []\n w_temp_kld_values = []\n w_kurtosis_loss = 0\n for w_kurt_inst in hookF_weights.values():\n w_kurt_inst.fn_regularization()\n w_temp_values.append(w_kurt_inst.kurtosis_loss)\n w_temp_kld_values.append(w_kurt_inst.KLDiv_loss)\n if args.kurtosis_mode == 'sum':\n w_kurtosis_loss = reduce((lambda a, b: a + b), w_temp_values)\n elif args.kurtosis_mode == 'avg':\n w_kurtosis_loss = reduce((lambda a, b: a + b), w_temp_values)\n w_kurtosis_loss = w_kurtosis_loss / len(weight_to_hook)\n elif args.kurtosis_mode == 'max':\n w_kurtosis_loss = reduce((lambda a, b: max(a, b)), w_temp_values)\n w_kurtosis_regularization = (10 ** w_k_scale) * args.w_lambda_kurtosis * w_kurtosis_loss\n ############ kurt loss ###########\n\n loss = orig_loss + w_kurtosis_regularization\n\n # measure accuracy and record loss\n acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n if args.w_kurtosis:\n losses_kurt.update(w_kurtosis_regularization.item(), images.size(0))\n losses_ce.update(orig_loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n # msglogger.info(progress.display(i))\n remain_epoch = args.epochs - epoch\n remain_iters = remain_epoch * len(train_loader) + (len(train_loader) - i)\n remain_seconds = remain_iters * batch_time.get_avg()\n seconds = (remain_seconds//1) % 60\n minutes = (remain_seconds//(1*60)) % 
60\n hours = (remain_seconds//(1*60*60)) % 24\n days = (remain_seconds//(1*60*60*24))\n time_stamp = \"\"\n if (days > 0): time_stamp += \"{} days, \".format(days)\n if (hours > 0) : time_stamp += \"{} hr, \".format(hours)\n if (minutes > 0) : time_stamp += \"{} min, \".format(minutes)\n if (seconds > 0) : time_stamp += \"{} sec, \".format(seconds) \n msglogger.info(\">>>>>>>>>>>> Remaining Times: {} <<<<<<<<<<<<<<<<<\".format(time_stamp) )\n\n writer.add_scalar(\"Train Loss\", loss.item(), epoch)\n writer.add_scalar(\"Train Acc1\", top1.avg, epoch)\n writer.add_scalar(\"Train Acc5\", top5.avg, epoch)\n\ndef train_teacher_student(train_loader, model_stud, model_teacher, criterion, criterion_kl, criterion_kl_c, optimizer, epoch, args,weight_to_hook=None):\n batch_time = utils.AverageMeter('Time', ':6.3f')\n data_time = utils.AverageMeter('Data', ':6.3f')\n losses = utils.AverageMeter('Loss', ':.4e')\n losses_kl = utils.AverageMeter('Loss_kl', ':.4e')\n losses_kl_c = utils.AverageMeter('Loss_kl_c', ':.4e')\n losses_kurt = utils.AverageMeter('Loss_kurt', ':.4e')\n losses_ce = utils.AverageMeter('Loss_ce', ':.4e')\n top1 = utils.AverageMeter('Acc@1', ':6.2f')\n top5 = utils.AverageMeter('Acc@5', ':6.2f')\n progress = utils.ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, losses_kl, losses_kl_c, losses_kurt, losses_ce, top1, top5],\n msglogger, prefix=\"Epoch: [{}]\".format(epoch))\n\n model_stud.train()\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n data_time.update(time.time() - end)\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n images = images.cuda()\n\n ############ kurt ###########\n hookF_weights = {}\n hookF_weights_L2 = {}\n hookF_weights_wr = {}\n idx = 0\n if args.diffkurt:\n args.w_kurtosis_target = [1.8, 1.8, 1.8, 1.8,\n 1.8, 1.8, 1.4, 1.8, 1.8,\n 1.8, 1.4, 1.4, 1.4, 1.4,\n 1.8, 1.2, 1.4, 1.2, 1.2]\n else:\n 
args.w_kurtosis_target = [args.w_kurtosis_target] * len(weight_to_hook)\n for name, w_tensor in weight_to_hook.items():\n hookF_weights[name] = KurtosisWeight(w_tensor, name, kurtosis_target=args.w_kurtosis_target[idx],\n k_mode=args.kurtosis_mode)\n if args.w_l2_reg:\n hookF_weights_L2[name] = RidgeRegularization(w_tensor, name)\n if args.w_wr_reg:\n hookF_weights_wr[name] = WeightRegularization(w_tensor, name)\n idx = idx + 1\n ############ kurt ###########\n # compute output\n output_stud = model_stud(images)\n output_teacher = model_teacher(images)\n ############ KL div loss ###########\n if args.react:\n args.alpha = args.alpha\n args.beta = 0\n args.w_lambda_ce = 0\n loss_kl = 0\n else:\n loss_kl = criterion_kl(output_stud, output_teacher,model_stud,model_teacher,args.temperature) * args.beta\n loss_kl_c = criterion_kl_c(output_stud, output_teacher) * args.alpha\n ############ cross entrophy loss ###########\n orig_loss = criterion(output_stud, target) * args.w_lambda_ce\n ############ kurt loss ###########\n w_k_scale = 0\n w_kurtosis_regularization = 0\n if args.w_kurtosis and args.kurtepoch <= epoch:\n w_temp_values = []\n w_temp_kld_values = []\n w_kurtosis_loss = 0\n for w_kurt_inst in hookF_weights.values():\n w_kurt_inst.fn_regularization()\n w_temp_values.append(w_kurt_inst.kurtosis_loss)\n w_temp_kld_values.append(w_kurt_inst.KLDiv_loss)\n if args.kurtosis_mode == 'sum':\n w_kurtosis_loss = reduce((lambda a, b: a + b), w_temp_values)\n elif args.kurtosis_mode == 'avg':\n w_kurtosis_loss = reduce((lambda a, b: a + b), w_temp_values)\n w_kurtosis_loss = w_kurtosis_loss / len(weight_to_hook)\n elif args.kurtosis_mode == 'max':\n w_kurtosis_loss = reduce((lambda a, b: max(a, b)), w_temp_values)\n w_kurtosis_regularization = (10 ** w_k_scale) * args.w_lambda_kurtosis * w_kurtosis_loss\n ############ kurt loss ###########\n\n loss = (loss_kl) +loss_kl_c + orig_loss + ( w_kurtosis_regularization)\n\n acc1, acc5 = utils.accuracy(output_stud, target, topk=(1, 
5))\n losses.update(loss.item(), images.size(0))\n losses_ce.update(orig_loss.item(), images.size(0))\n if args.w_kurtosis:\n losses_kurt.update(w_kurtosis_regularization.item(), images.size(0))\n if not args.react:\n losses_kl.update(loss_kl.item(), images.size(0))\n losses_kl_c.update(loss_kl_c.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n remain_epoch = args.epochs - epoch\n remain_iters = remain_epoch * len(train_loader) + (len(train_loader) - i)\n remain_seconds = remain_iters * batch_time.get_avg()\n seconds = (remain_seconds//1) % 60\n minutes = (remain_seconds//(1*60)) % 60\n hours = (remain_seconds//(1*60*60)) % 24\n days = (remain_seconds//(1*60*60*24))\n time_stamp = \"\"\n if (days > 0): time_stamp += \"{} days, \".format(days)\n if (hours > 0) : time_stamp += \"{} hr, \".format(hours)\n if (minutes > 0) : time_stamp += \"{} min, \".format(minutes)\n if (seconds > 0) : time_stamp += \"{} sec, \".format(seconds)\n msglogger.info(\">>>>>>>>>>>> Remaining Times: {} <<<<<<<<<<<<<<<<<\".format(time_stamp) )\n\n\n writer.add_scalar(\"Train Loss\", loss.item(), epoch)\n writer.add_scalar(\"Train Acc1\", top1.avg, epoch)\n writer.add_scalar(\"Train Acc5\", top5.avg, epoch)\n\ndef validate(val_loader, model, criterion, args, epoch):\n batch_time = utils.AverageMeter('Time', ':6.3f')\n losses = utils.AverageMeter('Loss', ':.4e')\n top1 = utils.AverageMeter('Acc@1', ':6.2f')\n top5 = utils.AverageMeter('Acc@5', ':6.2f')\n progress = utils.ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n msglogger, prefix='Test: ')\n\n model.eval().cuda()\n \n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, 
non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n writer.add_scalar(\"Val Loss\", loss.item(), epoch)\n writer.add_scalar(\"Val Acc1\", top1.avg, epoch)\n writer.add_scalar(\"Val Acc5\", top5.avg, epoch)\n\n return top1.avg\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.optim.Adam",
"torch.multiprocessing.spawn",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.load",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
] |
lucaspettit/GifTheRipper
|
[
"c60ba5872cd34f149d21aaf7a785edb15cdc5a6e"
] |
[
"silverDataScrub.py"
] |
[
"# Silver Data Scrub\n#\n# 1. Queries .GIF files from <src>\n# 2. Converts .GIF to set of JPG's\n# 3. Saves JPG's to <dest>\n\nimport cv2\nfrom datetime import datetime\nimport imageio\nimport os\nimport numpy as np\nfrom random import randint\nimport json\n\n# face detection\nimport mxnet as mx\nfrom MTCNN.mtcnn_detector import MtcnnDetector\n\n\ndef squarify(bb):\n L, T, R, B = bb\n w, h = R - L, B - T\n size = max(w, h)\n L = int(L - ((size - w) / 2))\n T = int(T - ((size - h) / 2))\n R = int(L + size)\n B = int(T + size)\n return L, T, R, B\n\n\ndef resizeRect(rect, targetDim):\n if targetDim <= 0:\n raise ValueError('resizeRect: parameter \"targetDim\" <= 0')\n l, t, r, b = rect\n cw, ch = r - l, b - t\n\n dw, dh = targetDim - cw, targetDim - ch\n\n l = int(l - (dw / 2))\n t = int(t - (dh / 2))\n\n return l, t, l + targetDim, t + targetDim\n\n\ndef resize_img(img, max_dim=1024):\n h, w = img.shape[:2]\n if w > h:\n x = max_dim / w\n else:\n x = max_dim / h\n\n _w = int(w * x)\n _h = int(h * x)\n\n img = cv2.resize(img, (_w, _h), interpolation=cv2.INTER_CUBIC)\n\n return img\n\n\ndef normifyBoundingBoxes(bbs):\n size = max([B - T for _, T, _, B in bbs])\n return [resizeRect(bb, size) for bb in bbs]\n\n\ndef loadFrames(paths):\n gifs = []\n for gifpath in paths:\n gif = []\n for path in gifpath:\n gif.append(cv2.imread(path))\n gifs.append(gif)\n return gifs\n\n\ndef markPeopleOnFrames(frames, data):\n peopleColors = {}\n for frameNum, people in data:\n frame = frames[frameNum]\n\n for person in people:\n if person['id'] in peopleColors:\n color = peopleColors[person['id']]\n else:\n color = randint(0, 255), randint(0, 255), randint(0, 255)\n peopleColors[person['id']] = color\n\n (l, t, r, b) = person['boundingbox']\n frame = cv2.rectangle(frame, (l, t), (r, b), color, 3)\n\n return frames\n\n\ndef detectFaces(detector, frames, min_face_size=32):\n frames_bb = []\n landmarks = []\n fh, fw = frames[0].shape[0], frames[0].shape[1]\n\n for frame in frames:\n res = 
detector.detect_face(frame)\n if res is not None:\n bbs = []\n lmds = []\n boxes = res[0]\n points = res[1]\n for b, p in zip(boxes, points):\n # prep bounding boxes (they need to be constrained t within the bounds of the image\n L, T, R, B = b[:-1]\n L = max(0, L)\n T = max(0, T)\n R = min(fw, R)\n B = min(fh, B)\n\n if (R - L) >= min_face_size and (B - T) >= min_face_size:\n bbs.append(squarify((L, T, R, B)))\n\n try:\n right_eye = p[0], p[5]\n left_eye = p[1], p[6]\n center = int(right_eye[0] + ((left_eye[0] - right_eye[0]) / 2.0)), \\\n int(right_eye[1] + ((left_eye[1] - right_eye[1]) / 2.0))\n # load landmarks\n lmd = {\n 'right_eye': (p[0], p[5]),\n 'left_eye': (p[1], p[6]),\n 'center_eyes': center,\n 'nose': (p[2], p[7]),\n 'right_mouth': (p[3], p[8]),\n 'left_mouth': (p[4], p[9])\n }\n\n lmds.append(lmd)\n except Exception as e:\n raise Exception('detectFace failed to extract landmarks: %s' % str(p))\n\n frames_bb.append(bbs)\n landmarks.append(lmds)\n\n else:\n frames_bb.append([])\n landmarks.append([])\n return frames_bb, landmarks\n\n\ndef playClip(frames, delay=100):\n cv2.namedWindow('display')\n for frame in frames:\n cv2.imshow('display', frame)\n cv2.waitKey(delay)\n cv2.destroyWindow('display')\n\n\ndef compare_bb(prev, curr):\n threshold = 0.2 * (prev[2] - prev[0])\n\n for p, c in zip(prev, curr):\n if abs(p - c) > threshold:\n return False\n return True\n\n\ndef trackFaces(frames_bb, frames_lmks, min_series=5):\n people = {}\n prev_bbs = []\n new_person_id = 0\n\n for frameId, (frame_bb, frame_lmk) in enumerate(zip(frames_bb, frames_lmks)):\n tmp_bbs = []\n for curr_bb, curr_lmk in zip(frame_bb, frame_lmk):\n # check for known issues\n W = curr_bb[2] - curr_bb[0]\n H = curr_bb[3] - curr_bb[1]\n if W != H:\n raise ValueError('SlitError: FrameId = %d' % frameId)\n if W == 0 or H == 0:\n raise ValueError('NoBytesError: FrameId = %d' % frameId)\n if W < 16 or H < 16:\n continue\n\n identified = False\n for prev, _id in prev_bbs:\n if 
compare_bb(prev, curr_bb):\n people[_id]['boundingbox'].append(curr_bb)\n people[_id]['landmark'].append(curr_lmk)\n people[_id]['frameNum'].append(frameId)\n tmp_bbs.append((curr_bb, _id))\n identified = True\n\n if not identified:\n people[new_person_id] = {\n 'boundingbox': [curr_bb],\n 'landmark': [curr_lmk],\n 'frameNum': [frameId]\n }\n tmp_bbs.append((curr_bb, new_person_id))\n new_person_id += 1\n identified = True\n\n prev_bbs = list(tmp_bbs)\n\n # prune off people with out enough data in their series\n people = {_id: data for _id, data in people.items() if len(data['boundingbox']) >= min_series}\n\n frame_data = {}\n for personId, data in people.items():\n frameNums = data['frameNum']\n bbs = data['boundingbox']\n lmks = data['landmark']\n for bb, lmk, frameNum in zip(bbs, lmks, frameNums):\n if frameNum in frame_data:\n frame_data[frameNum].append({\n 'id': personId,\n 'boundingbox': bb,\n 'landmark': lmk\n })\n else:\n frame_data[frameNum] = [{\n 'id': personId,\n 'boundingbox': bb,\n 'landmark': lmk\n }]\n frame_data = [(frameNum, data) for frameNum, data in frame_data.items()]\n sorted(frame_data, key=lambda x: x[0])\n return frame_data\n\n\ndef extractFrames(reader):\n # some gif's only update pixels that change. 
to check for this, i'm looking at the alpha channel\n # to see if there is an alpha score less than the max (255)\n hasAlphaUpdate = np.min(np.asarray(reader[1])[:, :, -1]) < 255\n frames = []\n\n if not hasAlphaUpdate:\n for frameId, frame in enumerate(reader):\n frame = np.asarray(frame[:, :, :3], dtype=np.uint8)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frames.append(frame)\n else:\n frame = np.asarray(reader[0][:, :, :3], dtype=np.uint8)\n frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n prev = np.asarray(frame)\n for frameId, frame in zip([i + i for i in range(len(reader) - 1)], reader[1:]):\n colors = np.asarray(frame[:, :, :3], dtype=np.uint8)\n alphas = np.asarray(frame[:, :, 3], dtype=np.uint8)\n prev[alphas > 0] = 0\n curr = prev + colors\n frames.append(cv2.cvtColor(curr, cv2.COLOR_BGR2RGB))\n prev = curr\n\n return frames, hasAlphaUpdate\n\n\ndef cropBoundingBox(bb, frame):\n frameH, frameW = frame.shape[0], frame.shape[1]\n if len(frame.shape) == 3:\n channels = frame.shape[2]\n elif len(frame.shape) == 2:\n channels = 1\n else:\n raise ValueError('Unrecognized frame.shape: frame.shape = ' + str(frame.shape))\n\n left, top, right, bottom = bb\n width, height = right - left, bottom - top\n snip = np.zeros(width * height * channels, dtype=np.uint8).reshape((width, height, channels))\n\n capture_left = max(left, 0)\n capture_top = max(top, 0)\n capture_right = min(right, frameW)\n capture_bottom = min(bottom, frameH)\n\n copy_left = -left if left < 0 else 0\n copy_top = -top if top < 0 else 0\n copy_right = capture_right - capture_left + copy_left\n copy_bottom = capture_bottom - capture_top + copy_top\n\n snip[copy_top:copy_bottom, copy_left:copy_right] = frame[capture_top:capture_bottom, capture_left:capture_right]\n return snip\n\n\ndef run(src, dest, process_name='silverDataScrub', resource_dir='', gpu=False):\n if not os.path.isdir(src):\n raise (NotADirectoryError('\"%s\" is not a directory' % src))\n\n if not 
os.path.isdir(dest):\n print('destination folder not found. creating one.')\n os.makedirs(dest)\n\n logdir = os.path.join(resource_dir, 'log')\n resdir = os.path.join(resource_dir, 'res')\n configdir = os.path.join(resource_dir, 'config')\n\n # build detector\n print('loading MTCNN model')\n detector = MtcnnDetector(model_folder=os.path.join('MTCNN', 'model'),\n ctx=mx.gpu(0) if gpu else mx.cpu(0),\n num_worker=4,\n accurate_landmark=True)\n print('done!')\n\n # get the list of files\n files = []\n VIDEO_EXTENSIONS = ('.gif',)\n STILL_EXTENSIONS = ('.jpg', '.png', '.jpeg', '.bmp', '.tiff')\n VALID_EXTENSIONS = ('.gif', '.jpg', '.png', '.jpeg', '.bmp', '.tiff')\n for f in os.listdir(src):\n ext = os.path.splitext(f)[-1].lower()\n if ext in VALID_EXTENSIONS:\n files.append(os.path.join(src, f))\n try:\n files = sorted(files, key=lambda x: int(os.path.splitext(os.path.basename(x))[0]))\n except Exception as e:\n pass\n\n # convert and save all gifs as jpgs\n errors = []\n alphaCount = 0\n gif_with_face = 0\n\n # build paths\n for x in (logdir, resdir, configdir):\n if not os.path.isdir(x):\n print('directory %s not found. creating one.' 
% x)\n os.makedirs(x)\n\n logfile = os.path.join(logdir, 'Log-Detect-%s.txt' % process_name)\n configfile = os.path.join(configdir, 'Config-Detect-%s.json' % process_name)\n\n print('loading config file')\n if os.path.isfile(configfile):\n with open(configfile) as f:\n config = json.load(f)\n else:\n config = {'startIndex': 0}\n startIndex = config['startIndex']\n print('done!')\n print('start index = %d' % startIndex)\n\n # initial log write\n if not os.path.isfile(logfile):\n with open(logfile, 'w') as f:\n f.write('Starting silver scrub\\n')\n f.write('%d files found\\n' % len(files))\n else:\n with open(logfile, 'a+') as f:\n f.write('Starting silver scrub\\n')\n f.write('%d files found\\n' % len(files))\n\n print('Starting silver scrub')\n print('%d files found' % len(files))\n\n batch_size = 80 if gpu else 10\n SERIES_MIN_FRAMES = 5\n STILL_MIN_FRAMES = 1\n MAX_IMG_SIZE = 1024\n\n for fileNum, f in enumerate(files[startIndex:]):\n #print('running batch %d' % fileNum)\n\n # dump log\n if (fileNum + 1) % batch_size == 0:\n logstr = '----------------------\\n'\n logstr += 'file # : %d-%d\\n' % (fileNum + startIndex - batch_size + 1,\n fileNum + startIndex + 1)\n logstr += 'imgs w/faces: %d\\n' % gif_with_face\n logstr += 'fail : %d\\n' % len(errors)\n logstr += 'num alpha : %d\\n' % alphaCount\n logstr += 'time : %s\\n' % datetime.now().strftime('%m/%d %H:%M:%S')\n\n if len(errors) > 0:\n logstr += 'Failed files:\\n'\n for filename, errorMessage in errors:\n logstr += '\\tfilename: %s\\n\\terror : %s\\n' % (filename, errorMessage)\n\n # print log data\n print(logstr)\n\n # write log file && reset vars\n try:\n with open(logfile, 'a+') as logger:\n logger.write(logstr)\n except FileNotFoundError as e:\n logfile_splitlist = os.path.basename(logfile).split('(')\n if len(logfile_splitlist) == 1:\n logcount = 1\n else:\n logcount = int(logfile_splitlist[-1].split(')')[0]) + 1\n logfile_splitlist = logfile.split('.')\n logfile = '%s(%d).%s' % (logfile_splitlist[0], 
logcount, logfile_splitlist[1])\n\n with open(logfile, 'w') as logger:\n logger.write(logstr)\n\n errors = []\n gif_with_face = 0\n alphaCount = 0\n logstr = ''\n\n # write config file\n config['startIndex'] = fileNum + startIndex\n with open(configfile, 'w') as configger:\n json.dump(config, configger)\n\n file_name, file_ext = os.path.splitext(os.path.basename(f))\n\n # scrub process starts here\n try:\n if file_ext in VIDEO_EXTENSIONS:\n # print(' reading video %s' % file_name)\n reader = imageio.mimread(uri=f, memtest=False)\n\n # print(' extracting frames')\n frames, hasAlphaUpdate = extractFrames(reader)\n frame_shape = list(reader.shape[:3])\n del reader\n is_series = True\n if hasAlphaUpdate:\n alphaCount += 1\n elif file_ext in STILL_EXTENSIONS:\n # print(' reading still %s' % file_name)\n img = cv2.imread(f)\n # skip if image is None.\n # this happens when OpenCV cannot read a file.\n if img is None:\n continue\n\n if max(img.shape[:2]) > MAX_IMG_SIZE:\n img = resize_img(img, max_dim=MAX_IMG_SIZE)\n frames = [img]\n hasAlphaUpdate = False\n is_series = False\n frame_shape = list(img.shape) if len(img.shape) >= 3 else list(img.shape) + [1]\n # print(' %d frames' % len(frames))\n else:\n continue\n\n # detect faces\n # print(' detecting faces')\n bbs, lmds = detectFaces(detector, frames)\n if len(bbs) == 0:\n continue\n\n gif_with_face += 1\n # print(' tracking faces')\n people = trackFaces(bbs, lmds, min_series=SERIES_MIN_FRAMES if is_series else STILL_MIN_FRAMES)\n\n # dump people data\n if len(people) > 0:\n # convert all the int32 to int\n # print(' dumping people JSON data')\n _frames = []\n for frameId, p in people:\n faces = []\n for face in p:\n faces.append(\n {\n 'id': int(face['id']),\n 'boundingbox': [int(face['boundingbox'][i]) for i in range(4)],\n 'landmark': {'right_eye': [int(face['landmark']['right_eye'][i]) for i in range(2)],\n 'left_eye': [int(face['landmark']['left_eye'][i]) for i in range(2)],\n 'center_eyes': 
[int(face['landmark']['center_eyes'][i]) for i in range(2)],\n 'right_mouth': [int(face['landmark']['right_mouth'][i]) for i in range(2)],\n 'left_mouth': [int(face['landmark']['left_mouth'][i]) for i in range(2)],\n 'nose': [int(face['landmark']['nose'][i]) for i in range(2)]\n }\n })\n _frames.append((frameId, faces))\n _people = {\n 'filename': file_name,\n 'filetype': file_ext,\n 'frames': _frames,\n 'shape': list(frame_shape)\n }\n\n with open(os.path.join(dest, '%s.json' % file_name), 'w') as people_file:\n json.dump(_people, people_file, indent=2)\n del _people\n\n del frames, people, bbs, lmds\n\n except Exception as e:\n error_msg = 'Exception occured: %s\\n' % str(e)\n error_msg += (' - FileName = %s' % file_name)\n error_msg += (' - FileExt = %s' % file_ext)\n error_msg += (' - Error = %s' % str(e))\n errors.append((file_name, error_msg))\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument('--src', required=True, type=str, help='Source folder with Images/GIFs/Videos')\n parser.add_argument('--dest', required=True, type=str, help='Destination folder for .JSON objects to be saved')\n parser.add_argument('--pname', required=False, type=str, default='silverDataScrub', help='Process name for resource files')\n parser.add_argument('--resdir', required=False, type=str, default='', help='Directory to store resource files')\n parser.add_argument('--gpu', required=False, type=str, default=False, help='Flag to denote GPU support (True for GPU, False for CPU)')\n args = parser.parse_args()\n\n run(src=args.src, dest=args.dest, process_name=args.pname, resource_dir=args.resdir, gpu=args.gpu)"
] |
[
[
"numpy.asarray",
"numpy.zeros"
]
] |
miaobin/DirectML
|
[
"d4657006a60a7b7d9baf17638c42aee27258c836"
] |
[
"PyTorch/yolov3/utils/torch_utils.py"
] |
[
"# YOLOv3 PyTorch utils\n\nimport datetime\nimport logging\nimport math\nimport os\nimport platform\nimport subprocess\nimport time\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\ntry:\n import thop # for FLOPS computation\nexcept ImportError:\n thop = None\nlogger = logging.getLogger(__name__)\n\n\n@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n \"\"\"\n Decorator to make all processes in distributed training wait for each local_master to do something.\n \"\"\"\n if local_rank not in [-1, 0]:\n torch.distributed.barrier()\n yield\n if local_rank == 0:\n torch.distributed.barrier()\n\n\ndef init_torch_seeds(seed=0):\n # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n torch.manual_seed(seed)\n if seed == 0: # slower, more reproducible\n cudnn.benchmark, cudnn.deterministic = False, True\n else: # faster, less reproducible\n cudnn.benchmark, cudnn.deterministic = True, False\n\n\ndef date_modified(path=__file__):\n # return human-readable file modification date, i.e. '2021-3-26'\n t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)\n return f'{t.year}-{t.month}-{t.day}'\n\n\ndef git_describe(path=Path(__file__).parent): # path must be a directory\n # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe\n s = f'git -C {path} describe --tags --long --always'\n try:\n return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]\n except subprocess.CalledProcessError as e:\n return '' # not a git repository\n\n\ndef select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n if (device.lower() == 'dml'):\n return torch.device('dml')\n s = f'YOLOv3 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n # os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')\n\n\ndef time_synchronized():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()\n\n\ndef profile(x, ops, n=100, device=None):\n # profile a pytorch module or list of modules. 
Example usage:\n # x = torch.randn(16, 3, 640, 640) # input\n # m1 = lambda x: x * torch.sigmoid(x)\n # m2 = nn.SiLU()\n # profile(x, [m1, m2], n=100) # profile speed over 100 iterations\n\n device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n x = x.to(device)\n x.requires_grad = True\n print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')\n print(f\"\\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}\")\n for m in ops if isinstance(ops, list) else [ops]:\n m = m.to(device) if hasattr(m, 'to') else m # device\n m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type\n dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward\n try:\n flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS\n except:\n flops = 0\n\n for _ in range(n):\n t[0] = time_synchronized()\n y = m(x)\n t[1] = time_synchronized()\n try:\n _ = y.sum().backward()\n t[2] = time_synchronized()\n except: # no backward method\n t[2] = float('nan')\n dtf += (t[1] - t[0]) * 1000 / n # ms per op forward\n dtb += (t[2] - t[1]) * 1000 / n # ms per op backward\n\n s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'\n s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'\n p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters\n print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')\n\n\ndef is_parallel(model):\n # Returns True if model is of type DP or DDP\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model\n\n\ndef intersect_dicts(da, db, exclude=()):\n # Dictionary intersection of matching keys and 
shapes, omitting 'exclude' keys, using da values\n return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}\n\n\ndef initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:\n m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n # Finds layer indices matching module class 'mclass'\n return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef sparsity(model):\n # Return global model sparsity\n a, b = 0., 0.\n for p in model.parameters():\n a += p.numel()\n b += (p == 0).sum()\n return b / a\n\n\ndef prune(model, amount=0.3):\n # Prune model to requested global sparsity\n import torch.nn.utils.prune as prune\n print('Pruning model... ', end='')\n for name, m in model.named_modules():\n if isinstance(m, nn.Conv2d):\n prune.l1_unstructured(m, name='weight', amount=amount) # prune\n prune.remove(m, 'weight') # make permanent\n print(' %.3g global sparsity' % sparsity(model))\n\n\ndef fuse_conv_and_bn(conv, bn):\n # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n groups=conv.groups,\n bias=True).requires_grad_(False).to(conv.weight.device)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n # prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - 
bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv\n\n\ndef model_info(model, verbose=False, img_size=640):\n # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPS\n from thop import profile\n stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32\n img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input\n flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS\n img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float\n fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS\n except (ImportError, Exception):\n fs = ''\n\n logger.info(f\"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")\n\n\ndef load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = torchvision.models.__dict__[name](pretrained=True)\n\n # ResNet model properties\n # input_size = [3, 224, 224]\n # input_space = 'RGB'\n # input_range = [0, 1]\n # mean = [0.485, 0.456, 0.406]\n # std = [0.229, 0.224, 0.225]\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = nn.Parameter(torch.zeros(n), 
requires_grad=True)\n model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model\n\n\ndef scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)\n # scales img(bs,3,y,x) by ratio constrained to gs-multiple\n if ratio == 1.0:\n return img\n else:\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean\n\n\ndef copy_attr(a, b, include=(), exclude=()):\n # Copy attributes from b to a, options to only include [...] and to exclude [...]\n for k, v in b.__dict__.items():\n if (len(include) and k not in include) or k.startswith('_') or k in exclude:\n continue\n else:\n setattr(a, k, v)\n\n\nclass ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, updates=0):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA\n # if next(model.parameters()).device.type != 'cpu':\n # self.ema.half() # FP16 EMA\n self.updates = updates # number of EMA updates\n self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # 
Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):\n # Update EMA attributes\n copy_attr(self.ema, model, include, exclude)\n"
] |
[
[
"torch.zeros",
"torch.device",
"torch.nn.utils.prune.l1_unstructured",
"torch.sqrt",
"torch.cuda.synchronize",
"torch.nn.functional.interpolate",
"torch.cuda.get_device_properties",
"torch.no_grad",
"torch.nn.utils.prune.remove",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.mm",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.functional.pad",
"torch.distributed.barrier"
]
] |
zhaohj2017/HSCC20-Repeatability
|
[
"47a152058a2c3aa83104b76a0f866fab9a30d9f3"
] |
[
"cases/eg3_darboux/superp.py"
] |
[
"import torch\nimport numpy as np\nfrom functools import reduce\nfrom operator import mul\n\n############################################\n# set default data type to double; for GPU\n# training use float\n############################################\ntorch.set_default_dtype(torch.float64)\ntorch.set_default_tensor_type(torch.DoubleTensor)\n# torch.set_default_dtype(torch.float32)\n# torch.set_default_tensor_type(torch.FloatTensor)\n\nVERBOSE = 1 # set to 1 to display epoch and batch losses in the training process\n\nFINE_TUNE = 0 # set to 1 for fine-tuning a pre-trained model\n\n############################################\n# set the network architecture\n############################################\nD_H = 10 # the number of neurons of each hidden layer\nN_H = 1 # then number of hidden layers\n\n\n############################################\n# for activation function definition\n############################################\nBENT_DEG = 0.0001\n\n\n############################################\n# set loss function definition\n############################################\nTOL_INIT = 0.0\nTOL_SAFE = 0.0\nTOL_BOUNDARY = 0.05\nTOL_LIE = 0.005\nTOL_NORM_LIE = 0.0\nWEIGHT_LIE = 1\nWEIGHT_NORM_LIE = 0\n\nDECAY_LIE = 1\nDECAY_INIT = 1\nDECAY_UNSAFE = 1\n\n\n############################################\n# for optimization method tunning: LBFGS\n############################################\nLBFGS_NUM_ITER = 1\nLBFGS_TOL_GRAD = 1e-05\nLBFGS_TOL_CHANGE = 1e-09\nLBFGS_NUM_HISTORY = 100\nLBFGS_LINE_SEARCH_FUN = None\n\n\nTOL_OPTIMIZER_RESET = -1\nSHRINK_RATE_FACTOR = 10\nFRACTION_INSTABLE_BATCH = 10000000000000000000\nNUM_BATCH_ITR = 3\n\n\n############################################\n# set the training super parameters\n############################################\nEPOCHS = 100\n\n\n############################################\n# my own scheduling policy: \n# rate = alpha / (1 + beta * epoch^gamma)\n############################################\nALPHA = 0.1 # initial learning 
rate\nBETA = 0 # if beta equals 0 then constant rate = alpha\nGAMMA = 0 # when beta is nonzero, larger gamma gives faster drop of rate\n\n\n############################################\n# training termination flags\n############################################\nLOSS_OPT_FLAG = 1e-16\nTOL_MAX_GRAD = 6\n\n\n############################################\n# for training set generation\n############################################\nTOL_DATA_GEN = 1e-16\n\nDATA_EXP_I = np.array([5, 5]) # for sampling from initial; length = prob.DIM\nDATA_LEN_I = np.power(2, DATA_EXP_I) # the number of samples for each dimension of domain\nBLOCK_EXP_I = np.array([3, 3]) # 0 <= BATCH_EXP <= DATA_EXP\nBLOCK_LEN_I = np.power(2, BLOCK_EXP_I) # number of batches for each dimension\n # for this example, it is important to set the size of initial and unsafe not too large\n # compared with the size of each batch of domain-lie\nDATA_EXP_U = np.array([7, 7]) # for sampling from initial; length = prob.DIM\nDATA_LEN_U = np.power(2, DATA_EXP_U) # the number of samples for each dimension of domain\nBLOCK_EXP_U = np.array([5, 5]) # 0 <= BATCH_EXP <= DATA_EXP\nBLOCK_LEN_U = np.power(2, BLOCK_EXP_U) # number of batches for each dimension\n\nDATA_EXP_D = np.array([8, 8]) # for sampling from initial; length = prob.DIM\nDATA_LEN_D = np.power(2, DATA_EXP_D) # the number of samples for each dimension of domain\nBLOCK_EXP_D = np.array([6, 6]) # 0 <= BATCH_EXP <= DATA_EXP\nBLOCK_LEN_D = np.power(2, BLOCK_EXP_D) # number of batches for each dimension\n\n\n############################################\n# number of mini_batches\n############################################\nBATCHES_I = reduce(mul, list(BLOCK_LEN_I))\nBATCHES_U = reduce(mul, list(BLOCK_LEN_U))\nBATCHES_D = reduce(mul, list(BLOCK_LEN_D))\n\nBATCHES = max(BATCHES_I, BATCHES_U, BATCHES_D)\n\n############################################\n# for plotting\n############################################\nPLOT_EXP_B = np.array([8, 8]) # sampling from domain for 
plotting the boundary of barrier using contour plot\nPLOT_LEN_B = np.power(2, PLOT_EXP_B) # the number of samples for each dimension of domain, usually larger than superp.DATA_LEN_D\n\nPLOT_EXP_V = np.array([7, 7]) # sampling from domain for plotting the vector field\nPLOT_LEN_V = np.power(2, PLOT_EXP_V) # the number of samples for each dimension of domain, usually equal to superp.DATA_LEN_D\n\nPLOT_EXP_P = np.array([7, 7]) # sampling from domain for plotting the scattering sampling points, should be equal to superp.DATA_LEN_D\nPLOT_LEN_P = np.power(2, PLOT_EXP_P) # the number of samples for each dimension of domain\n\nPLOT_VEC_SCALE = None\n\n"
] |
[
[
"torch.set_default_dtype",
"torch.set_default_tensor_type",
"numpy.array",
"numpy.power"
]
] |
javakian/Paddle
|
[
"10018f1561cb8f75f8df982dcf2217e50cee2647"
] |
[
"python/paddle/fluid/tests/unittests/test_cond.py"
] |
[
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\n\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nimport paddle.fluid.layers as layers\nimport paddle.fluid.framework as framework\nfrom paddle.fluid.backward import append_backward\nfrom paddle.fluid.executor import Executor\nfrom paddle.fluid.framework import Program, program_guard\nfrom simple_nets import simple_fc_net_with_inputs, batchnorm_fc_with_inputs\n\nnp.random.seed(123)\n\n\nclass TestCondInputOutput(unittest.TestCase):\n def test_return_single_var(self):\n \"\"\"\n pseudocode:\n\n if 0.23 < 0.1:\n return 2\n else:\n return -1\n \"\"\"\n\n def true_func():\n return layers.fill_constant(shape=[2, 3], dtype='int32', value=2)\n\n def false_func():\n return layers.fill_constant(shape=[3, 2], dtype='int32', value=-1)\n\n main_program = Program()\n startup_program = Program()\n with program_guard(main_program, startup_program):\n x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)\n y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)\n pred = layers.less_than(y, x)\n out = layers.cond(pred, true_func, false_func)\n # out is one tensor\n\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n ret = exe.run(main_program, fetch_list=[out.name])\n self.assertTrue(\n 
np.allclose(np.asarray(ret), np.full((3, 2), -1, np.int32)))\n\n def test_return_var_tuple(self):\n \"\"\"\n pseudocode:\n\n if True:\n return 1, True\n else:\n return 3, 2\n \"\"\"\n\n def true_func():\n return layers.fill_constant(\n shape=[1, 2], dtype='int32', value=1), layers.fill_constant(\n shape=[2, 3], dtype='bool', value=True)\n\n def false_func():\n return layers.fill_constant(\n shape=[3, 4], dtype='float32', value=3), layers.fill_constant(\n shape=[4, 5], dtype='int64', value=2)\n\n main_program = Program()\n startup_program = Program()\n with program_guard(main_program, startup_program):\n pred = layers.fill_constant(shape=[1], dtype='bool', value=True)\n out = layers.cond(pred, true_func, false_func)\n # out is a tuple containing 2 tensors\n\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n ret = exe.run(main_program, fetch_list=out)\n self.assertTrue(\n np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32)))\n self.assertTrue(\n np.allclose(np.asarray(ret[1]), np.full((2, 3), True, np.bool)))\n\n def test_pass_and_modify_var(self):\n \"\"\"\n pseudocode:\n for i in range(5):\n a = 7\n if i % 2 == 0:\n a = a * (i + 1)\n else:\n a = a - (i - 1)\n \"\"\"\n\n def true_func(a, i):\n a = a * (i + 1)\n return a\n\n def false_func(a, i):\n a = a - (i - 1)\n return a\n\n main_program = Program()\n startup_program = Program()\n with program_guard(main_program, startup_program):\n a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7)\n i = fluid.data(name=\"i\", shape=[1], dtype='int32')\n pred = ((i % 2) == 0)\n a = layers.cond(pred, lambda: true_func(a, i),\n lambda: false_func(a, i))\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n for feed_i in range(5):\n expected_a = 7 * (feed_i + 1) if feed_i % 2 == 0 else 8 - feed_i\n ret = exe.run(main_program,\n feed={'i': np.full((1), feed_i, np.int32)},\n 
fetch_list=[a])\n self.assertTrue(\n np.allclose(\n np.asarray(ret), np.full((3, 2, 1), expected_a, np.int32)))\n\n def test_return_none(self):\n \"\"\"\n pseudocode: test doing nothing in branches\n for i in range(5):\n if i % 2 == 0:\n pass\n else:\n pass\n \"\"\"\n\n def true_func():\n pass\n\n def false_func():\n return None\n\n main_program = Program()\n startup_program = Program()\n with program_guard(main_program, startup_program):\n i = fluid.data(name=\"i\", shape=[1], dtype='int32')\n pred = ((i % 2) == 0)\n out1 = layers.cond(pred, true_func, false_func)\n out2 = layers.cond(pred, None, false_func)\n out3 = layers.cond(pred, true_func, None)\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n for feed_i in range(5):\n # Test that output is None is runnable\n exe.run(main_program, feed={'i': np.full((1), feed_i, np.int32)})\n self.assertIsNone(out1)\n self.assertIsNone(out2)\n self.assertIsNone(out3)\n\n def test_wrong_structure_exception(self):\n \"\"\"\n test returning different number of tensors cannot merge into output\n \"\"\"\n\n def func_return_none():\n return None\n\n def func_return_one_tensor():\n return layers.fill_constant(shape=[2, 7], dtype='int32', value=3)\n\n def func_return_two_tensors():\n return layers.fill_constant(\n shape=[3, 1], dtype='int32', value=7), layers.fill_constant(\n shape=[3, 1], dtype='int32', value=8)\n\n main_program = Program()\n startup_program = Program()\n with program_guard(main_program, startup_program):\n i = fluid.data(name=\"i\", shape=[1], dtype='int32')\n pred = ((i % 2) == 0)\n with self.assertRaises(Exception) as e:\n out = layers.cond(pred, i, func_return_one_tensor)\n self.assertEqual(\"The true_fn in cond must be callable\",\n str(e.exception))\n\n with self.assertRaises(Exception) as e:\n out = layers.cond(pred, func_return_one_tensor, np.asarray([3]))\n self.assertEqual(\"The false_fn in cond must be callable\",\n 
str(e.exception))\n\n with self.assertRaises(Exception) as e:\n out = layers.cond(pred, func_return_none,\n func_return_one_tensor)\n self.assertTrue(\n \"Incompatible return values of true_fn and false_fn in cond\" in\n str(e.exception))\n\n with self.assertRaises(Exception) as e:\n out = layers.cond(pred, func_return_two_tensors,\n func_return_none)\n self.assertTrue(\n \"Incompatible return values of true_fn and false_fn in cond\" in\n str(e.exception))\n\n with self.assertRaises(Exception) as e:\n out = layers.cond(pred, func_return_one_tensor,\n func_return_two_tensors)\n self.assertTrue(\n \"Incompatible return values of true_fn and false_fn in cond\" in\n str(e.exception))\n\n\nclass TestCondNestedControlFlow(unittest.TestCase):\n def test_cond_inside_cond(self):\n \"\"\"\n pseudocode:\n for i in range(1, 10):\n a = 2 * i\n if i < 5:\n if i >= 3:\n return a + a \n else:\n return a - a\n else:\n if i < 8:\n return a * a\n else:\n return a / a\n \"\"\"\n\n def less_than_branch(i, a):\n return layers.cond(i >= 3.0, lambda: layers.elementwise_add(a, a),\n lambda: layers.elementwise_sub(a, a))\n\n def greater_equal_branch(i, a):\n return layers.cond(i < 8.0, lambda: layers.elementwise_mul(a, a),\n lambda: layers.elementwise_div(a, a))\n\n main_program = Program()\n startup_program = Program()\n with program_guard(main_program, startup_program):\n i = fluid.data(name=\"i\", shape=[1], dtype='float32')\n a = 2.0 * i\n out = layers.cond(i < 5.0, lambda: less_than_branch(i, a),\n lambda: greater_equal_branch(i, a))\n mean = layers.mean(out)\n append_backward(mean)\n\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n for feed_i in range(0, 10):\n expected_a = 2.0 * feed_i\n if feed_i < 5:\n expected_ret = expected_a + expected_a if feed_i >= 3 else 0.0\n expected_a_grad = 2.0 if feed_i >= 3 else 0.0\n else:\n expected_ret = expected_a * expected_a if feed_i < 8 else 1.0\n expected_a_grad = 2.0 * 
expected_a if feed_i < 8 else 0.0\n ret = exe.run(main_program,\n feed={'i': np.full((1), feed_i, np.float32)},\n fetch_list=[out.name, a.grad_name])\n self.assertEqual(ret[0][0], expected_ret)\n self.assertEqual(ret[1][0], expected_a_grad)\n\n\nclass TestCondBackward(unittest.TestCase):\n def backward_value_helper(self, cond_func):\n \"\"\"\n Helper function that compares calculated backward value is close to dy/dx\n \"\"\"\n main_program = Program()\n main_program.random_seed = 123\n startup_program = Program()\n startup_program.random_seed = 123\n with program_guard(main_program, startup_program):\n img = fluid.data(name='image', shape=[-1, 9], dtype='float32')\n img.stop_gradient = False\n label = fluid.data(name='label', shape=[-1, 1], dtype='int64')\n i = fluid.data(name=\"i\", shape=[1], dtype='int32')\n loss = cond_func(i, img, label)\n append_backward(loss)\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(startup_program)\n\n delta = 0.005\n for feed_i in range(0, 10):\n feed_img = np.random.random(size=[1, 9]).astype(np.float32)\n feed_label = np.random.randint(\n low=0, high=10, size=[1, 1], dtype=np.int64)\n img_grad, loss_value = exe.run(\n main_program,\n feed={\n 'i': np.full((1), feed_i, np.int32),\n 'image': feed_img,\n 'label': feed_label\n },\n fetch_list=[img.grad_name, loss.name])\n\n numerical_grad = np.zeros(shape=[1, 9], dtype=np.float32)\n feed_img_delta = np.copy(feed_img)\n for j in range(9):\n feed_img_delta[0][j] = feed_img[0][j] + delta\n loss_delta = exe.run(main_program,\n feed={\n 'i': np.full((1), feed_i, np.int32),\n 'image': feed_img_delta,\n 'label': feed_label\n },\n fetch_list=[loss.name])\n numerical_grad[0][j] = (loss_delta[0] - loss_value[0]) / delta\n feed_img_delta[0][j] = feed_img[0][j]\n self.assertTrue(\n np.isclose(\n img_grad, numerical_grad, atol=0.05, rtol=0.05).all())\n\n def add_optimizer_helper(self, cond_func):\n \"\"\"\n Test that 
program is runnable when add optimizer\n \"\"\"\n main_program = Program()\n startup_program = Program()\n with program_guard(main_program, startup_program):\n img = fluid.data(name='image', shape=[-1, 784], dtype='float32')\n label = fluid.data(name='label', shape=[-1, 1], dtype='int64')\n i = fluid.data(name=\"i\", shape=[1], dtype='int32')\n loss = cond_func(i, img, label)\n optimizer = fluid.optimizer.SGD(learning_rate=0.1)\n optimizer.minimize(loss)\n\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(startup_program)\n\n for feed_i in range(0, 10):\n feed_img = np.random.random(size=[16, 784]).astype(np.float32)\n feed_label = np.random.randint(\n low=0, high=10, size=[16, 1], dtype=np.int64)\n exe.run(main_program,\n feed={\n 'i': np.full((1), feed_i, np.int32),\n 'image': feed_img,\n 'label': feed_label\n },\n fetch_list=[loss])\n\n def test_cond_backward(self):\n def cond_func(i, img, label):\n predicate = ((i % 2) == 0)\n return layers.cond(predicate,\n lambda: simple_fc_net_with_inputs(img, label, class_num=10),\n lambda: batchnorm_fc_with_inputs(img, label, class_num=10))\n\n self.backward_value_helper(cond_func)\n self.add_optimizer_helper(cond_func)\n\n def test_half_nested_cond_backward(self):\n def branch(i, img, label):\n return layers.cond((i % 2) == 0, lambda: simple_fc_net_with_inputs(img, label, class_num=10),\n lambda: batchnorm_fc_with_inputs(img, label, class_num=10))\n\n def cond_func_simple_net_at_true(i, img, label):\n return layers.cond(i < 5, lambda: branch(i, img, label),\n lambda: layers.mean(img))\n\n def cond_func_simple_net_at_false(i, img, label):\n return layers.cond(i < 5, lambda: layers.mean(img),\n lambda: branch(i, img, label))\n\n self.backward_value_helper(cond_func_simple_net_at_true)\n self.add_optimizer_helper(cond_func_simple_net_at_true)\n self.backward_value_helper(cond_func_simple_net_at_false)\n 
self.add_optimizer_helper(cond_func_simple_net_at_false)\n\n def test_nested_cond_backward(self):\n def branch(i, img, label, mod_two):\n\n if mod_two:\n predicate = ((i % 2) == 0)\n else:\n predicate = ((i % 2) != 0)\n return layers.cond(predicate, lambda: simple_fc_net_with_inputs(img, label, class_num=10),\n lambda: batchnorm_fc_with_inputs(img, label, class_num=10))\n\n def cond_func(i, img, label):\n return layers.cond(i < 5, lambda: branch(i, img, label, True),\n lambda: branch(i, img, label, False))\n\n self.backward_value_helper(cond_func)\n self.add_optimizer_helper(cond_func)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.full",
"numpy.isclose",
"numpy.asarray",
"numpy.zeros",
"numpy.random.seed",
"numpy.copy",
"numpy.random.randint",
"numpy.random.random"
]
] |
rafzi/DeepThings
|
[
"d12e8e8ad9f9ebaa3b0d55f547c0b3c7f1baf636"
] |
[
"scripts/plotresults.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 1: YOLOv2, 2: AlexNet, 3: VGG-16, 4: GoogLeNet\nmodel = 4\nLINEPLOT = True\n\ndfs = pd.read_excel(\"t.xlsx\", sheet_name=None, header=None)\nif model == 1:\n ms = \"YOLOv2\"\nelif model == 2:\n ms = \"AlexNet\"\nelif model == 3:\n ms = \"VGG-16\"\nelif model == 4:\n ms = \"GoogLeNet\"\nsh = dfs[ms]\nprint(sh)\n\n\nlabels = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\nx = np.arange(len(labels))\n\nplt.rcParams.update({\"font.size\": 11})\nfig, ax = plt.subplots()\nplt.subplots_adjust(top=0.95, right=0.95)\n\n# Workaround for this: https://bugs.python.org/issue32790\ndef fmtFlt(f, digits):\n s = (\"{:#.\" + str(digits) + \"g}\").format(f)\n sz = len(s) - 1\n if sz < digits:\n s += \"0\"\n if s[-1] == \".\":\n s = s[:-1]\n return s\n\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate(fmtFlt(height, 3),\n xy=(rect.get_x() + 1.2*rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', rotation=90, fontsize=9.5)\n\ndef addData(speed, fused):\n y = []\n lineindex = -4 + (speed)*(13+4)\n addindex = 1 if fused else 0\n for i in range(0, 6):\n y.append(sh[5*2 + addindex][lineindex] / sh[i*2 + addindex][lineindex])\n y = np.array(y)# / 1000\n y = np.flip(y)\n l = (\"OWP @ \" if fused else \"LOP @ \") + \\\n (\"1 GBit/s\" if speed == 1 else (\"100 MBit/s\" if speed == 2 else \"10 MBit/s\"))\n color = \"C1\" if fused else \"C0\"\n if LINEPLOT:\n color = \"C3\" if speed == 1 else (\"C4\" if speed == 2 else \"C1\")\n #line = \"o\" if speed == 1 else (\"v\" if speed == 2 else \"s\")\n line = \"o\" if fused else \"s\"\n line += \"--\" if fused else \"-\"\n ax.plot(x, y, line, label=l, color=color)\n else:\n barw = 0.15\n bars = 6\n i = 2 * (-speed+4-1) + int(fused)\n #patterns = [\"\\\\\\\\\", \"//\", \"||\", 
\"--\", \"..\", \"OO\"]\n patterns = [\"\\\\\\\\\", \"\\\\\\\\\", \"//\", \"//\", \"..\", \"..\"]\n g = ax.bar(x + barw/2 - bars/2*barw + i * barw, y, barw, label=l, color=color,\n hatch=patterns[i], alpha=0.99)\n #autolabel(g)\n\n# 1: 1gbit, 2: 100mbit, 3: 10mbit\naddData(1, True)\naddData(1, False)\naddData(2, True)\naddData(2, False)\naddData(3, True)\naddData(3, False)\n\n\n#plt.ylim(plt.ylim()*1.1)\nybot, ytop = plt.ylim()\nplt.ylim(ybot, ytop*1.05)\nax.set_xlabel(\"Number of devices\")\nax.set_ylabel(\"Run time speedup over one device\")\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n\nplt.savefig(\"plot_runtime.pdf\")\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"matplotlib.pyplot.savefig",
"pandas.read_excel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust",
"numpy.flip"
]
] |
10jqka-aicubes/opinion_classification
|
[
"43f193522b033bd857d294737b3f9dbaac7aed9f"
] |
[
"opinion_classification/electra/finetune/preprocessing.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Code for serializing raw fine-tuning data into tfrecords\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport random\nimport numpy as np\n\n# import tensorflow.compat.v1 as tf\nimport tensorflow as tf\nimport configure_finetuning\nfrom finetune import feature_spec\n\nfrom util import utils\n\nimport pdb\n\n\nclass Preprocessor(object):\n \"\"\"Class for loading, preprocessing, and serializing fine-tuning datasets.\"\"\"\n\n def __init__(self, config: configure_finetuning.FinetuningConfig, tasks):\n self._config = config\n self._tasks = tasks\n self._name_to_task = {task.name: task for task in tasks}\n\n self._feature_specs = feature_spec.get_shared_feature_specs(config)\n for task in tasks:\n self._feature_specs += task.get_feature_specs()\n self._name_to_feature_config = {spec.name: spec.get_parsing_spec() for spec in self._feature_specs}\n assert len(self._name_to_feature_config) == len(self._feature_specs)\n\n def prepare_train(self):\n return self._serialize_dataset(self._tasks, True, \"train\")\n\n def prepare_predict(self, tasks, split):\n return self._serialize_dataset(tasks, False, split)\n\n def _serialize_dataset(self, tasks, is_training, split):\n \"\"\"Write out the dataset as tfrecords.\"\"\"\n dataset_name = 
\"_\".join(sorted([task.name for task in tasks]))\n dataset_name += \"_\" + split\n dataset_prefix = os.path.join(self._config.preprocessed_data_dir, dataset_name)\n tfrecords_path = dataset_prefix + \".tfrecord\"\n metadata_path = dataset_prefix + \".metadata\"\n batch_size = self._config.train_batch_size if is_training else self._config.eval_batch_size\n\n utils.log(\"Loading dataset\", dataset_name)\n n_examples = None\n if self._config.use_tfrecords_if_existing and tf.gfile.Exists(metadata_path):\n n_examples = utils.load_json(metadata_path)[\"n_examples\"]\n\n if n_examples is None:\n utils.log(\"Existing tfrecords not found so creating\")\n examples = []\n\n for task in tasks:\n task_examples = task.get_examples(split)\n examples += task_examples\n\n if is_training:\n random.shuffle(examples)\n utils.mkdir(tfrecords_path.rsplit(\"/\", 1)[0])\n n_examples = self.serialize_examples(examples, is_training, tfrecords_path, batch_size)\n utils.write_json({\"n_examples\": n_examples}, metadata_path)\n\n input_fn = self._input_fn_builder(tfrecords_path, is_training)\n if is_training:\n steps = int(n_examples // batch_size * self._config.num_train_epochs)\n else:\n steps = n_examples // batch_size\n return input_fn, steps\n\n def serialize_examples(self, examples, is_training, output_file, batch_size):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n n_examples = 0\n with tf.python_io.TFRecordWriter(output_file) as writer:\n for (ex_index, example) in enumerate(examples):\n if ex_index % 2000 == 0:\n utils.log(\"Writing example {:} of {:}\".format(ex_index, len(examples)))\n for tf_example in self._example_to_tf_example(\n example, is_training, log=self._config.log_examples and ex_index < 1\n ):\n writer.write(tf_example.SerializeToString())\n n_examples += 1\n # add padding so the dataset is a multiple of batch_size\n while n_examples % batch_size != 0:\n 
writer.write(self._make_tf_example(task_id=len(self._config.task_names)).SerializeToString())\n n_examples += 1\n return n_examples\n\n def _example_to_tf_example(self, example, is_training, log=False):\n # pdb.set_trace()\n examples = self._name_to_task[example.task_name].featurize(example, is_training, log)\n if not isinstance(examples, list):\n examples = [examples]\n for example in examples:\n yield self._make_tf_example(**example)\n\n def _make_tf_example(self, **kwargs):\n \"\"\"Make a tf.train.Example from the provided features.\"\"\"\n for k in kwargs:\n if k not in self._name_to_feature_config:\n raise ValueError(\"Unknown feature\", k)\n features = collections.OrderedDict()\n for spec in self._feature_specs:\n if spec.name in kwargs:\n values = kwargs[spec.name]\n else:\n values = spec.get_default_values()\n if (\n isinstance(values, int)\n or isinstance(values, bool)\n or isinstance(values, float)\n or isinstance(values, np.float32)\n or (isinstance(values, np.ndarray) and values.size == 1)\n ):\n values = [values]\n if spec.is_int_feature:\n feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n else:\n feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n features[spec.name] = feature\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n def _input_fn_builder(self, input_file, is_training):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n d = tf.data.TFRecordDataset(input_file)\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n return d.apply(\n tf.contrib.data.map_and_batch(\n self._decode_tfrecord, batch_size=params[\"batch_size\"], drop_remainder=True\n )\n )\n\n return input_fn\n\n def _decode_tfrecord(self, record):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, self._name_to_feature_config)\n # tf.Example only 
supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n\n for name, tensor in example.items():\n if tensor.dtype == tf.int64:\n example[name] = tf.cast(tensor, tf.int32)\n else:\n example[name] = tensor\n\n return example\n"
] |
[
[
"tensorflow.train.Features",
"tensorflow.data.TFRecordDataset",
"tensorflow.gfile.Exists",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.contrib.data.map_and_batch",
"tensorflow.parse_single_example",
"tensorflow.cast"
]
] |
vidhiJain/lab
|
[
"43a3eb2aa3121b226c849690e636dcd4c04a49cf"
] |
[
"lab/helpers/pytorch/device.py"
] |
[
"import torch\n\nfrom lab.configs import BaseConfigs\n\n\nclass DeviceInfo:\n def __init__(self, *,\n use_cuda: bool,\n cuda_device: int):\n self.use_cuda = use_cuda\n self.cuda_device = cuda_device\n self.cuda_count = torch.cuda.device_count()\n\n self.is_cuda = self.use_cuda and torch.cuda.is_available()\n if not self.is_cuda:\n self.device = torch.device('cpu')\n else:\n if self.cuda_device < self.cuda_count:\n self.device = torch.device('cuda', self.cuda_device)\n else:\n self.device = torch.device('cuda', self.cuda_count - 1)\n\n def __str__(self):\n if not self.is_cuda:\n return \"CPU\"\n\n if self.cuda_device < self.cuda_count:\n return f\"GPU:{self.cuda_device} - {torch.cuda.get_device_name(self.cuda_device)}\"\n else:\n return (f\"GPU:{self.cuda_count - 1}({self.cuda_device}) \"\n f\"- {torch.cuda.get_device_name(self.cuda_count - 1)}\")\n\n\nclass DeviceConfigs(BaseConfigs):\n cuda_device: int = 0\n use_cuda: bool = True\n\n device_info: DeviceInfo\n\n device: torch.device\n\n\n@DeviceConfigs.calc(DeviceConfigs.device)\ndef _device(c: DeviceConfigs):\n return c.device_info.device\n\n\nDeviceConfigs.set_hyperparams(DeviceConfigs.cuda_device, DeviceConfigs.use_cuda,\n is_hyperparam=False)\n\n\n@DeviceConfigs.calc(DeviceConfigs.device_info)\ndef _device_info(c: DeviceConfigs):\n return DeviceInfo(use_cuda=c.use_cuda,\n cuda_device=c.cuda_device)\n"
] |
[
[
"torch.device",
"torch.cuda.is_available",
"torch.cuda.get_device_name",
"torch.cuda.device_count"
]
] |
zwc662/disentangling-vae
|
[
"7eeace2a30f8034e222be6a906f53748b3b2bb6e"
] |
[
"exp_adv/A3C/model.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef normalized_columns_initializer(weights, std=1.0):\n out = torch.randn(weights.size())\n out *= std / torch.sqrt(out.pow(2).sum(1, keepdim=True))\n return out\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n weight_shape = list(m.weight.data.size())\n fan_in = np.prod(weight_shape[1:4])\n fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]\n w_bound = np.sqrt(6. / (fan_in + fan_out))\n m.weight.data.uniform_(-w_bound, w_bound)\n m.bias.data.fill_(0)\n elif classname.find('Linear') != -1:\n weight_shape = list(m.weight.data.size())\n fan_in = weight_shape[1]\n fan_out = weight_shape[0]\n w_bound = np.sqrt(6. / (fan_in + fan_out))\n m.weight.data.uniform_(-w_bound, w_bound)\n m.bias.data.fill_(0)\n\n\nclass ActorCritic(torch.nn.Module):\n def __init__(self, num_inputs, action_space):\n super(ActorCritic, self).__init__()\n self.conv1 = nn.Conv2d(num_inputs, 32, 3, stride=2, padding=1)\n self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)\n self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)\n self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)\n\n self.lstm = nn.LSTMCell(32 * 2 * 2, 256)\n\n num_outputs = action_space\n self.critic_linear = nn.Linear(256, 1)\n self.actor_linear = nn.Linear(256, num_outputs)\n\n self.apply(weights_init)\n self.actor_linear.weight.data = normalized_columns_initializer(\n self.actor_linear.weight.data, 0.01)\n self.actor_linear.bias.data.fill_(0)\n self.critic_linear.weight.data = normalized_columns_initializer(\n self.critic_linear.weight.data, 1.0)\n self.critic_linear.bias.data.fill_(0)\n\n self.lstm.bias_ih.data.fill_(0)\n self.lstm.bias_hh.data.fill_(0)\n\n self.train()\n\n def forward(self, inputs):\n inputs, (hx, cx) = inputs\n x = F.elu(self.conv1(inputs))\n x = F.elu(self.conv2(x))\n x = F.elu(self.conv3(x))\n x = F.elu(self.conv4(x))\n \n x = x.view(-1, 32 * 2 * 2)\n hx, 
cx = self.lstm(x, (hx, cx))\n x = hx\n\n return self.critic_linear(x), self.actor_linear(x), (hx, cx)\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.LSTMCell",
"torch.nn.Conv2d",
"numpy.prod",
"numpy.sqrt"
]
] |
victor-torres/arche
|
[
"1bc31a1397e15860be7c3762be86c47f9e839704"
] |
[
"src/arche/rules/price.py"
] |
[
"from arche.readers.schema import TaggedFields\nfrom arche.rules.result import Result\nfrom arche.tools.helpers import is_number, ratio_diff\nimport pandas as pd\n\n\ndef compare_was_now(df: pd.DataFrame, tagged_fields: TaggedFields):\n \"\"\"Compare price_was and price_now tagged fields\"\"\"\n\n price_was_fields = tagged_fields.get(\"product_price_was_field\")\n price_fields = tagged_fields.get(\"product_price_field\")\n items_number = len(df.index)\n\n result = Result(\"Compare Price Was And Now\")\n\n if (\n price_was_fields\n and price_was_fields[0] in df.columns\n and price_fields\n and price_fields[0] in df.columns\n ):\n price_field = price_fields[0]\n price_was_field = price_was_fields[0]\n prices = df.copy()\n prices[price_was_field] = prices[price_was_field].astype(float)\n prices[price_field] = prices[price_field].astype(float)\n\n df_prices_less = pd.DataFrame(\n prices[prices[price_was_field] < prices[price_field]],\n columns=[\"_key\", price_was_field, price_field],\n )\n\n price_less_percent = \"{:.2%}\".format(len(df_prices_less) / items_number)\n\n if not df_prices_less.empty:\n error = f\"Past price is less than current for {len(df_prices_less)} items\"\n result.add_error(\n f\"{price_less_percent} ({len(df_prices_less)}) of \"\n f\"items with {price_was_field} < {price_field}\",\n detailed=f\"{error}:\\n{list(df_prices_less['_key'])}\",\n )\n\n df_prices_equals = pd.DataFrame(\n prices[prices[price_was_field] == prices[price_field]],\n columns=[\"_key\", price_was_field, price_field],\n )\n price_equal_percent = \"{:.2%}\".format(len(df_prices_equals) / items_number)\n\n if not df_prices_equals.empty:\n result.add_warning(\n (\n f\"{price_equal_percent} ({len(df_prices_equals)}) \"\n f\"of items with {price_was_field} = {price_field}\"\n ),\n detailed=(\n f\"Prices equal for {len(df_prices_equals)} items:\\n\"\n f\"{list(df_prices_equals['_key'])}\"\n ),\n )\n\n result.err_items_count = len(df_prices_equals) + len(df_prices_less)\n 
result.items_count = len(df.index)\n\n else:\n result.add_info(\n \"product_price_field or product_price_was_field tags were not \"\n \"found in schema\"\n )\n return result\n\n\ndef compare_prices_for_same_urls(\n source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields\n):\n \"\"\"For each pair of items that have the same `product_url_field` tagged field,\n compare `product_price_field` field\n\n Returns:\n A result containing pairs of items with same `product_url_field`\n from `source_df` and `target_df` which `product_price_field` differ,\n missing and new `product_url_field` tagged fields.\n \"\"\"\n result = Result(\"Compare Prices For Same Urls\")\n url_field = tagged_fields.get(\"product_url_field\")\n if not url_field:\n result.add_info(\"product_url_field tag is not set\")\n return result\n\n url_field = url_field[0]\n price_field = tagged_fields.get(\"product_price_field\")\n\n source_df = source_df.dropna(subset=[url_field])\n target_df = target_df.dropna(subset=[url_field])\n\n same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][\n url_field\n ]\n new_urls = source_df[~(source_df[url_field].isin(target_df[url_field].values))][\n url_field\n ]\n missing_urls = target_df[(~target_df[url_field].isin(source_df[url_field].values))][\n url_field\n ]\n\n missing_detailed_messages = []\n for url in missing_urls:\n key = target_df.loc[target_df[url_field] == url][\"_key\"].iloc[0]\n missing_detailed_messages.append(f\"Missing {url} from {key}\")\n\n result.add_info(\n f\"{len(missing_urls)} urls missing from the tested job\",\n detailed=\"\\n\".join(missing_detailed_messages),\n )\n result.add_info(f\"{len(new_urls)} new urls in the tested job\")\n result.add_info(f\"{len(same_urls)} same urls in both jobs\")\n\n diff_prices_count = 0\n if not price_field:\n result.add_info(\"product_price_field tag is not set\")\n else:\n price_field = price_field[0]\n detailed_messages = []\n for url in same_urls:\n if 
url.strip() != \"nan\":\n source_price = source_df[source_df[url_field] == url][price_field].iloc[\n 0\n ]\n target_price = target_df[target_df[url_field] == url][price_field].iloc[\n 0\n ]\n\n if (\n is_number(source_price)\n and is_number(target_price)\n and ratio_diff(source_price, target_price) > 0.1\n ):\n diff_prices_count += 1\n source_key = source_df[source_df[url_field] == url][\"_key\"].iloc[0]\n target_key = target_df[target_df[url_field] == url][\"_key\"].iloc[0]\n msg = (\n f\"different prices for url: {url}\\nsource price is {source_price} \"\n f\"for {source_key}\\ntarget price is {target_price} for {target_key}\"\n )\n detailed_messages.append(msg)\n\n res = f\"{len(same_urls)} checked, {diff_prices_count} errors\"\n if detailed_messages:\n result.add_error(res, detailed=\"\\n\".join(detailed_messages))\n else:\n result.add_info(res)\n\n return result\n\n\ndef compare_names_for_same_urls(\n source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields\n):\n \"\"\"For each pair of items that have the same `product_url_field` tagged field,\n compare `name_field` field\"\"\"\n\n result = Result(\"Compare Names Per Url\")\n url_field = tagged_fields.get(\"product_url_field\")\n if not url_field:\n result.add_info(\"product_url_field tag is not set\")\n return result\n\n url_field = url_field[0]\n name_field = tagged_fields.get(\"name_field\")\n\n diff_names_count = 0\n if not name_field:\n result.add_info(\"name_field tag is not set\")\n return result\n\n name_field = name_field[0]\n if any(\n [\n name_field not in source_df.columns.values,\n name_field not in target_df.columns.values,\n ]\n ):\n return\n\n same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][\n url_field\n ]\n\n detailed_messages = []\n for url in same_urls:\n if url.strip() != \"nan\":\n source_name = source_df[source_df[url_field] == url][name_field].iloc[0]\n target_name = target_df[target_df[url_field] == url][name_field].iloc[0]\n\n if 
(\n source_name != target_name\n and source_name.strip() != \"nan\"\n and target_name.strip() != \"nan\"\n ):\n diff_names_count += 1\n source_key = source_df[source_df[url_field] == url][\"_key\"].iloc[0]\n target_key = target_df[target_df[url_field] == url][\"_key\"].iloc[0]\n msg = (\n f\"different names for url: {url}\\nsource name is {source_name} \"\n f\"for {source_key}\\ntarget name is {target_name} for {target_key}\"\n )\n detailed_messages.append(msg)\n\n res = f\"{len(same_urls)} checked, {diff_names_count} errors\"\n if detailed_messages:\n result.add_error(res, detailed=\"\\n\".join(detailed_messages))\n else:\n result.add_info(res)\n\n return result\n\n\ndef compare_prices_for_same_names(\n source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields\n):\n result = Result(\"Compare Prices For Same Names\")\n name_field = tagged_fields.get(\"name_field\")\n if not name_field:\n result.add_info(\"name_field tag is not set\")\n return result\n\n name_field = name_field[0]\n\n product_url_field = tagged_fields.get(\"product_url_field\")\n if not product_url_field:\n result.add_info(\"product_url_field tag is not set\")\n else:\n product_url_field = product_url_field[0]\n source_df = source_df[source_df[name_field].notnull()]\n target_df = target_df[target_df[name_field].notnull()]\n\n same_names = source_df[(source_df[name_field].isin(target_df[name_field].values))][\n name_field\n ]\n new_names = source_df[~(source_df[name_field].isin(target_df[name_field].values))][\n name_field\n ]\n missing_names = target_df[\n ~(target_df[name_field].isin(source_df[name_field].values))\n ][name_field]\n\n detailed_messages = []\n for name in missing_names:\n target_key = target_df.loc[target_df[name_field] == name][\"_key\"].iloc[0]\n msg = f\"Missing {name} from {target_key}\"\n if product_url_field:\n url = target_df.loc[target_df[name_field] == name][product_url_field].iloc[\n 0\n ]\n detailed_messages.append(f\"{msg}\\n{url}\")\n\n 
result.add_info(\n f\"{len(missing_names)} names missing from the tested job\",\n detailed=\"\\n\".join(detailed_messages),\n )\n result.add_info(f\"{len(new_names)} new names in the tested job\")\n result.add_info(f\"{len(same_names)} same names in both jobs\")\n\n price_tag = \"product_price_field\"\n price_field = tagged_fields.get(price_tag)\n if not price_field:\n result.add_info(\"product_price_field tag is not set\")\n return result\n\n price_field = price_field[0]\n count = 0\n\n detailed_messages = []\n for name in same_names:\n if name.strip() != \"nan\":\n source_price = source_df[source_df[name_field] == name][price_field].iloc[0]\n target_price = target_df[target_df[name_field] == name][price_field].iloc[0]\n if is_number(source_price) and is_number(target_price):\n if ratio_diff(source_price, target_price) > 0.1:\n count += 1\n source_key = source_df[source_df[name_field] == name][\"_key\"].iloc[\n 0\n ]\n target_key = target_df[target_df[name_field] == name][\"_key\"].iloc[\n 0\n ]\n msg = (\n f\"different price for {name}\\nsource price is {source_price} \"\n f\"for {source_key}\\ntarget price is {target_price} for {target_key}\"\n )\n detailed_messages.append(msg)\n\n result_msg = f\"{len(same_names)} checked, {count} errors\"\n if detailed_messages:\n result.add_error(result_msg, detailed=\"\\n\".join(detailed_messages))\n else:\n result.add_info(result_msg)\n\n return result\n"
] |
[
[
"pandas.DataFrame"
]
] |
gurcani/dycon
|
[
"64313471a9222682dce12f8623eb5d0563a8bb5c"
] |
[
"plot_dd.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 1 23:40:16 2020\n\n@author: ogurcan\n\"\"\"\n\nimport networkx as nx\nimport h5py as h5\nimport matplotlib.pylab as plt\nimport numpy as np\n\nnwflname='run-GOY/nwfile.pkl'\ngr=nx.read_gpickle(nwflname)\ncc=nx.bipartite.degrees(gr,nx.bipartite.sets(gr)[0])[1]\nkn=np.sort(np.array([l for l in nx.bipartite.sets(gr)[0]]))\nN=kn.shape[0]\nccn=[cc[l] for l in kn]\nplt.plot(np.arange(N),ccn,'x-')\n\nnwflname='run-WS04-static/nwfile.pkl'\ngr=nx.read_gpickle(nwflname)\ncc=nx.bipartite.degrees(gr,nx.bipartite.sets(gr)[0])[1]\nkn=np.sort(np.array([l for l in nx.bipartite.sets(gr)[0]]))\nN=kn.shape[0]\nccn=[cc[l] for l in kn]\nplt.plot(np.arange(N),ccn,'s-')\n\nnwflname='run-NW04-static/nwfile.pkl'\ngr=nx.read_gpickle(nwflname)\ncc=nx.bipartite.degrees(gr,nx.bipartite.sets(gr)[0])[1]\nkn=np.sort(np.array([l for l in nx.bipartite.sets(gr)[0]]))\nN=kn.shape[0]\nccn=[cc[l] for l in kn]\nplt.plot(np.arange(N),ccn,'o-')\n\nplt.legend(['GOY','WS','NW'])\nplt.xlabel('n')\nplt.ylabel('Degree')\n"
] |
[
[
"matplotlib.pylab.xlabel",
"matplotlib.pylab.legend",
"numpy.arange",
"matplotlib.pylab.ylabel"
]
] |
jonoconway/nz_snow_tools
|
[
"7002fb401fb48225260fada6fd5b5b7ca5ad1184"
] |
[
"nz_snow_tools/met/interp_met_data_hourly_jobst_data.py"
] |
[
"\"\"\"\nGenerate hourly temperature and precipitation data for use in the snow model\n\nThe daily gridded fields from Andi Jobst at 250m for each calander year\nplus vcsn radiation feilds\nare downscaled to hourly using the same methods given in \"Simulations of seasonal snow for the South Island, New Zealand\"\nClark et al, 2009\n\n\"\"\"\n\nfrom __future__ import division\n\nimport datetime as dt\nimport netCDF4 as nc\nimport numpy as np\nimport pickle\nimport mpl_toolkits.basemap as basemap\n\nfrom nz_snow_tools.util.utils import process_precip, process_temp, create_mask_from_shpfile, make_regular_timeseries, calc_toa, trim_lat_lon_bounds, \\\n setup_nztm_dem\n\nfrom nz_snow_tools.util.write_fsca_to_netcdf import write_nztm_grids_to_netcdf, setup_nztm_grid_netcdf\n\nfrom nz_snow_tools.met.interp_met_data_hourly_vcsn_data import load_new_vscn, interpolate_met, daily_to_hourly_temp_grids, daily_to_hourly_swin_grids\n\n\ndef load_jobst(variable, dts_to_take, nc_file_in, mask_dem,origin):\n nc_file = nc.Dataset(nc_file_in + '01-Jan-{}to31-dec-{}.nc'.format(dts_to_take[0].year, dts_to_take[0].year))\n # nc_datetimes = nc.num2date(nc_file.variables['time'][:], nc_file.variables['time'].units)\n data = nc_file.variables[variable][:]\n # the data is in a funny grid - need to swap last two axes, then flip to align with vcsn grid\n\n # plt.imshow(np.flipud(np.transpose(hi_res_max_temp,(0,2,1))[0][mask]),origin=0)\n if mask_dem == True:\n hi_res_precip_trimmed = []\n for precip in data:\n if origin == 'topleft':\n _, _, trimmed_precip, _, _ = trim_lat_lon_bounds(mask, lat_array, lon_array, np.transpose(precip), y_centres, x_centres)\n elif origin == 'bottomleft':\n _, _, trimmed_precip, _, _ = trim_lat_lon_bounds(mask, lat_array, lon_array, np.flipud(np.transpose(precip)), y_centres, x_centres)\n hi_res_precip_trimmed.append(trimmed_precip)\n data = np.asarray(hi_res_precip_trimmed)\n\n if variable in ['tmin', 'tmax']:\n data = data + 273.15\n\n return data\n\n\nif 
__name__ == '__main__':\n\n # dem control\n origin = 'topleft' # orientation of output dem options are 'topleft' or 'bottomleft'. assume that input Dem is 'topleft'\n output_dem = 'nztm250m' # identifier for output dem\n dem_file = 'Z:/GIS_DATA/Topography/DEM_NZSOS/clutha_dem_250m.tif'\n # mask control\n mask_dem = True # boolean to set whether or not to mask the output dem - assume mask is bottom left\n catchment = 'Nevis'\n mask_created = True # boolean to set whether or not the mask has already been created - assume mask is bottom left\n mask_folder = 'T:/DSC-Snow/Masks' # location of numpy catchment mask. must be writeable if mask_created == False\n mask_shpfile = 'Z:/GIS_DATA/Hydrology/Catchments/{}.shp'.format(\n catchment) # shapefile containing polyline or polygon of catchment in WGS84. Not needed if mask_created==True\n # time control\n years_to_take = range(2000, 2016 + 1) # range(2001, 2013 + 1)\n ta_version = 'jobst_ucc'\n # input met data\n nc_file_rain = 'Z:/JOBST_met_data/PRECIP_WITH_UCC/CLUTHA_PRECIP_250m_' # provide only partial filename up to 01-Jan-2000to31-dec-2000\n nc_file_tmax = 'Z:/JOBST_met_data/Tmax/CLUTHA_Tmax_250m_'\n nc_file_tmin = 'Z:/JOBST_met_data/Tmin/CLUTHA_Tmin_250m_'\n nc_file_srad = 'Z:/newVCSN/srad_vclim_clidb_1972010100_2017102000_south-island_p05_daily.nc'\n\n # output met data\n met_out_folder = 'T:/DSC-Snow/input_data_hourly'\n\n ####\n\n # set up input and output DEM for processing\n # output DEM\n nztm_dem, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(dem_file, origin=origin)\n data_id = '{}_{}'.format(catchment, output_dem) # name to identify the output data\n if mask_dem == True:\n # Get the masks for the individual regions of interest\n if mask_created == True: # load precalculated mask\n mask = np.load(mask_folder + '/{}_{}.npy'.format(catchment, output_dem))\n if origin == 'topleft':\n mask = np.flipud(mask)\n else: # create mask and save to npy file\n # masks = get_masks() #TODO set up for multiple 
masks\n mask = create_mask_from_shpfile(lat_array, lon_array, mask_shpfile)\n if origin == 'topleft': # flip to save as bottom left\n mask = np.flipud(mask)\n np.save(mask_folder + '/{}_{}.npy'.format(catchment, output_dem), mask)\n if origin == 'topleft': # flip back to topleft\n mask = np.flipud(mask)\n # Trim down the number of latitudes requested so it all stays in memory\n lats, lons, elev, northings, eastings = trim_lat_lon_bounds(mask, lat_array, lon_array, nztm_dem, y_centres, x_centres)\n _, _, trimmed_mask, _, _ = trim_lat_lon_bounds(mask, lat_array, lon_array, mask.copy(), y_centres, x_centres)\n else:\n mask = None\n lats = lat_array\n lons = lon_array\n elev = nztm_dem\n northings = y_centres\n eastings = x_centres\n\n for year_to_take in years_to_take:\n # load data\n # create timestamp to get - this is in NZST\n dts_to_take = np.asarray(make_regular_timeseries(dt.datetime(year_to_take, 1, 1), dt.datetime(year_to_take, 12, 31), 86400))\n # pull only data needed.\n # this loads data for 00h NZST that corresponds to the day to come in i.e. 
min@ 8am, max @ 2pm , total sw and total rain for 1/1/2000 at 2000-01-01 00:00:00\n # precip_daily = load_new_vscn('rain', dts_to_take, nc_file_rain)\n # max_temp_daily = load_new_vscn('tmax', dts_to_take, nc_file_tmax)\n # min_temp_daily = load_new_vscn('tmin', dts_to_take, nc_file_tmin)\n sw_rad_daily = load_new_vscn('srad', dts_to_take, nc_file_srad)\n # load grid (assume same for all input data)\n ds = nc.Dataset(nc_file_srad)\n vcsn_elev = np.flipud(ds.variables['elevation'][:])\n vcsn_lats = ds.variables['latitude'][::-1]\n vcsn_lons = ds.variables['longitude'][:]\n hy_index = np.ones(dts_to_take.shape, dtype='int')\n\n # check dimensions and projection of the new data\n # nztm_elev_check = interpolate_met(np.asarray([vcsn_elev]), Precip, vcsn_lons, vcsn_lats, vcsn_elev, lons,\n # lats, elev)\n # plt.imshow(nztm_elev_check[0], origin=0)\n # plt.imshow(elev, origin=0)\n start_dt = dts_to_take[0]\n finish_dt = dts_to_take[-1]\n\n # interpolate data to fine grid\n hi_res_precip = load_jobst('precipitation', dts_to_take, nc_file_rain, mask_dem,origin=origin)\n hi_res_max_temp = load_jobst('tmax', dts_to_take, nc_file_tmax, mask_dem,origin=origin)\n hi_res_min_temp = load_jobst('tmin', dts_to_take, nc_file_tmin, mask_dem,origin=origin)\n hi_res_sw_rad = interpolate_met(sw_rad_daily, 'srad', vcsn_lons, vcsn_lats, np.ma.fix_invalid(vcsn_elev).data, lons, lats, elev)\n\n if mask_dem:\n hi_res_precip[:, trimmed_mask == False] = np.nan\n hi_res_max_temp[:, trimmed_mask == False] = np.nan\n hi_res_min_temp[:, trimmed_mask == False] = np.nan\n hi_res_sw_rad[:, trimmed_mask == False] = np.nan\n\n # process and write\n hourly_dt = np.asarray(make_regular_timeseries(start_dt + dt.timedelta(hours=1), finish_dt + dt.timedelta(days=1), 3600))\n out_nc_file = setup_nztm_grid_netcdf(met_out_folder + '/met_inp_{}_{}_{}_{}.nc'.format(data_id, year_to_take, ta_version, origin),\n None, ['air_temperature', 'precipitation_amount', 'surface_downwelling_shortwave_flux'],\n hourly_dt, 
northings, eastings, lats, lons, elev)\n day_weightings = []\n num_days = hi_res_precip.shape[0]\n for i in range(num_days):\n # Do the temporal downsampling for one day\n # precip is random cascade for each day. NOTE original VCSN data has almost correct timestamp - ie. total from 9am.\n hourly_precip, day_weightings_1 = process_precip(hi_res_precip[i],\n one_day=True) # TODO: align to 9am-9am - currently counts pretends it is midnight-midnight\n # air temperature is three part sinusoidal between min at 8am and max at 2pm. NOTE original VCSN data has correct timestamp - ie. minimum to 9am, maximum from 9am.\n # hourly_temp = daily_to_hourly_temp_grids(hi_res_max_temp[i], hi_res_min_temp[i], single_dt=True) #\n if i == 0:\n hourly_temp = daily_to_hourly_temp_grids(hi_res_max_temp[i:i + 2], hi_res_min_temp[i:i + 2])\n hourly_temp = hourly_temp[:24]\n elif i == num_days:\n hourly_temp = daily_to_hourly_temp_grids(hi_res_max_temp[i - 1:], hi_res_min_temp[i - 1:])\n hourly_temp = hourly_temp[-24:]\n else:\n hourly_temp = daily_to_hourly_temp_grids(hi_res_max_temp[i - 1:i + 2], hi_res_min_temp[i - 1:i + 2])\n hourly_temp = hourly_temp[24:48]\n #\n hourly_swin = daily_to_hourly_swin_grids(hi_res_sw_rad[i], lats, lons, hourly_dt[i * 24: (i + 1) * 24], single_dt=True)\n\n for var, data in zip(['air_temperature', 'precipitation_amount', 'surface_downwelling_shortwave_flux'],\n [hourly_temp, hourly_precip, hourly_swin]):\n out_nc_file.variables[var][i * 24: (i + 1) * 24, :, :] = data\n day_weightings.extend(day_weightings_1)\n out_nc_file.close()\n\n pickle.dump(day_weightings, open(met_out_folder + '/met_inp_{}_{}_{}_{}_daywts.pkl'.format(data_id, year_to_take, ta_version, origin), 'wb'), -1)\n"
] |
[
[
"numpy.asarray",
"numpy.ma.fix_invalid",
"numpy.ones",
"numpy.flipud",
"numpy.transpose"
]
] |
UmbraVenus/streamlit
|
[
"d9ff6b4221b2d1424d5156932bd49986a5ec8a40"
] |
[
"agraph.py"
] |
[
"import streamlit as st\nfrom streamlit_agraph import agraph, Node, Edge, Config\nimport pandas as pd\nimport numpy as np\n\n@st.cache(suppress_st_warning=True)\ndef get_graph(file):\n\n nodes = []\n edges = []\n df = pd.read_csv(file)\n for x in np.unique(df[[\"Source\", \"Target\"]].values):\n nodes.append(Node(id=x))\n for index, row in df.iterrows():\n edges.append(Edge(source=row[\"Source\"], target=row[\"Target\"], type=\"CURVE_SMOOTH\"))\n config = Config(width=1000, \n height=800, \n directed=False,\n nodeHighlightBehavior=True, \n highlightColor=\"#F7A7A6\", # or \"blue\"\n collapsible=True,\n # coming soon (set for all): node_size=1000, node_color=\"blue\"\n ) \n\n return_value = agraph(nodes=nodes, \n edges=edges, \n config=config)\n return return_value\n\ndef app():\n get_graph(\"BERT_edge_list2.csv\")"
] |
[
[
"pandas.read_csv",
"numpy.unique"
]
] |
thorstenhater/arbor
|
[
"29b09f2d752cb1243eddda31ff0b06675b52fc1f"
] |
[
"python/example/single_cell_detailed_recipe.py"
] |
[
"#!/usr/bin/env python3\n\nimport arbor\nimport pandas\nimport seaborn\nimport sys\nfrom arbor import mechanism as mech\n\n#(1) Creat a cell.\n\n# Create the morphology\n\n# Read the SWC filename from input\n# Example from docs: morph.swc\n\nif len(sys.argv) < 2:\n print(\"No SWC file passed to the program\")\n sys.exit(0)\n\nfilename = sys.argv[1]\nmorph = arbor.load_swc_arbor(filename)\n\n# Create and populate the label dictionary.\n\nlabels = arbor.label_dict()\n\n# Regions:\n\nlabels['soma'] = '(tag 1)'\nlabels['axon'] = '(tag 2)'\nlabels['dend'] = '(tag 3)'\nlabels['last'] = '(tag 4)'\n\nlabels['all'] = '(all)'\n\nlabels['gt_1.5'] = '(radius-ge (region \"all\") 1.5)'\nlabels['custom'] = '(join (region \"last\") (region \"gt_1.5\"))'\n\n# Locsets:\n\nlabels['root'] = '(root)'\nlabels['terminal'] = '(terminal)'\nlabels['custom_terminal'] = '(restrict (locset \"terminal\") (region \"custom\"))'\nlabels['axon_terminal'] = '(restrict (locset \"terminal\") (region \"axon\"))'\n\n# Create and populate the decor.\n\ndecor = arbor.decor()\n\n# Set the default properties.\n\ndecor.set_property(Vm =-55)\n\n# Override the defaults.\n\ndecor.paint('\"custom\"', tempK=270)\ndecor.paint('\"soma\"', Vm=-50)\n\n# Paint density mechanisms.\n\ndecor.paint('\"all\"', 'pas')\ndecor.paint('\"custom\"', 'hh')\ndecor.paint('\"dend\"', mech('Ih', {'gbar': 0.001}))\n\n# Place stimuli and spike detectors.\n\ndecor.place('\"root\"', arbor.iclamp(10, 1, current=2))\ndecor.place('\"root\"', arbor.iclamp(30, 1, current=2))\ndecor.place('\"root\"', arbor.iclamp(50, 1, current=2))\ndecor.place('\"axon_terminal\"', arbor.spike_detector(-10))\n\n# Set cv_policy\n\nsoma_policy = arbor.cv_policy_single('\"soma\"')\ndflt_policy = arbor.cv_policy_max_extent(1.0)\npolicy = dflt_policy | soma_policy\ndecor.discretization(policy)\n\n# Create a cell\n\ncell = arbor.cable_cell(morph, labels, decor)\n\n# (2) Declare a probe.\n\nprobe = arbor.cable_probe_membrane_voltage('\"custom_terminal\"')\n\n# (3) 
Create a recipe class and instantiate a recipe\n\nclass single_recipe (arbor.recipe):\n\n def __init__(self, cell, probes):\n # The base C++ class constructor must be called first, to ensure that\n # all memory in the C++ class is initialized correctly.\n arbor.recipe.__init__(self)\n self.the_cell = cell\n self.the_probes = probes\n\n self.the_cat = arbor.default_catalogue()\n self.the_cat.extend(arbor.allen_catalogue(), \"\")\n\n self.the_props = arbor.cable_global_properties()\n self.the_props.set_property(Vm=-65, tempK=300, rL=35.4, cm=0.01)\n self.the_props.set_ion(ion='na', int_con=10, ext_con=140, rev_pot=50, method='nernst/na')\n self.the_props.set_ion(ion='k', int_con=54.4, ext_con=2.5, rev_pot=-77)\n self.the_props.set_ion(ion='ca', int_con=5e-5, ext_con=2, rev_pot=132.5)\n\n self.the_props.register(self.the_cat)\n\n def num_cells(self):\n return 1\n\n def num_sources(self, gid):\n return 1\n\n def num_targets(self, gid):\n return 0\n\n def cell_kind(self, gid):\n return arbor.cell_kind.cable\n\n def cell_description(self, gid):\n return self.the_cell\n\n def probes(self, gid):\n return self.the_probes\n\n def connections_on(self, gid):\n return []\n\n def gap_junction_on(self, gid):\n return []\n\n def event_generators(self, gid):\n return []\n\n def global_properties(self, gid):\n return self.the_props\n\nrecipe = single_recipe(cell, [probe])\n\n# (4) Create an execution context\n\ncontext = arbor.context()\n\n# (5) Create a domain decomposition\n\ndomains = arbor.partition_load_balance(recipe, context)\n\n# (6) Create a simulation\n\nsim = arbor.simulation(recipe, domains, context)\n\n# Instruct the simulation to record the spikes and sample the probe\n\nsim.record(arbor.spike_recording.all)\n\nprobe_id = arbor.cell_member(0,0)\nhandle = sim.sample(probe_id, arbor.regular_schedule(0.02))\n\n# (7) Run the simulation\n\nsim.run(tfinal=100, dt=0.025)\n\n# (8) Print or display the results\n\nspikes = sim.spikes()\nprint(len(spikes), 'spikes 
recorded:')\nfor s in spikes:\n print(s)\n\ndata = []\nmeta = []\nfor d, m in sim.samples(handle):\n data.append(d)\n meta.append(m)\n\ndf = pandas.DataFrame()\nfor i in range(len(data)):\n df = df.append(pandas.DataFrame({'t/ms': data[i][:, 0], 'U/mV': data[i][:, 1], 'Location': str(meta[i]), 'Variable':'voltage'}))\nseaborn.relplot(data=df, kind=\"line\", x=\"t/ms\", y=\"U/mV\",hue=\"Location\",col=\"Variable\",ci=None).savefig('single_cell_recipe_result.svg')"
] |
[
[
"pandas.DataFrame"
]
] |
zizglu/DEND-1-Data-modeling-with-Postgres
|
[
"568ba2b4e8d7bec8391074b9d2c3f10c5b498edd"
] |
[
"etl.py"
] |
[
"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nfrom sql_queries import *\nimport io\n\ndef get_files( root_directory , file_search_query = '*' ):\n '''\n lookup for json file in root_directory and return a list of full file path\n \n Parameters\n ----------\n root_directory : str - filepath\n file_search_query : str - a query string for files, will be appended to each founded folder\n Returns\n -------\n list(str) : a list of full path on each files\n Raise\n -----\n ValueError if filepath is empty \n '''\n \n if( root_directory == \"\" ):\n raise ValueError('filepath must be defined')\n \n all_files = []\n #iterate over the folder tree structure\n for root, dirs, files in os.walk( root_directory ):\n #search json files on each folder\n files = glob.glob( os.path.join( root , file_search_query ) )\n for f in files :\n #concatenate result in the list\n all_files.append(os.path.abspath(f))\n \n return all_files\n\n\ndef create_dataframe_from_jsonfiles(root_directory):\n '''\n Concatenante all json files founded in root_directory\n Drop duplicate directly\n Parameters\n ---------- \n root_directory str - the folder to import in a dataframe\n Returns\n ------- \n a Pandas dataframe\n '''\n \n #get all files\n files = get_files(root_directory , '*.json' )\n\n #load each files in a data frame\n df_from_each_json = ( pd.read_json( f , lines = True ) for f in files )\n\n #concatenate each dataframe in one\n df_concatenated = pd.concat( df_from_each_json , ignore_index = True )\n\n #drop duplicates\n return df_concatenated.drop_duplicates(inplace=False) \n\n \ndef clean_dataframe_for_export( df , force_empty_string_to_null = True , null_string = 'NULL' , sep='\\t' , sep_replace_with=''):\n '''\n Prepare (&uniformise) the DF for CSV exportation\n Replace NaN or empty cell with NULL \n \n Parameters\n ---------- \n df (Dataframe) - the Pandas dataframe\n force_empty_string_to_null (bool) - by default empty strings are nullified\n null_string (str) - the string 
used to represent NULL (by def)\n sep (str) - to separator choosed in CSV file..\n sep_replace_with (str) - .. to replce with\n Returns\n ------- \n a Pandas dataframe\n ''' \n #NaN becomes NULL\n df = df.fillna(null_string)\n \n #replace all empty cells with NULL\n if( force_empty_string_to_null ): df = df.replace('', null_string)\n \n #avoid usage of tab in all cells...\n df = df.replace(sep, sep_replace_with)\n \n return df\n\n\ndef lookup_song_and_artist( params , cur , query ):\n '''\n >> Function to apply in a dataframe that came from a Log Dataset <<\n Based on params[], query the DB for artist & song ID\n Parameters\n ----------\n It is tricky to use to because params must be well ordered\n params[0] : str - artist name\n params[1] : str - song title\n params[2] : decimal - length of songs\n cur : cursor - the psycopg cursor used to trigger the query\n query : str - the sql query to execute with params (to be passed bay apply(args=) )\n Returns\n -------\n list(songid, artistid) : \n - a list of related entity db primary key\n - 'NULL', 'NULL' if nothing found\n - 'Error' , 'exception error' in case of exceptions\n '''\n #concretise params\n artist = params[0]\n song = params[1]\n length = params[2]\n \n \n try:\n # query the db\n cur.execute( query , (artist, song , length))\n results = cur.fetchone()\n except psycopg2.Error as e:\n # catch the error and return an empty result\n results = 'Error' , e \n \n if results:\n #ok we have a match or an error\n songid, artistid = results\n else:\n #instead we nulls\n songid, artistid = 'NULL', 'NULL'\n \n return ( songid, artistid ) \n\n\ndef copy_df_to_db( cur , df , tablename , table_columns , with_index=False , separator='\\t'):\n '''\n Manage the df copy to db. 
\n This functions manage the creation of the CSV to be imported\n \n Parameters\n ---------- \n cur - cursor to the database where the copy_from will be performed\n df - Pandas dataframe to export to SQL\n tablename - table where Pandas data will be imported\n table_columns (str) - list of the sql columns in tablename\n with_index=False - do we export the df index as first column ?\n separator='\\t' - separator \n '''\n # create a buffer for CSV infos\n buffer = io.StringIO()\n \n #serialize the dataframe into the buffer (no header at all)\n df.to_csv( buffer , index=with_index , header=False, sep=separator)\n \n # move the pointer at the start of the stream to do another iteration\n buffer.seek(0)\n \n try:\n cur.copy_from(buffer, tablename , sep=separator , columns=table_columns , null='NULL' )\n except psycopg2.Error as e:\n print('Error while processing table{} : {}'.format(tablename , e )) \n \ndef process_song_files(cur, filepath):\n '''\n ETL on Song Dataset\n \n Parameters\n ---------- \n cur (cursor) - allow execution of SQL queries in the database\n filepath (str) - place where json song files resides\n '''\n \n ### EXTRACT ###\n df_songs_json = create_dataframe_from_jsonfiles(filepath)\n \n ### TRANSFORM ###\n #extract songs infos and load them to the DB\n df_songs = df_songs_json[['song_id','title','artist_id','year','duration']].copy()\n df_songs = clean_dataframe_for_export(df_songs)\n \n \n #extract artists infos and load them to the DB\n df_artists = df_songs_json[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].copy()\n df_artists.drop_duplicates(inplace=True) \n df_artists = clean_dataframe_for_export(df_artists)\n\n ### LOAD ###\n copy_df_to_db(cur, df_artists, 'artists',( 'artist_id', 'name', 'location', 'lattitude', 'longitude' ) )\n copy_df_to_db(cur, df_songs, 'songs', ( 'song_id', 'title', 'artist_id', 'year', 'duration') ) \n\n\ndef process_log_files(cur, filepath):\n '''\n ETL on Log Dataset\n \n 
Parameters\n ---------- \n cur (cursor) - allow execution of SQL queries in the database\n filepath (str) - place where json song files resides\n '''\n \n ### EXTRACT ###\n #load log's\n df_logs_json = create_dataframe_from_jsonfiles('data/log_data')\n #filter on NextSong logs entries\n df_NextSong = df_logs_json[ df_logs_json['page'] == 'NextSong' ].copy()\n \n ### TRANSFORM ###\n \n df_NextSong = clean_dataframe_for_export(df_NextSong)\n \n #convert the ts column to a datetime column\n df_NextSong['start_time'] = pd.to_datetime( df_NextSong['ts'], unit='ms')\n df_NextSong['start_time'] = df_NextSong['start_time'].astype('datetime64[s]')\n\n #append column needed for times table\n df_NextSong['hour'] = df_NextSong['start_time'].dt.hour\n df_NextSong['day'] = df_NextSong['start_time'].dt.day\n df_NextSong['week'] = df_NextSong['start_time'].dt.week\n df_NextSong['month'] = df_NextSong['start_time'].dt.month\n df_NextSong['year'] = df_NextSong['start_time'].dt.year\n df_NextSong['weekday'] = df_NextSong['start_time'].dt.weekday\n\n #avoid duplicate caused by type mismatch\n df_NextSong['userId'] = pd.to_numeric( df_NextSong['userId'])\n\n\n ## prepare the dataframes for import\n \n df_times = df_NextSong[['start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday']]\n \n \n df_users = df_NextSong[['userId', 'firstName', 'lastName', 'gender', 'level']].copy()\n #clean duplicates on userID\n df_users.drop_duplicates( inplace=True, keep='last' , subset=['userId', 'firstName', 'lastName', 'gender'])\n \n \n df_songplay = df_NextSong[['userId', 'level', 'sessionId', 'location', 'userAgent','artist','song','length']].copy()\n \n # keep the 1:1 relation beetween times & songplay table\n df_songplay['time_id'] = df_songplay.index\n \n #lookup artist & song IDs\n df_songplay['songid'], df_songplay['artistid'] = zip(*df_songplay[['artist','song' , 'length' ]].apply( lookup_song_and_artist , axis=1 , args=(cur, song_select,)))\n #rearrange the column\n df_songplay = 
df_songplay[['time_id','userId','level','songid', 'artistid','sessionId','location','userAgent']]\n\n \n ### LOAD ###\n copy_df_to_db(cur, df_times, 'times', ( 'time_id', 'start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday') , with_index=True )\n copy_df_to_db(cur, df_users, 'users', ('user_id', 'first_name', 'last_name', 'gender', 'level') )\n copy_df_to_db(cur, df_songplay, 'songplays', ('songplay_id', 'time_id', 'user_id', 'level', 'song_id', 'artist_id', 'session_id', 'location', 'user_agent'), with_index=True )\n \n\ndef main():\n '''\n ETL lauching point\n ''' \n conn = None\n cur = None\n\n try:\n conn = psycopg2.connect( DSN_SPARKIFY )\n conn.autocommit = True\n cur = conn.cursor()\n except psycopg2.Error as e:\n print('Error while initializing the db connection : {}'.format( e )) \n\n print( '*********** Processing song_data.....')\n process_song_files(cur, 'data/song_data')\n \n print( '*********** Processing log_data.....')\n process_log_files(cur, 'data/log_data')\n\n if( conn ):conn.close()\n print( '*********** Processing terminated.....')\n \n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"pandas.to_datetime",
"pandas.read_json",
"pandas.to_numeric",
"pandas.concat"
]
] |
GusFurtado/DadosAbertosBrasil
|
[
"d8848f4c5c1107b3b67fd4ef73689541d06c3e28"
] |
[
"DadosAbertosBrasil/camara.py"
] |
[
"'''\nMódulo para captura dos dados abertos da Câmara dos Deputados do Brasil.\n\nMini-Tutorial\n-------------\n1. Importe o módulo `camara`.\n>>> from DadosAbertosBrasil import camara\n\n2. Busque o código do objeto de estudo utilizando as funções `lista`.\n>>> camara.lista_deputados( ... )\n\n3. Instancie o objeto de estudo utilizando o código encontrado.\n>>> dep = camara.Deputado(cod)\n\n4. Utilize os atributos da classe para obter informações básicas do objeto.\n>>> dep.dados\n\n5. Utilize os métodos da classe para obter informações detalhadas do objeto.\n>>> dep.despesas( ... )\n\nDocumentação da API original\n----------------------------\nhttps://dadosabertos.camara.leg.br/swagger/api.html\n'''\n\n\n\nimport pandas as _pd\n\nfrom ._utils import parse\nfrom ._utils.get_data import get_data\n\n\n\ndef _get(\n path: str,\n params: dict = None\n ) -> dict:\n return get_data(\n endpoint = 'https://dadosabertos.camara.leg.br/api/v2/',\n path = path,\n params = params\n )\n\n\n\ndef _df(\n dados: dict,\n index_col: str = None\n ) -> _pd.DataFrame:\n '''\n Converte dados brutos da API em um DataFrame.\n\n Parâmetros\n ----------\n dados : dict\n Dados brutos da API.\n index_col : str (default=None)\n Nome da coluna que será usada como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Dados convertidos em DataFrame.\n '''\n\n df = _pd.DataFrame(dados['dados'])\n if (index_col is not None) and (not df.empty):\n df.set_index(index_col, inplace=True)\n\n return df\n\n\n\nclass Bloco:\n '''\n Informações sobre um bloco partidário específico.\n\n Parâmetros\n ----------\n cod: int\n Código numérico do bloco partidário do qual se deseja informações.\n\n Atributos\n ---------\n dados : dict\n Conjunto completo de dados.\n cod : int\n Código numérico do bloco partidário.\n legislatura : str\n Legislatura do bloco partidário.\n nome : str\n Nome do bloco partidário.\n uri : str\n Endereço para coleta de dados direta pela API do bloco partidário.\n\n 
Exemplos\n --------\n Obter o nome do bloco #576.\n >>> bl = camara.Bloco(cod=576)\n >>> bl.nome\n ... 'PSL, PTB'\n\n --------------------------------------------------------------------------\n '''\n\n def __init__(self, cod:int):\n self.cod = cod\n self.dados = _get(['blocos', str(cod)])['dados']\n self.legislatura = self.dados['idLegislatura']\n self.nome = self.dados['nome']\n self.uri = self.dados['uri']\n\n\n def __repr__(self):\n return f\"DadosAbertosBrasil.camara: Bloco {self.nome}\"\n\n\n\nclass Deputado:\n '''\n Retorna os dados cadastrais de um parlamentar que, em algum momento\n da história e por qualquer período, entrou em exercício na Câmara.\n\n Parâmetros\n ----------\n cod : int\n Código do parlamentar.\n\n Atributos\n ---------\n dados : dict\n Conjunto completo de dados.\n cod : int\n Código de identificação.\n condicao_eleitoral : str\n Condição eleitoral.\n cpf : str\n Número do CPF.\n descricao_status : str\n Descrição do último status.\n email : str\n E-mail.\n escolaridade : str\n Escolaridade.\n falecimento : str\n Data de falecimento no formato 'AAAA-MM-DD'.\n Retorna vazio caso o parlamentar não tenha falecido.\n foto : str\n URL da foto.\n gabinete : dict\n Informações de identificação e contato do gabinete.\n legislatura : int\n ID da legislatura mais recente.\n municipio_nascimento : str\n Município de nascimento.\n nascimento : str\n Data de nascimento no formato 'AAAA-MM-DD'.\n nome : str\n Nome mais comum.\n nome_completo : str\n Nome civil completo.\n nome_eleitoral : str\n Nome utilizado na campanha eleitoral.\n partido : str\n Último partido.\n rede_social : list\n Lista de nomes em redes sociais.\n sexo : str\n - 'M': Masculino;\n - 'F': Feminino.\n situacao : str\n Situação do último status.\n uf : str\n Sigla da Unidade Federativa pela qual foi eleito.\n uf_nascimento : str\n Unidade Federativa de nascimento.\n ultima_atualizacao : str\n Dia e horário da última atualização de status.\n uri : str\n Endereço para coleta de 
dados direta pela API.\n website : str\n Website.\n\n Exemplos\n --------\n Coletar partido mais recente do deputado Rodrigo Maia.\n >>> cod = 74693 # Código do deputado\n >>> dep = camara.Deputado(cod=cod)\n >>> dep.partido\n ... 'DEM'\n\n --------------------------------------------------------------------------\n '''\n\n def __init__(self, cod:int):\n self.cod = cod\n self.dados = _get(['deputados', str(cod)])['dados']\n self.condicao_eleitoral = self.dados['ultimoStatus']['condicaoEleitoral']\n self.cpf = self.dados['cpf']\n self.descricao_status = self.dados['ultimoStatus']['descricaoStatus']\n self.email = self.dados['ultimoStatus']['email']\n self.escolaridade = self.dados['escolaridade']\n self.falecimento = self.dados['dataFalecimento']\n self.foto = self.dados['ultimoStatus']['urlFoto']\n self.gabinete = self.dados['ultimoStatus']['gabinete']\n self.legislatura = self.dados['ultimoStatus']['idLegislatura']\n self.municipio_nascimento = self.dados['municipioNascimento']\n self.nascimento = self.dados['dataNascimento']\n self.nome = self.dados['ultimoStatus']['nome']\n self.nome_completo = self.dados['nomeCivil']\n self.nome_eleitoral = self.dados['ultimoStatus']['nomeEleitoral']\n self.partido = self.dados['ultimoStatus']['siglaPartido']\n self.rede_social = self.dados['redeSocial']\n self.sexo = self.dados['sexo']\n self.situacao = self.dados['ultimoStatus']['situacao']\n self.uf = self.dados['ultimoStatus']['siglaUf']\n self.uf_nascimento = self.dados['ufNascimento']\n self.ultima_atualizacao = self.dados['ultimoStatus']['data']\n self.uri = self.dados['uri']\n self.website = self.dados['urlWebsite']\n\n\n def __repr__(self):\n return f\"DadosAbertosBrasil.camara: Deputad{'a' if self.sexo == 'F' else 'o'} {self.nome_eleitoral}\"\n\n\n def despesas(\n self,\n legislatura: int = None,\n ano: int = None,\n mes: int = None,\n fornecedor: int = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = True,\n ordenar_por: str = 'ano'\n ) -> _pd.DataFrame:\n 
'''\n As despesas com exercício parlamentar do deputado.\n\n Dá acesso aos registros de pagamentos e reembolsos feitos pela Câmara\n em prol do deputado, a título da Cota para Exercício da Atividade\n Parlamentar, a chamada \"cota parlamentar\".\n Se não forem passados os parâmetros de tempo, o serviço retorna os\n dados dos seis meses anteriores à requisição.\n\n Parâmetros\n ----------\n legislatura : int (default=None)\n ID da legislatura em que tenham ocorrido as despesas.\n ano : int (default=None)\n Ano de ocorrência das despesas.\n mes : int (default=None)\n Número do mês de ocorrência das despesas.\n fornecedor : int (default=None)\n CNPJ de uma pessoa jurídica, ou CPF de uma pessoa física,\n fornecedora do produto ou serviço (apenas números).\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com\n esta requisição.\n asc : bool (default=True)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='ano')\n Nome do campo pelo qual a lista deverá ser ordenada:\n qualquer um dos campos do retorno, e também idLegislatura.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de despesas com exercício parlamentar do deputado.\n\n ----------------------------------------------------------------------\n '''\n\n params = {}\n if legislatura is not None:\n params['idLegislatura'] = legislatura\n if ano is not None:\n params['ano'] = ano\n if mes is not None:\n params['mes'] = mes\n if fornecedor is not None:\n params['cnpjCpfFornecedor'] = fornecedor\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n 
path = ['deputados', str(self.cod), 'despesas']\n dados = _get(path=path, params=params)\n return _df(dados)\n\n\n def discursos(\n self,\n legislatura: int = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = True,\n ordenar_por: str = 'dataHoraInicio'\n ) -> _pd.DataFrame:\n '''\n Os discursos feitos por um deputado em eventos diversos.\n\n Retorna uma lista de informações sobre os pronunciamentos feitos\n pelo deputado que tenham sido registrados, em quaisquer eventos,\n nos sistemas da Câmara.\n Caso os parâmetros de tempo não sejam configurados na requisição,\n são buscados os discursos ocorridos nos sete dias anteriores ao\n da requisição.\n\n Parâmetros\n ----------\n legislatura : int (default=None)\n Número da legislatura a qual os dados buscados devem corresponder.\n inicio : str (default=None)\n Data de início de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n fim : str (default=None)\n Data de término de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com esta\n requisição.\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. 
Se omitido, assume o valor 1.\n asc : bool (default=True)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='dataHoraInicio')\n Qual dos elementos da representação deverá ser usado para aplicar\n ordenação à lista.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de discursos feitos por um deputado em eventos diversos.\n\n ----------------------------------------------------------------------\n '''\n\n params = {}\n if legislatura is not None:\n params['idLegislatura'] = legislatura\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n path = ['deputados', str(self.cod), 'discursos']\n dados = _get(path=path, params=params)\n return _df(dados)\n\n\n def eventos(\n self,\n legislatura: int = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = True,\n ordenar_por: str = 'dataHoraInicio',\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Uma lista de eventos com a participação do parlamentar.\n\n Retorna uma lista de objetos evento nos quais a participação do\n parlamentar era ou é prevista.\n Se não forem passados parâmetros de tempo, são retornados os eventos\n num período de cinco dias, sendo dois antes e dois depois do dia da\n requisição.\n\n Parâmetros\n ----------\n legislatura : int (default=None)\n Número da legislatura a qual os dados buscados devem corresponder.\n inicio : str (default=None)\n Data de início de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n fim : str (default=None)\n Data de término de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n pagina : int (default=1)\n Número da página de resultados, a partir de 
1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com esta\n requisição.\n asc : bool (default=True)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='dataHoraInicio')\n Qual dos elementos da representação deverá ser usado para aplicar\n ordenação à lista.\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de discursos feitos por um deputado em eventos diversos.\n\n ----------------------------------------------------------------------\n '''\n\n params = {}\n if legislatura is not None:\n params['idLegislatura'] = legislatura\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n path = ['deputados', str(self.cod), 'eventos']\n dados = _get(path=path, params=params)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n def frentes(\n self,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n As frentes parlamentares das quais um deputado é integrante.\n\n Retorna uma lista de informações básicas sobre as frentes\n parlamentares das quais o parlamentar seja membro, ou, no caso de\n frentes existentes em legislaturas anteriores, tenha encerrado a\n legislatura como integrante.\n\n Parâmetros\n ----------\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de frentes parlamentares das quais um deputado é integrante.\n\n 
----------------------------------------------------------------------\n '''\n\n path = ['deputados', str(self.cod), 'frentes']\n dados = _get(path=path, params=None)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n def orgaos(\n self,\n legislatura: int = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = True,\n ordenar_por: str = 'dataInicio',\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Os órgãos dos quais um deputado é integrante.\n\n Retorna uma lista de órgãos, como as comissões e procuradorias,\n dos quais o deputado participa ou participou durante um intervalo\n de tempo.\n Cada item identifica um órgão, o cargo ocupado pelo parlamentar neste\n órgão (como presidente, vice-presidente, titular ou suplente) e as\n datas de início e fim da ocupação deste cargo.\n Se não for passado algum parâmetro de tempo, são retornados os órgãos\n ocupados pelo parlamentar no momento da requisição. Neste caso a\n lista será vazia se o deputado não estiver em exercício.\n\n Parâmetros\n ----------\n inicio : str (default=None)\n Data de início de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n fim : str (default=None)\n Data de término de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. 
Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com esta\n requisição.\n asc : bool (default=True)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='dataInicio')\n Qual dos elementos da representação deverá ser usado para aplicar\n ordenação à lista.\n index : bool (default=False)\n Se True, define a coluna `idOrgao` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista dos órgãos dos quais um deputado é integrante.\n\n ----------------------------------------------------------------------\n '''\n\n params = {}\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n path = ['deputados', str(self.cod), 'orgaos']\n dados = _get(path=path, params=params)\n index_col = 'idOrgao' if index else None\n return _df(dados, index_col)\n\n\n\nclass Evento:\n '''\n Retorna um conjunto detalhado de informações sobre o evento da Câmara.\n\n Parâmetros\n ----------\n cod : int\n Código numérico do evento do qual se deseja informações.\n\n Atributos\n ---------\n dados : dict\n Conjunto completo de dados.\n cod : int\n Código numérico do evento.\n andar : str\n Andar do prédio onde ocorreu o evento.\n descricao : str\n Descrição do evento.\n descricao_tipo : str\n Tipo do evento.\n fases : str\n Fases do evento.\n fim : str\n Data e horário que o evento foi finalizado no formato 'AAAA-MM-DD'.\n inicio : str\n Data e horário que o evento foi iniciado no formato 'AAAA-MM-DD'.\n local : str\n Local onde ocorreu o evento.\n local_externo : str\n Local externo do evento.\n lista_orgaos : list of dict\n Lista de orgãos e 
informações sobre os mesmos.\n predio : str\n Prédio que ocorreu o evento.\n requerimentos : list of dict\n Requerimentos do evento.\n sala : str\n Sala do prédio onde ocorreu o evento.\n situacao : str\n Situação do evento.\n uri : str\n Endereço para coleta de dados direta pela API do evento.\n uri_convidados : str\n Endereço para coleta de dados direta pela API dos convidados.\n uri_deputados : str\n Endereço para coleta de dados direta pela API dos deputados.\n url_documento_pauta : str\n Endereço URL para visualizar a pauta do evento.\n url_registro : str\n Endereço URL onde o evento foi registrado.\n\n Exemplos\n --------\n Obter a URL para assistir ao evento #59265.\n >>> ev = camara.Evento(cod=59265)\n >>> ev.url_registro\n ... 'https://www.youtube.com/watch?v=8D2gjMrTnMA'\n\n --------------------------------------------------------------------------\n '''\n\n def __init__(self, cod:int):\n self.cod = cod\n self.dados = _get(['eventos', str(cod)])['dados']\n self.andar = self.dados['localCamara']['andar']\n self.descricao = self.dados['descricao']\n self.descricao_tipo = self.dados['descricaoTipo']\n self.fases = self.dados['fases']\n self.fim = self.dados['dataHoraFim']\n self.inicio = self.dados['dataHoraInicio']\n self.local = self.dados['localCamara']['nome']\n self.local_externo = self.dados['localExterno']\n self.lista_orgaos = self.dados['orgaos']\n self.predio = self.dados['localCamara']['predio']\n self.requerimentos = self.dados['requerimentos']\n self.sala = self.dados['localCamara']['sala']\n self.situacao = self.dados['situacao']\n self.uri = self.dados['uri']\n self.uri_convidados = self.dados['uriConvidados']\n self.uri_deputados = self.dados['uriDeputados']\n self.url_documento_pauta = self.dados['urlDocumentoPauta']\n self.url_registro = self.dados['urlRegistro']\n\n\n def __repr__(self):\n return f\"DadosAbertosBrasil.camara: Evento {self.descricao}\"\n\n\n def deputados(\n self,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Os 
deputados participantes de um evento específico.\n\n Retorna uma lista de dados resumidos sobre deputados participantes do\n evento. Se o evento já ocorreu, a lista identifica os deputados que\n efetivamente registraram presença no evento. Se o evento ainda não\n ocorreu, a lista mostra os deputados que devem participar do evento,\n por serem convidados ou por serem membros do(s) órgão(s) responsável\n pelo evento.\n\n Parâmetros\n ----------\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista dos deputados participantes de um evento específico.\n\n ----------------------------------------------------------------------\n '''\n\n path = ['eventos', str(self.cod), 'deputados']\n dados = _get(path=path, params=None)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n def orgaos(\n self,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Lista de órgãos organizadores do evento.\n\n Retorna uma lista em que cada item é um conjunto mínimo de dados sobre\n o(s) órgão(s) responsável(is) pelo evento.\n\n Parâmetros\n ----------\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de órgãos organizadores do evento.\n\n ----------------------------------------------------------------------\n '''\n\n path = ['eventos', str(self.cod), 'orgaos']\n dados = _get(path=path, params=None)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n def pauta(\n self,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Lista de proposições que foram ou deverão ser avaliadas em um evento\n de caráter deliberativo.\n\n Se o evento for de caráter deliberativo (uma reunião ordinária,\n por exemplo) este serviço retorna a lista de proposições previstas\n para avaliação pelos parlamentares. 
Cada item identifica, se as\n informações estiverem disponíveis, a proposição avaliada, o regime\n de preferência para avaliação, o relator e seu parecer, o resultado\n da apreciação e a votação realizada.\n\n Parâmetros\n ----------\n index : bool (default=False)\n Se True, define a coluna `ordem` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de proposições do evento.\n\n ----------------------------------------------------------------------\n '''\n\n path = ['eventos', str(self.cod), 'pauta']\n dados = _get(path=path, params=None)\n index_col = 'ordem' if index else None\n return _df(dados, index_col)\n\n\n def votacoes(\n self,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Informações detalhadas de votações sobre o evento.\n\n Retorna uma lista de dados básicos sobre votações que tenham sido\n realizadas no evento. Votações só ocorrem em eventos de caráter\n deliberativo. Dados complementares sobre cada votação listada podem\n ser obtidos no recurso.\n\n Parâmetros\n ----------\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de de votações sobre o evento.\n\n ----------------------------------------------------------------------\n '''\n\n path = ['eventos', str(self.cod), 'votacoes']\n dados = _get(path=path, params=None)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n\nclass Frente:\n '''\n Informações detalhadas sobre uma frente parlamentar.\n\n Parâmetros\n ----------\n cod : int\n Código numérico da frente parlamentar da qual se deseja informações.\n\n Atributos\n ---------\n dados : dict\n Conjunto completo de dados.\n cod : int\n Código numérico da frente parlamentar.\n coordenador : dict\n Informações do(a) coordenador(a) da frente parlamentar.\n documento : str\n URL do documento da frente parlamentar.\n email : str\n E-mail de contato.\n id_sitacao : int\n ID da situação da frente 
parlamentar.\n keywords : str\n Palavras-chaves da frente parlamentar.\n legislatura : int\n ID da legislatura da frente parlamentar.\n situacao : str\n Situação da frente parlamentar.\n telefone : str\n Telefone de contato.\n titulo : str\n Título da frente parlamentar.\n uri : str\n Endereço para coleta de dados direta pela API da frente parlamentar.\n website : str\n URL do website da frente parlamentar.\n\n Exemplos\n --------\n Obter título da frente parlamentar #54258.\n >>> fr = camara.Frente(cod=54258)\n >>> fr.url_registro\n ... 'Frente Parlamentar Mista da Telessaúde'\n\n --------------------------------------------------------------------------\n '''\n\n def __init__(self, cod:int):\n self.cod = cod\n self.dados = _get(['frentes', str(cod)])['dados']\n self.coordenador = self.dados['coordenador']\n self.documento = self.dados['urlDocumento']\n self.email = self.dados['email']\n self.id_sitacao = self.dados['idSituacao']\n self.keywords = self.dados['keywords']\n self.legislatura = self.dados['idLegislatura']\n self.situacao = self.dados['situacao']\n self.telefone = self.dados['telefone']\n self.titulo = self.dados['titulo']\n self.uri = self.dados['uri']\n self.website = self.dados['urlWebsite']\n\n\n def __repr__(self):\n return f\"DadosAbertosBrasil.camara: {self.titulo}\"\n \n\n def membros(\n self,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Os deputados que participam da frente parlamentar.\n\n Uma lista dos deputados participantes da frente parlamentar e os\n papéis que exerceram nessa frente (signatário, coordenador ou\n presidente). 
Observe que, mesmo no caso de frentes parlamentares\n mistas (compostas por deputados e senadores), são retornados apenas\n dados sobre os deputados.\n\n Parâmetros\n ----------\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista dos deputados que participam da frente parlamentar.\n\n ----------------------------------------------------------------------\n '''\n\n path = ['frentes', str(self.cod), 'membros']\n dados = _get(path=path, params=None)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n\nclass Legislatura:\n '''\n Informações extras sobre uma determinada legislatura da Câmara.\n\n Parâmetros\n ----------\n cod : int\n Código numérico da legislatura da qual se deseja informações.\n\n Atributos\n ---------\n dados : dict\n Conjunto completo de dados.\n cod : int\n Código numérico da legislatura.\n inicio : str\n Primeiro dia da legislatura.\n fim : str\n Último dia da legislatura.\n uri : str\n Endereço para coleta de dados direta pela API da legislatura.\n\n Exemplos\n --------\n Obter o primeiro e último dia da legislatura #56.\n >>> leg = camara.Legislatura(cod=54)\n >>> leg.inicio\n ... '2011-02-01'\n >>> leg.fim\n ... 
'2015-01-31'\n\n --------------------------------------------------------------------------\n '''\n\n def __init__(self, cod:int):\n self.cod = cod\n self.dados = _get(['legislaturas', str(cod)])['dados']\n self.fim = self.dados['dataFim']\n self.inicio = self.dados['dataInicio']\n self.uri = self.dados['uri']\n\n\n def __repr__(self):\n return f\"DadosAbertosBrasil.camara: Legislatura {self.cod}\"\n\n\n def mesa(\n self,\n inicio: str = None,\n fim: str = None,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Quais deputados fizeram parte da Mesa Diretora em uma legislatura.\n\n Retorna uma lista com dados básicos sobre todos os deputados que\n ocuparam algum posto na Mesa Diretora da Câmara em algum período de\n tempo dentro da legislatura. Normalmente, cada legislatura tem duas\n Mesas Diretoras, com presidente, dois vice-presidentes, quatro\n secretários parlamentares e os suplentes dos secretários.\n\n Parâmetros\n ----------\n inicio : str (default=None)\n Dia de início do intervalo de tempo do qual se deseja saber a\n composição da Mesa, no formato 'AAAA-MM-DD'.\n fim : str (default=None)\n Data de término do intervalo de tempo do qual se deseja saber a\n composição da Mesa, no formato 'AAAA-MM-DD'.\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista dos deputados que participam da frente parlamentar.\n\n ----------------------------------------------------------------------\n '''\n\n params = {}\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n\n path = ['legislaturas', str(self.cod), 'mesa']\n dados = _get(path=path, params=params)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n\nclass Orgao:\n '''\n Informações detalhadas sobre um órgão da Câmara.\n\n Parâmetros\n ----------\n cod : int\n Código numérico do órgão do qual se deseja 
informações.\n\n Atributos\n ---------\n dados : dict\n Conjunto completo de dados.\n cod : int\n Código numérico do órgão.\n apelido : str\n Apelido do órgão.\n casa : str\n Casa do órgão.\n cod_tipo : int\n Código do tipo do órgão.\n fim : str\n Data final do órgão.\n inicio : str\n Data inicial do órgão.\n instalacao : str\n Data de instalação do órgão.\n nome : str\n Nome do órgão.\n nome_publicacao : str\n Nome de publicação.\n sala : str\n Sala do órgão.\n sigla : str\n Sigla do órgão.\n tipo : str\n Tipo do órgão.\n uri : str\n Endereço para coleta de dados direta pela API do órgão.\n urlWebsite : str\n URL para acessar o website do órgão.\n\n Exemplos\n --------\n Obter o apelido do órgão #4.\n >>> org = camara.Orgao(cod=4)\n >>> org.apelido\n ... 'Mesa Diretora'\n \n --------------------------------------------------------------------------\n '''\n\n def __init__(self, cod:int):\n self.cod = cod\n self.dados = _get(['orgaos', str(cod)])['dados']\n self.apelido = self.dados['apelido']\n self.casa = self.dados['casa']\n self.cod_tipo = self.dados['codTipoOrgao']\n self.fim = self.dados['dataFim']\n self.inicio = self.dados['dataInicio']\n self.instalacao = self.dados['dataInstalacao']\n self.nome = self.dados['nome']\n self.nome_publicacao = self.dados['nomePublicacao']\n self.sala = self.dados['sala']\n self.sigla = self.dados['sigla']\n self.tipo = self.dados['tipoOrgao']\n self.uri = self.dados['uri']\n self.urlWebsite = self.dados['urlWebsite']\n\n\n def __repr__(self):\n return f\"DadosAbertosBrasil.camara: Órgão {self.nome}\"\n\n\n\n def eventos(\n self,\n tipo_evento: str = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = True,\n ordenar_por: str = 'dataHoraInicio',\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Os eventos ocorridos ou previstos em um órgão legislativo.\n\n Retorna uma lista de informações resumidas dos eventos realizados\n (ou a realizar) pelo órgão legislativo. 
Por padrão, são retornados\n eventos em andamento ou previstos para o mesmo dia, dois dias antes\n e dois dias depois da requisição. Parâmetros podem ser passados para\n alterar esse período, bem como os tipos de eventos.\n\n Parâmetros\n ----------\n tipo_evento : str (default=None)\n Identificador numérico do tipo de evento que se deseja obter.\n inicio : str (default=None)\n Data de início de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n fim : str (default=None)\n Data de término de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com esta\n requisição.\n asc : bool (default=True)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='dataHoraInicio')\n Qual dos elementos da representação deverá ser usado para aplicar\n ordenação à lista.\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de discursos feitos por um deputado em eventos diversos.\n\n ----------------------------------------------------------------------\n '''\n\n params = {}\n if tipo_evento is not None:\n params['idTipoEvento'] = tipo_evento\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n path = ['orgaos', str(self.cod), 'eventos']\n dados = _get(path=path, params=params)\n index_col = 'id' if index else None\n return _df(dados, 
index_col)\n\n\n def membros(\n self,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Lista de cargos de um órgão e parlamentares que os ocupam.\n\n Retorna uma lista de dados resumidos que identificam cada parlamentar\n e o cargo ou posição que ocupa ou ocupou no órgão parlamentar durante\n um certo período de tempo. Se não forem passados parâmetros que\n delimitem esse período, o serviço retorna os membros do órgão no\n momento da requisição. Se o órgão não existir mais ou não estiver\n instalado, é retornada uma lista vazia.\n\n Parâmetros\n ----------\n inicio : str (default=None)\n Data de início de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n fim : str (default=None)\n Data de término de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. 
Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na “página” que se deseja obter com esta\n requisição.\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de cargos de um órgão e parlamentares que os ocupam.\n\n ----------------------------------------------------------------------\n '''\n\n params = {}\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n\n path = ['orgaos', str(self.cod), 'membros']\n dados = _get(path=path, params=params)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n def votacoes(\n self,\n proposicao: int = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = False,\n ordenar_por: str = 'dataHoraRegistro',\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Uma lista de eventos com a participação do parlamentar.\n\n Retorna uma lista de dados básicos de votações que tenham sido\n realizadas em eventos realizados no órgão. Se este for um órgão\n permanente da Câmara, são retornados, por padrão, dados sobre as\n votações realizadas pelo órgão nos últimos 30 dias. Esse período pode\n ser alterado com o uso dos parâmetros `inicio` e/ou `fim`, que por\n enquanto são limitados a selecionar somente votações ocorridas em um\n mesmo ano.\n Caso este seja um órgão temporário, como uma comissão especial, são\n listadas por padrão todas as votações ocorridas no órgão, em qualquer\n período de tempo.\n Dados complementares sobre cada votação listada podem ser obtidos com\n o objeto `camara.Votacao`.\n\n Parâmetros\n ----------\n proposicao : int (default=None)\n Código numérico da proposição, que podem ser obtidos por meio da\n função `camara.lista_proposicoes`. 
class Partido:
    '''
    Detailed information about a political party.

    Parameters
    ----------
    cod : int
        Numeric code of the party.

    Attributes
    ----------
    dados : dict
        Full raw payload returned by the API.
    cod : int
        Numeric code of the party.
    facebook : str
        URL of the party's Facebook page.
    legislatura : str
        Numeric code of the latest legislature.
    lider : dict
        Information about the party leader.
    logo : str
        URL of the party logo.
    nome : str
        Full name of the party.
    numero : int
        Electoral number of the party.
    sigla : str
        Party acronym.
    situacao : str
        Current situation of the party.
    total_membros : str
        Total number of members.
    total_posse : str
        Total number of members in office.
    ultima_atualizacao : str
        Timestamp of the last update of the party data.
    uri : str
        API endpoint for this party.
    uri_membros : str
        API endpoint for the party members.
    website : str
        URL of the party website.

    Examples
    --------
    Get the full name of party #36899.
    >>> p = camara.Partido(cod=36899)
    >>> p.nome
    ... 'Movimento Democrático Brasileiro'

    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        dados = _get(['partidos', str(cod)])['dados']
        self.dados = dados
        # `status` groups the fields that describe the party's current state.
        status = dados['status']
        self.facebook = dados['urlFacebook']
        self.legislatura = status['idLegislatura']
        self.lider = status['lider']
        self.logo = dados['urlLogo']
        self.nome = dados['nome']
        self.numero = dados['numeroEleitoral']
        self.sigla = dados['sigla']
        self.situacao = status['situacao']
        self.total_membros = status['totalMembros']
        self.total_posse = status['totalPosse']
        self.ultima_atualizacao = status['data']
        self.uri = dados['uri']
        self.uri_membros = status['uriMembros']
        self.website = dados['urlWebSite']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: {self.nome}"

    def membros(
            self,
            inicio: str = None,
            fim: str = None,
            legislatura: int = None,
            pagina: int = 1,
            itens: int = None,
            ordenar_por: str = None,
            asc: bool = True,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Deputies that are or were in office for this party in a period.

        Optionally, `inicio`, `fim` or `legislatura` select a time window.
        Equivalent to `lista_deputados` filtered by party, but better suited
        to already-extinct parties.

        Parameters
        ----------
        inicio : str (default=None)
            Start date of the time interval, in 'AAAA-MM-DD' format.
        fim : str (default=None)
            End date of the time interval, in 'AAAA-MM-DD' format.
        legislatura : int (default=None)
            Legislature number the data must correspond to.
        pagina : int (default=1)
            Results page to fetch, starting from 1.
        itens : int (default=None)
            Maximum number of items per page.
        ordenar_por : str (default=None)
            Field of the representation used to sort the list.
        asc : bool (default=True)
            Sort direction: True for ascending, False for descending.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Party members during the requested period.

        ----------------------------------------------------------------------
        '''

        # Optional filters: only sent when a value was provided.
        opcionais = (
            ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
            ('dataFim', None if fim is None else parse.data(fim, 'camara')),
            ('idLegislatura', legislatura),
        )
        params = {chave: valor for chave, valor in opcionais if valor is not None}
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        if ordenar_por is not None:
            params['ordenarPor'] = ordenar_por

        dados = _get(path=['partidos', str(self.cod), 'membros'], params=params)
        return _df(dados, 'id' if index else None)
class Proposicao:
    '''
    Detailed information about a specific proposition.

    Parameters
    ----------
    cod : int
        Numeric code of the proposition.

    Attributes
    ----------
    dados : dict
        Full raw payload returned by the API.
    cod : int
        Numeric code of the proposition.
    uri : str
        API endpoint for this proposition.
    tipo_sigla : str
        Acronym of the proposition type.
    tipo_codigo : int
        Numeric code of the proposition type.
    numero : int
        Number of the proposition.
    ano : int
        Year of the proposition.
    ementa : str
        Summary (ementa) of the proposition.
    apresentacao : str
        Presentation timestamp, 'AAAA-MM-DD HH:MM'.
    uri_orgao_numerador : str
        API endpoint for the numbering organ.
    ultima_atualizacao : str
        Timestamp of the last status update.
    sequencia : int
        Sequence number of the proposition status.
    sigla_orgao : str
        Acronym of the organ.
    uri_orgao : str
        API endpoint for the organ.
    uri_ultimo_relator : str
        API endpoint for the last rapporteur.
    regime : str
        Processing regime.
    descricao_tramitacao : str
        Description of the current processing step.
    cod_tipo_tramitacao : str
        Code of the processing-step type.
    descricao_situacao : str
        Description of the proposition situation.
    cod_situacao : int
        Numeric code of the proposition situation.
    despacho : str
        Dispatch text.
    url : str
        URL of the proposition.
    ambito : str
        Scope of the proposition.
    uri_autores : str
        API endpoint for the authors.
    descricao_tipo : str
        Description of the proposition type.
    ementa_detalhada : str
        Detailed summary of the proposition.
    keywords : str
        Keywords of the proposition.
    uri_proposicao_principal : str
        API endpoint for the main proposition.
    uri_proposicao_anterior : str
        API endpoint for the previous proposition.
    uri_proposicao_posterior : str
        API endpoint for the following proposition.
    url_inteiro_teor : str
        URL of the full text.
    urn_final : str
        Final URN.
    texto : str
        Text of the proposition.
    justificativa : str
        Justification of the proposition.

    Examples
    --------
    Get the summary of proposition #15990.
    >>> prop = camara.Proposicao(cod=15990)
    >>> prop.ementa
    ... 'Cria salvaguardas para a tecnologia no campo nuclear...'

    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        dados = _get(['proposicoes', str(cod)])['dados']
        self.dados = dados
        # `status` groups the fields nested under 'statusProposicao'.
        status = dados['statusProposicao']

        self.uri = dados['uri']
        self.tipo_sigla = dados['siglaTipo']
        self.tipo_codigo = dados['codTipo']
        self.numero = dados['numero']
        self.ano = dados['ano']
        self.ementa = dados['ementa']
        self.apresentacao = dados['dataApresentacao']
        self.uri_orgao_numerador = dados['uriOrgaoNumerador']
        self.ultima_atualizacao = status['dataHora']
        self.sequencia = status['sequencia']
        self.sigla_orgao = status['siglaOrgao']
        self.uri_orgao = status['uriOrgao']
        self.uri_ultimo_relator = status['uriUltimoRelator']
        self.regime = status['regime']
        self.descricao_tramitacao = status['descricaoTramitacao']
        self.cod_tipo_tramitacao = status['codTipoTramitacao']
        self.descricao_situacao = status['descricaoSituacao']
        self.cod_situacao = status['codSituacao']
        self.despacho = status['despacho']
        self.url = status['url']
        self.ambito = status['ambito']
        self.uri_autores = dados['uriAutores']
        self.descricao_tipo = dados['descricaoTipo']
        self.ementa_detalhada = dados['ementaDetalhada']
        self.keywords = dados['keywords']
        self.uri_proposicao_principal = dados['uriPropPrincipal']
        self.uri_proposicao_anterior = dados['uriPropAnterior']
        self.uri_proposicao_posterior = dados['uriPropPosterior']
        self.url_inteiro_teor = dados['urlInteiroTeor']
        self.urn_final = dados['urnFinal']
        self.texto = dados['texto']
        self.justificativa = dados['justificativa']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Proposição {self.cod}"

    def autores(self) -> _pd.DataFrame:
        '''
        People and/or entities that authored the proposition.

        Besides deputies, authors can be senators, civil society,
        legislative assemblies and the Executive and Judiciary branches.
        By the Chamber's rules, everyone who signs a proposition is
        considered an author (art. 102), proponents and supporters alike.

        Returns
        -------
        pandas.core.frame.DataFrame
            Authors of the proposition.

        ----------------------------------------------------------------------
        '''

        dados = _get(path=['proposicoes', str(self.cod), 'autores'], params=None)
        return _df(dados, None)

    def relacionadas(
            self,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Propositions related to this one.

        Basic information on propositions that relate to this proposition
        in some way, such as opinions, requests, substitutes, etc.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Related propositions.

        ----------------------------------------------------------------------
        '''

        dados = _get(path=['proposicoes', str(self.cod), 'relacionadas'], params=None)
        return _df(dados, 'id' if index else None)

    def temas(
            self,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Thematic areas of the proposition.

        Each item describes one thematic area the proposition relates to,
        according to the official classification of the Chamber's
        Documentation and Information Center.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `codTema` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Thematic areas of the proposition.

        ----------------------------------------------------------------------
        '''

        dados = _get(path=['proposicoes', str(self.cod), 'temas'], params=None)
        return _df(dados, 'codTema' if index else None)

    def tramitacoes(
            self,
            inicio: str = None,
            fim: str = None,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        History of the processing steps of the proposition.

        Each item is a snapshot of information that may change at each
        processing step (such as regime and situation) plus what caused
        the new state. This representation is still provisional.

        Parameters
        ----------
        inicio : str (default=None)
            Start date of the processing, in 'AAAA-MM-DD' format.
        fim : str (default=None)
            End date of the processing, in 'AAAA-MM-DD' format.
        index : bool (default=False)
            If True, sets the `sequencia` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Processing steps of the proposition.

        ----------------------------------------------------------------------
        '''

        params = {}
        if inicio is not None:
            params['dataInicio'] = parse.data(inicio, 'camara')
        if fim is not None:
            params['dataFim'] = parse.data(fim, 'camara')

        dados = _get(path=['proposicoes', str(self.cod), 'tramitacoes'], params=params)
        return _df(dados, 'sequencia' if index else None)

    def votacoes(
            self,
            asc: bool = False,
            ordenar_por: str = 'dataHoraRegistro',
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Votings in the Chamber that had this proposition as object or that
        affected it.

        Complementary data on each voting can be obtained with the
        `camara.Votacao` object.

        Parameters
        ----------
        asc : bool (default=False)
            Sort direction: True for ascending, False for descending.
        ordenar_por : str (default='dataHoraRegistro')
            Field of the representation used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Votings on the proposition.

        ----------------------------------------------------------------------
        '''

        params = {
            'ordem': 'asc' if asc else 'desc',
            'ordenarPor': ordenar_por,
        }
        dados = _get(path=['proposicoes', str(self.cod), 'votacoes'], params=params)
        return _df(dados, 'id' if index else None)
class Votacao:
    '''
    Detailed information about a voting of the Chamber.

    Returns a detailed set of data about the voting, such as the
    propositions that may have been its object and the processing effects
    on other propositions registered as a consequence of this voting.

    Parameters
    ----------
    cod : str
        Alphanumeric code of the voting.

    Attributes
    ----------
    dados : dict
        Full raw payload returned by the API.
    cod : str
        Alphanumeric code of the voting.
    aprovacao : int
        Approval of the voting.
    data : str
        Date of the voting.
    data_regitro : str
        Registration timestamp of the voting.
    data_ultima_abertura : str
        Timestamp of the last opening of the voting.
    descricao : str
        Description of the voting.
    efeitos_registrados : list
        Registered effects.
    evento : int
        Numeric code of the voting event.
    orgao : int
        Numeric code of the voting organ.
    objetos_possiveis : list of dict
        Possible objects.
    proposicoes_afetadas : str
        Affected propositions.
    sigla_orgao : str
        Acronym of the organ.
    ultima_apresentacao_proposicao : dict
        Last presentation of the proposition.
    uri : str
        API endpoint for the voting.
    uri_evento : str
        API endpoint for the event.
    uri_orgao : str
        API endpoint for the organ.

    Examples
    --------
    Get the date of voting #2265603-43.
    >>> vot = camara.Votacao(cod='2265603-43')
    >>> vot.data
    ... '2020-12-22'

    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        dados = _get(['votacoes', str(cod)])['dados']
        self.dados = dados
        self.aprovacao = dados['aprovacao']
        self.data = dados['data']
        # NOTE: the attribute name keeps the historical misspelling
        # ('regitro' instead of 'registro') for backward compatibility.
        self.data_regitro = dados['dataHoraRegistro']
        self.data_ultima_abertura = dados['dataHoraUltimaAberturaVotacao']
        self.descricao = dados['descricao']
        self.efeitos_registrados = dados['efeitosRegistrados']
        self.evento = dados['idEvento']
        self.orgao = dados['idOrgao']
        self.objetos_possiveis = dados['objetosPossiveis']
        self.proposicoes_afetadas = dados['proposicoesAfetadas']
        self.sigla_orgao = dados['siglaOrgao']
        self.ultima_apresentacao_proposicao = dados['ultimaApresentacaoProposicao']
        self.uri = dados['uri']
        self.uri_evento = dados['uriEvento']
        self.uri_orgao = dados['uriOrgao']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Votação {self.cod}"

    def orientacoes(self, index=False) -> _pd.DataFrame:
        '''
        Vote recommended by the leaderships to their deputies.

        In many votings, party and bloc leaders recommend a vote to their
        parliamentarians; Government, Minority, Majority and Opposition
        leaderships do so as well. A leadership may also free its members
        to vote at will, or declare obstruction so its members do not count
        towards the quorum. If the voting had orientations, each item
        identifies a party, bloc or leadership and the recommended stance.
        Currently only Plenary votings have orientation data.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `codPartidoBloco` column as the DataFrame
            index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Recommendations by the leaderships to their deputies.

        ----------------------------------------------------------------------
        '''

        dados = _get(path=['votacoes', str(self.cod), 'orientacoes'], params=None)
        return _df(dados, 'codPartidoBloco' if index else None)

    def votos(self) -> _pd.DataFrame:
        '''
        How each parliamentarian voted in a nominal, open voting.

        If the voting was nominal and not secret, each item contains the
        basic identifiers of a deputy and the vote or stance registered.
        The result is an empty list for symbolic votings, where individual
        votes are not counted — although some symbolic votings do carry
        "vote" records for members who expressly asked to have their stance
        registered. Absent parliamentarians are not listed.

        Returns
        -------
        pandas.core.frame.DataFrame
            Individual votes.

        ----------------------------------------------------------------------
        '''

        dados = _get(path=['votacoes', str(self.cod), 'votos'], params=None)
        return _df(dados, None)
def lista_blocos(
        legislatura: int = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'nome',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Data on parliamentary blocs.

    In parliamentary activity, parties may join into blocs, working as a
    single "big party" with one leader and a shared set of vice-leaders.
    Blocs only exist until the end of the legislature in which they were
    created. By default this lists the blocs active at request time; if
    legislature numbers are passed via `legislatura`, blocs formed and
    extinguished in those legislatures are listed too.

    Parameters
    ----------
    legislatura : int (default=None)
        Legislature number the data must correspond to.
    pagina : int (default=1)
        Results page to fetch, starting from 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort direction: True for ascending, False for descending.
    ordenar_por : str (default='nome')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Data on parliamentary blocs.

    --------------------------------------------------------------------------
    '''

    params = {}
    if legislatura is not None:
        params['idLegislatura'] = legislatura
    params.update({
        'pagina': pagina,
        **({'itens': itens} if itens is not None else {}),
        'ordem': 'asc' if asc else 'desc',
        'ordenarPor': ordenar_por,
    })

    dados = _get(path='blocos', params=params)
    return _df(dados, 'id' if index else None)
def lista_deputados(
        nome: str = None,
        legislatura: int = None,
        uf: str = None,
        partido: str = None,
        sexo: str = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'nome',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    List and search deputies according to criteria.

    Returns basic data on deputies who were in parliamentary office during
    some time interval. If no time parameter (`legislatura` or `inicio`) is
    given, only deputies in office at request time are listed.

    Parameters
    ----------
    nome : str (default=None)
        Part of the parliamentarians' name.
    legislatura : int (default=None)
        Legislature number the data must correspond to.
    uf : str (default=None)
        Acronym of the federative unit (states and Federal District).
        If None, deputies from every state are returned.
    partido : str (default=None)
        Acronym of the party the deputies are affiliated to. Valid acronyms
        come from `camara.lista_partidos`. Note: different parties may use
        the same acronym in different legislatures.
    sexo : str (default=None)
        Letter for the gender of the parliamentarians:
        - 'M': male;
        - 'F': female.
    inicio : str (default=None)
        Start date of the time interval, in 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of the time interval, in 'AAAA-MM-DD' format.
    pagina : int (default=1)
        Results page to fetch, starting from 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort direction: True for ascending, False for descending.
    ordenar_por : str (default='nome')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of deputies.

    --------------------------------------------------------------------------
    '''

    # Optional filters: only sent when a value was provided.
    opcionais = (
        ('nome', nome),
        ('idLegislatura', legislatura),
        ('siglaUf', None if uf is None else parse.uf(uf)),
        ('siglaPartido', partido),
        ('siglaSexo', sexo),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por

    dados = _get(path='deputados', params=params)
    return _df(dados, 'id' if index else None)
def lista_eventos(
        tipo_evento: int = None,
        situacao: int = None,
        tipo_orgao: int = None,
        orgao: int = None,
        inicio: str = None,
        fim: str = None,
        hora_inicio: str = None,
        hora_fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'dataHoraInicio',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Events, past or scheduled, in the various organs of the Chamber.

    Each item carries basic information about an event of the Chamber's
    legislative organs within a time interval, configurable through the
    date/time parameters below. If none is passed, events from the five
    previous days, the five following days and the request day itself are
    listed.

    Parameters
    ----------
    tipo_evento : int (default=None)
        Numeric identifier of the event type; valid values come from
        `camara.referencias('tiposEvento')`.
    situacao : int (default=None)
        Numeric identifier of the event situation; valid values come from
        `camara.referencias('situacoesEvento')`.
    tipo_orgao : int (default=None)
        Numeric identifier of the type of organ holding the events; valid
        values come from `camara.referencias('tiposOrgao')`.
    orgao : int (default=None)
        Numeric identifier of the organ; identifiers come from
        `camara.lista_orgaos`.
    inicio : str (default=None)
        Start date of the time interval, in 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of the time interval, in 'AAAA-MM-DD' format.
    hora_inicio : str (default=None)
        Start time of the interval, 'HH:MM', Brasília time.
    hora_fim : str (default=None)
        End time of the interval, 'HH:MM', Brasília time.
    pagina : int (default=1)
        Results page to fetch, starting from 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort direction: True for ascending, False for descending.
    ordenar_por : str (default='dataHoraInicio')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Events, past or scheduled, in the organs of the Chamber.

    --------------------------------------------------------------------------
    '''

    # Optional filters: only sent when a value was provided.
    opcionais = (
        ('codTipoEvento', tipo_evento),
        ('codSituacao', situacao),
        ('codTipoOrgao', tipo_orgao),
        ('idOrgao', orgao),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
        ('horaInicio', hora_inicio),
        ('horaFim', hora_fim),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por

    dados = _get(path='eventos', params=params)
    return _df(dados, 'id' if index else None)
def lista_frentes(
        legislatura: int = None,
        pagina: int = 1,
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Parliamentary fronts of one or more legislatures.

    A parliamentary front is an official grouping of parliamentarians
    around a theme or proposal. Fronts exist until the end of the
    legislature in which they were created and may be recreated each
    legislature; some include both deputies and senators. A legislature
    number may be passed; if omitted, all fronts created since 2003 are
    returned.

    Parameters
    ----------
    legislatura : int (default=None)
        Legislature number the data must correspond to.
    pagina : int (default=1)
        Results page to fetch, starting from 1.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Parliamentary fronts of one or more legislatures.

    --------------------------------------------------------------------------
    '''

    params = {} if legislatura is None else {'idLegislatura': legislatura}
    params['pagina'] = pagina

    dados = _get(path='frentes', params=params)
    return _df(dados, 'id' if index else None)
def lista_legislaturas(
        data: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = False,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Periods of parliamentary mandates and activities of the Chamber.

    A legislature is the period of parliamentary work between one election
    and the next. Each item carries basic information about one of these
    periods; legislature numbers are sequential since the first one.

    Parameters
    ----------
    data : str (default=None)
        Date in 'AAAA-MM-DD' format. If present, returns basic information
        about the legislature in course on that date.
    pagina : int (default=1)
        Results page to fetch, starting from 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=False)
        Sort direction: True for ascending, False for descending.
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Legislatures of the Chamber.

    --------------------------------------------------------------------------
    '''

    params = {}

    if data is not None:
        # Route through parse.data for validation/normalization, the same
        # treatment every other date parameter in this module receives.
        params['data'] = parse.data(data, 'camara')
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por

    dados = _get(path='legislaturas', params=params)
    index_col = 'id' if index else None
    return _df(dados, index_col)
def lista_orgaos(
        sigla: str = None,
        tipo: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Committees and other legislative organs of the Chamber.

    Returns basic information about the legislative organs: identifiers,
    types and descriptions. The list can be filtered by identifier, organ
    type, acronym, situation, or the period in which the organs were
    active, where applicable.

    Parameters
    ----------
    sigla : str (default=None)
        Acronym officially used to designate the organ.
    tipo : int (default=None)
        Numeric code of the organ type; obtainable via `camara.referencias`.
    inicio : str (default=None)
        Start date, 'AAAA-MM-DD', of an interval in which the organs must
        have been active.
    fim : str (default=None)
        End date, 'AAAA-MM-DD', of an interval in which the organs must
        have been active.
    pagina : int (default=1)
        Results page to fetch, starting from 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort direction: True for ascending, False for descending.
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Committees and other legislative organs of the Chamber.

    --------------------------------------------------------------------------
    '''

    # Optional filters: only sent when a value was provided.
    opcionais = (
        ('sigla', sigla),
        ('codTipoOrgao', tipo),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por

    dados = _get(path='orgaos', params=params)
    return _df(dados, 'id' if index else None)
params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n dados = _get(path='orgaos', params=params)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n\ndef lista_partidos(\n legislatura: int = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = True,\n ordenar_por: str = 'sigla',\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Os partidos políticos que têm ou já tiveram parlamentares em exercício na\n Câmara.\n\n Retorna uma lista de dados básicos sobre os partidos políticos que têm ou\n já tiveram deputados na Câmara. Se não forem passados parâmetros, a função\n retorna os partidos que têm deputados em exercício no momento da\n requisição. É possível obter uma lista de partidos representados na Câmara\n em um certo intervalo de datas ou de legislaturas.\n \n Parâmetros\n ----------\n legislatura : int (default=None)\n Número da legislatura a qual os dados buscados devem corresponder.\n inicio : str (default=None)\n Data de início de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n fim : str (default=None)\n Data de término de um intervalo de tempo, no formato 'AAAA-MM-DD'.\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. 
Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com esta\n requisição.\n asc : bool (default=True)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='sigla')\n Qual dos elementos da representação deverá ser usado para aplicar\n ordenação à lista.\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de partidos políticos que têm ou já tiveram parlamentares em\n exercício na Câmara.\n\n --------------------------------------------------------------------------\n '''\n\n params = {}\n\n if legislatura is not None:\n params['idLegislatura'] = legislatura\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n dados = _get(path='partidos', params=params)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n\ndef lista_proposicoes(\n tipo: str = None,\n numero: int = None,\n ano: int = None,\n autor_cod: int = None,\n autor_nome: str = None,\n partido_sigla: str = None,\n partido_cod: int = None,\n autor_uf: str = None,\n keyword: str = None,\n tramitacao_senado: bool = None,\n apresentacao_inicio: str = None,\n apresentacao_fim: str = None,\n situacao: int = None,\n tema: int = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = True,\n ordenar_por: str = 'id',\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Lista de proposições na Câmara.\n\n Lista de informações básicas sobre projetos de lei, resoluções, medidas\n provisórias, emendas, pareceres e todos os outros 
tipos de proposições na\n Câmara. Por padrão, são retornadas todas as proposições que foram\n apresentadas ou tiveram alguma mudança de situação nos últimos 30 dias.\n Esse intervalo de tramitação pode ser configurado pelos parâmetros\n `inicio` e `fim`.\n\n Se for(em) passado(s) um ou mais dos parâmetros `id`, `ano`,\n `apresentacao_inicio`, `apresentacao_fim`, `autor_cod`, `autor_nome`,\n o intervalo de tramitação só será levado em consideração se os parâmetros\n `inico` e/ou `fim` estiverem explicitamente configurados. Se não\n estiverem, poderão ser listadas proposições que não tiveram tramitação\n recente (e a resposta pode demorar bastante).\n \n Parâmetros\n ----------\n tipo : str (default=None)\n Sigla do tipo das proposições que se deseja obter. A lista de tipos e\n siglas existentes pode ser obtida pela função `camara.referencias`.\n numero : int (default=None)\n Número oficialmente atribuídos às proposições segundo o art. 137 do\n Regimento Interno, como “PL 1234/2016”\n ano : int (default=None)\n Ano de apresentação das proposições que serão listadas no formato\n 'AAAA'.\n autor_cod : int (default=None)\n Código numérico identificador do deputado autor das proposições que\n serão listadas.\n autor_nome : str (default=None)\n Nome ou parte do nome do(s) autor(es) das proposições que se deseja\n obter. Deve estar entre aspas.\n partido_sigla : str (default=None)\n Sigla do partido a que pertençam os autores das proposições a serem\n listadas.\n partido_cod : int (default=None)\n Identificador numérico do partido a que pertençam os autores das\n proposições que serão listadas. 
Esses identificadores podem ser\n obtidos pela função `camara.lista_partidos` e são mais precisos do\n que as siglas, que podem ser usadas por partidos diferentes em épocas\n diferentes.\n autor_uf : str (default=None)\n Sigla da unidade da federação (estados e Distrito Federal) pela qual\n o(s) autor(es) das proposições selecionadas tenha(m) sido eleito(s).\n keyword : str (default=None)\n Palavra-chave sobre o tema a que a proposição se relaciona.\n tramitacao_senado : bool (default=None)\n Buscar proposições que já tenha tramitado no Senado.\n inicio : str (default=None)\n Data do início do intervalo de tempo em que tenha havido tramitação\n das proposições a serem listadas, no formato 'AAAA-MM-DD'. Se omitido,\n é assumido como a data de 30 dias anteriores à proposição.\n fim : str (default=None)\n Data do fim do intervalo de tempo em que tenha havido tramitação das\n proposições a serem listadas. Se omitido, é considerado ser o dia em\n que é feita a requisição.\n apresentacao_inicio : str (default=None)\n Data do início do intervalo de tempo em que tenham sido apresentadas\n as proposições a serem listadas, no formato 'AAAA-MM-DD'.\n apresentacao_fim : str (default=None)\n Data do fim do intervalo de tempo em que tenham sido apresentadas as\n proposições a serem listadas.\n situacao : int (default=None)\n Código numérico do tipo de situação em que se encontram as proposições\n que serão listadas. As situações possíveis podem ser obtidas pela\n função `camara.referencias`. Atenção: este parâmetro pode apresentar\n resultados inesperados, por problemas com o registro dos dados.\n tema : int (default=None)\n Código numérico das áreas temáticas das proposições que serão\n listadas. Os temas possíveis podem ser obtidos pela função\n `camara.referencias`.\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. 
Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com esta\n requisição.\n asc : bool (default=True)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='id')\n Qual dos elementos da representação deverá ser usado para aplicar\n ordenação à lista.\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de proposições na Câmara.\n\n --------------------------------------------------------------------------\n '''\n\n params = {}\n\n if tipo is not None:\n params['siglaTipo'] = tipo\n if numero is not None:\n params['numero'] = numero\n if ano is not None:\n params['ano'] = ano\n if autor_cod is not None:\n params['idDeputadoAutor'] = autor_cod\n if autor_nome is not None:\n params['autor'] = autor_nome\n if partido_sigla is not None:\n params['siglaPartidoAutor'] = partido_sigla\n if partido_cod is not None:\n params['idPartidoAutor'] = partido_cod\n if autor_uf is not None:\n params['siglaUfAutor'] = parse.uf(autor_uf)\n if keyword is not None:\n params['keywords'] = keyword\n if tramitacao_senado is not None:\n params['tramitacaoSenado'] = 'true' if tramitacao_senado else 'false'\n if apresentacao_inicio is not None:\n params['dataApresentacaoInicio'] = apresentacao_inicio\n if apresentacao_fim is not None:\n params['dataApresentacaoFim'] = apresentacao_fim\n if situacao is not None:\n params['codSituacao'] = situacao\n if tema is not None:\n params['codTema'] = tema\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n dados = 
_get(path='proposicoes', params=params)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n\ndef lista_votacoes(\n proposicao: int = None,\n evento: int = None,\n orgao: int = None,\n inicio: str = None,\n fim: str = None,\n pagina: int = 1,\n itens: int = None,\n asc: bool = False,\n ordenar_por: str = 'dataHoraRegistro',\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Lista de votações na Câmara.\n\n Retorna uma lista de informações básicas sobre as votações ocorridas em\n eventos dos diversos órgãos da Câmara. Se não forem passados parâmetros\n que delimitem o intervalo de tempo da pesquisa, são retornados dados sobre\n todas as votações ocorridas nos últimos 30 dias, em eventos de todos os\n órgãos.\n\n Os parâmetros de data permitem estender o período, mas por enquanto é\n necessário que as duas datas sejam de um mesmo ano. Quando apenas uma\n delas está presente, são retornadas somente as votações ocorridas no mesmo\n ano, antes de `fim` ou após `inicio`.\n \n Parâmetros\n ----------\n proposicao : int (default=None)\n Código numérico da proposição, que podem ser obtidos pela função\n `camara.lista_proposições`. Se presente, listará as votações que\n tiveram a proposição como objeto de votação ou que afetaram as\n proposições listadas.\n evento : int (default=None)\n Código numérico do evento realizado na Câmara, no qual tenham sido\n realizadas as votações a serem listadas. Os códigos podem ser obtidos\n pela função `camara.lista_eventos`. Somente os eventos deliberativos\n podem ter votações. Os eventos podem ter ocorrido fora do intervalo de\n tempo padrão ou definido por `inicio` e/ou `fim`.\n orgao : int (default=None)\n Código numérico do órgão da Câmara. Se presente, serão retornadas\n somente votações do órgão enumerado. 
Os códigos existentes podem ser\n obtidos pela função `camara.lista_orgaos`.\n inicio : str (default=None)\n Data em formato 'AAAA-MM-DD' para início do intervalo de tempo no qual\n tenham sido realizadas as votações a serem listadas. Se usado sozinho,\n esse parâmetro faz com que sejam retornadas votações ocorridas dessa\n data até o fim do mesmo ano. Se usado com `fim`, as duas datas devem\n ser de um mesmo ano.\n fim : str (default=None)\n Data em formato 'AAAA-MM-DD' que define o fim do intervalo de tempo no\n qual tenham sido realizadas as votações a serem listadas. Se usado\n sozinho, esse parâmetro faz com que sejam retornadas todas as votações\n ocorridas desde 1º de janeiro do mesmo ano até esta data. Se usado com\n `inicio`, é preciso que as duas datas sejam de um mesmo ano.\n pagina : int (default=1)\n Número da página de resultados, a partir de 1, que se deseja\n obter com a requisição, contendo o número de itens definido\n pelo parâmetro `itens`. Se omitido, assume o valor 1.\n itens : int (default=None)\n Número máximo de itens na página que se deseja obter com esta\n requisição.\n asc : bool (default=False)\n Se os registros são ordenados no sentido ascendente:\n - True: De A a Z ou 0 a 9 (ascendente);\n - False: De Z a A ou 9 a 0 (descendente).\n ordenar_por : str (default='dataHoraRegistro')\n Qual dos elementos da representação deverá ser usado para aplicar\n ordenação à lista.\n index : bool (default=False)\n Se True, define a coluna `id` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista de votações na Câmara.\n\n --------------------------------------------------------------------------\n '''\n\n params = {}\n\n if proposicao is not None:\n params['idProposicao'] = proposicao\n if evento is not None:\n params['idEvento'] = evento\n if orgao is not None:\n params['idOrgao'] = orgao\n if inicio is not None:\n params['dataInicio'] = parse.data(inicio, 'camara')\n if fim is not None:\n params['dataFim'] = 
parse.data(fim, 'camara')\n params['pagina'] = pagina\n if itens is not None:\n params['itens'] = itens\n params['ordem'] = 'asc' if asc else 'desc'\n params['ordenarPor'] = ordenar_por\n\n dados = _get(path='votacoes', params=params)\n index_col = 'id' if index else None\n return _df(dados, index_col)\n\n\n\ndef referencias(\n lista: str,\n index: bool = False\n ) -> _pd.DataFrame:\n '''\n Listas de valores válidos para as funções deste módulo.\n\n Parâmetros\n ----------\n lista : str\n Referências que serão listadas. Deve ser uma destas opções:\n - 'autores'\n - 'temas'\n - 'eventos'\n - 'orgaos'\n - 'proposicoes'\n - 'tramitacoes'\n - 'ufs'\n - 'situacoes_deputados'\n - 'situacoes_eventos'\n - 'situacoes_orgaos'\n - 'situacoes_proposicoes'\n index : bool (default=False)\n Se True, define a coluna `cod` como index do DataFrame.\n\n Retorna\n -------\n pandas.core.frame.DataFrame\n Lista das referências válidas.\n '''\n \n referencia = {\n 'autores': 'proposicoes/codTipoAutor',\n 'temas': 'proposicoes/codTema',\n 'eventos': 'tiposEvento',\n 'orgaos': 'tiposOrgao',\n 'proposicoes': 'tiposProposicao',\n 'tramitacoes': 'tiposTramitacao',\n 'ufs': 'uf',\n 'situacoes_deputados': 'situacoesDeputado',\n 'situacoes_eventos': 'situacoesEvento',\n 'situacoes_orgaos': 'situacoesOrgao',\n 'situacoes_proposicoes': 'situacoesProposicao'\n }\n \n if lista in referencia.keys():\n data = _get(f'referencias/{referencia[lista]}')\n else:\n raise TypeError('Referência inválida. Insira um dos seguintes valores para `lista`: ' \\\n + ', '.join(list(referencia.keys())))\n \n df = _pd.DataFrame(data['dados'])\n if index:\n df.set_index('cod', inplace=True)\n \n return df"
] |
[
[
"pandas.DataFrame"
]
] |
dpnemergut/VELOCIraptor-STF
|
[
"5eb6351723002a9813753b112a9a8ff62efce295"
] |
[
"examples/catalocomparison_VRinputonly.py"
] |
[
"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\n This python script reads two input VR particle catalog files and quickly compares them. It determines\n if the catalogs match. If they do not, further tests are run. For a perfect match, particles are in the\n same order. Information is passed to the script via a simple text file that has the following format\n VRrefbasefilename VRrefinputformat\n VRcompbasefilename VRcompinputformat\n\n\"\"\"\n\n\nimport sys,os,string,time,re,struct\nfrom subprocess import call\nimport numpy as np\n\n#load VR python routines\npathtovelociraptor=sys.argv[0].split('examples')[0]\nsys.path.append(pathtovelociraptor+'/tools/')\nimport velociraptor_python_tools as vpt\n\ndef PerfectCrossMatch(VRdata):\n ref_properties_num = VRdata['ref']['properties']['num']\n cmp_properties_num = VRdata['comp']['properties']['num']\n ref_particles_npart = VRdata['ref']['particles']['Npart']\n cmp_particles_npart = VRdata['comp']['particles']['Npart']\n print(\"\")\n print(\"==========================\")\n print(\"VRdata['ref']['properties']['num']\", ref_properties_num)\n print(\"VRdata['comp']['properties']['num']\", cmp_properties_num)\n print(\"VRdata['ref']['particles']['Npart']\", ref_particles_npart)\n print(\"VRdata['comp']['particles']['Npart']\", cmp_particles_npart)\n print(\"VRdata['ref']['particles']['Npart'].size\", ref_particles_npart.size)\n print(\"VRdata['comp']['particles']['Npart'].size\", cmp_particles_npart.size)\n\n iflag1 = (ref_properties_num != cmp_properties_num)\n iflag2 = (ref_particles_npart.size != cmp_particles_npart.size)\n if (iflag1):\n print(\"\")\n print(\"==========================\")\n print('Catalog contains different number of objects ... Not perfect match')\n if (iflag2):\n print(\"\")\n print(\"==========================\")\n print('Particle catalog contains different number of particles ... 
Not perfect match')\n if (iflag1 or iflag2):\n return 0\n \n # TODO: Encapsulate block to ParticleComparison\n num = VRdata['ref']['particles']['Npart'].size\n ref = np.concatenate(VRdata['ref']['particles']['Particle_IDs'])\n comp = np.concatenate(VRdata['comp']['particles']['Particle_IDs'])\n print(\"\")\n print(\"==========================\")\n print(\"Particle ID comparison\")\n print(\"Size: \", num)\n print(\"ref\", ref)\n print(\"comp\", comp)\n if (np.array_equal(ref,comp) == False):\n if (np.where(np.isin(ref,comp))[0].size == num):\n print(\"\")\n print(\"==========================\")\n print('Particle catalog contains same number of particles but IDs in different order ... Not perfect match but close')\n return 1\n else:\n list_ref_missing_particles = np.setdiff1d(ref,comp)\n print(\"\")\n print(\"==========================\")\n print('Particle catalog contains same number of particles but IDs differ ... Not perfect match')\n print(\"Unique values in ref not present in comp\")\n print(list_ref_missing_particles)\n return 0\n return 2\n\ndef CheckProperties(VRdata):\n iflag1 = (VRdata['ref']['properties']['num'] != VRdata['comp']['properties']['num'])\n iflag2 = (VRdata['ref']['particles']['Npart'].size != VRdata['comp']['particles']['Npart'].size)\n proplist = ['Mass_tot', 'Vmax']\n if (iflag1 == True):\n return 0\n partdiff = np.zeros(VRdata['ref']['properties']['num'], dtype = np.int32)\n propdiff = np.zeros(VRdata['ref']['properties']['num'], dtype = np.int32)\n num = VRdata['ref']['properties']['num']\n #number of objects the same but particle list ordered differently\n time1 = time.clock()\n for i in range(num):\n if not np.array_equal(VRdata['ref']['particles']['Particle_IDs'][i], VRdata['comp']['particles']['Particle_IDs'][i]):\n partdiff[i] = 1\n for prop in proplist:\n if (VRdata['ref']['properties'][prop][i] != VRdata['comp']['properties'][prop][i]):\n propdiff[i] = 1\n numpartdiff = np.sum(partdiff)\n numpropdiff = np.sum(propdiff)\n 
print('Finished processing individual objects in ', time.clock()-time1)\n if (numpartdiff > 0):\n print('Difference in particles', numpartdiff, ' of', num)\n if (numpropdiff > 0):\n print('Difference in properties', numpropdiff, ' of', num)\n if (numpropdiff == 0 and numpartdiff > 0):\n print('Difference in order of particles but not resulting properties, nor number of particles in each object')\n return 1\n return 0\n\n#if __name__ == '__main__':\n\nprint('Running', sys.argv[0])\nprint('Input is file name of config file')\nprint('Config file should contain the following')\nprint('VRrefbasefilename VRrefinputformat')\nprint('VRcompbasefilename VRcompinputformat')\n\nif (os.path.isfile(sys.argv[1])==False):\n print(\"Missing input info file\",sys.argv[1])\n exit(1)\n\n#load the plot info file,\nprint(\"Reading reference VR file\", sys.argv[1])\ninfofile=open(sys.argv[1], 'r')\nVRdata = {'label': None}\n\ntime1=time.clock()\nfor label in ['ref', 'comp']:\n data = infofile.readline().strip().split(' ')\n VRdata[label]= {'filename': None, 'inputformat': None, 'particles': None, 'properties': None, 'num': 0}\n VRdata[label]['filename'], VRdata[label]['inputformat'] = data[0], np.int32(data[1])\n print('Reading ',label,' stored in ',VRdata[label]['filename'])\n VRdata[label]['particles'] = vpt.ReadParticleDataFile(VRdata[label]['filename'], VRdata[label]['inputformat'])\n VRdata[label]['properties'], numhalos = vpt.ReadPropertyFile(VRdata[label]['filename'], VRdata[label]['inputformat'])\n VRdata[label]['properties']['num'] = numhalos\n\nprint('Finished reading information', time.clock()-time1)\nprint('Checking for perfect match')\niflag = PerfectCrossMatch(VRdata)\nif (iflag == 1):\n CheckProperties(VRdata)\n\n# Return an overall PASS or FAIL\nif iflag == 0:\n print('\\n*********************')\n print('* Comparison FAILED *')\n print('*********************\\n')\n exit(1)\nelse:\n print('\\n*********************')\n print('* Comparison PASSED *')\n 
print('*********************\\n')\n exit(0)\n"
] |
[
[
"numpy.concatenate",
"numpy.array_equal",
"numpy.setdiff1d",
"numpy.zeros",
"numpy.sum",
"numpy.int32",
"numpy.isin"
]
] |
tuananhbui89/Adversarial-Divergence-Reduction
|
[
"7304fa514b6c044aa19afce87c3794f9619e9665"
] |
[
"ADR_tf/utils_grad.py"
] |
[
"import tensorflow as tf \nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom mpl_toolkits import mplot3d\nfrom matplotlib.ticker import FormatStrFormatter\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\ndef plot_grad_wrt_x_or_z(model, sess, x, y, eps, savepath, title): \n for plotgrad in ['x', 'z']:\n figpath = savepath + 'wrt_{}.png'.format(plotgrad)\n figtitle = title + ',wrt={}'.format(plotgrad)\n if plotgrad == 'x': \n c_grad = sess.run(model.X_grad, feed_dict={model.x_eval:x, model.y_eval:y})\n c_input = x\n elif plotgrad == 'z': \n c_grad, c_input = sess.run([model.z_grad, model.z_eval], feed_dict={model.x_eval:x, model.y_eval:y})\n\n sign_grad = np.sign(c_grad)\n p_rand = get_random_perpendicular_vector(sign_grad)\n grid_x, grid_y, grid_z, grid_l = get_loss_grid(model, sess, c_input, y,\n grad=c_grad, rand=p_rand, \n eps=eps, num_points=17, plotgrad=plotgrad)\n \n if np.prod(np.shape(x)) == 784: \n plot_grid(figpath, np.reshape(x, [28,28]), grid_x, grid_y, grid_z, grid_l, figtitle)\n else:\n plot_grid(figpath, np.reshape(x, [32,32,3]), grid_x, grid_y, grid_z, grid_l, figtitle)\n\n\ndef get_random_perpendicular_vector(grad, random_state=6789):\n org_shape = np.shape(grad)\n grad = np.reshape(grad, [1,-1])\n random = np.random.RandomState(random_state)\n rand = random.choice([1, -1], grad.shape)\n assert(len(grad.shape)==2)\n assert(np.shape(grad)[0]==1)\n\n while np.sum(rand * grad) != 0:\n if np.sum(rand * grad) % 2 == 1:\n idx = random.randint(rand.shape[1])\n rand[0, idx] = 0\n else:\n # modify an entry to increase or decrease the dot product\n idx = random.randint(rand.shape[1])\n if np.sum(rand * grad) < 0 and rand[0, idx] * grad[0, idx] < 0:\n rand[0, idx] = -rand[0, idx]\n elif np.sum(rand * grad) > 0 and rand[0, idx] * grad[0, idx] > 0:\n rand[0, idx] = -rand[0, idx]\n return np.reshape(rand, org_shape)\n\ndef get_loss_grid(model, sess, image, label, grad, rand, eps=0.5, num_points=17, 
plotgrad='x'): \n if type(eps) is not list:\n x = np.outer(np.linspace(-eps, eps, num_points), np.ones(num_points))\n else: \n x = np.outer(np.linspace(eps[0], eps[1], num_points), np.ones(num_points))\n y = x.copy().T\n\n x_flatten = x.flatten()\n y_flatten = y.flatten()\n N = len(x_flatten)\n\n # assert(np.max(image) < 2) \n rand = rand #/ 255. \n grad = grad #/ 255. \n\n neighbor_labels = [label] * N\n neighbor_images = np.zeros((N, *image.shape[1:]))\n for t, (i, j) in enumerate(zip(x_flatten, y_flatten)):\n neighbor_images[t] = image + i * rand + j * grad\n\n neighbor_images = np.expand_dims(neighbor_images, axis=1) # [N, 1, d]\n\n z = np.zeros(N)\n cl = np.zeros(N)\n for i in range(N):\n if plotgrad == 'x': \n z[i], cl[i] = sess.run([model.mean_xent_eval, model.X_cls],feed_dict={model.x_eval: neighbor_images[i], model.y_eval: neighbor_labels[i]})\n elif plotgrad == 'z': \n z[i], cl[i] = sess.run([model.mean_xent_eval, model.X_cls],feed_dict={model.z_eval: neighbor_images[i], model.y_eval: neighbor_labels[i]})\n\n z = z.reshape(num_points, num_points)\n cl = cl.reshape(num_points, num_points)\n\n return x, y, z, cl \n\ndef normalize_image(image): \n print('normalize_image, image range: ', np.max(image), np.min(image))\n if image.dtype in ['float32', 'float64']: \n assert(np.max(image) >= 100.) \n image = image / 255. 
\n\n elif image.dtype in ['int32', 'int64']: \n assert(np.max(image) == 255)\n image = image.astype(np.float32)\n image = image / 255.\n\n return image\n\ndef plot_grid(figname, image, x, y, z, cl, title): \n max_z = np.max(z)\n cmap = ListedColormap(['blue', 'orange', 'green', 'red', 'purple', \n 'brown', 'pink', 'gray', 'olive', 'cyan'])\n\n # visualize\n size = 5\n fig = plt.figure(figsize=(size * 3, size))\n\n # first subplot\n ax = fig.add_subplot(1, 3, 1)\n ax.imshow(normalize_image(image))\n ax.grid(False)\n ax.axis('off')\n ax.set_title('Input image')\n\n # second subplot\n ax = fig.add_subplot(1, 3, 2, projection='3d')\n ax.plot_surface(y, x, z, cmap=cm.coolwarm, linewidth=0, antialiased=False, shade=True, alpha=0.5)\n\n ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set_zlim(0, max_z)\n ax.set_xlabel('Grad')\n ax.set_ylabel('Rand')\n ax.set_title('Prediction Surface')\n\n # Third subplot \n ax = fig.add_subplot(1, 3, 3)\n psm = ax.pcolormesh(cl, cmap=cmap, rasterized=True, vmin=0, vmax=10)\n fig.colorbar(psm, ax=ax)\n start, end = ax.get_xlim()\n ax.set_xticks(np.arange(start, end, (start-end)/10.))\n ax.set_xticklabels([str(t) for t in np.arange(start, end, (start-end)/10.)])\n start, end = ax.get_ylim()\n ax.set_yticks(np.arange(start, end, (start-end)/10.))\n ax.set_yticklabels([str(t) for t in np.arange(start, end, (start-end)/10.)])\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set_xlabel('Grad')\n ax.set_ylabel('Rand')\n ax.set_title(title) \n\n plt.tight_layout()\n plt.savefig(figname, dpi=300)\n plt.close()\n\ndef plot_mul_grid(figname, image, xes, yes, zes, cles, titles): \n nb = 1 + len(zes)\n\n size = 5 \n fig = 
plt.figure(figsize=(size*nb, 2*size)) # 2 row and nb col \n\n ax = fig.add_subplot(2, nb, 1)\n ax.imshow(normalize_image(image))\n ax.grid(False)\n ax.axis('off')\n ax.set_title('Input image')\n cmap = ListedColormap(['blue', 'orange', 'green', 'red', 'purple', \n 'brown', 'pink', 'gray', 'olive', 'cyan'])\n for i , (x, y, z, cl, title) in enumerate(zip(xes, yes, zes, cles, titles)): \n max_z = np.max(z)\n ax = fig.add_subplot(2, nb, i+2, projection='3d')\n ax.plot_surface(y, x, z, cmap=cm.coolwarm, linewidth=0, antialiased=False, shade=True, alpha=0.5)\n\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set_zlim(0, max_z)\n ax.set_xlabel('Grad')\n ax.set_ylabel('Rand')\n ax.set_title(title)\n\n ax = fig.add_subplot(2, nb, i+2+nb)\n psm = ax.pcolormesh(cl, cmap=cmap, rasterized=True, vmin=0, vmax=10)\n fig.colorbar(psm, ax=ax)\n start, end = ax.get_xlim()\n ax.set_xticks(np.arange(start, end, (start-end)/10.))\n ax.set_xticklabels([str(t) for t in np.arange(start, end, (start-end)/10.)])\n start, end = ax.get_ylim()\n ax.set_yticks(np.arange(start, end, (start-end)/10.))\n ax.set_yticklabels([str(t) for t in np.arange(start, end, (start-end)/10.)])\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set_xlabel('Grad')\n ax.set_ylabel('Rand')\n ax.set_title(title) \n\n plt.tight_layout()\n plt.savefig(figname, dpi=300)\n plt.close() \n\ndef plot_mul_grid_with_adv(figname, image, adv, xes, yes, zes, cles, titles): \n nb = 1 + len(zes)\n\n size = 5 \n fig = plt.figure(figsize=(size*nb, 2*size)) \n\n ax = fig.add_subplot(2, nb, 1)\n ax.imshow(normalize_image(image))\n ax.grid(False)\n ax.axis('off')\n ax.set_title('Input image')\n\n ax = 
fig.add_subplot(2, nb, 1+nb)\n ax.imshow(normalize_image(adv))\n ax.grid(False)\n ax.axis('off')\n ax.set_title('Adversarial image')\n\n cmap = ListedColormap(['blue', 'orange', 'green', 'red', 'purple', \n 'brown', 'pink', 'gray', 'olive', 'cyan'])\n\n for i , (x, y, z, cl, title) in enumerate(zip(xes, yes, zes, cles, titles)): \n max_z = np.max(z)\n ax = fig.add_subplot(2, nb, i+2, projection='3d')\n ax.plot_surface(y, x, z, cmap=cm.coolwarm, linewidth=0, antialiased=False, shade=True, alpha=0.5)\n\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set_zlim(0, max_z)\n ax.set_xlabel('Grad')\n ax.set_ylabel('Rand')\n ax.set_title(title)\n\n ax = fig.add_subplot(2, nb, i+2+nb)\n psm = ax.pcolormesh(cl, cmap=cmap, rasterized=True, vmin=0, vmax=10)\n fig.colorbar(psm, ax=ax)\n start, end = ax.get_xlim()\n ax.set_xticks(np.arange(start, end, (start-end)/10.))\n ax.set_xticklabels([str(t) for t in np.arange(start, end, (start-end)/10.)])\n start, end = ax.get_ylim()\n ax.set_yticks(np.arange(start, end, (start-end)/10.))\n ax.set_yticklabels([str(t) for t in np.arange(start, end, (start-end)/10.)])\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set_xlabel('Grad')\n ax.set_ylabel('Rand')\n ax.set_title(title) \n ax.grid(True)\n\n plt.tight_layout()\n plt.savefig(figname, dpi=300)\n plt.close() \n\ndef save_image(image, savepath): \n plt.figure()\n plt.imshow(normalize_image(image))\n plt.axis('off') \n plt.savefig(savepath, dpi=300)\n plt.close()\n"
] |
[
[
"numpy.min",
"numpy.sign",
"numpy.max",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"numpy.expand_dims",
"numpy.reshape",
"numpy.zeros",
"matplotlib.pyplot.close",
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.colors.ListedColormap",
"numpy.random.RandomState",
"numpy.sum",
"numpy.ones",
"numpy.linspace"
]
] |
itchono/Electric-Atoms
|
[
"6f72cc5c400f9a73b641cb21f317cdb4e98e7838"
] |
[
"Magnetic Coil Project/4_DoubleNumba.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pickle\nimport time\nfrom numba import njit\n\nimport cProfile\n\n\n# store shape as a series of points\n\n# coil points stored as columns of 3 x n matrix, in cm\n# current stored in amps\n\n# assume infinitely thin conductor\n\nCOIL = np.array([[0,0,0], [10, 0, 0], [10, 10, 0], [20, 10, 0]]).T\nCURRENT = 1\n\ndef getEquidistantPoints(p1, p2, parts):\n # from stackoverflow\n return np.column_stack((np.linspace(p1[0], p2[0], parts+1),\n np.linspace(p1[1], p2[1], parts+1),\n np.linspace(p1[2], p2[2], parts+1)))\n\ndef sliceCoil(coil, steplength):\n '''\n Slices a coil into smaller steplength-sized pieces\n Takes on the order of 1-3 ms currently for the simple coil\n '''\n newcoil = np.zeros((1, 3)) # fill with dummy column\n\n segment_starts = coil[:,:-1]\n segment_ends = coil[:,1:]\n # determine start and end of each segment\n\n segments = segment_ends-segment_starts\n segment_lengths = mag_r = np.apply_along_axis(np.linalg.norm, 0, segments)\n # create segments; determine start and end of each segment, as well as segment lengths\n\n # chop up into smaller bits (elements)\n stepnumbers = (segment_lengths/steplength).astype(int)\n # determine how many steps we must chop each segment into\n\n for i in range(segments.shape[0]):\n # still slow; TODO how to turn into numpy?\n newrows = getEquidistantPoints(segment_starts[:,i], segment_ends[:,i], stepnumbers[i])\n # set of new interpolated points to feed in\n newcoil = np.vstack((newcoil, newrows))\n\n return newcoil[1:,:].T # return non-dummy columns\n\n@njit\ndef calculateField(coil, current, position):\n '''\n Calculates magnetic field vector as a result of some position vector tuple (x, y, z)\n attempting to use numba for performance improvement\n '''\n position = np.array(position)\n position = np.reshape(position, (1,3))\n\n FACTOR = 10**(-7) # equals mu_0 / 4pi\n\n B = np.zeros((1,3))\n\n for i in 
range(coil.shape[1]-1):\n start = coil[:,i]\n end = coil[:,i+1]\n # determine start and end points of our line segment\n\n dl = end - start\n dl = dl.T\n midstep = (start + end)/2 \n midstep = midstep.T\n # this is the effective position of our element (r' in the paper)\n\n # WEIRD REALIGNMENTS FOR NUMBA TO WORK PLEASE\n\n db = current * np.cross(dl, (position - midstep)) * FACTOR / (np.linalg.norm(position - midstep) ** 3) \n # Biot-Savart Law\n\n B += db\n \n return B[0]\n\n@njit\ndef produceModel(coil, current, startpoint, steplength):\n '''\n Generates a set of field vector values for each tuple (x, y, z) in the space\n\n Coil: Input Coil Positions in format specified above, already sub-divided into small pieces\n Current: Amount of current in amps flowing through coil from [start of coil] to [end of coil]\n Startpoint: (x, y, z) = (0, 0, 0) position of the box (30 x 15 x 15) cm\n Steplength: Spatial resolution (in cm)\n '''\n\n model = {}\n\n BOX_SIZE = (30, 15, 15) # dimensions of box\n\n for x in range(0, BOX_SIZE[0] + steplength, steplength):\n for y in range(0, BOX_SIZE[1] + steplength, steplength):\n for z in range(0, BOX_SIZE[2] + steplength, steplength):\n # print(\"Point {}\".format((x,y,z)))\n model[(x+startpoint[0],y+startpoint[1],z+startpoint[2])] = calculateField(coil, current, (x+startpoint[0],y+startpoint[1],z+startpoint[2]))\n\n return model\n\nif __name__ == \"__main__\":\n '''\n Numba Speed Test\n Record to beat: 9 seconds [numpy]\n '''\n chopped = sliceCoil(COIL, 1)\n\n for i in range(20):\n t_start = time.perf_counter()\n\n model = produceModel(chopped, CURRENT, (-7.5, -2.5, -2.5), 1)\n # Producemodel is NOT the bottleneck, not much benefit compared to v3\n\n t_end = time.perf_counter()\n\n print(\"T: {}\".format(t_end-t_start))\n # STUPID FAST --> 0.5 seconds\n \n \n\n \n\n\n\n"
] |
[
[
"numpy.array",
"numpy.linalg.norm",
"numpy.reshape",
"numpy.zeros",
"numpy.apply_along_axis",
"numpy.linspace",
"numpy.cross",
"numpy.vstack"
]
] |
jsgilberto/imu
|
[
"ace256362e8fa767c0e7c769f9cff641a3e66f76"
] |
[
"main.py"
] |
[
"import pygame\nfrom pygame.locals import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom sensor import Sensor\nfrom madgwick import Madgwick\nimport numpy as np\nimport math\n\nvertices = (\n (1, -1, -1),\n (1, 1, -1),\n (-1, 1, -1),\n (-1, -1, -1),\n (1, -1, 1),\n (1, 1, 1),\n (-1, -1, 1),\n (-1, 1, 1)\n )\n\nedges = (\n (0,1),\n (0,3),\n (0,4),\n (2,1),\n (2,3),\n (2,7),\n (6,3),\n (6,4),\n (6,7),\n (5,1),\n (5,4),\n (5,7)\n )\n\ndef Cube():\n glBegin(GL_LINES)\n for edge in edges:\n for vertex in edge:\n glVertex3fv(vertices[vertex])\n glEnd()\n\ndef main():\n pygame.init()\n display = (800,600)\n pygame.display.set_mode(display, DOUBLEBUF|OPENGL)\n\n gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\n glTranslatef(0.0,0.0, -5)\n\n mpu9250 = Sensor('/dev/ttyACM0', 115200)\n m = Madgwick()\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n # sensor data\n # mpu9250.update_values()\n \n # using average values\n mpu9250.update_values_with_moving_average(15)\n accel = mpu9250._accel_avg\n gyro = mpu9250._gyro_avg\n mag = mpu9250._mag_avg\n # madgwick algorithm\n m.update(accel, gyro * np.pi / 180, mag)\n q = m._quaternion_avg\n q = np.squeeze(np.asarray(q))\n\n glMatrixMode(GL_MODELVIEW)\n glLoadMatrixf(Madgwick.quat_to_rotmatrix(q))\n glScalef(0.2, 0.2, 0.2)\n #glRotatef(q[0], q[1], q[2], q[3])\n #glRotatef(1, 3, 1, 1)\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n Cube()\n pygame.display.flip()\n pygame.time.wait(10)\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"numpy.asarray"
]
] |
amustafa9/SEG-2020-Spatiotemporal-modeling-for-seismic-inversion
|
[
"6443710367e05e71e71cf4cbfe8671767a52b26a"
] |
[
"train-2D-TCN.py"
] |
[
"\n# imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport torch\nfrom os.path import join\nimport zipfile\nfrom utils import extract, standardize\nfrom datasets import SeismicDataset2D\nfrom torch.utils.data import DataLoader\nfrom model2D import Model2D\nfrom sklearn.metrics import r2_score\nimport errno\nimport argparse\n\n \ndef preprocess(no_wells):\n \"\"\"Function initializes data, performs standardization, and train test split\n \n Parameters:\n ----------\n no_wells : int,\n number of evenly spaced wells and seismic samples to be evenly sampled \n from seismic section.\n\n \n Returns\n -------\n seismic : array_like, shape(num_traces, depth samples)\n 2-D array containing seismic section \n \n model : array_like, shape(num_wells, depth samples)\n 2-D array containing model section \n\n \"\"\"\n \n # get project root directory\n project_root = os.getcwd()\n \n if ~os.path.isdir('data'): # if data directory does not exists then extract\n extract('data.zip', project_root)\n \n \n # Load data\n seismic = np.load(join('data','poststack_seam_seismic.npy')).squeeze()[:, 50:]\n seismic = seismic[::2, :]\n \n # Load targets and standardize data\n model = np.load(join('data','seam_elastic_model.npy'))[::3,:,::2][:, :, 50:]\n model = model[:,0,:] * model[:,2,:]\n \n # standardize\n seismic, model = standardize(seismic, model, no_wells)\n \n return seismic, model\n\n\ndef train(**kwargs):\n \"\"\"Function trains 2-D TCN as specified in the paper\"\"\"\n \n # obtain data\n seismic, model = preprocess(kwargs['no_wells'])\n \n \n # specify width of seismic image samples around each pseudolog\n width = 7\n offset = int(width/2)\n \n # specify pseudolog positions for training and validation\n traces_seam_train = np.linspace(offset, len(model)-offset-1, kwargs['no_wells'], dtype=int)\n traces_seam_validation = np.linspace(offset, len(model)-offset-1, 3, dtype=int)\n \n seam_train_dataset = SeismicDataset2D(seismic, model, traces_seam_train, 
width)\n seam_train_loader = DataLoader(seam_train_dataset, batch_size = len(seam_train_dataset))\n \n seam_val_dataset = SeismicDataset2D(seismic, model, traces_seam_validation, width)\n seam_val_loader = DataLoader(seam_val_dataset, batch_size = len(seam_val_dataset))\n \n \n # define device for training\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # set up models\n model_seam = Model2D(1,1,[10, 30, 60, 90, 120], 9, 0.4).to(device)\n \n # Set up loss\n criterion = torch.nn.MSELoss()\n \n \n optimizer_seam = torch.optim.Adam(model_seam.parameters(),\n weight_decay=0.0001,\n lr=0.001)\n \n # start training \n for epoch in range(kwargs['epochs']):\n \n model_seam.train()\n optimizer_seam.zero_grad()\n \n \n for x,y in seam_train_loader:\n y_pred, x_hat = model_seam(x)\n loss_train = criterion(y_pred, y) + criterion(x_hat, x)\n \n for x, y in seam_val_loader:\n model_seam.eval()\n y_pred, _ = model_seam(x)\n val_loss = criterion(y_pred, y)\n \n \n loss_train.backward()\n optimizer_seam.step()\n \n print('Epoch: {} | Seam Loss: {:0.4f} | Val Loss: {:0.4f} \\\n '.format(epoch, loss_train.item(), val_loss.item()))\n\n \n # save trained models\n if not os.path.isdir('saved_models'): # check if directory for saved models exists\n os.mkdir('saved_models')\n \n torch.save(model_seam.state_dict(), 'saved_models/model_seam.pth')\n\ndef test(**kwargs):\n \"\"\"Function tests the trained network on SEAM and Marmousi sections and \n prints out the results\"\"\"\n \n # obtain data\n seismic, model = preprocess(kwargs['no_wells'])\n \n \n # define device for training\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n # specify width of seismic image samples around each pseudolog\n width = 7\n offset = int(width/2)\n \n # specify pseudolog positions for testing \n traces_seam_test = np.linspace(offset, len(model)-offset-1, len(model)-int(2*offset), dtype=int)\n \n seam_test_dataset = SeismicDataset2D(seismic, model, 
traces_seam_test, width)\n seam_test_loader = DataLoader(seam_test_dataset, batch_size = 8)\n \n # load saved models\n if not os.path.isdir('saved_models'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), 'saved_models')\n \n # set up models\n model_seam = Model2D(1,1,[10, 30, 60, 90, 120], 9, 0.4).to(device)\n model_seam.load_state_dict(torch.load('saved_models/model_seam.pth'))\n \n # infer on SEAM\n print(\"\\nInferring on SEAM...\")\n x, y = seam_test_dataset[0] # get a sample\n AI_pred = torch.zeros((len(seam_test_dataset), y.shape[-1])).float().to(device)\n AI_act = torch.zeros((len(seam_test_dataset), y.shape[-1])).float().to(device)\n \n mem = 0\n with torch.no_grad():\n for i, (x,y) in enumerate(seam_test_loader):\n model_seam.eval()\n y_pred, _ = model_seam(x)\n AI_pred[mem:mem+len(x)] = y_pred.squeeze().data\n AI_act[mem:mem+len(x)] = y.squeeze().data\n mem += len(x)\n del x, y, y_pred\n \n vmin, vmax = AI_act.min(), AI_act.max()\n\n AI_pred = AI_pred.detach().cpu().numpy()\n AI_act = AI_act.detach().cpu().numpy()\n print('r^2 score: {:0.4f}'.format(r2_score(AI_act.T, AI_pred.T)))\n print('MSE: {:0.4f}'.format(np.sum((AI_pred-AI_act).ravel()**2)/AI_pred.size))\n print('MAE: {:0.4f}'.format(np.sum(np.abs(AI_pred - AI_act)/AI_pred.size)))\n print('MedAE: {:0.4f}'.format(np.median(np.abs(AI_pred - AI_act))))\n \n fig, (ax1, ax2) = plt.subplots(2,1, figsize=(12,12))\n ax1.imshow(AI_pred.T, vmin=vmin, vmax=vmax, extent=(0,35000,15000,0))\n ax1.set_aspect(35/30)\n ax1.set_xlabel('Distance Eastimg (m)')\n ax1.set_ylabel('Depth (m)')\n ax1.set_title('Predicted')\n ax2.imshow(AI_act.T, vmin=vmin, vmax=vmax, extent=(0,35000,15000,0))\n ax2.set_aspect(35/30)\n ax2.set_xlabel('Distance Eastimg (m)')\n ax2.set_ylabel('Depth (m)')\n ax2.set_title('Ground-Truth')\n plt.show()\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Hyperparams')\n \n parser.add_argument('--epochs', nargs='?', type=int, default=900,\n 
help='Number of epochs. Default = 1000')\n parser.add_argument('--no_wells', nargs='?', type=int, default=12,\n help='Number of sampled pseudologs for seismic section. Default = 12.')\n\n\n args = parser.parse_args()\n \n train(no_wells=args.no_wells, epochs=args.epochs)\n \n test(no_wells=args.no_wells, epochs=args.epochs)"
] |
[
[
"torch.nn.MSELoss",
"torch.no_grad",
"matplotlib.pyplot.subplots",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.abs",
"sklearn.metrics.r2_score",
"matplotlib.pyplot.show"
]
] |
1iyc/T2T-Analysis
|
[
"faed5fb1ed62e981e8d3b2bd534785798e60e849"
] |
[
"tensor2tensor/models/video/basic_stochastic.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic models for testing simple tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import common_video\nfrom tensor2tensor.layers import discretization\n\nfrom tensor2tensor.models.video import base_vae\nfrom tensor2tensor.models.video import basic_deterministic\nfrom tensor2tensor.models.video import basic_deterministic_params\n\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\ntfl = tf.layers\n\n\n@registry.register_model\nclass NextFrameBasicStochastic(\n basic_deterministic.NextFrameBasicDeterministic,\n base_vae.NextFrameBaseVae):\n \"\"\"Stochastic version of basic next-frame model.\"\"\"\n\n def inject_latent(self, layer, inputs, target):\n \"\"\"Inject a VAE-style latent.\"\"\"\n # Latent for stochastic model\n filters = 128\n full_video = tf.stack(inputs + [target], axis=1)\n latent_mean, latent_std = self.construct_latent_tower(\n full_video, time_axis=1)\n latent = common_video.get_gaussian_tensor(latent_mean, latent_std)\n latent = tfl.flatten(latent)\n latent = tf.expand_dims(latent, axis=1)\n latent = tf.expand_dims(latent, axis=1)\n latent_mask = tfl.dense(latent, filters, name=\"latent_mask\")\n zeros_mask = 
tf.zeros(\n common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32)\n layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1)\n extra_loss = self.get_kl_loss([latent_mean], [latent_std])\n return layer, extra_loss\n\n\n@registry.register_model\nclass NextFrameBasicStochasticDiscrete(\n basic_deterministic.NextFrameBasicDeterministic):\n \"\"\"Basic next-frame model with a tiny discrete latent.\"\"\"\n\n def inject_latent(self, layer, inputs, target):\n \"\"\"Inject a deterministic latent based on the target frame.\"\"\"\n hparams = self.hparams\n final_filters = common_layers.shape_list(layer)[-1]\n filters = hparams.hidden_size\n kernel = (4, 4)\n layer_shape = common_layers.shape_list(layer)\n\n def add_bits(layer, bits):\n z_mul = tfl.dense(bits, final_filters, name=\"unbottleneck_mul\")\n if not hparams.complex_addn:\n return layer + z_mul\n layer *= tf.nn.sigmoid(z_mul)\n z_add = tfl.dense(bits, final_filters, name=\"unbottleneck_add\")\n layer += z_add\n return layer\n\n if not self.is_training:\n if hparams.full_latent_tower:\n rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits])\n bits = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0\n else:\n bits, _ = discretization.predict_bits_with_lstm(\n layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits,\n temperature=hparams.latent_predictor_temperature)\n bits = tf.expand_dims(tf.expand_dims(bits, axis=1), axis=2)\n return add_bits(layer, bits), 0.0\n\n # Embed.\n frames = tf.concat(inputs + [target], axis=-1)\n x = tfl.dense(\n frames, filters, name=\"latent_embed\",\n bias_initializer=tf.random_normal_initializer(stddev=0.01))\n x = common_attention.add_timing_signal_nd(x)\n\n if hparams.full_latent_tower:\n for i in range(hparams.num_compress_steps):\n with tf.variable_scope(\"latent_downstride%d\" % i):\n x = common_layers.make_even_size(x)\n if i < hparams.filter_double_steps:\n filters *= 2\n x = common_attention.add_timing_signal_nd(x)\n x = tfl.conv2d(x, 
filters, kernel,\n activation=common_layers.belu,\n strides=(2, 2), padding=\"SAME\")\n x = common_layers.layer_norm(x)\n else:\n x = common_layers.double_discriminator(x)\n x = tf.expand_dims(tf.expand_dims(x, axis=1), axis=1)\n\n bits, bits_clean = discretization.tanh_discrete_bottleneck(\n x, hparams.bottleneck_bits, hparams.bottleneck_noise,\n hparams.discretize_warmup_steps, hparams.mode)\n if not hparams.full_latent_tower:\n _, pred_loss = discretization.predict_bits_with_lstm(\n layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits,\n target_bits=bits_clean)\n\n return add_bits(layer, bits), pred_loss\n\n\n@registry.register_hparams\ndef next_frame_basic_stochastic():\n \"\"\"Basic 2-frame conv model with stochastic tower.\"\"\"\n hparams = basic_deterministic_params.next_frame_basic_deterministic()\n hparams.stochastic_model = True\n hparams.add_hparam(\"latent_channels\", 1)\n hparams.add_hparam(\"latent_std_min\", -5.0)\n hparams.add_hparam(\"num_iterations_1st_stage\", 15000)\n hparams.add_hparam(\"num_iterations_2nd_stage\", 15000)\n hparams.add_hparam(\"latent_loss_multiplier\", 1e-3)\n hparams.add_hparam(\"latent_loss_multiplier_dynamic\", False)\n hparams.add_hparam(\"latent_loss_multiplier_alpha\", 1e-5)\n hparams.add_hparam(\"latent_loss_multiplier_epsilon\", 1.0)\n hparams.add_hparam(\"latent_loss_multiplier_schedule\", \"constant\")\n hparams.add_hparam(\"latent_num_frames\", 0) # 0 means use all frames.\n hparams.add_hparam(\"anneal_end\", 50000)\n hparams.add_hparam(\"information_capacity\", 0.0)\n return hparams\n\n\n@registry.register_hparams\ndef next_frame_sampling_stochastic():\n \"\"\"Basic 2-frame conv model with stochastic tower.\"\"\"\n hparams = basic_deterministic_params.next_frame_sampling()\n hparams.stochastic_model = True\n hparams.add_hparam(\"latent_channels\", 1)\n hparams.add_hparam(\"latent_std_min\", -5.0)\n hparams.add_hparam(\"num_iterations_1st_stage\", 15000)\n 
hparams.add_hparam(\"num_iterations_2nd_stage\", 15000)\n hparams.add_hparam(\"latent_loss_multiplier\", 1e-3)\n hparams.add_hparam(\"latent_loss_multiplier_dynamic\", False)\n hparams.add_hparam(\"latent_loss_multiplier_alpha\", 1e-5)\n hparams.add_hparam(\"latent_loss_multiplier_epsilon\", 1.0)\n hparams.add_hparam(\"latent_loss_multiplier_schedule\", \"constant\")\n hparams.add_hparam(\"latent_num_frames\", 0) # 0 means use all frames.\n hparams.add_hparam(\"anneal_end\", 40000)\n hparams.add_hparam(\"information_capacity\", 0.0)\n return hparams\n\n\n@registry.register_hparams\ndef next_frame_basic_stochastic_discrete():\n \"\"\"Basic 2-frame conv model with stochastic discrete latent.\"\"\"\n hparams = basic_deterministic_params.next_frame_sampling()\n hparams.batch_size = 2\n hparams.video_num_target_frames = 16\n hparams.scheduled_sampling_mode = \"prob_inverse_lin\"\n hparams.scheduled_sampling_decay_steps = 40000\n hparams.scheduled_sampling_max_prob = 1.0\n hparams.dropout = 0.3\n hparams.learning_rate_constant = 0.002\n hparams.learning_rate_warmup_steps = 2000\n hparams.learning_rate_schedule = \"linear_warmup * constant\"\n hparams.add_hparam(\"bottleneck_bits\", 64)\n hparams.add_hparam(\"bottleneck_noise\", 0.02)\n hparams.add_hparam(\"discretize_warmup_steps\", 40000)\n hparams.add_hparam(\"full_latent_tower\", False)\n hparams.add_hparam(\"latent_predictor_state_size\", 128)\n hparams.add_hparam(\"latent_predictor_temperature\", 0.5)\n hparams.add_hparam(\"complex_addn\", True)\n return hparams\n\n\n@registry.register_ranged_hparams\ndef next_frame_stochastic_discrete_range(rhp):\n \"\"\"Next frame stochastic discrete tuning grid.\"\"\"\n rhp.set_float(\"learning_rate_constant\", 0.001, 0.01)\n rhp.set_float(\"dropout\", 0.2, 0.6)\n rhp.set_int(\"filter_double_steps\", 3, 5)\n rhp.set_discrete(\"hidden_size\", [64, 96, 128])\n rhp.set_discrete(\"bottleneck_bits\", [32, 64, 128, 256])\n rhp.set_discrete(\"video_num_target_frames\", [4])\n 
rhp.set_float(\"bottleneck_noise\", 0.0, 0.2)\n"
] |
[
[
"tensorflow.concat",
"tensorflow.less",
"tensorflow.expand_dims",
"tensorflow.random_uniform",
"tensorflow.variable_scope",
"tensorflow.stack",
"tensorflow.nn.sigmoid",
"tensorflow.random_normal_initializer"
]
] |
pana1990/models
|
[
"68d983b9bc91d9db9a2b7cbe1b1a69d44921e210"
] |
[
"official/nlp/transformer/compute_bleu.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Script to compute official BLEU score.\n\nSource:\nhttps://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport sys\nimport unicodedata\n\nfrom absl import app as absl_app\nfrom absl import flags\nimport six\nfrom six.moves import range\nimport tensorflow as tf\n\nfrom official.nlp.transformer.utils import metrics\nfrom official.nlp.transformer.utils import tokenizer\nfrom official.utils.flags import core as flags_core\n\n\nclass UnicodeRegex(object):\n \"\"\"Ad-hoc hack to recognize all punctuation and symbols.\"\"\"\n\n def __init__(self):\n punctuation = self.property_chars(\"P\")\n self.nondigit_punct_re = re.compile(r\"([^\\d])([\" + punctuation + r\"])\")\n self.punct_nondigit_re = re.compile(r\"([\" + punctuation + r\"])([^\\d])\")\n self.symbol_re = re.compile(\"([\" + self.property_chars(\"S\") + \"])\")\n\n def property_chars(self, prefix):\n return \"\".join(\n six.unichr(x)\n for x in range(sys.maxunicode)\n if unicodedata.category(six.unichr(x)).startswith(prefix))\n\n\nuregex = UnicodeRegex()\n\n\ndef bleu_tokenize(string):\n r\"\"\"Tokenize a string following the official BLEU 
implementation.\n\n See https://github.com/moses-smt/mosesdecoder/'\n 'blob/master/scripts/generic/mteval-v14.pl#L954-L983\n In our case, the input string is expected to be just one line\n and no HTML entities de-escaping is needed.\n So we just tokenize on punctuation and symbols,\n except when a punctuation is preceded and followed by a digit\n (e.g. a comma/dot as a thousand/decimal separator).\n\n Note that a numer (e.g. a year) followed by a dot at the end of sentence\n is NOT tokenized,\n i.e. the dot stays with the number because `s/(\\p{P})(\\P{N})/ $1 $2/g`\n does not match this case (unless we add a space after each sentence).\n However, this error is already in the original mteval-v14.pl\n and we want to be consistent with it.\n\n Args:\n string: the input string\n\n Returns:\n a list of tokens\n \"\"\"\n string = uregex.nondigit_punct_re.sub(r\"\\1 \\2 \", string)\n string = uregex.punct_nondigit_re.sub(r\" \\1 \\2\", string)\n string = uregex.symbol_re.sub(r\" \\1 \", string)\n return string.split()\n\n\ndef bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):\n \"\"\"Compute BLEU for two files (reference and hypothesis translation).\"\"\"\n ref_lines = tokenizer.native_to_unicode(\n tf.io.gfile.GFile(ref_filename).read()).strip().splitlines()\n hyp_lines = tokenizer.native_to_unicode(\n tf.io.gfile.GFile(hyp_filename).read()).strip().splitlines()\n return bleu_on_list(ref_lines, hyp_lines, case_sensitive)\n\n\ndef bleu_on_list(ref_lines, hyp_lines, case_sensitive=False):\n \"\"\"Compute BLEU for two list of strings (reference and hypothesis).\"\"\"\n if len(ref_lines) != len(hyp_lines):\n raise ValueError(\n \"Reference and translation files have different number of \"\n \"lines (%d VS %d). 
If training only a few steps (100-200), the \"\n \"translation may be empty.\" % (len(ref_lines), len(hyp_lines)))\n if not case_sensitive:\n ref_lines = [x.lower() for x in ref_lines]\n hyp_lines = [x.lower() for x in hyp_lines]\n ref_tokens = [bleu_tokenize(x) for x in ref_lines]\n hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]\n return metrics.compute_bleu(ref_tokens, hyp_tokens) * 100\n\n\ndef main(unused_argv):\n if FLAGS.bleu_variant in (\"both\", \"uncased\"):\n score = bleu_wrapper(FLAGS.reference, FLAGS.translation, False)\n tf.logging.info(\"Case-insensitive results: %f\" % score)\n\n if FLAGS.bleu_variant in (\"both\", \"cased\"):\n score = bleu_wrapper(FLAGS.reference, FLAGS.translation, True)\n tf.logging.info(\"Case-sensitive results: %f\" % score)\n\n\ndef define_compute_bleu_flags():\n \"\"\"Add flags for computing BLEU score.\"\"\"\n flags.DEFINE_string(\n name=\"translation\",\n default=None,\n help=flags_core.help_wrap(\"File containing translated text.\"))\n flags.mark_flag_as_required(\"translation\")\n\n flags.DEFINE_string(\n name=\"reference\",\n default=None,\n help=flags_core.help_wrap(\"File containing reference translation.\"))\n flags.mark_flag_as_required(\"reference\")\n\n flags.DEFINE_enum(\n name=\"bleu_variant\",\n short_name=\"bv\",\n default=\"both\",\n enum_values=[\"both\", \"uncased\", \"cased\"],\n case_sensitive=False,\n help=flags_core.help_wrap(\n \"Specify one or more BLEU variants to calculate. Variants: \\\"cased\\\"\"\n \", \\\"uncased\\\", or \\\"both\\\".\"))\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n define_compute_bleu_flags()\n FLAGS = flags.FLAGS\n absl_app.run(main)\n"
] |
[
[
"tensorflow.logging.set_verbosity",
"tensorflow.logging.info",
"tensorflow.io.gfile.GFile"
]
] |
adarbha/Tic-Tac-Toe
|
[
"b3d2eae888ca97a3bd2658e3ef975293a0891012"
] |
[
"tests/test_game.py"
] |
[
"import unittest\nimport numpy as np\nfrom tictactoeadarbha.game import Game\nfrom tictactoeadarbha.player import Player\n\nclass TestGame(unittest.TestCase):\n\n def setUp(self):\n self.game = Game()\n self.player1 = Player('p1','X')\n self.player2 = Player('p2','-')\n\n\n def test_initialization(self):\n self.assertIsNone(self.game.board)\n self.assertIsNone(self.game.player1)\n self.assertIsNone(self.game.player2)\n self.assertIsNone(self.game.chance)\n\n def test_register_players(self):\n self.game.register_player1(self.player1)\n self.assertEqual(self.player1.name,'p1', \"Player1 name is not initiated accurately\")\n \n self.game.register_player2(self.player1)\n self.assertIsNone(self.game.player2, \"Player1 initiated in game as Player2 also\")\n \n self.game.register_player2(self.player2)\n self.assertEqual(self.player2.name, 'p2', \"Player2 name not initialized accurately\")\n\n def test_start(self):\n self.game.register_player1(self.player1)\n self.game.register_player2(self.player2)\n\n self.game.start()\n \n self.assertEqual(self.game.board[0][0], '0')\n self.assertEqual(self.game.player_dict['p1'], self.player1)\n self.assertEqual(self.game.chance, 'p1')\n\n def test_place_marker(self):\n self.game.register_player1(self.player1)\n self.game.register_player2(self.player2)\n\n self.game.start()\n\n self.game.place_marker('42', 0, 0)\n self.assertEqual(self.game.board[0,0], '0')\n\n self.game.place_marker('p1', 0, 0)\n self.assertEqual(self.game.board[0,0], 'X')\n\n self.game.place_marker('p1', 0, 1)\n self.assertEqual(self.game.board[0,1], '0')\n\n self.assertEqual(self.game.chance, 'p2')\n\n self.game.place_marker('p2', 0, 1)\n self.assertEqual(self.game.board[0,1], '-')\n\n self.assertEqual(self.game.chance, 'p1')\n \n\n def test_is_board_full(self):\n self.game.register_player1(self.player1)\n self.game.register_player2(self.player2)\n\n self.game.start()\n\n self.assertFalse(self.game.is_board_full())\n\n self.game.board = 
np.array([['X','-','-'],['-','X','X'],['-','X','X']]).astype(str)\n self.assertTrue(self.game.is_board_full())\n\n\n\n\n def test_is_game_over(self):\n self.game.register_player1(self.player1)\n self.game.register_player2(self.player2)\n\n self.game.start()\n\n self.assertFalse(self.game.is_game_over())\n\n self.game.board = np.array([['X','X','X'],['0','0','0'],['0','0','0']])\n\n self.assertTrue(self.game.is_game_over())\n\nif __name__ == '__main__':\n unittest.main()\n\n "
] |
[
[
"numpy.array"
]
] |
GG-yuki/bugs
|
[
"aabd576e9e57012a3390007af890b7c6ab6cdda8",
"aabd576e9e57012a3390007af890b7c6ab6cdda8",
"aabd576e9e57012a3390007af890b7c6ab6cdda8"
] |
[
"python/AND/main.py",
"python/pytorch-mobilenet-v3/test.py",
"python/paper_design_cifar100/support.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-09-27 15:09:03\n# @Author : Jiabo (Raymond) Huang (jiabo.huang@qmul.ac.uk)\n# @Link : https://github.com/Raymond-sci\n\nimport torch\nimport torch.backends.cudnn as cudnn\n\nimport sys\nimport os\nimport time\nfrom datetime import datetime\n\nimport models\nimport datasets\n\nfrom lib import protocols\nfrom lib.non_parametric_classifier import NonParametricClassifier\nfrom lib.criterion import Criterion\nfrom lib.ans_discovery import ANsDiscovery\nfrom lib.utils import AverageMeter, time_progress, adjust_learning_rate\n\nfrom packages import session\nfrom packages import lr_policy\nfrom packages import optimizers\nfrom packages.config import CONFIG as cfg\nfrom packages.loggers.std_logger import STDLogger as logger\nfrom packages.loggers.tf_logger import TFLogger as SummaryWriter\n\n\ndef require_args():\n \n # dataset to be used\n cfg.add_argument('--dataset', default='cifar10', type=str,\n help='dataset to be used. (default: cifar10)')\n \n # network to be used\n cfg.add_argument('--network', default='resnet18', type=str,\n help='backbone to be used. (default: ResNet18)')\n\n # optimizer to be used\n cfg.add_argument('--optimizer', default='sgd', type=str,\n help='optimizer to be used. (default: sgd)')\n\n # lr policy to be used\n cfg.add_argument('--lr-policy', default='step', type=str,\n help='lr policy to be used. (default: step)')\n\n # args for protocol\n cfg.add_argument('--protocol', default='knn', type=str,\n help='protocol used to validate model')\n\n # args for network training\n cfg.add_argument('--max-epoch', default=200, type=int,\n help='max epoch per round. (default: 200)')\n cfg.add_argument('--max-round', default=5, type=int, \n help='max iteration, including initialisation one. '\n '(default: 5)')\n cfg.add_argument('--iter-size', default=1, type=int,\n help='caffe style iter size. 
(default: 1)')\n cfg.add_argument('--display-freq', default=1, type=int,\n help='display step')\n cfg.add_argument('--test-only', action='store_true', \n help='test only')\n\n\ndef main():\n\n logger.info('Start to declare training variables')\n cfg.device = device = 'cuda' if torch.cuda.is_available() else 'cpu'\n best_acc = 0. # best test accuracy\n start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n start_round = 0 # start for iter 0 or last checkpoint iter\n\n logger.info('Start to prepare data')\n trainset, trainloader, testset, testloader = datasets.get(cfg.dataset, instant=True)\n # cheat labels are used to compute neighbourhoods consistency only\n cheat_labels = torch.tensor(trainset.labels).long().to(device)\n ntrain, ntest = len(trainset), len(testset)\n logger.info('Totally got %d training and %d test samples' % (ntrain, ntest))\n\n logger.info('Start to build model')\n net = models.get(cfg.network, instant=True)\n npc = NonParametricClassifier(cfg.low_dim, ntrain, cfg.npc_temperature, cfg.npc_momentum)\n ANs_discovery = ANsDiscovery(ntrain)\n criterion = Criterion()\n optimizer = optimizers.get(cfg.optimizer, instant=True, params=net.parameters())\n lr_handler = lr_policy.get(cfg.lr_policy, instant=True)\n protocol = protocols.get(cfg.protocol)\n \n # data parallel\n if device == 'cuda':\n if (cfg.network.lower().startswith('alexnet') or\n cfg.network.lower().startswith('vgg')):\n net.features = torch.nn.DataParallel(net.features,\n device_ids=range(len(cfg.gpus.split(','))))\n else:\n net = torch.nn.DataParallel(net, device_ids=range(\n len(cfg.gpus.split(','))))\n cudnn.benchmark = True\n\n net, npc, ANs_discovery, criterion = (net.to(device), npc.to(device), \n ANs_discovery.to(device), criterion.to(device))\n \n # load ckpt file if necessary\n if cfg.resume:\n assert os.path.exists(cfg.resume), \"Resume file not found: %s\" % cfg.resume\n logger.info('Start to resume from %s' % cfg.resume)\n ckpt = torch.load(cfg.resume)\n 
net.load_state_dict(ckpt['net'])\n optimizer.load_state_dict(ckpt['optimizer'])\n npc = npc.load_state_dict(ckpt['npc'])\n ANs_discovery.load_state_dict(ckpt['ANs_discovery'])\n best_acc = ckpt['acc']\n start_epoch = ckpt['epoch']\n start_round = ckpt['round']\n\n # test if necessary\n if cfg.test_only:\n logger.info('Testing at beginning...')\n acc = protocol(net, npc, trainloader, testloader, 200,\n cfg.npc_temperature, True, device)\n logger.info('Evaluation accuracy at %d round and %d epoch: %.2f%%' %\n (start_round, start_epoch, acc * 100))\n sys.exit(0)\n\n logger.info('Start the progressive training process from round: %d, '\n 'epoch: %d, best acc is %.4f...' % (start_round, start_epoch, best_acc))\n round = start_round\n global_writer = SummaryWriter(cfg.debug,\n log_dir=os.path.join(cfg.tfb_dir, 'global'))\n while (round < cfg.max_round):\n\n # variables are initialized to different value in the first round\n is_first_round = True if round == start_round else False\n best_acc = best_acc if is_first_round else 0\n\n if not is_first_round:\n logger.info('Start to mining ANs at %d round' % round)\n ANs_discovery.update(round, npc, cheat_labels)\n logger.info('ANs consistency at %d round is %.2f%%' %\n (round, ANs_discovery.consistency * 100))\n\n ANs_num = ANs_discovery.anchor_indexes.shape[0]\n global_writer.add_scalar('ANs/Number', ANs_num, round)\n global_writer.add_scalar('ANs/Consistency', ANs_discovery.consistency, round)\n\n # declare local writer\n writer = SummaryWriter(cfg.debug, log_dir=os.path.join(cfg.tfb_dir,\n '%04d-%05d' % (round, ANs_num)))\n logger.info('Start training at %d/%d round' % (round, cfg.max_round))\n\n\n # start to train for an epoch\n epoch = start_epoch if is_first_round else 0\n lr = cfg.base_lr\n while lr > 0 and epoch < cfg.max_epoch:\n\n # get learning rate according to current epoch\n lr = lr_handler.update(epoch)\n\n train(round, epoch, net, trainloader, optimizer, npc, criterion,\n ANs_discovery, lr, writer)\n\n 
logger.info('Start to evaluate...')\n acc = protocol(net, npc, trainloader, testloader, 200,\n cfg.npc_temperature, False, device)\n writer.add_scalar('Evaluate/Rank-1', acc, epoch)\n\n logger.info('Evaluation accuracy at %d round and %d epoch: %.1f%%'\n % (round, epoch, acc * 100))\n logger.info('Best accuracy at %d round and %d epoch: %.1f%%'\n % (round, epoch, best_acc * 100))\n\n is_best = acc >= best_acc\n best_acc = max(acc, best_acc)\n if is_best and not cfg.debug:\n target = os.path.join(cfg.ckpt_dir, '%04d-%05d.ckpt'\n % (round, ANs_num))\n logger.info('Saving checkpoint to %s' % target)\n state = {\n 'net': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'ANs_discovery' : ANs_discovery.state_dict(),\n 'npc' : npc.state_dict(),\n 'acc': acc,\n 'epoch': epoch + 1,\n 'round' : round,\n 'session' : cfg.session\n }\n torch.save(state, target)\n epoch += 1\n\n # log best accuracy after each iteration\n global_writer.add_scalar('Evaluate/best_acc', best_acc, round)\n round += 1\n\n# Training\ndef train(round, epoch, net, trainloader, optimizer, npc, criterion,\n ANs_discovery, lr, writer):\n\n # tracking variables\n train_loss = AverageMeter()\n data_time = AverageMeter()\n batch_time = AverageMeter()\n\n # switch the model to train mode\n net.train()\n # adjust learning rate\n adjust_learning_rate(optimizer, lr)\n\n end = time.time()\n start_time = datetime.now()\n optimizer.zero_grad()\n for batch_idx, (inputs, _, indexes) in enumerate(trainloader):\n data_time.update(time.time() - end)\n inputs, indexes = inputs.to(cfg.device), indexes.to(cfg.device)\n\n features = net(inputs)\n outputs = npc(features, indexes)\n loss = criterion(outputs, indexes, ANs_discovery) / cfg.iter_size\n\n loss.backward()\n train_loss.update(loss.item() * cfg.iter_size, inputs.size(0))\n\n if batch_idx % cfg.iter_size == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if batch_idx % 
cfg.display_freq != 0:\n continue\n\n writer.add_scalar('Train/Learning_Rate', lr,\n epoch * len(trainloader) + batch_idx)\n writer.add_scalar('Train/Loss', train_loss.val,\n epoch * len(trainloader) + batch_idx)\n\n\n elapsed_time, estimated_time = time_progress(batch_idx + 1,\n len(trainloader), batch_time.sum)\n logger.info('Round: {round} Epoch: {epoch}/{tot_epochs} '\n 'Progress: {elps_iters}/{tot_iters} ({elps_time}/{est_time}) '\n 'Data: {data_time.avg:.3f} LR: {learning_rate:.5f} '\n 'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f})'.format(\n round=round, epoch=epoch, tot_epochs=cfg.max_epoch,\n elps_iters=batch_idx, tot_iters=len(trainloader),\n elps_time=elapsed_time, est_time=estimated_time,\n data_time=data_time, learning_rate=lr,\n train_loss=train_loss))\n\nif __name__ == '__main__':\n \n session.run(__name__)\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Identity(nn.Module):\n def __init__(self, channel):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass MobileBottleneck(nn.Module):\n def __init__(self, exp, se=False):\n super(MobileBottleneck, self).__init__()\n if se:\n SELayer = SEModule\n else:\n SELayer = Identity\n\n self.conv = nn.Sequential(\n SELayer(exp),\n )\n\n def forward(self, x):\n return self.conv(x)\n\nnet = MobileBottleneck(2,False)\nx = torch.randn(1,1,2,3)\nprint(x)\ny = net(x)\nprint(y)",
"\"\"\"\n/********************************************************************\n*\n* 文件名:support.py\n*\n* 文件描述:封装类\n*\n* 创建人: qiwei_ji, 2020年10月4日\n*\n* 版本号:1.2\n*\n* 修改记录:64\n*\n********************************************************************/\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms, datasets\nimport torchvision.models as models\nimport torch.optim as optim\nimport datetime\nfrom torch.autograd import Variable\nimport numpy as np\nfrom typing import Optional\n\n\n# 保存网络\ndef save_net(net, epoch):\n save_epoch = ('./pkl/epoch_%d_net' % epoch)\n torch.save(net, save_epoch + '.pkl') # 保存整个网络\n torch.save(net.state_dict(), './pkl/net_params.pkl') # 只保存网络中的参数 (速度快, 占内存少)\n print('save_done')\n\n\n# 提取网络\ndef load_net(net):\n # restore entire net1 to net2\n model_load = torch.load('./pkl/epoch_' + net + '_net.pkl')\n return model_load\n\n\n# 图像归一化后裁剪,最后尺寸224*224*3\ndef train_transform():\n data_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n return data_transform\n\n\ndef test_transform():\n data_transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n return data_transform\n\n\n# 定义训练过程\ndef train(net, epochs, lr, train_loader, test_loader, weight_decay=5e-4):\n # 刷新txt\n model_name = format(net.__class__.__name__)\n net.train()\n with open('./experiment_record(first)/' + model_name + '/result.txt', \"w\") as f:\n f.write(\"开始实验\\n\") # 自带文件关闭功能,不需要再写f.close()\n\n # 定义loss和optimizer\n cirterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=weight_decay)\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[50, 100, 150], gamma=0.1)\n max_acc = 0\n loadingtime = 0\n loadingtime2 = 0\n\n for epoch in range(epochs):\n starttime = datetime.datetime.now() # 计时\n\n # 训练开始\n running_loss = 0.0\n train_correct = 0\n train_total = 0\n print(\"yes\\n\")\n 
for i, data in enumerate(train_loader, 0):\n inputs, train_labels = data\n inputs, labels = Variable(inputs), Variable(train_labels)\n inputs, labels = inputs.cuda(), labels.cuda()\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = cirterion(outputs, labels.long())\n _, train_predicted = torch.max(outputs.data, 1)\n train_correct += (train_predicted == labels.data).sum()\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n train_total += train_labels.size(0)\n\n # 训练计时\n endtime = datetime.datetime.now()\n loadingtime = (endtime - starttime).seconds\n\n # 打印训练结果\n print('train %d epoch loss: %.3f acc: %.3f load:%d' % (\n epoch + 1, running_loss / train_total, 100 * train_correct / train_total, loadingtime))\n f = open('./experiment_record(first)/' + model_name + '/result.txt', \"a\")\n f.write('train %d epoch loss: %.3f acc: %.3f load:%d \\n' % (\n epoch + 1, running_loss / train_total, 100 * train_correct / train_total, loadingtime))\n f.close()\n\n # 模型test\n correct = 0\n test_loss = 0.0\n test_total = 0\n net.eval()\n with torch.no_grad():\n for data in test_loader:\n testimages, testlabels = data\n testimages, testlabels = Variable(testimages), Variable(testlabels)\n testimages, testlabels = testimages.cuda(), testlabels.cuda()\n net = net.eval()\n outputs = net(testimages)\n loss = cirterion(outputs, testlabels.long())\n _, test_predicted = torch.max(outputs.data, 1)\n test_loss += loss.item()\n test_total += testlabels.size(0)\n correct += (test_predicted == testlabels.data).sum()\n\n # 测试计时\n endtime2 = datetime.datetime.now()\n loadingtime2 = (endtime2 - endtime).seconds\n acc = 100 * correct / test_total\n if max_acc < acc:\n max_acc = acc\n # 打印测试结果n\n print('test %d epoch loss: %.3f acc: %.3f load:%d ' %\n (epoch + 1, test_loss / test_total, acc, loadingtime2))\n f = open('./experiment_record(first)/' + model_name + '/result.txt', \"a\")\n f.write('test %d epoch loss: %.3f acc: %.3f load:%d\\n' %\n (epoch + 1, test_loss / 
test_total, acc, loadingtime2))\n f.close()\n\n scheduler.step()\n\n if (epoch + 1) / 50 == 1:\n print('epoch decrease 10x')\n save_net(net, epoch)\n if (epoch + 1) / 100 == 1:\n print('epoch decrease 10x')\n save_net(net, epoch)\n if (epoch + 1) / 150 == 1:\n print('epoch decrease 10x')\n save_net(net, epoch)\n\n return max_acc, loadingtime, loadingtime2\n\n\ndef set_random_seed(seed: Optional[int] = None) -> int:\n \"\"\"\n Set the random seed for numpy and torch.\n Parameters\n ----------\n seed: int or None, default None\n Specify initial random seed.\n Returns\n -------\n seed : int\n \"\"\"\n seed = np.random.randint(10_000) if seed is None else seed\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # type: ignore\n return seed\n\n\ndef load_model(num_classes=10):\n net = models.mobilenet_v2(pretrained=False)\n net.classifier = nn.Sequential(\n nn.Dropout(0.3),\n nn.Linear(in_features=1280, out_features=num_classes, bias=True))\n return net\n\n\ndef write_result(model, epochs, batch_size, num_workers, lr, max_acc, weight_decay, traintime, testtime):\n f = open('./experiment_record(first)/' + model + '/final_result.txt', \"a\")\n f.write('model %s train %d epoch batch_size %d num_workers %d lr %f max_acc: %.3f '\n 'weight_decay %f traintime %d testtime %d\\n' %\n (model, epochs, batch_size, num_workers, lr, max_acc, weight_decay, traintime, testtime))\n f.close()\n\n\nclass Data_loader:\n\n def __init__(self, root, num_workers, batch_size):\n self.root = root\n self.num_workers = num_workers\n self.batch_size = batch_size\n self.train_transform = train_transform()\n self.test_transform = test_transform()\n\n def trainloader(self):\n train_dataset = datasets.CIFAR100(root=self.root, train=True, transform=self.train_transform, download=True)\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers)\n return train_loader\n\n def testloader(self):\n 
test_dataset = datasets.CIFAR100(root=self.root, train=False, transform=self.test_transform, download=True)\n test_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers)\n return test_loader\n"
] |
[
[
"torch.save",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
],
[
"torch.randn"
],
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cuda.manual_seed_all",
"torch.max",
"numpy.random.seed",
"torch.autograd.Variable",
"torch.save",
"torch.no_grad",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.manual_seed",
"numpy.random.randint",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.CrossEntropyLoss"
]
] |
simoncometto/PowerFlow2
|
[
"306f83475494262646683c0d7d0364da829c8d4f"
] |
[
"powerflow.py"
] |
[
"\n'''Simón Cometto 11/10/2019 Río Cuarto'''\n\n# coding=utf-8\nimport numpy as np\nimport scipy.sparse as sparse\nfrom math import cos, sin\n\nclass powerflow:\n '''\n\n '''\n def __init__(self, filename=''):\n with open(filename) as cdf:\n # Leo el archivo hasta llegar a la sección de BUS DATA\n words = ['', ]\n while words[0] != 'BUS':\n line = cdf.readline()\n words = line.split(' ')\n words = [item for item in words if item] # Elimino los elementos vacios\n\n # Leo la cantidad de nodos en la 4ta columna\n self.n = int(words[3])\n n = self.n\n # Creo la Ybus (matriz de admitancia) nula de n x n numeros complejos\n self.Ybus = np.zeros((n, n), dtype=np.complex128)\n\n # Creo los vectores con las variables en cada nodo:\n self.load = np.zeros(n, dtype=np.complex128) # P + jQ\n self.generation = np.zeros(n, dtype=np.complex128) # P + jQ\n self.voltage = np.zeros(n, dtype=np.float) # V(por unidad), angulo en grados\n self.angle = np.zeros(n, dtype=np.float) # Angulo en grados\n\n self.PV_buses = np.array((0,2), dtype=int) #Vector que contiene el índice del nodo PV, y la tensión del nodo\n\n self.Q_inj = np.zeros(n, dtype=np.float64)\n self.P_inj = np.zeros(n, dtype=np.float64)\n\n #Inicializo el valor del swing bus, pero en un nodo que no existe\n self.swing_bus = n+1\n\n # Leo las siguientes n lineas con la info de cada nodo\n for i in range(n):\n line = cdf.readline()\n words = line.split(' ')\n words = [item for item in words if item] # Elimino los elementos vacios\n\n self.voltage[i] = float(words[7])\n self.angle[i] = np.deg2rad(float(words[8]))\n self.load[i] = complex(float(words[9]), float(words[10]))\n self.generation[i] = complex(float(words[11]), float(words[12]))\n\n self.Q_inj[i] = self.generation[i].imag - self.load[i].imag\n self.P_inj[i] = self.generation[i].real - self.load[i].real\n\n # Asigno el swing_bus\n if (int(words[6]) == 3):\n self.swing_bus = i\n self.swing_bus_angle = self.angle[i]\n self.swing_bus_voltage = self.voltage[i]\n #Como en 
los PV buses no se conoce ni P ni Q, se asignan valores nulos\n self.P_inj[i] = 0\n self.Q_inj[i] = 0\n\n # PV buses\n if (int(words[6]) == 2):\n self.PV_buses = np.vstack((self.PV_buses, [i,float(words[14])])) #El índice y la tensión del bus\n self.Q_inj[i] = 0 #Como en los PV buses se desconoce Q, se asigno un valor nulo\n\n # Leo el archivo hasta llegar a la sección de BRANCH DATA\n while words[0] != 'BRANCH':\n line = cdf.readline()\n words = line.split(' ')\n words = [item for item in words if item] # Elimino los elementos vacios\n\n # Leo las lineas de la sección Branch\n while True: # Salgo con un break en el próximo if\n line = cdf.readline()\n words = line.split(' ')\n words = [item for item in words if item] # Elimino los elementos vacios\n\n # Si llego al fin de la sección indicado por un -999\\n salgo del bucle\n if words[0] == '-999\\n':\n break\n\n i = int(words[0]) - 1\n j = int(words[1]) - 1 # La impedancia entre el nodo i y el nodo j\n self.Ybus[i, j] = self.Ybus[j, i] = -1 / complex(float(words[6]), float(\n words[7])) # Asigno la impendancia R + jX\n self.Ybus[i, i] = self.Ybus[j, j] = complex(0, float(\n words[8])) # En la diagonal sumo Charging B ''la impedancia paralelo del equivalente pi''\n\n # Recorro la matriz de admitacnia para asignarle a la diagonal la suma de las filas\n for i in range(0, n):\n for j in range(0, n):\n if j != i:\n self.Ybus[i, i] += -self.Ybus[i, j]\n\n self.init_v_theta()\n #np.savetxt('Ybus.txt', self.Ybus, fmt='%+9.4f', delimiter=' ')\n return\n\n def init_v_theta(self, init_voltage=1, init_angle=0):\n self.v = np.empty(self.n, dtype=np.float64)\n self.theta = np.empty(self.n, dtype=np.float64)\n\n for i in range(self.n):\n self.v[i] = init_voltage\n self.theta[i] = init_angle\n\n if np.any(self.PV_buses[:,0]==i):\n l = np.argwhere(self.PV_buses[:,0]==i)\n self.v[i] = self.PV_buses[l[0],1]\n\n if i == self.swing_bus:\n self.theta[i] = self.swing_bus_angle\n self.v[i] = self.swing_bus_voltage\n\n def reducir(self, 
x):\n '''Elimina las filas (y columas si es una matrix) que corresponden a Q del jacobiano y a V'''\n # Reducir un vector\n if x.ndim == 1:\n PV_buses_Q = self.PV_buses[:, 0] + self.n - 1\n filas_a_eliminar = np.append([self.swing_bus], [self.swing_bus + self.n - 1], )\n filas_a_eliminar = np.append(filas_a_eliminar, np.int32(PV_buses_Q))\n return np.delete(x, filas_a_eliminar, 0)\n\n # Reducir una matriz\n else:\n PV_buses_Q = self.PV_buses[:, 0] + self.n - 1\n filas_a_eliminar = np.append([self.swing_bus], [self.swing_bus+self.n-1], )\n filas_a_eliminar = np.append(filas_a_eliminar, np.int32(PV_buses_Q))\n\n columnas_a_eliminar = filas_a_eliminar\n x = np.delete(x, filas_a_eliminar, 0)\n return np.delete(x, columnas_a_eliminar, 1)\n\n def J(self):\n '''Computa el jacobiano para un valor de tensión y ángulo dado\n :parameter x: un vactor de 2*(n-1) donde n es la cantidad de nodos del sistema\n :returns jacobiano: una matriz de 2(n-1) x 2(n-1)\n '''\n\n #Cuatro matrices cuadradadas que despues se unen para formar el jacobiano\n J11 = np.zeros((self.n, self.n), dtype=np.float64)\n J12 = np.zeros((self.n, self.n), dtype=np.float64)\n J21 = np.zeros((self.n, self.n), dtype=np.float64)\n J22 = np.zeros((self.n, self.n), dtype=np.float64)\n\n for i in range(self.n):\n for j in range(self.n):\n # Saltear el swing_bus\n if (i == self.swing_bus or j == self.swing_bus):\n continue\n\n # Elementos que no son de la diagonal\n # ---------------------------------------------------------------------------------------------\n if (i != j):\n v_i = self.v[i]\n v_j = self.v[j]\n theta_i = self.theta[i]\n theta_j = self.theta[j]\n delta_theta = theta_i - theta_j\n G_ij = self.Ybus[i,j].real\n B_ij = self.Ybus[i,j].imag\n\n cos_theta = cos(delta_theta)\n sin_theta = sin(delta_theta)\n\n a = v_i * v_j\n b = a * G_ij\n c = a * B_ij\n # dP/dtheta\n J11[i, j] = b * sin_theta - c * cos_theta\n # dQ/dtheta\n J21[i, j] = -b * cos_theta + c * sin_theta\n\n d = v_i * G_ij\n e = v_i * B_ij\n # 
dP/dV\n J12[i, j] = d * cos(delta_theta) + e * sin(delta_theta)\n # dQ/dV\n J22[i, j] = d * sin(delta_theta) - e * cos(delta_theta)\n\n # Elementos de la diagonal\n # ---------------------------------------------------------------------------------------------\n else:\n v_i = self.v[i]\n G_ii = self.Ybus[i,i].real\n B_ii = self.Ybus[i,i].imag\n\n P_i = self.last_P[i]\n Q_i = self.last_Q[i]\n\n # dP/dtheta\n J11[i, j] = - Q_i - B_ii * (v_i ** 2)\n # dP/dV\n J21[i, j] = P_i / v_i + G_ii * v_i\n # dQ/dtheta\n J21[i, j] = P_i - G_ii * (v_i ** 2)\n # dQ/dV\n J22[i, j] = Q_i / v_i - B_ii * v_i\n\n # --------------------------------------------------------------------------------\n np.savetxt('jacobiano11.txt', J12, fmt='%+7.2f', delimiter=' ')\n\n J1 = np.hstack([J11, J12])\n J2 = np.hstack([J21, J22])\n J = np.vstack([J1, J2])\n\n return J\n\n def f(self):\n ''' Computa deltaP y deltaQ para un valor de tensión y ángulo dado\n :parameter x un vactor de 2*(n-1) donde n es la cantidad de nodos del sistema\n :returns delta_PQ: una vector de 2(n-1)'''\n\n P = np.zeros(self.n, dtype=np.float)\n Q = np.zeros(self.n, dtype=np.float)\n\n for i in range(self.n):\n for j in range(self.n):\n\n if (i == self.swing_bus): # Saltear el swing_bus\n continue\n\n is_PV_bus = False #Variable para indicar si es un PV bus o no.\n if (np.any(self.PV_buses[:,0]==i)):\n is_PV_bus = True\n\n #Se leen todas las variables necesarias\n B_ij = self.Ybus[i,j].imag\n G_ij = self.Ybus[i,j].real\n theta_i = self.theta[i]\n theta_j = self.theta[j]\n delta_theta = theta_i - theta_j\n v_i = self.v[i]\n v_j = self.v[j]\n a = v_i * v_j * G_ij\n b = v_i * v_j * B_ij\n\n #Se calcula y asignan los valores\n P[i] += a * cos(delta_theta) + b * sin(delta_theta)\n if not is_PV_bus: #Si no es un PV_bus entonces se calcula Q\n Q[i] += a * sin(delta_theta) - b * cos(delta_theta)\n\n #Guardo estas dos copias para luego usarlas en el cálculo de las diagonales del Jacobiano\n self.last_P = P * 100\n self.last_Q = Q * 
100\n\n return (self.P_inj - P*100, self.Q_inj - Q*100)\n\n def solve_newton(self, init_v=1, init_angle=0):\n self.init_v_theta(init_v, init_angle)\n\n P, Q = self.f()\n f = np.append(P,Q)\n f_reducido = self.reducir(f)\n J_reducido = self.reducir(self.J())\n delta_x = np.linalg.solve(J_reducido, f_reducido)\n\n x = np.append(self.theta, self.v)\n x = delta_x - self.reducir(x)\n return x\n\n def disp_matrix(self, mat):\n '''Representa la topología de la matriz mediante un mapa de bits'''\n #Se quiero mostrar un vector, apilo uno arriba de otro para representarlo como una matriz\n if mat.ndim == 1:\n mat = np.vstack((mat, mat))\n \n import matplotlib.pyplot as plt\n if sparse.issparse(mat):\n mat = mat.todense()\n\n mat_plot = mat != 0.0\n plt.matshow(mat_plot)\n plt.show()\n\n\n#---------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n ieee14bus = powerflow('IEEE14cdf.txt')\n\n x = ieee14bus.solve_newton()\n print(x)\n print(np.around(ieee14bus.last_P, 2))\n print(ieee14bus.P_inj)\n\n J = ieee14bus.J()\n #ieee14bus.disp_matrix(J)\n ieee14bus.disp_matrix(ieee14bus.reducir(J))\n #ieee14bus.disp_matrix(ieee14bus.Ybus)\n\n Yinv = np.linalg.inv(ieee14bus.Ybus)\n ieee14bus.disp_matrix(Yinv*10000000) #???"
] |
[
[
"scipy.sparse.issparse",
"numpy.array",
"numpy.delete",
"numpy.empty",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.pyplot.matshow",
"numpy.int32",
"numpy.any",
"numpy.argwhere",
"matplotlib.pyplot.show",
"numpy.linalg.solve",
"numpy.append",
"numpy.hstack",
"numpy.around",
"numpy.linalg.inv",
"numpy.vstack"
]
] |
imanolarrieta/RL
|
[
"072a8c328652f45e053baecd640f04adf7f84b49",
"072a8c328652f45e053baecd640f04adf7f84b49",
"072a8c328652f45e053baecd640f04adf7f84b49"
] |
[
"rlpy/Policies/SwimmerPolicy.py",
"examples/cartpole_orig/independent.py",
"examples/hiv/kifdd.py"
] |
[
"from .Policy import Policy\n\nimport numpy as np\nfrom scipy.io import loadmat\nfrom rlpy.Tools import __rlpy_location__, cartesian\nimport os\n\n\nclass SwimmerPolicy(Policy):\n\n \"\"\"\n Fixed deterministic policy for a 3-link swimmer.\n It is a discretized version from Yuval Tassas MATLAB implmentation\n The policy seems to be almost optimal.\n\n .. seealso::\n Tassa, Y., Erez, T., & Smart, B. (2007).\n *Receding Horizon Differential Dynamic Programming.*\n In Advances in Neural Information Processing Systems.\n \"\"\"\n\n default_location = os.path.join(\n __rlpy_location__,\n \"Policies\",\n \"swimmer3.mat\")\n\n def __init__(self, representation,\n filename=default_location, epsilon=0.1, seed=1):\n super(SwimmerPolicy, self).__init__(representation, seed)\n E = loadmat(filename)[\"E\"]\n self.locs = E[\"Locs\"][0][0]\n self.nlocs = E[\"Nlocs\"][0][0]\n self.scale = E[\"scale\"][0][0]\n self.gains = E[\"Gains\"][0][0]\n self.d = 3\n self.eGreedy = False\n self.epsilon = epsilon\n d = self.d\n # incidator variables for angles in a state representation\n self.angles = np.zeros(2 + self.d * 2 + 1, dtype=np.bool)\n self.angles[2:2 + self.d - 1] = True\n self.actions = cartesian((d - 1) * [[-2., 0., 2]])\n self.actions_num = len(self.actions)\n\n def pi(self, s, terminal, p_actions):\n coin = self.random_state.rand()\n if coin < self.epsilon:\n return self.random_state.choice(p_actions)\n else:\n if self.eGreedy:\n b_actions = self.representation.bestActions(\n s,\n terminal,\n p_actions)\n return self.random_state.choice(b_actions)\n else:\n return self.pi_sam(s, terminal, p_actions)\n \"\"\"\n def turnOffExploration(self):\n self.old_epsilon = self.epsilon\n self.epsilon = 0\n self.eGreedy = True\n\n def turnOnExploration(self):\n self.epsilon = self.old_epsilon\n self.eGreedy = False\n \"\"\"\n\n def pi_sam(self, s, terminal, p_actions):\n #import ipdb; ipdb.set_trace()\n # d = size(x,1);\n # x_a = [x(~angles,:); sin(x(angles,:)); cos(x(angles,:))];\n x_a = 
np.hstack(\n (s[self.angles == False],\n np.sin(s[self.angles]),\n np.cos(s[self.angles])))\n\n # M = (1:d);\n M = np.arange(len(self.angles))\n idx = np.argsort(\n np.hstack((M[self.angles == False], M[self.angles], M[self.angles] + .5)))\n # [v, ix] = sort([M(~angles) M(angles) M(angles)+0.5]);\n # x_a = x_a(ix,:);\n x_a = x_a[idx]\n Nx = np.dot(self.scale, x_a)\n Ndiff = Nx[:, None] - self.nlocs\n dist = np.sum(Ndiff ** 2, axis=0)\n w = np.argmin(dist)\n\n diff = s - self.locs[:, w]\n diff[self.angles] = np.mod(\n diff[self.angles] + np.pi,\n 2 * np.pi) - np.pi\n k = np.hstack((np.ones(1), diff.flatten()))\n u = np.dot(self.gains[:, :, w], k)\n dd = np.sum((u[None, :] - self.actions[p_actions]) ** 2, axis=1)\n aid = np.argmin(dd)\n return p_actions[aid]\n",
"\"\"\"\nCart-pole balancing with independent discretization\n\"\"\"\nfrom rlpy.Domains.FiniteTrackCartPole import FiniteCartPoleBalanceOriginal, FiniteCartPoleBalanceModern\nfrom rlpy.Agents import SARSA, Q_LEARNING\nfrom rlpy.Representations import *\nfrom rlpy.Policies import eGreedy\nfrom rlpy.Experiments import Experiment\nimport numpy as np\nfrom hyperopt import hp\n\nparam_space = {'discretization': hp.quniform(\"discretization\", 3, 5, 1),\n 'boyan_N0': hp.loguniform(\"boyan_N0\", np.log(1e1), np.log(1e5)),\n 'initial_learn_rate': hp.loguniform(\"initial_learn_rate\", np.log(5e-2), np.log(1))}\n\n\ndef make_experiment(\n exp_id=1, path=\"./Results/Temp/{domain}/{agent}/{representation}/\",\n boyan_N0=235,\n initial_learn_rate=.05,\n discretization=5):\n opt = {}\n opt[\"path\"] = path\n opt[\"exp_id\"] = exp_id\n opt[\"max_steps\"] = 30000\n opt[\"num_policy_checks\"] = 20\n opt[\"checks_per_policy\"] = 1\n\n domain = FiniteCartPoleBalanceOriginal(good_reward=0.)\n opt[\"domain\"] = domain\n representation = IndependentDiscretization(\n domain,\n discretization=discretization)\n policy = eGreedy(representation, epsilon=0.1)\n opt[\"agent\"] = Q_LEARNING(\n policy, representation, discount_factor=domain.discount_factor,\n lambda_=0.9, initial_learn_rate=initial_learn_rate,\n learn_rate_decay_mode=\"boyan\", boyan_N0=boyan_N0)\n experiment = Experiment(**opt)\n return experiment\n\nif __name__ == '__main__':\n from rlpy.Tools.run import run_profiled\n # run_profiled(make_experiment)\n experiment = make_experiment(1)\n experiment.run(visualize_learning=True)\n experiment.plot()\n # experiment.save()\n",
"from rlpy.Domains.HIVTreatment import HIVTreatment\nfrom rlpy.Agents import Q_Learning\nfrom rlpy.Representations import *\nfrom rlpy.Policies import eGreedy\nfrom rlpy.Experiments import Experiment\nimport numpy as np\nfrom hyperopt import hp\nfrom rlpy.Representations import KernelizediFDD\n\nparam_space = {\n 'kernel_resolution':\n hp.loguniform(\"kernel_resolution\", np.log(5), np.log(50)),\n 'discover_threshold':\n hp.loguniform(\n \"discover_threshold\",\n np.log(1e4),\n np.log(1e8)),\n 'lambda_': hp.uniform(\"lambda_\", 0., 1.),\n 'boyan_N0': hp.loguniform(\"boyan_N0\", np.log(1e1), np.log(1e5)),\n 'initial_learn_rate': hp.loguniform(\"initial_learn_rate\", np.log(5e-2), np.log(1))}\n\n\ndef make_experiment(\n exp_id=1, path=\"./Results/Temp/{domain}/{agent}/{representation}/\",\n discover_threshold=8948708.75,\n boyan_N0=627.12,\n lambda_=0.5433,\n initial_learn_rate=0.59812,\n kernel_resolution=24.340):\n opt = {}\n opt[\"path\"] = path\n opt[\"exp_id\"] = exp_id\n opt[\"max_steps\"] = 150000\n opt[\"num_policy_checks\"] = 30\n opt[\"checks_per_policy\"] = 1\n active_threshold = 0.01\n max_base_feat_sim = 0.5\n sparsify = 1\n\n domain = HIVTreatment()\n opt[\"domain\"] = domain\n # domain = FiniteCartPoleBalanceModern()\n kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \\\n / kernel_resolution\n representation = KernelizediFDD(domain, sparsify=sparsify,\n kernel=gaussian_kernel,\n kernel_args=[kernel_width],\n active_threshold=active_threshold,\n discover_threshold=discover_threshold,\n normalization=True,\n max_active_base_feat=10,\n max_base_feat_sim=max_base_feat_sim)\n policy = eGreedy(representation, epsilon=0.1)\n # agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,\n # lambda_=.0, learn_rate_decay_mode=\"boyan\", boyan_N0=boyan_N0)\n opt[\"agent\"] = Q_Learning(\n policy, representation, discount_factor=domain.discount_factor,\n lambda_=lambda_, initial_learn_rate=initial_learn_rate,\n 
learn_rate_decay_mode=\"boyan\", boyan_N0=boyan_N0)\n experiment = Experiment(**opt)\n return experiment\n\nif __name__ == '__main__':\n from rlpy.Tools.run import run_profiled\n run_profiled(make_experiment)\n #experiment = make_experiment(1)\n # experiment.run(visualize_learning=True)\n # experiment.plot()\n # experiment.save()\n"
] |
[
[
"numpy.sin",
"numpy.dot",
"numpy.zeros",
"numpy.argmin",
"numpy.sum",
"scipy.io.loadmat",
"numpy.ones",
"numpy.cos",
"numpy.hstack",
"numpy.mod"
],
[
"numpy.log"
],
[
"numpy.log"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.