Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
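Each record below pairs a repository name with parallel lists of commit hashes, file paths, file contents, and the API calls extracted from each file. As a minimal sketch of how such records could be iterated, assuming they are stored one JSON object per line (the file name "records.jsonl" and the loading path are hypothetical; only the column layout comes from the schema above):

import json

with open("records.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "Lagergren-Lab/tomexo"
        # hexsha, file_path, code and apis are parallel lists: entry i of each
        # describes one file at one commit of this repository.
        for sha, path, src, api_calls in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(repo, sha[:8], path, len(src), sorted(api_calls))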
Lagergren-Lab/tomexo
[ "cf3693b5feebaeea4a41fc83dd33bcb288349a62" ]
[ "gdac_preproc.py" ]
[ "#! /usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\nimport os\n\n\nfilter_silents = True\nhm_coeff = 0.02 # Threshold of mutation rate of genes to be considered\n\ninput_dir = 'gdac.broadinstitute.org_GBM.Mutation_Packager_Calls.Level_3.2016012800.0.0'\ndf_full_dir = 'gdac_firehose_gbm_full.csv' # can be set to None\nintogen_genes_dir = 'IntOGen-DriverGenes_GBM_TCGA.tsv'\ndf_filtered_dir = 'gdac_firehose_GBM.csv'\n\n# ----------------------------------------- #\n# ------- Builing the full DataFrame ------ #\n# ----------------------------------------- #\n\nfiles_list = []\nfor file_name in os.listdir(input_dir):\n if file_name.startswith('TCGA'):\n files_list.append(file_name)\nn_patients = len(files_list)\ndf_full = pd.DataFrame()\nfor i, file_name in enumerate(files_list):\n file_address = os.path.join(input_dir, file_name)\n df_input = pd.read_csv(file_address, sep='\\t', comment='#')\n if filter_silents:\n df_input = df_input[~df_input.Variant_Classification.isin(['Silent', 'RNA'])]\n for index, row in df_input.iterrows():\n df_full.at[row.Tumor_Sample_Barcode, row.Hugo_Symbol] = True\ndf_full = df_full.fillna(False).astype(int)\ndf_full = df_full.sort_index(axis='index')\ndf_full = df_full.sort_index(axis='columns')\nif df_full_dir is not None:\n df_full.to_csv(df_full_dir)\n\n# ----------------------------------------- #\n# -------------- Filtering ---------------- #\n# ----------------------------------------- #\n\nintogen_genes_list = list(pd.read_csv(intogen_genes_dir, sep='\\t', comment='#').Symbol)\nn_tumors, n_genes = df_full.shape\n# finding intogen genes:\ngene_names = list(df_full.columns)\nintogen_list = []\nfor gene in intogen_genes_list:\n if gene in gene_names:\n intogen_list.append(gene_names.index(gene))\nintogen_list = np.array(intogen_list)\n# finding highly mutated genes:\nth_hm = 0.02*n_tumors\nhm_list = np.where(df_full.sum()>=th_hm)[0]\n# Constucting the list of genes based on the filters:\ngenes_to_keep = np.intersect1d(intogen_list, hm_list)\n# Filtering and saving the resulting df:\nfiltered_df = df_full.iloc[:, genes_to_keep]\n## Comment lines for csv file:\ncomment = '# GDAC firehose dataset \\n'\ncomment += '# Number of tumors: %i \\n' % n_tumors\ncomment += '# Number of genes before filtering: %i \\n' % n_genes\ncomment += '# Number of genes after filtering: %i \\n' % len(genes_to_keep)\nwith open(df_filtered_dir, 'w') as f:\n f.write(comment)\nwith open(df_filtered_dir, 'a') as f: \n filtered_df.to_csv(f, sep=',')" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.intersect1d", "pandas.read_csv" ] ]
JuliusSchwartz/FlowMO
[ "e221d989914f906501e1ad19cd3629d88eac1785" ]
[ "Theano-master/theano/tensor/tests/test_raw_random.py" ]
[ "from __future__ import absolute_import, print_function, division\nimport numpy\nimport pickle\n\nfrom theano.tests import unittest_tools as utt\n\nfrom theano.tensor.raw_random import *\nfrom theano.tensor import (raw_random, ivector, dvector, iscalar, dcol,\n dtensor3)\nfrom theano import tensor\n\nfrom theano import compile, config, gof\n\n__docformat__ = \"restructuredtext en\"\n\n\nclass T_random_function(utt.InferShapeTester):\n def setUp(self):\n utt.seed_rng()\n\n def test_basic_usage(self):\n rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)\n assert not rf.inplace\n assert getattr(rf, 'destroy_map', {}) == {}\n\n rng_R = random_state_type()\n\n # If calling RandomFunction directly, all args have to be specified,\n # because shape will have to be moved to the end\n post_r, out = rf(rng_R, (4,), 0., 1.)\n\n assert out.type == tensor.dvector\n\n f = compile.function([rng_R], out)\n\n rng_state0 = numpy.random.RandomState(utt.fetch_seed())\n\n f_0 = f(rng_state0)\n f_1 = f(rng_state0)\n\n assert numpy.all(f_0 == f_1)\n\n def test_inplace_norun(self):\n rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector,\n inplace=True)\n assert rf.inplace\n assert getattr(rf, 'destroy_map', {}) != {}\n\n def test_args(self):\n \"\"\"Test that arguments to RandomFunction are honored\"\"\"\n rf2 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)\n rf4 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector,\n inplace=True)\n rng_R = random_state_type()\n\n # use make_node to override some of the self.args\n post_r2, out2 = rf2(rng_R, (4,), -2, 2) # NOT INPLACE\n post_r4, out4 = rf4(rng_R, (4,), -4, 4) # INPLACE\n post_r2_4, out2_4 = rf2(rng_R, (4, ), -4.0, 2) # NOT INPLACE\n post_r2_4_4, out2_4_4 = rf2(rng_R, (4, ), -4.0, 4.0) # NOT INPLACE\n\n # configure out4 to be computed inplace\n # The update expression means that the random state rng_R will\n # be maintained by post_r4\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r4,\n mutable=True)],\n [out2, out4, out2_4, out2_4_4],\n accept_inplace=True)\n\n f2, f4, f2_4, f2_4_4 = f()\n f2b, f4b, f2_4b, f2_4_4b = f()\n\n # print f2\n # print f4\n # print f2_4\n # print f2_4_4\n\n # print f2b\n # print f4b\n # print f2_4b\n # print f2_4_4b\n\n # setting bounds is same as multiplying by 2\n assert numpy.allclose(f2 * 2, f4), (f2, f4)\n\n # retrieving from non-inplace generator\n # is same as inplace one for first call\n assert numpy.allclose(f2_4_4, f4), (f2_4_4, f4)\n\n # f4 changes from call to call, that the update has worked\n assert not numpy.allclose(f4, f4b), (f4, f4b)\n\n def test_inplace_optimization(self):\n \"\"\"Test that FAST_RUN includes the random_make_inplace optimization\"\"\"\n #inplace = False\n rf2 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)\n rng_R = random_state_type()\n\n # If calling RandomFunction directly, all args have to be specified,\n # because shape will have to be moved to the end\n post_r2, out2 = rf2(rng_R, (4,), 0., 1.)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r2,\n mutable=True)],\n out2,\n mode='FAST_RUN') # DEBUG_MODE can't pass the id-based\n # test below\n\n # test that the RandomState object stays the same from function call to\n # function call, but that the values returned change from call to call.\n\n id0 = id(f[rng_R])\n val0 = f()\n assert id0 == id(f[rng_R])\n val1 = f()\n assert id0 == 
id(f[rng_R])\n\n assert not numpy.allclose(val0, val1)\n\n def test_no_inplace(self):\n \"\"\"Test that when not running inplace, the RandomState is\n not updated\"\"\"\n rf = RandomFunction('uniform', tensor.dvector)\n rng_R = random_state_type()\n\n post_r, out = rf(rng_R, (3,), 0., 1.)\n f = compile.function([rng_R], [post_r, out])\n rng = numpy.random.RandomState(utt.fetch_seed())\n\n rng0, val0 = f(rng)\n rng_ = numpy.random.RandomState(utt.fetch_seed())\n # rng should still be in a fresh state\n self.assertTrue(rng_R.type.values_eq(rng, rng_))\n # rng0 should be in an updated state\n self.assertFalse(rng_R.type.values_eq(rng, rng0))\n\n f2 = compile.function(\n [compile.In(rng_R,\n value=rng,\n update=post_r,\n mutable=False)],\n [post_r, out])\n rng2, val2 = f2()\n # rng should be in a fresh state\n self.assertTrue(rng_R.type.values_eq(rng, rng_))\n # rng2 should be in an updated state\n self.assertFalse(rng_R.type.values_eq(rng, rng2))\n # The updated state should be the same for both functions\n self.assertTrue(rng_R.type.values_eq(rng2, rng0))\n\n rng3, val3 = f2()\n # rng2 should not have changed\n self.assertTrue(rng_R.type.values_eq(rng2, rng0))\n # rng3 should be an updated again version of rng2\n self.assertFalse(rng_R.type.values_eq(rng3, rng2))\n self.assertFalse(rng_R.type.values_eq(rng3, rng))\n\n def test_random_function_ndim(self):\n \"\"\"Test that random_function helper function accepts argument ndim\"\"\"\n rng_R = random_state_type()\n\n # ndim is an optional argument indicating the length of the 'shape'\n # ndim not specified, OK\n post_out4, out4 = uniform(rng_R, (4,))\n\n # ndim specified, consistent with shape, OK\n post_out1_4, out1_4 = uniform(rng_R, (4, ), ndim=1)\n post_out2_4_4, out2_4_4 = uniform(rng_R, (4, 4), ndim=2)\n\n # ndim specified, but not compatible with shape\n self.assertRaises(ValueError, uniform, rng_R, (4,), ndim=2)\n\n f_ok = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_out2_4_4,\n mutable=True)],\n [out4, out1_4, out2_4_4],\n accept_inplace=True)\n\n # The correct cases should execute properly\n o4, o1_4, o2_4_4 = f_ok()\n\n # Check the sanity of the answers\n self.assertTrue(numpy.allclose(o4, o1_4))\n self.assertTrue(numpy.allclose(o4, o2_4_4[0]))\n\n def test_random_function_noshape_args(self):\n '''Test if random_function helper works with args but without shape'''\n rng_R = random_state_type()\n\n # No shape, default args -> OK\n post_out, out = uniform(rng_R, size=None, ndim=2)\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_out,\n mutable=True)],\n [out],\n accept_inplace=True)\n o, = f()\n\n # No shape, args that have to be broadcasted -> OK\n low = tensor.TensorType(dtype='float64',\n broadcastable=(False, True, True))()\n high = tensor.TensorType(dtype='float64',\n broadcastable=(True, True, True, False))()\n post_out2, out2 = uniform(rng_R, size=None, ndim=2, low=low, high=high)\n self.assertEqual(out2.ndim, 4)\n self.assertEqual(out2.broadcastable, (True, False, True, False))\n\n g = compile.function(\n [low,\n high,\n compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_out2,\n mutable=True)],\n [out2],\n accept_inplace=True)\n low_v = [[[3]], [[4]], [[-5]]]\n high_v = [[[[5, 8]]]]\n o2, = g(low_v, high_v)\n self.assertEqual(o2.shape, (1, 3, 1, 2))\n\n def test_random_function_noshape_noargs(self):\n '''Test if random_function helper works without args or shape'''\n rng_R = 
random_state_type()\n\n # No shape, no args -> TypeError\n self.assertRaises(TypeError, poisson, rng_R, size=None, ndim=2)\n\n def test_random_function_ndim_added(self):\n \"\"\"Test that random_function helper function accepts ndim_added as\n keyword argument\"\"\"\n # If using numpy's uniform distribution, ndim_added should be 0,\n # because the shape provided as argument is the output shape.\n # Specifying a different ndim_added will change the Op's output ndim,\n # so numpy.uniform will produce a result of incorrect shape,\n # and a ValueError should be raised.\n def ndim_added_deco(ndim_added):\n def randomfunction(random_state, size=(), low=0.0, high=0.0,\n ndim=None):\n ndim, size, bcast = raw_random._infer_ndim_bcast(ndim, size)\n if ndim_added < 0:\n bcast = bcast[:ndim_added]\n else:\n bcast = bcast + ((False,) * ndim_added)\n assert len(bcast) == ndim + ndim_added\n op = RandomFunction('uniform',\n tensor.TensorType(dtype='float64',\n broadcastable=bcast),\n ndim_added=ndim_added)\n return op(random_state, size, low, high)\n return randomfunction\n\n uni_1 = ndim_added_deco(1)\n uni_0 = ndim_added_deco(0)\n uni_m1 = ndim_added_deco(-1)\n\n rng_R = random_state_type()\n\n p_uni11, uni11 = uni_1(rng_R, size=(4,))\n p_uni12, uni12 = uni_1(rng_R, size=(3, 4))\n p_uni01, uni01 = uni_0(rng_R, size=(4,))\n p_uni02, uni02 = uni_0(rng_R, size=(3, 4))\n p_unim11, unim11 = uni_m1(rng_R, size=(4,))\n p_unim12, unim12 = uni_m1(rng_R, size=(3, 4))\n\n self.assertEqual(uni11.ndim, 2)\n self.assertEqual(uni12.ndim, 3)\n self.assertEqual(uni01.ndim, 1)\n self.assertEqual(uni02.ndim, 2)\n self.assertEqual(unim11.ndim, 0)\n self.assertEqual(unim12.ndim, 1)\n\n f11 = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=p_uni11, mutable=True)],\n [uni11], accept_inplace=True)\n f12 = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=p_uni12, mutable=True)],\n [uni12], accept_inplace=True)\n fm11 = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=p_unim11, mutable=True)],\n [unim11], accept_inplace=True)\n fm12 = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=p_unim12, mutable=True)],\n [unim12], accept_inplace=True)\n f0 = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=p_uni02, mutable=True)],\n [uni01, uni02], accept_inplace=True)\n self.assertRaises(ValueError, f11)\n self.assertRaises(ValueError, f12)\n self.assertRaises(ValueError, fm11)\n self.assertRaises(ValueError, fm12)\n u01, u02 = f0()\n self.assertTrue(numpy.allclose(u01, u02[0]))\n\n def test_uniform(self):\n \"\"\"Test that raw_random.uniform generates the same results as numpy.\"\"\"\n # Check over two calls to see if the random state is correctly updated.\n rng_R = random_state_type()\n # Use non-default parameters\n post_r, out = uniform(rng_R, (4,), -2.0, 2.0)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n numpy_val0 = numpy_rng.uniform(-2.0, 2.0, size=(4,))\n numpy_val1 = numpy_rng.uniform(-2.0, 2.0, size=(4,))\n self.assertTrue(numpy.allclose(val0, numpy_val0))\n self.assertTrue(numpy.allclose(val1, numpy_val1))\n\n def test_binomial(self):\n \"\"\"Test that raw_random.binomial 
generates the same results\n as numpy.\"\"\"\n # Check over two calls to see if the random state is correctly updated.\n rng_R = random_state_type()\n # Use non-default parameters, and larger dimensions because of\n # the integer nature of the result\n post_r, bin = binomial(rng_R, (7, 12), 5, 0.8)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [bin], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n numpy_val0 = numpy_rng.binomial(5, 0.8, size=(7, 12))\n numpy_val1 = numpy_rng.binomial(5, 0.8, size=(7, 12))\n self.assertTrue(numpy.all(val0 == numpy_val0))\n self.assertTrue(numpy.all(val1 == numpy_val1))\n\n def test_normal(self):\n \"\"\"Test that raw_random.normal generates the same results as numpy.\"\"\"\n # Check over two calls to see if the random state is correctly updated.\n rng_R = random_state_type()\n # Use non-default parameters\n post_r, out = normal(rng_R, (2, 3), 4.0, 2.0)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\n numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\n self.assertTrue(numpy.allclose(val0, numpy_val0))\n self.assertTrue(numpy.allclose(val1, numpy_val1))\n\n def test_random_integers(self):\n \"\"\"Test that raw_random.random_integers generates the same\n results as numpy.\"\"\"\n # Check over two calls to see if the random state is correctly updated.\n rng_R = random_state_type()\n # Use non-default parameters, and larger dimensions because of\n # the integer nature of the result\n post_r, out = random_integers(rng_R, (11, 8), -3, 16)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n numpy_val0 = numpy_rng.random_integers(-3, 16, size=(11, 8))\n numpy_val1 = numpy_rng.random_integers(-3, 16, size=(11, 8))\n self.assertTrue(numpy.allclose(val0, numpy_val0))\n self.assertTrue(numpy.allclose(val1, numpy_val1))\n\n def test_permutation_helper(self):\n \"\"\"Test that raw_random.permutation_helper generates the same\n results as numpy,\n and that the 'ndim_added' keyword behaves correctly.\"\"\"\n # permutation_helper needs \"ndim_added=1\", because its output\n # is one dimension more than its \"shape\" argument (and there's\n # no way to determine that automatically).\n # Check the working case, over two calls to see if the random\n # state is correctly updated.\n rf = RandomFunction(permutation_helper, tensor.imatrix, 8,\n ndim_added=1)\n rng_R = random_state_type()\n post_r, out = rf(rng_R, (7,), 8)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n # numpy_rng.permutation outputs one vector at a time,\n # so we call it iteratively to generate all the samples.\n numpy_val0 = numpy.asarray([numpy_rng.permutation(8)\n for i in range(7)])\n numpy_val1 = numpy.asarray([numpy_rng.permutation(8)\n for i in range(7)])\n self.assertTrue(numpy.all(val0 == numpy_val0))\n 
self.assertTrue(numpy.all(val1 == numpy_val1))\n\n # This call lacks \"ndim_added=1\", so ndim_added defaults to 0.\n # A ValueError should be raised.\n rf0 = RandomFunction(permutation_helper, tensor.imatrix, 8)\n post_r0, out0 = rf0(rng_R, (7,), 8)\n f0 = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r0, mutable=True)],\n [out0], accept_inplace=True)\n self.assertRaises(ValueError, f0)\n\n # Here, ndim_added is 2 instead of 1. A ValueError should be raised.\n rf2 = RandomFunction(permutation_helper, tensor.imatrix, 8,\n ndim_added=2)\n post_r2, out2 = rf2(rng_R, (7,), 8)\n f2 = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r2, mutable=True)],\n [out2], accept_inplace=True)\n self.assertRaises(ValueError, f2)\n \n def test_choice(self):\n \"\"\"Test that raw_random.choice generates the same\n results as numpy.\"\"\"\n # numpy.random.choice is only available for numpy versions >= 1.7\n major, minor, _ = numpy.version.short_version.split('.')\n if (int(major), int(minor)) < (1, 7):\n raise utt.SkipTest('choice requires at NumPy version >= 1.7 '\n '(%s)' % numpy.__version__)\n \n # Check over two calls to see if the random state is correctly updated.\n rng_R = random_state_type()\n # Use non-default parameters, and larger dimensions because of\n # the integer nature of the result\n post_r, out = choice(rng_R, (11, 8), 10, 1, 0)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n numpy_val0 = numpy_rng.choice(10, (11, 8), True, None)\n numpy_val1 = numpy_rng.choice(10, (11, 8), True, None)\n self.assertTrue(numpy.allclose(val0, numpy_val0))\n self.assertTrue(numpy.allclose(val1, numpy_val1))\n\n def test_poisson(self):\n \"\"\"Test that raw_random.poisson generates the same\n results as numpy.\"\"\"\n # Check over two calls to see if the random state is correctly updated.\n rng_R = random_state_type()\n # Use non-default parameters, and larger dimensions because of\n # the integer nature of the result\n post_r, out = poisson(rng_R, lam=5, size=(11, 8))\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0 = f()\n val1 = f()\n numpy_val0 = numpy_rng.poisson(5, size=(11, 8))\n numpy_val1 = numpy_rng.poisson(5, size=(11, 8))\n self.assertTrue(numpy.allclose(val0, numpy_val0))\n self.assertTrue(numpy.allclose(val1, numpy_val1))\n\n def test_permutation(self):\n \"\"\"Test that raw_random.permutation generates the same\n results as numpy.\"\"\"\n rng_R = random_state_type()\n post_r, out = permutation(rng_R, size=(9,), n=6)\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n # Check over two calls to see if the random state is correctly updated.\n # numpy_rng.permutation outputs one vector at a time,\n # so we call it iteratively to generate all the samples.\n val0 = f()\n val1 = f()\n numpy_val0 = numpy.asarray([numpy_rng.permutation(6)\n for i in range(9)])\n numpy_val1 = numpy.asarray([numpy_rng.permutation(6)\n for i in range(9)])\n 
self.assertTrue(numpy.all(val0 == numpy_val0))\n self.assertTrue(numpy.all(val1 == numpy_val1))\n\n # Test that we can generate a list: have size=None or ().\n for ndim in [1, None]:\n post_r, out = permutation(rng_R, n=10, size=None, ndim=ndim)\n inp = compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)\n f = theano.function([inp], out)\n o = f()\n assert o.shape == (10,)\n assert (numpy.sort(o) == numpy.arange(10)).all()\n # Wrong number of dimensions asked\n self.assertRaises(TypeError, permutation, rng_R, size=None, ndim=2)\n\n def test_multinomial(self):\n \"\"\"Test that raw_random.multinomial generates the same\n results as numpy.\"\"\"\n # Check over two calls to see if the random state is correctly updated.\n rng_R = random_state_type()\n post_r, out = multinomial(rng_R, (7, 3), 6, [0.2] * 5)\n\n f = compile.function(\n [compile.In(rng_R,\n value=numpy.random.RandomState(utt.fetch_seed()),\n update=post_r, mutable=True)],\n [out], accept_inplace=True)\n\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n val0, = f()\n val1, = f()\n numpy_val0 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))\n numpy_val1 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))\n self.assertTrue(numpy.all(val0 == numpy_val0))\n self.assertTrue(numpy.all(val1 == numpy_val1))\n\n self.assertTrue(val0.shape == (7, 3, 5))\n self.assertTrue(val1.shape == (7, 3, 5))\n\n def test_symbolic_shape(self):\n rng_R = random_state_type()\n shape = tensor.lvector()\n post_r, out = uniform(rng_R, shape, ndim=2)\n f = compile.function([rng_R, shape], out)\n rng_state0 = numpy.random.RandomState(utt.fetch_seed())\n\n assert f(rng_state0, [2, 3]).shape == (2, 3)\n assert f(rng_state0, [4, 8]).shape == (4, 8)\n\n self.assertRaises(ValueError, f, rng_state0, [4])\n self.assertRaises(ValueError, f, rng_state0, [4, 3, 4, 5])\n\n def test_mixed_shape(self):\n # Test when the provided shape is a tuple of ints and scalar vars\n rng_R = random_state_type()\n shape0 = tensor.lscalar()\n shape = (shape0, 3)\n post_r, u = uniform(rng_R, size=shape, ndim=2)\n f = compile.function([rng_R, shape0], u)\n rng_state0 = numpy.random.RandomState(utt.fetch_seed())\n\n assert f(rng_state0, 2).shape == (2, 3)\n assert f(rng_state0, 8).shape == (8, 3)\n\n post_r, v = uniform(rng_R, size=shape)\n g = compile.function([rng_R, shape0], v)\n assert g(rng_state0, 2).shape == (2, 3)\n assert g(rng_state0, 8).shape == (8, 3)\n\n def test_mixed_shape_bcastable(self):\n # Test when the provided shape is a tuple of ints and scalar vars\n rng_R = random_state_type()\n shape0 = tensor.lscalar()\n shape = (shape0, 1)\n post_r, u = uniform(rng_R, size=shape, ndim=2)\n assert u.broadcastable == (False, True)\n f = compile.function([rng_R, shape0], u)\n rng_state0 = numpy.random.RandomState(utt.fetch_seed())\n\n assert f(rng_state0, 2).shape == (2, 1)\n assert f(rng_state0, 8).shape == (8, 1)\n\n post_r, v = uniform(rng_R, size=shape)\n assert v.broadcastable == (False, True)\n g = compile.function([rng_R, shape0], v)\n assert g(rng_state0, 2).shape == (2, 1)\n assert g(rng_state0, 8).shape == (8, 1)\n\n def test_default_shape(self):\n rng_R = random_state_type()\n post_r, out = uniform(rng_R)\n f = compile.function([rng_R], [post_r, out], accept_inplace=True)\n\n rng_state0 = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n post0, val0 = f(rng_state0)\n post1, val1 = f(post0)\n numpy_val0 = numpy.asarray(numpy_rng.uniform(),\n dtype=theano.config.floatX)\n 
numpy_val1 = numpy.asarray(numpy_rng.uniform(),\n dtype=theano.config.floatX)\n\n assert numpy.all(val0 == numpy_val0)\n assert numpy.all(val1 == numpy_val1)\n\n post_r, out = multinomial(rng_R)\n g = compile.function([rng_R], [post_r, out], accept_inplace=True)\n post2, val2 = g(post1)\n numpy_val2 = numpy.asarray(numpy_rng.multinomial(n=1, pvals=[.5, .5]),\n dtype=theano.config.floatX)\n\n assert numpy.all(val2 == numpy_val2)\n\n def test_vector_arguments(self):\n rng_R = random_state_type()\n low = tensor.vector()\n post_r, out = uniform(rng_R, low=low, high=1)\n assert out.ndim == 1\n f = compile.function([rng_R, low], [post_r, out], accept_inplace=True)\n\n def as_floatX(thing):\n return numpy.asarray(thing, dtype=theano.config.floatX)\n\n rng_state0 = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n post0, val0 = f(rng_state0, [-5, .5, 0, 1])\n post1, val1 = f(post0, as_floatX([.9]))\n numpy_val0 = as_floatX(numpy_rng.uniform(low=[-5, .5, 0, 1], high=1))\n numpy_val1 = as_floatX(numpy_rng.uniform(low=as_floatX([.9]), high=1))\n\n assert numpy.all(val0 == numpy_val0)\n assert numpy.all(val1 == numpy_val1)\n\n high = tensor.vector()\n post_rb, outb = uniform(rng_R, low=low, high=high)\n assert outb.ndim == 1\n fb = compile.function([rng_R, low, high], [post_rb, outb],\n accept_inplace=True)\n\n post0b, val0b = fb(post1, [-4., -2], [-1, 0])\n post1b, val1b = fb(post0b, [-4.], [-1])\n numpy_val0b = as_floatX(numpy_rng.uniform(low=[-4., -2], high=[-1, 0]))\n numpy_val1b = as_floatX(numpy_rng.uniform(low=[-4.], high=[-1]))\n assert numpy.all(val0b == numpy_val0b)\n assert numpy.all(val1b == numpy_val1b)\n self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1, 0, 1])\n # TODO: do we want that?\n #self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1])\n\n size = tensor.lvector()\n post_rc, outc = uniform(rng_R, low=low, high=high, size=size, ndim=1)\n fc = compile.function([rng_R, low, high, size], [post_rc, outc],\n accept_inplace=True)\n post0c, val0c = fc(post1b, [-4., -2], [-1, 0], [2])\n post1c, val1c = fc(post0c, [-4.], [-1], [1])\n numpy_val0c = as_floatX(numpy_rng.uniform(low=[-4., -2], high=[-1, 0]))\n numpy_val1c = as_floatX(numpy_rng.uniform(low=[-4.], high=[-1]))\n assert numpy.all(val0c == numpy_val0c)\n assert numpy.all(val1c == numpy_val1c)\n self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1])\n self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1, 2])\n self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [2, 1])\n self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [1])\n # TODO: do we want that?\n #self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [2])\n\n def test_broadcast_arguments(self):\n rng_R = random_state_type()\n low = tensor.dvector()\n high = tensor.dcol()\n post_r, out = uniform(rng_R, low=low, high=high)\n assert out.ndim == 2\n f = compile.function([rng_R, low, high], [post_r, out],\n accept_inplace=True)\n\n rng_state0 = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n post0, val0 = f(rng_state0, [-5, .5, 0, 1], [[1.]])\n post1, val1 = f(post0, [.9], [[1.], [1.1], [1.5]])\n post2, val2 = f(post1, [-5, .5, 0, 1], [[1.], [1.1], [1.5]])\n\n numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])\n numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])\n numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1],\n high=[[1.], [1.1], [1.5]])\n\n assert numpy.all(val0 == numpy_val0), (val0, 
numpy_val0)\n assert numpy.all(val1 == numpy_val1)\n assert numpy.all(val2 == numpy_val2)\n\n def test_uniform_vector(self):\n rng_R = random_state_type()\n low = tensor.vector()\n high = tensor.vector()\n post_r, out = uniform(rng_R, low=low, high=high)\n assert out.ndim == 1\n f = compile.function([rng_R, low, high], [post_r, out],\n accept_inplace=True)\n\n def as_floatX(thing):\n return numpy.asarray(thing, dtype=theano.config.floatX)\n low_val = as_floatX([.1, .2, .3])\n high_val = as_floatX([1.1, 2.2, 3.3])\n rng = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n\n # Arguments of size (3,)\n rng0, val0 = f(rng, low_val, high_val)\n numpy_val0 = as_floatX(numpy_rng.uniform(low=low_val, high=high_val))\n assert numpy.all(val0 == numpy_val0)\n\n # arguments of size (2,)\n rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])\n numpy_val1 = as_floatX(numpy_rng.uniform(low=low_val[:-1],\n high=high_val[:-1]))\n assert numpy.all(val1 == numpy_val1)\n\n # Specifying the size explicitly\n g = compile.function([rng_R, low, high],\n uniform(rng_R, low=low, high=high, size=(3,)),\n accept_inplace=True)\n rng2, val2 = g(rng1, low_val, high_val)\n numpy_val2 = as_floatX(numpy_rng.uniform(low=low_val, high=high_val,\n size=(3,)))\n assert numpy.all(val2 == numpy_val2)\n self.assertRaises(ValueError, g, rng2, low_val[:-1], high_val[:-1])\n\n def test_binomial_vector(self):\n rng_R = random_state_type()\n n = tensor.lvector()\n prob = tensor.vector()\n post_r, out = binomial(rng_R, n=n, p=prob)\n assert out.ndim == 1\n f = compile.function([rng_R, n, prob], [post_r, out],\n accept_inplace=True)\n\n n_val = [1, 2, 3]\n prob_val = numpy.asarray([.1, .2, .3], dtype=config.floatX)\n rng = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n\n # Arguments of size (3,)\n rng0, val0 = f(rng, n_val, prob_val)\n numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)\n assert numpy.all(val0 == numpy_val0)\n\n # arguments of size (2,)\n rng1, val1 = f(rng0, n_val[:-1], prob_val[:-1])\n numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])\n assert numpy.all(val1 == numpy_val1)\n\n # Specifying the size explicitly\n g = compile.function([rng_R, n, prob],\n binomial(rng_R, n=n, p=prob, size=(3,)),\n accept_inplace=True)\n rng2, val2 = g(rng1, n_val, prob_val)\n numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))\n assert numpy.all(val2 == numpy_val2)\n self.assertRaises(ValueError, g, rng2, n_val[:-1], prob_val[:-1])\n\n def test_normal_vector(self):\n rng_R = random_state_type()\n avg = tensor.vector()\n std = tensor.vector()\n post_r, out = normal(rng_R, avg=avg, std=std)\n assert out.ndim == 1\n f = compile.function([rng_R, avg, std], [post_r, out],\n accept_inplace=True)\n\n def as_floatX(thing):\n return numpy.asarray(thing, dtype=theano.config.floatX)\n\n avg_val = [1, 2, 3]\n std_val = as_floatX([.1, .2, .3])\n rng = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n\n # Arguments of size (3,)\n rng0, val0 = f(rng, avg_val, std_val)\n numpy_val0 = as_floatX(numpy_rng.normal(loc=as_floatX(avg_val),\n scale=as_floatX(std_val)))\n assert numpy.all(val0 == numpy_val0)\n\n # arguments of size (2,)\n rng1, val1 = f(rng0, avg_val[:-1], std_val[:-1])\n numpy_val1 = numpy.asarray(numpy_rng.normal(loc=avg_val[:-1],\n scale=std_val[:-1]),\n dtype=theano.config.floatX)\n assert numpy.all(val1 == numpy_val1)\n\n # Specifying the size explicitly\n g = 
compile.function([rng_R, avg, std],\n normal(rng_R, avg=avg, std=std, size=(3,)),\n accept_inplace=True)\n rng2, val2 = g(rng1, avg_val, std_val)\n numpy_val2 = numpy.asarray(numpy_rng.normal(loc=avg_val, scale=std_val,\n size=(3,)),\n dtype=theano.config.floatX)\n assert numpy.all(val2 == numpy_val2)\n self.assertRaises(ValueError, g, rng2, avg_val[:-1], std_val[:-1])\n\n def test_random_integers_vector(self):\n rng_R = random_state_type()\n low = tensor.lvector()\n high = tensor.lvector()\n post_r, out = random_integers(rng_R, low=low, high=high)\n assert out.ndim == 1\n f = compile.function([rng_R, low, high], [post_r, out],\n accept_inplace=True)\n\n low_val = [100, 200, 300]\n high_val = [110, 220, 330]\n rng = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n\n # Arguments of size (3,)\n rng0, val0 = f(rng, low_val, high_val)\n numpy_val0 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)\n for lv, hv in zip(low_val, high_val)])\n assert numpy.all(val0 == numpy_val0)\n\n # arguments of size (2,)\n rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])\n numpy_val1 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)\n for lv, hv in zip(low_val[:-1], high_val[:-1])])\n assert numpy.all(val1 == numpy_val1)\n\n # Specifying the size explicitly\n g = compile.function([rng_R, low, high],\n random_integers(rng_R, low=low, high=high, size=(3,)),\n accept_inplace=True)\n rng2, val2 = g(rng1, low_val, high_val)\n numpy_val2 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)\n for lv, hv in zip(low_val, high_val)])\n assert numpy.all(val2 == numpy_val2)\n self.assertRaises(ValueError, g, rng2, low_val[:-1], high_val[:-1])\n\n # Vectorized permutation don't make sense: the only parameter, n,\n # controls one dimension of the returned tensor.\n\n def test_multinomial_vector(self):\n rng_R = random_state_type()\n n = tensor.lvector()\n pvals = tensor.matrix()\n post_r, out = multinomial(rng_R, n=n, pvals=pvals)\n assert out.ndim == 2\n f = compile.function([rng_R, n, pvals], [post_r, out],\n accept_inplace=True)\n\n n_val = [1, 2, 3]\n pvals_val = [[.1, .9], [.2, .8], [.3, .7]]\n pvals_val = numpy.asarray(pvals_val, dtype=config.floatX)\n rng = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n\n # Arguments of size (3,)\n rng0, val0 = f(rng, n_val, pvals_val)\n numpy_val0 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)\n for nv, pv in zip(n_val, pvals_val)])\n assert numpy.all(val0 == numpy_val0)\n\n # arguments of size (2,)\n rng1, val1 = f(rng0, n_val[:-1], pvals_val[:-1])\n numpy_val1 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)\n for nv, pv in zip(n_val[:-1], pvals_val[:-1])])\n assert numpy.all(val1 == numpy_val1)\n\n # Specifying the size explicitly\n g = compile.function([rng_R, n, pvals],\n multinomial(rng_R, n=n, pvals=pvals, size=(3,)),\n accept_inplace=True)\n rng2, val2 = g(rng1, n_val, pvals_val)\n numpy_val2 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)\n for nv, pv in zip(n_val, pvals_val)])\n assert numpy.all(val2 == numpy_val2)\n self.assertRaises(ValueError, g, rng2, n_val[:-1], pvals_val[:-1])\n\n def test_multinomial_tensor3_a(self):\n # Test the examples given in the multinomial documentation regarding\n # tensor3 objects\n rng_R = random_state_type()\n n = 9\n pvals = tensor.dtensor3()\n post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(1, -1))\n assert out.ndim == 3\n assert out.broadcastable == (True, False, False)\n\n 
f = compile.function([rng_R, pvals], [post_r, out],\n accept_inplace=True)\n\n rng = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n\n pvals_val = numpy.asarray([[[.1, .9], [.2, .8], [.3, .7]]])\n assert pvals_val.shape == (1, 3, 2)\n\n new_rng, draw = f(rng, pvals_val)\n assert draw.shape == (1, 3, 2)\n assert numpy.allclose(draw.sum(axis=2), 9)\n\n def test_multinomial_tensor3_b(self):\n # Test the examples given in the multinomial documentation regarding\n # tensor3 objects\n rng_R = random_state_type()\n n = 9\n pvals = tensor.dtensor3()\n post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(10, 1, -1))\n assert out.ndim == 4\n assert out.broadcastable == (False, True, False, False)\n\n f = compile.function([rng_R, pvals], [post_r, out],\n accept_inplace=True)\n\n rng = numpy.random.RandomState(utt.fetch_seed())\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\n\n pvals_val = numpy.asarray([[[.1, .9], [.2, .8], [.3, .7]]])\n assert pvals_val.shape == (1, 3, 2)\n\n out_rng, draw = f(rng, pvals_val)\n assert draw.shape == (10, 1, 3, 2)\n assert numpy.allclose(draw.sum(axis=3), 9)\n\n def test_dtype(self):\n rng_R = random_state_type()\n low = tensor.lscalar()\n high = tensor.lscalar()\n post_r, out = random_integers(rng_R, low=low, high=high, size=(20, ),\n dtype='int8')\n assert out.dtype == 'int8'\n f = compile.function([rng_R, low, high], [post_r, out])\n\n rng = numpy.random.RandomState(utt.fetch_seed())\n rng0, val0 = f(rng, 0, 9)\n assert val0.dtype == 'int8'\n\n rng1, val1 = f(rng0, 255, 257)\n assert val1.dtype == 'int8'\n assert numpy.all(abs(val1) <= 1)\n\n def test_dtype_normal_uniform_687(self):\n # Regression test for #687.\n rng_R = random_state_type()\n assert uniform(rng_R, low=tensor.constant(0, dtype='float64'),\n dtype='float32')[1].dtype == 'float32'\n\n assert normal(rng_R, avg=tensor.constant(0, dtype='float64'),\n dtype='float32')[1].dtype == 'float32'\n\n def setUp(self):\n super(T_random_function, self).setUp()\n\n def test_infer_shape(self):\n rng_R = random_state_type()\n rng_R_val = numpy.random.RandomState(utt.fetch_seed())\n\n # no shape specified, default args\n post_r, out = uniform(rng_R)\n self._compile_and_check([rng_R], [out], [rng_R_val],\n RandomFunction)\n\n post_r, out = uniform(rng_R, size=None, ndim=2)\n self._compile_and_check([rng_R], [out], [rng_R_val],\n RandomFunction)\n\n \"\"\"\n #infer_shape don't work for multinomial.\n #The parameter ndim_added is set to 1 and in this case, the infer_shape\n #inplementation don't know how to infer the shape\n post_r, out = multinomial(rng_R)\n\n self._compile_and_check([rng_R], [out], [rng_R_val],\n RandomFunction)\n \"\"\"\n\n # no shape specified, args have to be broadcasted\n low = tensor.TensorType(dtype='float64',\n broadcastable=(False, True, True))()\n high = tensor.TensorType(dtype='float64',\n broadcastable=(True, True, True, False))()\n post_r, out = uniform(rng_R, size=None, ndim=2, low=low, high=high)\n low_val = [[[3]], [[4]], [[-5]]]\n high_val = [[[[5, 8]]]]\n self._compile_and_check([rng_R, low, high], [out],\n [rng_R_val, low_val, high_val],\n RandomFunction)\n\n # multinomial, specified shape\n \"\"\"\n #infer_shape don't work for multinomial\n n = iscalar()\n pvals = dvector()\n size_val = (7, 3)\n n_val = 6\n pvals_val = [0.2] * 5\n post_r, out = multinomial(rng_R, size=size_val, n=n, pvals=pvals,\n ndim=2)\n\n self._compile_and_check([rng_R, n, pvals], [out],\n [rng_R_val, n_val, pvals_val],\n RandomFunction)\n 
\"\"\"\n\n # uniform vector low and high\n low = dvector()\n high = dvector()\n post_r, out = uniform(rng_R, low=low, high=1)\n low_val = [-5, .5, 0, 1]\n self._compile_and_check([rng_R, low], [out], [rng_R_val, low_val],\n RandomFunction)\n\n low_val = [.9]\n self._compile_and_check([rng_R, low], [out], [rng_R_val, low_val],\n RandomFunction)\n\n post_r, out = uniform(rng_R, low=low, high=high)\n low_val = [-4., -2]\n high_val = [-1, 0]\n self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,\n high_val], RandomFunction)\n\n low_val = [-4.]\n high_val = [-1]\n self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,\n high_val], RandomFunction)\n\n # uniform broadcasting low and high\n low = dvector()\n high = dcol()\n post_r, out = uniform(rng_R, low=low, high=high)\n low_val = [-5, .5, 0, 1]\n high_val = [[1.]]\n self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,\n high_val], RandomFunction)\n\n low_val = [.9]\n high_val = [[1.], [1.1], [1.5]]\n self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,\n high_val], RandomFunction)\n\n low_val = [-5, .5, 0, 1]\n high_val = [[1.], [1.1], [1.5]]\n self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,\n high_val], RandomFunction)\n\n # uniform with vector slice\n low = dvector()\n high = dvector()\n post_r, out = uniform(rng_R, low=low, high=high)\n low_val = [.1, .2, .3]\n high_val = [1.1, 2.2, 3.3]\n size_val = (3, )\n self._compile_and_check([rng_R, low, high], [out],\n [rng_R_val, low_val[:-1],\n high_val[:-1]], RandomFunction)\n\n # uniform with explicit size and size implicit in parameters\n # NOTE 1: Would it be desirable that size could also be supplied\n # as a Theano variable?\n post_r, out = uniform(rng_R, size=size_val, low=low, high=high)\n self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,\n high_val], RandomFunction)\n\n # binomial with vector slice\n n = ivector()\n prob = dvector()\n post_r, out = binomial(rng_R, n=n, p=prob)\n n_val = [1, 2, 3]\n prob_val = [.1, .2, .3]\n size_val = (3, )\n self._compile_and_check([rng_R, n, prob], [out],\n [rng_R_val, n_val[:-1],\n prob_val[:-1]], RandomFunction)\n\n # binomial with explicit size and size implicit in parameters\n # cf. NOTE 1\n post_r, out = binomial(rng_R, n=n, p=prob, size=size_val)\n self._compile_and_check([rng_R, n, prob], [out], [rng_R_val, n_val,\n prob_val], RandomFunction)\n\n # normal with vector slice\n avg = dvector()\n std = dvector()\n post_r, out = normal(rng_R, avg=avg, std=std)\n avg_val = [1, 2, 3]\n std_val = [.1, .2, .3]\n size_val = (3, )\n self._compile_and_check([rng_R, avg, std], [out],\n [rng_R_val, avg_val[:-1],\n std_val[:-1]], RandomFunction)\n\n # normal with explicit size and size implicit in parameters\n # cf. 
NOTE 1\n post_r, out = normal(rng_R, avg=avg, std=std, size=size_val)\n self._compile_and_check([rng_R, avg, std], [out], [rng_R_val, avg_val,\n std_val], RandomFunction)\n\n # multinomial with tensor-3 probabilities\n \"\"\"\n #multinomial infer_shape don't work.\n pvals = dtensor3()\n n = iscalar()\n post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(1, -1))\n pvals_val = [[[.1, .9], [.2, .8], [.3, .7]]]\n n_val = 9\n\n self._compile_and_check([rng_R, n, pvals], [out],\n [rng_R_val, n_val,\n pvals_val], RandomFunction)\n\n post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(10, 1, -1))\n\n self._compile_and_check([rng_R, n, pvals], [out],\n [rng_R_val, n_val,\n pvals_val], RandomFunction)\n \"\"\"\n\n def test_pkl(self):\n # Test pickling of RandomFunction.\n # binomial was created by calling RandomFunction on a string,\n # random_integers by calling it on a function.\n rng_r = random_state_type()\n mode = None\n if theano.config.mode in [\"DEBUG_MODE\", \"DebugMode\"]:\n mode = 'FAST_COMPILE'\n post_bin_r, bin_sample = binomial(rng_r, (3, 5), 1, .3)\n f = theano.function([rng_r], [post_bin_r, bin_sample], mode=mode)\n pkl_f = pickle.dumps(f)\n\n post_int_r, int_sample = random_integers(rng_r, (3, 5), -1, 8)\n g = theano.function([rng_r], [post_int_r, int_sample], mode=mode)\n pkl_g = pickle.dumps(g)\n pickle.loads(pkl_g)\n\n\nif __name__ == '__main__':\n from theano.tests import main\n main(\"test_raw_random\")\n" ]
[ [ "numpy.asarray", "numpy.allclose", "numpy.arange", "numpy.sort", "numpy.version.short_version.split", "numpy.all" ] ]
NogaBar/open_lth
[ "09bcea21e69708549ecff2659690162a6c45f9ca", "09bcea21e69708549ecff2659690162a6c45f9ca" ]
[ "platforms/base.py", "datasets/base.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport abc\nfrom dataclasses import dataclass\nimport os\nimport torch\n\nfrom foundations.hparams import Hparams\nimport platforms.platform\n\n\n@dataclass\nclass Platform(Hparams):\n num_workers: int = 0\n\n _name: str = 'Platform Hyperparameters'\n _description: str = 'Hyperparameters that control the plaform on which the job is run.'\n _num_workers: str = 'The number of worker threads to use for data loading.'\n gpu: str = '7'\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu\n\n # Manage the available devices and the status of distributed training.\n\n @property\n def device_str(self):\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu\n # GPU device.\n if torch.cuda.is_available() and torch.cuda.device_count() > 0:\n device_ids = ','.join([str(x) for x in range(torch.cuda.device_count())])\n return f'cuda:{device_ids}'\n\n # CPU device.\n else:\n return 'cpu'\n\n @property\n def torch_device(self):\n return torch.device(self.device_str)\n\n @property\n def is_parallel(self):\n os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu\n return torch.cuda.is_available() and torch.cuda.device_count() > 1\n\n @property\n def is_distributed(self):\n return False\n\n @property\n def rank(self):\n return 0\n\n @property\n def world_size(self):\n return 1\n\n @property\n def is_primary_process(self):\n return not self.is_distributed or self.rank == 0\n\n def barrier(self):\n pass\n\n # Manage the location of important files.\n\n @property\n @abc.abstractmethod\n def root(self):\n \"\"\"The root directory where data will be stored.\"\"\"\n pass\n\n @property\n @abc.abstractmethod\n def dataset_root(self):\n \"\"\"The root directory where datasets will be stored.\"\"\"\n pass\n\n @property\n @abc.abstractmethod\n def imagenet_root(self):\n \"\"\"The directory where imagenet will be stored.\"\"\"\n pass\n\n # Mediate access to files.\n @staticmethod\n def open(file, mode='r'):\n return open(file, mode)\n\n @staticmethod\n def exists(file):\n return os.path.exists(file)\n\n @staticmethod\n def makedirs(path):\n return os.makedirs(path)\n\n @staticmethod\n def isdir(path):\n return os.path.isdir(path)\n\n @staticmethod\n def listdir(path):\n return os.listdir(path)\n\n @staticmethod\n def save_model(model, path, *args, **kwargs):\n return torch.save(model, path, *args, **kwargs)\n\n @staticmethod\n def load_model(path, *args, **kwargs):\n return torch.load(path, *args, **kwargs)\n\n # Run jobs. Called by the command line interface.\n\n def run_job(self, f):\n \"\"\"Run a function that trains a network.\"\"\"\n old_platform = platforms.platform._PLATFORM\n platforms.platform._PLATFORM = self\n f()\n platforms.platform._PLATFORM = old_platform\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport abc\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torchvision\n\nfrom platforms.platform import get_platform\n\n\nclass Dataset(abc.ABC, torch.utils.data.Dataset):\n \"\"\"The base class for all datasets in this framework.\"\"\"\n\n @staticmethod\n @abc.abstractmethod\n def num_test_examples() -> int:\n pass\n\n @staticmethod\n @abc.abstractmethod\n def num_train_examples() -> int:\n pass\n\n @staticmethod\n @abc.abstractmethod\n def num_classes() -> int:\n pass\n\n @staticmethod\n @abc.abstractmethod\n def get_train_set(use_augmentation: bool) -> 'Dataset':\n pass\n\n @staticmethod\n @abc.abstractmethod\n def get_test_set() -> 'Dataset':\n pass\n\n def __init__(self, examples: np.ndarray, labels, dataset: torch.utils.data.Dataset = None):\n \"\"\"Create a dataset object.\n\n examples is a numpy array of the examples (or the information necessary to get them).\n Only the first dimension matters for use in this abstract class.\n\n labels is a numpy array of the labels. Each entry is a zero-indexed integer encoding\n of the label.\n \"\"\"\n if examples is None:\n self.dataset = dataset\n self._examples = None\n self._labels = None\n self._subsampled = False\n return\n\n if examples.shape[0] != labels.shape[0]:\n raise ValueError('Different number of examples ({}) and labels ({}).'.format(\n examples.shape[0], examples.shape[0]))\n self._examples = examples\n self._labels = labels if isinstance(labels, np.ndarray) else labels.numpy()\n self._subsampled = False\n\n def randomize_labels(self, seed: int, fraction: float) -> None:\n \"\"\"Randomize the labels of the specified fraction of the dataset.\"\"\"\n if self._labels is None:\n raise NotImplementedError\n num_to_randomize = np.ceil(len(self._labels) * fraction).astype(int)\n randomized_labels = np.random.RandomState(seed=seed).randint(self.num_classes(), size=num_to_randomize)\n examples_to_randomize = np.random.RandomState(seed=seed+1).permutation(len(self._labels))[:num_to_randomize]\n self._labels[examples_to_randomize] = randomized_labels\n\n def subsample(self, seed: int, fraction: float) -> None:\n \"\"\"Subsample the dataset.\"\"\"\n if self._labels is None:\n raise NotImplementedError\n if self._subsampled:\n raise ValueError('Cannot subsample more than once.')\n self._subsampled = True\n\n examples_to_retain = np.ceil(len(self._labels) * fraction).astype(int)\n examples_to_retain = np.random.RandomState(seed=seed+1).permutation(len(self._labels))[:examples_to_retain]\n self._examples = self._examples[examples_to_retain]\n self._labels = self._labels[examples_to_retain]\n\n def __len__(self):\n if self._labels is None:\n return len(self.dataset)\n return self._labels.size\n\n def __getitem__(self, index):\n \"\"\"If there is custom logic for example loading, this method should be overridden.\"\"\"\n if not self._labels:\n return self.dataset[index]\n return self._examples[index], self._labels[index]\n\n\nclass ImageDataset(Dataset):\n @abc.abstractmethod\n def example_to_image(self, example: np.ndarray) -> Image: pass\n\n def __init__(self, examples, labels, dataset: torch.utils.data.Dataset = None, image_transforms=None, tensor_transforms=None,\n joint_image_transforms=None, joint_tensor_transforms=None):\n super(ImageDataset, self).__init__(examples, labels, dataset)\n self._image_transforms = image_transforms or []\n self._tensor_transforms = 
tensor_transforms or []\n self._tensor_transforms = tensor_transforms or []\n self._joint_image_transforms = joint_image_transforms or []\n self._joint_tensor_transforms = joint_tensor_transforms or []\n\n self._composed = None\n\n def __getitem__(self, index):\n if not self._composed:\n # self._composed = torchvision.transforms.Compose(\n # self._image_transforms + [torchvision.transforms.ToTensor()] + self._tensor_transforms)\n self._composed = torchvision.transforms.Compose([torchvision.transforms.ToTensor()] +\n self._image_transforms + self._tensor_transforms)\n if self._labels is None:\n example, label = self.dataset[index]\n else:\n example, label = self._examples[index], self._labels[index]\n example = self.example_to_image(example)\n for t in self._joint_image_transforms: example, label = t(example, label)\n example = self._composed(example)\n for t in self._joint_tensor_transforms: example, label = t(example, label)\n return example, label\n\n def blur(self, blur_factor: float) -> None:\n \"\"\"Add a transformation that blurs the image by downsampling by blur_factor.\"\"\"\n\n def blur_transform(image):\n size = list(image.size)\n image = torchvision.transforms.Resize([int(s / blur_factor) for s in size])(image)\n image = torchvision.transforms.Resize(size)(image)\n return image\n self._image_transforms.append(blur_transform)\n\n def unsupervised_rotation(self, seed: int):\n \"\"\"Switch the task to unsupervised rotation.\"\"\"\n\n self._labels = np.random.RandomState(seed=seed).randint(4, size=self._labels.size)\n\n def rotate_transform(image, label):\n return torchvision.transforms.RandomRotation(label*90)(image), label\n self._joint_image_transforms.append(rotate_transform)\n\n\nclass ShuffleSampler(torch.utils.data.sampler.Sampler):\n def __init__(self, num_examples):\n self._num_examples = num_examples\n self._seed = -1\n\n def __iter__(self):\n if self._seed == -1:\n indices = list(range(self._num_examples))\n elif self._seed is None:\n indices = torch.randperm(self._num_examples).tolist()\n else:\n g = torch.Generator()\n if self._seed is not None: g.manual_seed(self._seed)\n indices = torch.randperm(self._num_examples, generator=g).tolist()\n\n return iter(indices)\n\n def __len__(self):\n return self._num_examples\n\n def shuffle_dataorder(self, seed: int):\n self._seed = seed\n\n\nclass DistributedShuffleSampler(torch.utils.data.distributed.DistributedSampler):\n def __init__(self, dataset):\n super(DistributedShuffleSampler, self).__init__(\n dataset, num_replicas=get_platform().world_size, rank=get_platform().rank)\n self._seed = -1\n\n def __iter__(self):\n indices = torch.arange(len(self.dataset))\n\n if self._seed != -1:\n g = torch.Generator()\n g.manual_seed(self._seed or np.random.randint(10e6))\n perm = torch.randperm(len(indices), generator=g)\n indices = indices[perm]\n\n indices = indices[self.rank:self.total_size:self.num_replicas]\n return iter(indices.tolist())\n\n def shuffle_dataorder(self, seed: int):\n self._seed = seed\n\n\nclass DataLoader(torch.utils.data.DataLoader):\n \"\"\"A wrapper that makes it possible to access the custom shuffling logic.\"\"\"\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, pin_memory: bool = True):\n if get_platform().is_distributed:\n self._sampler = DistributedShuffleSampler(dataset)\n else:\n self._sampler = ShuffleSampler(len(dataset))\n\n self._iterations_per_epoch = np.ceil(len(dataset) / batch_size).astype(int)\n\n if get_platform().is_distributed:\n batch_size //= 
get_platform().world_size\n num_workers //= get_platform().world_size\n\n super(DataLoader, self).__init__(\n dataset, batch_size, sampler=self._sampler, num_workers=num_workers,\n pin_memory=pin_memory and get_platform().torch_device.type == 'cuda')\n\n def shuffle(self, seed: int):\n self._sampler.shuffle_dataorder(seed)\n\n @property\n def iterations_per_epoch(self):\n return self._iterations_per_epoch\n" ]
[ [ "torch.device", "torch.save", "torch.cuda.device_count", "torch.cuda.is_available", "torch.load" ], [ "torch.randperm", "torch.Generator", "numpy.random.randint", "numpy.random.RandomState" ] ]
adriensas/flair
[ "f01b0e7ff9a87d3862acae50aeaffdc8e8b8ac21", "f01b0e7ff9a87d3862acae50aeaffdc8e8b8ac21" ]
[ "flair/trainers/trainer.py", "flair/models/sequence_tagger_utils/viterbi.py" ]
[ "import copy\nimport datetime\nimport inspect\nimport logging\nimport os\nimport sys\nimport time\nimport warnings\nfrom inspect import signature\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Type, Union, cast\n\nimport torch\nfrom torch.optim.sgd import SGD\nfrom torch.utils.data.dataset import ConcatDataset\n\nfrom flair.nn import Model\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\nimport random\n\nfrom torch.optim.lr_scheduler import OneCycleLR # type: ignore\n\nimport flair\nimport flair.nn\nfrom flair.data import Corpus, Dictionary, MultiCorpus, _len_dataset\nfrom flair.datasets import DataLoader\nfrom flair.optim import ExpAnnealLR, LinearSchedulerWithWarmup\nfrom flair.training_utils import (\n AnnealOnPlateau,\n WeightExtractor,\n add_file_handler,\n identify_dynamic_embeddings,\n init_output_file,\n log_line,\n store_embeddings,\n)\n\nlog = logging.getLogger(\"flair\")\n\n\nclass ModelTrainer:\n def __init__(\n self,\n model: flair.nn.Model,\n corpus: Corpus,\n ):\n \"\"\"\n Initialize a model trainer\n :param model: The model that you want to train. The model should inherit from flair.nn.Model # noqa: E501\n :param corpus: The dataset used to train the model, should be of type Corpus\n \"\"\"\n self.model: flair.nn.Model = model\n self.corpus: Corpus = corpus\n\n @staticmethod\n def check_for_and_delete_previous_best_models(base_path):\n all_best_model_names = [filename for filename in os.listdir(base_path) if filename.startswith(\"best-model\")]\n if len(all_best_model_names) != 0:\n warnings.warn(\n \"There should be no best model saved at epoch 1 except there \"\n \"is a model from previous trainings\"\n \" in your training folder. All previous best models will be deleted.\"\n )\n for single_model in all_best_model_names:\n previous_best_path = os.path.join(base_path, single_model)\n if os.path.exists(previous_best_path):\n os.remove(previous_best_path)\n\n def train(\n self,\n base_path: Union[Path, str],\n learning_rate: float = 0.1,\n mini_batch_size: int = 32,\n eval_batch_size: int = None,\n mini_batch_chunk_size: Optional[int] = None,\n max_epochs: int = 100,\n train_with_dev: bool = False,\n train_with_test: bool = False,\n monitor_train: bool = False,\n monitor_test: bool = False,\n main_evaluation_metric: Tuple[str, str] = (\"micro avg\", \"f1-score\"),\n scheduler=AnnealOnPlateau,\n anneal_factor: float = 0.5,\n patience: int = 3,\n min_learning_rate: float = 0.0001,\n initial_extra_patience: int = 0,\n optimizer: Union[torch.optim.Optimizer, Type[torch.optim.Optimizer]] = SGD,\n cycle_momentum: bool = False,\n warmup_fraction: float = 0.1,\n embeddings_storage_mode: str = \"cpu\",\n checkpoint: bool = False,\n save_final_model: bool = True,\n anneal_with_restarts: bool = False,\n anneal_with_prestarts: bool = False,\n anneal_against_dev_loss: bool = False,\n batch_growth_annealing: bool = False,\n shuffle: bool = True,\n param_selection_mode: bool = False,\n write_weights: bool = False,\n num_workers: Optional[int] = None,\n sampler=None,\n use_amp: bool = False,\n amp_opt_level: str = \"O1\",\n eval_on_train_fraction: float = 0.0,\n eval_on_train_shuffle: bool = False,\n save_model_each_k_epochs: int = 0,\n tensorboard_comment: str = \"\",\n use_swa: bool = False,\n use_final_model_for_eval: bool = False,\n gold_label_dictionary_for_eval: Optional[Dictionary] = None,\n create_file_logs: bool = True,\n create_loss_file: bool = True,\n epoch: int = 0,\n use_tensorboard: bool = False,\n tensorboard_log_dir=None,\n 
metrics_for_tensorboard=[],\n optimizer_state_dict: Optional[Dict[str, Any]] = None,\n scheduler_state_dict: Optional[Dict[str, Any]] = None,\n save_optimizer_state: bool = False,\n **kwargs,\n ) -> dict:\n \"\"\"\n Trains any class that implements the flair.nn.Model interface.\n :param base_path: Main path to which all output during training is logged and models are saved # noqa: E501\n :param learning_rate: Initial learning rate (or max, if scheduler is OneCycleLR) # noqa: E501\n :param mini_batch_size: Size of mini-batches during training\n :param mini_batch_chunk_size: If mini-batches are larger than this number, they get broken down into chunks of this size for processing purposes # noqa: E501\n :param max_epochs: Maximum number of epochs to train. Terminates training if this number is surpassed. # noqa: E501\n :param scheduler: The learning rate scheduler to use\n :param checkpoint: If True, a full checkpoint is saved at end of each epoch # noqa: E501\n :param cycle_momentum: If scheduler is OneCycleLR, whether the scheduler should cycle also the momentum # noqa: E501\n :param anneal_factor: The factor by which the learning rate is annealed\n :param patience: Patience is the number of epochs with no improvement the Trainer waits # noqa: E501\n until annealing the learning rate\n :param min_learning_rate: If the learning rate falls below this threshold, training terminates # noqa: E501\n :param warmup_fraction: Fraction of warmup steps if the scheduler is LinearSchedulerWithWarmup # noqa: E501\n :param train_with_dev: If True, the data from dev split is added to the training data # noqa: E501\n :param train_with_test: If True, the data from test split is added to the training data # noqa: E501\n :param monitor_train: If True, training data is evaluated at end of each epoch\n :param monitor_test: If True, test data is evaluated at end of each epoch\n :param embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed), # noqa: E501\n 'cpu' (embeddings are stored on CPU) or 'gpu' (embeddings are stored on GPU)\n :param save_final_model: If True, final model is saved\n :param anneal_with_restarts: If True, the last best model is restored when annealing the learning rate # noqa: E501\n :param shuffle: If True, data is shuffled during training\n :param param_selection_mode: If True, testing is performed against dev data. Use this mode when doing # noqa: E501\n parameter selection.\n :param num_workers: Number of workers in your data loader.\n :param sampler: You can pass a data sampler here for special sampling of data. # noqa: E501\n :param eval_on_train_fraction: the fraction of train data to do the evaluation on, # noqa: E501\n if 0. the evaluation is not performed on fraction of training data,\n if 'dev' the size is determined from dev set size\n :param eval_on_train_shuffle: if True the train data fraction is determined on the start of training # noqa: E501\n and kept fixed during training, otherwise it's sampled at beginning of each epoch # noqa: E501\n :param save_model_each_k_epochs: Each k epochs, a model state will be written out. If set to '5', a model will # noqa: E501\n be saved each 5 epochs. 
Default is 0 which means no model saving.\n :param main_evaluation_metric: Type of metric to use for best model tracking and learning rate scheduling (if dev data is available, otherwise loss will be used), currently only applicable for text_classification_model # noqa: E501\n :param tensorboard_comment: Comment to use for tensorboard logging\n :param create_file_logs: If True, the logs will also be stored in a file 'training.log' in the model folder # noqa: E501\n :param create_loss_file: If True, the loss will be writen to a file 'loss.tsv' in the model folder # noqa: E501\n :param optimizer: The optimizer to use (typically SGD or Adam)\n :param epoch: The starting epoch (normally 0 but could be higher if you continue training model) # noqa: E501\n :param use_tensorboard: If True, writes out tensorboard information\n :param tensorboard_log_dir: Directory into which tensorboard log files will be written # noqa: E501\n :param metrics_for_tensorboard: List of tuples that specify which metrics (in addition to the main_score) shall be plotted in tensorboard, could be [(\"macro avg\", 'f1-score'), (\"macro avg\", 'precision')] for example # noqa: E501\n :param kwargs: Other arguments for the Optimizer\n :return:\n \"\"\"\n\n # create a model card for this model with Flair and PyTorch version\n model_card: Dict[str, Any] = {\n \"flair_version\": flair.__version__,\n \"pytorch_version\": torch.__version__,\n }\n\n # also record Transformers version if library is loaded\n try:\n import transformers\n\n model_card[\"transformers_version\"] = transformers.__version__\n except ImportError:\n pass\n\n # remember all parameters used in train() call\n local_variables = locals()\n training_parameters = {}\n for parameter in signature(self.train).parameters:\n training_parameters[parameter] = local_variables[parameter]\n model_card[\"training_parameters\"] = training_parameters\n\n # add model card to model\n self.model.model_card = model_card\n assert self.corpus.train\n if use_tensorboard:\n try:\n from torch.utils.tensorboard import SummaryWriter\n\n if tensorboard_log_dir is not None and not os.path.exists(tensorboard_log_dir):\n os.mkdir(tensorboard_log_dir)\n writer = SummaryWriter(log_dir=tensorboard_log_dir, comment=tensorboard_comment)\n log.info(f\"tensorboard logging path is {tensorboard_log_dir}\")\n\n except ImportError:\n log_line(log)\n log.warning(\"ATTENTION! PyTorch >= 1.1.0 and pillow are required\" \"for TensorBoard support!\")\n log_line(log)\n use_tensorboard = False\n pass\n\n if use_amp:\n if sys.version_info < (3, 0):\n raise RuntimeError(\"Apex currently only supports Python 3. Aborting.\")\n if amp is None:\n raise RuntimeError(\n \"Failed to import apex. 
Please install apex from \"\n \"https://www.github.com/nvidia/apex \"\n \"to enable mixed-precision training.\"\n )\n\n if not eval_batch_size:\n eval_batch_size = mini_batch_size\n if mini_batch_chunk_size is None:\n mini_batch_chunk_size = mini_batch_size\n if learning_rate < min_learning_rate:\n min_learning_rate = learning_rate / 10\n\n initial_learning_rate = learning_rate\n\n base_path = Path(base_path)\n base_path.mkdir(exist_ok=True, parents=True)\n\n self.check_for_and_delete_previous_best_models(base_path)\n\n # determine what splits (train, dev, test) to evaluate and log\n log_train = True if monitor_train else False\n log_test = True if (not param_selection_mode and self.corpus.test and monitor_test) else False\n log_dev = False if train_with_dev or not self.corpus.dev else True\n log_train_part = True if (eval_on_train_fraction == \"dev\" or eval_on_train_fraction > 0.0) else False\n\n if log_train_part:\n train_part_size = (\n _len_dataset(self.corpus.dev)\n if eval_on_train_fraction == \"dev\"\n else int(_len_dataset(self.corpus.train) * eval_on_train_fraction)\n )\n\n assert train_part_size > 0\n if not eval_on_train_shuffle:\n train_part_indices = list(range(train_part_size))\n train_part = torch.utils.data.dataset.Subset(self.corpus.train, train_part_indices)\n\n # prepare loss logging file and set up header\n loss_txt = init_output_file(base_path, \"loss.tsv\") if create_loss_file else None\n\n weight_extractor = WeightExtractor(base_path)\n\n # if optimizer class is passed, instantiate:\n if not isinstance(optimizer, torch.optim.Optimizer):\n kwargs[\"lr\"] = learning_rate\n optimizer = optimizer(self.model.parameters(), **kwargs)\n\n if use_swa:\n import torchcontrib\n\n optimizer = torchcontrib.optim.SWA(optimizer, swa_start=10, swa_freq=5, swa_lr=learning_rate)\n\n if use_amp:\n self.model, optimizer = amp.initialize(self.model, optimizer, opt_level=amp_opt_level)\n\n optimizer = cast(torch.optim.Optimizer, optimizer)\n\n # load existing optimizer state dictionary if it exists\n if optimizer_state_dict:\n optimizer.load_state_dict(optimizer_state_dict)\n\n # minimize training loss if training with dev data, else maximize dev score\n anneal_mode = \"min\" if train_with_dev or anneal_against_dev_loss else \"max\"\n best_validation_score = 100000000000 if train_with_dev or anneal_against_dev_loss else 0.0\n\n dataset_size = _len_dataset(self.corpus.train)\n if train_with_dev:\n dataset_size += _len_dataset(self.corpus.dev)\n\n # if scheduler is passed as a class, instantiate\n if inspect.isclass(scheduler):\n if scheduler == OneCycleLR:\n scheduler = OneCycleLR(\n optimizer,\n max_lr=learning_rate,\n steps_per_epoch=dataset_size // mini_batch_size + 1,\n epochs=max_epochs - epoch,\n # if we load a checkpoint, we have already trained for epoch\n pct_start=0.0,\n cycle_momentum=cycle_momentum,\n )\n elif scheduler == LinearSchedulerWithWarmup:\n steps_per_epoch = (dataset_size + mini_batch_size - 1) / mini_batch_size\n num_train_steps = int(steps_per_epoch * max_epochs)\n num_warmup_steps = int(num_train_steps * warmup_fraction)\n\n scheduler = LinearSchedulerWithWarmup(\n optimizer,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n )\n else:\n scheduler = scheduler(\n optimizer,\n factor=anneal_factor,\n patience=patience,\n initial_extra_patience=initial_extra_patience,\n mode=anneal_mode,\n verbose=True,\n )\n\n # load existing scheduler state dictionary if it exists\n if scheduler_state_dict:\n 
scheduler.load_state_dict(scheduler_state_dict)\n\n # update optimizer and scheduler in model card\n model_card[\"training_parameters\"][\"optimizer\"] = optimizer\n model_card[\"training_parameters\"][\"scheduler\"] = scheduler\n\n if isinstance(scheduler, OneCycleLR) and batch_growth_annealing:\n raise ValueError(\"Batch growth with OneCycle policy is not implemented.\")\n\n train_data = self.corpus.train\n\n # if training also uses dev/train data, include in training set\n if train_with_dev or train_with_test:\n\n parts = [self.corpus.train]\n if train_with_dev and self.corpus.dev:\n parts.append(self.corpus.dev)\n if train_with_test and self.corpus.test:\n parts.append(self.corpus.test)\n\n train_data = ConcatDataset(parts)\n\n # initialize sampler if provided\n if sampler is not None:\n # init with default values if only class is provided\n if inspect.isclass(sampler):\n sampler = sampler()\n # set dataset to sample from\n sampler.set_dataset(train_data)\n shuffle = False\n\n dev_score_history = []\n dev_loss_history = []\n train_loss_history = []\n\n micro_batch_size = mini_batch_chunk_size\n\n # this field stores the names of all dynamic embeddings in the model (determined after first forward pass)\n dynamic_embeddings = None\n\n # At any point you can hit Ctrl + C to break out of training early.\n try:\n if create_file_logs:\n log_handler = add_file_handler(log, base_path / \"training.log\")\n else:\n log_handler = None\n\n log_line(log)\n log.info(f'Model: \"{self.model}\"')\n log_line(log)\n log.info(f'Corpus: \"{self.corpus}\"')\n log_line(log)\n log.info(\"Parameters:\")\n log.info(f' - learning_rate: \"{learning_rate}\"')\n log.info(f' - mini_batch_size: \"{mini_batch_size}\"')\n log.info(f' - patience: \"{patience}\"')\n log.info(f' - anneal_factor: \"{anneal_factor}\"')\n log.info(f' - max_epochs: \"{max_epochs}\"')\n log.info(f' - shuffle: \"{shuffle}\"')\n log.info(f' - train_with_dev: \"{train_with_dev}\"')\n log.info(f' - batch_growth_annealing: \"{batch_growth_annealing}\"')\n log_line(log)\n log.info(f'Model training base path: \"{base_path}\"')\n log_line(log)\n log.info(f\"Device: {flair.device}\")\n log_line(log)\n log.info(f\"Embeddings storage mode: {embeddings_storage_mode}\")\n\n previous_learning_rate = learning_rate\n momentum = 0\n for group in optimizer.param_groups:\n if \"momentum\" in group:\n momentum = group[\"momentum\"]\n\n for epoch in range(epoch + 1, max_epochs + 1):\n log_line(log)\n\n # update epoch in model card\n model_card[\"training_parameters\"][\"epoch\"] = epoch\n\n if anneal_with_prestarts:\n last_epoch_model_state_dict = copy.deepcopy(self.model.state_dict())\n\n if eval_on_train_shuffle:\n train_part_indices = list(range(_len_dataset(self.corpus.train)))\n random.shuffle(train_part_indices)\n train_part_indices = train_part_indices[:train_part_size]\n train_part = torch.utils.data.dataset.Subset(self.corpus.train, train_part_indices)\n\n # get new learning rate\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n\n if learning_rate != previous_learning_rate and batch_growth_annealing:\n mini_batch_size *= 2\n\n # reload last best model if annealing with restarts is enabled\n if (\n (anneal_with_restarts or anneal_with_prestarts)\n and learning_rate != previous_learning_rate\n and os.path.exists(base_path / \"best-model.pt\")\n ):\n if anneal_with_restarts:\n log.info(\"resetting to best model\")\n self.model.load_state_dict(self.model.load(base_path / \"best-model.pt\").state_dict())\n if anneal_with_prestarts:\n 
log.info(\"resetting to pre-best model\")\n self.model.load_state_dict(self.model.load(base_path / \"pre-best-model.pt\").state_dict())\n\n previous_learning_rate = learning_rate\n if use_tensorboard:\n writer.add_scalar(\"learning_rate\", learning_rate, epoch)\n\n # stop training if learning rate becomes too small\n if (\n not isinstance(scheduler, (OneCycleLR, LinearSchedulerWithWarmup))\n and learning_rate < min_learning_rate\n ):\n log_line(log)\n log.info(\"learning rate too small - quitting training!\")\n log_line(log)\n break\n\n batch_loader = DataLoader(\n train_data,\n batch_size=mini_batch_size,\n shuffle=shuffle if epoch > 1 else False, # never shuffle the first epoch\n num_workers=0 if num_workers is None else num_workers,\n sampler=sampler,\n )\n\n self.model.train()\n\n train_loss: float = 0\n\n seen_batches = 0\n total_number_of_batches = len(batch_loader)\n\n modulo = max(1, int(total_number_of_batches / 10))\n\n # process mini-batches\n batch_time = 0.0\n average_over = 0\n for batch_no, batch in enumerate(batch_loader):\n\n start_time = time.time()\n\n # zero the gradients on the model and optimizer\n self.model.zero_grad()\n optimizer.zero_grad()\n\n # if necessary, make batch_steps\n batch_steps = [batch]\n if len(batch) > micro_batch_size:\n batch_steps = [batch[x : x + micro_batch_size] for x in range(0, len(batch), micro_batch_size)]\n\n # forward and backward for batch\n for batch_step in batch_steps:\n\n # forward pass\n loss = self.model.forward_loss(batch_step)\n\n if isinstance(loss, tuple):\n average_over += loss[1]\n loss = loss[0]\n\n # Backward\n if use_amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n train_loss += loss.item()\n\n # identify dynamic embeddings (always deleted) on first sentence\n if not dynamic_embeddings:\n dynamic_embeddings = identify_dynamic_embeddings(batch[0])\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(batch, embeddings_storage_mode, dynamic_embeddings)\n\n # do the optimizer step\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)\n optimizer.step()\n\n # do the scheduler step if one-cycle or linear decay\n if isinstance(scheduler, (OneCycleLR, LinearSchedulerWithWarmup)):\n scheduler.step()\n # get new learning rate\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n if \"momentum\" in group:\n momentum = group[\"momentum\"]\n if \"betas\" in group:\n momentum, _ = group[\"betas\"]\n\n seen_batches += 1\n\n batch_time += time.time() - start_time\n if seen_batches % modulo == 0:\n momentum_info = f\" - momentum: {momentum:.4f}\" if cycle_momentum else \"\"\n intermittent_loss = train_loss / average_over if average_over > 0 else train_loss / seen_batches\n log.info(\n f\"epoch {epoch} - iter {seen_batches}/\"\n f\"{total_number_of_batches} - loss \"\n f\"{intermittent_loss:.8f} - samples/sec:\"\n f\" {mini_batch_size * modulo / batch_time:.2f}\"\n f\" - lr: {learning_rate:.6f}{momentum_info}\"\n )\n batch_time = 0.0\n iteration = epoch * total_number_of_batches + batch_no\n if not param_selection_mode and write_weights:\n weight_extractor.extract_weights(self.model.state_dict(), iteration)\n\n if average_over != 0:\n train_loss /= average_over\n\n self.model.eval()\n\n if save_model_each_k_epochs > 0 and epoch % save_model_each_k_epochs == 0:\n print(\"saving model of current epoch\")\n model_name = \"model_epoch_\" + str(epoch) + \".pt\"\n self.model.save(base_path / model_name, 
checkpoint=save_optimizer_state)\n\n log_line(log)\n log.info(f\"EPOCH {epoch} done: loss {train_loss:.4f}\" f\" - lr {learning_rate:.7f}\")\n\n if use_tensorboard:\n writer.add_scalar(\"train_loss\", train_loss, epoch)\n\n # evaluate on train / dev / test split depending on training settings\n result_line: str = \"\"\n\n if log_train:\n train_eval_result = self.model.evaluate(\n self.corpus.train,\n gold_label_type=self.model.label_type,\n mini_batch_size=eval_batch_size,\n num_workers=num_workers,\n embedding_storage_mode=embeddings_storage_mode,\n main_evaluation_metric=main_evaluation_metric,\n gold_label_dictionary=gold_label_dictionary_for_eval,\n )\n result_line += f\"\\t{train_eval_result.log_line}\"\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.train, embeddings_storage_mode, dynamic_embeddings)\n\n if log_train_part:\n train_part_eval_result = self.model.evaluate(\n train_part,\n gold_label_type=self.model.label_type,\n mini_batch_size=eval_batch_size,\n num_workers=num_workers,\n embedding_storage_mode=embeddings_storage_mode,\n main_evaluation_metric=main_evaluation_metric,\n gold_label_dictionary=gold_label_dictionary_for_eval,\n )\n result_line += f\"\\t{train_part_eval_result.loss}\" f\"\\t{train_part_eval_result.log_line}\"\n\n log.info(\n f\"TRAIN_SPLIT : loss {train_part_eval_result.loss}\"\n f\" - {main_evaluation_metric[1]}\"\n f\" ({main_evaluation_metric[0]})\"\n f\" {round(train_part_eval_result.main_score, 4)}\"\n )\n if use_tensorboard:\n for (metric_class_avg_type, metric_type) in metrics_for_tensorboard:\n writer.add_scalar(\n f\"train_{metric_class_avg_type}_{metric_type}\",\n train_part_eval_result.classification_report[metric_class_avg_type][metric_type],\n epoch,\n )\n\n if log_dev:\n assert self.corpus.dev\n dev_eval_result = self.model.evaluate(\n self.corpus.dev,\n gold_label_type=self.model.label_type,\n mini_batch_size=eval_batch_size,\n num_workers=num_workers,\n out_path=base_path / \"dev.tsv\",\n embedding_storage_mode=embeddings_storage_mode,\n main_evaluation_metric=main_evaluation_metric,\n gold_label_dictionary=gold_label_dictionary_for_eval,\n )\n result_line += f\"\\t{dev_eval_result.loss}\\t{dev_eval_result.log_line}\"\n log.info(\n f\"DEV : loss {dev_eval_result.loss}\"\n f\" - {main_evaluation_metric[1]}\"\n f\" ({main_evaluation_metric[0]})\"\n f\" {round(dev_eval_result.main_score, 4)}\"\n )\n # calculate scores using dev data if available\n # append dev score to score history\n dev_score_history.append(dev_eval_result.main_score)\n dev_loss_history.append(dev_eval_result.loss)\n\n dev_score = dev_eval_result.main_score\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.dev, embeddings_storage_mode, dynamic_embeddings)\n\n if use_tensorboard:\n writer.add_scalar(\"dev_loss\", dev_eval_result.loss, epoch)\n writer.add_scalar(\"dev_score\", dev_eval_result.main_score, epoch)\n for (\n metric_class_avg_type,\n metric_type,\n ) in metrics_for_tensorboard:\n writer.add_scalar(\n f\"dev_{metric_class_avg_type}_{metric_type}\",\n dev_eval_result.classification_report[metric_class_avg_type][metric_type],\n epoch,\n )\n\n if log_test:\n assert self.corpus.test\n test_eval_result = self.model.evaluate(\n self.corpus.test,\n gold_label_type=self.model.label_type,\n mini_batch_size=eval_batch_size,\n num_workers=num_workers,\n out_path=base_path / \"test.tsv\",\n embedding_storage_mode=embeddings_storage_mode,\n 
main_evaluation_metric=main_evaluation_metric,\n gold_label_dictionary=gold_label_dictionary_for_eval,\n )\n result_line += f\"\\t{test_eval_result.loss}\\t{test_eval_result.log_line}\"\n log.info(\n f\"TEST : loss {test_eval_result.loss} -\"\n f\" {main_evaluation_metric[1]}\"\n f\" ({main_evaluation_metric[0]}) \"\n f\" {round(test_eval_result.main_score, 4)}\"\n )\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.test, embeddings_storage_mode, dynamic_embeddings)\n\n if use_tensorboard:\n writer.add_scalar(\"test_loss\", test_eval_result.loss, epoch)\n writer.add_scalar(\"test_score\", test_eval_result.main_score, epoch)\n for (\n metric_class_avg_type,\n metric_type,\n ) in metrics_for_tensorboard:\n writer.add_scalar(\n f\"test_{metric_class_avg_type}_{metric_type}\",\n test_eval_result.classification_report[metric_class_avg_type][metric_type],\n epoch,\n )\n\n # determine if this is the best model or if we need to anneal\n current_epoch_has_best_model_so_far = False\n # default mode: anneal against dev score\n if not train_with_dev and not anneal_against_dev_loss:\n if dev_score > best_validation_score:\n current_epoch_has_best_model_so_far = True\n best_validation_score = dev_score\n\n if isinstance(scheduler, AnnealOnPlateau):\n scheduler.step(dev_score, dev_eval_result.loss)\n\n # alternative: anneal against dev loss\n if not train_with_dev and anneal_against_dev_loss:\n if dev_eval_result.loss < best_validation_score:\n current_epoch_has_best_model_so_far = True\n best_validation_score = dev_eval_result.loss\n\n if isinstance(scheduler, AnnealOnPlateau):\n scheduler.step(dev_eval_result.loss)\n\n # alternative: anneal against train loss\n if train_with_dev:\n if train_loss < best_validation_score:\n current_epoch_has_best_model_so_far = True\n best_validation_score = train_loss\n\n if isinstance(scheduler, AnnealOnPlateau):\n scheduler.step(train_loss)\n\n train_loss_history.append(train_loss)\n\n # determine bad epoch number\n try:\n bad_epochs = scheduler.num_bad_epochs\n except AttributeError:\n bad_epochs = 0\n for group in optimizer.param_groups:\n new_learning_rate = group[\"lr\"]\n if new_learning_rate != previous_learning_rate:\n bad_epochs = patience + 1\n if previous_learning_rate == initial_learning_rate:\n bad_epochs += initial_extra_patience\n\n # log bad epochs\n log.info(f\"BAD EPOCHS (no improvement): {bad_epochs}\")\n\n if loss_txt is not None:\n # output log file\n with open(loss_txt, \"a\") as f:\n\n # make headers on first epoch\n if epoch == 1:\n f.write(\"EPOCH\\tTIMESTAMP\\tBAD_EPOCHS\" \"\\tLEARNING_RATE\\tTRAIN_LOSS\")\n\n if log_train:\n f.write(\"\\tTRAIN_\" + \"\\tTRAIN_\".join(train_eval_result.log_header.split(\"\\t\")))\n\n if log_train_part:\n f.write(\n \"\\tTRAIN_PART_LOSS\\tTRAIN_PART_\"\n + \"\\tTRAIN_PART_\".join(train_part_eval_result.log_header.split(\"\\t\"))\n )\n\n if log_dev:\n f.write(\"\\tDEV_LOSS\\tDEV_\" + \"\\tDEV_\".join(dev_eval_result.log_header.split(\"\\t\")))\n\n if log_test:\n f.write(\"\\tTEST_LOSS\\tTEST_\" + \"\\tTEST_\".join(test_eval_result.log_header.split(\"\\t\")))\n\n f.write(\n f\"\\n{epoch}\\t{datetime.datetime.now():%H:%M:%S}\"\n f\"\\t{bad_epochs}\"\n f\"\\t{learning_rate:.4f}\\t{train_loss}\"\n )\n f.write(result_line)\n\n # if checkpoint is enabled, save model at each epoch\n if checkpoint and not param_selection_mode:\n self.model.save(base_path / \"checkpoint.pt\", checkpoint=True)\n\n # Check whether to save best model\n if (\n (not train_with_dev or 
anneal_with_restarts or anneal_with_prestarts)\n and not param_selection_mode\n and current_epoch_has_best_model_so_far\n and not use_final_model_for_eval\n ):\n log.info(\"saving best model\")\n self.model.save(base_path / \"best-model.pt\", checkpoint=save_optimizer_state)\n\n if anneal_with_prestarts:\n current_state_dict = self.model.state_dict()\n self.model.load_state_dict(last_epoch_model_state_dict)\n self.model.save(base_path / \"pre-best-model.pt\")\n self.model.load_state_dict(current_state_dict)\n\n if use_swa:\n import torchcontrib\n\n cast(torchcontrib.optim.SWA, optimizer).swap_swa_sgd()\n\n # if we do not use dev data for model selection, save final model\n if save_final_model and not param_selection_mode:\n self.model.save(base_path / \"final-model.pt\", checkpoint=save_optimizer_state)\n\n except KeyboardInterrupt:\n log_line(log)\n log.info(\"Exiting from training early.\")\n\n if not param_selection_mode:\n log.info(\"Saving model ...\")\n self.model.save(base_path / \"final-model.pt\", checkpoint=save_optimizer_state)\n log.info(\"Done.\")\n except Exception:\n if create_file_logs:\n log_handler.close()\n log.removeHandler(log_handler)\n raise\n finally:\n if use_tensorboard:\n writer.close()\n\n # test best model if test data is present\n if self.corpus.test and not train_with_test:\n final_score = self.final_test(\n base_path=base_path,\n eval_mini_batch_size=eval_batch_size,\n num_workers=num_workers,\n main_evaluation_metric=main_evaluation_metric,\n gold_label_dictionary_for_eval=gold_label_dictionary_for_eval,\n )\n else:\n final_score = 0\n log.info(\"Test data not provided setting final score to 0\")\n\n if create_file_logs:\n log_handler.close()\n log.removeHandler(log_handler)\n\n return {\n \"test_score\": final_score,\n \"dev_score_history\": dev_score_history,\n \"train_loss_history\": train_loss_history,\n \"dev_loss_history\": dev_loss_history,\n }\n\n def resume(\n self,\n model: Model,\n **trainer_args,\n ):\n\n assert model.model_card is not None\n self.model = model\n # recover all arguments that were used to train this model\n args_used_to_train_model = model.model_card[\"training_parameters\"]\n\n # you can overwrite params with your own\n for param in trainer_args:\n args_used_to_train_model[param] = trainer_args[param]\n if param == \"optimizer\" and \"optimizer_state_dict\" in args_used_to_train_model:\n del args_used_to_train_model[\"optimizer_state_dict\"]\n if param == \"scheduler\" and \"scheduler_state_dict\" in args_used_to_train_model:\n del args_used_to_train_model[\"scheduler_state_dict\"]\n\n # surface nested arguments\n kwargs = args_used_to_train_model[\"kwargs\"]\n del args_used_to_train_model[\"kwargs\"]\n\n # resume training with these parameters\n self.train(**args_used_to_train_model, **kwargs)\n\n def fine_tune(\n self,\n base_path: Union[Path, str],\n learning_rate: float = 5e-5,\n max_epochs: int = 10,\n optimizer=torch.optim.AdamW,\n scheduler=LinearSchedulerWithWarmup,\n warmup_fraction: float = 0.1,\n mini_batch_size: int = 4,\n embeddings_storage_mode: str = \"none\",\n use_final_model_for_eval: bool = True,\n **trainer_args,\n ):\n\n return self.train(\n base_path=base_path,\n learning_rate=learning_rate,\n max_epochs=max_epochs,\n optimizer=optimizer,\n scheduler=scheduler,\n warmup_fraction=warmup_fraction,\n mini_batch_size=mini_batch_size,\n embeddings_storage_mode=embeddings_storage_mode,\n use_final_model_for_eval=use_final_model_for_eval,\n **trainer_args,\n )\n\n def final_test(\n self,\n base_path: Union[Path, 
str],\n eval_mini_batch_size: int,\n main_evaluation_metric: Tuple[str, str],\n num_workers: Optional[int] = 8,\n gold_label_dictionary_for_eval: Optional[Dictionary] = None,\n ):\n base_path = Path(base_path)\n base_path.mkdir(exist_ok=True, parents=True)\n\n log_line(log)\n\n self.model.eval()\n\n if (base_path / \"best-model.pt\").exists():\n self.model.load_state_dict(self.model.load(base_path / \"best-model.pt\").state_dict())\n else:\n log.info(\"Testing using last state of model ...\")\n\n assert self.corpus.test\n test_results = self.model.evaluate(\n self.corpus.test,\n gold_label_type=self.model.label_type,\n mini_batch_size=eval_mini_batch_size,\n num_workers=num_workers,\n out_path=base_path / \"test.tsv\",\n embedding_storage_mode=\"none\",\n main_evaluation_metric=main_evaluation_metric,\n gold_label_dictionary=gold_label_dictionary_for_eval,\n )\n\n log.info(test_results.log_line)\n log.info(test_results.detailed_results)\n log_line(log)\n\n # if we are training over multiple datasets, do evaluation for each\n if isinstance(self.corpus, MultiCorpus):\n for subcorpus in self.corpus.corpora:\n log_line(log)\n if subcorpus.test:\n subcorpus_results = self.model.evaluate(\n subcorpus.test,\n gold_label_type=self.model.label_type,\n mini_batch_size=eval_mini_batch_size,\n num_workers=num_workers,\n out_path=base_path / f\"{subcorpus.name}-test.tsv\",\n embedding_storage_mode=\"none\",\n main_evaluation_metric=main_evaluation_metric,\n )\n log.info(subcorpus.name)\n log.info(subcorpus_results.log_line)\n\n # get and return the final test score of best model\n final_score = test_results.main_score\n\n return final_score\n\n def find_learning_rate(\n self,\n base_path: Union[Path, str],\n optimizer,\n mini_batch_size: int = 32,\n start_learning_rate: float = 1e-7,\n end_learning_rate: float = 10,\n iterations: int = 1000,\n stop_early: bool = True,\n file_name: str = \"learning_rate.tsv\",\n **kwargs,\n ) -> Path:\n best_loss = None\n\n # cast string to Path\n base_path = Path(base_path)\n base_path.mkdir(exist_ok=True, parents=True)\n learning_rate_tsv = init_output_file(base_path, file_name)\n\n with open(learning_rate_tsv, \"a\") as f:\n f.write(\"ITERATION\\tTIMESTAMP\\tLEARNING_RATE\\tTRAIN_LOSS\\n\")\n\n optimizer = optimizer(self.model.parameters(), lr=start_learning_rate, **kwargs)\n\n train_data = self.corpus.train\n\n scheduler = ExpAnnealLR(optimizer, end_learning_rate, iterations)\n\n model_state = self.model.state_dict()\n self.model.train()\n\n step = 0\n\n loss_list = []\n average_loss_list = []\n\n while step < iterations:\n\n batch_loader = DataLoader(train_data, batch_size=mini_batch_size, shuffle=True)\n\n for batch in batch_loader:\n step += 1\n\n # forward pass\n loss = self.model.forward_loss(batch)\n if isinstance(loss, tuple):\n loss = loss[0]\n\n # update optimizer and scheduler\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)\n optimizer.step()\n scheduler.step()\n\n learning_rate = scheduler.get_lr()[0]\n\n # append current loss to list of losses for all iterations\n loss_list.append(loss.item())\n\n # compute averaged loss\n import statistics\n\n moving_avg_loss = statistics.mean(loss_list)\n average_loss_list.append(moving_avg_loss)\n\n if len(average_loss_list) > 10:\n drop = average_loss_list[-10] - moving_avg_loss\n else:\n drop = 0.0\n\n if not best_loss or moving_avg_loss < best_loss:\n best_loss = moving_avg_loss\n\n if step > iterations:\n break\n\n if stop_early and (moving_avg_loss > 4 * 
best_loss or torch.isnan(loss)):\n log_line(log)\n log.info(\"loss diverged - stopping early!\")\n step = iterations\n break\n\n with open(str(learning_rate_tsv), \"a\") as f:\n f.write(f\"{step}\\t{learning_rate}\\t{loss.item()}\" f\"\\t{moving_avg_loss}\\t{drop}\\n\")\n\n self.model.load_state_dict(model_state)\n self.model.to(flair.device)\n\n log_line(log)\n log.info(f\"learning rate finder finished - plot {learning_rate_tsv}\")\n log_line(log)\n\n return Path(learning_rate_tsv)\n", "from typing import Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn\nfrom torch.nn.functional import softmax\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nimport flair\nfrom flair.data import Dictionary, Label, List\n\nSTART_TAG: str = \"<START>\"\nSTOP_TAG: str = \"<STOP>\"\n\n\nclass ViterbiLoss(torch.nn.Module):\n \"\"\"\n Calculates the loss for each sequence up to its length t.\n \"\"\"\n\n def __init__(self, tag_dictionary: Dictionary):\n \"\"\"\n :param tag_dictionary: tag_dictionary of task\n \"\"\"\n super(ViterbiLoss, self).__init__()\n self.tag_dictionary = tag_dictionary\n self.tagset_size = len(tag_dictionary)\n self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)\n self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)\n\n def forward(self, features_tuple: tuple, targets: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward propagation of Viterbi Loss\n\n :param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),\n lengths of sentences in batch, transitions from CRF\n :param targets: true tags for sentences which will be converted to matrix indices.\n :return: average Viterbi Loss over batch size\n \"\"\"\n features, lengths, transitions = features_tuple\n\n batch_size = features.size(0)\n seq_len = features.size(1)\n\n targets, targets_matrix_indices = self._format_targets(targets, lengths)\n targets_matrix_indices = torch.tensor(targets_matrix_indices, dtype=torch.long).unsqueeze(2).to(flair.device)\n\n # scores_at_targets[range(features.shape[0]), lengths.values -1]\n # Squeeze crf scores matrices in 1-dim shape and gather scores at targets by matrix indices\n scores_at_targets = torch.gather(features.view(batch_size, seq_len, -1), 2, targets_matrix_indices)\n scores_at_targets = pack_padded_sequence(scores_at_targets, lengths, batch_first=True)[0]\n transitions_to_stop = transitions[\n np.repeat(self.stop_tag, features.shape[0]),\n [target[length - 1] for target, length in zip(targets, lengths)],\n ]\n gold_score = scores_at_targets.sum() + transitions_to_stop.sum()\n\n scores_upto_t = torch.zeros(batch_size, self.tagset_size, device=flair.device)\n\n for t in range(max(lengths)):\n batch_size_t = sum(\n [length > t for length in lengths]\n ) # since batch is ordered, we can save computation time by reducing our effective batch_size\n\n if t == 0:\n # Initially, get scores from <start> tag to all other tags\n scores_upto_t[:batch_size_t] = (\n scores_upto_t[:batch_size_t] + features[:batch_size_t, t, :, self.start_tag]\n )\n else:\n # We add scores at current timestep to scores accumulated up to previous timestep, and log-sum-exp\n # Remember, the cur_tag of the previous timestep is the prev_tag of this timestep\n scores_upto_t[:batch_size_t] = self._log_sum_exp(\n features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t].unsqueeze(1), dim=2\n )\n\n all_paths_scores = self._log_sum_exp(scores_upto_t + transitions[self.stop_tag].unsqueeze(0), dim=1).sum()\n\n viterbi_loss = all_paths_scores - 
gold_score\n\n return viterbi_loss\n\n @staticmethod\n def _log_sum_exp(tensor, dim):\n \"\"\"\n Calculates the log-sum-exponent of a tensor's dimension in a numerically stable way.\n\n :param tensor: tensor\n :param dim: dimension to calculate log-sum-exp of\n :return: log-sum-exp\n \"\"\"\n m, _ = torch.max(tensor, dim)\n m_expanded = m.unsqueeze(dim).expand_as(tensor)\n return m + torch.log(torch.sum(torch.exp(tensor - m_expanded), dim))\n\n def _format_targets(self, targets: torch.Tensor, lengths: torch.IntTensor):\n \"\"\"\n Formats targets into matrix indices.\n CRF scores contain per sentence, per token a (tagset_size x tagset_size) matrix, containing emission score for\n token j + transition prob from previous token i. Means, if we think of our rows as \"to tag\" and our columns\n as \"from tag\", the matrix in cell [10,5] would contain the emission score for tag 10 + transition score\n from previous tag 5 and could directly be addressed through the 1-dim indices (10 + tagset_size * 5) = 70,\n if our tagset consists of 12 tags.\n\n :param targets: targets as in tag dictionary\n :param lengths: lengths of sentences in batch\n \"\"\"\n targets_per_sentence = []\n\n targets_list = targets.tolist()\n for cut in lengths:\n targets_per_sentence.append(targets_list[:cut])\n targets_list = targets_list[cut:]\n\n for t in targets_per_sentence:\n t += [self.tag_dictionary.get_idx_for_item(STOP_TAG)] * (int(lengths.max().item()) - len(t))\n\n matrix_indices = list(\n map(\n lambda s: [self.tag_dictionary.get_idx_for_item(START_TAG) + (s[0] * self.tagset_size)]\n + [s[i] + (s[i + 1] * self.tagset_size) for i in range(0, len(s) - 1)],\n targets_per_sentence,\n )\n )\n\n return targets_per_sentence, matrix_indices\n\n\nclass ViterbiDecoder:\n \"\"\"\n Decodes a given sequence using the Viterbi algorithm.\n \"\"\"\n\n def __init__(self, tag_dictionary: Dictionary):\n \"\"\"\n :param tag_dictionary: Dictionary of tags for sequence labeling task\n \"\"\"\n self.tag_dictionary = tag_dictionary\n self.tagset_size = len(tag_dictionary)\n self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)\n self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)\n\n def decode(self, features_tuple: tuple, probabilities_for_all_classes: bool) -> Tuple[List, List]:\n \"\"\"\n Decoding function returning the most likely sequence of tags.\n :param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),\n lengths of sentence in batch, transitions of CRF\n :param probabilities_for_all_classes: whether to return probabilities for all tags\n :return: decoded sequences\n \"\"\"\n features, lengths, transitions = features_tuple\n all_tags = []\n\n batch_size = features.size(0)\n seq_len = features.size(1)\n\n # Create a tensor to hold accumulated sequence scores at each current tag\n scores_upto_t = torch.zeros(batch_size, seq_len + 1, self.tagset_size).to(flair.device)\n # Create a tensor to hold back-pointers\n # i.e., indices of the previous_tag that corresponds to maximum accumulated score at current tag\n # Let pads be the <end> tag index, since that was the last tag in the decoded sequence\n backpointers = (\n torch.ones((batch_size, seq_len + 1, self.tagset_size), dtype=torch.long, device=flair.device)\n * self.stop_tag\n )\n\n for t in range(seq_len):\n batch_size_t = sum([length > t for length in lengths]) # effective batch size (sans pads) at this timestep\n terminates = [i for i, length in enumerate(lengths) if length == t + 1]\n\n if t == 0:\n 
scores_upto_t[:batch_size_t, t] = features[:batch_size_t, t, :, self.start_tag]\n backpointers[:batch_size_t, t, :] = (\n torch.ones((batch_size_t, self.tagset_size), dtype=torch.long) * self.start_tag\n )\n else:\n # We add scores at current timestep to scores accumulated up to previous timestep, and\n # choose the previous timestep that corresponds to the max. accumulated score for each current timestep\n scores_upto_t[:batch_size_t, t], backpointers[:batch_size_t, t, :] = torch.max(\n features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t, t - 1].unsqueeze(1), dim=2\n )\n\n # If sentence is over, add transition to STOP-tag\n if terminates:\n scores_upto_t[terminates, t + 1], backpointers[terminates, t + 1, :] = torch.max(\n scores_upto_t[terminates, t].unsqueeze(1) + transitions[self.stop_tag].unsqueeze(0), dim=2\n )\n\n # Decode/trace best path backwards\n decoded = torch.zeros((batch_size, backpointers.size(1)), dtype=torch.long, device=flair.device)\n pointer = torch.ones((batch_size, 1), dtype=torch.long, device=flair.device) * self.stop_tag\n\n for t in list(reversed(range(backpointers.size(1)))):\n decoded[:, t] = torch.gather(backpointers[:, t, :], 1, pointer).squeeze(1)\n pointer = decoded[:, t].unsqueeze(1)\n\n # Sanity check\n assert torch.equal(\n decoded[:, 0], torch.ones((batch_size), dtype=torch.long, device=flair.device) * self.start_tag\n )\n\n # remove start-tag and backscore to stop-tag\n scores_upto_t = scores_upto_t[:, :-1, :]\n decoded = decoded[:, 1:]\n\n # Max + Softmax to get confidence score for predicted label and append label to each token\n scores = softmax(scores_upto_t, dim=2)\n confidences = torch.max(scores, dim=2)\n\n tags = []\n for tag_seq, tag_seq_conf, length_seq in zip(decoded, confidences.values, lengths):\n tags.append(\n [\n Label(self.tag_dictionary.get_item_for_index(tag), conf.item())\n for tag, conf in list(zip(tag_seq, tag_seq_conf))[:length_seq]\n ]\n )\n\n if probabilities_for_all_classes:\n all_tags = self._all_scores_for_token(scores, lengths)\n\n return tags, all_tags\n\n def _all_scores_for_token(self, scores: torch.Tensor, lengths: torch.IntTensor):\n \"\"\"\n Returns all scores for each tag in tag dictionary.\n :param scores: Scores for current sentence.\n \"\"\"\n scores = scores.numpy()\n prob_tags_per_sentence = []\n for scores_sentence, length in zip(scores, lengths):\n scores_sentence = scores_sentence[:length]\n prob_tags_per_sentence.append(\n [\n [\n Label(self.tag_dictionary.get_item_for_index(score_id), score)\n for score_id, score in enumerate(score_dist)\n ]\n for score_dist in scores_sentence\n ]\n )\n return prob_tags_per_sentence\n" ]
[ [ "torch.optim.lr_scheduler.OneCycleLR", "torch.isnan", "torch.utils.data.dataset.ConcatDataset", "torch.utils.data.dataset.Subset", "torch.utils.tensorboard.SummaryWriter" ], [ "torch.zeros", "torch.max", "torch.gather", "torch.ones", "torch.tensor", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.functional.softmax", "numpy.repeat", "torch.exp" ] ]
tingidev/PyDev
[ "afd55187a4666b611bff49dbe63710fdc876ca7a" ]
[ "app.py" ]
[ "# Based on: https://www.statworx.com/at/blog/how-to-build-a-dashboard-in-python-plotly-dash-step-by-step-tutorial/\n# Padding: https://www.htmldog.com/guides/css/beginner/margins/\n\n# System\nimport base64\nimport datetime\nimport io\n\n# Dash\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\n\n# Packages\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\n# Initialise app\napp = dash.Dash(__name__)\n\n# Define app\napp.layout = html.Div(children=[\n html.Div(\n id='main-div',\n className='row',\n children=[\n html.Div(\n id='left-col-div',\n className='four columns div-user-controls',\n style={'borderStyle': 'dashed'},\n children=[\n html.Div(\n id='upload-div',\n style={'margin-right': '55px'},\n children=[\n html.H2('PyDev - Data Explorer and Visualization'),\n html.P(\n 'Perform a first-pass exploration of your machine-learning data',\n style={'margin-bottom': '60px'}\n ),\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin-bottom': '60px'},\n multiple=True\n ),\n ]),\n html.Div(\n id='meta-table-div',\n children=[],\n style={\n 'borderStyle': 'dashed',\n 'margin-right': '55px'}\n )\n ]),\n dcc.Store(id='memory'),\n html.Div(\n id='right-col-div',\n className='eight columns div-for-charts bg-grey',\n style={\n 'margin-left': '0%',\n 'borderStyle': 'dashed',\n 'overflow-y': 'auto'},\n children=[\n html.Div(\n id='table-data-upload',\n style={\n 'margin': '55px',\n 'margin-top': '64px',\n 'margin-bottom': '32px',\n 'borderStyle': 'dashed'}\n ),\n html.Div(\n id='graph-data-upload',\n style={\n 'margin': '55px',\n 'margin-top': '32px',\n 'borderStyle': 'dashed'}\n )\n ])\n ])\n])\n\n# Parse data file\ndef parse_contents(contents, filename, date):\n content_type, content_string = contents.split(',')\n decoded = base64.b64decode(content_string)\n try:\n if 'csv' in filename:\n # Assume that the user uploaded a CSV file\n df = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n elif 'xls' in filename:\n # Assume that the user uploaded an excel file\n df = pd.read_excel(io.BytesIO(decoded))\n elif 'json' in filename:\n # Assume that the user uploaded a JSON file\n df = pd.read_json(decoded.decode('utf-8'))\n except Exception as e:\n print(e)\n return html.Div([\n 'There was an error processing this file.'\n ])\n\n return df.to_json(orient='split')\n\n# Callback to store file in Store element after upload\n@app.callback(Output('memory', 'data'),\n Input('upload-data', 'contents'),\n State('upload-data', 'filename'),\n State('upload-data', 'last_modified'))\ndef update_output(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n# Callback to fill meta data table\n@app.callback(Output('meta-table-div', 'children'),\n Input('memory', 'data'),\n State('upload-data', 'filename'),\n State('upload-data', 'last_modified'))\ndef update_meta_table(data, filename, date):\n table = html.Div()\n if data:\n data = data[0]\n df = pd.read_json(data, orient='split')\n table = html.Div(\n # className='row',\n children=[\n html.H2('File name: '+filename[0]),\n html.H2('Last modified: '+str(datetime.datetime.fromtimestamp(date[0]))[:19]),\n 
])\n return table\n\n# Callback to fill main data table\n@app.callback(Output('table-data-upload', 'children'),\n Input('memory', 'data'))\ndef update_table(data):\n table = html.Div()\n if data:\n data = data[0]\n df = pd.read_json(data, orient='split')\n table = html.Div(\n children=[\n dash_table.DataTable(\n data=df.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in df.columns],\n style_as_list_view=True,\n page_size=10,\n # style_table={\n # 'overflowX': 'auto'},\n style_header={\n 'backgroundColor': '#1E1E1E',\n 'fontWeight': 'bold'},\n style_cell={\n 'backgroundColor': '#31302F',\n 'color': 'white',\n 'overflow': 'hidden',\n 'textOverflow': 'ellipsis',\n 'maxWidth': 50},\n style_data_conditional=[\n # {'if': {'column_type': 'numeric'},\n # 'textAlign': 'center'},\n {'if': {'state': 'selected'},\n 'backgroundColor': '#1E1E1E'}\n ]\n ),\n html.Hr() # horizontal line\n ],\n className='bg-grey',\n )\n return table\n\n# Callback to fill main data graph\n@app.callback(Output('graph-data-upload', 'children'),\n Input('memory', 'data'))\ndef update_table(data):\n graph = html.Div()\n if data:\n data = data[0]\n df = pd.read_json(data, orient='split')\n graph = html.Div(children=[\n dcc.Graph(\n config={'displayModeBar': False},\n animate=True,\n figure=px.line(df,\n x='Age',\n y='Survived',\n # color='stock',\n template='plotly_dark').update_layout({\n 'plot_bgcolor': 'rgba(0, 0, 0, 0)',\n 'paper_bgcolor': 'rgba(0, 0, 0, 0)'})\n ),\n ])\n return graph\n\n# Run app\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.read_json" ] ]
IIGROUP/PUM
[ "f66f92bc92f95baf015f1e003c661c8a8b3bcb66" ]
[ "visualization/visualize_recall_diff.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# visualization code for categorical recall difference between two models\n\"\"\"\nExample:\n python visualization/visualize_recall_diff.py \\\n --baseline_result ../VCTree-Scene-Graph-Generation/caches/prd_recall.pkl \\\n --new_model_result checkpoints/predcls-cross_att-vis_gaussian/caches/test_prediction-0.pkl\n\"\"\"\nimport argparse\nimport os\nimport pickle\nimport json\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom collections import defaultdict\nimport csv\n\n\nmatplotlib.use('Agg')\nOLD_DATA_PATH = '../Large-Scale-VRD.pytorch/data/'\n\ndef parse_args():\n \"\"\"Parse in command line arguments\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--baseline_result', type=str,\n default='../VCTree-Scene-Graph-Generation/caches/prd_recall.pkl',\n help='path of baseline result')\n parser.add_argument(\n '--new_model_result', type=str,\n default='checkpoints/predcls-cross_att-vis_gaussian/caches/test_prediction-0.pkl',\n help='path of new model result')\n parser.add_argument(\n '--k', type=int,\n default=100,\n help='recall@k to visualize')\n\n return parser.parse_args()\n\n\ndef parse_category_test_results(file_path):\n data = pickle.load(open(file_path, 'rb'))\n if 'eval_results' in data:\n eval_res = data['eval_results']['prd_recall']\n else:\n eval_res = data\n prd2recall = eval_res[args.k]\n return prd2recall\n\n\ndef sort_dict(dict_in, key=lambda item: item[1]):\n return {k: v for k, v in sorted(dict_in.items(), key=key, reverse=True)}\n\n\ndef autolabel(ax, rects):\n \"\"\"\n Attach a text label above each bar in *rects*, displaying its height.\n refer to https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/barchart.html\n \"\"\"\n fixed_args = dict(\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom',\n rotation=90,\n fontsize=14\n )\n for rect in rects:\n height = rect.get_height()\n if height >= 0:\n text = '+%.2f' % height\n y = height\n else:\n text = '%.2f' % height\n y = 0\n ax.annotate(text,\n xy=(rect.get_x() + rect.get_width() / 2, y),\n **fixed_args)\n\n\ndef rgb_values_0_to_1(r, g, b):\n return r / 255, g / 255, b / 255\n\n\ndef draw_bar_figure(data, save_path):\n # TODO there may be something wrong with the predicate counts\n with open(os.path.join(OLD_DATA_PATH, 'cache/vg_prd_count.pkl'), 'rb') as f:\n prd_counts = pickle.load(f)\n prd_counts = sort_dict(prd_counts)\n with open(os.path.join(OLD_DATA_PATH, 'vg/predicates.json')) as f:\n prd_label_list = json.load(f) # a list of labels\n names = []\n nums = []\n for k in prd_counts.keys():\n name = prd_label_list[k]\n num = data[k+1] * 100\n # Filter out trivial values\n if abs(num) > 0.01:\n names.append(name)\n nums.append(num)\n plt.figure(figsize=(15, 8))\n ax = plt.gca()\n ax.yaxis.grid(zorder=0) # Set z-order to make sure the gridlines behind the bars\n rects = bar_list = ax.bar(names, nums, zorder=3)\n print('y data', nums)\n for i in range(len(bar_list)):\n if nums[i] < 0:\n bar_list[i].set_color(rgb_values_0_to_1(237, 125, 49))\n else:\n bar_list[i].set_color(rgb_values_0_to_1(68, 114, 196))\n plt.xticks(np.arange(len(names)), names, rotation=90)\n # Set parameters for tick labels\n plt.tick_params(axis='x', which='major', labelsize=21)\n plt.tick_params(axis='y', which='major', labelsize=14)\n plt.ylabel('R@%d Improvement (%%)' % args.k, fontsize=23)\n plt.ylim((None, 16)) # a trick to to make sure the texts are all inside the figure\n # Show number above the 
bars\n autolabel(ax, rects)\n plt.tight_layout()\n plt.savefig(save_path + '.pdf')\n print('figure saved at', save_path + '.pdf')\n plt.close()\n\n\ndef save_as_csv(baseline_data, new_data, save_path):\n with open(os.path.join(OLD_DATA_PATH, 'cache/vg_prd_count.pkl'), 'rb') as f:\n prd_counts = pickle.load(f)\n prd_counts = sort_dict(prd_counts)\n with open(os.path.join(OLD_DATA_PATH, 'vg/predicates.json')) as f:\n prd_label_list = json.load(f) # a list of labels\n writer = csv.writer(open(save_path + '.csv', 'w'))\n writer.writerow(['', 'baseline', 'new']) # writer headers\n for k in prd_counts.keys():\n name = prd_label_list[k]\n writer.writerow([name, baseline_data[k+1] * 100, new_data[k+1] * 100])\n print('csv saved at', save_path + '.csv')\n\n\nif __name__ == '__main__':\n args = parse_args()\n assert args.new_model_result != ''\n if args.baseline_result == '':\n # Use a default dict where every value is 0\n baseline_prd2recall = defaultdict(int)\n else:\n baseline_prd2recall = parse_category_test_results(args.baseline_result)\n new_model_prd2recall = parse_category_test_results(args.new_model_result)\n figure_name = '%s-vs-%s' % \\\n (args.new_model_result.split('/')[-3], args.baseline_result.split('/')[-3] if args.baseline_result else '')\n save_base_name = os.path.join(os.path.dirname(args.new_model_result), figure_name)\n save_as_csv(baseline_prd2recall, new_model_prd2recall, save_base_name)\n recall_diff = {k: v - baseline_prd2recall[k] for k, v in new_model_prd2recall.items()}\n draw_bar_figure(recall_diff, save_base_name)\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gca" ] ]
szmmm/speechchain
[ "909724c6f305588a52958f64f584ad21696b5173" ]
[ "test/test_e2e_tts_fastspeech.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Tomoki Hayashi\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport json\nimport os\nimport shutil\nimport tempfile\n\nfrom argparse import Namespace\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom espnet.nets.pytorch_backend.e2e_tts_fastspeech import FeedForwardTransformer\nfrom espnet.nets.pytorch_backend.e2e_tts_transformer import Transformer\nfrom espnet.nets.pytorch_backend.fastspeech.duration_calculator import DurationCalculator\nfrom espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator\nfrom espnet.nets.pytorch_backend.nets_utils import pad_list\n\n\ndef prepare_inputs(idim, odim, ilens, olens,\n device=torch.device('cpu')):\n ilens = torch.LongTensor(ilens).to(device)\n olens = torch.LongTensor(olens).to(device)\n xs = [np.random.randint(0, idim, l) for l in ilens]\n ys = [np.random.randn(l, odim) for l in olens]\n xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)\n ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)\n labels = ys.new_zeros(ys.size(0), ys.size(1))\n for i, l in enumerate(olens):\n labels[i, l - 1:] = 1\n\n batch = {\n \"xs\": xs,\n \"ilens\": ilens,\n \"ys\": ys,\n \"labels\": labels,\n \"olens\": olens,\n }\n\n return batch\n\n\ndef make_transformer_args(**kwargs):\n defaults = dict(\n embed_dim=0,\n eprenet_conv_layers=0,\n eprenet_conv_filts=0,\n eprenet_conv_chans=0,\n dprenet_layers=2,\n dprenet_units=256,\n adim=32,\n aheads=4,\n elayers=2,\n eunits=128,\n dlayers=2,\n dunits=128,\n postnet_layers=5,\n postnet_filts=5,\n postnet_chans=512,\n eprenet_dropout_rate=0.1,\n dprenet_dropout_rate=0.5,\n postnet_dropout_rate=0.1,\n transformer_enc_dropout_rate=0.1,\n transformer_enc_positional_dropout_rate=0.1,\n transformer_enc_attn_dropout_rate=0.0,\n transformer_dec_dropout_rate=0.1,\n transformer_dec_positional_dropout_rate=0.1,\n transformer_dec_attn_dropout_rate=0.3,\n transformer_enc_dec_attn_dropout_rate=0.0,\n use_masking=True,\n bce_pos_weight=1.0,\n use_batch_norm=True,\n use_scaled_pos_enc=True,\n encoder_normalize_before=True,\n decoder_normalize_before=True,\n encoder_concat_after=False,\n decoder_concat_after=False,\n transformer_init=\"pytorch\",\n initial_encoder_alpha=1.0,\n initial_decoder_alpha=1.0,\n reduction_factor=1,\n loss_type=\"L1\",\n use_guided_attn_loss=False,\n num_heads_applied_guided_attn=2,\n num_layers_applied_guided_attn=2,\n guided_attn_loss_sigma=0.4,\n modules_applied_guided_attn=[\"encoder\", \"decoder\", \"encoder-decoder\"]\n )\n defaults.update(kwargs)\n return defaults\n\n\ndef make_feedforward_transformer_args(**kwargs):\n defaults = dict(\n adim=32,\n aheads=4,\n elayers=2,\n eunits=128,\n dlayers=2,\n dunits=128,\n duration_predictor_layers=2,\n duration_predictor_chans=128,\n duration_predictor_kernel_size=3,\n duration_predictor_dropout_rate=0.1,\n positionwise_layer_type=\"linear\",\n positionwise_conv_kernel_size=1,\n transformer_enc_dropout_rate=0.1,\n transformer_enc_positional_dropout_rate=0.1,\n transformer_enc_attn_dropout_rate=0.0,\n transformer_dec_dropout_rate=0.1,\n transformer_dec_positional_dropout_rate=0.1,\n transformer_dec_attn_dropout_rate=0.3,\n transformer_enc_dec_attn_dropout_rate=0.0,\n use_masking=True,\n use_scaled_pos_enc=True,\n encoder_normalize_before=True,\n decoder_normalize_before=True,\n encoder_concat_after=False,\n decoder_concat_after=False,\n transformer_init=\"pytorch\",\n initial_encoder_alpha=1.0,\n 
initial_decoder_alpha=1.0,\n transfer_encoder_from_teacher=False,\n transferred_encoder_module=\"all\",\n reduction_factor=1,\n teacher_model=None,\n )\n defaults.update(kwargs)\n return defaults\n\n\n@pytest.mark.parametrize(\n \"model_dict\", [\n ({}),\n ({\"use_masking\": False}),\n ({\"use_scaled_pos_enc\": False}),\n ({\"positionwise_layer_type\": \"conv1d\", \"positionwise_conv_kernel_size\": 3}),\n ({\"encoder_normalize_before\": False}),\n ({\"decoder_normalize_before\": False}),\n ({\"encoder_normalize_before\": False, \"decoder_normalize_before\": False}),\n ({\"encoder_concat_after\": True}),\n ({\"decoder_concat_after\": True}),\n ({\"encoder_concat_after\": True, \"decoder_concat_after\": True}),\n ])\ndef test_fastspeech_trainable_and_decodable(model_dict):\n # make args\n idim, odim = 10, 25\n teacher_model_args = make_transformer_args(**model_dict)\n model_args = make_feedforward_transformer_args(**model_dict)\n\n # setup batch\n ilens = [10, 5]\n olens = [20, 15]\n batch = prepare_inputs(idim, odim, ilens, olens)\n\n # define teacher model and save it\n teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))\n tmpdir = tempfile.mkdtemp(prefix=\"tmp_\", dir=\"/tmp\")\n torch.save(teacher_model.state_dict(), tmpdir + \"/model.dummy.best\")\n with open(tmpdir + \"/model.json\", 'wb') as f:\n f.write(json.dumps((idim, odim, teacher_model_args),\n indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))\n\n # define model\n model_args[\"teacher_model\"] = tmpdir + \"/model.dummy.best\"\n model = FeedForwardTransformer(idim, odim, Namespace(**model_args))\n optimizer = torch.optim.Adam(model.parameters())\n\n # trainable\n loss = model(**batch).mean()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # decodable\n model.eval()\n with torch.no_grad():\n model.inference(batch[\"xs\"][0][:batch[\"ilens\"][0]])\n model.calculate_all_attentions(**batch)\n\n # remove tmpdir\n if os.path.exists(tmpdir):\n shutil.rmtree(tmpdir)\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"gpu required\")\n@pytest.mark.parametrize(\n \"model_dict\", [\n ({}),\n ({\"use_masking\": False}),\n ({\"use_scaled_pos_enc\": False}),\n ({\"encoder_normalize_before\": False}),\n ({\"decoder_normalize_before\": False}),\n ({\"encoder_normalize_before\": False, \"decoder_normalize_before\": False}),\n ({\"encoder_concat_after\": True}),\n ({\"decoder_concat_after\": True}),\n ({\"encoder_concat_after\": True, \"decoder_concat_after\": True}),\n ])\ndef test_fastspeech_gpu_trainable(model_dict):\n # make args\n idim, odim = 10, 25\n teacher_model_args = make_transformer_args(**model_dict)\n model_args = make_feedforward_transformer_args(**model_dict)\n\n # setup batch\n ilens = [10, 5]\n olens = [20, 15]\n device = torch.device('cuda')\n batch = prepare_inputs(idim, odim, ilens, olens, device=device)\n\n # define teacher model and save it\n teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))\n tmpdir = tempfile.mkdtemp(prefix=\"tmp_\", dir=\"/tmp\")\n torch.save(teacher_model.state_dict(), tmpdir + \"/model.dummy.best\")\n with open(tmpdir + \"/model.json\", 'wb') as f:\n f.write(json.dumps((idim, odim, teacher_model_args),\n indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))\n\n # define model\n model_args[\"teacher_model\"] = tmpdir + \"/model.dummy.best\"\n model = FeedForwardTransformer(idim, odim, Namespace(**model_args))\n model.to(device)\n optimizer = torch.optim.Adam(model.parameters())\n\n # trainable\n loss = 
model(**batch).mean()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # remove tmpdir\n if os.path.exists(tmpdir):\n shutil.rmtree(tmpdir)\n\n\n@pytest.mark.skipif(torch.cuda.device_count() < 2, reason=\"multi gpu required\")\n@pytest.mark.parametrize(\n \"model_dict\", [\n ({}),\n ({\"use_masking\": False}),\n ({\"use_scaled_pos_enc\": False}),\n ({\"encoder_normalize_before\": False}),\n ({\"decoder_normalize_before\": False}),\n ({\"encoder_normalize_before\": False, \"decoder_normalize_before\": False}),\n ({\"encoder_concat_after\": True}),\n ({\"decoder_concat_after\": True}),\n ({\"encoder_concat_after\": True, \"decoder_concat_after\": True}),\n ])\ndef test_fastspeech_multi_gpu_trainable(model_dict):\n # make args\n idim, odim = 10, 25\n teacher_model_args = make_transformer_args(**model_dict)\n model_args = make_feedforward_transformer_args(**model_dict)\n\n # setup batch\n ilens = [10, 5]\n olens = [20, 15]\n device = torch.device('cuda')\n batch = prepare_inputs(idim, odim, ilens, olens, device=device)\n\n # define teacher model and save it\n teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))\n tmpdir = tempfile.mkdtemp(prefix=\"tmp_\", dir=\"/tmp\")\n torch.save(teacher_model.state_dict(), tmpdir + \"/model.dummy.best\")\n with open(tmpdir + \"/model.json\", 'wb') as f:\n f.write(json.dumps((idim, odim, teacher_model_args),\n indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))\n\n # define model\n ngpu = 2\n device_ids = list(range(ngpu))\n model_args[\"teacher_model\"] = tmpdir + \"/model.dummy.best\"\n model = FeedForwardTransformer(idim, odim, Namespace(**model_args))\n model = torch.nn.DataParallel(model, device_ids)\n model.to(device)\n optimizer = torch.optim.Adam(model.parameters())\n\n # trainable\n loss = model(**batch).mean()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # remove tmpdir\n if os.path.exists(tmpdir):\n shutil.rmtree(tmpdir)\n\n\n@pytest.mark.parametrize(\n \"model_dict\", [\n ({}),\n ({\"use_scaled_pos_enc\": False}),\n ({\"init_encoder_module\": \"embed\"}),\n ({\"encoder_normalize_before\": False}),\n ({\"decoder_normalize_before\": False}),\n ({\"encoder_normalize_before\": False, \"decoder_normalize_before\": False}),\n ({\"encoder_concat_after\": True}),\n ({\"decoder_concat_after\": True}),\n ({\"encoder_concat_after\": True, \"decoder_concat_after\": True}),\n ])\ndef test_initialization(model_dict):\n # make args\n idim, odim = 10, 25\n teacher_model_args = make_transformer_args(**model_dict)\n model_args = make_feedforward_transformer_args(**model_dict)\n\n # define teacher model and save it\n teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))\n tmpdir = tempfile.mkdtemp(prefix=\"tmp_\", dir=\"/tmp\")\n torch.save(teacher_model.state_dict(), tmpdir + \"/model.dummy.best\")\n with open(tmpdir + \"/model.json\", 'wb') as f:\n f.write(json.dumps((idim, odim, teacher_model_args),\n indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))\n\n # define model\n model_args[\"teacher_model\"] = tmpdir + \"/model.dummy.best\"\n model_args[\"transfer_encoder_from_teacher\"] = True\n model = FeedForwardTransformer(idim, odim, Namespace(**model_args))\n\n # check initialization\n if model_args[\"transferred_encoder_module\"] == \"all\":\n for p1, p2 in zip(model.encoder.parameters(), model.teacher.encoder.parameters()):\n np.testing.assert_array_equal(p1.data.cpu().numpy(), p2.data.cpu().numpy())\n else:\n np.testing.assert_array_equal(\n 
model.encoder.embed[0].weight.data.cpu().numpy(),\n model.teacher.encoder.embed[0].weight.data.cpu().numpy()\n )\n\n # remove tmpdir\n if os.path.exists(tmpdir):\n shutil.rmtree(tmpdir)\n\n\ndef test_length_regulator():\n # prepare inputs\n idim = 5\n ilens = [10, 5, 3]\n xs = pad_list([torch.randn((ilen, idim)) for ilen in ilens], 0.0)\n ds = pad_list([torch.arange(ilen) for ilen in ilens], 0)\n\n # test with non-zero durations\n length_regulator = LengthRegulator()\n xs_expand = length_regulator(xs, ds, ilens)\n assert int(xs_expand.shape[1]) == int(ds.sum(dim=-1).max())\n\n # test with duration including zero\n ds[:, 2] = 0\n xs_expand = length_regulator(xs, ds, ilens)\n assert int(xs_expand.shape[1]) == int(ds.sum(dim=-1).max())\n\n\ndef test_duration_calculator():\n # define duration calculator\n idim, odim = 10, 25\n teacher_model_args = make_transformer_args()\n teacher = Transformer(idim, odim, Namespace(**teacher_model_args))\n duration_calculator = DurationCalculator(teacher)\n\n # setup batch\n ilens = [10, 5, 3]\n olens = [20, 15, 10]\n batch = prepare_inputs(idim, odim, ilens, olens)\n\n # calculate durations\n ds = duration_calculator(batch[\"xs\"], batch[\"ilens\"], batch[\"ys\"], batch[\"olens\"])\n np.testing.assert_array_equal(ds.sum(dim=-1).cpu().numpy(), batch[\"olens\"].cpu().numpy())\n" ]
[ [ "torch.device", "torch.arange", "torch.no_grad", "numpy.random.randn", "torch.cuda.device_count", "torch.from_numpy", "torch.cuda.is_available", "numpy.random.randint", "torch.LongTensor", "torch.randn", "torch.nn.DataParallel" ] ]
dhimmel/pubmedpy
[ "9d716768f5ab798ec448154588e4fd99afd7584a" ]
[ "pubmedpy/esummary.py" ]
[ "import collections\nimport contextlib\nimport datetime\nimport itertools\nimport locale\nimport logging\nimport re\nimport threading\nfrom typing import List, Optional\n\nimport pandas\nimport tqdm\nimport lxml.etree\n\nfrom .xml import iter_extract_elems\nfrom .utils import PathType\n\nlocale_lock = threading.Lock()\n\n\n@contextlib.contextmanager\ndef setlocale(name: str):\n \"\"\"\n Context manager to temporarily set locale for datetime.datetime.strptime\n https://stackoverflow.com/a/24070673/4651668\n \"\"\"\n with locale_lock:\n saved = locale.setlocale(locale.LC_ALL)\n try:\n yield locale.setlocale(locale.LC_ALL, name)\n finally:\n locale.setlocale(locale.LC_ALL, saved)\n\n\ndef parse_date_text(text: str) -> datetime.date:\n \"\"\"\n Parse an `eSummaryResult/DocSum/Item[@Name='History']/Item[@Type='Date']`\n element.\n The time on the date is discarded. A `datetime.date` object is returned\n \"\"\"\n with setlocale(\"C\"):\n return datetime.datetime.strptime(text, \"%Y/%m/%d %H:%M\").date()\n\n\ndef parse_pubdate_text(text: str) -> datetime.date:\n \"\"\"\n Parse the text contained by the following elements:\n\n `eSummaryResult/DocSum/Item[@Name='PubDate' @Type='Date']`\n `eSummaryResult/DocSum/Item[@Name='EPubDate' @Type='Date']`\n\n See https://www.nlm.nih.gov/bsd/licensee/elements_article_source.html\n A `datetime.date` object is returned.\n \"\"\"\n return datetime.datetime.strptime(text, \"%Y %b %d\").date()\n\n\ndef parse_esummary_history(docsum: lxml.etree._Element) -> dict:\n \"\"\"\n docsum is an xml Element.\n \"\"\"\n # Extract all historical dates\n date_pairs = list()\n seen = set()\n for item in docsum.findall(\"Item[@Name='History']/Item[@Type='Date']\"):\n name = item.get(\"Name\")\n try:\n date_ = parse_date_text(item.text)\n except ValueError as e:\n id_ = int(docsum.findtext(\"Id\"))\n msg = f\"article {id_}; name: {name}; \" f\"date: {item.text}; error: {e}\"\n logging.warning(msg)\n continue\n\n date_pair = name, date_\n if date_pair in seen:\n continue\n seen.add(date_pair)\n date_pairs.append(date_pair)\n date_pairs.sort(key=lambda x: x[0])\n history = collections.OrderedDict()\n for name, group in itertools.groupby(date_pairs, key=lambda x: x[0]):\n for i, (name, date_) in enumerate(group):\n history[f\"{name}_{i}\"] = date_\n return history\n\n\ndef parse_esummary_pubdates(docsum: lxml.etree._Element) -> dict:\n \"\"\"\n Parse PubDate and EPubDate. 
Infer first published date.\n \"\"\"\n pubdates = collections.OrderedDict()\n for key, name in (\"pub\", \"PubDate\"), (\"epub\", \"EPubDate\"):\n xpath = f\"Item[@Name='{name}'][@Type='Date']\"\n text = docsum.findtext(xpath)\n try:\n pubdates[key] = parse_pubdate_text(text)\n except ValueError as e:\n id_ = int(docsum.findtext(\"Id\"))\n msg = f\"article {id_}; name: {key}; \" f\"date: {text}; error: {e}\"\n logging.info(msg)\n continue\n dates = set(pubdates.values())\n dates.discard(None)\n if dates:\n pubdates[\"published\"] = min(dates)\n return pubdates\n\n\ndef parse_esummary_article_info(elem: lxml.etree._Element) -> dict:\n \"\"\"\n Extract general article information\n \"\"\"\n article = collections.OrderedDict()\n article[\"pubmed_id\"] = int(elem.findtext(\"Id\"))\n article[\"journal_nlm_id\"] = elem.findtext(\"Item[@Name='NlmUniqueID']\")\n article[\"journal\"] = elem.findtext(\"Item[@Name='Source']\")\n article[\"title\"] = elem.findtext(\"Item[@Name='Title']\")\n article[\"doi\"] = elem.findtext(\"Item[@Name='DOI']\")\n # https://www.ncbi.nlm.nih.gov/books/NBK3827/table/pubmedhelp.T.publication_types/\n article[\"publication_types\"] = \" | \".join(\n x.text for x in elem.findall(\"Item[@Name='PubTypeList']/Item[@Name='PubType']\")\n )\n # get incoming citation count. https://github.com/dhimmel/pubmedpy/issues/2\n pmc_cited_by_count = elem.findtext(\"Item[@Name='PmcRefCount']\")\n try:\n pmc_cited_by_count = int(pmc_cited_by_count)\n except (TypeError, ValueError):\n pmc_cited_by_count = None\n article[\"pmc_cited_by_count\"] = pmc_cited_by_count\n return article\n\n\ndef parse_esummary(elem: lxml.etree._Element) -> dict:\n \"\"\"\n Extract pubmed, journal, and date information from an eSummaryResult/DocSum\n \"\"\"\n article = parse_esummary_article_info(elem)\n article.update(parse_esummary_pubdates(elem))\n article.update(parse_esummary_history(elem))\n return article\n\n\ndef extract_articles_from_esummaries(\n path: PathType, n_articles: Optional[int] = None, tqdm=tqdm.tqdm\n) -> List[dict]:\n \"\"\"\n Extract a list of articles (dictionaries with date information) from a\n an eSummaryResult XML file. Specify `n_articles` to enable a progress bar.\n \"\"\"\n if n_articles is not None:\n progress_bar = tqdm(total=n_articles, unit=\"articles\")\n\n articles = list()\n for elem in iter_extract_elems(path, tag=\"DocSum\"):\n article = parse_esummary(elem)\n articles.append(article)\n if n_articles is not None:\n progress_bar.update(1)\n\n if n_articles is not None:\n progress_bar.close()\n return articles\n\n\ndef articles_to_dataframe(articles: List[dict]) -> pandas.DataFrame:\n \"\"\"\n Convert a list of articles created by `extract_articles_from_esummaries`\n into a pandas.DataFrame.\n \"\"\"\n article_df = pandas.DataFrame(articles)\n article_df = article_df.sort_values(by=\"pubmed_id\")\n # Enforce a consistent column ordering\n columns = article_df.columns[2:].tolist()\n columns = [\n \"pubmed_id\",\n \"journal_nlm_id\",\n \"journal\",\n \"doi\",\n *sorted(x for x in columns if re.search(\"pub(?!med)\", x)),\n *sorted(x for x in columns if re.search(\"_[0-9]+$\", x)),\n \"title\",\n \"pmc_cited_by_count\",\n ]\n article_df = article_df[columns]\n return article_df\n" ]
[ [ "pandas.DataFrame" ] ]
GuiiFerrari/pyRANSAC-3D
[ "d9fa1371d972ccb50d80067a5719b3cd5be63cbc" ]
[ "pyransac3d/cuboid.py" ]
[ "import numpy as np\nimport random\n\nclass Cuboid:\n \"\"\" \n Implementation for box (Cuboid) RANSAC.\n\n A cuboid is defined as convex polyhedron bounded by six faces formed by three orthogonal normal vectors. Cats love to play with this kind of geometry.\n This method uses 6 points to find 3 best plane equations orthogonal to eachother. \n\n We could use a recursive planar RANSAC, but it would use 9 points instead. Orthogonality makes this algorithm more efficient. \n\n ![Cuboid](https://raw.githubusercontent.com/leomariga/pyRANSAC-3D/master/doc/cuboid.gif \"Cuboid\")\n\n ---\n \"\"\"\n def __init__(self):\n self.inliers = []\n self.equation = []\n\n\n\n\n def fit(self, pts, thresh=0.05, maxIteration=5000):\n \"\"\" \n Find the best equation for 3 planes which define a complete cuboid.\n\n :param pts: 3D point cloud as a `np.array (N,3)`.\n :param thresh: Threshold distance from the cylinder radius which is considered inlier.\n :param maxIteration: Number of maximum iteration which RANSAC will loop over.\n :returns:\n - `best_eq`: Array of 3 best planes's equation `np.array (3, 4)`\n - `best_inliers`: Inlier's index from the original point cloud. `np.array (1, M)`\n ---\n \"\"\"\n n_points = pts.shape[0]\n print(n_points)\n best_eq = []\n best_inliers = []\n\n for it in range(maxIteration):\n plane_eq = []\n\n # Samples 3 random points \n id_samples = random.sample(range(1, n_points-1), 6)\n pt_samples = pts[id_samples]\n\n\n # We have to find the plane equation described by those 3 points\n # We find first 2 vectors that are part of this plane\n # A = pt2 - pt1\n # B = pt3 - pt1\n\n vecA = pt_samples[1,:] - pt_samples[0,:]\n vecB = pt_samples[2,:] - pt_samples[0,:]\n\n # Now we compute the cross product of vecA and vecB to get vecC which is normal to the plane\n vecC = np.cross(vecA, vecB)\n\n # The plane equation will be vecC[0]*x + vecC[1]*y + vecC[0]*z = -k\n # We have to use a point to find k\n vecC = vecC / np.linalg.norm(vecC) # Normal\n\n k = -np.sum(np.multiply(vecC, pt_samples[1,:]))\n plane_eq.append([vecC[0], vecC[1], vecC[2], k])\n\n # Now we use another point to find a orthogonal plane 2\n # Calculate distance from the point to the first plane\n dist_p4_plane = (plane_eq[0][0]*pt_samples[3,0]+plane_eq[0][1]*pt_samples[3,1]+plane_eq[0][2]*pt_samples[3,2]+plane_eq[0][3])/np.sqrt(plane_eq[0][0]**2+plane_eq[0][1]**2+plane_eq[0][2]**2)\n \n # vecC is already normal (module 1) so we only have to discount from the point, the distance*unity = distance*normal\n # A simple way of understanding this is we move our point along the normal until it reaches the plane\n p4_proj_plane = pt_samples[3,0]-dist_p4_plane*vecC\n\n # Now, with help of our point p5 we can find another plane P2 which contains p4, p4_proj, p5 and \n vecD = p4_proj_plane - pt_samples[3,:]\n vecE = pt_samples[4,:] - pt_samples[3,:]\n vecF = np.cross(vecD, vecE)\n vecF = vecF / np.linalg.norm(vecF) # Normal\n k = -np.sum(np.multiply(vecF, pt_samples[4,:]))\n plane_eq.append([vecF[0], vecF[1], vecF[2], k])\n\n # The last plane will be orthogonal to the first and sacond plane (and its normals will be orthogonal to first and second planes' normal)\n vecG = np.cross(vecC, vecF)\n\n k = -np.sum(np.multiply(vecG, pt_samples[5,:]))\n plane_eq.append([vecG[0], vecG[1], vecG[2], k])\n plane_eq = np.asarray(plane_eq)\n # We have to find the value D for the last plane.\n\n # Distance from a point to a plane \n # https://mathworld.wolfram.com/Point-PlaneDistance.html\n pt_id_inliers = [] # list of inliers ids\n dist_pt = []\n 
for id_plane in range(plane_eq.shape[0]):\n dist_pt.append(np.abs((plane_eq[id_plane,0]*pts[:,0]+plane_eq[id_plane,1]*pts[:, 1]+plane_eq[id_plane,2]*pts[:, 2]+plane_eq[id_plane,3])/np.sqrt(plane_eq[id_plane,0]**2+plane_eq[id_plane,1]**2+plane_eq[id_plane,2]**2)))\n \n # Select indexes where distance is biggers than the threshold\n dist_pt = np.asarray(dist_pt)\n min_dist_pt = np.amin(dist_pt, axis=0)\n pt_id_inliers = np.where(np.abs(min_dist_pt) <= thresh)[0]\n\n\n if(len(pt_id_inliers) > len(best_inliers)):\n best_eq = plane_eq\n best_inliers = pt_id_inliers\n self.inliers = best_inliers\n self.equation = best_eq\n return best_eq, best_inliers\n" ]
[ [ "numpy.linalg.norm", "numpy.asarray", "numpy.multiply", "numpy.sqrt", "numpy.amin", "numpy.abs", "numpy.cross" ] ]
damianmatusik96/SoilIdentificationSystem
[ "2be32759c3381c8e985623147414659fd63714e4" ]
[ "app/main.py" ]
[ "from app.IndentificationSystem import data_cluster, data_handler_3, data_cluster_3\nfrom app.IndentificationSystem.data.ProfilePredictor import ProfilePredictor\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n data_cluster.choose_best_cluster(2, 8)\n profile_predictor = ProfilePredictor(data_handler_3.raw_data_pr, data_cluster)\n\n profile_predictor.get_best_profile()\n data_cluster_3.choose_best_cluster(2, 8)\n plt.show()\n # data_cluster_3.choose_best_cluster(2, 8)\n" ]
[ [ "matplotlib.pyplot.show" ] ]
nasoboleva/magenta
[ "38614f87e4670ecfcca171c7c7a59ae96ab886df" ]
[ "magenta/models/coconet/lib_tfsampling.py" ]
[ "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines the graph for sampling from Coconet.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport lib_graph\nimport lib_hparams\nimport numpy as np\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\n\nclass CoconetSampleGraph(object):\n \"\"\"Graph for Gibbs sampling from Coconet.\"\"\"\n\n def __init__(self, chkpt_path, placeholders=None):\n \"\"\"Initializes inputs for the Coconet sampling graph.\n\n Does not build or restore the graph. That happens lazily if you call run(),\n or explicitly using instantiate_sess_and_restore_checkpoint.\n\n Args:\n chkpt_path: Checkpoint directory for loading the model.\n Uses the latest checkpoint.\n placeholders: Optional placeholders.\n \"\"\"\n self.chkpt_path = chkpt_path\n self.hparams = lib_hparams.load_hparams(chkpt_path)\n if placeholders is None:\n self.placeholders = self.get_placeholders()\n else:\n self.placeholders = placeholders\n self.samples = None\n self.sess = None\n\n def get_placeholders(self):\n hparams = self.hparams\n return dict(\n pianorolls=tf.placeholder(\n tf.bool,\n [None, None, hparams.num_pitches, hparams.num_instruments],\n \"pianorolls\"),\n # The default value is only used for checking if completion masker\n # should be evoked. It can't be used directly as the batch size\n # and length of pianorolls are unknown during static time.\n outer_masks=tf.placeholder_with_default(\n np.zeros(\n (1, 1, hparams.num_pitches, hparams.num_instruments),\n dtype=np.float32),\n [None, None, hparams.num_pitches, hparams.num_instruments],\n \"outer_masks\"),\n sample_steps=tf.placeholder_with_default(0, (), \"sample_steps\"),\n total_gibbs_steps=tf.placeholder_with_default(\n 0, (), \"total_gibbs_steps\"),\n current_step=tf.placeholder_with_default(0, (), \"current_step\"),\n temperature=tf.placeholder_with_default(0.99, (), \"temperature\"))\n\n @property\n def inputs(self):\n return self.placeholders\n\n def make_outer_masks(self, outer_masks, input_pianorolls):\n \"\"\"Returns outer masks, if all zeros created by completion masking.\"\"\"\n outer_masks = tf.to_float(outer_masks)\n # If outer_masks come in as all zeros, it means there's no masking,\n # which also means nothing will be generated. In this case, use\n # completion mask to make new outer masks.\n outer_masks = tf.cond(\n tf.reduce_all(tf.equal(outer_masks, 0)),\n lambda: make_completion_masks(input_pianorolls),\n lambda: outer_masks)\n return outer_masks\n\n def build_sample_graph(self, input_pianorolls=None, outer_masks=None,\n total_gibbs_steps=None):\n \"\"\"Builds the tf.while_loop based sampling graph.\n\n Args:\n input_pianorolls: Optional input pianorolls override. If None, uses the\n pianorolls placeholder.\n outer_masks: Optional input outer_masks override. If None, uses the\n outer_masks placeholder.\n total_gibbs_steps: Optional input total_gibbs_steps override. 
If None,\n uses the total_gibbs_steps placeholder.\n Returns:\n The output op of the graph.\n \"\"\"\n if input_pianorolls is None:\n input_pianorolls = self.inputs[\"pianorolls\"]\n if outer_masks is None:\n outer_masks = self.inputs[\"outer_masks\"]\n\n tt = tf.shape(input_pianorolls)[1]\n sample_steps = tf.to_float(self.inputs[\"sample_steps\"])\n if total_gibbs_steps is None:\n total_gibbs_steps = self.inputs[\"total_gibbs_steps\"]\n temperature = self.inputs[\"temperature\"]\n\n input_pianorolls = tf.to_float(input_pianorolls)\n outer_masks = self.make_outer_masks(outer_masks, input_pianorolls)\n\n # Calculate total_gibbs_steps as steps * num_instruments if not given.\n total_gibbs_steps = tf.cond(\n tf.equal(total_gibbs_steps, 0),\n lambda: tf.to_float(tt * self.hparams.num_instruments),\n lambda: tf.to_float(total_gibbs_steps))\n\n # sample_steps is set to total_gibbs_steps if not given.\n sample_steps = tf.cond(\n tf.equal(sample_steps, 0),\n lambda: total_gibbs_steps,\n lambda: tf.to_float(sample_steps))\n\n def infer_step(pianorolls, step_count):\n \"\"\"Called by tf.while_loop, takes a Gibbs step.\"\"\"\n mask_prob = compute_mask_prob_from_yao_schedule(step_count,\n total_gibbs_steps)\n # 1 indicates mask out, 0 is not mask.\n masks = make_bernoulli_masks(tf.shape(pianorolls), mask_prob,\n outer_masks)\n\n logits = self.predict(pianorolls, masks)\n samples = sample_with_temperature(logits, temperature=temperature)\n\n outputs = pianorolls * (1 - masks) + samples * masks\n\n check_completion_op = tf.assert_equal(\n tf.where(tf.equal(tf.reduce_max(masks, axis=2), 1.),\n tf.reduce_max(outputs, axis=2),\n tf.reduce_max(pianorolls, axis=2)),\n 1.)\n with tf.control_dependencies([check_completion_op]):\n outputs = tf.identity(outputs)\n\n step_count += 1\n return outputs, step_count\n\n current_step = tf.to_float(self.inputs[\"current_step\"])\n\n # Initializes pianorolls by evaluating the model once to fill in all gaps.\n logits = self.predict(tf.to_float(input_pianorolls), outer_masks)\n samples = sample_with_temperature(logits, temperature=temperature)\n tf.get_variable_scope().reuse_variables()\n\n self.samples, current_step = tf.while_loop(\n lambda samples, current_step: current_step < sample_steps,\n infer_step, [samples, current_step],\n shape_invariants=[\n tf.TensorShape([None, None, None, None]),\n tf.TensorShape(None),\n ],\n back_prop=False,\n parallel_iterations=1,\n name=\"coco_while\")\n self.samples.set_shape(input_pianorolls.shape)\n return self.samples\n\n def predict(self, pianorolls, masks):\n \"\"\"Evalutes the model once and returns predictions.\"\"\"\n direct_inputs = dict(\n pianorolls=pianorolls, masks=masks,\n lengths=tf.to_float([tf.shape(pianorolls)[1]]))\n\n model = lib_graph.build_graph(\n is_training=False,\n hparams=self.hparams,\n direct_inputs=direct_inputs,\n use_placeholders=False)\n self.logits = model.logits\n return self.logits\n\n def instantiate_sess_and_restore_checkpoint(self):\n \"\"\"Instantiates session and restores from self.chkpt_path.\"\"\"\n if self.samples is None:\n self.build_sample_graph()\n sess = tf.Session()\n saver = tf.train.Saver()\n chkpt_fpath = tf.train.latest_checkpoint(self.chkpt_path)\n tf.logging.info(\"loading checkpoint %s\", chkpt_fpath)\n saver.restore(sess, chkpt_fpath)\n tf.get_variable_scope().reuse_variables()\n self.sess = sess\n return self.sess\n\n def run(self,\n pianorolls,\n masks=None,\n sample_steps=0,\n current_step=0,\n total_gibbs_steps=0,\n temperature=0.99,\n timeout_ms=0):\n \"\"\"Given input 
pianorolls, runs Gibbs sampling to fill in the rest.\n\n When total_gibbs_steps is 0, total_gibbs_steps is set to\n time * instruments. If faster sampling is desired on the expanse of sample\n quality, total_gibbs_steps can be explicitly set to a lower number,\n possibly to the value of sample_steps if do not plan on stopping sample\n early to obtain intermediate results.\n\n This function can be used to return intermediate results by setting the\n sample_steps to when results should be returned and leaving\n total_gibbs_steps to be 0.\n\n To continue sampling from intermediate results, set current_step to the\n number of steps taken, and feed in the intermediate pianorolls. Again\n leaving total_gibbs_steps as 0.\n\n Builds the graph and restores checkpoint if necessary.\n\n Args:\n pianorolls: a 4D numpy array of shape (batch, time, pitch, instrument)\n masks: a 4D numpy array of the same shape as pianorolls, with 1s\n indicating mask out. If is None, then the masks will be where have 1s\n where there are no notes, indicating to the model they should be\n filled in.\n sample_steps: an integer indicating the number of steps to sample in this\n call. If set to 0, then it defaults to total_gibbs_steps.\n current_step: an integer indicating how many steps might have already\n sampled before.\n total_gibbs_steps: an integer indicating the total number of steps that\n a complete sampling procedure would take.\n temperature: a float indicating the temperature for sampling from softmax.\n timeout_ms: Timeout for session.Run. Set to zero for no timeout.\n\n Returns:\n A dictionary, consisting of \"pianorolls\" which is a 4D numpy array of\n the sampled results and \"time_taken\" which is the time taken in sampling.\n \"\"\"\n if self.sess is None:\n # Build graph and restore checkpoint.\n self.instantiate_sess_and_restore_checkpoint()\n\n if masks is None:\n masks = np.zeros_like(pianorolls)\n\n start_time = time.time()\n run_options = None\n if timeout_ms:\n run_options = tf.RunOptions(timeout_in_ms=timeout_ms)\n new_piece = self.sess.run(\n self.samples,\n feed_dict={\n self.placeholders[\"pianorolls\"]: pianorolls,\n self.placeholders[\"outer_masks\"]: masks,\n self.placeholders[\"sample_steps\"]: sample_steps,\n self.placeholders[\"total_gibbs_steps\"]: total_gibbs_steps,\n self.placeholders[\"current_step\"]: current_step,\n self.placeholders[\"temperature\"]: temperature\n }, options=run_options)\n\n label = \"independent blocked gibbs\"\n time_taken = (time.time() - start_time) / 60.0\n tf.logging.info(\"exit %s (%.3fmin)\" % (label, time_taken))\n return dict(pianorolls=new_piece, time_taken=time_taken)\n\n\ndef make_completion_masks(pianorolls, outer_masks=1.):\n pianorolls = tf.to_float(pianorolls)\n masks = tf.reduce_all(tf.equal(pianorolls, 0), axis=2, keep_dims=True)\n inner_masks = tf.to_float(masks) + 0 * pianorolls\n return inner_masks * outer_masks\n\n\ndef make_bernoulli_masks(shape, pm, outer_masks=1.):\n bb = shape[0]\n tt = shape[1]\n pp = shape[2]\n ii = shape[3]\n probs = tf.random_uniform([bb, tt, ii])\n masks = tf.tile(tf.to_float(tf.less(probs, pm))[:, :, None, :], [1, 1, pp, 1])\n return masks * outer_masks\n\n\ndef sample_with_temperature(logits, temperature):\n \"\"\"Either argmax after softmax or random sample along the pitch axis.\n\n Args:\n logits: a Tensor of shape (batch, time, pitch, instrument).\n temperature: a float 0.0=argmax 1.0=random\n\n Returns:\n a Tensor of the same shape, with one_hots on the pitch dimension.\n \"\"\"\n logits = tf.transpose(logits, 
[0, 1, 3, 2])\n pitch_range = tf.shape(logits)[-1]\n\n def sample_from_logits(logits):\n with tf.control_dependencies([tf.assert_greater(temperature, 0.0)]):\n logits = tf.identity(logits)\n reshaped_logits = (\n tf.reshape(logits, [-1, tf.shape(logits)[-1]]) / temperature)\n choices = tf.multinomial(reshaped_logits, 1)\n choices = tf.reshape(choices,\n tf.shape(logits)[:logits.get_shape().ndims - 1])\n return choices\n\n choices = tf.cond(tf.equal(temperature, 0.0),\n lambda: tf.argmax(tf.nn.softmax(logits), -1),\n lambda: sample_from_logits(logits))\n samples_onehot = tf.one_hot(choices, pitch_range)\n return tf.transpose(samples_onehot, [0, 1, 3, 2])\n\n\ndef compute_mask_prob_from_yao_schedule(i, n, pmin=0.1, pmax=0.9, alpha=0.7):\n wat = (pmax - pmin) * i/ n\n return tf.maximum(pmin, pmax - wat / alpha)\n\n\ndef main(unused_argv):\n checkpoint_path = FLAGS.checkpoint\n sampler = CoconetSampleGraph(checkpoint_path)\n\n batch_size = 1\n decode_length = 4\n target_shape = [batch_size, decode_length, 46, 4]\n pianorolls = np.zeros(target_shape, dtype=np.float32)\n generated_piece = sampler.run(pianorolls, sample_steps=16, temperature=0.99)\n tf.logging.info(\"num of notes in piece %d\", np.sum(generated_piece))\n\n tf.logging.info(\"Done.\")\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.multinomial", "tensorflow.to_float", "tensorflow.control_dependencies", "tensorflow.nn.softmax", "tensorflow.one_hot", "tensorflow.identity", "numpy.zeros_like", "tensorflow.shape", "tensorflow.train.latest_checkpoint", "tensorflow.less", "tensorflow.random_uniform", "tensorflow.TensorShape", "tensorflow.train.Saver", "tensorflow.logging.info", "tensorflow.transpose", "tensorflow.get_variable_scope", "tensorflow.app.run", "numpy.zeros", "tensorflow.Session", "tensorflow.assert_greater", "tensorflow.placeholder", "tensorflow.RunOptions", "tensorflow.placeholder_with_default", "numpy.sum", "tensorflow.equal", "tensorflow.reduce_max", "tensorflow.maximum" ] ]
lauramsmith/fine-tuning-locomotion
[ "96d7c81458511c0a7a11b59cf8c2c3fb8df8a64b", "96d7c81458511c0a7a11b59cf8c2c3fb8df8a64b" ]
[ "motion_imitation/envs/env_wrappers/reset_task.py", "motion_imitation/envs/env_wrappers/logging_wrapper.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom pybullet_utils import transformations\n\nfrom motion_imitation.robots import a1\nfrom motion_imitation.utilities import motion_data\nfrom motion_imitation.utilities import pose3d\n\nSITTING_POSE = np.array([0.0, 0.0, 0.11, 0.0, 0.0, 0.0, 1.0] + [0, 1.17752553, -2.69719727]*4)\nSTANDING_POSE = np.array([0.0, 0.0, 0.25870023, 0.0, 0.0, 0.0, 1.0] + [0, 0.9, -1.8] * 4)\n\nJOINT_WEIGHTS = np.array([1.0, 0.75, 0.5] * 4)\n\nclass ResetTask(object):\n \"\"\"Imitation reference motion task.\"\"\"\n\n def __init__(self, terminal_conditions=(), real_robot=False):\n self._terminal_conditions = terminal_conditions\n self._env = None\n self._default_pose = None\n\n self._joint_pose_idx = None\n self._joint_pose_size = None\n \n self._stand_prob = 0.2\n self._sit_prob = 0.2\n\n self._fall_init_rot_x_min = -3.0 / 4.0 * np.pi\n self._fall_init_rot_x_max = 3.0 / 4.0 * np.pi\n\n self._real_robot = real_robot\n\n return\n\n def __call__(self, env):\n return self.reward(env)\n\n def reset(self, env):\n \"\"\"Resets the internal state of the task.\"\"\"\n assert(self._stand_prob + self._sit_prob <= 1.0)\n\n self._env = env\n\n if (self._joint_pose_idx is None or self._env.hard_reset):\n self._build_joint_data()\n\n if self._real_robot:\n return \n\n rand_val = self._rand_uniform(0, 1)\n if (rand_val < self._stand_prob):\n self._init_stand_pose()\n elif (rand_val < self._stand_prob + self._sit_prob):\n self._init_sit_pose()\n else:\n self._init_fall_pose(self._fall_init_rot_x_min, self._fall_init_rot_x_max)\n\n return\n\n def update(self, env):\n \"\"\"Updates the internal state of the task.\"\"\"\n del env\n return\n\n def done(self, env):\n \"\"\"Checks if the episode is over.\"\"\"\n del env\n done = any([done_fn(self._env) for done_fn in self._terminal_conditions])\n return done\n\n def build_target_obs(self):\n tar_obs = np.array([])\n return tar_obs\n\n def get_target_obs_bounds(self):\n low = np.array([])\n high = np.array([])\n return low, high\n\n def reward(self, env):\n \"\"\"Get the reward without side effects.\"\"\"\n del env\n\n roll_w = 0.5\n stand_w = 0.5\n roll_threshold = np.cos(0.2 * np.pi)\n \n r_roll, root_cos_dist = self._calc_reward_roll()\n\n if (root_cos_dist > roll_threshold):\n r_stand = self._calc_reward_stand()\n else:\n r_stand = 0.0\n\n reward = roll_w * r_roll + stand_w * r_stand\n\n return reward\n\n def _calc_reward_roll(self):\n up = np.array([0, 0, 1])\n root_rot = self._env.robot.GetTrueBaseOrientation()\n root_up = pose3d.QuaternionRotatePoint(up, root_rot)\n cos_dist = up.dot(root_up)\n\n r_roll = (0.5 * cos_dist + 0.5) ** 2\n\n return r_roll, cos_dist\n\n def _calc_reward_stand(self):\n tar_h = STANDING_POSE[2]\n pos_size = motion_data.MotionData.POS_SIZE\n rot_size = motion_data.MotionData.ROT_SIZE\n\n root_pos = self._env.robot.GetBasePosition()\n root_h = root_pos[2]\n h_err = tar_h - root_h\n h_err /= tar_h\n h_err = 
np.clip(h_err, 0.0, 1.0)\n r_height = 1.0 - h_err\n\n tar_pose = STANDING_POSE[(pos_size + rot_size):]\n joint_pose = self._env.robot.GetTrueMotorAngles()\n pose_diff = tar_pose - joint_pose\n pose_diff = JOINT_WEIGHTS * JOINT_WEIGHTS * pose_diff * pose_diff\n pose_err = np.sum(pose_diff)\n r_pose = np.exp(-0.6 * pose_err)\n\n tar_vel = 0.0\n joint_vel = self._env.robot.GetMotorVelocities()\n vel_diff = tar_vel - joint_vel\n vel_diff = vel_diff * vel_diff\n vel_err = np.sum(vel_diff)\n r_vel = np.exp(-0.02 * vel_err)\n\n r_stand = 0.2 * r_height + 0.6 * r_pose + 0.2 * r_vel\n\n return r_stand\n\n def _calc_reward_end_effector(self, ref_joint_angles):\n \"\"\"Get the end effector reward for sim or real A1 robot.\"\"\"\n pos_size = motion_data.MotionData.POS_SIZE\n rot_size = motion_data.MotionData.ROT_SIZE\n\n ref_base_pos = np.array(STANDING_POSE[:pos_size])\n ref_base_rot = np.array(STANDING_POSE[pos_size:(pos_size + rot_size)])\n rel_feet_pos_ref = a1.foot_positions_in_base_frame(ref_joint_angles)\n rel_feet_pos_robot = self._env.robot.GetFootPositionsInBaseFrame()\n end_eff_err = 0\n\n for rel_foot_pos_ref, rel_foot_pos_robot in zip(rel_feet_pos_ref,\n rel_feet_pos_robot):\n rel_foot_pos_diff = rel_foot_pos_ref - rel_foot_pos_robot\n end_eff_err += rel_foot_pos_diff[0]**2 + rel_foot_pos_diff[1]**2\n\n foot_height_ref = pose3d.PoseTransformPoint(\n point=rel_foot_pos_ref,\n position=ref_base_pos,\n quat=ref_base_rot)[2]\n\n foot_height_robot = pose3d.PoseTransformPoint(\n point=rel_foot_pos_robot,\n position=self._env.robot.GetBasePosition(),\n quat=self._env.robot.GetBaseOrientation())[2]\n\n end_eff_err += 3.0 * (foot_height_ref - foot_height_robot)**2\n\n r_end_eff = np.exp(-40 * end_eff_err)\n return r_end_eff\n\n def _get_pybullet_client(self):\n \"\"\"Get bullet client from the environment\"\"\"\n return self._env._pybullet_client\n\n def _get_num_joints(self):\n \"\"\"Get the number of joints in the character's body.\"\"\"\n pyb = self._get_pybullet_client()\n return pyb.getNumJoints(self._env.robot.quadruped)\n\n def _init_stand_pose(self):\n self._set_pose(STANDING_POSE)\n self._env.robot.ReceiveObservation()\n return\n \n def _init_sit_pose(self):\n self._set_pose(SITTING_POSE)\n self._env.robot.ReceiveObservation()\n return\n\n def _init_fall_pose(self, rot_x_min, rot_x_max):\n pyb = self._get_pybullet_client()\n pos_size = motion_data.MotionData.POS_SIZE\n rot_size = motion_data.MotionData.ROT_SIZE\n\n pose = self._get_pose()\n root_pos = np.array([0, 0, self._rand_uniform(low=0.4, high=0.5)])\n root_rot = self._rand_uniform(low=[rot_x_min, -np.pi/4, -np.pi],\n high=[rot_x_max, np.pi/4, np.pi])\n root_rot = pyb.getQuaternionFromEuler(root_rot)\n\n joint_lim_low = self._env.robot._joint_angle_lower_limits\n joint_lim_high = self._env.robot._joint_angle_upper_limits\n joint_pose_size = len(joint_lim_low)\n \n stand_pose = STANDING_POSE[-joint_pose_size:]\n joint_dir = self._randint(0, 2, joint_pose_size).astype(np.float32)\n lim_pose = (1.0 - joint_dir) * joint_lim_low + joint_dir * joint_lim_high\n \n pose_lerp = self._rand_uniform(low=0, high=1, size=joint_pose_size)\n pose_lerp = pose_lerp * pose_lerp * pose_lerp\n joint_pose = (1.0 - pose_lerp) * stand_pose + pose_lerp * lim_pose\n\n pose = np.concatenate([root_pos, root_rot, joint_pose])\n self._set_pose(pose)\n\n for _ in range(1000):\n pyb.stepSimulation()\n\n self._env.robot.ReceiveObservation()\n \n return\n \n def _build_joint_data(self):\n \"\"\"Precomputes joint data to facilitating accessing data from motion 
frames.\"\"\"\n num_joints = self._get_num_joints()\n self._joint_pose_idx = np.zeros(num_joints, dtype=np.int32)\n self._joint_pose_size = np.zeros(num_joints, dtype=np.int32)\n\n for j in range(num_joints):\n pyb = self._get_pybullet_client()\n j_info = pyb.getJointInfo(self._env.robot.quadruped, j)\n j_state = pyb.getJointStateMultiDof(self._env.robot.quadruped, j)\n\n j_pose_idx = j_info[3]\n j_pose_size = len(j_state[0])\n\n if (j_pose_idx < 0):\n assert (j_pose_size == 0)\n if (j == 0):\n j_pose_idx = 0\n else:\n j_pose_idx = self._joint_pose_idx[j - 1] + self._joint_pose_size[j - 1]\n\n self._joint_pose_idx[j] = j_pose_idx\n self._joint_pose_size[j] = j_pose_size\n\n return\n\n def _get_pose(self):\n root_pos = self._env.robot.GetBasePosition()\n root_rot = self._env.robot.GetTrueBaseOrientation()\n joint_pose = self._env.robot.GetTrueMotorAngles()\n pose = np.concatenate([root_pos, root_rot, joint_pose])\n return pose\n \n def _set_pose(self, pose):\n \"\"\"Set the state of a character to the given pose and velocity.\n\n Args:\n phys_model: handle of the character\n pose: pose to be applied to the character\n vel: velocity to be applied to the character\n \"\"\"\n pyb = self._get_pybullet_client()\n phys_model = self._env.robot.quadruped\n\n root_pos = pose[0:motion_data.MotionData.POS_SIZE]\n root_rot = pose[motion_data.MotionData.POS_SIZE:(motion_data.MotionData.POS_SIZE + motion_data.MotionData.ROT_SIZE)]\n pyb.resetBasePositionAndOrientation(phys_model, root_pos, root_rot)\n\n num_joints = self._get_num_joints()\n for j in range(num_joints):\n q_idx = self._get_joint_pose_idx(j)\n q_size = self._get_joint_pose_size(j)\n\n if (q_size > 0):\n j_pose = pose[q_idx:(q_idx + q_size)]\n j_vel = np.zeros_like(j_pose)\n pyb.resetJointStateMultiDof(phys_model, j, j_pose, j_vel)\n\n return\n\n def _get_joint_pose_idx(self, j):\n \"\"\"Get the starting index of the pose data for a give joint in a pose array.\"\"\"\n idx = self._joint_pose_idx[j]\n return idx\n\n def _get_joint_pose_size(self, j):\n \"\"\"Get the size of the pose data for a give joint in a pose array.\"\"\"\n pose_size = self._joint_pose_size[j]\n assert (pose_size == 1 or\n pose_size == 0), \"Only support 1D and 0D joints at the moment.\"\n return pose_size\n\n def _rand_uniform(self, low, high, size=None):\n \"\"\"Samples random float between [val_min, val_max].\"\"\"\n if hasattr(self._env, \"np_random\"):\n rand_val = self._env.np_random.uniform(low=low, high=high, size=size)\n else:\n rand_val = np.random.uniform(low=low, high=high, size=size)\n return rand_val\n\n def _randint(self, low, high, size=None):\n \"\"\"Samples random integer between [val_min, val_max].\"\"\"\n if hasattr(self._env, \"np_random\"):\n rand_val = self._env.np_random.randint(low, high, size=size)\n else:\n rand_val = np.random.randint(low, high, size=size)\n return rand_val\n\n\nclass RollTask(ResetTask):\n \"\"\"Imitation reference motion task.\"\"\"\n\n def __init__(self, terminal_conditions=()):\n super().__init__(terminal_conditions)\n\n self._stand_prob = 0.0\n self._sit_prob = 0.2\n\n return\n\n def reward(self, env):\n \"\"\"Get the reward without side effects.\"\"\"\n del env\n \n r_roll, _ = self._calc_reward_roll()\n torques = self._env.robot.GetMotorTorques()\n torque_penalty = np.sum(np.abs(torques))\n\n reward = r_roll - 1e-3 * torque_penalty\n\n return reward\n\nclass StandTask(ResetTask):\n \"\"\"Imitation reference motion task.\"\"\"\n\n def __init__(self, terminal_conditions=()):\n super().__init__(terminal_conditions)\n\n 
self._fall_init_rot_x_min = -1.0 / 4.0 * np.pi\n self._fall_init_rot_x_max = 1.0 / 4.0 * np.pi\n\n return\n \n def reward(self, env):\n \"\"\"Get the reward without side effects.\"\"\"\n del env\n \n r_stand = self._calc_reward_stand()\n reward = r_stand\n\n return reward\n", "\"\"\"Env wrapper that saves logs.\"\"\"\n\nimport atexit\nimport os\n\nimport numpy as np\nfrom phasespace import phasespace_robot_tracker\n\n\nclass LoggingWrapper(object):\n \"\"\"Env wrapper that saves logs.\"\"\"\n\n def __init__(self,\n env,\n output_dir,\n mocap_grpc_server=None,\n verbose=True,\n separate_episodes=False):\n \"\"\"Constructor.\n\n Args:\n env: An instance (possibly wrapped) of LocomotionGymEnv.\n output_dir: Where to save logs.\n mocap_grpc_server: Hostname and port of the gRPC server outputting marker\n data protos\n (e.g. \"localhost:12345\"). If None, don't look for mocap data.\n verbose: If True, print a message every time a log is saved.\n separate_episodes: If True, save one log file per episode. If False, save\n all episodes as one log file.\n \"\"\"\n if mocap_grpc_server:\n self._mocap_tracker = phasespace_robot_tracker.PhaseSpaceRobotTracker(\n server=mocap_grpc_server)\n else:\n self._mocap_tracker = None\n self._env = env\n self._robot = self._env.robot\n self._output_dir = output_dir\n os.makedirs(self._output_dir, exist_ok=True)\n self._verbose = verbose\n self._separate_episodes = separate_episodes\n self._clear_logs()\n self._episode_counter = 0\n atexit.register(self.log, verbose=True)\n\n def __getattr__(self, attr):\n return getattr(self._env, attr)\n\n def _clear_logs(self):\n self._linear_vels = []\n self._rpys = []\n self._angular_vels = []\n self._timestamps = []\n self._input_actions = []\n self._processed_actions = []\n self._joint_angles = []\n self._motor_temperatures = []\n self._mocap_positions = []\n self._mocap_rpys = []\n\n def step(self, action):\n self._input_actions.append(action)\n if self._mocap_tracker:\n self._mocap_tracker.update()\n obs, reward, done, info = self._env.step(action)\n\n self._processed_actions.append(self._robot.last_action)\n self._linear_vels.append(self._robot.GetBaseVelocity())\n self._rpys.append(self._robot.GetBaseRollPitchYaw())\n self._angular_vels.append(self._robot.GetBaseRollPitchYawRate())\n self._joint_angles.append(self._robot.GetMotorAngles())\n self._timestamps.append(self._robot.GetTimeSinceReset())\n if hasattr(self._robot, \"motor_temperatures\"):\n self._motor_temperatures.append(self._robot.motor_temperatures)\n if self._mocap_tracker:\n self._mocap_positions.append(self._mocap_tracker.get_base_position())\n self._mocap_rpys.append(self._mocap_tracker.get_base_roll_pitch_yaw())\n\n return obs, reward, done, info\n\n def log(self, verbose):\n if self._separate_episodes:\n out_file = os.path.join(\n self._output_dir,\n \"log_episode_{:07d}.npz\".format(self._episode_counter))\n else:\n out_file = os.path.join(self._output_dir, \"log_all_episodes.npz\")\n np.savez(\n out_file,\n input_actions=self._input_actions,\n processed_actions=self._processed_actions,\n timestamps=self._timestamps,\n linear_vels=self._linear_vels,\n rpys=self._rpys,\n angular_vels=self._angular_vels,\n joint_angles=self._joint_angles,\n motor_temperatures=self._motor_temperatures,\n mocap_positions=self._mocap_positions,\n mocap_rpys=self._mocap_rpys,\n )\n if verbose:\n print(\"logged to: {}\".format(out_file))\n self._clear_logs()\n\n def reset(self, *args, **kwargs):\n if self._separate_episodes:\n self.log(self._verbose)\n self._episode_counter 
+= 1\n return self._env.reset(*args, **kwargs)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros_like", "numpy.zeros", "numpy.sum", "numpy.exp", "numpy.random.uniform", "numpy.random.randint", "numpy.cos", "numpy.clip", "numpy.abs" ], [ "numpy.savez" ] ]
conica-cui/python-pcl
[ "1d83d2d7ce9ce2c22ff5855249459bfc22025000", "b54e80e7da94ac9e2279b95fdac597f1de7145d7" ]
[ "tests/test_filters.py", "examples/external/laspy/visualization_test_rgb.py" ]
[ "import os.path\nimport pickle\nimport shutil\nimport tempfile\nimport unittest\n\nimport pcl\nimport numpy as np\n\nfrom nose.plugins.attrib import attr\n\n\n_data = [(i, 2 * i, 3 * i + 0.2) for i in range(5)]\n_DATA = \"\"\"0.0, 0.0, 0.2;\n 1.0, 2.0, 3.2;\n 2.0, 4.0, 6.2;\n 3.0, 6.0, 9.2;\n 4.0, 8.0, 12.2\"\"\"\n\n\n_data2 = np.array(\n [[0.0080142, 0.694695, -0.26015],\n [-0.342265, -0.446349, 0.214207],\n [0.173687, -0.84253, -0.400481],\n [-0.874475, 0.706127, -0.117635],\n [0.908514, -0.598159, 0.744714]], dtype=np.float32)\n\n\n# Filter\n### ApproximateVoxelGrid ###\n\n\nclass TestApproximateVoxelGrid(unittest.TestCase):\n def setUp(self):\n self.p = pcl.load(\n \"tests\" +\n os.path.sep +\n \"tutorials\" +\n os.path.sep +\n \"table_scene_lms400.pcd\")\n self.fil = self.p.make_ApproximateVoxelGrid()\n # self.fil = pcl.ApproximateVoxelGrid()\n # self.fil.set_InputCloud(self.p)\n\n def test_VoxelGrid(self):\n x = 0.01\n y = 0.01\n z = 0.01\n self.fil.set_leaf_size(x, y, z)\n result = self.fil.filter()\n # check\n self.assertTrue(result.size < self.p.size)\n # self.assertEqual(result.size, 719)\n\n\n### ConditionalRemoval ###\n# Appveyor NG\n@attr('pcl_ver_0_4')\n@attr('pcl_over_17')\nclass TestConditionalRemoval(unittest.TestCase):\n def setUp(self):\n # self.p = pcl.load(\"tests\" + os.path.sep + \"flydracyl.pcd\")\n # self.p = pcl.PointCloud(_data)\n self.p = pcl.PointCloud(_data2)\n self.fil = self.p.make_ConditionalRemoval()\n self.fil.set_Condition(pcl.ConditionAnd())\n\n # result\n # nan nan nan\n # -0.342265 -0.446349 0.214207\n # nan nan nan\n # nan nan nan\n # 0.908514 -0.598159 0.744714\n def test_Condition(self):\n range_cond = self.p.make_ConditionAnd()\n range_cond.add_Comparison2('z', pcl.CythonCompareOp_Type.GT, 0.0)\n range_cond.add_Comparison2('z', pcl.CythonCompareOp_Type.LT, 0.8)\n # build the filter\n self.fil.set_KeepOrganized(True)\n # apply filter\n cloud_filtered = self.fil.filter()\n\n # check\n expected = np.array([-0.342265, -0.446349, 0.214207])\n datas = np.around(cloud_filtered.to_array()[1].tolist(), decimals=6)\n self.assertEqual(datas.tolist(), expected.tolist())\n\n expected2 = np.array([0.908514, -0.598159, 0.744714])\n datas = np.around(cloud_filtered.to_array()[4].tolist(), decimals=6)\n self.assertEqual(datas.tolist(), expected2.tolist())\n\n\n# def set_KeepOrganized(self, flag):\n# self.fil.setKeepOrganized(flag)\n#\n# def filter(self):\n# \"\"\"\n# Apply the filter according to the previously set parameters and return\n# a new pointcloud\n# \"\"\"\n# cdef PointCloud pc = PointCloud()\n# self.fil.filter(pc.thisptr()[0])\n# return pc\n\n\n### ConditionAnd ###\nclass TestConditionAnd(unittest.TestCase):\n def setUp(self):\n self.p = pcl.load(\"tests\" + os.path.sep + \"flydracyl.pcd\")\n\n def test_Tutorial(self):\n pass\n\n\n### CropBox ###\n# base : pcl/tests cpp source code[TEST (CropBox, Filters)]\nclass TestCropBox(unittest.TestCase):\n\n def setUp(self):\n input = pcl.PointCloud()\n points = np.zeros((9, 3), dtype=np.float32)\n points[0] = 0.0, 0.0, 0.0\n points[1] = 0.9, 0.9, 0.9\n points[2] = 0.9, 0.9, -0.9\n points[3] = 0.9, -0.9, 0.9\n points[4] = -0.9, 0.9, 0.9\n points[5] = 0.9, -0.9, -0.9\n points[6] = -0.9, -0.9, 0.9\n points[7] = -0.9, 0.9, -0.9\n points[8] = -0.9, -0.9, -0.9\n input.from_array(points)\n self.p = input\n\n def testException(self):\n self.assertRaises(TypeError, pcl.CropHull)\n\n def testCrop(self):\n cropBoxFilter = self.p.make_cropbox()\n # Cropbox slighlty bigger then bounding box of points\n 
cropBoxFilter.set_Min(-1.0, -1.0, -1.0, 1.0)\n cropBoxFilter.set_Max(1.0, 1.0, 1.0, 1.0)\n\n # Indices\n # vector<int> indices;\n # cropBoxFilter.filter(indices)\n\n # Cloud\n cloud_out = cropBoxFilter.filter()\n\n # Should contain all\n # self.assertEqual(indices.size, 9)\n self.assertEqual(cloud_out.size, 9)\n self.assertEqual(cloud_out.width, 9)\n self.assertEqual(cloud_out.height, 1)\n\n # IndicesConstPtr removed_indices;\n # removed_indices = cropBoxFilter.get_RemovedIndices ()\n cropBoxFilter.get_RemovedIndices()\n # self.assertEqual(removed_indices.size, 0)\n # self.assertEqual(lemn(removed_indices), 0)\n\n # Test setNegative\n cropBoxFilter.set_Negative(True)\n cloud_out_negative = cropBoxFilter.filter()\n # self.assertEqual(cloud_out_negative.size, 0)\n\n # cropBoxFilter.filter(indices)\n # self.assertEqual(indices.size, 0)\n\n cropBoxFilter.set_Negative(False)\n cloud_out = cropBoxFilter.filter()\n\n # Translate crop box up by 1\n tx = 0\n ty = 1\n tz = 0\n cropBoxFilter.set_Translation(tx, ty, tz)\n # indices = cropBoxFilter.filter()\n cloud_out = cropBoxFilter.filter()\n\n # self.assertEqual(indices.size, 5)\n self.assertEqual(cloud_out.size, 5)\n\n # removed_indices = cropBoxFilter.get_RemovedIndices ()\n cropBoxFilter.get_RemovedIndices()\n # self.assertEqual(removed_indices.size, 4)\n\n # Test setNegative\n cropBoxFilter.set_Negative(True)\n cloud_out_negative = cropBoxFilter.filter()\n # self.assertEqual(cloud_out_negative.size, 4)\n\n # indices = cropBoxFilter.filter()\n # self.assertEqual(indices.size, 4)\n\n cropBoxFilter.set_Negative(False)\n cloud_out = cropBoxFilter.filter()\n\n # Rotate crop box up by 45\n # cropBoxFilter.setRotation (Eigen::Vector3f (0.0, 45.0f * float (M_PI) / 180.0, 0.0f))\n # cropBoxFilter.filter(indices)\n # cropBoxFilter.filter(cloud_out)\n rx = 0.0\n ry = 45.0 * (3.141592 / 180.0)\n rz = 0.0\n cropBoxFilter.set_Rotation(rx, ry, rz)\n # indices = cropBoxFilter.filter()\n cloud_out = cropBoxFilter.filter()\n\n # self.assertEqual(indices.size, 1)\n self.assertEqual(cloud_out.size, 1)\n self.assertEqual(cloud_out.width, 1)\n self.assertEqual(cloud_out.height, 1)\n\n # removed_indices = cropBoxFilter.get_RemovedIndices ()\n # self.assertEqual(removed_indices.size, 8)\n cropBoxFilter.get_RemovedIndices()\n\n # Test setNegative\n cropBoxFilter.set_Negative(True)\n cloud_out_negative = cropBoxFilter.filter()\n # self.assertEqual(cloud_out_negative.size, 8)\n\n # indices = cropBoxFilter.filter()\n # self.assertEqual(indices.size, 8)\n\n cropBoxFilter.set_Negative(False)\n cloud_out = cropBoxFilter.filter()\n\n # // Rotate point cloud by -45\n # cropBoxFilter.set_Transform (getTransformation (0.0, 0.0, 0.0, 0.0, 0.0, -45.0f * float (M_PI) / 180.0f))\n # indices = cropBoxFilter.filter()\n # cloud_out = cropBoxFilter.filter()\n #\n # # self.assertEqual(indices.size, 3)\n # self.assertEqual(cloud_out.size, 3)\n # self.assertEqual(cloud_out.width, 3)\n # self.assertEqual(cloud_out.height, 1)\n ##\n\n # removed_indices = cropBoxFilter.get_RemovedIndices ()\n # self.assertEqual(removed_indices.size, 6)\n cropBoxFilter.get_RemovedIndices()\n\n # // Test setNegative\n cropBoxFilter.set_Negative(True)\n cloud_out_negative = cropBoxFilter.filter()\n # self.assertEqual(cloud_out_negative.size, 6)\n\n # indices = cropBoxFilter.filter()\n # self.assertEqual(indices.size, 6)\n\n cropBoxFilter.set_Negative(False)\n cloud_out = cropBoxFilter.filter()\n\n # Translate point cloud down by -1\n # # cropBoxFilter.setTransform (getTransformation(0, -1, 0, 0, 0, -45.0 * float 
(M_PI) / 180.0))\n # # cropBoxFilter.filter(indices)\n # cropBoxFilter.filter(cloud_out)\n #\n # # self.assertEqual(indices.size, 2)\n # self.assertEqual(cloud_out.size, 2)\n # self.assertEqual(cloud_out.width, 2)\n # self.assertEqual(cloud_out.height, 1)\n ##\n\n # removed_indices = cropBoxFilter.get_RemovedIndices ()\n # self.assertEqual(removed_indices.size, 7)\n\n # Test setNegative\n cropBoxFilter.set_Negative(True)\n cloud_out_negative = cropBoxFilter.filter()\n # self.assertEqual(cloud_out_negative.size, 7)\n\n # indices = cropBoxFilter.filter()\n # self.assertEqual(indices.size, 7)\n\n cropBoxFilter.set_Negative(False)\n cloud_out = cropBoxFilter.filter()\n\n # // Remove point cloud rotation\n # cropBoxFilter.set_Transform (getTransformation(0, -1, 0, 0, 0, 0))\n # indices = cropBoxFilter.filter()\n # cloud_out = cropBoxFilter.filter()\n\n # self.assertEqual(indices.size, 0)\n # self.assertEqual(cloud_out.size, 0)\n # self.assertEqual(cloud_out.width, 0)\n # self.assertEqual(cloud_out.height, 1)\n\n # removed_indices = cropBoxFilter.get_RemovedIndices ()\n # self.assertEqual(removed_indices.size, 9)\n\n # Test setNegative\n cropBoxFilter.set_Negative(True)\n cloud_out_negative = cropBoxFilter.filter()\n # self.assertEqual(cloud_out_negative.size, 9)\n\n # indices = cropBoxFilter.filter()\n # self.assertEqual(indices.size, 9)\n\n # PCLPointCloud2\n # // ------------------------------------------------------------------\n # Create cloud with center point and corner points\n # PCLPointCloud2::Ptr input2 (new PCLPointCloud2)\n # pcl::toPCLPointCloud2 (*input, *input2)\n #\n # Test the PointCloud<PointT> method\n # CropBox<PCLPointCloud2> cropBoxFilter2(true)\n # cropBoxFilter2.setInputCloud (input2)\n #\n # Cropbox slighlty bigger then bounding box of points\n # cropBoxFilter2.setMin (min_pt)\n # cropBoxFilter2.setMax (max_pt)\n #\n # Indices\n # vector<int> indices2;\n # cropBoxFilter2.filter (indices2)\n #\n # Cloud\n # PCLPointCloud2 cloud_out2;\n # cropBoxFilter2.filter (cloud_out2)\n #\n # // Should contain all\n # self.assertEqual(indices2.size, 9)\n # self.assertEqual(indices2.size, int (cloud_out2.width * cloud_out2.height))\n #\n # IndicesConstPtr removed_indices2;\n # removed_indices2 = cropBoxFilter2.get_RemovedIndices ()\n # self.assertEqual(removed_indices2.size, 0)\n #\n # // Test setNegative\n # PCLPointCloud2 cloud_out2_negative;\n # cropBoxFilter2.setNegative (true)\n # cropBoxFilter2.filter (cloud_out2_negative)\n # self.assertEqual(cloud_out2_negative.width), 0)\n #\n # cropBoxFilter2.filter (indices2)\n # self.assertEqual(indices2.size, 0)\n #\n # cropBoxFilter2.setNegative (false)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # // Translate crop box up by 1\n # cropBoxFilter2.setTranslation (Eigen::Vector3f(0, 1, 0))\n # cropBoxFilter2.filter (indices2)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # self.assertEqual(indices2.size, 5)\n # self.assertEqual(indices2.size, int (cloud_out2.width * cloud_out2.height))\n #\n # removed_indices2 = cropBoxFilter2.get_RemovedIndices ()\n # self.assertEqual(removed_indices2.size, 4)\n #\n # // Test setNegative\n # cropBoxFilter2.setNegative (true)\n # cropBoxFilter2.filter (cloud_out2_negative)\n # self.assertEqual(cloud_out2_negative.width), 4)\n #\n # cropBoxFilter2.filter (indices2)\n # self.assertEqual(indices2.size, 4)\n #\n # cropBoxFilter2.setNegative (false)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # // Rotate crop box up by 45\n # cropBoxFilter2.setRotation (Eigen::Vector3f (0.0, 45.0f * float (M_PI) / 180.0, 
0.0f))\n # cropBoxFilter2.filter (indices2)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # self.assertEqual(indices2.size, 1)\n # self.assertEqual(indices2.size, int (cloud_out2.width * cloud_out2.height))\n #\n # // Rotate point cloud by -45\n # cropBoxFilter2.setTransform (getTransformation (0.0, 0.0, 0.0, 0.0, 0.0, -45.0f * float (M_PI) / 180.0f))\n # cropBoxFilter2.filter (indices2)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # self.assertEqual(indices2.size, 3)\n # self.assertEqual(cloud_out2.width * cloud_out2.height), 3)\n #\n # removed_indices2 = cropBoxFilter2.get_RemovedIndices ()\n # self.assertEqual(removed_indices2.size, 6)\n #\n # // Test setNegative\n # cropBoxFilter2.setNegative (true)\n # cropBoxFilter2.filter (cloud_out2_negative)\n # self.assertEqual(cloud_out2_negative.width), 6)\n #\n # cropBoxFilter2.filter (indices2)\n # self.assertEqual(indices2.size, 6)\n #\n # cropBoxFilter2.setNegative (false)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # // Translate point cloud down by -1\n # cropBoxFilter2.setTransform (getTransformation (0.0, -1.0, 0.0, 0.0, 0.0, -45.0f * float (M_PI) / 180.0f))\n # cropBoxFilter2.filter (indices2)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # self.assertEqual(indices2.size, 2)\n # self.assertEqual(cloud_out2.width * cloud_out2.height), 2)\n #\n # removed_indices2 = cropBoxFilter2.get_RemovedIndices ()\n # self.assertEqual(removed_indices2.size, 7)\n #\n # // Test setNegative\n # cropBoxFilter2.setNegative (true)\n # cropBoxFilter2.filter (cloud_out2_negative)\n # self.assertEqual(cloud_out2_negative.width), 7)\n #\n # cropBoxFilter2.filter (indices2)\n # self.assertEqual(indices2.size, 7)\n #\n # cropBoxFilter2.setNegative (false)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # // Remove point cloud rotation\n # cropBoxFilter2.setTransform (getTransformation(0, -1, 0, 0, 0, 0))\n # cropBoxFilter2.filter (indices2)\n # cropBoxFilter2.filter (cloud_out2)\n #\n # self.assertEqual(indices2.size, 0)\n # self.assertEqual(cloud_out2.width * cloud_out2.height), 0)\n #\n # removed_indices2 = cropBoxFilter2.get_RemovedIndices ()\n # self.assertEqual(removed_indices2.size, 9)\n #\n # // Test setNegative\n # cropBoxFilter2.setNegative (true)\n # cropBoxFilter2.filter (cloud_out2_negative)\n # self.assertEqual(cloud_out2_negative.width), 9)\n #\n # cropBoxFilter2.filter (indices2)\n # self.assertEqual(indices2.size, 9)\n #\n # cropBoxFilter2.setNegative (false)\n # cropBoxFilter2.filter (cloud_out2)\n\n\n### CropHull ###\nclass TestCropHull(unittest.TestCase):\n\n def setUp(self):\n self.pc = pcl.load(\n \"tests\" +\n os.path.sep +\n \"tutorials\" +\n os.path.sep +\n \"table_scene_mug_stereo_textured.pcd\")\n\n def testException(self):\n self.assertRaises(TypeError, pcl.CropHull)\n\n def testCropHull(self):\n filterCloud = pcl.PointCloud()\n vt = pcl.Vertices()\n# # // inside point\n# # cloud->push_back(pcl::PointXYZ(M_PI * 0.3, M_PI * 0.3, 0))\n# # // hull points\n# # cloud->push_back(pcl::PointXYZ(0,0,0))\n# # cloud->push_back(pcl::PointXYZ(M_PI,0,0))\n# # cloud->push_back(pcl::PointXYZ(M_PI,M_PI*0.5,0))\n# # cloud->push_back(pcl::PointXYZ(0,M_PI*0.5,0))\n# # cloud->push_back(pcl::PointXYZ(0,0,0))\n# # // outside point\n# # cloud->push_back(pcl::PointXYZ(-M_PI * 0.3, -M_PI * 0.3, 0))\n# points_2 = np.array([\n# [1 * 0.3, 1 * 0.3, 0],\n# [0, 0, 0],\n# [1, 0, 0],\n# [1, 1 * 0.5, 0],\n# [0, 1 * 0.5, 0],\n# [0, 0, 0],\n# # [-1 * 0.3 , -1 * 0.3, 0]\n# ], dtype=np.float32)\n# filterCloud.from_array(points_2)\n# # print(filterCloud)\n#\n# vertices_point_1 = 
np.array([1, 2, 3, 4, 5], dtype=np.int)\n# vt.from_array(vertices_point_1)\n# # print(vt)\n# # vt.vertices.push_back(1)\n# # vt.vertices.push_back(2)\n# # vt.vertices.push_back(3)\n# # vt.vertices.push_back(4)\n# # vt.vertices.push_back(5)\n# # vertices = vector[pcl.Vertices]\n# # vertices.push_back(vt)\n#\n# outputCloud = pcl.PointCloud()\n# # crophull = pcl.CropHull()\n# # crophull.setInputCloud(self.pc)\n# crophull = self.pc.make_crophull()\n# # crophull.setHullIndices(vertices)\n# # crophull.setHullIndices(vt)\n# # crophull.setHullCloud(filterCloud)\n# # crophull.setDim(2)\n# # crophull.setCropOutside(false)\n# crophull.SetParameter(filterCloud, vt)\n# # indices = vector[int]\n# # cropHull.filter(indices)\n# # outputCloud = cropHull.filter()\n# # print(\"before: \" + outputCloud)\n# crophull.filter(outputCloud)\n# # print(outputCloud)\n\n\n### FieldComparison ###\nclass TestFieldComparison(unittest.TestCase):\n\n def setUp(self):\n self.p = pcl.load(\"tests/table_scene_mug_stereo_textured_noplane.pcd\")\n compare = CompareOp2\n thresh = 1.0\n self.fil = pcl.FieldComparison(compare, thresh)\n\n\n### PassThroughFilter ###\nclass TestPassthroughFilter(unittest.TestCase):\n\n def setUp(self):\n self.p = pcl.load(\"tests/table_scene_mug_stereo_textured_noplane.pcd\")\n\n def testFilter(self):\n fil = self.p.make_passthrough_filter()\n fil.set_filter_field_name(\"z\")\n fil.set_filter_limits(0, 0.75)\n c = fil.filter()\n self.assertTrue(c.size < self.p.size)\n self.assertEqual(c.size, 7751)\n\n def testFilterBoth(self):\n total = self.p.size\n fil = self.p.make_passthrough_filter()\n fil.set_filter_field_name(\"z\")\n fil.set_filter_limits(0, 0.75)\n front = fil.filter().size\n fil.set_filter_limits(0.75, 100)\n back = fil.filter().size\n self.assertEqual(total, front + back)\n\n\n### ProjectInliers ###\nclass TestProjectInliers(unittest.TestCase):\n\n def setUp(self):\n self.p = pcl.load(\"tests/table_scene_mug_stereo_textured_noplane.pcd\")\n self.fil = self.p.make_ProjectInliers()\n # self.fil = pcl.ProjectInliers()\n # self.fil.set_InputCloud(self.p)\n\n def test_model_type(self):\n # param1\n m = pcl.SACMODEL_PLANE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_PLANE)\n\n # param2\n m = pcl.SACMODEL_LINE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_LINE)\n\n # param3\n m = pcl.SACMODEL_CIRCLE2D\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_CIRCLE2D)\n\n # param4\n m = pcl.SACMODEL_CIRCLE3D\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_CIRCLE3D)\n\n # param5\n m = pcl.SACMODEL_SPHERE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_SPHERE)\n\n # param6\n m = pcl.SACMODEL_CYLINDER\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_CYLINDER)\n\n # param7\n m = pcl.SACMODEL_CONE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_CONE)\n\n # param8\n m = pcl.SACMODEL_TORUS\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_TORUS)\n\n # param9\n m = pcl.SACMODEL_PARALLEL_LINE\n self.fil.set_model_type(m)\n result_param = 
self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_PARALLEL_LINE)\n\n # param10\n m = pcl.SACMODEL_PERPENDICULAR_PLANE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_PERPENDICULAR_PLANE)\n\n # param11\n m = pcl.SACMODEL_PARALLEL_LINES\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_PARALLEL_LINES)\n\n # param12\n m = pcl.SACMODEL_NORMAL_PLANE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_NORMAL_PLANE)\n\n # param13\n m = pcl.SACMODEL_NORMAL_SPHERE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_NORMAL_SPHERE)\n\n # param14\n m = pcl.SACMODEL_REGISTRATION\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_REGISTRATION)\n\n # param15\n m = pcl.SACMODEL_PARALLEL_PLANE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_PARALLEL_PLANE)\n\n # param16\n m = pcl.SACMODEL_NORMAL_PARALLEL_PLANE\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_NORMAL_PARALLEL_PLANE)\n\n # param17\n m = pcl.SACMODEL_STICK\n self.fil.set_model_type(m)\n result_param = self.fil.get_model_type()\n self.assertEqual(result_param, pcl.SACMODEL_STICK)\n pass\n\n def test_copy_all_data(self):\n self.fil.set_copy_all_data(True)\n datas = self.fil.get_copy_all_data()\n # result\n self.assertEqual(datas, True)\n\n self.fil.set_copy_all_data(False)\n datas = self.fil.get_copy_all_data()\n # result2\n self.assertEqual(datas, False)\n\n\n### RadiusOutlierRemoval ###\n# class TestRadiusOutlierRemoval(unittest.TestCase):\n# \n# def setUp(self):\n# # self.p = pcl.load(\"tests/table_scene_mug_stereo_textured_noplane.pcd\")\n# self.p = pcl.PointCloud(_data2)\n# print(self.p.size)\n# self.fil = self.p.make_RadiusOutlierRemoval()\n# # self.fil = pcl.RadiusOutlierRemoval()\n# # self.fil.set_InputCloud(self.p)\n# \n# def test_radius_seach(self):\n# radius = 15.8\n# self.fil.set_radius_search(radius)\n# result = self.fil.get_radius_search()\n# self.assertEqual(radius, result)\n# \n# min_pts = 2\n# self.fil.set_MinNeighborsInRadius(2)\n# result = self.fil.get_MinNeighborsInRadius()\n# self.assertEqual(min_pts, result)\n# \n# result_point = self.fil.filter()\n# \n# # check\n# # new instance is returned\n# # self.assertNotEqual(self.p, result)\n# # filter retains the same number of points\n# # self.assertNotEqual(result_point.size, 0)\n# # self.assertNotEqual(self.p.size, result_point.size)\n\n\n### StatisticalOutlierRemovalFilter ###\nclass TestStatisticalOutlierRemovalFilter(unittest.TestCase):\n\n def setUp(self):\n self.p = pcl.load(\n \"tests\" +\n os.path.sep +\n \"table_scene_mug_stereo_textured_noplane.pcd\")\n self.fil = self.p.make_statistical_outlier_filter()\n # self.fil = pcl.StatisticalOutlierRemovalFilter()\n # self.fil.set_InputCloud(self.p)\n\n def _tpos(self, c):\n self.assertEqual(c.size, 22745)\n self.assertEqual(c.width, 22745)\n self.assertEqual(c.height, 1)\n self.assertTrue(c.is_dense)\n\n def _tneg(self, c):\n self.assertEqual(c.size, 1015)\n self.assertEqual(c.width, 1015)\n self.assertEqual(c.height, 1)\n self.assertTrue(c.is_dense)\n\n def testFilterPos(self):\n fil = self.p.make_statistical_outlier_filter()\n 
fil.set_mean_k(50)\n self.assertEqual(fil.mean_k, 50)\n fil.set_std_dev_mul_thresh(1.0)\n self.assertEqual(fil.stddev_mul_thresh, 1.0)\n c = fil.filter()\n self._tpos(c)\n\n def testFilterNeg(self):\n fil = self.p.make_statistical_outlier_filter()\n fil.set_mean_k(50)\n fil.set_std_dev_mul_thresh(1.0)\n self.assertEqual(fil.negative, False)\n fil.set_negative(True)\n self.assertEqual(fil.negative, True)\n c = fil.filter()\n self._tneg(c)\n\n def testFilterPosNeg(self):\n fil = self.p.make_statistical_outlier_filter()\n fil.set_mean_k(50)\n fil.set_std_dev_mul_thresh(1.0)\n c = fil.filter()\n self._tpos(c)\n fil.set_negative(True)\n c = fil.filter()\n self._tneg(c)\n\n\n### VoxelGridFilter ###\nclass TestVoxelGridFilter(unittest.TestCase):\n\n def setUp(self):\n self.p = pcl.load(\n \"tests\" +\n os.path.sep +\n \"table_scene_mug_stereo_textured_noplane.pcd\")\n self.fil = self.p.make_voxel_grid_filter()\n # self.fil = pcl.VoxelGridFilter()\n # self.fil.set_InputCloud(self.p)\n\n def testFilter(self):\n self.fil.set_leaf_size(0.01, 0.01, 0.01)\n c = self.fil.filter()\n self.assertTrue(c.size < self.p.size)\n self.assertEqual(c.size, 719)\n\n\n### Official Test Base ###\np_65558 = np.array([-0.058448, -0.189095, 0.723415], dtype=np.float32)\np_84737 = np.array([-0.088929, -0.152957, 0.746095], dtype=np.float32)\np_57966 = np.array([0.123646, -0.397528, 1.393187], dtype=np.float32)\np_39543 = np.array([0.560287, -0.545020, 1.602833], dtype=np.float32)\np_17766 = np.array([0.557854, -0.711976, 1.762013], dtype=np.float32)\np_70202 = np.array([0.150500, -0.160329, 0.646596], dtype=np.float32)\np_102219 = np.array([0.175637, -0.101353, 0.661631], dtype=np.float32)\np_81765 = np.array([0.223189, -0.151714, 0.708332], dtype=np.float32)\n\n# class TESTFastBilateralFilter(unittest.TestCase):\n# def setUp(self):\n# self.p = pcl.load(\"tests\" + os.path.sep + \"milk_cartoon_all_small_clorox.pcd\")\n#\n# def testFastBilateralFilter(self):\n# fbf = pcl.FastBilateralFilter()\n# fbf.setInputCloud(cloud)\n# fbf.setSigmaS (5)\n# fbf.setSigmaR (0.03f)\n# cloud_filtered = fbf.filter()\n# # for (size_t dim = 0; dim < 3; ++dim):\n# for dim range(0:3):\n# EXPECT_NEAR (p_84737[dim], cloud_filtered[84737][dim], 1e-3)\n# EXPECT_NEAR (p_57966[dim], cloud_filtered[57966][dim], 1e-3)\n# EXPECT_NEAR (p_39543[dim], cloud_filtered[39543][dim], 1e-3)\n# EXPECT_NEAR (p_17766[dim], cloud_filtered[17766][dim], 1e-3)\n# EXPECT_NEAR (p_70202[dim], cloud_filtered[70202][dim], 1e-3)\n# EXPECT_NEAR (p_102219[dim], cloud_filtered[102219][dim], 1e-3)\n# EXPECT_NEAR (p_81765[dim], cloud_filtered[81765][dim], 1e-3)\n# pass\n\n\n# class TESTFastBilateralFilterOMP(unittest.TestCase):\n#\n# def setUp(self):\n# self.p = pcl.load(\"tests\" + os.path.sep + \"milk_cartoon_all_small_clorox.pcd\")\n#\n# sigma_s = [2.341, 5.2342, 10.29380]\n# sigma_r = [0.0123, 0.023, 0.0345]\n# for (size_t i = 0; i < 3; i++)\n# FastBilateralFilter<PointXYZ> fbf;\n# fbf.setInputCloud (cloud);\n# fbf.setSigmaS (sigma_s[i]);\n# fbf.setSigmaR (sigma_r[i]);\n# PointCloud<PointXYZ>::Ptr cloud_filtered (new PointCloud<PointXYZ> ());\n# fbf.filter (*cloud_filtered);\n#\n# FastBilateralFilterOMP<PointXYZ> fbf_omp (0);\n# fbf_omp.setInputCloud (cloud);\n# fbf_omp.setSigmaS (sigma_s[i]);\n# fbf_omp.setSigmaR (sigma_r[i]);\n# PointCloud<PointXYZ>::Ptr cloud_filtered_omp (new PointCloud<PointXYZ> ());\n# fbf_omp.filter (*cloud_filtered_omp);\n# PCL_INFO (\"[FastBilateralFilterOMP] filtering took %f ms\\n\", tt.toc ());\n#\n#\n# EXPECT_EQ (cloud_filtered_omp->points.size 
(), cloud_filtered->points.size ());\n# for (size_t j = 0; j < cloud_filtered_omp->size (); ++j)\n# {\n# if (pcl_isnan (cloud_filtered_omp->at (j).x))\n# EXPECT_TRUE (pcl_isnan (cloud_filtered->at (j).x));\n# else\n# {\n# EXPECT_NEAR (cloud_filtered_omp->at (j).x, cloud_filtered->at (j).x, 1e-3);\n# EXPECT_NEAR (cloud_filtered_omp->at (j).y, cloud_filtered->at (j).y, 1e-3);\n# EXPECT_NEAR (cloud_filtered_omp->at (j).z, cloud_filtered->at (j).z, 1e-3);\n# }\n# }\n\n\ndef suite():\n suite = unittest.TestSuite()\n\n # Filter\n suite.addTests(unittest.makeSuite(TestApproximateVoxelGrid))\n suite.addTests(unittest.makeSuite(TestConditionalRemoval))\n # suite.addTests(unittest.makeSuite(TestConditionAnd))\n suite.addTests(unittest.makeSuite(TestCropBox))\n suite.addTests(unittest.makeSuite(TestCropHull))\n suite.addTests(unittest.makeSuite(TestFieldComparison))\n suite.addTests(unittest.makeSuite(TestPassthroughFilter))\n suite.addTests(unittest.makeSuite(TestProjectInliers))\n # suite.addTests(unittest.makeSuite(TestRadiusOutlierRemoval))\n suite.addTests(unittest.makeSuite(TestStatisticalOutlierRemovalFilter))\n suite.addTests(unittest.makeSuite(TestVoxelGridFilter))\n\n # PointCloudLibrary Official Base Test?\n # suite.addTests(unittest.makeSuite(TestFastBilateralFilter))\n\n return suite\n\n\nif __name__ == '__main__':\n testSuite = suite()\n unittest.TextTestRunner().run(testSuite)\n", "from laspy import file\nimport numpy as np\nimport pcl\nimport pcl.pcl_visualization\n\n\ndef main():\n # RGB : NG\n # f = file.File('28XXX10000075-18.las', mode='r')\n f = file.File('28W0608011101-1.las', mode='r')\n # f = file.File('28XXX00020001-1.las', mode='r')\n # f = file.File('simple1_4.las', mode='r')\n\n # check las file version\n # RGB contains\n if f._header.data_format_id in (2, 3, 5):\n red = (f.red)\n green = (f.green)\n blue = (f.blue)\n # 16bit to convert 8bit data(data Storage First 8 bits case)\n red = np.right_shift(red, 8).astype(np.uint8)\n green = np.right_shift(green, 8).astype(np.uint8)\n blue = np.right_shift(blue, 8).astype(np.uint8)\n # (data Storage After 8 bits case)\n # red = red.astype(np.uint8)\n # green = green.astype(np.uint8)\n # blue = blue.astype(np.uint8)\n red = red.astype(np.uint32)\n green = green.astype(np.uint32)\n blue = blue.astype(np.uint32)\n rgb = np.left_shift(red, 16) + np.left_shift(green,\n 8) + np.left_shift(blue, 0)\n ptcloud = np.vstack((f.x, f.y, f.z, rgb)).transpose()\n cloud = pcl.PointCloud_PointXYZRGBA()\n # set raw points\n # cloud.from_array(np.array(ptcloud, dtype=np.float32))\n # set point centered\n mean_param = np.concatenate([np.mean(ptcloud, 0)[0:3], np.zeros(1)])\n ptcloud_centred = ptcloud - mean_param\n # print(ptcloud_centred)\n cloud.from_array(np.array(ptcloud_centred, dtype=np.float32))\n\n # Visualization\n visual = pcl.pcl_visualization.CloudViewing()\n visual.ShowColorACloud(cloud, b'cloud')\n\n else:\n ptcloud = np.vstack((f.x, f.y, f.z)).transpose()\n mean_param = np.mean(ptcloud, 0)\n cloud = pcl.PointCloud()\n # set raw points\n # cloud.from_array(np.array(ptcloud, dtype=np.float32))\n # set point centered\n # mean_param = np.concatenate([np.mean(ptcloud, 0)[0:3], np.zeros(1)])\n ptcloud_centred = ptcloud - mean_param\n # print(ptcloud_centred)\n cloud.from_array(np.array(ptcloud_centred, dtype=np.float32))\n\n # Visualization\n visual = pcl.pcl_visualization.CloudViewing()\n visual.ShowMonochromeCloud(cloud, b'cloud')\n\n v = True\n while v:\n v = not(visual.WasStopped())\n\n\nif __name__ == \"__main__\":\n # import 
cProfile\n # cProfile.run('main()', sort='time')\n main()\n" ]
[ [ "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.left_shift", "numpy.zeros", "numpy.mean", "numpy.right_shift", "numpy.vstack" ] ]
theodumont/client
[ "7402ac67ada5bc8078078a49fd3e0cb4b6172307" ]
[ "wandb/sklearn/utils.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom six.moves.collections_abc import Sequence, Iterable\nimport sklearn\nimport scipy\nimport wandb\n\n\ndef encode_labels(df):\n le = sklearn.preprocessing.LabelEncoder()\n # apply le on categorical feature columns\n categorical_cols = df.select_dtypes(\n exclude=[\"int\", \"float\", \"float64\", \"float32\", \"int32\", \"int64\"]\n ).columns\n df[categorical_cols] = df[categorical_cols].apply(lambda col: le.fit_transform(col))\n\n\ndef test_types(**kwargs):\n test_passed = True\n for k, v in kwargs.items():\n # check for incorrect types\n if (\n (k == \"X\")\n or (k == \"X_test\")\n or (k == \"y\")\n or (k == \"y_test\")\n or (k == \"y_true\")\n or (k == \"y_probas\")\n ):\n # FIXME: do this individually\n if not isinstance(\n v,\n (\n Sequence,\n Iterable,\n np.ndarray,\n np.generic,\n pd.DataFrame,\n pd.Series,\n list,\n ),\n ):\n wandb.termerror(\"%s is not an array. Please try again.\" % (k))\n test_passed = False\n # check for classifier types\n if k == \"model\":\n if (not sklearn.base.is_classifier(v)) and (\n not sklearn.base.is_regressor(v)\n ):\n wandb.termerror(\n \"%s is not a classifier or regressor. Please try again.\" % (k)\n )\n test_passed = False\n elif k == \"clf\" or k == \"binary_clf\":\n if not (sklearn.base.is_classifier(v)):\n wandb.termerror(\"%s is not a classifier. Please try again.\" % (k))\n test_passed = False\n elif k == \"regressor\":\n if not sklearn.base.is_regressor(v):\n wandb.termerror(\"%s is not a regressor. Please try again.\" % (k))\n test_passed = False\n elif k == \"clusterer\":\n if not (getattr(v, \"_estimator_type\", None) == \"clusterer\"):\n wandb.termerror(\"%s is not a clusterer. Please try again.\" % (k))\n test_passed = False\n return test_passed\n\n\ndef test_fitted(model):\n try:\n model.predict(np.zeros((7, 3)))\n except sklearn.exceptions.NotFittedError:\n wandb.termerror(\"Please fit the model before passing it in.\")\n return False\n except AttributeError:\n # Some clustering models (LDA, PCA, Agglomerative) don't implement ``predict``\n try:\n sklearn.utils.validation.check_is_fitted(\n model,\n [\n \"coef_\",\n \"estimator_\",\n \"labels_\",\n \"n_clusters_\",\n \"children_\",\n \"components_\",\n \"n_components_\",\n \"n_iter_\",\n \"n_batch_iter_\",\n \"explained_variance_\",\n \"singular_values_\",\n \"mean_\",\n ],\n all_or_any=any,\n )\n return True\n except sklearn.exceptions.NotFittedError:\n wandb.termerror(\"Please fit the model before passing it in.\")\n return False\n except Exception:\n # Assume it's fitted, since ``NotFittedError`` wasn't raised\n return True\n\n\n# Test Asummptions for plotting parameters and datasets\ndef test_missing(**kwargs):\n test_passed = True\n for k, v in kwargs.items():\n # Missing/empty params/datapoint arrays\n if v is None:\n wandb.termerror(\"%s is None. Please try again.\" % (k))\n test_passed = False\n if (k == \"X\") or (k == \"X_test\"):\n if isinstance(v, scipy.sparse.csr.csr_matrix):\n v = v.toarray()\n elif isinstance(v, (pd.DataFrame, pd.Series)):\n v = v.to_numpy()\n elif isinstance(v, list):\n v = np.asarray(v)\n\n # Warn the user about missing values\n missing = 0\n missing = np.count_nonzero(pd.isnull(v))\n if missing > 0:\n wandb.termwarn(\"%s contains %d missing values. 
\" % (k, missing))\n test_passed = False\n # Ensure the dataset contains only integers\n non_nums = 0\n if v.ndim == 1:\n non_nums = sum(\n 1\n for val in v\n if (\n not isinstance(val, (int, float, complex))\n and not isinstance(val, np.number)\n )\n )\n else:\n non_nums = sum(\n 1\n for sl in v\n for val in sl\n if (\n not isinstance(val, (int, float, complex))\n and not isinstance(val, np.number)\n )\n )\n if non_nums > 0:\n wandb.termerror(\n \"%s contains values that are not numbers. Please vectorize, label encode or one hot encode %s and call the plotting function again.\"\n % (k, k)\n )\n test_passed = False\n return test_passed\n" ]
[ [ "pandas.isnull", "sklearn.preprocessing.LabelEncoder", "sklearn.utils.validation.check_is_fitted", "numpy.asarray", "numpy.zeros", "sklearn.base.is_regressor", "sklearn.base.is_classifier" ] ]
mizterbas/hoc
[ "91f4875dc4546b80d40bbb4a422f0c6849491faf" ]
[ "squad_utils.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python2, python3\n\"\"\"Utility functions for SQuAD v1.1/v2.0 datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# from __future__ import google_type_annotations\nfrom __future__ import print_function\nimport collections\nimport json\nimport math\nimport re\nimport string\nimport sys\nfrom albert import fine_tuning_utils\nfrom albert import modeling\nfrom albert import optimization\nfrom albert import tokenization\nimport numpy as np\nimport six\nfrom six.moves import map\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import data as contrib_data\nfrom tensorflow.contrib import layers as contrib_layers\nfrom tensorflow.contrib import tpu as contrib_tpu\n\n_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\",\n \"start_log_prob\", \"end_log_prob\"])\n\n_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_log_prob\", \"end_log_prob\"])\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\",\n \"start_log_prob\",\n \"end_log_prob\"])\n\nRawResultV2 = collections.namedtuple(\n \"RawResultV2\",\n [\"unique_id\", \"start_top_log_probs\", \"start_top_index\",\n \"end_top_log_probs\", \"end_top_index\", \"cls_logits\"])\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n paragraph_text,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.paragraph_text = paragraph_text\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s += \", paragraph_text: [%s]\" % (\" \".join(self.paragraph_text))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tok_start_to_orig_index,\n tok_end_to_orig_index,\n token_is_max_context,\n tokens,\n input_ids,\n input_mask,\n segment_ids,\n paragraph_len,\n p_mask=None,\n start_position=None,\n end_position=None,\n 
is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tok_start_to_orig_index = tok_start_to_orig_index\n self.tok_end_to_orig_index = tok_end_to_orig_index\n self.token_is_max_context = token_is_max_context\n self.tokens = tokens\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.paragraph_len = paragraph_len\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n self.p_mask = p_mask\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n orig_answer_text = None\n is_impossible = False\n\n if is_training:\n is_impossible = qa.get(\"is_impossible\", False)\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n start_position = answer[\"answer_start\"]\n else:\n start_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n paragraph_text=paragraph_text,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples\n\n\ndef _convert_index(index, pos, m=None, is_start=True):\n \"\"\"Converts index.\"\"\"\n if index[pos] is not None:\n return index[pos]\n n = len(index)\n rear = pos\n while rear < n - 1 and index[rear] is None:\n rear += 1\n front = pos\n while front > 0 and index[front] is None:\n front -= 1\n assert index[front] is not None or index[rear] is not None\n if index[front] is None:\n if index[rear] >= 1:\n if is_start:\n return 0\n else:\n return index[rear] - 1\n return index[rear]\n if index[rear] is None:\n if m is not None and index[front] < m - 1:\n if is_start:\n return index[front] + 1\n else:\n return m - 1\n return index[front]\n if is_start:\n if index[rear] > index[front] + 1:\n return index[front] + 1\n else:\n return index[rear]\n else:\n if index[rear] > index[front] + 1:\n return index[rear] - 1\n else:\n return index[front]\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn, do_lower_case):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n cnt_pos, cnt_neg = 0, 0\n unique_id = 1000000000\n max_n, max_m = 1024, 1024\n f = np.zeros((max_n, max_m), dtype=np.float32)\n\n for (example_index, example) in enumerate(examples):\n\n if example_index % 100 == 0:\n tf.logging.info(\"Converting {}/{} pos {} neg {}\".format(\n example_index, len(examples), cnt_pos, cnt_neg))\n\n query_tokens = tokenization.encode_ids(\n tokenizer.sp_model,\n tokenization.preprocess_text(\n example.question_text, lower=do_lower_case))\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n paragraph_text = example.paragraph_text\n para_tokens = tokenization.encode_pieces(\n tokenizer.sp_model,\n tokenization.preprocess_text(\n example.paragraph_text, 
lower=do_lower_case),\n return_unicode=False)\n\n chartok_to_tok_index = []\n tok_start_to_chartok_index = []\n tok_end_to_chartok_index = []\n char_cnt = 0\n para_tokens = [six.ensure_text(token, \"utf-8\") for token in para_tokens]\n for i, token in enumerate(para_tokens):\n new_token = six.ensure_text(token).replace(\n tokenization.SPIECE_UNDERLINE.decode(\"utf-8\"), \" \")\n chartok_to_tok_index.extend([i] * len(new_token))\n tok_start_to_chartok_index.append(char_cnt)\n char_cnt += len(new_token)\n tok_end_to_chartok_index.append(char_cnt - 1)\n\n tok_cat_text = \"\".join(para_tokens).replace(\n tokenization.SPIECE_UNDERLINE.decode(\"utf-8\"), \" \")\n n, m = len(paragraph_text), len(tok_cat_text)\n\n if n > max_n or m > max_m:\n max_n = max(n, max_n)\n max_m = max(m, max_m)\n f = np.zeros((max_n, max_m), dtype=np.float32)\n\n g = {}\n\n def _lcs_match(max_dist, n=n, m=m):\n \"\"\"Longest-common-substring algorithm.\"\"\"\n f.fill(0)\n g.clear()\n\n ### longest common sub sequence\n # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))\n for i in range(n):\n\n # note(zhiliny):\n # unlike standard LCS, this is specifically optimized for the setting\n # because the mismatch between sentence pieces and original text will\n # be small\n for j in range(i - max_dist, i + max_dist):\n if j >= m or j < 0: continue\n\n if i > 0:\n g[(i, j)] = 0\n f[i, j] = f[i - 1, j]\n\n if j > 0 and f[i, j - 1] > f[i, j]:\n g[(i, j)] = 1\n f[i, j] = f[i, j - 1]\n\n f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0\n if (tokenization.preprocess_text(\n paragraph_text[i], lower=do_lower_case,\n remove_space=False) == tok_cat_text[j]\n and f_prev + 1 > f[i, j]):\n g[(i, j)] = 2\n f[i, j] = f_prev + 1\n\n max_dist = abs(n - m) + 5\n for _ in range(2):\n _lcs_match(max_dist)\n if f[n - 1, m - 1] > 0.8 * n: break\n max_dist *= 2\n\n orig_to_chartok_index = [None] * n\n chartok_to_orig_index = [None] * m\n i, j = n - 1, m - 1\n while i >= 0 and j >= 0:\n if (i, j) not in g: break\n if g[(i, j)] == 2:\n orig_to_chartok_index[i] = j\n chartok_to_orig_index[j] = i\n i, j = i - 1, j - 1\n elif g[(i, j)] == 1:\n j = j - 1\n else:\n i = i - 1\n\n if (all(v is None for v in orig_to_chartok_index) or\n f[n - 1, m - 1] < 0.8 * n):\n tf.logging.info(\"MISMATCH DETECTED!\")\n continue\n\n tok_start_to_orig_index = []\n tok_end_to_orig_index = []\n for i in range(len(para_tokens)):\n start_chartok_pos = tok_start_to_chartok_index[i]\n end_chartok_pos = tok_end_to_chartok_index[i]\n start_orig_pos = _convert_index(chartok_to_orig_index, start_chartok_pos,\n n, is_start=True)\n end_orig_pos = _convert_index(chartok_to_orig_index, end_chartok_pos,\n n, is_start=False)\n\n tok_start_to_orig_index.append(start_orig_pos)\n tok_end_to_orig_index.append(end_orig_pos)\n\n if not is_training:\n tok_start_position = tok_end_position = None\n\n if is_training and example.is_impossible:\n tok_start_position = 0\n tok_end_position = 0\n\n if is_training and not example.is_impossible:\n start_position = example.start_position\n end_position = start_position + len(example.orig_answer_text) - 1\n\n start_chartok_pos = _convert_index(orig_to_chartok_index, start_position,\n is_start=True)\n tok_start_position = chartok_to_tok_index[start_chartok_pos]\n\n end_chartok_pos = _convert_index(orig_to_chartok_index, end_position,\n is_start=False)\n tok_end_position = chartok_to_tok_index[end_chartok_pos]\n assert tok_start_position <= tok_end_position\n\n def _piece_to_id(x):\n if six.PY2 and isinstance(x, six.text_type):\n x = 
six.ensure_binary(x, \"utf-8\")\n return tokenizer.sp_model.PieceToId(x)\n\n all_doc_tokens = list(map(_piece_to_id, para_tokens))\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_is_max_context = {}\n segment_ids = []\n p_mask = []\n\n cur_tok_start_to_orig_index = []\n cur_tok_end_to_orig_index = []\n\n tokens.append(tokenizer.sp_model.PieceToId(\"[CLS]\"))\n segment_ids.append(0)\n p_mask.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n p_mask.append(1)\n tokens.append(tokenizer.sp_model.PieceToId(\"[SEP]\"))\n segment_ids.append(0)\n p_mask.append(1)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n\n cur_tok_start_to_orig_index.append(\n tok_start_to_orig_index[split_token_index])\n cur_tok_end_to_orig_index.append(\n tok_end_to_orig_index[split_token_index])\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n p_mask.append(0)\n tokens.append(tokenizer.sp_model.PieceToId(\"[SEP]\"))\n segment_ids.append(1)\n p_mask.append(1)\n\n paragraph_len = len(tokens)\n input_ids = tokens\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n p_mask.append(1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n span_is_impossible = example.is_impossible\n start_position = None\n end_position = None\n if is_training and not span_is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n # continue\n start_position = 0\n end_position = 0\n span_is_impossible = True\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and span_is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tok_start_to_orig_index: %s\" % \" \".join(\n [str(x) for x in cur_tok_start_to_orig_index]))\n tf.logging.info(\"tok_end_to_orig_index: %s\" % \" \".join(\n [str(x) for x in cur_tok_end_to_orig_index]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_pieces: %s\" % \" \".join(\n [tokenizer.sp_model.IdToPiece(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n if is_training and span_is_impossible:\n tf.logging.info(\"impossible example span\")\n\n if is_training and not span_is_impossible:\n pieces = [tokenizer.sp_model.IdToPiece(token) for token in\n tokens[start_position: (end_position + 1)]]\n answer_text = tokenizer.sp_model.DecodePieces(pieces)\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n # note(zhiliny): With multi processing,\n # the example_index is actually the index within the current process\n # therefore we use example_index=None to avoid being used in the future.\n # The current code does not use example_index of training data.\n if is_training:\n feat_example_index = None\n else:\n feat_example_index = example_index\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=feat_example_index,\n doc_span_index=doc_span_index,\n tok_start_to_orig_index=cur_tok_start_to_orig_index,\n tok_end_to_orig_index=cur_tok_end_to_orig_index,\n token_is_max_context=token_is_max_context,\n tokens=[tokenizer.sp_model.IdToPiece(x) for x in tokens],\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n paragraph_len=paragraph_len,\n start_position=start_position,\n end_position=end_position,\n is_impossible=span_is_impossible,\n p_mask=p_mask)\n\n # Run 
callback\n output_fn(feature)\n\n unique_id += 1\n if span_is_impossible:\n cnt_neg += 1\n else:\n cnt_pos += 1\n\n tf.logging.info(\"Total number of instances: {} = pos {} neg {}\".format(\n cnt_pos + cnt_neg, cnt_pos, cnt_neg))\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"p_mask\"] = create_int_feature(feature.p_mask)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = 
create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef input_fn_builder(input_file, seq_length, is_training,\n drop_remainder, use_tpu, bsz, is_v2):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n # p_mask is not required for SQuAD v1.1\n if is_v2:\n name_to_features[\"p_mask\"] = tf.FixedLenFeature([seq_length], tf.int64)\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"is_impossible\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n if use_tpu:\n batch_size = params[\"batch_size\"]\n else:\n batch_size = bsz\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n contrib_data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\ndef create_v1_model(albert_config, is_training, input_ids, input_mask,\n segment_ids, use_one_hot_embeddings, use_einsum,\n hub_module):\n \"\"\"Creates a classification model.\"\"\"\n (_, final_hidden) = fine_tuning_utils.create_albert(\n albert_config=albert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n use_einsum=use_einsum,\n hub_module=hub_module)\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls/squad/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return 
(start_logits, end_logits)\n\n\ndef v1_model_fn_builder(albert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, use_einsum, hub_module):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n if \"unique_ids\" in features:\n unique_ids = features[\"unique_ids\"]\n else:\n unique_ids = None\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_v1_model(\n albert_config=albert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n use_einsum=use_einsum,\n hub_module=hub_module)\n\n # Assign names to the logits so that we can refer to them as output tensors.\n start_logits = tf.identity(start_logits, name=\"start_logits\")\n end_logits = tf.identity(end_logits, name=\"end_logits\")\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"start_log_prob\": start_logits,\n \"end_log_prob\": end_logits,\n }\n if unique_ids is not None:\n predictions[\"unique_ids\"] = unique_ids\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n return output_spec\n\n return model_fn\n\n\ndef accumulate_predictions_v1(result_dict, all_examples, all_features,\n all_results, n_best_size, max_answer_length):\n \"\"\"accumulate predictions for 
each positions in a dictionary.\"\"\"\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n if example_index not in result_dict:\n result_dict[example_index] = {}\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n if feature.unique_id not in result_dict[example_index]:\n result_dict[example_index][feature.unique_id] = {}\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_log_prob, n_best_size)\n end_indexes = _get_best_indexes(result.end_log_prob, n_best_size)\n for start_index in start_indexes:\n for end_index in end_indexes:\n doc_offset = feature.tokens.index(\"[SEP]\") + 1\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index - doc_offset >= len(feature.tok_start_to_orig_index):\n continue\n if end_index - doc_offset >= len(feature.tok_end_to_orig_index):\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n start_log_prob = result.start_log_prob[start_index]\n end_log_prob = result.end_log_prob[end_index]\n start_idx = start_index - doc_offset\n end_idx = end_index - doc_offset\n if (start_idx, end_idx) not in result_dict[example_index][feature.unique_id]:\n result_dict[example_index][feature.unique_id][(start_idx, end_idx)] = []\n result_dict[example_index][feature.unique_id][(start_idx, end_idx)].append((start_log_prob, end_log_prob))\n\n\ndef write_predictions_v1(result_dict, all_examples, all_features,\n all_results, n_best_size, max_answer_length,\n output_prediction_file, output_nbest_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the 
slice with min null score\n for (feature_index, feature) in enumerate(features):\n for ((start_idx, end_idx), logprobs) in \\\n result_dict[example_index][feature.unique_id].items():\n start_log_prob = 0\n end_log_prob = 0\n for logprob in logprobs:\n start_log_prob += logprob[0]\n end_log_prob += logprob[1]\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_idx,\n end_index=end_idx,\n start_log_prob=start_log_prob / len(logprobs),\n end_log_prob=end_log_prob / len(logprobs)))\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_log_prob + x.end_log_prob),\n reverse=True)\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index >= 0: # this is a non-null prediction\n tok_start_to_orig_index = feature.tok_start_to_orig_index\n tok_end_to_orig_index = feature.tok_end_to_orig_index\n start_orig_pos = tok_start_to_orig_index[pred.start_index]\n end_orig_pos = tok_end_to_orig_index[pred.end_index]\n\n paragraph_text = example.paragraph_text\n final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_log_prob=pred.start_log_prob,\n end_log_prob=pred.end_log_prob))\n\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_log_prob=0.0, end_log_prob=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_log_prob + entry.end_log_prob)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_log_prob\"] = entry.start_log_prob\n output[\"end_log_prob\"] = entry.end_log_prob\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n return all_predictions\n\n\n####### following are from official SQuAD v1.1 evaluation scripts\ndef normalize_answer_v1(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n\n def remove_articles(text):\n return re.sub(r\"\\b(a|an|the)\\b\", \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef f1_score(prediction, ground_truth):\n prediction_tokens = normalize_answer_v1(prediction).split()\n ground_truth_tokens = normalize_answer_v1(ground_truth).split()\n common = (\n collections.Counter(prediction_tokens)\n & 
collections.Counter(ground_truth_tokens))\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(prediction_tokens)\n recall = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n\ndef exact_match_score(prediction, ground_truth):\n return (normalize_answer_v1(prediction) == normalize_answer_v1(ground_truth))\n\n\ndef metric_max_over_ground_truths(metric_fn, prediction, ground_truths):\n scores_for_ground_truths = []\n for ground_truth in ground_truths:\n score = metric_fn(prediction, ground_truth)\n scores_for_ground_truths.append(score)\n return max(scores_for_ground_truths)\n\n\ndef evaluate_v1(dataset, predictions):\n f1 = exact_match = total = 0\n for article in dataset:\n for paragraph in article[\"paragraphs\"]:\n for qa in paragraph[\"qas\"]:\n total += 1\n if qa[\"id\"] not in predictions:\n message = (\"Unanswered question \" + six.ensure_str(qa[\"id\"]) +\n \" will receive score 0.\")\n print(message, file=sys.stderr)\n continue\n ground_truths = [x[\"text\"] for x in qa[\"answers\"]]\n # ground_truths = list(map(lambda x: x[\"text\"], qa[\"answers\"]))\n prediction = predictions[qa[\"id\"]]\n exact_match += metric_max_over_ground_truths(exact_match_score,\n prediction, ground_truths)\n f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)\n\n exact_match = 100.0 * exact_match / total\n f1 = 100.0 * f1 / total\n\n return {\"exact_match\": exact_match, \"f1\": f1}\n\n####### above are from official SQuAD v1.1 evaluation scripts\n####### following are from official SQuAD v2.0 evaluation scripts\ndef make_qid_to_has_ans(dataset):\n qid_to_has_ans = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid_to_has_ans[qa['id']] = bool(qa['answers'])\n return qid_to_has_ans\n\ndef normalize_answer_v2(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\ndef get_tokens(s):\n if not s: return []\n return normalize_answer_v2(s).split()\n\ndef compute_exact(a_gold, a_pred):\n return int(normalize_answer_v2(a_gold) == normalize_answer_v2(a_pred))\n\ndef compute_f1(a_gold, a_pred):\n gold_toks = get_tokens(a_gold)\n pred_toks = get_tokens(a_pred)\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)\n num_same = sum(common.values())\n if len(gold_toks) == 0 or len(pred_toks) == 0:\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks)\n recall = 1.0 * num_same / len(gold_toks)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\ndef get_raw_scores(dataset, preds):\n exact_scores = {}\n f1_scores = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid = qa['id']\n gold_answers = [a['text'] for a in qa['answers']\n if normalize_answer_v2(a['text'])]\n if not gold_answers:\n # For unanswerable questions, only correct answer is empty string\n gold_answers = ['']\n if qid not in preds:\n print('Missing prediction for 
%s' % qid)\n continue\n a_pred = preds[qid]\n # Take max over all gold answers\n exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)\n f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)\n return exact_scores, f1_scores\n\ndef apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):\n new_scores = {}\n for qid, s in scores.items():\n pred_na = na_probs[qid] > na_prob_thresh\n if pred_na:\n new_scores[qid] = float(not qid_to_has_ans[qid])\n else:\n new_scores[qid] = s\n return new_scores\n\ndef make_eval_dict(exact_scores, f1_scores, qid_list=None):\n if not qid_list:\n total = len(exact_scores)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores.values()) / total),\n ('f1', 100.0 * sum(f1_scores.values()) / total),\n ('total', total),\n ])\n else:\n total = len(qid_list)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),\n ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),\n ('total', total),\n ])\n\n\ndef find_best_thresh(preds, scores, na_probs, qid_to_has_ans):\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\n cur_score = num_no_ans\n best_score = cur_score\n best_thresh = 0.0\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\n for i, qid in enumerate(qid_list):\n if qid not in scores: continue\n if qid_to_has_ans[qid]:\n diff = scores[qid]\n else:\n if preds[qid]:\n diff = -1\n else:\n diff = 0\n cur_score += diff\n if cur_score > best_score:\n best_score = cur_score\n best_thresh = na_probs[qid]\n return 100.0 * best_score / len(scores), best_thresh\n\n\ndef find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\n best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)\n best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)\n main_eval['best_exact'] = best_exact\n main_eval['best_exact_thresh'] = exact_thresh\n main_eval['best_f1'] = best_f1\n main_eval['best_f1_thresh'] = f1_thresh\n\n\ndef merge_eval(main_eval, new_eval, prefix):\n for k in new_eval:\n main_eval['%s_%s' % (prefix, k)] = new_eval[k]\n\n####### above are from official SQuAD v2.0 evaluation scripts\n\ndef accumulate_predictions_v2(result_dict, cls_dict, all_examples,\n all_features, all_results, n_best_size,\n max_answer_length, start_n_top, end_n_top):\n \"\"\"accumulate predictions for each positions in a dictionary.\"\"\"\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n if example_index not in result_dict:\n result_dict[example_index] = {}\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n\n for (feature_index, feature) in enumerate(features):\n if feature.unique_id not in result_dict[example_index]:\n result_dict[example_index][feature.unique_id] = {}\n result = unique_id_to_result[feature.unique_id]\n cur_null_score = result.cls_logits\n\n # if we could have irrelevant answers, get the min score of irrelevant\n 
score_null = min(score_null, cur_null_score)\n\n doc_offset = feature.tokens.index(\"[SEP]\") + 1\n for i in range(start_n_top):\n for j in range(end_n_top):\n start_log_prob = result.start_top_log_probs[i]\n start_index = result.start_top_index[i]\n\n j_index = i * end_n_top + j\n\n end_log_prob = result.end_top_log_probs[j_index]\n end_index = result.end_top_index[j_index]\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index - doc_offset >= len(feature.tok_start_to_orig_index):\n continue\n if start_index - doc_offset < 0:\n continue\n if end_index - doc_offset >= len(feature.tok_end_to_orig_index):\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n start_idx = start_index - doc_offset\n end_idx = end_index - doc_offset\n if (start_idx, end_idx) not in result_dict[example_index][feature.unique_id]:\n result_dict[example_index][feature.unique_id][(start_idx, end_idx)] = []\n result_dict[example_index][feature.unique_id][(start_idx, end_idx)].append((start_log_prob, end_log_prob))\n if example_index not in cls_dict:\n cls_dict[example_index] = []\n cls_dict[example_index].append(score_null)\n\n\ndef write_predictions_v2(result_dict, cls_dict, all_examples, all_features,\n all_results, n_best_size, max_answer_length,\n output_prediction_file,\n output_nbest_file, output_null_log_odds_file,\n null_score_diff_threshold):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n # score_null = 1000000 # large and positive\n\n for (feature_index, feature) in enumerate(features):\n for ((start_idx, end_idx), logprobs) in \\\n result_dict[example_index][feature.unique_id].items():\n start_log_prob = 0\n end_log_prob = 0\n for logprob in logprobs:\n start_log_prob += logprob[0]\n end_log_prob += logprob[1]\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_idx,\n end_index=end_idx,\n start_log_prob=start_log_prob / len(logprobs),\n end_log_prob=end_log_prob / len(logprobs)))\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_log_prob + x.end_log_prob),\n reverse=True)\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n\n tok_start_to_orig_index = feature.tok_start_to_orig_index\n tok_end_to_orig_index = feature.tok_end_to_orig_index\n start_orig_pos = tok_start_to_orig_index[pred.start_index]\n end_orig_pos = tok_end_to_orig_index[pred.end_index]\n\n 
paragraph_text = example.paragraph_text\n final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()\n\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_log_prob=pred.start_log_prob,\n end_log_prob=pred.end_log_prob))\n\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(\n text=\"\",\n start_log_prob=-1e6,\n end_log_prob=-1e6))\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_log_prob + entry.end_log_prob)\n if not best_non_null_entry:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_log_prob\"] = entry.start_log_prob\n output[\"end_log_prob\"] = entry.end_log_prob\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n assert best_non_null_entry is not None\n\n score_diff = sum(cls_dict[example_index]) / len(cls_dict[example_index])\n scores_diff_json[example.qas_id] = score_diff\n # predict null answers when null threshold is provided\n if null_score_diff_threshold is None or score_diff < null_score_diff_threshold:\n all_predictions[example.qas_id] = best_non_null_entry.text\n else:\n all_predictions[example.qas_id] = \"\"\n\n all_nbest_json[example.qas_id] = nbest_json\n assert len(nbest_json) >= 1\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n return all_predictions, scores_diff_json\n\n\ndef create_v2_model(albert_config, is_training, input_ids, input_mask,\n segment_ids, use_one_hot_embeddings, features,\n max_seq_length, start_n_top, end_n_top, dropout_prob,\n hub_module):\n \"\"\"Creates a classification model.\"\"\"\n (_, output) = fine_tuning_utils.create_albert(\n albert_config=albert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n use_einsum=True,\n hub_module=hub_module)\n\n bsz = tf.shape(output)[0]\n return_dict = {}\n output = tf.transpose(output, [1, 0, 2])\n\n # invalid position mask such as query and special symbols (PAD, SEP, CLS)\n p_mask = tf.cast(features[\"p_mask\"], dtype=tf.float32)\n\n # logit of the start position\n with tf.variable_scope(\"start_logits\"):\n start_logits = tf.layers.dense(\n output,\n 1,\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range))\n start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0])\n start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask\n start_log_probs = tf.nn.log_softmax(start_logits_masked, -1)\n\n # logit of the end position\n with tf.variable_scope(\"end_logits\"):\n if is_training:\n # during training, compute the end logits based on the\n # ground truth of the start position\n start_positions = tf.reshape(features[\"start_positions\"], [-1])\n start_index = tf.one_hot(start_positions, depth=max_seq_length, 
axis=-1,\n dtype=tf.float32)\n start_features = tf.einsum(\"lbh,bl->bh\", output, start_index)\n start_features = tf.tile(start_features[None], [max_seq_length, 1, 1])\n end_logits = tf.layers.dense(\n tf.concat([output, start_features], axis=-1),\n albert_config.hidden_size,\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range),\n activation=tf.tanh,\n name=\"dense_0\")\n end_logits = contrib_layers.layer_norm(end_logits, begin_norm_axis=-1)\n\n end_logits = tf.layers.dense(\n end_logits,\n 1,\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range),\n name=\"dense_1\")\n end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0])\n end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask\n end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)\n else:\n # during inference, compute the end logits based on beam search\n\n start_top_log_probs, start_top_index = tf.nn.top_k(\n start_log_probs, k=start_n_top)\n start_index = tf.one_hot(start_top_index,\n depth=max_seq_length, axis=-1, dtype=tf.float32)\n start_features = tf.einsum(\"lbh,bkl->bkh\", output, start_index)\n end_input = tf.tile(output[:, :, None],\n [1, 1, start_n_top, 1])\n start_features = tf.tile(start_features[None],\n [max_seq_length, 1, 1, 1])\n end_input = tf.concat([end_input, start_features], axis=-1)\n end_logits = tf.layers.dense(\n end_input,\n albert_config.hidden_size,\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range),\n activation=tf.tanh,\n name=\"dense_0\")\n end_logits = contrib_layers.layer_norm(end_logits, begin_norm_axis=-1)\n end_logits = tf.layers.dense(\n end_logits,\n 1,\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range),\n name=\"dense_1\")\n end_logits = tf.reshape(end_logits, [max_seq_length, -1, start_n_top])\n end_logits = tf.transpose(end_logits, [1, 2, 0])\n end_logits_masked = end_logits * (\n 1 - p_mask[:, None]) - 1e30 * p_mask[:, None]\n end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)\n end_top_log_probs, end_top_index = tf.nn.top_k(\n end_log_probs, k=end_n_top)\n end_top_log_probs = tf.reshape(\n end_top_log_probs,\n [-1, start_n_top * end_n_top])\n end_top_index = tf.reshape(\n end_top_index,\n [-1, start_n_top * end_n_top])\n\n if is_training:\n return_dict[\"start_log_probs\"] = start_log_probs\n return_dict[\"end_log_probs\"] = end_log_probs\n else:\n return_dict[\"start_top_log_probs\"] = start_top_log_probs\n return_dict[\"start_top_index\"] = start_top_index\n return_dict[\"end_top_log_probs\"] = end_top_log_probs\n return_dict[\"end_top_index\"] = end_top_index\n\n # an additional layer to predict answerability\n with tf.variable_scope(\"answer_class\"):\n # get the representation of CLS\n cls_index = tf.one_hot(tf.zeros([bsz], dtype=tf.int32),\n max_seq_length,\n axis=-1, dtype=tf.float32)\n cls_feature = tf.einsum(\"lbh,bl->bh\", output, cls_index)\n\n # get the representation of START\n start_p = tf.nn.softmax(start_logits_masked, axis=-1,\n name=\"softmax_start\")\n start_feature = tf.einsum(\"lbh,bl->bh\", output, start_p)\n\n # note(zhiliny): no dependency on end_feature so that we can obtain\n # one single `cls_logits` for each sample\n ans_feature = tf.concat([start_feature, cls_feature], -1)\n ans_feature = tf.layers.dense(\n ans_feature,\n albert_config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range),\n name=\"dense_0\")\n ans_feature = 
tf.layers.dropout(ans_feature, dropout_prob,\n training=is_training)\n cls_logits = tf.layers.dense(\n ans_feature,\n 1,\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range),\n name=\"dense_1\",\n use_bias=False)\n cls_logits = tf.squeeze(cls_logits, -1)\n\n return_dict[\"cls_logits\"] = cls_logits\n\n return return_dict\n\n\ndef v2_model_fn_builder(albert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, max_seq_length, start_n_top,\n end_n_top, dropout_prob, hub_module):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n # unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n outputs = create_v2_model(\n albert_config=albert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n features=features,\n max_seq_length=max_seq_length,\n start_n_top=start_n_top,\n end_n_top=end_n_top,\n dropout_prob=dropout_prob,\n hub_module=hub_module)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(log_probs, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n\n loss = - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)\n loss = tf.reduce_mean(loss)\n return loss\n\n start_loss = compute_loss(\n outputs[\"start_log_probs\"], features[\"start_positions\"])\n end_loss = compute_loss(\n outputs[\"end_log_probs\"], features[\"end_positions\"])\n\n total_loss = (start_loss + end_loss) * 0.5\n\n cls_logits = outputs[\"cls_logits\"]\n is_impossible = tf.reshape(features[\"is_impossible\"], [-1])\n regression_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.cast(is_impossible, dtype=tf.float32), logits=cls_logits)\n regression_loss = tf.reduce_mean(regression_loss)\n\n # note(zhiliny): by default multiply the loss by 0.5 so that the scale is\n # comparable to start_loss and end_loss\n total_loss += regression_loss * 0.5\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == 
tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": features[\"unique_ids\"],\n \"start_top_index\": outputs[\"start_top_index\"],\n \"start_top_log_probs\": outputs[\"start_top_log_probs\"],\n \"end_top_index\": outputs[\"end_top_index\"],\n \"end_top_log_probs\": outputs[\"end_top_log_probs\"],\n \"cls_logits\": outputs[\"cls_logits\"]\n }\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef evaluate_v2(result_dict, cls_dict, prediction_json, eval_examples,\n eval_features, all_results, n_best_size, max_answer_length,\n output_prediction_file, output_nbest_file,\n output_null_log_odds_file):\n null_score_diff_threshold = None\n predictions, na_probs = write_predictions_v2(\n result_dict, cls_dict, eval_examples, eval_features,\n all_results, n_best_size, max_answer_length,\n output_prediction_file, output_nbest_file,\n output_null_log_odds_file, null_score_diff_threshold)\n\n na_prob_thresh = 1.0 # default value taken from the eval script\n qid_to_has_ans = make_qid_to_has_ans(prediction_json) # maps qid to True/False\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\n exact_raw, f1_raw = get_raw_scores(prediction_json, predictions)\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n find_all_best_thresh(out_eval, predictions, exact_raw, f1_raw, na_probs, qid_to_has_ans)\n null_score_diff_threshold = out_eval[\"best_f1_thresh\"]\n\n predictions, na_probs = write_predictions_v2(\n result_dict, cls_dict,eval_examples, eval_features,\n all_results, n_best_size, max_answer_length,\n output_prediction_file, output_nbest_file,\n output_null_log_odds_file, null_score_diff_threshold)\n\n qid_to_has_ans = make_qid_to_has_ans(prediction_json) # maps qid to True/False\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\n exact_raw, f1_raw = get_raw_scores(prediction_json, predictions)\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n out_eval[\"null_score_diff_threshold\"] = null_score_diff_threshold\n return out_eval\n" ]
[ [ "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.compat.v1.zeros", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.transpose", "tensorflow.compat.v1.nn.bias_add", "tensorflow.compat.v1.tile", "tensorflow.compat.v1.identity", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.train.init_from_checkpoint", "tensorflow.compat.v1.to_int32", "tensorflow.compat.v1.gfile.GFile", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.unstack", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.data.TFRecordDataset", "tensorflow.compat.v1.FixedLenFeature", "tensorflow.contrib.layers.layer_norm", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.layers.dropout", "tensorflow.compat.v1.train.Features", "tensorflow.compat.v1.train.Scaffold", "numpy.zeros", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.einsum", "tensorflow.compat.v1.nn.top_k", "tensorflow.compat.v1.nn.log_softmax", "tensorflow.compat.v1.python_io.TFRecordWriter", "tensorflow.compat.v1.parse_single_example", "tensorflow.compat.v1.squeeze", "tensorflow.compat.v1.one_hot", "tensorflow.compat.v1.gfile.Open", "tensorflow.compat.v1.concat", "tensorflow.compat.v1.nn.softmax", "tensorflow.compat.v1.truncated_normal_initializer" ] ]
TrazLander/Traz-Fork-MCEdit-Unified
[ "829a6807ec3f64a6e936c5b5b9a0ec8e03c75954", "829a6807ec3f64a6e936c5b5b9a0ec8e03c75954" ]
[ "filters/topsoil.py", "editortools/thumbview.py" ]
[ "from numpy import zeros\nimport itertools\nfrom pymclevel import alphaMaterials\nfrom pymclevel.level import extractHeights\n\nam = alphaMaterials\n\n# naturally occuring materials\nblocks = [\n am.Grass,\n am.Dirt,\n am.Stone,\n am.Bedrock,\n am.Sand,\n am.Gravel,\n am.GoldOre,\n am.IronOre,\n am.CoalOre,\n am.LapisLazuliOre,\n am.DiamondOre,\n am.RedstoneOre,\n am.RedstoneOreGlowing,\n am.Netherrack,\n am.SoulSand,\n am.Clay,\n am.Glowstone\n]\nblocktypes = [b.ID for b in blocks]\n\n\ndef naturalBlockmask():\n blockmask = zeros((256,), dtype='bool')\n blockmask[blocktypes] = True\n return blockmask\n\n\ninputs = (\n (\"Depth\", (4, -128, 128)),\n (\"Pick a block:\", alphaMaterials.Grass),\n)\n\n\ndef perform(level, box, options):\n depth = options[\"Depth\"]\n blocktype = options[\"Pick a block:\"]\n\n #compute a truth table that we can index to find out whether a block\n # is naturally occuring and should be considered in a heightmap\n blockmask = naturalBlockmask()\n\n # always consider the chosen blocktype to be \"naturally occuring\" to stop\n # it from adding extra layers\n blockmask[blocktype.ID] = True\n\n #iterate through the slices of each chunk in the selection box\n for chunk, slices, point in level.getChunkSlices(box):\n # slicing the block array is straightforward. blocks will contain only\n # the area of interest in this chunk.\n blocks = chunk.Blocks[slices]\n data = chunk.Data[slices]\n\n # use indexing to look up whether or not each block in blocks is\n # naturally-occuring. these blocks will \"count\" for column height.\n maskedBlocks = blockmask[blocks]\n\n heightmap = extractHeights(maskedBlocks)\n\n for x, z in itertools.product(*map(xrange, heightmap.shape)):\n h = heightmap[x, z]\n if depth > 0:\n blocks[x, z, max(0, h - depth):h] = blocktype.ID\n data[x, z, max(0, h - depth):h] = blocktype.blockData\n else:\n #negative depth values mean to put a layer above the surface\n blocks[x, z, h:min(blocks.shape[2], h - depth)] = blocktype.ID\n data[x, z, h:min(blocks.shape[2], h - depth)] = blocktype.blockData\n\n #remember to do this to make sure the chunk is saved\n chunk.chunkChanged()\n", "from OpenGL import GLU, GL\nfrom numpy import array\nfrom albow import Widget\nfrom albow.openglwidgets import GLPerspective\nfrom glutils import FramebufferTexture, gl\nimport pymclevel\nfrom renderer import PreviewRenderer\n\n\nclass ThumbView(GLPerspective):\n def __init__(self, sch, **kw):\n GLPerspective.__init__(self, **kw) # self, xmin= -32, xmax=32, ymin= -32, ymax=32, near= -1000, far=1000)\n self.far = 16000\n self.schematic = sch\n self.renderer = PreviewRenderer(sch)\n self.fboSize = (128, 128)\n # self.renderer.position = (sch.Length / 2, 0, sch.Height / 2)\n\n def setup_modelview(self):\n GLU.gluLookAt(-self.schematic.Width * 2.8, self.schematic.Height * 2.5 + 1, -self.schematic.Length * 1.5,\n self.schematic.Width, 0, self.schematic.Length,\n 0, 1, 0)\n\n fbo = None\n\n def gl_draw_tex(self):\n self.clear()\n self.renderer.draw()\n\n def clear(self):\n if self.drawBackground:\n GL.glClearColor(0.25, 0.27, 0.77, 1.0)\n else:\n GL.glClearColor(0.0, 0.0, 0.0, 0.0)\n GL.glClear(GL.GL_DEPTH_BUFFER_BIT | GL.GL_COLOR_BUFFER_BIT)\n\n def gl_draw(self):\n if self.schematic.chunkCount > len(self.renderer.chunkRenderers):\n self.gl_draw_thumb()\n else:\n if self.fbo is None:\n w, h = self.fboSize\n self.fbo = FramebufferTexture(w, h, self.gl_draw_tex)\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glLoadIdentity()\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glLoadIdentity()\n 
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)\n GL.glColor(1.0, 1.0, 1.0, 1.0)\n GL.glVertexPointer(2, GL.GL_FLOAT, 0, array([-1, -1,\n - 1, 1,\n 1, 1,\n 1, -1, ], dtype='float32'))\n GL.glTexCoordPointer(2, GL.GL_FLOAT, 0, array([0, 0, 0, 256, 256, 256, 256, 0], dtype='float32'))\n e = (GL.GL_TEXTURE_2D,)\n if not self.drawBackground:\n e += (GL.GL_ALPHA_TEST,)\n with gl.glEnable(*e):\n self.fbo.bind()\n GL.glDrawArrays(GL.GL_QUADS, 0, 4)\n GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)\n\n drawBackground = True\n\n def gl_draw_thumb(self):\n GL.glPushAttrib(GL.GL_SCISSOR_BIT)\n r = self.rect\n r = r.move(*self.local_to_global_offset())\n GL.glScissor(r.x, self.get_root().height - r.y - r.height, r.width, r.height)\n with gl.glEnable(GL.GL_SCISSOR_TEST):\n self.clear()\n self.renderer.draw()\n GL.glPopAttrib()\n\n\nclass BlockThumbView(Widget):\n is_gl_container = True\n\n def __init__(self, materials, blockInfo=None, **kw):\n Widget.__init__(self, **kw)\n self.materials = materials\n self.blockInfo = blockInfo\n\n thumb = None\n _blockInfo = None\n\n @property\n def blockInfo(self):\n return self._blockInfo\n\n @blockInfo.setter\n def blockInfo(self, b):\n if self._blockInfo != b:\n if self.thumb:\n self.thumb.set_parent(None)\n self._blockInfo = b\n if b is None:\n return\n\n sch = pymclevel.MCSchematic(shape=(1, 1, 1), mats=self.materials)\n if b:\n sch.Blocks[:] = b.ID\n sch.Data[:] = b.blockData\n\n self.thumb = ThumbView(sch)\n self.add(self.thumb)\n self.thumb.size = self.size\n self.thumb.drawBackground = False\n for i in self.thumb.renderer.chunkWorker:\n pass\n" ]
[ [ "numpy.zeros" ], [ "numpy.array" ] ]
charles96322/conditional-image-generation
[ "8854612fbeb4ef485f1d1613b78d42f240692161", "8854612fbeb4ef485f1d1613b78d42f240692161" ]
[ "lib/inits.py", "lib/theano_utils.py" ]
[ "import numpy as np\nimport theano, theano.tensor as T\n\ndef He(shape, name, fan_in):\n \"\"\" He initialization of parameters \"\"\"\n rng = np.random.RandomState(1)\n W = rng.normal(0, np.sqrt(2. / fan_in), size=shape)\n return theano.shared(W, borrow=True, name=name).astype('float32')\n\ndef ConstantInit(shape, name, value):\n \"\"\" Constant initialization of parameters \"\"\"\n if value == 0:\n b = np.zeros(shape, dtype='float32')\n return theano.shared(b, name=name, borrow=True)", "# Taken from https://raw.githubusercontent.com/Newmu/dcgan_code/master/lib/theano_utils.py\nimport numpy as np\nimport theano\n\ndef intX(X):\n return np.asarray(X, dtype=np.int32)\n\ndef floatX(X):\n return np.asarray(X, dtype=theano.config.floatX)\n\ndef sharedX(X, dtype=theano.config.floatX, name=None):\n return theano.shared(np.asarray(X, dtype=dtype), name=name, borrow=True)\n\ndef shared0s(shape, dtype=theano.config.floatX, name=None):\n return sharedX(np.zeros(shape), dtype=dtype, name=name)\n\ndef sharedNs(shape, n, dtype=theano.config.floatX, name=None):\n return sharedX(np.ones(shape)*n, dtype=dtype, name=name)" ]
[ [ "numpy.zeros", "numpy.sqrt", "numpy.random.RandomState" ], [ "numpy.ones", "numpy.asarray", "numpy.zeros" ] ]
FranzForstmayr/scipy
[ "9a12908f843aed87203b32e45d1001353d90c548" ]
[ "scipy/fft/_fftlog_multimethods.py" ]
[ "'''Multimethods for fast Hankel transforms.\n'''\n\nimport numpy as np\n\nfrom ._basic import _dispatch\nfrom ._fftlog import fht as _fht\nfrom ._fftlog import ifht as _ifht\nfrom scipy._lib.uarray import Dispatchable\n\n\n__all__ = ['fht', 'ifht']\n\n\n@_dispatch\ndef fht(a, dln, mu, offset=0.0, bias=0.0):\n \"\"\"fht multimethod.\"\"\"\n return (Dispatchable(a, np.ndarray),)\n\n\n@_dispatch\ndef ifht(A, dln, mu, offset=0.0, bias=0.0):\n \"\"\"ifht multimethod.\"\"\"\n return (Dispatchable(A, np.ndarray),)\n\n\n# copy over the docstrings\nfht.__doc__ = _fht.__doc__\nifht.__doc__ = _ifht.__doc__\n" ]
[ [ "scipy._lib.uarray.Dispatchable" ] ]
Hoeze/kipoiseq
[ "f57493e90df12f3be6f9028bd9da8d478fbc3748" ]
[ "tests/extractors/test_protein.py" ]
[ "import pytest\nfrom pytest_mock import mocker\nimport pandas as pd\nfrom kipoiseq.transforms.functional import translate, rc_dna\nfrom kipoiseq.dataclasses import Interval, Variant\nfrom kipoiseq.extractors.protein import cut_transcript_seq, TranscriptSeqExtractor, ProteinSeqExtractor, \\\n ProteinVCFSeqExtractor, SingleSeqProteinVCFSeqExtractor, \\\n SingleVariantProteinVCFSeqExtractor\nfrom kipoiseq.extractors import CDSFetcher, gtf_row2interval, UTRFetcher\n\ngtf_file = 'tests/data/sample_1_protein.gtf'\nfasta_file = 'tests/data/demo_dna_seq.fa'\ntranscript_id = 'enst_test1'\nvcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'\n\nintervals = [\n Interval('22', 580, 596, strand='+', attrs={'tag': 'cds_end_NF'}),\n Interval('22', 597, 610, strand='+', attrs={'tag': 'cds_end_NF'})\n]\n\n\ndef test_cut_seq():\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'cds_end_NF')\n assert len(seq) == 6\n\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'cds_end_NF,cds_start_NF')\n assert len(seq) == 3\n\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'cds_start_NF')\n assert len(seq) == 9\n\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'no_tag')\n assert len(seq) == 3\n\n\ndef test_gtf_row2interval():\n row = pd.Series({\n 'Chromosome': '22',\n 'Start': 10,\n 'End': 20,\n 'Strand': '-',\n 'tag': 'cds_end_NF'\n })\n expected_interval = Interval(chrom='22', start=10,\n end=20, name='', strand='-', attrs={'tag': 'cds_end_NF'})\n\n assert gtf_row2interval(row) == expected_interval\n\n\ndef test_CDSFetcher__read_cds():\n cds = CDSFetcher._read_cds(gtf_file, duplicate_attr=True)\n assert cds.shape[0] == 7\n\n assert cds.iloc[0].Chromosome == '22'\n assert cds.iloc[0].Start == 598\n assert cds.iloc[0].End == 3050\n\n assert cds.iloc[3].Start == 3\n assert cds.iloc[3].End == 300\n\n\n@pytest.fixture\ndef cds_fetcher():\n return CDSFetcher(gtf_file)\n\n\ndef test_CDSFetcher__len__(cds_fetcher):\n assert len(cds_fetcher) == 3\n\n\ndef test_CDSFetcher_get_cds(cds_fetcher):\n intervals = cds_fetcher.get_intervals(transcript_id)\n intervals[0] == Interval(chrom='22', start=598, end=3196, name='', strand='+')\n # TODO: Improve testcase with adding transcript with 2 cds\n\n\n@pytest.fixture\ndef transcript_seq_extractor():\n return TranscriptSeqExtractor(gtf_file, fasta_file)\n\n\ndef test_get_protein_seq(transcript_seq_extractor):\n transcript_id = 'enst_test2'\n seq = transcript_seq_extractor.get_protein_seq(transcript_id)\n txt_file = 'tests/data/Output_singleSeq_vcf_enst_test2.txt'\n expected_seq = open(txt_file).readline()\n assert seq[1:] == expected_seq[1:] # no expected mutation here\n\n\ndef test_TranscriptSeqExtractor_prepare_seq():\n seqs = ['ATCGATG']\n assert 'ATCGAT' == TranscriptSeqExtractor._prepare_seq(\n seqs, intervals, '+')\n assert 'CATCGA' == TranscriptSeqExtractor._prepare_seq(\n seqs, intervals, '-')\n\n\ndef test_TranscriptSeqExtractor_get_seq(transcript_seq_extractor):\n seq = transcript_seq_extractor.get_seq(transcript_id)\n assert len(seq) == 3196 - 598\n\n\ndef test_TranscriptSeqExtractor_get_item(transcript_seq_extractor):\n assert transcript_seq_extractor[0] == transcript_seq_extractor.get_seq(\n transcript_id)\n\n\n@pytest.fixture\ndef protein_seq_extractor():\n return ProteinSeqExtractor(gtf_file, fasta_file)\n\n\ndef test_ProteinSeqExtractor_prepare_seq(protein_seq_extractor):\n seqs = ['ATCGATG']\n\n pro_seq = protein_seq_extractor._prepare_seq(seqs, intervals, '+')\n assert pro_seq == 'ID'\n\n pro_seq = protein_seq_extractor._prepare_seq(seqs, intervals, '-')\n 
assert pro_seq == 'HR'\n\n\ndef test_ProteinVCFSeqExtractor__unstrand():\n unstrand_intervals = ProteinVCFSeqExtractor._unstrand(intervals)\n assert all(i.strand == '.' for i in unstrand_intervals)\n\n\n# TODO: write test for with sample_id\n\nclass TestExtractor(ProteinVCFSeqExtractor):\n def extract_query(\n self,\n variant_interval_queryable,\n ref_seqs,\n intervals,\n reverse_complement: bool,\n sample_id=None\n ):\n for seqs, variant_info in [\n (['ATC', 'GATG'], ['Var_Mutation_Mock']),\n (['CATC', 'GAT'], ['Var_Mutation_Mock']),\n ]:\n alt_seq = self._prepare_seq(\n seqs=seqs,\n intervals=intervals,\n reverse_complement=reverse_complement\n )\n yield alt_seq, variant_info\n\n\n@pytest.fixture\ndef protein_vcf_seq(mocker):\n extractor = TestExtractor(gtf_file, fasta_file, vcf_file)\n return extractor\n\n\ndef test_ProteinVCFSeqExtractor_extract_cds(protein_vcf_seq):\n protein_ref_seq, protein_alt_seqs = protein_vcf_seq.extract(intervals)\n protein_alt_seqs = list(protein_alt_seqs)\n\n assert protein_alt_seqs[0][0] == 'ID'\n assert protein_alt_seqs[1][0] == 'HR'\n\n\ndef test_ProteinVCFSeqExtractor_correctquery(mocker):\n protein_vcf_seq = TestExtractor(gtf_file, fasta_file, vcf_file)\n protein_vcf_seq.extract_query = mocker.MagicMock(\n return_value=(\n 'LATTGLWGP',\n iter([\n ('ID', ['Var_Mutation_Mock']),\n ('HR', ['Var_Mutation_Mock'])\n ])\n )\n )\n protein_ref_seq, protein_alt_seqs = protein_vcf_seq.extract(intervals)\n\n query = list(protein_vcf_seq.extract_query.call_args[1][\"variant_interval_queryable\"].variant_intervals)\n\n variants = list(query[0][0])\n assert len(variants) == 1\n assert variants[0].pos == 596\n interval = query[0][1]\n assert interval.start == 580\n\n variants = list(query[1][0])\n\n assert len(variants) == 1\n assert variants[0].pos == 598\n interval = query[1][1]\n assert interval.start == 597\n\n\ndef test_ProteinVCFSeqExtractor_extract(protein_vcf_seq):\n transcript_id = 'enst_test2'\n protein_ref_seq, protein_alt_seqs = protein_vcf_seq.get_seq(transcript_id)\n protein_alt_seqs = list(protein_alt_seqs)\n assert protein_alt_seqs[0][0] == 'HR'\n assert protein_alt_seqs[1][0] == 'ID'\n\n\n@pytest.fixture\ndef single_seq_protein():\n vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'\n return SingleSeqProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)\n\n\ndef test_SingleSeqProteinVCFSeqExtractor_extract(single_seq_protein, transcript_seq_extractor):\n transcript_id = 'enst_test2'\n txt_file = 'tests/data/Output_singleSeq_vcf_enst_test2.txt'\n expected_seq = open(txt_file).readline()\n\n ref_seq, (alt_seq, variant_info) = single_seq_protein.get_seq(transcript_id)\n assert alt_seq == expected_seq\n\n vcf_file = 'tests/data/singleVar_vcf_enst_test1_diff_type_of_variants.vcf.gz'\n transcript_id = 'enst_test1'\n single_seq_protein = SingleSeqProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n\n ref_seq, (alt_seq, variant_info) = single_seq_protein.get_seq(transcript_id)\n assert ref_seq == transcript_seq_extractor.get_protein_seq(transcript_id)\n assert len(alt_seq) == len(ref_seq)\n count = diff_between_two_seq(alt_seq, ref_seq)\n assert count == 1, 'Expected diff of 1 AA, but it was: ' + str(count)\n\n vcf_file = 'tests/data/singleSeq_vcf_enst_test2.vcf.gz'\n single_seq_protein = SingleSeqProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n\n # transcripts without variants return the reference sequence\n alt_seqs = [alt_seq for t_id, (ref_seq, (alt_seq, variants)) in single_seq_protein.extract_all() if\n len(variants) > 0]\n assert 
len(alt_seqs) == 0\n\n\n@pytest.fixture\ndef single_variant_seq():\n vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'\n return SingleVariantProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)\n\n\ndef diff_between_two_seq(seq1, seq2):\n count = 0\n for i in range(len(seq1)):\n if seq1[i] != seq2[i]:\n count += 1\n return count\n\n\ndef test_SingleVariantProteinVCFSeqExtractor_extract(single_variant_seq, transcript_seq_extractor):\n txt_file = 'tests/data/Output_singleVar_vcf_enst_test2.txt'\n expected_seq = open(txt_file).read().splitlines()\n\n # test single\n transcript_id = 'enst_test2'\n ref_seq, alt_seqs = single_variant_seq.get_seq(transcript_id)\n alt_seqs = list(alt_seqs)\n assert alt_seqs[0][0] == expected_seq[0]\n assert alt_seqs[1][0] == expected_seq[1]\n assert alt_seqs[2][0] == expected_seq[2]\n\n # test multiple\n transcript_id = ['enst_test1', 'enst_test2']\n transcript_seqs = single_variant_seq.get_seq(transcript_id)\n assert isinstance(transcript_seqs, list)\n transcript_seqs = [list(alt_seqs) for ref_seq, alt_seqs in transcript_seqs]\n assert transcript_seqs[1][0][0] == expected_seq[0]\n assert transcript_seqs[1][1][0] == expected_seq[1]\n assert transcript_seqs[1][2][0] == expected_seq[2]\n\n transcript_seqs = single_variant_seq.iter_seq(transcript_id)\n assert not isinstance(transcript_seqs, list)\n transcript_seqs = [list(alt_seqs) for ref_seq, alt_seqs in transcript_seqs]\n assert transcript_seqs[1][0][0] == expected_seq[0]\n assert transcript_seqs[1][1][0] == expected_seq[1]\n assert transcript_seqs[1][2][0] == expected_seq[2]\n\n counter = 0\n for tr_id, (ref_seq, t_id_seqs) in single_variant_seq.extract_all():\n t_id_seqs = [seq for seq, info in list(t_id_seqs)]\n if len(t_id_seqs) == 0:\n continue\n counter += len(t_id_seqs)\n for i, seq in enumerate(t_id_seqs):\n assert seq == expected_seq[i]\n assert tr_id == 'enst_test2'\n assert counter == 3, 'Number of variants in vcf 3, but # of seq was: ' + \\\n str(counter)\n\n vcf_file = 'tests/data/singleVar_vcf_enst_test1_diff_type_of_variants.vcf.gz'\n transcript_id = 'enst_test1'\n single_var_protein = SingleVariantProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n ref_seq, alt_seqs = single_var_protein.get_seq(transcript_id)\n alt_seqs = list(alt_seqs)\n # alt_seqs = [seq for seq, info in list(single_var_protein.get_seq(transcript_id))]\n # ref_seq = transcript_seq_extractor.get_protein_seq(transcript_id)\n\n assert len(alt_seqs) == 1\n for alt_seq, variant_info in alt_seqs:\n assert len(alt_seq) == len(ref_seq)\n count = diff_between_two_seq(alt_seq, ref_seq)\n assert count == 1, 'Expected diff of 1 AA, but it was: ' + str(count)\n\n # this test should result in 0 sequences yielded\n vcf_file = 'tests/data/singleSeq_vcf_enst_test2.vcf.gz'\n single_var_protein = SingleVariantProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n length = 0\n for tr_id, (ref_seq, t_id_seqs) in single_var_protein.extract_all():\n t_id_seqs = [seq for seq, info in list(t_id_seqs)]\n length += len(t_id_seqs)\n assert length == 0\n\n\n# TODO: add for all proteins.pep.all.fa\n\n# chr22_fasta_file = 'tests/data/chr22.fa.gz'\nchr22_gtf_file = 'tests/data/chr22_ENST00000319363.gtf'\n\n\n# chr22_5UTR_vcf_file = 'tests/data/chr22_ENST00000319363_5UTR.vcf.gz'\n\n\ndef test_5UTRFetcher__read_utr():\n utr5 = UTRFetcher._read_utr(chr22_gtf_file, feature_type=\"5UTR\")\n\n assert utr5.shape == (1, 12)\n\n assert utr5.iloc[0].Chromosome == 'chr22'\n assert utr5.iloc[0].Start == 17565848\n assert utr5.iloc[0].End == 17565981\n 
assert utr5.iloc[0].Strand == \"+\"\n\n utr5_from_cds = UTRFetcher._read_utr(chr22_gtf_file, feature_type=\"5UTR\", infer_from_cds=True)\n\n pd.testing.assert_frame_equal(left = utr5.drop(['exon_number', 'exon_id'], axis=1), right = utr5_from_cds.drop(['exon_number', 'exon_id'], axis=1), check_dtype=False)\n\n\ndef test_3UTRFetcher__read_utr():\n utr3 = UTRFetcher._read_utr(chr22_gtf_file, feature_type=\"3UTR\")\n\n assert utr3.shape == (1, 12)\n\n assert utr3.iloc[0].Chromosome == 'chr22'\n assert utr3.iloc[0].Start == 17590710\n assert utr3.iloc[0].End == 17596583\n assert utr3.iloc[0].Strand == \"+\"\n\n utr3_from_cds = UTRFetcher._read_utr(chr22_gtf_file, feature_type=\"3UTR\", infer_from_cds=True)\n\n pd.testing.assert_frame_equal(left=utr3.drop(['exon_number', 'exon_id'], axis=1),\n right=utr3_from_cds.drop(['exon_number', 'exon_id'], axis=1), check_dtype=False)\n" ]
[ [ "pandas.Series" ] ]
ShenDezhou/LSTM2
[ "b29ab680260673f407cb566be4af38aaf7d9ce8f" ]
[ "msr_dic/loadEmbedding.py" ]
[ "import codecs\nimport bz2\nimport numpy\n\nchars = []\ninit='。'\nwith codecs.open('../msr_dic/msr_dict.utf8', 'r', encoding='utf8') as f:\n lines = f.readlines()\n for line in lines:\n for w in line:\n if w == '\\n':\n continue\n else:\n chars.append(w)\nprint(len(chars))\nrxdict = dict(zip(chars, range(1, 1 + len(chars))))\n\nbz_file = bz2.BZ2File(\"../model/zhwiki_20180420_100d.txt.bz2\")\nwords, dims = bz_file.readline().strip().split(maxsplit=1)\nprint(words, dims)\nembedding_matrix = numpy.zeros((len(chars)+1, int(dims)), dtype=float)\n\n#for fast checking existance\nschar = set(chars)\n\nlines = bz_file.readlines()\ncounter = 0\nlenstats = {}\nfor line in lines:\n line = line.strip()\n word, coefs = line.split(maxsplit=1)\n word = word.decode(encoding=\"utf-8\")\n lenstats[len(word)] =lenstats.get(len(word), 0) + 1\n if word in schar:\n embedding_matrix[rxdict[word]] = numpy.fromstring(coefs, 'f', sep=' ')\n if counter % 10000 == 0 and counter!=0:\n print(\".\")\n counter += 1\n\nprint(lenstats)\nprint(embedding_matrix.shape)\n# 4698\n# 4529\n#print(embedding_matrix[rxdict['。']])\nzeroind = numpy.where(~embedding_matrix.any(axis=1))[0]\nprint(zeroind)\n\nembedding_matrix[zeroind] = embedding_matrix[rxdict[init]]\nnumpy.save(\"../msr_dic/zhwiki_embedding.npy\", embedding_matrix)\n\nzeroind = numpy.where(~embedding_matrix.any(axis=1))[0]\nprint(zeroind)" ]
[ [ "numpy.fromstring", "numpy.save" ] ]
MariBax/Face-masking-with-CV
[ "e211afe8ebe82553ee4089e7dc288bc127c81107" ]
[ "utils.py" ]
[ "import cv2\r\nimport dlib\r\nimport numpy as np\r\nimport math\r\nfrom scipy.spatial import distance as dist\r\n\r\n\r\n# CONSTANTS\r\n\r\nMOUTH_THRESH = 0.9\r\nEYE_AR_THRESH = 0.3\r\nEYE_AR_CONSEC_FRAMES = 3\r\nMOUTH_THRESH = 0.9\r\n\r\nMASK_INFO = { 'mask1.jpg': {\r\n \"src_points\": np.float32([[400, 480], [800, 480], [600, 600]]), \r\n \"dst_points\": 'eyes',\r\n \"transparent\": False},\r\n 'mask2.jpg': {\r\n \"src_points\": np.float32([[270, 400], [680, 400], [470, 550]]),\r\n \"dst_points\": 'eyes',\r\n \"transparent\": False},\r\n '1_cat_nose.PNG': {\r\n \"src_points\": np.float32([[500, 400], [450, 500], [550, 500]]),\r\n \"dst_points\": 'nose',\r\n \"transparent\": True},\r\n '2_new_year_hat.PNG': {\r\n \"src_points\": np.float32([[250, 750], [400, 850], [550, 750]]),\r\n \"dst_points\": 'brows',\r\n \"transparent\": True},\r\n 'hat.png': {\r\n \"src_points\": np.float32([[150, 620], [250, 644], [350, 620]]),\r\n \"dst_points\": 'brows',\r\n \"transparent\": False},\r\n 'moustache.png': {\r\n \"src_points\": np.float32([[200, 215], [290, 0], [400, 215]]),\r\n \"dst_points\": 'moustache',\r\n \"transparent\": False},\r\n '1_cat_left_ear.PNG': {\r\n \"src_points\": np.float32([[450, 900], [600, 780], [800, 650]]),\r\n \"dst_points\": 'left_ear',\r\n \"transparent\": True},\r\n 'face_mask.jpeg': {\r\n \"src_points\": np.float32([[120, 185], [35, 55], [185, 55]]),\r\n \"dst_points\": 'lips',\r\n \"transparent\": False}}\r\n\r\n\r\n\r\n########## MOUTH UTILS\r\n\r\ndef get_lips(shape):\r\n ulip = np.append(shape[48:55], shape[60:65][::-1], axis=0)\r\n blip = np.append(shape[54:60], [shape[48]], axis=0)\r\n blip = np.append(blip, [shape[60]], axis=0)\r\n blip = np.append(blip, shape[64:68][::-1], axis=0)\r\n return ulip, blip\r\n \r\ndef get_lip_thikness(lip):\r\n thikness = 0\r\n for i in [2, 3, 4]:\r\n distance = math.sqrt((lip[i][0] - lip[12-i][0])**2 +\r\n (lip[i][1] - lip[12-i][1])**2)\r\n thikness += distance\r\n return thikness / 3\r\n\r\ndef get_mouth_height(top_lip, bottom_lip):\r\n height = 0\r\n for i in [8, 9, 10]:\r\n distance = math.sqrt((top_lip[i][0] - bottom_lip[18-i][0])**2 + \r\n (top_lip[i][1] - bottom_lip[18-i][1])**2)\r\n height += distance\r\n return height / 3\r\n\r\ndef check_mouth_open(top_lip, bottom_lip):\r\n top_lip_height = get_lip_thikness(top_lip)\r\n bottom_lip_height = get_lip_thikness(bottom_lip)\r\n mouth_height = get_mouth_height(top_lip, bottom_lip)\r\n return mouth_height > min(top_lip_height, bottom_lip_height) * MOUTH_THRESH\r\n\r\n\r\n########## EYES UTILS\r\n\r\ndef eye_aspect_ratio(eye):\r\n A = dist.euclidean(eye[1], eye[5])\r\n B = dist.euclidean(eye[2], eye[4])\r\n C = dist.euclidean(eye[0], eye[3])\r\n return (A + B) / (2.0 * C)\r\n\r\ndef calc_eyes_ratio(leftEye, rightEye):\r\n leftEAR = eye_aspect_ratio(leftEye)\r\n rightEAR = eye_aspect_ratio(rightEye)\r\n return (leftEAR + rightEAR) / 2.0\r\n\r\ndef contouring(thresh, mid, img, right=False):\r\n cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n # cv2.drawContours(img, cnts, -1, (0, 255, 255), 3)\r\n\r\n cnts = sorted(cnts, key = cv2.contourArea)\r\n\r\n for cnt in cnts[-2:]:\r\n cnt = max(cnts, key = cv2.contourArea)\r\n M = cv2.moments(cnt)\r\n cx = int(M['m10'] / M['m00'])\r\n cy = int(M['m01'] / M['m00'])\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n if right:\r\n cx += mid \r\n cv2.circle(img, (cx, cy), min(h, w) // 2, (0, 0, 255), 3)\r\n\r\n\r\n########## MISC\r\n\r\ndef overlay_transparent(background_img, img_to_overlay_t, x, y):\r\n # overlays a 
transparant PNG onto another image\r\n bg_img = background_img.copy()\r\n b, g, r, a = cv2.split(img_to_overlay_t)\r\n overlay_color = cv2.merge((b,g,r))\r\n alpha = a / 255.0\r\n h, w, _ = overlay_color.shape\r\n bg_img[y:y+h, x:x+w] = cv2.add(alpha[:, :, None] * overlay_color, (1 - alpha[:, :, None]) * bg_img[y:y+h, x:x+w])\r\n return bg_img\r\n" ]
[ [ "numpy.float32", "scipy.spatial.distance.euclidean", "numpy.append" ] ]
ItsTheSebbe/4vHelix_GUI
[ "6626d29bf9a2150b2ab49104918ab2faa5f45c30" ]
[ "supporting_scripts/tacoxDNA/src/libs/pdb.py" ]
[ "import numpy as np\nimport itertools\nfrom math import sqrt\nimport sys\nimport copy\n\nBASE_SHIFT = 1.13\nCOM_SHIFT = 0.5\nFROM_OXDNA_TO_ANGSTROM = 8.518\nFROM_ANGSTROM_TO_OXDNA = 1. / FROM_OXDNA_TO_ANGSTROM\n\nNAME_TO_BASE = {\n \"ADE\" : \"A\",\n \"CYT\" : \"C\",\n \"GUA\" : \"G\",\n \"THY\" : \"T\",\n \"URA\" : \"U\",\n }\n\nBASES = [\"A\", \"T\", \"G\", \"C\", \"U\"]\n\nclass Nucleotide(object):\n RNA_warning_printed = False\n \n def __init__(self, name, idx):\n object.__init__(self)\n self.name = name.strip()\n if self.name in NAME_TO_BASE.keys():\n self.base = NAME_TO_BASE[self.name]\n elif self.name in BASES:\n if self.name == \"U\" and not Nucleotide.RNA_warning_printed:\n print >> sys.stderr, \"WARNING: unsupported uracil detected: use at your own risk\"\n Nucleotide.RNA_warning_printed = True\n \n self.base = self.name\n else:\n self.base = name[1:]\n self.idx = idx\n self.base_atoms = []\n self.phosphate_atoms = []\n self.sugar_atoms = []\n self.named_atoms = {}\n self.ring_names = [\"C2\", \"C4\", \"C5\", \"C6\", \"N1\", \"N3\"]\n self.chain_id = None\n\n def get_atoms(self):\n return self.base_atoms + self.phosphate_atoms + self.sugar_atoms\n\n def add_atom(self, a):\n if 'P' in a.name or a.name == \"HO5'\": \n self.phosphate_atoms.append(a)\n elif \"'\" in a.name: \n self.sugar_atoms.append(a)\n else: \n self.base_atoms.append(a)\n \n self.named_atoms[a.name] = a\n if self.chain_id == None: \n self.chain_id = a.chain_id\n\n def get_com(self, atoms=None):\n if atoms == None: \n atoms = self.atoms\n com = np.array([0., 0., 0.])\n for a in atoms:\n com += a.pos\n\n return com / len(atoms)\n\n def compute_a3(self):\n base_com = self.get_com(self.base_atoms)\n # the O4' oxygen is always (at least for non pathological configurations, as far as I know) oriented 3' -> 5' with respect to the base's centre of mass\n parallel_to = self.named_atoms[\"O4'\"].pos - base_com\n self.a3 = np.array([0., 0., 0.])\n \n for perm in itertools.permutations(self.ring_names, 3):\n p = self.named_atoms[perm[0]]\n q = self.named_atoms[perm[1]]\n r = self.named_atoms[perm[2]]\n v1 = p.pos - q.pos\n v2 = p.pos - r.pos\n v1 /= sqrt(np.dot(v1, v1))\n v2 /= sqrt(np.dot(v2, v2))\n if abs(np.dot(v1, v2)) > 0.01 or 1:\n a3 = np.cross(v1, v2)\n a3 /= sqrt(np.dot(a3, a3))\n if np.dot(a3, parallel_to) < 0.: \n a3 = -a3\n self.a3 += a3\n\n self.a3 /= sqrt(np.dot(self.a3, self.a3))\n\n def compute_a1(self):\n if \"C\" in self.name or \"T\" in self.name or \"U\" in self.name:\n pairs = [ [\"N3\", \"C6\"], [\"C2\", \"N1\"], [\"C4\", \"C5\"] ]\n else:\n pairs = [ [\"N1\", \"C4\"], [\"C2\", \"N3\"], [\"C6\", \"C5\"] ]\n\n self.a1 = np.array([0., 0., 0.])\n for pair in pairs:\n p = self.named_atoms[pair[0]]\n q = self.named_atoms[pair[1]]\n diff = p.pos - q.pos\n self.a1 += diff\n\n self.a1 /= sqrt(np.dot(self.a1, self.a1))\n\n def compute_as(self):\n self.compute_a1()\n self.compute_a3()\n self.a2 = np.cross(self.a3, self.a1)\n self.check = abs(np.dot(self.a1, self.a3))\n \n def correct_for_large_boxes(self, box):\n map(lambda x: x.shift(-np.rint(x.pos / box ) * box), self.atoms)\n\n def to_pdb(self, chain_identifier, print_H, residue_serial, residue_suffix, residue_type):\n res = []\n for a in self.atoms:\n if not print_H and 'H' in a.name:\n continue\n if residue_type == \"5\": \n if 'P' in a.name:\n if a.name == 'P':\n phosphorus = a\n continue\n elif a.name == \"O5'\":\n O5prime = a\n elif residue_type == \"3\":\n if a.name == \"O3'\":\n O3prime = a\n res.append(a.to_pdb(chain_identifier, residue_serial, 
residue_suffix))\n \n # if the residue is a 3' or 5' end, it requires one more hydrogen linked to the O3' or O5', respectively\n if residue_type == \"5\":\n new_hydrogen = copy.deepcopy(phosphorus)\n new_hydrogen.name = \"HO5'\"\n \n # we put the new hydrogen at a distance 1 Angstrom from the O5' oxygen along the direction that, in a regular nucleotide, connects O5' and P\n dist_P_O = phosphorus.pos - O5prime.pos\n dist_P_O *= 1. / np.sqrt(np.dot(dist_P_O, dist_P_O))\n new_hydrogen.pos = O5prime.pos + dist_P_O\n res.append(new_hydrogen.to_pdb(chain_identifier, residue_serial, residue_suffix))\n elif residue_type == \"3\":\n new_hydrogen = copy.deepcopy(O3prime)\n new_hydrogen.name = \"HO3'\"\n \n # we put the new hydrogen at a distance 1 Angstrom from the O3' oxygen along a direction which is a linear combination of the three \n # orientations that approximately reproduce the crystallographic position\n new_distance = 0.2 * self.a2 - 0.2 * self.a1 - self.a3\n new_distance *= 1. / np.sqrt(np.dot(new_distance, new_distance))\n new_hydrogen.pos = O3prime.pos + new_distance\n res.append(new_hydrogen.to_pdb(chain_identifier, residue_serial, residue_suffix))\n\n return \"\\n\".join(res)\n\n def to_mgl(self):\n res = []\n for a in self.atoms:\n res.append(a.to_mgl())\n\n return \"\\n\".join(res)\n\n def rotate(self, R):\n com = self.get_com()\n for a in self.atoms:\n a.pos = np.dot(R, a.pos - com) + com\n\n self.compute_as()\n\n def set_com(self, new_com):\n com = self.get_com()\n for a in self.atoms:\n a.pos += new_com - com - COM_SHIFT * self.a1\n\n def set_base(self, new_base_com):\n atoms = [v for k, v in self.named_atoms.iteritems() if k in self.ring_names]\n ring_com = self.get_com(atoms)\n for a in self.atoms:\n a.pos += new_base_com - ring_com - BASE_SHIFT * self.a1\n\n self.compute_as()\n\n atoms = property(get_atoms)\n\n\nclass Atom(object):\n serial_atom = 1\n\n def __init__(self, pdb_line):\n object.__init__(self)\n # http://cupnet.net/pdb-format/\n self.name = pdb_line[12:16].strip()\n # some PDB files have * in place of '\n if \"*\" in self.name:\n self.name = self.name.replace(\"*\", \"'\")\n \n self.alternate = pdb_line[16]\n self.residue = pdb_line[17:20].strip()\n self.chain_id = pdb_line[21:22].strip()\n self.residue_idx = int(pdb_line[22:26])\n self.pos = np.array([float(pdb_line[30:38]), float(pdb_line[38:46]), float(pdb_line[46:54])])\n \n def is_hydrogen(self):\n return \"H\" in self.name\n\n def shift(self, diff):\n self.pos += diff\n\n def to_pdb(self, chain_identifier, residue_serial, residue_suffix):\n residue = self.residue + residue_suffix\n res = \"{:6s}{:5d} {:^4s}{:1s}{:3s} {:1s}{:4d}{:1s} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:>2s}{:2s}\".format(\"ATOM\", Atom.serial_atom, self.name, \" \", residue, chain_identifier, residue_serial, \" \", self.pos[0], self.pos[1], self.pos[2], 1.00, 0.00, \" \", \" \", \" \")\n Atom.serial_atom += 1\n if Atom.serial_atom > 99999:\n Atom.serial_atom = 1\n return res\n\n def to_mgl(self):\n colors = {\"C\" : \"0,1,1\", \"P\" : \"1,1,0\", \"O\" : \"1,0,0\", \"H\" : \"0.5,0.5,0.5\", \"N\" : \"0,0,1\"}\n for c in colors:\n if c in self.name: color = colors[c]\n r = 0.5\n return \"%s %s %s @ %f C[%s]\" % (self.pos[0], self.pos[1], self.pos[2], r, color)\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.cross", "numpy.rint" ] ]
Microsoft/onnxruntime
[ "0869f4f4ea9abeb4edf2e5d5b570880f77f81bfa" ]
[ "onnxruntime/python/torch_cpp_extensions/setup.py" ]
[ "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\nimport os\n\nfrom setuptools import setup\nfrom torch.utils import cpp_extension\n\nfilename = os.path.join(os.path.dirname(__file__), \"aten_op_executor/aten_op_executor.cc\")\n\nsetup(\n name=\"ort_torch_ext\",\n version=\"1.0\",\n ext_modules=[cpp_extension.CppExtension(name=\"ort_torch_ext.aten_op_executor\", sources=[filename])],\n packages=[\"ort_torch_ext\"],\n cmdclass={\"build_ext\": cpp_extension.BuildExtension},\n)\n" ]
[ [ "torch.utils.cpp_extension.CppExtension" ] ]
mhauskn/hfo
[ "b8b2a1d462823c6732f4d5581aa7fe2e371d55cb" ]
[ "hfo/hfo.py" ]
[ "from ctypes import *\nimport numpy as np\nfrom numpy.ctypeslib import as_ctypes\nimport os\n\nhfo_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),\n 'libhfo_c.so'))\n\n\"\"\"Possible feature sets\"\"\"\nNUM_FEATURE_SETS = 2\nLOW_LEVEL_FEATURE_SET, HIGH_LEVEL_FEATURE_SET = list(range(NUM_FEATURE_SETS))\n\n\"\"\"\nAn enum of the possible HFO actions, including:\n [Low-Level] Dash(power, relative_direction)\n [Low-Level] Turn(direction)\n [Low-Level] Tackle(direction)\n [Low-Level] Kick(power, direction)\n [Mid-Level] Kick_To(target_x, target_y, speed)\n [Mid-Level] Move(target_x, target_y)\n [Mid-Level] Dribble(target_x, target_y)\n [Mid-Level] Intercept(): Intercept the ball\n [High-Level] Move(): Reposition player according to strategy\n [High-Level] Shoot(): Shoot the ball\n [High-Level] Pass(teammate_unum): Pass to teammate\n [High-Level] Dribble(): Offensive dribble\n [High-Level] Catch(): Catch the ball (Goalie Only)\n NOOP(): Do Nothing\n QUIT(): Quit the game\n\"\"\"\nNUM_HFO_ACTIONS = 20\nDASH,TURN,TACKLE,KICK,KICK_TO,MOVE_TO,DRIBBLE_TO,INTERCEPT,MOVE,SHOOT,PASS,DRIBBLE,CATCH,NOOP,QUIT,REDUCE_ANGLE_TO_GOAL,MARK_PLAYER,DEFEND_GOAL,GO_TO_BALL,REORIENT = list(range(NUM_HFO_ACTIONS))\nACTION_STRINGS = {DASH: \"Dash\",\n TURN: \"Turn\",\n TACKLE: \"Tackle\",\n KICK: \"Kick\",\n KICK_TO: \"KickTo\",\n MOVE_TO: \"MoveTo\",\n DRIBBLE_TO: \"DribbleTo\",\n INTERCEPT: \"Intercept\",\n MOVE: \"Move\",\n SHOOT: \"Shoot\",\n PASS: \"Pass\",\n DRIBBLE: \"Dribble\",\n CATCH: \"Catch\",\n NOOP: \"No-op\",\n QUIT: \"Quit\",\n REDUCE_ANGLE_TO_GOAL: \"Reduce_Angle_To_Goal\",\n MARK_PLAYER: \"Mark_Player\",\n DEFEND_GOAL: \"Defend_Goal\",\n GO_TO_BALL: \"Go_To_Ball\",\n REORIENT: \"Reorient\"}\n\n\"\"\"\nPossible game statuses:\n [IN_GAME] Game is currently active\n [GOAL] A goal has been scored by the offense\n [CAPTURED_BY_DEFENSE] The defense has captured the ball\n [OUT_OF_BOUNDS] Ball has gone out of bounds\n [OUT_OF_TIME] Trial has ended due to time limit\n [SERVER_DOWN] Server is not alive\n\"\"\"\nNUM_GAME_STATUS_STATES = 6\nIN_GAME, GOAL, CAPTURED_BY_DEFENSE, OUT_OF_BOUNDS, OUT_OF_TIME, SERVER_DOWN = list(range(NUM_GAME_STATUS_STATES))\nSTATUS_STRINGS = {IN_GAME: \"InGame\",\n GOAL: \"Goal\",\n CAPTURED_BY_DEFENSE: \"CapturedByDefense\",\n OUT_OF_BOUNDS: \"OutOfBounds\",\n OUT_OF_TIME: \"OutOfTime\",\n SERVER_DOWN: \"ServerDown\"}\n\n\"\"\"Possible sides.\"\"\"\nRIGHT, NEUTRAL, LEFT = list(range(-1,2))\n\nclass Player(Structure): pass\nPlayer._fields_ = [\n ('side', c_int),\n ('unum', c_int),\n]\n\nhfo_lib.HFO_new.argtypes = None\nhfo_lib.HFO_new.restype = c_void_p\nhfo_lib.HFO_del.argtypes = [c_void_p]\nhfo_lib.HFO_del.restype = None\nhfo_lib.connectToServer.argtypes = [c_void_p, c_int, c_char_p, c_int,\n c_char_p, c_char_p, c_bool, c_char_p]\nhfo_lib.connectToServer.restype = None\nhfo_lib.getStateSize.argtypes = [c_void_p]\nhfo_lib.getStateSize.restype = c_int\nhfo_lib.getState.argtypes = [c_void_p, c_void_p]\nhfo_lib.getState.restype = None\nhfo_lib.act.argtypes = [c_void_p, c_int, c_void_p]\nhfo_lib.act.restype = None\nhfo_lib.say.argtypes = [c_void_p, c_char_p]\nhfo_lib.say.restype = None\nhfo_lib.hear.argtypes = [c_void_p]\nhfo_lib.hear.restype = c_char_p\nhfo_lib.playerOnBall.argtypes = [c_void_p]\nhfo_lib.playerOnBall.restype = Player\nhfo_lib.step.argtypes = [c_void_p]\nhfo_lib.step.restype = c_int\nhfo_lib.numParams.argtypes = [c_int]\nhfo_lib.numParams.restype = c_int\nhfo_lib.getUnum.argtypes = [c_void_p]\nhfo_lib.getUnum.restype = 
c_int\nhfo_lib.getNumTeammates.argtypes = [c_void_p]\nhfo_lib.getNumTeammates.restype = c_int\nhfo_lib.getNumOpponents.argtypes = [c_void_p]\nhfo_lib.getNumOpponents.restype = c_int\n\nclass HFOEnvironment(object):\n def __init__(self):\n self.obj = hfo_lib.HFO_new()\n\n def __del__(self):\n hfo_lib.HFO_del(self.obj)\n\n def connectToServer(self,\n feature_set=LOW_LEVEL_FEATURE_SET,\n config_dir='bin/teams/base/config/formations-dt',\n server_port=6000,\n server_addr='localhost',\n team_name='base_left',\n play_goalie=False,\n record_dir=''):\n \"\"\"\n Connects to the server on the specified port. The\n following information is provided by the ./bin/HFO\n\n feature_set: High or low level state features\n config_dir: Config directory. Typically HFO/bin/teams/base/config/\n server_port: port to connect to server on\n server_addr: address of server\n team_name: Name of team to join.\n play_goalie: is this player the goalie\n record_dir: record agent's states/actions/rewards to this directory\n \"\"\"\n hfo_lib.connectToServer(self.obj,\n feature_set,\n config_dir.encode('utf-8'),\n server_port,server_addr.encode('utf-8'),\n team_name.encode('utf-8'),\n play_goalie,\n record_dir.encode('utf-8'))\n\n def getStateSize(self):\n \"\"\" Returns the number of state features \"\"\"\n return hfo_lib.getStateSize(self.obj)\n\n def getState(self, state_data=None):\n \"\"\" Returns the current state features \"\"\"\n if state_data is None:\n state_data = np.zeros(self.getStateSize(), dtype=np.float32)\n hfo_lib.getState(self.obj, as_ctypes(state_data))\n return state_data\n\n def act(self, action_type, *args):\n \"\"\" Performs an action in the environment \"\"\"\n n_params = hfo_lib.numParams(action_type)\n assert n_params == len(args), 'Incorrect number of params to act: '\\\n 'Required %d, provided %d'%(n_params, len(args))\n params = np.asarray(args, dtype=np.float32)\n hfo_lib.act(self.obj, action_type, params.ctypes.data_as(POINTER(c_float)))\n\n def say(self, message):\n \"\"\" Transmits a message \"\"\"\n hfo_lib.say(self.obj, message.encode('utf-8'))\n\n def hear(self):\n \"\"\" Returns the message heard from another player \"\"\"\n return hfo_lib.hear(self.obj).decode('utf-8')\n\n def playerOnBall(self):\n \"\"\" Returns a player object who last touched the ball \"\"\"\n return hfo_lib.playerOnBall(self.obj)\n\n def step(self):\n \"\"\" Advances the state of the environment \"\"\"\n return hfo_lib.step(self.obj)\n\n def actionToString(self, action):\n \"\"\" Returns a string representation of an action \"\"\"\n return ACTION_STRINGS[action]\n\n def statusToString(self, status):\n \"\"\" Returns a string representation of a game status \"\"\"\n return STATUS_STRINGS[status]\n\n def getUnum(self):\n \"\"\" Returns the uniform number of the agent \"\"\"\n return hfo_lib.getUnum(self.obj)\n\n def getNumTeammates(self):\n \"\"\" Returns the number of teammates of the agent \"\"\"\n return hfo_lib.getNumTeammates(self.obj)\n\n def getNumOpponents(self):\n \"\"\" Returns the number of opponents of the agent \"\"\"\n return hfo_lib.getNumOpponents(self.obj)\n" ]
[ [ "numpy.ctypeslib.as_ctypes", "numpy.asarray" ] ]
lucidrains/openprotein
[ "c3a996a2fd233e465760888d2255ce35be050c5e" ]
[ "experiments/tmhmm3/tm_util.py" ]
[ "# This file is part of the TMHMM3 project.\n#\n# @author Jeppe Hallgren\n#\n# For license information, please see the LICENSE file in the root directory.\n\nimport torch\nfrom torch.utils.data.dataset import Dataset\nimport numpy as np\nimport math\nimport random\n\nfrom util import write_out\n\nclass TMDataset(Dataset):\n def __init__(self, aa_list, label_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, type_list, topology_list, prot_name_list, original_aa_string_list, original_label_string):\n assert len(aa_list) == len(label_list)\n assert len(aa_list) == len(type_list)\n assert len(aa_list) == len(topology_list)\n self.aa_list = aa_list\n self.label_list = label_list\n self.remapped_labels_list_crf_hmm = remapped_labels_list_crf_hmm\n self.remapped_labels_list_crf_marg = remapped_labels_list_crf_marg\n self.type_list = type_list\n self.topology_list = topology_list\n self.prot_name_list = prot_name_list\n self.original_aa_string_list = original_aa_string_list\n self.original_label_string = original_label_string\n\n def __getitem__(self, index):\n return self.aa_list[index], \\\n self.label_list[index], \\\n self.remapped_labels_list_crf_hmm[index], \\\n self.remapped_labels_list_crf_marg[index], \\\n self.type_list[index], \\\n self.topology_list[index], \\\n self.prot_name_list[index], \\\n self.original_aa_string_list[index], \\\n self.original_label_string[index]\n\n def __len__(self):\n return len(self.aa_list)\n\n def merge_samples_to_minibatch(samples):\n samples_list = []\n for s in samples:\n samples_list.append(s)\n # sort according to length of aa sequence\n samples_list.sort(key=lambda x: len(x[7]), reverse=True)\n aa_list, labels_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, prot_type_list, prot_topology_list, prot_name, original_aa_string, original_label_string = zip(*samples_list)\n write_out(prot_type_list)\n return aa_list, labels_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, prot_type_list, prot_topology_list, prot_name, original_aa_string, original_label_string\n\n def from_disk(dataset, use_gpu, re_map_labels=True):\n print(\"Constructing data set from disk...\")\n aa_list = []\n labels_list = []\n remapped_labels_list_crf_hmm = []\n remapped_labels_list_crf_marg = []\n prot_type_list = []\n prot_topology_list_all = []\n prot_aa_list_all = []\n prot_labels_list_all = []\n prot_name_list = []\n # sort according to length of aa sequence\n dataset.sort(key=lambda x: len(x[1]), reverse=True)\n for prot_name, prot_aa_list, prot_original_label_list, type_id, cluster_id in dataset:\n prot_name_list.append(prot_name)\n prot_aa_list_all.append(prot_aa_list)\n prot_labels_list_all.append(prot_original_label_list)\n aa_tmp_list_tensor = []\n labels = None\n remapped_labels_crf_hmm = None\n last_non_membrane_position = None\n if prot_original_label_list is not None:\n labels = []\n for topology_label in prot_original_label_list:\n if topology_label is \"L\":\n topology_label = \"I\"\n if topology_label is \"I\":\n last_non_membrane_position = \"I\"\n labels.append(3)\n elif topology_label is \"O\":\n last_non_membrane_position = \"O\"\n labels.append(4)\n elif topology_label is \"S\":\n last_non_membrane_position = \"S\"\n labels.append(2)\n elif topology_label is \"M\":\n if last_non_membrane_position is \"I\":\n labels.append(0)\n elif last_non_membrane_position is \"O\":\n labels.append(1)\n else:\n print(\"Error: unexpected label found in last_non_membrane_position:\", topology_label)\n else:\n print(\"Error: 
unexpected label found:\", topology_label, \"for protein\", prot_name)\n labels = torch.LongTensor(labels)\n remapped_labels_crf_hmm = []\n topology = label_list_to_topology(labels)\n # given topology, now calculate remapped labels\n for idx, (pos, l) in enumerate(topology):\n if l == 0: # I -> O\n membrane_length = topology[idx+1][0]-pos\n mm_beginning = 4\n for i in range(mm_beginning):\n remapped_labels_crf_hmm.append(5 + i)\n for i in range(40-(membrane_length-mm_beginning), 40):\n remapped_labels_crf_hmm.append(5 + i)\n elif l == 1: # O -> I\n membrane_length = topology[idx + 1][0] - pos\n mm_beginning = 4\n for i in range(mm_beginning):\n remapped_labels_crf_hmm.append(45 + i)\n for i in range(40 - (membrane_length - mm_beginning), 40):\n remapped_labels_crf_hmm.append(45 + i)\n elif l == 2: # S\n signal_length = topology[idx + 1][0] - pos\n remapped_labels_crf_hmm.append(2)\n for i in range(signal_length - 1):\n remapped_labels_crf_hmm.append(152 - ((signal_length - 1) - i))\n if remapped_labels_crf_hmm[-1] == 85:\n print(\"Too long signal peptide region found\", prot_name)\n else:\n if idx == (len(topology) - 1):\n for i in range(len(labels)-pos):\n remapped_labels_crf_hmm.append(l)\n else:\n for i in range(topology[idx+1][0]-pos):\n remapped_labels_crf_hmm.append(l)\n remapped_labels_crf_hmm = torch.LongTensor(remapped_labels_crf_hmm)\n\n remapped_labels_crf_marg = list([l + (type_id * 5) for l in labels])\n remapped_labels_crf_marg = torch.LongTensor(remapped_labels_crf_marg)\n\n # check that protein was properly parsed\n assert remapped_labels_crf_hmm.size() == labels.size()\n assert remapped_labels_crf_marg.size() == labels.size()\n\n\n if use_gpu:\n if labels is not None:\n labels = labels.cuda()\n remapped_labels_crf_hmm = remapped_labels_crf_hmm.cuda()\n remapped_labels_crf_marg = remapped_labels_crf_marg.cuda()\n aa_list.append(aa_tmp_list_tensor)\n labels_list.append(labels)\n remapped_labels_list_crf_hmm.append(remapped_labels_crf_hmm)\n remapped_labels_list_crf_marg.append(remapped_labels_crf_marg)\n prot_type_list.append(type_id)\n prot_topology_list_all.append(label_list_to_topology(labels))\n return TMDataset(aa_list, labels_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, prot_type_list, prot_topology_list_all, prot_name_list, prot_aa_list_all, prot_labels_list_all)\n\n\ndef tm_contruct_dataloader_from_disk(tm_dataset, minibatch_size, balance_classes=False):\n if balance_classes:\n batch_sampler = RandomBatchClassBalancedSequentialSampler(tm_dataset, minibatch_size)\n else:\n batch_sampler = RandomBatchSequentialSampler(tm_dataset, minibatch_size)\n return torch.utils.data.DataLoader(tm_dataset,\n batch_sampler = batch_sampler,\n collate_fn = TMDataset.merge_samples_to_minibatch)\n\n\nclass RandomBatchClassBalancedSequentialSampler(torch.utils.data.sampler.Sampler):\n\n def __init__(self, dataset, batch_size):\n self.sampler = torch.utils.data.sampler.SequentialSampler(dataset)\n self.batch_size = batch_size\n self.dataset = dataset\n\n def sample_at_index(self, rows, offset, sample_num):\n assert sample_num < len(rows)\n sample_half = int(sample_num / 2)\n if offset - sample_half <= 0:\n # sample start has to be 0\n samples = rows[:sample_num]\n elif offset + sample_half + (sample_num % 2) > len(rows):\n # sample end has to be a end\n samples = rows[-(sample_num+1):-1]\n else:\n samples = rows[offset-sample_half:offset+sample_half+(sample_num % 2)]\n assert len(samples) == sample_num\n return samples\n\n def __iter__(self):\n data_class_map = {}\n 
data_class_map[0] = []\n data_class_map[1] = []\n data_class_map[2] = []\n data_class_map[3] = []\n\n for idx in self.sampler:\n data_class_map[self.dataset[idx][4]].append(idx)\n\n num_each_class = int(self.batch_size / 4)\n\n max_class_size = max([len(data_class_map[0]),len(data_class_map[1]),len(data_class_map[2]),len(data_class_map[3])])\n\n batch_num = int(max_class_size / num_each_class)\n if max_class_size % num_each_class != 0:\n batch_num += 1\n\n batch_relative_offset = (1.0 / float(batch_num)) / 2.0\n batches = []\n for i in range(batch_num):\n batch = []\n for class_id, data_rows in data_class_map.items():\n int_offset = int(batch_relative_offset * len(data_rows))\n batch.extend(self.sample_at_index(data_rows, int_offset, num_each_class))\n batch_relative_offset += 1.0 / float(batch_num)\n batches.append(batch)\n\n random.shuffle(batches)\n\n for batch in batches:\n write_out(\"Using minibatch from RandomBatchClassBalancedSequentialSampler\")\n yield batch\n\n def __len__(self):\n length = 0\n for idx in self.sampler:\n length += 1\n return length\n\nclass RandomBatchSequentialSampler(torch.utils.data.sampler.Sampler):\n\n def __init__(self, dataset, batch_size):\n self.sampler = torch.utils.data.sampler.SequentialSampler(dataset)\n self.batch_size = batch_size\n\n def __iter__(self):\n data = []\n for idx in self.sampler:\n data.append(idx)\n\n batch_num = int(len(data) / self.batch_size)\n if len(data) % self.batch_size != 0:\n batch_num += 1\n\n batch_order = list(range(batch_num))\n random.shuffle(batch_order)\n\n batch = []\n for batch_id in batch_order:\n write_out(\"Accessing minibatch #\" + str(batch_id))\n for i in range(self.batch_size):\n if i+(batch_id*self.batch_size) < len(data):\n batch.append(data[i+(batch_id*self.batch_size)])\n yield batch\n batch = []\n\n def __len__(self):\n length = 0;\n for idx in self.sampler:\n length += 1\n return length\n\n\ndef label_list_to_topology(labels):\n if isinstance(labels, list):\n labels = torch.LongTensor(labels)\n unique, count = torch.unique_consecutive(labels, return_counts=True)\n top_list = [torch.LongTensor((0, int(labels[0])))]\n prev_count = 0\n for i in range(1, unique.size(0)):\n prev_count += int(count[i-1])\n top_list.append(torch.LongTensor((prev_count, int(unique[i]))))\n return top_list\n\n\ndef remapped_labels_hmm_to_orginal_labels(labels):\n for idx, pl in enumerate(labels):\n if pl >= 5 and pl < 45:\n labels[idx] = 0\n if pl >= 45 and pl < 85:\n labels[idx] = 1\n if pl >= 85:\n labels[idx] = 2\n if isinstance(labels, list):\n labels = torch.LongTensor(labels)\n return labels\n\ndef original_labels_to_fasta(label_list):\n sequence = \"\"\n for label in label_list:\n if label == 0:\n sequence = sequence + \"M\"\n if label == 1:\n sequence = sequence + \"M\"\n if label == 2:\n sequence = sequence + \"S\"\n if label == 3:\n sequence = sequence + \"I\"\n if label == 4:\n sequence = sequence + \"O\"\n if label == 5:\n sequence = sequence + \"-\"\n return sequence\n\n\ndef get_predicted_type_from_labels(labels):\n labels = list([int(i) for i in labels])\n if 0 in labels or 1 in labels:\n if 2 in labels:\n return 1\n else:\n return 0\n else:\n if 2 in labels:\n return 2\n else:\n return 3\n\n\ndef is_topologies_equal(topology_a, topology_b, minimum_seqment_overlap=5):\n if len(topology_a) != len(topology_b):\n return False\n for idx, (position_a, label_a) in enumerate(topology_a):\n if label_a != topology_b[idx][1]:\n return False\n if label_a == 0 or label_a == 1:\n overlap_segment_start = 
max(topology_a[idx][0],topology_b[idx][0])\n overlap_segment_end = min(topology_a[idx+1][0],topology_b[idx+1][0])\n if overlap_segment_end-overlap_segment_start < minimum_seqment_overlap:\n return False\n return True\n\n\ndef parse_3line_format(lines):\n i = 0\n prot_list = []\n while(i < len(lines)):\n if lines[i].strip() is \"\":\n i += 1\n continue\n prot_name_comment = lines[i]\n type_string = None\n cluster_id = None\n if prot_name_comment.__contains__(\">\"):\n i += 1\n prot_name = prot_name_comment.split(\"|\")[0].split(\">\")[1]\n type_string = prot_name_comment.split(\"|\")[1]\n cluster_id = int(prot_name_comment.split(\"|\")[2])\n else:\n # assume this is data\n prot_name = \"> Unknown Protein Name\"\n prot_aa_list = lines[i].upper()\n i += 1\n if len(prot_aa_list) > 6000:\n print(\"Discarding protein\",prot_name,\"as length larger than 6000:\",len(prot_aa_list))\n if i < len(lines) and not lines[i].__contains__(\">\"):\n i += 1\n else:\n if i < len(lines) and not lines[i].__contains__(\">\"):\n prot_topology_list = lines[i].upper()\n i += 1\n if prot_topology_list.__contains__(\"S\"):\n if prot_topology_list.__contains__(\"M\"):\n type_id = 1\n assert type_string == \"SP+TM\"\n else:\n type_id = 2\n assert type_string == \"SP\"\n else:\n if prot_topology_list.__contains__(\"M\"):\n type_id = 0\n assert type_string == \"TM\"\n else:\n type_id = 3\n assert type_string == \"GLOBULAR\"\n else:\n type_id = None\n prot_topology_list = None\n prot_list.append((prot_name, prot_aa_list, prot_topology_list, type_id, cluster_id))\n\n return prot_list\n\n\ndef parse_datafile_from_disk(file):\n lines = list([line.strip() for line in open(file)])\n return parse_3line_format(lines)\n\n\ndef calculate_partitions(partitions_count, cluster_partitions, types):\n partition_distribution = torch.ones((partitions_count, len(torch.unique(types))), dtype=torch.long)\n partition_assignments = torch.zeros(cluster_partitions.shape[0],dtype=torch.long)\n\n for i in torch.unique(cluster_partitions):\n cluster_positions = (cluster_partitions == i).nonzero()\n cluster_types = types[cluster_positions]\n unique_types_in_cluster, type_count = torch.unique(cluster_types, return_counts=True)\n tmp_distribution = partition_distribution.clone()\n tmp_distribution[:,unique_types_in_cluster] += type_count\n relative_distribution = partition_distribution.double()/tmp_distribution.double()\n min_relative_distribution_group = torch.argmin(torch.sum(relative_distribution,dim=1))\n partition_distribution[min_relative_distribution_group,unique_types_in_cluster] += type_count\n partition_assignments[cluster_positions] = min_relative_distribution_group\n\n write_out(\"Loaded data into the following partitions\")\n write_out(\"[[ TM SP+TM SP Glob]\")\n write_out(partition_distribution-torch.ones(partition_distribution.shape,dtype=torch.long))\n return partition_assignments\n\n\ndef load_data_from_disk(filename, partition_rotation=0):\n print(\"Loading data from disk...\")\n data = parse_datafile_from_disk(filename)\n data_unzipped = list(zip(*data))\n partitions = calculate_partitions(\n cluster_partitions=torch.LongTensor(np.array(data_unzipped[4])),\n types=torch.LongTensor(np.array(data_unzipped[3])),\n partitions_count=5)\n train_set = []\n val_set = []\n test_set = []\n for idx, sample in enumerate(data):\n partition = int(partitions[idx]) # in range 0-4\n rotated = (partition + partition_rotation) % 5\n if int(rotated) <= 2:\n train_set.append(sample)\n elif int(rotated) == 3:\n val_set.append(sample)\n else:\n 
test_set.append(sample)\n\n print(\"Data splited as:\",\n len(train_set), \"train set\",\n len(val_set), \"validation set\",\n len(test_set), \"test set\")\n return train_set, val_set, test_set\n\n\ndef normalize_confusion_matrix(confusion_matrix):\n confusion_matrix = confusion_matrix.astype(np.float64)\n for i in range(4):\n sum = int(confusion_matrix[i].sum())\n if sum != 0:\n confusion_matrix[4][i] /= sum * 0.01 # 0.01 to convert to percentage\n for k in range(5):\n if sum != 0:\n confusion_matrix[i][k] /= sum * 0.01 # 0.01 to convert to percentage\n else:\n confusion_matrix[i][k] = math.nan\n return confusion_matrix.round(2)\n" ]
[ [ "torch.zeros", "numpy.array", "torch.unique", "torch.ones", "torch.utils.data.sampler.SequentialSampler", "torch.LongTensor", "torch.unique_consecutive", "torch.utils.data.DataLoader", "torch.sum" ] ]
opeltre/geomstats
[ "135d5bb6f19e29dd453c68399e04100a9e2c76bf", "135d5bb6f19e29dd453c68399e04100a9e2c76bf" ]
[ "tests/test_backend_tensorflow.py", "examples/plot_square_h2_poincare_disk.py" ]
[ "\"\"\"\nUnit tests for tensorflow backend.\n\"\"\"\n\nimport geomstats.backend as gs\nimport geomstats.tests\n\n\n@geomstats.tests.tf_only\nclass TestBackendTensorFlow(geomstats.tests.TestCase):\n def test_vstack(self):\n import tensorflow as tf\n with self.test_session():\n tensor_1 = tf.convert_to_tensor([[1., 2., 3.], [4., 5., 6.]])\n tensor_2 = tf.convert_to_tensor([[7., 8., 9.]])\n\n result = gs.vstack([tensor_1, tensor_2])\n expected = tf.convert_to_tensor([\n [1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n self.assertAllClose(result, expected)\n\n def test_tensor_addition(self):\n with self.test_session():\n tensor_1 = gs.ones((1, 1))\n tensor_2 = gs.ones((0, 1))\n\n tensor_1 + tensor_2\n", "\"\"\"\nPlot a square on H2 with Poincare Disk visualization.\n\"\"\"\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport geomstats.visualization as visualization\nfrom geomstats.geometry.hyperbolic_space import HyperbolicSpace\n\nH2 = HyperbolicSpace(dimension=2)\nMETRIC = H2.metric\n\nSQUARE_SIZE = 50\n\n\ndef main():\n top = SQUARE_SIZE / 2.0\n bot = - SQUARE_SIZE / 2.0\n left = - SQUARE_SIZE / 2.0\n right = SQUARE_SIZE / 2.0\n corners_int = [(bot, left), (bot, right), (top, right), (top, left)]\n corners_ext = H2.intrinsic_to_extrinsic_coords(corners_int)\n n_steps = 20\n ax = plt.gca()\n for i, src in enumerate(corners_ext):\n dst_id = (i+1) % len(corners_ext)\n dst = corners_ext[dst_id]\n tangent_vec = METRIC.log(point=dst, base_point=src)\n geodesic = METRIC.geodesic(initial_point=src,\n initial_tangent_vec=tangent_vec)\n t = np.linspace(0, 1, n_steps)\n edge_points = geodesic(t)\n\n visualization.plot(\n edge_points,\n ax=ax,\n space='H2_poincare_disk',\n marker='.',\n color='black')\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n if os.environ['GEOMSTATS_BACKEND'] == 'tensorflow':\n print('Examples with visualizations are only implemented '\n 'with numpy backend.\\n'\n 'To change backend, write: '\n 'export GEOMSTATS_BACKEND = \\'numpy\\'.')\n else:\n main()\n" ]
[ [ "tensorflow.convert_to_tensor" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.gca", "numpy.linspace" ] ]
witnessai/GRAN
[ "952c2b08a58f3b0087f0f18fd48f8e385e45908b", "952c2b08a58f3b0087f0f18fd48f8e385e45908b", "952c2b08a58f3b0087f0f18fd48f8e385e45908b", "952c2b08a58f3b0087f0f18fd48f8e385e45908b", "952c2b08a58f3b0087f0f18fd48f8e385e45908b", "952c2b08a58f3b0087f0f18fd48f8e385e45908b", "952c2b08a58f3b0087f0f18fd48f8e385e45908b" ]
[ "mmdet/datasets/coco_seen65.py", "mmdet/datasets/vg_unseen130.py", "mmdet/core/evaluation/bbox_overlaps.py", "mmdet/core/anchor/anchor_generator.py", "mmdet/models/detectors/test_mixins.py", "mmdet/models/bbox_heads/global_context_head_semantic.py", "mmdet/datasets/loader/sampler.py" ]
[ "import numpy as np\r\nfrom pycocotools.coco import COCO\r\n\r\nfrom .custom import CustomDataset\r\nfrom .registry import DATASETS\r\n\r\n\r\n@DATASETS.register_module\r\nclass CocoDatasetSeen65(CustomDataset):\r\n\r\n CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'boat',\r\n 'traffic_light', 'fire_hydrant',\r\n 'stop_sign', 'bench', 'bird', 'dog',\r\n 'horse', 'sheep', 'cow', 'elephant', 'zebra', 'giraffe',\r\n 'backpack', 'umbrella', 'handbag', 'tie',\r\n 'skis', 'sports_ball', 'kite', 'baseball_bat',\r\n 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',\r\n 'bottle', 'wine_glass', 'cup', 'knife', 'spoon', 'bowl',\r\n 'banana', 'apple', 'orange', 'broccoli', 'carrot',\r\n 'pizza', 'donut', 'cake', 'chair', 'couch',\r\n 'potted_plant', 'bed', 'dining_table', 'tv', 'laptop',\r\n 'remote', 'keyboard', 'cell_phone', 'microwave',\r\n 'oven', 'sink', 'refrigerator', 'book', 'clock',\r\n 'vase', 'scissors', 'teddy_bear', 'toothbrush')\r\n\r\n def load_annotations(self, ann_file):\r\n self.coco = COCO(ann_file)\r\n self.cat_ids = self.coco.getCatIds()\r\n self.cat2label = {\r\n cat_id: i + 1\r\n for i, cat_id in enumerate(self.cat_ids)\r\n }\r\n self.img_ids = self.coco.getImgIds()\r\n img_infos = []\r\n for i in self.img_ids:\r\n info = self.coco.loadImgs([i])[0]\r\n info['filename'] = info['file_name']\r\n img_infos.append(info)\r\n return img_infos\r\n\r\n def get_ann_info(self, idx):\r\n img_id = self.img_infos[idx]['id']\r\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\r\n ann_info = self.coco.loadAnns(ann_ids)\r\n return self._parse_ann_info(self.img_infos[idx], ann_info)\r\n\r\n def _filter_imgs(self, min_size=32):\r\n \"\"\"Filter images too small or without ground truths.\"\"\"\r\n valid_inds = []\r\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\r\n for i, img_info in enumerate(self.img_infos):\r\n if self.img_ids[i] not in ids_with_ann:\r\n continue\r\n if min(img_info['width'], img_info['height']) >= min_size:\r\n valid_inds.append(i)\r\n return valid_inds\r\n\r\n def _parse_ann_info(self, img_info, ann_info):\r\n \"\"\"Parse bbox and mask annotation.\r\n\r\n Args:\r\n ann_info (list[dict]): Annotation info of an image.\r\n with_mask (bool): Whether to parse mask annotations.\r\n\r\n Returns:\r\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\r\n labels, masks, seg_map. 
\"masks\" are raw annotations and not\r\n decoded into binary masks.\r\n \"\"\"\r\n gt_bboxes = []\r\n gt_labels = []\r\n gt_bboxes_ignore = []\r\n gt_masks_ann = []\r\n\r\n for i, ann in enumerate(ann_info):\r\n if ann.get('ignore', False):\r\n continue\r\n x1, y1, w, h = ann['bbox']\r\n if ann['area'] <= 0 or w < 1 or h < 1:\r\n continue\r\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\r\n if ann.get('iscrowd', False):\r\n gt_bboxes_ignore.append(bbox)\r\n else:\r\n gt_bboxes.append(bbox)\r\n gt_labels.append(self.cat2label[ann['category_id']])\r\n gt_masks_ann.append(ann['segmentation'])\r\n\r\n if gt_bboxes:\r\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\r\n gt_labels = np.array(gt_labels, dtype=np.int64)\r\n else:\r\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\r\n gt_labels = np.array([], dtype=np.int64)\r\n\r\n if gt_bboxes_ignore:\r\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\r\n else:\r\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\r\n\r\n seg_map = img_info['filename'].replace('jpg', 'png')\r\n\r\n ann = dict(\r\n bboxes=gt_bboxes,\r\n labels=gt_labels,\r\n bboxes_ignore=gt_bboxes_ignore,\r\n masks=gt_masks_ann,\r\n seg_map=seg_map)\r\n\r\n return ann\r\n", "import numpy as np\r\nfrom pycocotools.coco import COCO\r\n\r\nfrom .custom import CustomDataset\r\nfrom .registry import DATASETS\r\n\r\n\r\n@DATASETS.register_module\r\nclass VGDatasetUnSeen130(CustomDataset):\r\n\r\n CLASSES = (\r\n \"chain\",\r\n \"vent\",\r\n \"laptop\",\r\n \"propeller\",\r\n \"goggles\",\r\n \"motorcycle\",\r\n \"chair\",\r\n \"disk\",\r\n \"milk\",\r\n \"asphalt\",\r\n \"tire\",\r\n \"sky\",\r\n \"sugar\",\r\n \"window\",\r\n \"chimney\",\r\n \"lip\",\r\n \"gravel\",\r\n \"hat\",\r\n \"coffee\",\r\n \"garden\",\r\n \"cupcake\",\r\n \"kitten\",\r\n \"bead\",\r\n \"tank\",\r\n \"cage\",\r\n \"halo\",\r\n \"shoulder\",\r\n \"flamingo\",\r\n \"spatula\",\r\n \"cooler\",\r\n \"glove\",\r\n \"swimsuit\",\r\n \"steeple\",\r\n \"rack\",\r\n \"menagerie\",\r\n \"bicycle\",\r\n \"people\",\r\n \"house\",\r\n \"nut\",\r\n \"bacon\",\r\n \"goose\",\r\n \"zebra\",\r\n \"cattle\",\r\n \"ribbon\",\r\n \"sausage\",\r\n \"flower\",\r\n \"wristband\",\r\n \"tube\",\r\n \"sail\",\r\n \"ice\",\r\n \"numeral\",\r\n \"highway\",\r\n \"wallpaper\",\r\n \"utensil\",\r\n \"cart\",\r\n \"box\",\r\n \"sculpture\",\r\n \"bow\",\r\n \"pepperoni\",\r\n \"railway\",\r\n \"head\",\r\n \"stoplight\",\r\n \"jersey\",\r\n \"cheese\",\r\n \"snowboard\",\r\n \"writing\",\r\n \"hood\",\r\n \"belt\",\r\n \"horse\",\r\n \"teddy\",\r\n \"tarpaulin\",\r\n \"camera\",\r\n \"streetlight\",\r\n \"ivory\",\r\n \"emblem\",\r\n \"countertop\",\r\n \"fire\",\r\n \"poster\",\r\n \"smile\",\r\n \"holder\",\r\n \"hair\",\r\n \"fur\",\r\n \"car\",\r\n \"tree\",\r\n \"soup\",\r\n \"cake\",\r\n \"bathtub\",\r\n \"pickle\",\r\n \"toilet\",\r\n \"petal\",\r\n \"ceiling\",\r\n \"cone\",\r\n \"door\",\r\n \"pedestal\",\r\n \"mouse\",\r\n \"cream\",\r\n \"mountain\",\r\n \"microphone\",\r\n \"necktie\",\r\n \"floor\",\r\n \"sailboat\",\r\n \"headlight\",\r\n \"note\",\r\n \"field\",\r\n \"pad\",\r\n \"bristle\",\r\n \"balcony\",\r\n \"soap\",\r\n \"vegetation\",\r\n \"pizza\",\r\n \"plant\",\r\n \"foam\",\r\n \"skirt\",\r\n \"letter\",\r\n \"wave\",\r\n \"collar\",\r\n \"building\",\r\n \"land\",\r\n \"light\",\r\n \"nail\",\r\n \"paw\",\r\n \"mask\",\r\n \"face\",\r\n \"pipe\",\r\n \"bedspread\",\r\n \"mast\",\r\n \"road\",\r\n \"wine\",\r\n \"buoy\",\r\n \"hotdog\"\r\n)\r\n\r\n def load_annotations(self, ann_file):\r\n 
self.coco = COCO(ann_file)\r\n self.cat_ids = self.coco.getCatIds()\r\n self.cat2label = {\r\n cat_id: i + 1\r\n for i, cat_id in enumerate(self.cat_ids)\r\n }\r\n self.img_ids = self.coco.getImgIds()\r\n img_infos = []\r\n for i in self.img_ids:\r\n info = self.coco.loadImgs([i])[0]\r\n info['filename'] = info['file_name']\r\n img_infos.append(info)\r\n return img_infos\r\n\r\n def get_ann_info(self, idx):\r\n img_id = self.img_infos[idx]['id']\r\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\r\n ann_info = self.coco.loadAnns(ann_ids)\r\n return self._parse_ann_info(self.img_infos[idx], ann_info)\r\n\r\n def _filter_imgs(self, min_size=32):\r\n \"\"\"Filter images too small or without ground truths.\"\"\"\r\n valid_inds = []\r\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\r\n for i, img_info in enumerate(self.img_infos):\r\n if self.img_ids[i] not in ids_with_ann:\r\n continue\r\n if min(img_info['width'], img_info['height']) >= min_size:\r\n valid_inds.append(i)\r\n return valid_inds\r\n\r\n def _parse_ann_info(self, img_info, ann_info):\r\n \"\"\"Parse bbox and mask annotation.\r\n\r\n Args:\r\n ann_info (list[dict]): Annotation info of an image.\r\n with_mask (bool): Whether to parse mask annotations.\r\n\r\n Returns:\r\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\r\n labels, masks, seg_map. \"masks\" are raw annotations and not\r\n decoded into binary masks.\r\n \"\"\"\r\n gt_bboxes = []\r\n gt_labels = []\r\n gt_bboxes_ignore = []\r\n gt_masks_ann = []\r\n\r\n for i, ann in enumerate(ann_info):\r\n if ann.get('ignore', False):\r\n continue\r\n x1, y1, w, h = ann['bbox']\r\n if ann['area'] <= 0 or w < 1 or h < 1:\r\n continue\r\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\r\n if ann.get('iscrowd', False):\r\n gt_bboxes_ignore.append(bbox)\r\n else:\r\n gt_bboxes.append(bbox)\r\n gt_labels.append(self.cat2label[ann['category_id']])\r\n gt_masks_ann.append(ann['segmentation'])\r\n\r\n if gt_bboxes:\r\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\r\n gt_labels = np.array(gt_labels, dtype=np.int64)\r\n else:\r\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\r\n gt_labels = np.array([], dtype=np.int64)\r\n\r\n if gt_bboxes_ignore:\r\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\r\n else:\r\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\r\n\r\n seg_map = img_info['filename'].replace('jpg', 'png')\r\n\r\n ann = dict(\r\n bboxes=gt_bboxes,\r\n labels=gt_labels,\r\n bboxes_ignore=gt_bboxes_ignore,\r\n masks=gt_masks_ann,\r\n seg_map=seg_map)\r\n\r\n return ann\r\n", "import numpy as np\r\n\r\n\r\ndef bbox_overlaps(bboxes1, bboxes2, mode='iou'):\r\n \"\"\"Calculate the ious between each bbox of bboxes1 and bboxes2.\r\n\r\n Args:\r\n bboxes1(ndarray): shape (n, 4)\r\n bboxes2(ndarray): shape (k, 4)\r\n mode(str): iou (intersection over union) or iof (intersection\r\n over foreground)\r\n\r\n Returns:\r\n ious(ndarray): shape (n, k)\r\n \"\"\"\r\n\r\n assert mode in ['iou', 'iof']\r\n\r\n bboxes1 = bboxes1.astype(np.float32)\r\n bboxes2 = bboxes2.astype(np.float32)\r\n rows = bboxes1.shape[0]\r\n cols = bboxes2.shape[0]\r\n ious = np.zeros((rows, cols), dtype=np.float32)\r\n if rows * cols == 0:\r\n return ious\r\n exchange = False\r\n if bboxes1.shape[0] > bboxes2.shape[0]:\r\n bboxes1, bboxes2 = bboxes2, bboxes1\r\n ious = np.zeros((cols, rows), dtype=np.float32)\r\n exchange = True\r\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\r\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\r\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] 
+ 1) * (\r\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\r\n for i in range(bboxes1.shape[0]):\r\n x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\r\n y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\r\n x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\r\n y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\r\n overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(\r\n y_end - y_start + 1, 0)\r\n if mode == 'iou':\r\n union = area1[i] + area2 - overlap\r\n else:\r\n union = area1[i] if not exchange else area2\r\n ious[i, :] = overlap / union\r\n if exchange:\r\n ious = ious.T\r\n return ious\r\n", "import torch\r\n\r\n\r\nclass AnchorGenerator(object):\r\n \"\"\"\r\n Examples:\r\n >>> from mmdet.core import AnchorGenerator\r\n >>> self = AnchorGenerator(9, [1.], [1.])\r\n >>> all_anchors = self.grid_anchors((2, 2), device='cpu')\r\n >>> print(all_anchors)\r\n tensor([[ 0., 0., 8., 8.],\r\n [16., 0., 24., 8.],\r\n [ 0., 16., 8., 24.],\r\n [16., 16., 24., 24.]])\r\n \"\"\"\r\n\r\n def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):\r\n self.base_size = base_size\r\n self.scales = torch.Tensor(scales)\r\n self.ratios = torch.Tensor(ratios)\r\n self.scale_major = scale_major\r\n self.ctr = ctr\r\n self.base_anchors = self.gen_base_anchors()\r\n\r\n @property\r\n def num_base_anchors(self):\r\n return self.base_anchors.size(0)\r\n\r\n def gen_base_anchors(self):\r\n w = self.base_size\r\n h = self.base_size\r\n if self.ctr is None:\r\n x_ctr = 0.5 * (w - 1)\r\n y_ctr = 0.5 * (h - 1)\r\n else:\r\n x_ctr, y_ctr = self.ctr\r\n\r\n h_ratios = torch.sqrt(self.ratios)\r\n w_ratios = 1 / h_ratios\r\n if self.scale_major:\r\n ws = (w * w_ratios[:, None] * self.scales[None, :]).view(-1)\r\n hs = (h * h_ratios[:, None] * self.scales[None, :]).view(-1)\r\n else:\r\n ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1)\r\n hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1)\r\n\r\n # yapf: disable\r\n base_anchors = torch.stack(\r\n [\r\n x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),\r\n x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)\r\n ],\r\n dim=-1).round()\r\n # yapf: enable\r\n\r\n return base_anchors\r\n\r\n def _meshgrid(self, x, y, row_major=True):\r\n xx = x.repeat(len(y))\r\n yy = y.view(-1, 1).repeat(1, len(x)).view(-1)\r\n if row_major:\r\n return xx, yy\r\n else:\r\n return yy, xx\r\n\r\n def grid_anchors(self, featmap_size, stride=16, device='cuda'):\r\n base_anchors = self.base_anchors.to(device)\r\n\r\n feat_h, feat_w = featmap_size\r\n shift_x = torch.arange(0, feat_w, device=device) * stride\r\n shift_y = torch.arange(0, feat_h, device=device) * stride\r\n shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\r\n shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)\r\n shifts = shifts.type_as(base_anchors)\r\n # first feat_w elements correspond to the first row of shifts\r\n # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\r\n # shifted anchors (K, A, 4), reshape to (K*A, 4)\r\n\r\n all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\r\n all_anchors = all_anchors.view(-1, 4)\r\n # first A rows correspond to A anchors of (0, 0) in feature map,\r\n # then (0, 1), (0, 2), ...\r\n return all_anchors\r\n\r\n def valid_flags(self, featmap_size, valid_size, device='cuda'):\r\n feat_h, feat_w = featmap_size\r\n valid_h, valid_w = valid_size\r\n assert valid_h <= feat_h and valid_w <= feat_w\r\n valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)\r\n valid_y = torch.zeros(feat_h, dtype=torch.uint8, 
device=device)\r\n valid_x[:valid_w] = 1\r\n valid_y[:valid_h] = 1\r\n valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\r\n valid = valid_xx & valid_yy\r\n valid = valid[:, None].expand(\r\n valid.size(0), self.num_base_anchors).contiguous().view(-1)\r\n return valid\r\n", "from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,\r\n merge_aug_masks, merge_aug_proposals, multiclass_nms)\r\n\r\n\r\nclass RPNTestMixin(object):\r\n\r\n def simple_test_rpn(self, x, img_meta, rpn_test_cfg):\r\n rpn_outs = self.rpn_head(x)\r\n proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg)\r\n proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)\r\n return proposal_list\r\n\r\n def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):\r\n imgs_per_gpu = len(img_metas[0])\r\n aug_proposals = [[] for _ in range(imgs_per_gpu)]\r\n for x, img_meta in zip(feats, img_metas):\r\n proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)\r\n for i, proposals in enumerate(proposal_list):\r\n aug_proposals[i].append(proposals)\r\n # reorganize the order of 'img_metas' to match the dimensions\r\n # of 'aug_proposals'\r\n aug_img_metas = []\r\n for i in range(imgs_per_gpu):\r\n aug_img_meta = []\r\n for j in range(len(img_metas)):\r\n aug_img_meta.append(img_metas[j][i])\r\n aug_img_metas.append(aug_img_meta)\r\n # after merging, proposals will be rescaled to the original image size\r\n merged_proposals = [\r\n merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg)\r\n for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)\r\n ]\r\n return merged_proposals\r\n\r\n\r\nclass BBoxTestMixin(object):\r\n\r\n def simple_test_bboxes(self,\r\n x,\r\n img_meta,\r\n proposals,\r\n rcnn_test_cfg,\r\n rescale=False):\r\n \"\"\"Test only det bboxes without augmentation.\"\"\"\r\n rois = bbox2roi(proposals)\r\n roi_feats = self.bbox_roi_extractor(\r\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\r\n if self.with_shared_head:\r\n roi_feats = self.shared_head(roi_feats)\r\n # if img_meta[0]['filename'] == 'data/coco/val2014/COCO_val2014_000000242605.jpg':\r\n # pass\r\n # import ipdb \r\n # ipdb.set_trace()\r\n if 'fusedfeat2sem' in str([x for x in self.bbox_head.modules()]):\r\n import torchvision\r\n import torch\r\n import ipdb\r\n # ipdb.set_trace()\r\n full_img_bboxes = torch.Tensor([[0, 0, 0, x[3].shape[3], x[3].shape[2]]]).cuda()\r\n full_img_feat = torchvision.ops.roi_align(x[3], full_img_bboxes, (16, 16))\r\n cls_score, bbox_pred = self.bbox_head(roi_feats, full_img_feat)\r\n else:\r\n cls_score, bbox_pred = self.bbox_head(roi_feats, rois)\r\n img_shape = img_meta[0]['img_shape']\r\n scale_factor = img_meta[0]['scale_factor']\r\n \r\n \r\n det_bboxes, det_labels = self.bbox_head.get_det_bboxes(\r\n rois,\r\n cls_score,\r\n bbox_pred,\r\n img_shape,\r\n scale_factor,\r\n rescale=rescale,\r\n cfg=rcnn_test_cfg)\r\n return det_bboxes, det_labels\r\n\r\n def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\r\n aug_bboxes = []\r\n aug_scores = []\r\n for x, img_meta in zip(feats, img_metas):\r\n # only one image in the batch\r\n img_shape = img_meta[0]['img_shape']\r\n scale_factor = img_meta[0]['scale_factor']\r\n flip = img_meta[0]['flip']\r\n # TODO more flexible\r\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\r\n scale_factor, flip)\r\n rois = bbox2roi([proposals])\r\n # recompute feature maps to save GPU memory\r\n roi_feats = self.bbox_roi_extractor(\r\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\r\n if 
self.with_shared_head:\r\n roi_feats = self.shared_head(roi_feats)\r\n if 'fusedfeat2sem' in str([x for x in self.bbox_head.modules()]):\r\n import torchvision\r\n import torch\r\n import ipdb\r\n # ipdb.set_trace()\r\n full_img_bboxes = torch.Tensor([[0, 0, 0, x[3].shape[3], x[3].shape[2]]]).cuda()\r\n full_img_feat = torchvision.ops.roi_align(x[3], full_img_bboxes, (16, 16))\r\n cls_score, bbox_pred = self.bbox_head(roi_feats, full_img_feat)\r\n elif self.bbox_head.has_matcher:\r\n cls_score, bbox_pred = self.bbox_head(roi_feats, rois)\r\n else:\r\n cls_score, bbox_pred = self.bbox_head(roi_feats)\r\n \r\n bboxes, scores = self.bbox_head.get_det_bboxes(\r\n rois,\r\n cls_score,\r\n bbox_pred,\r\n img_shape,\r\n scale_factor,\r\n rescale=False,\r\n cfg=None)\r\n aug_bboxes.append(bboxes)\r\n aug_scores.append(scores)\r\n # after merging, bboxes will be rescaled to the original image size\r\n merged_bboxes, merged_scores = merge_aug_bboxes(\r\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\r\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\r\n rcnn_test_cfg.score_thr,\r\n rcnn_test_cfg.nms,\r\n rcnn_test_cfg.max_per_img)\r\n return det_bboxes, det_labels\r\n\r\n\r\nclass MaskTestMixin(object):\r\n\r\n def simple_test_mask(self,\r\n x,\r\n img_meta,\r\n det_bboxes,\r\n det_labels,\r\n rescale=False):\r\n # image shape of the first image in the batch (only one)\r\n ori_shape = img_meta[0]['ori_shape']\r\n scale_factor = img_meta[0]['scale_factor']\r\n if det_bboxes.shape[0] == 0:\r\n segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]\r\n else:\r\n # if det_bboxes is rescaled to the original image size, we need to\r\n # rescale it back to the testing scale to obtain RoIs.\r\n _bboxes = (\r\n det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)\r\n mask_rois = bbox2roi([_bboxes])\r\n mask_feats = self.mask_roi_extractor(\r\n x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)\r\n if self.with_shared_head:\r\n mask_feats = self.shared_head(mask_feats)\r\n mask_pred = self.mask_head(mask_feats)\r\n segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes,\r\n det_labels,\r\n self.test_cfg.rcnn,\r\n ori_shape, scale_factor,\r\n rescale)\r\n return segm_result\r\n\r\n def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\r\n if det_bboxes.shape[0] == 0:\r\n segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]\r\n else:\r\n aug_masks = []\r\n for x, img_meta in zip(feats, img_metas):\r\n img_shape = img_meta[0]['img_shape']\r\n scale_factor = img_meta[0]['scale_factor']\r\n flip = img_meta[0]['flip']\r\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\r\n scale_factor, flip)\r\n mask_rois = bbox2roi([_bboxes])\r\n mask_feats = self.mask_roi_extractor(\r\n x[:len(self.mask_roi_extractor.featmap_strides)],\r\n mask_rois)\r\n if self.with_shared_head:\r\n mask_feats = self.shared_head(mask_feats)\r\n mask_pred = self.mask_head(mask_feats)\r\n # convert to numpy array to save memory\r\n aug_masks.append(mask_pred.sigmoid().cpu().numpy())\r\n merged_masks = merge_aug_masks(aug_masks, img_metas,\r\n self.test_cfg.rcnn)\r\n\r\n ori_shape = img_metas[0][0]['ori_shape']\r\n segm_result = self.mask_head.get_seg_masks(\r\n merged_masks,\r\n det_bboxes,\r\n det_labels,\r\n self.test_cfg.rcnn,\r\n ori_shape,\r\n scale_factor=1.0,\r\n rescale=False)\r\n return segm_result\r\n", "import torch\r\nimport torch.nn as nn\r\nfrom torch.nn.modules.utils import _pair\r\nfrom ..utils import ConvModule\r\n\r\n\r\nfrom mmdet.core 
import (auto_fp16)\r\n\r\nfrom ..registry import HEADS\r\n\r\n\r\n@HEADS.register_module\r\nclass GlobalContextSemanticHead(nn.Module):\r\n \"\"\"Simplest RoI head, with only two fc layers for semantic and\r\n regression respectively\"\"\"\r\n\r\n def __init__(self,\r\n roi_feat_size=7,\r\n in_channels=256,\r\n num_convs=3,\r\n conv_out_channels=256,\r\n num_fcs=1,\r\n fc_out_channels=1024,\r\n semantic_dims=1024,\r\n num_classes=49,\r\n conv_cfg=None,\r\n norm_cfg=None\r\n ):\r\n super(GlobalContextSemanticHead, self).__init__()\r\n\r\n self.roi_feat_size = _pair(roi_feat_size)\r\n self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\r\n self.in_channels = in_channels\r\n self.fp16_enabled = False\r\n self.conv_out_channels = conv_out_channels\r\n self.fc_out_channels = fc_out_channels\r\n in_channels = self.in_channels\r\n self.conv_cfg = conv_cfg\r\n self.norm_cfg = norm_cfg\r\n\r\n self.out_dim = semantic_dims\r\n self.relu = nn.ReLU(inplace=True)\r\n self.convs, self.fcs, self.last_dim = self._add_conv_fc_branch(num_convs, num_fcs, in_channels)\r\n # self.fc2 = nn.Linear(self.last_dim, self.out_dim)\r\n self.final_fc = nn.Linear(self.last_dim, num_classes)\r\n\r\n\r\n\r\n self.debug_imgs = None\r\n\r\n def _add_conv_fc_branch(self,\r\n num_convs,\r\n num_fcs,\r\n in_channels):\r\n last_layer_dim = in_channels\r\n context_convs = nn.ModuleList()\r\n if num_convs > 0:\r\n for i in range(num_convs):\r\n conv_in_channels = (\r\n last_layer_dim if i == 0 else self.conv_out_channels)\r\n context_convs.append(\r\n ConvModule(\r\n conv_in_channels,\r\n self.conv_out_channels,\r\n 3,\r\n padding=1,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg))\r\n last_layer_dim = self.conv_out_channels\r\n # add branch specific fc layers\r\n context_fcs = nn.ModuleList()\r\n if num_fcs > 0:\r\n last_layer_dim *= self.roi_feat_area\r\n for i in range(num_fcs):\r\n fc_in_channels = (\r\n last_layer_dim if i == 0 else self.fc_out_channels)\r\n context_fcs.append(\r\n nn.Linear(fc_in_channels, self.fc_out_channels))\r\n last_layer_dim = self.fc_out_channels\r\n return context_convs, context_fcs, last_layer_dim\r\n\r\n def init_weights(self):\r\n nn.init.normal_(self.final_fc.weight, 0, 0.001)\r\n nn.init.constant_(self.final_fc.bias, 0)\r\n # nn.init.normal_(self.fc2.weight, 0, 0.001)\r\n # nn.init.constant_(self.fc2.bias, 0)\r\n\r\n @auto_fp16()\r\n def forward(self, x):\r\n for conv in self.convs:\r\n x = conv(x)\r\n if x.dim() > 2:\r\n x = x.view(x.size(0), -1)\r\n for fc in self.fcs:\r\n x = self.relu(fc(x))\r\n # x = self.relu(self.fc2(x))\r\n x = self.final_fc(x) # 1024*49\r\n return x", "from __future__ import division\r\nimport math\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom mmcv.runner.utils import get_dist_info\r\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\r\nfrom torch.utils.data import Sampler\r\n\r\n\r\nclass DistributedSampler(_DistributedSampler):\r\n\r\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\r\n super().__init__(dataset, num_replicas=num_replicas, rank=rank)\r\n self.shuffle = shuffle\r\n\r\n def __iter__(self):\r\n # deterministically shuffle based on epoch\r\n if self.shuffle:\r\n g = torch.Generator()\r\n g.manual_seed(self.epoch)\r\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\r\n else:\r\n indices = torch.arange(len(self.dataset)).tolist()\r\n\r\n # add extra samples to make it evenly divisible\r\n indices += indices[:(self.total_size - len(indices))]\r\n assert len(indices) == 
self.total_size\r\n\r\n # subsample\r\n indices = indices[self.rank:self.total_size:self.num_replicas]\r\n assert len(indices) == self.num_samples\r\n\r\n return iter(indices)\r\n\r\n\r\nclass GroupSampler(Sampler):\r\n\r\n def __init__(self, dataset, samples_per_gpu=1):\r\n assert hasattr(dataset, 'flag')\r\n self.dataset = dataset\r\n self.samples_per_gpu = samples_per_gpu\r\n self.flag = dataset.flag.astype(np.int64)\r\n self.group_sizes = np.bincount(self.flag)\r\n self.num_samples = 0\r\n for i, size in enumerate(self.group_sizes):\r\n self.num_samples += int(np.ceil(\r\n size / self.samples_per_gpu)) * self.samples_per_gpu\r\n\r\n def __iter__(self):\r\n indices = []\r\n for i, size in enumerate(self.group_sizes):\r\n if size == 0:\r\n continue\r\n indice = np.where(self.flag == i)[0]\r\n assert len(indice) == size\r\n np.random.shuffle(indice)\r\n num_extra = int(np.ceil(size / self.samples_per_gpu)\r\n ) * self.samples_per_gpu - len(indice)\r\n indice = np.concatenate(\r\n [indice, np.random.choice(indice, num_extra)])\r\n indices.append(indice)\r\n indices = np.concatenate(indices)\r\n indices = [\r\n indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\r\n for i in np.random.permutation(\r\n range(len(indices) // self.samples_per_gpu))\r\n ]\r\n indices = np.concatenate(indices)\r\n indices = indices.astype(np.int64).tolist()\r\n assert len(indices) == self.num_samples\r\n return iter(indices)\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\n\r\nclass DistributedGroupSampler(Sampler):\r\n \"\"\"Sampler that restricts data loading to a subset of the dataset.\r\n It is especially useful in conjunction with\r\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\r\n process can pass a DistributedSampler instance as a DataLoader sampler,\r\n and load a subset of the original dataset that is exclusive to it.\r\n .. 
note::\r\n Dataset is assumed to be of constant size.\r\n Arguments:\r\n dataset: Dataset used for sampling.\r\n num_replicas (optional): Number of processes participating in\r\n distributed training.\r\n rank (optional): Rank of the current process within num_replicas.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n dataset,\r\n samples_per_gpu=1,\r\n num_replicas=None,\r\n rank=None):\r\n _rank, _num_replicas = get_dist_info()\r\n if num_replicas is None:\r\n num_replicas = _num_replicas\r\n if rank is None:\r\n rank = _rank\r\n self.dataset = dataset\r\n self.samples_per_gpu = samples_per_gpu\r\n self.num_replicas = num_replicas\r\n self.rank = rank\r\n self.epoch = 0\r\n\r\n assert hasattr(self.dataset, 'flag')\r\n self.flag = self.dataset.flag\r\n self.group_sizes = np.bincount(self.flag)\r\n\r\n self.num_samples = 0\r\n for i, j in enumerate(self.group_sizes):\r\n self.num_samples += int(\r\n math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /\r\n self.num_replicas)) * self.samples_per_gpu\r\n self.total_size = self.num_samples * self.num_replicas\r\n\r\n def __iter__(self):\r\n # deterministically shuffle based on epoch\r\n g = torch.Generator()\r\n g.manual_seed(self.epoch)\r\n\r\n indices = []\r\n for i, size in enumerate(self.group_sizes):\r\n if size > 0:\r\n indice = np.where(self.flag == i)[0]\r\n assert len(indice) == size\r\n indice = indice[list(torch.randperm(int(size),\r\n generator=g))].tolist()\r\n extra = int(\r\n math.ceil(\r\n size * 1.0 / self.samples_per_gpu / self.num_replicas)\r\n ) * self.samples_per_gpu * self.num_replicas - len(indice)\r\n # pad indice\r\n tmp = indice.copy()\r\n for _ in range(extra // size):\r\n indice.extend(tmp)\r\n indice.extend(tmp[:extra % size])\r\n indices.extend(indice)\r\n\r\n assert len(indices) == self.total_size\r\n\r\n indices = [\r\n indices[j] for i in list(\r\n torch.randperm(\r\n len(indices) // self.samples_per_gpu, generator=g))\r\n for j in range(i * self.samples_per_gpu, (i + 1) *\r\n self.samples_per_gpu)\r\n ]\r\n\r\n # subsample\r\n offset = self.num_samples * self.rank\r\n indices = indices[offset:offset + self.num_samples]\r\n assert len(indices) == self.num_samples\r\n\r\n return iter(indices)\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\n def set_epoch(self, epoch):\r\n self.epoch = epoch\r\n" ]
[ [ "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.minimum", "numpy.zeros", "numpy.maximum" ], [ "torch.zeros", "torch.sqrt", "torch.stack", "torch.arange", "torch.Tensor" ], [ "torch.Tensor" ], [ "torch.nn.Linear", "torch.nn.modules.utils._pair", "torch.nn.ModuleList", "torch.nn.init.constant_", "torch.nn.ReLU", "torch.nn.init.normal_" ], [ "numpy.bincount", "numpy.concatenate", "numpy.ceil", "numpy.random.choice", "torch.Generator", "numpy.random.shuffle", "numpy.where" ] ]
KungCheops/aoc2020
[ "6e11ddf93fd99e86b58af19fc328cd54f78834bb" ]
[ "day22/day22.py" ]
[ "import sys\nfrom collections import deque\nimport numpy as np\n\nclass Player():\n def __init__(self, id, deck):\n self.id = id\n self.deck = deque(deck)\n\n def __str__(self):\n return f'<Player({self.id}, {self.deck})>'\n\n def __repr__(self):\n return str(self)\n\n def play_card(self):\n return self.deck.popleft()\n\n def add_card(self, card):\n self.deck.append(card)\n\n def has_cards(self):\n return len(self.deck) > 0\n\ndef get_input():\n with open(sys.argv[2], 'r') as f:\n state = 0\n for line in f:\n line = parse_line(line)\n if state == 0:\n id = int(line[7:-1])\n deck = list()\n state = 1\n elif state == 1:\n if line == '':\n state = 0\n yield Player(id, deck)\n else:\n deck.append(int(line))\n yield Player(id, deck)\n\n\ndef parse_line(line):\n return line.strip()\n\ndef calculate_score(player):\n deck_size = len(player.deck)\n return sum((deck_size - i) * element for i, element in enumerate(player.deck))\n\ndef part1():\n players = list(get_input())\n while all(player.has_cards() for player in players):\n played_cards = [player.play_card() for player in players]\n winner = np.argmax(played_cards)\n for card in sorted(played_cards, reverse=True):\n players[winner].add_card(card)\n\n return max(calculate_score(player) for player in players)\n\nprevious_states = set()\n\ndef part2():\n players = list(get_input())\n while all(player.has_cards() for player in players):\n played_cards = [player.play_card() for player in players]\n winner = np.argmax(played_cards)\n for card in sorted(played_cards, reverse=True):\n players[winner].add_card(card)\n\n return max(calculate_score(player) for player in players)\n\nif __name__ == '__main__':\n if sys.argv[1] == '1':\n print(part1())\n elif sys.argv[1] == '2':\n print(part2())\n" ]
[ [ "numpy.argmax" ] ]
brodderickrodriguez/EMAworkbench
[ "90031223a4b6feb49633d45816e20981dc9415a0", "90031223a4b6feb49633d45816e20981dc9415a0" ]
[ "ema_workbench/examples/prim_constrained.py", "ema_workbench/analysis/scenario_discovery_util.py" ]
[ "'''\na short example on how to use the constrained prim function.\n\nfor more details see Kwakkel (2019) A generalized many‐objective optimization\napproach for scenario discovery, doi: https://doi.org/10.1002/ffo2.8\n\n'''\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom ema_workbench.analysis import prim\nfrom ema_workbench.util import ema_logging\n\nema_logging.log_to_stderr(ema_logging.INFO)\n\ndata = pd.read_csv('./data/bryant et al 2010 data.csv', index_col=False)\nx = data.iloc[:, 2:11]\ny = data.iloc[:, 15].values\n\nbox = prim.run_constrained_prim(x, y, peel_alpha=0.1)\n\nbox.show_tradeoff()\nbox.inspect(35)\nbox.inspect(35, style='graph')\n\nplt.show()\n", "'''\nScenario discovery utilities used by both :mod:`cart` and :mod:`prim`\n'''\nfrom __future__ import (absolute_import, print_function, division,\n unicode_literals)\n\nimport abc\nimport enum\nimport itertools\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom mpl_toolkits.axes_grid1 import host_subplot # @UnresolvedImports\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport seaborn as sns\n\nfrom .plotting_util import COLOR_LIST, make_legend\n\n# Created on May 24, 2015\n#\n# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>\n\n__all__ = [\"RuleInductionType\"]\n\n\nclass RuleInductionType(enum.Enum):\n REGRESSION = 'regression'\n '''constant indicating regression mode'''\n\n BINARY = 'binary'\n '''constant indicating binary classification mode. This is the most\n common used mode in scenario discovery'''\n\n CLASSIFICATION = 'classification'\n '''constant indicating classification mode'''\n\n\ndef _get_sorted_box_lims(boxes, box_init):\n '''Sort the uncertainties for each box in boxes based on a\n normalization given box_init. Unrestricted dimensions are dropped.\n The sorting is based on the normalization of the first box in boxes.\n\n Parameters\n ----------\n boxes : list of numpy structured arrays\n box_init : numpy structured array\n\n Returns\n -------\n tuple\n with the sorted boxes, and the list of restricted uncertainties\n\n '''\n\n # determine the uncertainties that are being restricted\n # in one or more boxes\n uncs = set()\n for box in boxes:\n us = _determine_restricted_dims(box, box_init)\n uncs = uncs.union(us)\n uncs = np.asarray(list(uncs))\n\n # normalize the range for the first box\n box_lim = boxes[0]\n nbl = _normalize(box_lim, box_init, uncs)\n box_size = nbl[:, 1] - nbl[:, 0]\n\n # sort the uncertainties based on the normalized size of the\n # restricted dimensions\n uncs = uncs[np.argsort(box_size)]\n box_lims = [box for box in boxes]\n\n return box_lims, uncs.tolist()\n\n\ndef _make_box(x):\n '''\n Make a box that encompasses all the data\n\n Parameters\n ----------\n x : DataFrame\n\n Returns\n -------\n DataFrame\n\n\n '''\n\n def limits(x):\n if (pd.api.types.is_integer_dtype(x.dtype)) or\\\n (pd.api.types.is_float_dtype(x.dtype)): # @UndefinedVariable\n return pd.Series([x.min(), x.max()])\n else:\n return pd.Series([set(x), set(x)])\n\n return x.apply(limits)\n\n\ndef _normalize(box_lim, box_init, uncertainties):\n '''Normalize the given box lim to the unit interval derived\n from box init for the specified uncertainties.\n\n Categorical uncertainties are normalized based on fractionated. 
So\n value specifies the fraction of categories in the box_lim.\n\n Parameters\n ----------\n box_lim : DataFrame\n box_init : DataFrame\n uncertainties : list of strings\n valid names of columns that exist in both structured\n arrays.\n\n Returns\n -------\n ndarray\n a numpy array of the shape (2, len(uncertainties) with the\n normalized box limits.\n\n\n '''\n\n # normalize the range for the first box\n norm_box_lim = np.zeros((len(uncertainties), box_lim.shape[0]))\n\n for i, u in enumerate(uncertainties):\n dtype = box_lim[u].dtype\n if dtype == np.dtype(object):\n nu = len(box_lim.loc[0, u]) / len(box_init.loc[0, u])\n nl = 0\n else:\n lower, upper = box_lim.loc[:, u]\n dif = (box_init.loc[1, u] - box_init.loc[0, u])\n a = 1 / dif\n b = -1 * box_init.loc[0, u] / dif\n nl = a * lower + b\n nu = a * upper + b\n norm_box_lim[i, :] = nl, nu\n return norm_box_lim\n\n\ndef _determine_restricted_dims(box_limits, box_init):\n '''returns a list of dimensions that is restricted\n\n Parameters\n ----------\n box_limits : pd.DataFrame\n box_init : pd.DataFrame\n\n Returns\n -------\n list of str\n\n '''\n cols = box_init.columns.values\n restricted_dims = cols[np.all(\n box_init.values == box_limits.values, axis=0) == False]\n# restricted_dims = [column for column in box_init.columns if not\n# np.all(box_init[column].values == box_limits[column].values)]\n return restricted_dims\n\n\ndef _determine_nr_restricted_dims(box_lims, box_init):\n '''\n\n determine the number of restriced dimensions of a box given\n compared to the inital box that contains all the data\n\n Parameters\n ----------\n box_lims : structured numpy array\n a specific box limit\n box_init : structured numpy array\n the initial box containing all data points\n\n\n Returns\n -------\n int\n\n '''\n\n return _determine_restricted_dims(box_lims, box_init).shape[0]\n\n\ndef _compare(a, b):\n '''compare two boxes, for each dimension return True if the\n same and false otherwise'''\n dtypesDesc = a.dtype.descr\n logical = np.ones((len(dtypesDesc,)), dtype=np.bool)\n for i, entry in enumerate(dtypesDesc):\n name = entry[0]\n logical[i] = logical[i] &\\\n (a[name][0] == b[name][0]) &\\\n (a[name][1] == b[name][1])\n return logical\n\n\ndef _in_box(x, boxlim):\n '''\n\n returns the a boolean index indicated which data points are inside\n and which are outside of the given box_lims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxlim : pd.DataFrame\n\n Returns\n -------\n ndarray\n boolean 1D array\n\n Raises\n ------\n Attribute error if not numbered columns are not pandas\n category dtype\n\n '''\n\n x_numbered = x.select_dtypes(np.number)\n boxlim_numbered = boxlim.select_dtypes(np.number)\n logical = (boxlim_numbered.loc[0, :].values <= x_numbered.values) &\\\n (x_numbered.values <= boxlim_numbered.loc[1, :].values)\n logical = logical.all(axis=1)\n\n # TODO:: how to speed this up\n for column, values in x.select_dtypes(exclude=np.number).iteritems():\n entries = boxlim.loc[0, column]\n not_present = set(values.cat.categories.values) - entries\n\n if not_present:\n # what other options do we have here....\n l = pd.isnull(x[column].cat.remove_categories(list(entries)))\n logical = l & logical\n return logical\n\n\ndef _setup(results, classify, incl_unc=[]):\n \"\"\"helper function for setting up CART or PRIM\n\n Parameters\n ----------\n results : tuple of DataFrame and dict with numpy arrays\n the return from :meth:`perform_experiments`.\n classify : string, function or callable\n either a string denoting the outcome of interest 
to\n use or a function.\n incl_unc : list of strings\n\n Notes\n -----\n CART, PRIM, and feature scoring only work for a 1D numpy array\n for the dependent variable\n\n Raises\n ------\n TypeError\n if classify is not a string or a callable.\n\n \"\"\"\n x, outcomes = results\n\n if incl_unc:\n drop_names = set(x.columns.values.tolist()) - set(incl_unc)\n x = x.drop(drop_names, axis=1)\n if isinstance(classify, str):\n y = outcomes[classify]\n mode = RuleInductionType.REGRESSION\n elif callable(classify):\n y = classify(outcomes)\n mode = RuleInductionType.BINARY\n else:\n raise TypeError(\"unknown type for classify\")\n\n assert y.ndim == 1\n\n return x, y, mode\n\n\ndef _calculate_quasip(x, y, box, Hbox, Tbox):\n '''\n\n Parameters\n ----------\n x : DataFrame\n y : np.array\n box : DataFrame\n Hbox : int\n Tbox : int\n\n '''\n logical = _in_box(x, box)\n yi = y[logical]\n\n # total nr. of cases in box with one restriction removed\n Tj = yi.shape[0]\n\n # total nr. of cases of interest in box with one restriction\n # removed\n Hj = np.sum(yi)\n\n p = Hj / Tj\n\n Hbox = int(Hbox)\n Tbox = int(Tbox)\n\n # force one sided\n qp = sp.stats.binom_test(\n Hbox, Tbox, p, alternative='greater') # @UndefinedVariable\n\n return qp\n\n\ndef plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims):\n ''' helper function for pair wise scatter plotting\n\n Parameters\n ----------\n x : DataFrame\n the experiments\n y : numpy array\n the outcome of interest\n box_lim : DataFrame\n a boxlim\n box_init : DataFrame\n restricted_dims : collection of strings\n list of uncertainties that define the boxlims\n\n '''\n\n x = x[restricted_dims]\n data = x.copy()\n\n # TODO:: have option to change\n # diag to CDF, gives you effectively the\n # regional sensitivity analysis results\n categorical_columns = data.select_dtypes('category').columns.values\n categorical_mappings = {}\n for column in categorical_columns:\n\n # reorder categorical data so we\n # can capture them in a single column\n categories_inbox = boxlim.at[0, column]\n categories_all = box_init.at[0, column]\n missing = categories_all - categories_inbox\n categories = list(categories_inbox) + list(missing)\n print(column, categories)\n data[column] = data[column].cat.set_categories(categories)\n\n # keep the mapping for updating ticklabels\n categorical_mappings[column] = dict(\n enumerate(data[column].cat.categories))\n\n # replace column with codes\n data[column] = data[column].cat.codes\n\n data['y'] = y # for testing\n grid = sns.pairplot(data=data, hue='y', vars=x.columns.values)\n\n cats = set(categorical_columns)\n for row, ylabel in zip(grid.axes, grid.y_vars):\n ylim = boxlim[ylabel]\n\n if ylabel in cats:\n y = -0.2\n height = len(ylim[0]) - 0.6 # 2 * 0.2\n else:\n y = ylim[0]\n height = ylim[1] - ylim[0]\n\n for ax, xlabel in zip(row, grid.x_vars):\n if ylabel == xlabel:\n continue\n\n if xlabel in cats:\n xlim = boxlim.at[0, xlabel]\n x = -0.2\n width = len(xlim) - 0.6 # 2 * 0.2\n else:\n xlim = boxlim[xlabel]\n x = xlim[0]\n width = xlim[1] - xlim[0]\n\n xy = x, y\n box = patches.Rectangle(xy, width, height, edgecolor='red',\n facecolor='none', lw=3)\n ax.add_patch(box)\n\n # do the yticklabeling for categorical rows\n for row, ylabel in zip(grid.axes, grid.y_vars):\n if ylabel in cats:\n ax = row[0]\n labels = []\n for entry in ax.get_yticklabels():\n _, value = entry.get_position()\n try:\n label = categorical_mappings[ylabel][value]\n except KeyError:\n label = ''\n labels.append(label)\n ax.set_yticklabels(labels)\n\n # do the 
xticklabeling for categorical columns\n for ax, xlabel in zip(grid.axes[-1], grid.x_vars):\n if xlabel in cats:\n labels = []\n locs = []\n mapping = categorical_mappings[xlabel]\n for i in range(-1, len(mapping) + 1):\n locs.append(i)\n try:\n label = categorical_mappings[xlabel][i]\n except KeyError:\n label = ''\n labels.append(label)\n ax.set_xticks(locs)\n ax.set_xticklabels(labels, rotation=90)\n return grid\n\n\ndef _setup_figure(uncs):\n '''\n\n helper function for creating the basic layout for the figures that\n show the box lims.\n\n '''\n nr_unc = len(uncs)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # create the shaded grey background\n rect = mpl.patches.Rectangle((0, -0.5), 1, nr_unc + 1.5,\n alpha=0.25,\n facecolor=\"#C0C0C0\",\n edgecolor=\"#C0C0C0\")\n ax.add_patch(rect)\n ax.set_xlim(left=-0.2, right=1.2)\n ax.set_ylim(top=-0.5, bottom=nr_unc - 0.5)\n ax.yaxis.set_ticks([y for y in range(nr_unc)])\n ax.xaxis.set_ticks([0, 0.25, 0.5, 0.75, 1])\n ax.set_yticklabels(uncs[::-1])\n return fig, ax\n\n\ndef plot_box(boxlim, qp_values, box_init, uncs,\n coverage, density,\n ticklabel_formatter=\"{} ({})\",\n boxlim_formatter=\"{: .2g}\",\n table_formatter=\"{:.3g}\"):\n '''Helper function for parallel coordinate style visualization\n of a box\n\n Parameters\n ----------\n boxlim : DataFrame\n qp_values : dict\n box_init : DataFrame\n uncs : list\n coverage : float\n density : float\n ticklabel_formatter : str\n boxlim_formatter : str\n table_formatter : str\n\n Returns\n -------\n a Figure instance\n\n\n '''\n norm_box_lim = _normalize(boxlim, box_init, uncs)\n\n fig, ax = _setup_figure(uncs)\n for j, u in enumerate(uncs):\n # we want to have the most restricted dimension\n # at the top of the figure\n xj = len(uncs) - j - 1\n\n plot_unc(box_init, xj, j, 0, norm_box_lim,\n boxlim, u, ax)\n\n # new part\n dtype = box_init[u].dtype\n\n props = {'facecolor': 'white',\n 'edgecolor': 'white',\n 'alpha': 0.25}\n y = xj\n\n if dtype == object:\n elements = sorted(list(box_init[u][0]))\n max_value = (len(elements) - 1)\n values = boxlim.loc[0, u]\n x = [elements.index(entry) for entry in\n values]\n x = [entry / max_value for entry in x]\n\n for xi, label in zip(x, values):\n ax.text(xi, y - 0.2, label, ha='center', va='center',\n bbox=props, color='blue', fontweight='normal')\n\n else:\n props = {'facecolor': 'white',\n 'edgecolor': 'white',\n 'alpha': 0.25}\n\n # plot limit text labels\n x = norm_box_lim[j, 0]\n\n if not np.allclose(x, 0):\n label = boxlim_formatter.format(boxlim.loc[0, u])\n ax.text(x, y - 0.2, label, ha='center', va='center',\n bbox=props, color='blue', fontweight='normal')\n\n x = norm_box_lim[j][1]\n if not np.allclose(x, 1):\n label = boxlim_formatter.format(boxlim.loc[1, u])\n ax.text(x, y - 0.2, label, ha='center', va='center',\n bbox=props, color='blue', fontweight='normal')\n\n # plot uncertainty space text labels\n x = 0\n label = boxlim_formatter.format(box_init.loc[0, u])\n ax.text(x - 0.01, y, label, ha='right', va='center',\n bbox=props, color='black', fontweight='normal')\n\n x = 1\n label = boxlim_formatter.format(box_init.loc[1, u])\n ax.text(x + 0.01, y, label, ha='left', va='center',\n bbox=props, color='black', fontweight='normal')\n\n # set y labels\n qp_formatted = {}\n for key, values in qp_values.items():\n values = [vi for vi in values if vi != -1]\n\n if len(values) == 1:\n value = '{:.2g}'.format(values[0])\n else:\n value = '{:.2g}, {:.2g}'.format(*values)\n qp_formatted[key] = value\n\n labels = [ticklabel_formatter.format(u, 
qp_formatted[u]) for u in\n uncs]\n\n labels = labels[::-1]\n ax.set_yticklabels(labels)\n\n # remove x tick labels\n ax.set_xticklabels([])\n\n coverage = table_formatter.format(coverage)\n density = table_formatter.format(density)\n\n # add table to the left\n ax.table(cellText=[[coverage], [density]],\n colWidths=[0.1] * 2,\n rowLabels=['coverage', 'density'],\n colLabels=None,\n loc='right',\n bbox=[1.2, 0.9, 0.1, 0.1],)\n plt.subplots_adjust(left=0.1, right=0.75)\n\n return fig\n\n\ndef plot_ppt(peeling_trajectory):\n '''show the peeling and pasting trajectory in a figure'''\n\n ax = host_subplot(111)\n ax.set_xlabel(\"peeling and pasting trajectory\")\n\n par = ax.twinx()\n par.set_ylabel(\"nr. restricted dimensions\")\n\n ax.plot(peeling_trajectory['mean'], label=\"mean\")\n ax.plot(peeling_trajectory['mass'], label=\"mass\")\n ax.plot(peeling_trajectory['coverage'], label=\"coverage\")\n ax.plot(peeling_trajectory['density'], label=\"density\")\n par.plot(peeling_trajectory['res_dim'], label=\"restricted dims\")\n ax.grid(True, which='both')\n ax.set_ylim(bottom=0, top=1)\n\n fig = plt.gcf()\n\n make_legend(['mean', 'mass', 'coverage', 'density',\n 'restricted_dim'],\n ax, ncol=5, alpha=1)\n return fig\n\n\ndef plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis): # @UndefinedVariable\n '''Visualize the trade off between coverage and density. Color\n is used to denote the number of restricted dimensions.\n\n Parameters\n ----------\n cmap : valid matplotlib colormap\n\n Returns\n -------\n a Figure instance\n\n '''\n\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n\n boundaries = np.arange(-0.5,\n max(peeling_trajectory['res_dim']) + 1.5,\n step=1)\n ncolors = cmap.N\n norm = mpl.colors.BoundaryNorm(boundaries, ncolors)\n\n p = ax.scatter(peeling_trajectory['coverage'],\n peeling_trajectory['density'],\n c=peeling_trajectory['res_dim'],\n norm=norm,\n cmap=cmap)\n ax.set_ylabel('density')\n ax.set_xlabel('coverage')\n ax.set_ylim(bottom=0, top=1.2)\n ax.set_xlim(left=0, right=1.2)\n\n ticklocs = np.arange(0,\n max(peeling_trajectory['res_dim']) + 1,\n step=1)\n cb = fig.colorbar(p, spacing='uniform', ticks=ticklocs,\n drawedges=True)\n cb.set_label(\"nr. 
of restricted dimensions\")\n\n return fig\n\n\ndef plot_unc(box_init, xi, i, j, norm_box_lim, box_lim, u, ax,\n color=sns.color_palette()[0]):\n '''\n\n Parameters:\n ----------\n xi : int\n the row at which to plot\n i : int\n the index of the uncertainty being plotted\n j : int\n the index of the box being plotted\n u : string\n the uncertainty being plotted:\n ax : axes instance\n the ax on which to plot\n\n '''\n\n dtype = box_init[u].dtype\n\n y = xi - j * 0.1\n\n if dtype == object:\n elements = sorted(list(box_init[u][0]))\n max_value = (len(elements) - 1)\n box_lim = box_lim[u][0]\n x = [elements.index(entry) for entry in\n box_lim]\n x = [entry / max_value for entry in x]\n y = [y] * len(x)\n\n ax.scatter(x, y, edgecolor=color,\n facecolor=color)\n\n else:\n ax.plot(norm_box_lim[i], (y, y),\n c=color)\n\n\ndef plot_boxes(x, boxes, together):\n '''Helper function for plotting multiple boxlims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxes : list of pd.DataFrame\n together : bool\n\n '''\n\n box_init = _make_box(x)\n box_lims, uncs = _get_sorted_box_lims(boxes, box_init)\n\n # normalize the box lims\n # we don't need to show the last box, for this is the\n # box_init, which is visualized by a grey area in this\n # plot.\n norm_box_lims = [_normalize(box_lim, box_init, uncs) for\n box_lim in boxes]\n\n if together:\n fig, ax = _setup_figure(uncs)\n\n for i, u in enumerate(uncs):\n colors = itertools.cycle(COLOR_LIST)\n # we want to have the most restricted dimension\n # at the top of the figure\n\n xi = len(uncs) - i - 1\n\n for j, norm_box_lim in enumerate(norm_box_lims):\n color = next(colors)\n plot_unc(box_init, xi, i, j, norm_box_lim,\n box_lims[j], u, ax, color)\n\n plt.tight_layout()\n return fig\n else:\n figs = []\n colors = itertools.cycle(COLOR_LIST)\n\n for j, norm_box_lim in enumerate(norm_box_lims):\n fig, ax = _setup_figure(uncs)\n ax.set_title('box {}'.format(j))\n color = next(colors)\n\n figs.append(fig)\n for i, u in enumerate(uncs):\n xi = len(uncs) - i - 1\n plot_unc(box_init, xi, i, 0, norm_box_lim,\n box_lims[j], u, ax, color)\n\n plt.tight_layout()\n return figs\n\n\nclass OutputFormatterMixin(object):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractproperty\n def boxes(self):\n '''Property for getting a list of box limits'''\n\n raise NotImplementedError\n\n @abc.abstractproperty\n def stats(self):\n '''property for getting a list of dicts containing the statistics\n for each box'''\n\n raise NotImplementedError\n\n def boxes_to_dataframe(self):\n '''convert boxes to pandas dataframe'''\n\n boxes = self.boxes\n\n # determine the restricted dimensions\n # print only the restricted dimension\n box_lims, uncs = _get_sorted_box_lims(boxes, _make_box(self.x))\n nr_boxes = len(boxes)\n dtype = float\n index = [\"box {}\".format(i + 1) for i in range(nr_boxes)]\n for value in box_lims[0].dtypes:\n if value == object:\n dtype = object\n break\n\n columns = pd.MultiIndex.from_product([index,\n ['min', 'max', ]])\n df_boxes = pd.DataFrame(np.zeros((len(uncs), nr_boxes * 2)),\n index=uncs,\n dtype=dtype,\n columns=columns)\n\n # TODO should be possible to make more efficient\n for i, box in enumerate(box_lims):\n for unc in uncs:\n values = box.loc[:, unc]\n values = values.rename({0: 'min', 1: 'max'})\n df_boxes.loc[unc][index[i]] = values\n return df_boxes\n\n def stats_to_dataframe(self):\n '''convert stats to pandas dataframe'''\n\n stats = self.stats\n\n index = pd.Index(['box {}'.format(i + 1) for i in range(len(stats))])\n\n return pd.DataFrame(stats, 
index=index)\n\n def show_boxes(self, together=False):\n '''display boxes\n\n Parameters\n ----------\n together : bool, otional\n\n '''\n plot_boxes(self.x, self.boxes, together=together)\n" ]
[ [ "matplotlib.pyplot.show", "pandas.read_csv" ], [ "numpy.sum", "pandas.DataFrame", "pandas.api.types.is_float_dtype", "scipy.stats.binom_test", "matplotlib.pyplot.figure", "numpy.allclose", "matplotlib.colors.BoundaryNorm", "numpy.all", "matplotlib.pyplot.gcf", "matplotlib.pyplot.tight_layout", "pandas.MultiIndex.from_product", "numpy.argsort", "matplotlib.patches.Rectangle", "matplotlib.pyplot.subplots_adjust", "numpy.dtype", "pandas.api.types.is_integer_dtype" ] ]
i4oolish/mindspore
[ "4276050f2494cfbf8682560a1647576f859991e8", "4276050f2494cfbf8682560a1647576f859991e8", "4276050f2494cfbf8682560a1647576f859991e8", "4276050f2494cfbf8682560a1647576f859991e8", "4276050f2494cfbf8682560a1647576f859991e8" ]
[ "mindspore/common/initializer.py", "tests/st/serving/generate_model.py", "model_zoo/official/cv/maskrcnn/src/util.py", "tests/ut/python/dataset/test_dataset_numpy_slices.py", "model_zoo/official/gnn/gcn/src/dataset.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Initializer for cell parameters.\"\"\"\nimport numbers\nimport math\n\nfrom functools import reduce\nimport numpy as np\nfrom scipy.stats import truncnorm\nfrom mindspore import log as logger\n\nfrom . import dtype as mstype\nfrom .tensor import Tensor\n\n_INITIALIZER_ALIAS = dict()\n\n\nclass Initializer:\n \"\"\"\n The base class of the initializer.\n\n Args:\n kwargs (dict): Keyword arguments for Initializer.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def __init__(self, **kwargs):\n self._kwargs = kwargs\n self.shape = None\n self.dtype = None\n\n def _initialize(self, *kwargs):\n raise NotImplementedError('Must be overridden!')\n\n def __call__(self, arr):\n return self._initialize(arr)\n\n @property\n def shape(self):\n return self._shape\n\n @shape.setter\n def shape(self, shape):\n self._shape = shape\n\n @property\n def dtype(self):\n return self._dtype\n\n @dtype.setter\n def dtype(self, dtype):\n self._dtype = dtype\n\n def to_tensor(self, slice_index=None, shape=None):\n \"\"\"\n Get the tensor format data of this Initializer.\n\n Args:\n slice_index (int): Slice index of a parameter's slices.\n Used when initialize a slice of a parameter, it guarantee that\n devices use the same slice can generate the same tensor.\n shape (list[int]): Shape of the slice, used when initialize a slice of the parameter.\n \"\"\"\n arr = None\n if shape is None:\n shape = self.shape\n\n try:\n arr = np.ndarray(shape)\n except ValueError:\n msg = \"Error shape={}\".format(shape)\n logger.error(msg)\n raise ValueError(msg)\n\n if slice_index is not None:\n np.random.seed(slice_index)\n self.__call__(arr)\n return Tensor(arr, dtype=self.dtype)\n\ndef _register(*aliases):\n \"\"\"Return the alias register.\"\"\"\n def alias_reg(cls):\n name = cls.__name__\n name = name.lower()\n if name not in _INITIALIZER_ALIAS:\n _INITIALIZER_ALIAS[name] = cls\n\n for alias in aliases:\n if alias not in _INITIALIZER_ALIAS:\n _INITIALIZER_ALIAS[alias] = cls\n\n return cls\n\n return alias_reg\n\n\ndef _assignment(arr, num):\n \"\"\"Assign the value of `num` to `arr`.\"\"\"\n if arr.shape == ():\n arr = arr.reshape((1))\n arr[:] = num\n arr = arr.reshape(())\n else:\n if isinstance(num, np.ndarray):\n arr[:] = num[:]\n else:\n arr[:] = num\n return arr\n\n\n@_register('zeros')\nclass Zero(Initializer):\n \"\"\"\n Initialize the array to zero.\n\n Args:\n arr (Array): The array to be assigned.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def _initialize(self, arr):\n _assignment(arr, 0)\n\n\n@_register('ones')\nclass One(Initializer):\n \"\"\"\n Initialize the array to one.\n\n Args:\n arr (Array): The array to be assigned.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def _initialize(self, arr):\n _assignment(arr, 1)\n\n\ndef _calculate_in_and_out(arr):\n \"\"\"\n Calculate n_in and n_out.\n\n Args:\n arr (Array): Input 
array.\n\n Returns:\n Tuple, a tuple with two elements, the first element is `n_in` and the second element is `n_out`.\n \"\"\"\n dim = len(arr.shape)\n if dim < 2:\n raise ValueError(\"If initialize data with xavier uniform, the dimension of data must be greater than 1.\")\n\n n_in = arr.shape[1]\n n_out = arr.shape[0]\n\n if dim > 2:\n counter = reduce(lambda x, y: x * y, arr.shape[2:])\n n_in *= counter\n n_out *= counter\n return n_in, n_out\n\n\n@_register('xavier_uniform')\nclass XavierUniform(Initializer):\n r\"\"\"\n Initialize the array with xavier uniform algorithm, and from a uniform distribution collect samples within\n U[-boundary, boundary] where :math:`boundary = gain * \\sqrt{\\frac{6}{n_{in} + n_{out}}}`.\n\n Args:\n gain (Array): The array to be assigned. Default: 1.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def __init__(self, gain=1):\n super(XavierUniform, self).__init__(gain=gain)\n self.gain = gain\n\n def _initialize(self, arr):\n n_in, n_out = _calculate_in_and_out(arr)\n\n boundary = self.gain * math.sqrt(6.0 / (n_in + n_out))\n data = np.random.uniform(-boundary, boundary, arr.shape)\n\n _assignment(arr, data)\n\n\n@_register('he_uniform')\nclass HeUniform(Initializer):\n r\"\"\"\n Initialize the array with He kaiming uniform algorithm, and from a uniform distribution collect samples within\n U[-boundary, boundary] where :math:`boundary = \\sqrt{\\frac{6}{n_{in}}}` where :math:`n_{in}` is the number of\n input units in the weight tensor.\n\n Args:\n arr (Array): The array to be assigned.\n\n Returns:\n Array, assigned array.\n \"\"\"\n\n def _initialize(self, arr):\n n_in, _ = _calculate_in_and_out(arr)\n\n boundary = math.sqrt(6.0 / n_in)\n data = np.random.uniform(-boundary, boundary, arr.shape)\n\n _assignment(arr, data)\n\n\nclass Constant(Initializer):\n \"\"\"\n Initialize a constant.\n\n Args:\n value (Union[int, numpy.ndarray]): The value to initialize.\n\n Returns:\n Array, initialize array.\n \"\"\"\n def __init__(self, value):\n super(Constant, self).__init__(value=value)\n self.value = value\n\n def _initialize(self, arr):\n _assignment(arr, self.value)\n\n\n@_register()\nclass Uniform(Initializer):\n \"\"\"\n Initialize a uniform array, and obtain values U(-scale, scale) from the uniform distribution\n to fill the input tensor.\n\n Args:\n scale (float): The scale of the array. Default: 0.07.\n\n Returns:\n Array, uniform array.\n \"\"\"\n def __init__(self, scale=0.07):\n super(Uniform, self).__init__(scale=scale)\n self.scale = scale\n\n def _initialize(self, arr):\n tmp = np.random.uniform(-self.scale, self.scale, arr.shape)\n _assignment(arr, tmp)\n\n\n@_register()\nclass Normal(Initializer):\n \"\"\"\n Initialize a normal array, and obtain values N(0, sigma) from the uniform distribution\n to fill the input tensor.\n\n Args:\n sigma (float): The sigma of the array. Default: 0.01.\n\n Returns:\n Array, normal array.\n \"\"\"\n def __init__(self, sigma=0.01):\n super(Normal, self).__init__(sigma=sigma)\n self.sigma = sigma\n\n def _initialize(self, arr):\n tmp = np.random.normal(0, self.sigma, arr.shape)\n _assignment(arr, tmp)\n\n\n@_register()\nclass TruncatedNormal(Initializer):\n \"\"\"\n Initialize a truncated normal distribution which is a bounded normal distribution within N(low, high).\n\n Args:\n sigma (float): The sigma of the array. 
Default: 0.01.\n\n Returns:\n Array, truncated normal array.\n \"\"\"\n def __init__(self, sigma=0.01):\n super(TruncatedNormal, self).__init__(sigma=sigma)\n self.sigma = sigma\n\n def _initialize(self, arr):\n tmp = truncnorm.rvs(-2, 2, loc=0, scale=self.sigma, size=arr.shape, random_state=None)\n _assignment(arr, tmp)\n\n\ndef initializer(init, shape=None, dtype=mstype.float32):\n \"\"\"\n Create and initialize a tensor.\n\n Args:\n init (Union[Tensor, str, Initializer, numbers.Number]): Initialize value.\n\n - `str`: The `init` should be the alias of the class inheriting from `Initializer` and the corresponding\n class will be called.\n\n - `Initializer`: The `init` should be the class inheriting from `Initializer` to initialize tensor.\n\n - `numbers.Number`: The `Constant` will be called to initialize tensor.\n\n shape (Union[tuple, list, int]): A list of integers, a tuple of integers or an integer as the shape of\n output. Default: None.\n dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: mindspore.float32.\n\n Returns:\n Union[Tensor, Initializer], When `init` is Tensor, the return is Tensor object,\n otherwise the return is Initialize object.\n\n Examples:\n >>> tensor = initializer('ones', [1, 2, 3], mindspore.float32)\n \"\"\"\n if not isinstance(init, (Tensor, numbers.Number, str, Initializer)):\n raise TypeError(\"Unsupported init type '{}'.\".format(type(init)))\n\n if isinstance(init, Tensor):\n init_shape = init.shape\n shape = shape if isinstance(shape, (tuple, list)) else [shape]\n if shape is not None and init_shape != tuple(shape):\n raise ValueError(\"The shape of init should be same as variable shape, but got the shape of init {} and \"\n \"the variable shape {}.\".format(list(init.shape), shape))\n return init\n\n if isinstance(shape, list):\n shape = tuple(shape)\n elif isinstance(shape, numbers.Number):\n shape = (shape,)\n\n if isinstance(init, Initializer):\n init.shape = init.shape if init.shape is not None else shape\n init.dtype = init.dtype if init.dtype is not None else dtype\n return init\n\n if isinstance(init, str):\n init_obj = _INITIALIZER_ALIAS[init.lower()]()\n if init_obj is None:\n raise ValueError(\"The class corresponding to '{}' was not found.\".format(init))\n init = init_obj\n init.shape = shape\n init.dtype = dtype\n return init\n\n if isinstance(init, numbers.Number):\n init_obj = Constant(init)\n init_obj.shape = shape\n init_obj.dtype = dtype\n return init_obj\n raise TypeError(\"Unsupported init type '{}'.\".format(type(init)))\n\n__all__ = [\n 'Initializer',\n 'initializer',\n 'TruncatedNormal',\n 'Normal',\n 'Uniform',\n 'HeUniform',\n 'XavierUniform',\n 'One',\n 'Zero',\n 'Constant']\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport random\nimport numpy as np\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nimport mindspore.dataset as de\nfrom 
mindspore import Tensor, context\nfrom mindspore.ops import operations as P\nfrom mindspore.train.serialization import export\nfrom tests.st.networks.models.bert.src.bert_model import BertModel, BertConfig\n\nbert_net_cfg = BertConfig(\n batch_size=2,\n seq_length=32,\n vocab_size=21128,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n use_relative_positions=False,\n input_mask_from_dataset=True,\n token_type_ids_from_dataset=True,\n dtype=mstype.float32,\n compute_type=mstype.float16\n)\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\nrandom.seed(1)\nnp.random.seed(1)\nde.config.set_seed(1)\n\nclass AddNet(nn.Cell):\n def __init__(self):\n super(AddNet, self).__init__()\n self.add = P.TensorAdd()\n\n def construct(self, x_, y_):\n return self.add(x_, y_)\n\ndef export_add_model():\n net = AddNet()\n x = np.ones(4).astype(np.float32)\n y = np.ones(4).astype(np.float32)\n export(net, Tensor(x), Tensor(y), file_name='add.pb', file_format='BINARY')\n\ndef export_bert_model():\n net = BertModel(bert_net_cfg, False)\n input_ids = np.random.randint(0, 1000, size=(2, 32), dtype=np.int32)\n segment_ids = np.zeros((2, 32), dtype=np.int32)\n input_mask = np.zeros((2, 32), dtype=np.int32)\n export(net, Tensor(input_ids), Tensor(segment_ids), Tensor(input_mask), file_name='bert.pb', file_format='BINARY')\n\nif __name__ == '__main__':\n export_add_model()\n export_bert_model()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"coco eval for maskrcnn\"\"\"\nimport json\nimport numpy as np\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom pycocotools import mask as maskUtils\nimport mmcv\n\nfrom src.config import config\n\n_init_value = np.array(0.0)\nsummary_init = {\n 'Precision/mAP': _init_value,\n 'Precision/mAP@.50IOU': _init_value,\n 'Precision/mAP@.75IOU': _init_value,\n 'Precision/mAP (small)': _init_value,\n 'Precision/mAP (medium)': _init_value,\n 'Precision/mAP (large)': _init_value,\n 'Recall/AR@1': _init_value,\n 'Recall/AR@10': _init_value,\n 'Recall/AR@100': _init_value,\n 'Recall/AR@100 (small)': _init_value,\n 'Recall/AR@100 (medium)': _init_value,\n 'Recall/AR@100 (large)': _init_value,\n}\n\n\ndef coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000), single_result=False):\n \"\"\"coco eval for maskrcnn\"\"\"\n anns = json.load(open(result_files['bbox']))\n if not anns:\n return summary_init\n if mmcv.is_str(coco):\n coco = COCO(coco)\n assert isinstance(coco, COCO)\n\n for res_type in result_types:\n result_file = result_files[res_type]\n assert result_file.endswith('.json')\n\n coco_dets = coco.loadRes(result_file)\n gt_img_ids = coco.getImgIds()\n det_img_ids = 
coco_dets.getImgIds()\n iou_type = 'bbox' if res_type == 'proposal' else res_type\n cocoEval = COCOeval(coco, coco_dets, iou_type)\n if res_type == 'proposal':\n cocoEval.params.useCats = 0\n cocoEval.params.maxDets = list(max_dets)\n\n tgt_ids = gt_img_ids if not single_result else det_img_ids\n\n if single_result:\n res_dict = dict()\n for id_i in tgt_ids:\n cocoEval = COCOeval(coco, coco_dets, iou_type)\n if res_type == 'proposal':\n cocoEval.params.useCats = 0\n cocoEval.params.maxDets = list(max_dets)\n\n cocoEval.params.imgIds = [id_i]\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n res_dict.update({coco.imgs[id_i]['file_name']: cocoEval.stats[1]})\n\n cocoEval = COCOeval(coco, coco_dets, iou_type)\n if res_type == 'proposal':\n cocoEval.params.useCats = 0\n cocoEval.params.maxDets = list(max_dets)\n\n cocoEval.params.imgIds = tgt_ids\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n\n summary_metrics = {\n 'Precision/mAP': cocoEval.stats[0],\n 'Precision/mAP@.50IOU': cocoEval.stats[1],\n 'Precision/mAP@.75IOU': cocoEval.stats[2],\n 'Precision/mAP (small)': cocoEval.stats[3],\n 'Precision/mAP (medium)': cocoEval.stats[4],\n 'Precision/mAP (large)': cocoEval.stats[5],\n 'Recall/AR@1': cocoEval.stats[6],\n 'Recall/AR@10': cocoEval.stats[7],\n 'Recall/AR@100': cocoEval.stats[8],\n 'Recall/AR@100 (small)': cocoEval.stats[9],\n 'Recall/AR@100 (medium)': cocoEval.stats[10],\n 'Recall/AR@100 (large)': cocoEval.stats[11],\n }\n\n return summary_metrics\n\n\ndef xyxy2xywh(bbox):\n _bbox = bbox.tolist()\n return [\n _bbox[0],\n _bbox[1],\n _bbox[2] - _bbox[0] + 1,\n _bbox[3] - _bbox[1] + 1,\n ]\n\ndef bbox2result_1image(bboxes, labels, num_classes):\n \"\"\"Convert detection results to a list of numpy arrays.\n\n Args:\n bboxes (Tensor): shape (n, 5)\n labels (Tensor): shape (n, )\n num_classes (int): class number, including background class\n\n Returns:\n list(ndarray): bbox results of each class\n \"\"\"\n if bboxes.shape[0] == 0:\n result = [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)]\n else:\n result = [bboxes[labels == i, :] for i in range(num_classes - 1)]\n\n return result\n\ndef proposal2json(dataset, results):\n \"\"\"convert proposal to json mode\"\"\"\n img_ids = dataset.getImgIds()\n json_results = []\n dataset_len = dataset.get_dataset_size()*2\n for idx in range(dataset_len):\n img_id = img_ids[idx]\n bboxes = results[idx]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = 1\n json_results.append(data)\n return json_results\n\ndef det2json(dataset, results):\n \"\"\"convert det to json mode\"\"\"\n cat_ids = dataset.getCatIds()\n img_ids = dataset.getImgIds()\n json_results = []\n dataset_len = len(img_ids)\n for idx in range(dataset_len):\n img_id = img_ids[idx]\n if idx == len(results): break\n result = results[idx]\n for label, result_label in enumerate(result):\n bboxes = result_label\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = cat_ids[label]\n json_results.append(data)\n return json_results\n\ndef segm2json(dataset, results):\n \"\"\"convert segm to json mode\"\"\"\n cat_ids = dataset.getCatIds()\n img_ids = dataset.getImgIds()\n bbox_json_results = []\n segm_json_results = []\n\n dataset_len = len(img_ids)\n assert dataset_len == len(results)\n for 
idx in range(dataset_len):\n img_id = img_ids[idx]\n if idx == len(results): break\n det, seg = results[idx]\n for label, det_label in enumerate(det):\n bboxes = det_label\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = cat_ids[label]\n bbox_json_results.append(data)\n\n if len(seg) == 2:\n segms = seg[0][label]\n mask_score = seg[1][label]\n else:\n segms = seg[label]\n mask_score = [bbox[4] for bbox in bboxes]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['score'] = float(mask_score[i])\n data['category_id'] = cat_ids[label]\n segms[i]['counts'] = segms[i]['counts'].decode()\n data['segmentation'] = segms[i]\n segm_json_results.append(data)\n return bbox_json_results, segm_json_results\n\ndef results2json(dataset, results, out_file):\n \"\"\"convert result convert to json mode\"\"\"\n result_files = dict()\n if isinstance(results[0], list):\n json_results = det2json(dataset, results)\n result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')\n result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')\n mmcv.dump(json_results, result_files['bbox'])\n elif isinstance(results[0], tuple):\n json_results = segm2json(dataset, results)\n result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')\n result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')\n mmcv.dump(json_results[0], result_files['bbox'])\n mmcv.dump(json_results[1], result_files['segm'])\n elif isinstance(results[0], np.ndarray):\n json_results = proposal2json(dataset, results)\n result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')\n mmcv.dump(json_results, result_files['proposal'])\n else:\n raise TypeError('invalid type of results')\n return result_files\n\ndef get_seg_masks(mask_pred, det_bboxes, det_labels, img_meta, rescale, num_classes):\n \"\"\"Get segmentation masks from mask_pred and bboxes\"\"\"\n mask_pred = mask_pred.astype(np.float32)\n\n cls_segms = [[] for _ in range(num_classes - 1)]\n bboxes = det_bboxes[:, :4]\n labels = det_labels + 1\n\n ori_shape = img_meta[:2].astype(np.int32)\n scale_factor = img_meta[2:].astype(np.int32)\n\n if rescale:\n img_h, img_w = ori_shape[:2]\n else:\n img_h = np.round(ori_shape[0] * scale_factor[0]).astype(np.int32)\n img_w = np.round(ori_shape[1] * scale_factor[1]).astype(np.int32)\n scale_factor = 1.0\n\n for i in range(bboxes.shape[0]):\n bbox = (bboxes[i, :] / 1.0).astype(np.int32)\n label = labels[i]\n w = max(bbox[2] - bbox[0] + 1, 1)\n h = max(bbox[3] - bbox[1] + 1, 1)\n w = min(w, img_w - bbox[0])\n h = min(h, img_h - bbox[1])\n mask_pred_ = mask_pred[i, :, :]\n im_mask = np.zeros((img_h, img_w), dtype=np.uint8)\n bbox_mask = mmcv.imresize(mask_pred_, (w, h))\n bbox_mask = (bbox_mask > config.mask_thr_binary).astype(np.uint8)\n im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask\n\n rle = maskUtils.encode(\n np.array(im_mask[:, :, np.newaxis], order='F'))[0]\n cls_segms[label - 1].append(rle)\n\n return cls_segms\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport sys\nimport pytest\nimport numpy as np\nimport pandas as pd\nimport mindspore.dataset as de\nfrom mindspore import log as logger\nimport mindspore.dataset.transforms.vision.c_transforms as vision\n\n\ndef test_numpy_slices_list_1():\n logger.info(\"Test Slicing a 1D list.\")\n\n np_data = [1, 2, 3]\n ds = de.NumpySlicesDataset(np_data, shuffle=False)\n\n for i, data in enumerate(ds):\n assert data[0] == np_data[i]\n\n\ndef test_numpy_slices_list_2():\n logger.info(\"Test Slicing a 2D list into 1D list.\")\n\n np_data = [[1, 2], [3, 4]]\n ds = de.NumpySlicesDataset(np_data, column_names=[\"col1\"], shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(data[0], np_data[i]).all()\n\n\ndef test_numpy_slices_list_3():\n logger.info(\"Test Slicing list in the first dimension.\")\n\n np_data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]\n ds = de.NumpySlicesDataset(np_data, column_names=[\"col1\"], shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(data[0], np_data[i]).all()\n\n\ndef test_numpy_slices_list_append():\n logger.info(\"Test reading data of image list.\")\n\n DATA_DIR = [\"../data/dataset/test_tf_file_3_images/train-0000-of-0001.data\"]\n resize_height, resize_width = 2, 2\n\n data1 = de.TFRecordDataset(DATA_DIR)\n resize_op = vision.Resize((resize_height, resize_width))\n data1 = data1.map(input_columns=[\"image\"], operations=[vision.Decode(True), resize_op])\n\n res = []\n for data in data1.create_dict_iterator():\n res.append(data[\"image\"])\n\n ds = de.NumpySlicesDataset(res, column_names=[\"col1\"], shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(data, res[i]).all()\n\n\ndef test_numpy_slices_dict_1():\n logger.info(\"Test Dictionary structure data.\")\n\n np_data = {\"a\": [1, 2], \"b\": [3, 4]}\n ds = de.NumpySlicesDataset(np_data, shuffle=False)\n res = [[1, 3], [2, 4]]\n\n for i, data in enumerate(ds):\n assert data[0] == res[i][0]\n assert data[1] == res[i][1]\n\n\ndef test_numpy_slices_tuple_1():\n logger.info(\"Test slicing a list of tuple.\")\n\n np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]\n ds = de.NumpySlicesDataset(np_data, shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(data, np_data[i]).all()\n\n assert sum([1 for _ in ds]) == 3\n\n\ndef test_numpy_slices_tuple_2():\n logger.info(\"Test slicing a tuple of list.\")\n\n np_data = ([1, 2], [3, 4], [5, 6])\n expected = [[1, 3, 5], [2, 4, 6]]\n ds = de.NumpySlicesDataset(np_data, shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(data, expected[i]).all()\n\n assert sum([1 for _ in ds]) == 2\n\n\ndef test_numpy_slices_tuple_3():\n logger.info(\"Test reading different dimension of tuple data.\")\n features, labels = np.random.sample((5, 2)), np.random.sample((5, 1))\n data = (features, labels)\n\n ds = de.NumpySlicesDataset(data, column_names=[\"col1\", \"col2\"], shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(data[0], features[i]).all()\n assert data[1] == labels[i]\n\n\ndef test_numpy_slices_csv_value():\n logger.info(\"Test loading value of csv file.\")\n csv_file = \"../data/dataset/testNumpySlicesDataset/heart.csv\"\n\n df = pd.read_csv(csv_file)\n target = df.pop(\"target\")\n df.pop(\"state\")\n np_data = (df.values, target.values)\n\n ds = 
de.NumpySlicesDataset(np_data, column_names=[\"col1\", \"col2\"], shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(np_data[0][i], data[0]).all()\n assert np.equal(np_data[1][i], data[1]).all()\n\n\ndef test_numpy_slices_csv_dict():\n logger.info(\"Test loading csv file as dict.\")\n\n csv_file = \"../data/dataset/testNumpySlicesDataset/heart.csv\"\n df = pd.read_csv(csv_file)\n df.pop(\"state\")\n res = df.values\n\n ds = de.NumpySlicesDataset(dict(df), shuffle=False)\n\n for i, data in enumerate(ds):\n assert np.equal(data, res[i]).all()\n\n\ndef test_numpy_slices_num_samplers():\n logger.info(\"Test num_samplers.\")\n\n np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n ds = de.NumpySlicesDataset(np_data, shuffle=False, num_samples=2)\n\n for i, data in enumerate(ds):\n assert np.equal(data[0], np_data[i]).all()\n\n assert sum([1 for _ in ds]) == 2\n\n\ndef test_numpy_slices_distributed_sampler():\n logger.info(\"Test distributed sampler.\")\n\n np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n ds = de.NumpySlicesDataset(np_data, shuffle=False, shard_id=0, num_shards=4)\n\n for i, data in enumerate(ds):\n assert np.equal(data[0], np_data[i * 4]).all()\n\n assert sum([1 for _ in ds]) == 2\n\n\ndef test_numpy_slices_distributed_shard_limit():\n logger.info(\"Test Slicing a 1D list.\")\n\n np_data = [1, 2, 3]\n num = sys.maxsize\n with pytest.raises(ValueError) as err:\n de.NumpySlicesDataset(np_data, num_shards=num, shard_id=0, shuffle=False)\n assert \"Input num_shards is not within the required interval of (1 to 2147483647).\" in str(err.value)\n\n\ndef test_numpy_slices_distributed_zero_shard():\n logger.info(\"Test Slicing a 1D list.\")\n\n np_data = [1, 2, 3]\n with pytest.raises(ValueError) as err:\n de.NumpySlicesDataset(np_data, num_shards=0, shard_id=0, shuffle=False)\n assert \"Input num_shards is not within the required interval of (1 to 2147483647).\" in str(err.value)\n\n\ndef test_numpy_slices_sequential_sampler():\n logger.info(\"Test numpy_slices_dataset with SequentialSampler and repeat.\")\n\n np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n ds = de.NumpySlicesDataset(np_data, sampler=de.SequentialSampler()).repeat(2)\n\n for i, data in enumerate(ds):\n assert np.equal(data[0], np_data[i % 8]).all()\n\n\ndef test_numpy_slices_invalid_column_names_type():\n logger.info(\"Test incorrect column_names input\")\n np_data = [1, 2, 3]\n\n with pytest.raises(TypeError) as err:\n de.NumpySlicesDataset(np_data, column_names=[1], shuffle=False)\n assert \"Argument column_names[0] with value 1 is not of type (<class 'str'>,).\" in str(err.value)\n\n\ndef test_numpy_slices_invalid_column_names_string():\n logger.info(\"Test incorrect column_names input\")\n np_data = [1, 2, 3]\n\n with pytest.raises(ValueError) as err:\n de.NumpySlicesDataset(np_data, column_names=[\"\"], shuffle=False)\n assert \"column_names[0] should not be empty\" in str(err.value)\n\n\ndef test_numpy_slices_invalid_empty_column_names():\n logger.info(\"Test incorrect column_names input\")\n np_data = [1, 2, 3]\n\n with pytest.raises(ValueError) as err:\n de.NumpySlicesDataset(np_data, column_names=[], shuffle=False)\n assert \"column_names should not be empty\" in str(err.value)\n\n\ndef test_numpy_slices_invalid_empty_data_column():\n logger.info(\"Test incorrect column_names input\")\n np_data = []\n\n with pytest.raises(ValueError) as err:\n de.NumpySlicesDataset(np_data, shuffle=False)\n 
assert \"Argument data cannot be empty\" in str(err.value)\n\n\nif __name__ == \"__main__\":\n test_numpy_slices_list_1()\n test_numpy_slices_list_2()\n test_numpy_slices_list_3()\n test_numpy_slices_list_append()\n test_numpy_slices_dict_1()\n test_numpy_slices_tuple_1()\n test_numpy_slices_tuple_2()\n test_numpy_slices_tuple_3()\n test_numpy_slices_csv_value()\n test_numpy_slices_csv_dict()\n test_numpy_slices_num_samplers()\n test_numpy_slices_distributed_sampler()\n test_numpy_slices_distributed_shard_limit()\n test_numpy_slices_distributed_zero_shard()\n test_numpy_slices_sequential_sampler()\n test_numpy_slices_invalid_column_names_type()\n test_numpy_slices_invalid_column_names_string()\n test_numpy_slices_invalid_empty_column_names()\n test_numpy_slices_invalid_empty_data_column()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\ncreate adjacency matrix, node features, labels, and mask for training.\n\"\"\"\nimport numpy as np\nimport scipy.sparse as sp\nimport mindspore.dataset as ds\n\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\ndef get_adj_features_labels(data_dir):\n \"\"\"Get adjacency matrix, node features and labels from dataset.\"\"\"\n g = ds.GraphData(data_dir)\n nodes = g.get_all_nodes(0)\n nodes_list = nodes.tolist()\n row_tensor = g.get_node_feature(nodes_list, [1, 2])\n features = row_tensor[0]\n labels = row_tensor[1]\n\n nodes_num = labels.shape[0]\n class_num = labels.max() + 1\n labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)\n\n neighbor = g.get_all_neighbors(nodes_list, 0)\n node_map = {node_id: index for index, node_id in enumerate(nodes_list)}\n adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)\n for index, value in np.ndenumerate(neighbor):\n # The first column of neighbor is node_id, second column to last column are neighbors of the first column.\n # So we only care index[1] > 1.\n # If the node does not have that many neighbors, -1 is padded. So if value < 0, we will not deal with it.\n if value >= 0 and index[1] > 0:\n adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1\n adj = sp.coo_matrix(adj)\n adj = adj + adj.T.multiply(adj.T > adj) + sp.eye(nodes_num)\n nor_adj = normalize_adj(adj)\n nor_adj = np.array(nor_adj.todense())\n return nor_adj, features, labels_onehot, labels\n\n\ndef get_mask(total, begin, end):\n \"\"\"Generate mask.\"\"\"\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return mask\n" ]
[ [ "numpy.random.normal", "numpy.random.seed", "numpy.ndarray", "numpy.random.uniform", "scipy.stats.truncnorm.rvs" ], [ "numpy.random.seed", "numpy.ones", "numpy.random.randint", "numpy.zeros" ], [ "numpy.round", "numpy.array", "numpy.zeros" ], [ "numpy.random.sample", "numpy.equal", "pandas.read_csv" ], [ "numpy.isinf", "scipy.sparse.coo_matrix", "numpy.ndenumerate", "scipy.sparse.diags", "numpy.zeros", "scipy.sparse.eye", "numpy.eye", "numpy.power" ] ]
amaotone/caruta-contest-manager
[ "33bbbc8a8ff2903a2763a1270715f224c329e7a2" ]
[ "murasame/divider.py" ]
[ "import os\n\nimport pandas as pd\n\nfrom .utils import load_setting\n\n\nclass Divider(object):\n def __init__(self, df, files, base):\n self.data = df\n self.files = files\n self.base = base\n self.writers = dict()\n\n def _setup_writer(self, outdir):\n assert self.files\n os.makedirs(outdir, exist_ok=True)\n\n for name in self.files.keys():\n path = os.path.join(outdir, name)\n self.writers[name] = pd.ExcelWriter(path, engine='xlsxwriter')\n\n def save(self, outdir):\n self._setup_writer(outdir)\n\n for classname, member in self.data.groupby(self.base):\n member = member.drop(self.base, axis=1)\n for filename, classnames in self.files.items():\n if classname in classnames:\n target = self.writers[filename]\n break\n else:\n raise RuntimeError\n\n member.to_excel(target, classname, index=False)\n\n for w in self.writers.values():\n w.save()\n\n\ndef divider(df):\n setting = load_setting()['divider']\n div = Divider(df, setting[\"file\"], setting[\"base\"])\n div.save(load_setting()['system']['tmpdir'])\n" ]
[ [ "pandas.ExcelWriter" ] ]
qhy5755/xalpha
[ "99b9aa30d494b02533f518f125d46443cd9f0dd5" ]
[ "xalpha/indicator.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nmodule for implementation of indicator class, which is designed as MinIn for systems with netvalues\n\"\"\"\n\nimport pandas as pd\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Kline, Line, Bar, Grid\nfrom pyecharts.commons.utils import JsCode\n\nfrom xalpha.cons import line_opts, opendate, yesterdayobj\n\n\ndef _upcount(ls):\n \"\"\"\n count the ratio of upmove days by given a list\n \"\"\"\n count = 0\n for i in range(len(ls) - 1):\n # somehow after pandas 0.23(22?), the input is a series(dataframe?) and old list supporting syntax are illegal\n if ls.iloc[i + 1] > ls.iloc[i]:\n count += 1\n return count / (len(ls) - 1)\n\n\nclass indicator:\n \"\"\"\n MixIn class provide quant indicator tool box which is desinged as interface for mulfix class as well\n as info class, who are both treated as a single fund with price table of net value.\n Most of the quant indexes, their name conventions, definitions and calculations are from\n `joinquant <https://www.joinquant.com/help/api/help?name=api#%E9%A3%8E%E9%99%A9%E6%8C%87%E6%A0%87>`_.\n Make sure first run obj.bcmkset() before you want to use functions in this class.\n \"\"\"\n\n def bcmkset(self, infoobj, start=None, riskfree=0.0371724, name=\"基金组合\"):\n \"\"\"\n Once you want to utilize the indicator tool box for analysis, first run bcmkset function to set\n the benchmark, otherwise most of the functions would raise error.\n\n :param infoobj: info obj, whose netvalue are used as benchmark\n :param start: datetime obj, indicating the starting date of all analysis.\n Note if use default start, there may be problems for some fundinfo obj, as lots of\n funds lack netvalues of several days from our API, resulting unequal length between\n benchmarks and fund net values.\n :param riskfree: float, annual rate in the unit of 100%, strongly suggest make this value\n consistent with the interest parameter when instanciate cashinfo() class\n \"\"\"\n self._pricegenerate(name)\n if start is None:\n self.start = self.price.iloc[0].date\n elif isinstance(start, str):\n self.start = pd.to_datetime(\n start, format=\"%Y-%m-%d\"\n ) # pd.Timestamp.strptime(start, \"%Y-%m-%d\")\n self.benchmark = infoobj\n\n self.riskfree = riskfree\n self.bmprice = self.benchmark.price[self.benchmark.price[\"date\"] >= self.start]\n self.price = self.price[self.price[\"date\"] >= self.start]\n self.bmprice = self.bmprice[self.bmprice[\"date\"].isin(self.price[\"date\"])]\n self.price = self.price[self.price[\"date\"].isin(self.bmprice[\"date\"])]\n\n # the price data is removed from the infoobj before start date\n\n def _pricegenerate(self, name):\n \"\"\"\n generate price table for mulfix class, the cinfo class has this attr by default\n \"\"\"\n if getattr(self, \"price\", None) is None: # 基金组合类,而非基金信息类\n times = pd.date_range(self.totcftable.iloc[0].date, yesterdayobj())\n netvalue = []\n for date in times:\n netvalue.append(self.unitvalue(date)) # may take a long time\n self.price = pd.DataFrame(data={\"date\": times, \"netvalue\": netvalue})\n self.price = self.price[self.price[\"date\"].isin(opendate)]\n self.name = name\n\n def comparison(self, date=yesterdayobj()):\n \"\"\"\n :returns: tuple of two pd.Dataframe, the first is for aim and the second if for the benchmark index\n all netvalues are normalized and set equal 1.00 on the self.start date\n \"\"\"\n partp = self.price[self.price[\"date\"] <= date]\n partm = self.bmprice[self.bmprice[\"date\"] <= date]\n normp = partp.iloc[0].netvalue\n normm = 
partm.iloc[0].netvalue\n partp[\"netvalue\"] = partp[\"netvalue\"] / normp\n partm[\"netvalue\"] = partm[\"netvalue\"] / normm\n return (partp, partm)\n\n def total_return(self, date=yesterdayobj()):\n return round(\n (\n self.price[self.price[\"date\"] <= date].iloc[-1].netvalue\n - self.price.iloc[0].netvalue\n )\n / self.price.iloc[0].netvalue,\n 4,\n )\n\n @staticmethod\n def annualized_returns(price, start, date=yesterdayobj()):\n \"\"\"\n :param price: price table of info().price\n :param start: datetime obj for starting date of calculation\n :param date: datetime obj for ending date of calculation\n :returns: float, annualized returns of the price table\n \"\"\"\n datediff = (price[price[\"date\"] <= date].iloc[-1].date - start).days\n totreturn = (\n price[price[\"date\"] <= date].iloc[-1].netvalue - price.iloc[0].netvalue\n ) / price.iloc[0].netvalue\n return round((1 + totreturn) ** (365 / datediff) - 1, 4)\n\n def total_annualized_returns(self, date=yesterdayobj()):\n return indicator.annualized_returns(self.price, self.start, date)\n\n def benchmark_annualized_returns(self, date=yesterdayobj()):\n return indicator.annualized_returns(self.bmprice, self.start, date)\n\n def pct_chg(self, freq=\"Y\", benchmark=True):\n \"\"\"\n 年度,月,周涨幅统计\n\n :param freq: str, default Y, could be M or W or anything pd.date_range accepts\n :return: pd.DataFrame with columns date and pct_chg\n \"\"\"\n if getattr(self, \"bmprice\", None) is None:\n benchmark = False\n\n ydf = pd.merge_asof(\n pd.DataFrame(\n pd.date_range(\n self.price[\"date\"].iloc[0], self.price[\"date\"].iloc[-1], freq=freq\n ),\n columns=[\"date\"],\n ),\n self.price,\n )\n ydf[\"pct_chg\"] = ydf[\"netvalue\"].pct_change()\n if benchmark:\n ydf = pd.merge_asof(ydf, self.bmprice, on=\"date\", suffixes=[\"\", \"_bc\"])\n ydf[\"pct_chg_benchmark\"] = ydf[\"netvalue_bc\"].pct_change()\n ydf[\"pct_chg_difference\"] = ydf[\"pct_chg\"] - ydf[\"pct_chg_benchmark\"]\n return ydf[[\"date\", \"pct_chg\", \"pct_chg_benchmark\", \"pct_chg_difference\"]]\n\n return ydf[[\"date\", \"pct_chg\"]]\n\n def beta(self, date=yesterdayobj()):\n bcmk = indicator.ratedaily(self.bmprice, date)\n bt = indicator.ratedaily(self.price, date)\n df = pd.DataFrame(data={\"bcmk\": bcmk, \"bt\": bt})\n res = df.cov()\n return res.loc[\"bcmk\", \"bt\"] / res.loc[\"bcmk\", \"bcmk\"]\n\n def alpha(self, date=yesterdayobj()):\n rp = self.total_annualized_returns(date)\n rm = self.benchmark_annualized_returns(date)\n beta = self.beta(date)\n return rp - (self.riskfree + beta * (rm - self.riskfree))\n\n def correlation_coefficient(self, date=yesterdayobj()):\n \"\"\"\n correlation coefficient between aim and benchmark values,\n 可以很好地衡量指数基金的追踪效果\n\n :returns: float between -1 and 1\n \"\"\"\n bcmk = indicator.ratedaily(self.bmprice, date)\n bt = indicator.ratedaily(self.price, date)\n df = pd.DataFrame(data={\"bcmk\": bcmk, \"bt\": bt})\n res = df.cov()\n return res.loc[\"bcmk\", \"bt\"] / (\n (res.loc[\"bcmk\", \"bcmk\"] ** 0.5) * res.loc[\"bt\", \"bt\"] ** 0.5\n )\n\n @staticmethod\n def ratedaily(price, date=yesterdayobj()):\n partp = price[price[\"date\"] <= date]\n return list(partp[\"netvalue\"].pct_change())[1:]\n # return [\n # (partp.iloc[i + 1].netvalue - partp.iloc[i].netvalue)\n # / partp.iloc[i].netvalue\n # for i in range(len(partp) - 1)\n # ]\n\n @staticmethod\n def volatility(price, date=yesterdayobj()):\n df = pd.DataFrame(data={\"rate\": indicator.ratedaily(price, date)})\n return df.std().rate * 15.8144\n\n def algorithm_volatility(self, 
date=yesterdayobj()):\n return indicator.volatility(self.price, date)\n\n def benchmark_volatility(self, date=yesterdayobj()):\n return indicator.volatility(self.bmprice, date)\n\n def sharpe(self, date=yesterdayobj()):\n rp = self.total_annualized_returns(date)\n return (rp - self.riskfree) / self.algorithm_volatility(date)\n\n def information_ratio(self, date=yesterdayobj()):\n rp = self.total_annualized_returns(date)\n rm = self.benchmark_annualized_returns(date)\n vp = indicator.ratedaily(self.price, date)\n vm = indicator.ratedaily(self.bmprice, date)\n diff = [vp[i] - vm[i] for i in range(len(vm))]\n df = pd.DataFrame(data={\"rate\": diff})\n var = df.std().rate\n var = var * 15.8144\n return (rp - rm) / var\n\n def max_drawdown(self, date=yesterdayobj()):\n \"\"\"\n 回测时间段的最大回撤\n\n :param date: date obj or string\n :returns: three elements tuple, the first two are the date obj of\n start and end of the time window, the third one is the drawdown amplitude in unit 1.\n \"\"\"\n li = [\n (row[\"date\"], row[\"netvalue\"])\n for i, row in self.price[self.price[\"date\"] <= date].iterrows()\n ]\n res = []\n for i, _ in enumerate(li):\n for j in range(i + 1, len(li)):\n res.append((li[i][0], li[j][0], (li[j][1] - li[i][1]) / li[i][1]))\n return min(res, key=lambda x: x[2])\n\n ## 以上基本为聚宽提供的整体量化指标,以下是其他短线技术面指标\n\n def ma(self, window=5, col=\"netvalue\"):\n \"\"\"\n 移动平均线指标\n give the moving average as a new column 'MA' in the price table, return None\n\n :param window: the date window of the MA calculation\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n self.price[\"MA\" + str(window)] = self.price[col].rolling(window=window).mean()\n\n def md(self, window=5, col=\"netvalue\"):\n \"\"\"\n 移动标准差指标\n give the moving standard deviation as a new column 'MD' in the price table, return None\n\n :param window: the date window of the MD calculation\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n self.price[\"MD\" + str(window)] = self.price[col].rolling(window=window).std()\n\n def ema(self, window=5, col=\"netvalue\"):\n \"\"\"\n 指数平均数指标\n give the exponential moving average as a new column 'EMA' in the price table, return None\n\n :param window: the span of date, where the decay factor alpha=2/(1+window)\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n self.price[\"EMA\" + str(window)] = self.price[col].ewm(span=window).mean()\n\n def macd(self, fast_window=12, slow_window=26, signal_window=9, col=\"netvalue\"):\n \"\"\"\n 指数平滑异同移动平均线\n give the MACD index as three new columns 'MACD_DIFF/DEM/OSC' in the price table, return None\n\n :param fast_window: int,\n :param slow_window: int,\n :param signal_window: int, the ema window of the signal line\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n EMAfast = pd.Series(self.price[col].ewm(span=fast_window).mean())\n EMAslow = pd.Series(self.price[col].ewm(span=slow_window).mean())\n # 短期ema和长期ema的差\n MACDDiff = pd.Series(EMAfast - EMAslow)\n # 该差的再次 ema 平均\n MACDDem = pd.Series(MACDDiff.ewm(span=signal_window).mean())\n # ema平均过的差和原来差的差\n MACDOsc = pd.Series(MACDDiff - MACDDem)\n self.price[\"MACD_DIFF_\" + str(fast_window) + \"_\" + str(slow_window)] = MACDDiff\n self.price[\"MACD_DEM_\" + str(fast_window) + \"_\" + str(slow_window)] = MACDDem\n self.price[\"MACD_OSC_\" + str(fast_window) + \"_\" + str(slow_window)] = MACDOsc\n\n def mtm(self, window=10, col=\"netvalue\"):\n \"\"\"\n 
动量指标,并未附加动量的平均线指标,如需计算动量平均线指标,使用ma或emca函数,col参数选择MTM列即可\n give the MTM as a new column 'MTM' in the price table, return None\n\n :param window: int, the difference between price now and window days ago\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n self.price[\"MTM\" + str(window)] = self.price[col].diff(window)\n\n def roc(self, window=10, col=\"netvalue\"):\n \"\"\"\n 变动率指标\n give the ROC as a new column 'ROC' in the price table, return None, the ROC is in the unit of 1 instead of 1%\n\n :param window: int, the change rate between price now and window days ago\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n abdiff = self.price[col].diff(window)\n deno = self.price[col].shift(window)\n reladiff = pd.Series(abdiff / deno)\n self.price[\"ROC\" + str(window)] = reladiff\n\n def boll(self, window=10, deviation=2, col=\"netvalue\"):\n \"\"\"\n 布林线上下轨计算\n give the bolling upper and lower band in the price table, the middle line is just ma line\n\n :param window: int, the date window for ma and md\n :param deviation: int or float, how many times deviation of sigma\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n self.ma(window=window, col=col)\n self.md(window=window, col=col)\n self.price[\"BOLL_UPPER\"] = (\n self.price[\"MA\" + str(window)] + deviation * self.price[\"MD\" + str(window)]\n )\n self.price[\"BOLL_LOWER\"] = (\n self.price[\"MA\" + str(window)] - deviation * self.price[\"MD\" + str(window)]\n )\n\n def bias(self, window=10, col=\"netvalue\"):\n \"\"\"\n 乖离率\n give the bias as BIAS column in price table\n\n :param window: int, MA_window\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n self.ma(window=window, col=col)\n self.price[\"BIAS\" + str(window)] = (\n self.price[col] - self.price[\"MA\" + str(window)]\n ) / self.price[\"MA\" + str(window)]\n\n def rsi(self, window=14, col=\"netvalue\"):\n \"\"\"\n 相对强弱指标\n give the rsi as RSI column in price table\n\n :param window: int, MA_window\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n i = 0\n UpI = [0]\n DoI = [0]\n while i + 1 <= len(self.price) - 1:\n Move = self.price.loc[i + 1, col] - self.price.loc[i, col]\n if Move > 0:\n UpD = Move\n DoD = 0\n else:\n UpD = 0\n DoD = -Move\n UpI.append(UpD)\n DoI.append(DoD)\n i = i + 1\n\n UpI = pd.Series(UpI)\n DoI = pd.Series(DoI)\n PosDI = pd.Series(UpI.ewm(span=window).mean())\n NegDI = pd.Series(DoI.ewm(span=window).mean())\n self.price[\"RSI\" + str(window)] = pd.Series(PosDI / (PosDI + NegDI))\n\n def kdj(self, rsv_window=9, k_window=3, d_window=3, col=\"netvalue\"):\n \"\"\"\n KDJ 随机指标\n 由于该模块不涉及日内高低价的信息,因此区间最高价最低价都由极值收盘价代替,因此和其他软件计算的 kdj 指标可能存在出入。\n give k,d,j indexes as three columns KDJ_K/D/J in price table\n\n :param rsv_window: int\n :param k_window: int\n :param d_window: int\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n roll = self.price[col].rolling(window=rsv_window)\n rsv = (self.price[col] - roll.min()) / (roll.max() - roll.min())\n k = rsv.rolling(window=k_window).mean()\n d = k.rolling(window=d_window).mean()\n j = 3 * k - 2 * d\n self.price[\"KDJ_K\"] = k\n self.price[\"KDJ_D\"] = d\n self.price[\"KDJ_J\"] = j\n\n def wnr(self, window=14, col=\"netvalue\"):\n \"\"\"\n 威廉指标,这里取超卖结果接近0的约定(wnr*-1),事实上就是 rsv, 同样的区间极值价用极值收盘价替代\n give williams %R in WNR column in price table\n\n :param window: int\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n roll = 
self.price[col].rolling(window=window)\n wnr = (self.price[col] - roll.min()) / (roll.max() - roll.min())\n self.price[\"WNR\" + str(window)] = wnr\n\n def dma(self, fast_window=10, slow_window=50, ama_window=10, col=\"netvalue\"):\n \"\"\"\n 平行线差指标\n give different of moving average as columns DMA and AMA in price table\n\n :param fast_window: int\n :param slow_window: int\n :param ama_window: int\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n dma = (\n self.price[col].rolling(window=fast_window).mean()\n - self.price[col].rolling(window=slow_window).mean()\n )\n ama = dma.rolling(window=ama_window).mean()\n self.price[\"DMA\"] = dma\n self.price[\"AMA\"] = ama\n\n def bbi(self, col=\"netvalue\"):\n \"\"\"\n 多空指标\n give bull and bear line in column BBI in price table\n\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n bbi = self.price[col].rolling(3).mean()\n bbi = bbi + self.price[col].rolling(6).mean()\n bbi = bbi + self.price[col].rolling(12).mean()\n bbi = bbi + self.price[col].rolling(24).mean()\n bbi = bbi / 4\n self.price[\"BBI\"] = bbi\n\n def trix(self, window=10, ma_window=10, col=\"netvalue\"):\n \"\"\"\n 三重指数平滑平均线\n give the trix index in column TRIX, TRMA\n\n :param window: int\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n tr = self.price[col].ewm(span=window).mean()\n tr = tr.ewm(span=window).mean()\n tr = tr.ewm(span=window).mean()\n trix = tr.diff(1) / tr.shift(1)\n trma = trix.rolling(ma_window).mean()\n self.price[\"TRIX\" + str(window)] = trix\n self.price[\"TRMA\" + str(window)] = trma\n\n def psy(self, count_window=12, ma_window=6, col=\"netvalue\"):\n \"\"\"\n 心理线指标(衡量过去 count_window 天涨幅天数)\n give psy and psyma as column PSY and PSYMA in price table\n\n :param count_window: int\n :param ma_window: int\n :param col: string, column name in dataframe you want to calculate\n \"\"\"\n psy = self.price[col].rolling(count_window + 1).aggregate(_upcount)\n psyma = psy.rolling(ma_window).mean()\n self.price[\"PSY\" + str(count_window)] = psy\n self.price[\"PSYMA\" + str(count_window)] = psyma\n\n ## 以下是可视化部分\n\n def v_netvalue(self, end=yesterdayobj(), benchmark=True, rendered=True, vopts=None):\n \"\"\"\n visulaization on netvalue curve\n\n :param end: dateobject for indicating the end date in the figure, default to yesterday\n :param benchmark: bool, whether include benchmark's netvalue curve, default true\n :param vopts: dict, options for pyecharts instead of builtin settings\n \"\"\"\n if getattr(self, \"bmprice\", None) is None:\n benchmark = False\n if benchmark:\n a, b = self.comparison(end)\n else:\n a = self.price\n if vopts is None:\n vopts = line_opts\n line = Line()\n line.add_xaxis([d.date() for d in list(a.date)])\n line.add_yaxis(\n y_axis=list(a.netvalue), series_name=self.name, is_symbol_show=False\n )\n line.set_global_opts(**vopts)\n if benchmark:\n line.add_yaxis(\n series_name=self.benchmark.name,\n y_axis=list(b.netvalue),\n is_symbol_show=False,\n )\n if rendered:\n return line.render_notebook()\n else:\n return line\n\n def v_techindex(self, end=yesterdayobj(), col=None, rendered=True, vopts=None):\n \"\"\"\n visualization on netvalue curve and specified indicators\n\n :param end: date string or obj, the end date of the figure\n :param col: list, list of strings for price col name, eg.['MA5','BBI']\n remember generate these indicators before the visualization,\n these cols don't automatically generate for visualization\n :param vopts: dict, options for 
pyecharts instead of builtin settings\n \"\"\"\n partprice = self.price[self.price[\"date\"] <= end]\n xdata = [d.date() for d in list(partprice.date)]\n netvaldata = list(partprice.netvalue)\n if vopts is None:\n vopts = line_opts\n line = Line()\n line.add_xaxis(xdata)\n line.add_yaxis(series_name=\"netvalue\", y_axis=netvaldata, is_symbol_show=False)\n line.set_global_opts(**vopts)\n if col is not None:\n for ind in col:\n inddata = list(partprice[ind])\n line.add_yaxis(series_name=ind, y_axis=inddata, is_symbol_show=False)\n if rendered:\n return line.render_notebook()\n else:\n return line\n\n\ndef plot_kline(\n df,\n rendered=True,\n ucolor=\"#ef232a\",\n dcolor=\"#14b143\",\n ucolorborder=None,\n dcolorborder=None,\n ucolorvolume=None,\n dcolorvolume=None,\n col=\"\",\n):\n \"\"\"\n 针对 dataframe 直接画出标准看盘软件的上k线图下成交量图的形式\n\n :param df:\n :param rendered:\n :param ucolor: str for color when going up, default red in A stock as \"#ef232a\"\n :param dcolor: str for color when going down, default green in A stock as \"#14b143\"\n :param col:\n :return:\n \"\"\"\n # TODO: color changing seems to make no effect, possible issue with pyecharts\n if ucolorborder is None:\n ucolorborder = ucolor\n if dcolorborder is None:\n dcolorborder = dcolor\n if ucolorvolume is None:\n if ucolor != \"#ffffff\":\n ucolorvolume = ucolor\n else:\n ucolorvolume = ucolorborder\n if dcolorvolume is None:\n if dcolor != \"#ffffff\":\n dcolorvolume = dcolor\n else:\n dcolorvolume = dcolorborder\n\n kline = (\n Kline()\n .add_xaxis(xaxis_data=list(df[\"date\"]))\n .add_yaxis(\n series_name=\"\",\n itemstyle_opts=opts.ItemStyleOpts(\n color=ucolor,\n color0=dcolor,\n border_color=ucolorborder, # ucolor,\n border_color0=dcolorborder, # dcolor,\n ),\n y_axis=list(zip(df[\"open\"], df[\"close\"], df[\"low\"], df[\"high\"])),\n markpoint_opts=opts.MarkPointOpts(\n data=[\n opts.MarkPointItem(type_=\"max\", name=\"最大值\"),\n opts.MarkPointItem(type_=\"min\", name=\"最小值\"),\n ],\n symbol=\"pin\",\n symbol_size=[56, 40],\n # label_opts=opts.LabelOpts(color=\"#CCFFFF\",position=[\"top\", \"bottom\"])\n ),\n )\n .set_global_opts(\n datazoom_opts=[\n opts.DataZoomOpts(\n is_show=True,\n type_=\"slider\",\n range_start=50,\n range_end=100,\n xaxis_index=[0, 1],\n ),\n opts.DataZoomOpts(\n is_show=False,\n type_=\"inside\",\n range_start=50,\n range_end=100,\n xaxis_index=1,\n ),\n ],\n tooltip_opts=opts.TooltipOpts(\n is_show=True,\n trigger=\"axis\",\n trigger_on=\"mousemove\",\n axis_pointer_type=\"cross\",\n ),\n )\n )\n if col is not None:\n for c in col:\n line = (\n Line()\n .add_xaxis(xaxis_data=list(df[\"date\"]))\n .add_yaxis(\n series_name=c,\n y_axis=list(df[c]),\n is_smooth=True,\n linestyle_opts=opts.LineStyleOpts(opacity=0.5),\n label_opts=opts.LabelOpts(is_show=False),\n )\n )\n kline = kline.overlap(line)\n\n if \"volume\" in df.columns:\n vl = list(df[\"volume\"])\n elif \"amount\" in df.columns:\n vl = list(df[\"amount\"])\n else:\n vl = [0 for _ in range(len(df))]\n bar = (\n Bar()\n .add_js_funcs(\"var barData = {}\".format(list(df[\"close\"] - df[\"open\"])))\n .add_xaxis(xaxis_data=list(df[\"date\"]))\n .add_yaxis(\n series_name=\"\",\n yaxis_data=vl,\n label_opts=opts.LabelOpts(is_show=False),\n itemstyle_opts=opts.ItemStyleOpts(\n color=JsCode(\n \"\"\"\n function(params) {{\n var colorList;\n if (barData[params.dataIndex]>0) {{\n colorList = '{ucolor}';\n }} else {{\n colorList = '{dcolor}';\n }}\n return colorList;\n }}\n \"\"\".format(\n ucolor=ucolorvolume, dcolor=dcolorvolume\n )\n ) # escape {} when 
using format\n ),\n )\n .set_global_opts(\n tooltip_opts=opts.TooltipOpts(\n is_show=True,\n trigger=\"axis\",\n trigger_on=\"mousemove\",\n axis_pointer_type=\"cross\",\n ),\n )\n )\n grid_chart = Grid()\n grid_chart.add_js_funcs(\"var barData = {}\".format(list(df[\"close\"] - df[\"open\"])))\n grid_chart.add(\n kline,\n grid_opts=opts.GridOpts(\n pos_left=\"10%\", pos_right=\"1%\", pos_top=\"2%\", height=\"65%\"\n ),\n )\n\n grid_chart.add(\n bar,\n grid_opts=opts.GridOpts(\n pos_left=\"10%\", pos_right=\"1%\", pos_top=\"71%\", height=\"22%\"\n ),\n )\n if rendered:\n return grid_chart.render_notebook()\n else:\n return grid_chart\n\n\npd.DataFrame.v_kline = plot_kline\n" ]
[ [ "pandas.to_datetime", "pandas.merge_asof", "pandas.DataFrame", "pandas.date_range", "pandas.Series" ] ]
JamesBarciz/project_flask
[ "d123312b03a6a780a94319ed125b3944615aa8d3" ]
[ "src/predict.py" ]
[ "from src.orm_model import Tweet, Author\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\n# import pickle\n\n\ndef get_most_likely_author(tweet_body, spacy_model):\n authors = Author.query.all()\n features = pd.DataFrame()\n target = pd.Series()\n for a in authors:\n for t in a.tweets:\n if not len(features) > 0:\n features = pd.DataFrame(t.vect).T\n else:\n features = pd.concat([pd.DataFrame(t.vect).T, features])\n target = target.append(pd.Series([a.name]))\n target.reset_index(inplace=True, drop=True)\n features.reset_index(inplace=True, drop=True)\n model = LogisticRegression()\n model.fit(X=features, y=target)\n # with open('model', 'wb+') as f:\n # pickle.dump(model, f)\n # with open('model', 'rb') as f:\n # unpickler = pickle.Unpickler(f)\n # model = unpickler.load()\n likely_author = model.predict([spacy_model(tweet_body).vector])\n return likely_author\n" ]
[ [ "pandas.DataFrame", "sklearn.linear_model.LogisticRegression", "pandas.Series" ] ]
gaoxiao/tacotron2
[ "0a58682c8025f892b29898088ae275b9086887b6" ]
[ "infer_single.py" ]
[ "import warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport sys\n\nimport matplotlib.pylab as plt\nimport scipy\n\nsys.path.append('waveglow/')\nimport numpy as np\nimport torch\n\nfrom hparams import create_hparams\nfrom train import load_model\nfrom text import text_to_sequence\n\n\ndef plot_data(data, figsize=(16, 4)):\n fig, axes = plt.subplots(1, len(data), figsize=figsize)\n for i in range(len(data)):\n axes[i].imshow(data[i], aspect='auto', origin='bottom',\n interpolation='none')\n\n\nhparams = create_hparams()\nhparams.sampling_rate = 22050\n# hparams.gate_threshold = 0.1\n\ncheckpoint_path = \"tacotron2_statedict.pt\"\n# checkpoint_path = \"outdir/checkpoint_12500\"\n# checkpoint_path = \"outdir/saved_10000\"\n#checkpoint_path = \"outdir_self_data/saved_170000\"\n\nmodel = load_model(hparams)\nmodel.load_state_dict(torch.load(checkpoint_path)['state_dict'])\n_ = model.cuda().eval().half()\n\nwaveglow_path = 'waveglow_256channels.pt'\n# waveglow_path = 'waveglow/checkpoints1/saved_356000'\nwaveglow = torch.load(waveglow_path)['model']\nwaveglow.cuda().eval().half()\nfor k in waveglow.convinv:\n k.float()\n\ntext = 'Littlelights is awesome!'\nsequence = np.array(text_to_sequence(text, ['english_cleaners']))[None, :]\nsequence = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()\n\n# text_list = [\n# \"Read loudly, and be a super hero!\",\n# \"Join me to learn some words.\",\n# ]\n# sequence_list = [np.array(text_to_sequence(text, ['english_cleaners']))[None, :] for text in text_list]\n# sequence_list = torch.autograd.Variable(torch.from_numpy(sequence_list)).cuda().long()\n\nmel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)\n\nwith torch.no_grad():\n audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)\ndata = audio[0].data.cpu().numpy().astype(np.float32)\nscipy.io.wavfile.write('audio_output/{}.wav'.format(text), hparams.sampling_rate, data)\n" ]
[ [ "torch.no_grad", "torch.load", "torch.from_numpy" ] ]
ScapeQin/PerceptualSimilarity
[ "9b88429a599fa2f08dd33713c52d98843333d242" ]
[ "models/dist_model.py" ]
[ "import numpy as np\nimport torch\nfrom torch import nn\nimport os\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nimport itertools\nimport util.util as util\nfrom .base_model import BaseModel\nfrom . import networks_basic as networks\nfrom scipy.ndimage import zoom\nimport fractions\nimport functools\nimport skimage.transform\nfrom IPython import embed\n\nclass DistModel(BaseModel):\n def name(self):\n return self.model_name\n\n def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'):\n '''\n INPUTS\n model - ['net-lin'] for linearly calibrated network\n ['net'] for off-the-shelf network\n ['L2'] for L2 distance in Lab colorspace\n ['SSIM'] for ssim in RGB colorspace\n net - ['squeeze','alex','vgg']\n model_path - if None, will look in weights/[NET_NAME].pth\n colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM\n use_gpu - bool - whether or not to use a GPU\n printNet - bool - whether or not to print network architecture out\n spatial - bool - whether to output an array containing varying distances across spatial dimensions\n spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).\n spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.\n spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).\n is_train - bool - [True] for training mode\n lr - float - initial learning rate\n beta1 - float - initial momentum term for adam\n version - 0.1 for latest, 0.0 was original\n '''\n BaseModel.initialize(self, use_gpu=use_gpu)\n\n self.model = model\n self.net = net\n self.use_gpu = use_gpu\n self.is_train = is_train\n self.spatial = spatial\n self.spatial_shape = spatial_shape\n self.spatial_order = spatial_order\n self.spatial_factor = spatial_factor\n\n self.model_name = '%s [%s]'%(model,net)\n if(self.model == 'net-lin'): # pretrained net + linear layer\n self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version)\n kw = {}\n if not use_gpu:\n kw['map_location'] = 'cpu'\n if(model_path is None):\n model_path = './weights/v%s/%s.pth'%(version,net)\n\n if(not is_train):\n print('Loading model from: %s'%model_path)\n self.net.load_state_dict(torch.load(model_path, **kw))\n\n elif(self.model=='net'): # pretrained network\n assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'\n self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net)\n self.is_fake_net = True\n elif(self.model in ['L2','l2']):\n self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing\n self.model_name = 'L2'\n elif(self.model in ['DSSIM','dssim','SSIM','ssim']):\n self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)\n self.model_name = 'SSIM'\n else:\n raise ValueError(\"Model [%s] not recognized.\" % self.model)\n\n self.parameters = list(self.net.parameters())\n\n if self.is_train: # training mode\n # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)\n self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu)\n 
self.parameters+=self.rankLoss.parameters\n self.lr = lr\n self.old_lr = lr\n self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))\n else: # test mode\n self.net.eval()\n\n if(printNet):\n print('---------- Networks initialized -------------')\n networks.print_network(self.net)\n print('-----------------------------------------------')\n\n def forward_pair(self,in1,in2,retPerLayer=False):\n if(retPerLayer):\n return self.net.forward(in1,in2, retPerLayer=True)\n else:\n return self.net.forward(in1,in2)\n\n def forward(self, in0, in1, retNumpy=True):\n ''' Function computes the distance between image patches in0 and in1\n INPUTS\n in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]\n retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array\n OUTPUT\n computed distances between in0 and in1\n '''\n\n self.input_ref = in0\n self.input_p0 = in1\n\n if(self.use_gpu):\n self.input_ref = self.input_ref.cuda()\n self.input_p0 = self.input_p0.cuda()\n\n self.var_ref = Variable(self.input_ref,requires_grad=True)\n self.var_p0 = Variable(self.input_p0,requires_grad=True)\n\n self.d0 = self.forward_pair(self.var_ref, self.var_p0)\n self.loss_total = self.d0\n\n def convert_output(d0):\n if(retNumpy):\n ans = d0.cpu().data.numpy()\n if not self.spatial:\n ans = ans.flatten()\n else:\n assert(ans.shape[0] == 1 and len(ans.shape) == 4)\n return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels)\n return ans\n else:\n return d0\n\n if self.spatial:\n L = [convert_output(x) for x in self.d0]\n spatial_shape = self.spatial_shape\n if spatial_shape is None:\n if(self.spatial_factor is None):\n spatial_shape = (in0.size()[2],in0.size()[3])\n else:\n spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor)\n \n L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]\n \n L = np.mean(np.concatenate(L, 2) * len(L), 2)\n return L\n else:\n return convert_output(self.d0)\n\n # ***** TRAINING FUNCTIONS *****\n def optimize_parameters(self):\n self.forward_train()\n self.optimizer_net.zero_grad()\n self.backward_train()\n self.optimizer_net.step()\n self.clamp_weights()\n\n def clamp_weights(self):\n for module in self.net.modules():\n if(hasattr(module, 'weight') and module.kernel_size==(1,1)):\n module.weight.data = torch.clamp(module.weight.data,min=0)\n\n def set_input(self, data):\n self.input_ref = data['ref']\n self.input_p0 = data['p0']\n self.input_p1 = data['p1']\n self.input_judge = data['judge']\n\n if(self.use_gpu):\n self.input_ref = self.input_ref.cuda()\n self.input_p0 = self.input_p0.cuda()\n self.input_p1 = self.input_p1.cuda()\n self.input_judge = self.input_judge.cuda()\n\n self.var_ref = Variable(self.input_ref,requires_grad=True)\n self.var_p0 = Variable(self.input_p0,requires_grad=True)\n self.var_p1 = Variable(self.input_p1,requires_grad=True)\n\n def forward_train(self): # run forward pass\n self.d0 = self.forward_pair(self.var_ref, self.var_p0)\n self.d1 = self.forward_pair(self.var_ref, self.var_p1)\n self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)\n\n # var_judge\n self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())\n\n self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)\n return self.loss_total\n\n def backward_train(self):\n torch.mean(self.loss_total).backward()\n\n def 
compute_accuracy(self,d0,d1,judge):\n ''' d0, d1 are Variables, judge is a Tensor '''\n d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()\n judge_per = judge.cpu().numpy().flatten()\n return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)\n\n def get_current_errors(self):\n retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),\n ('acc_r', self.acc_r)])\n\n for key in retDict.keys():\n retDict[key] = np.mean(retDict[key])\n\n return retDict\n\n def get_current_visuals(self):\n zoom_factor = 256/self.var_ref.data.size()[2]\n\n ref_img = util.tensor2im(self.var_ref.data)\n p0_img = util.tensor2im(self.var_p0.data)\n p1_img = util.tensor2im(self.var_p1.data)\n\n ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)\n p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)\n p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)\n\n return OrderedDict([('ref', ref_img_vis),\n ('p0', p0_img_vis),\n ('p1', p1_img_vis)])\n\n def save(self, path, label):\n self.save_network(self.net, path, '', label)\n self.save_network(self.rankLoss.net, path, 'rank', label)\n\n def update_learning_rate(self,nepoch_decay):\n lrd = self.lr / nepoch_decay\n lr = self.old_lr - lrd\n\n for param_group in self.optimizer_net.param_groups:\n param_group['lr'] = lr\n\n print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))\n self.old_lr = lr\n\n\n\ndef score_2afc_dataset(data_loader,func):\n ''' Function computes Two Alternative Forced Choice (2AFC) score using\n distance function 'func' in dataset 'data_loader'\n INPUTS\n data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside\n func - callable distance function - calling d=func(in0,in1) should take 2\n pytorch tensors with shape Nx3xXxY, and return numpy array of length N\n OUTPUTS\n [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators\n [1] - dictionary with following elements\n d0s,d1s - N arrays containing distances between reference patch to perturbed patches \n gts - N array in [0,1], preferred patch selected by human evaluators\n (closer to \"0\" for left patch p0, \"1\" for right patch p1,\n \"0.6\" means 60pct people preferred right patch, 40pct preferred left)\n scores - N array in [0,1], corresponding to what percentage function agreed with humans\n CONSTS\n N - number of test triplets in data_loader\n '''\n\n d0s = []\n d1s = []\n gts = []\n\n # bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())\n for (i,data) in enumerate(data_loader.load_data()):\n d0s+=func(data['ref'],data['p0']).tolist()\n d1s+=func(data['ref'],data['p1']).tolist()\n gts+=data['judge'].cpu().numpy().flatten().tolist()\n # bar.update(i)\n\n d0s = np.array(d0s)\n d1s = np.array(d1s)\n gts = np.array(gts)\n scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5\n\n return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))\n\ndef score_jnd_dataset(data_loader,func):\n ''' Function computes JND score using distance function 'func' in dataset 'data_loader'\n INPUTS\n data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside\n func - callable distance function - calling d=func(in0,in1) should take 2\n pytorch tensors with shape Nx3xXxY, and return numpy array of length N\n OUTPUTS\n [0] - JND score in [0,1], mAP score (area under precision-recall curve)\n [1] - dictionary with following elements\n ds - N array containing distances between two patches shown to human evaluator\n sames - N array containing fraction of people who thought the two 
patches were identical\n CONSTS\n N - number of test triplets in data_loader\n '''\n\n ds = []\n gts = []\n\n # bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())\n for (i,data) in enumerate(data_loader.load_data()):\n ds+=func(data['p0'],data['p1']).tolist()\n gts+=data['same'].cpu().numpy().flatten().tolist()\n # bar.update(i)\n\n sames = np.array(gts)\n ds = np.array(ds)\n\n sorted_inds = np.argsort(ds)\n ds_sorted = ds[sorted_inds]\n sames_sorted = sames[sorted_inds]\n\n TPs = np.cumsum(sames_sorted)\n FPs = np.cumsum(1-sames_sorted)\n FNs = np.sum(sames_sorted)-TPs\n\n precs = TPs/(TPs+FPs)\n recs = TPs/(TPs+FNs)\n score = util.voc_ap(recs,precs)\n\n return(score, dict(ds=ds,sames=sames))\n" ]
[ [ "numpy.concatenate", "numpy.array", "torch.autograd.Variable", "numpy.sum", "torch.optim.Adam", "numpy.mean", "torch.clamp", "numpy.argsort", "numpy.cumsum", "torch.load", "scipy.ndimage.zoom", "torch.mean" ] ]
tonysy/pointnet2_tf
[ "e878a8f3dce0d15745be2dc33aee6fc35002c0b8" ]
[ "tf_ops/build_operator.py" ]
[ "import os\nimport sys\n\nimport tensorflow as tf \ninclude_path = tf.sysconfig.get_include()\nlib_path = tf.sysconfig.get_lib()\n\nos.system('sh ./build.sh {} {}'.format(include_path, lib_path))" ]
[ [ "tensorflow.sysconfig.get_lib", "tensorflow.sysconfig.get_include" ] ]
sanchitintel/pytorch
[ "416f59308023b5d98f6ea4ecdd0bcd3829edb7a7", "416f59308023b5d98f6ea4ecdd0bcd3829edb7a7" ]
[ "test/test_quantization.py", "torch/onnx/symbolic_opset10.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom torch.testing._internal.common_utils import run_tests\n\n# Quantization core tests. These include tests for\n# - quantized kernels\n# - quantized functional operators\n# - quantized workflow modules\n# - quantized workflow operators\n# - quantized tensor\n\n# 1. Quantized Kernels\n# TODO: merge the different quantized op tests into one test class\nfrom quantization.core.test_quantized_op import TestQuantizedOps # noqa: F401\nfrom quantization.core.test_quantized_op import TestQNNPackOps # noqa: F401\nfrom quantization.core.test_quantized_op import TestQuantizedLinear # noqa: F401\nfrom quantization.core.test_quantized_op import TestQuantizedConv # noqa: F401\nfrom quantization.core.test_quantized_op import TestDynamicQuantizedLinear # noqa: F401\nfrom quantization.core.test_quantized_op import TestComparatorOps # noqa: F401\nfrom quantization.core.test_quantized_op import TestPadding # noqa: F401\nfrom quantization.core.test_quantized_op import TestQuantizedEmbeddingOps # noqa: F401\nfrom quantization.core.test_quantized_op import TestDynamicQuantizedRNNOp # noqa: F401\n# 2. Quantized Functional/Workflow Ops\nfrom quantization.core.test_quantized_functional import TestQuantizedFunctionalOps # noqa: F401\nfrom quantization.core.test_workflow_ops import TestFakeQuantizeOps # noqa: F401\nfrom quantization.core.test_workflow_ops import TestFusedObsFakeQuant # noqa: F401\n# 3. Quantized Tensor\nfrom quantization.core.test_quantized_tensor import TestQuantizedTensor # noqa: F401\n# 4. Modules\nfrom quantization.core.test_workflow_module import TestFakeQuantize # noqa: F401\nfrom quantization.core.test_workflow_module import TestObserver # noqa: F401\nfrom quantization.core.test_quantized_module import TestStaticQuantizedModule # noqa: F401\nfrom quantization.core.test_quantized_module import TestDynamicQuantizedModule # noqa: F401\nfrom quantization.core.test_workflow_module import TestRecordHistogramObserver # noqa: F401\nfrom quantization.core.test_workflow_module import TestHistogramObserver # noqa: F401\nfrom quantization.core.test_workflow_module import TestDistributed # noqa: F401\nfrom quantization.core.test_workflow_module import TestFusedObsFakeQuantModule # noqa: F401\n\n\n# Eager Mode Workflow. Tests for the functionality of APIs and different features implemented\n# using eager mode.\n\n# 1. Eager mode post training quantization\nfrom quantization.eager.test_quantize_eager_ptq import TestPostTrainingStatic # noqa: F401\nfrom quantization.eager.test_quantize_eager_ptq import TestPostTrainingDynamic # noqa: F401\nfrom quantization.eager.test_quantize_eager_ptq import TestEagerModeActivationOps # noqa: F401\nfrom quantization.eager.test_quantize_eager_ptq import TestFunctionalModule # noqa: F401\nfrom quantization.eager.test_quantize_eager_ptq import TestQuantizeONNXExport # noqa: F401\n# 2. Eager mode quantization aware training\nfrom quantization.eager.test_quantize_eager_qat import TestQuantizationAwareTraining # noqa: F401\nfrom quantization.eager.test_quantize_eager_qat import TestQATActivationOps # noqa: F401\nfrom quantization.eager.test_quantize_eager_qat import TestConvBNQATModule # noqa: F401\n# 3. Eager mode fusion passes\nfrom quantization.eager.test_fusion import TestFusion # noqa: F401\n# 4. Testing model numerics between quanitzed and FP32 models\nfrom quantization.eager.test_model_numerics import TestModelNumericsEager # noqa: F401\n# 5. 
Tooling: numeric_suite\nfrom quantization.eager.test_numeric_suite_eager import TestEagerModeNumericSuite # noqa: F401\n# 6. Equalization and Bias Correction\nfrom quantization.eager.test_equalize_eager import TestEqualizeEager # noqa: F401\nfrom quantization.eager.test_bias_correction_eager import TestBiasCorrection # noqa: F401\n\n\n# FX GraphModule Graph Mode Quantization. Tests for the functionality of APIs and different features implemented\n# using fx quantization.\ntry:\n from quantization.fx.test_quantize_fx import TestFuseFx # noqa: F401\n from quantization.fx.test_quantize_fx import TestQuantizeFx # noqa: F401\n from quantization.fx.test_quantize_fx import TestQuantizeFxOps # noqa: F401\n from quantization.fx.test_quantize_fx import TestQuantizeFxModels # noqa: F401\nexcept ImportError:\n # In FBCode we separate FX out into a separate target for the sake of dev\n # velocity. These are covered by a separate test target `quantization_fx`\n pass\n\ntry:\n from quantization.fx.test_numeric_suite_fx import TestFXGraphMatcher # noqa: F401\n from quantization.fx.test_numeric_suite_fx import TestFXGraphMatcherModels # noqa: F401\n from quantization.fx.test_numeric_suite_fx import TestFXNumericSuiteCoreAPIs # noqa: F401\n from quantization.fx.test_numeric_suite_fx import TestFXNumericSuiteCoreAPIsModels # noqa: F401\nexcept ImportError:\n pass\n\n# Equalization for FX mode\ntry:\n from quantization.fx.test_equalize_fx import TestEqualizeFx # noqa: F401\nexcept ImportError:\n pass\n\n# Backward Compatibility. Tests serialization and BC for quantized modules.\nfrom quantization.bc.test_backward_compatibility import TestSerialization # noqa: F401\n\n# JIT Graph Mode Quantization\nfrom quantization.jit.test_quantize_jit import TestQuantizeJit # noqa: F401\nfrom quantization.jit.test_quantize_jit import TestQuantizeJitPasses # noqa: F401\nfrom quantization.jit.test_quantize_jit import TestQuantizeJitOps # noqa: F401\nfrom quantization.jit.test_quantize_jit import TestQuantizeDynamicJitPasses # noqa: F401\nfrom quantization.jit.test_quantize_jit import TestQuantizeDynamicJitOps # noqa: F401\n# Quantization specific fusion passes\nfrom quantization.jit.test_fusion_passes import TestFusionPasses # noqa: F401\nfrom quantization.jit.test_deprecated_jit_quant import TestDeprecatedJitQuantized # noqa: F401\n\n# AO Migration tests\nfrom quantization.ao_migration.test_quantization import TestAOMigrationQuantization # noqa: F401\ntry:\n from quantization.ao_migration.test_quantization_fx import TestAOMigrationQuantizationFx # noqa: F401\nexcept ImportError:\n pass\n\nif __name__ == '__main__':\n run_tests()\n", "\nimport torch\nfrom torch.nn.modules.utils import _single, _pair, _triple\nimport torch.onnx\n# This import monkey-patches graph manipulation methods on Graph, used for the\n# ONNX symbolics\nimport torch.onnx.utils\n\nimport torch.onnx.symbolic_helper as sym_help\nfrom torch.onnx.symbolic_helper import parse_args, _unimplemented\nimport torch.onnx.symbolic_opset9\n\nfrom sys import maxsize\n\n# EDITING THIS FILE? 
READ THIS FIRST!\n# see Note [Edit Symbolic Files] in symbolic_helper.py\n\n# This file exports ONNX ops for opset 10\n# Opset 10 is supported by ONNX release 1.5.0\n# release on 04/24/19\n\n\ndef div(g, self, other, *args):\n if len(args) == 0:\n return torch.onnx.symbolic_opset9.true_divide(g, self, other)\n else:\n return _div_rounding_mode(g, self, other, *args)\n\n\n@parse_args(\"v\", \"v\", \"s\")\ndef _div_rounding_mode(g, self, other, rounding_mode):\n if rounding_mode == \"floor\":\n return _floor_divide(g, self, other)\n else:\n return torch.onnx.symbolic_opset9._div_rounding_mode(g, self, other, rounding_mode)\n\n\ndef _floor_divide(g, self, other):\n if sym_help._is_fp(self) or sym_help._is_fp(other):\n out = torch.onnx.symbolic_opset9.true_divide(g, self, other)\n return g.op(\"Floor\", out)\n else:\n # Integer division does trunction rounding\n div = g.op(\"Div\", self, other)\n # Division is negative if: self < 0 != other < 0\n zero = g.op(\"Constant\", value_t=torch.tensor(0, dtype=torch.int64))\n negative = g.op(\"Xor\",\n g.op(\"Less\", self, zero),\n g.op(\"Less\", other, zero))\n\n # For negative numbers with self % other != 0, subtract 1 to round down instead of up\n mod = g.op(\"Mod\", self, other, fmod_i=0)\n fixup_mask = g.op(\"And\", negative,\n g.op(\"Not\", g.op(\"Equal\", mod, zero)))\n\n one = g.op(\"Constant\", value_t=torch.tensor(1, dtype=torch.int64))\n fixup = g.op(\"Sub\", div, one)\n return g.op(\"Where\", fixup_mask, fixup, div)\n\n\n@parse_args(\"v\", \"i\", \"i\", \"none\")\ndef sort(g, self, dim, decending, out=None):\n return sym_help._sort_helper(g, self, dim, decending=decending, out=out)\n\n\n@parse_args(\"v\", \"v\", \"i\", \"i\", \"i\", \"none\")\ndef topk(g, self, k, dim, largest, sorted, out=None):\n return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)\n\n\ndef _max_pool(name, tuple_fn, ndims, return_indices):\n @parse_args(\"v\", \"is\", \"is\", \"is\", \"is\", \"i\")\n def symbolic_fn(g, input, kernel_size, stride, padding, dilation, ceil_mode):\n if not stride:\n stride = kernel_size\n kwargs = {\n \"kernel_shape_i\": tuple_fn(kernel_size),\n \"pads_i\": tuple_fn(padding) * 2,\n \"strides_i\": tuple_fn(stride),\n \"ceil_mode_i\": ceil_mode,\n }\n if set(tuple_fn(dilation)) != {1}:\n kwargs[\"dilations_i\"] = tuple_fn(dilation)\n # easy but hacky way to get flattened indices values\n # to be used to convert the indices values to non-flattened.\n # In ONNX the indices are computed as a flatten 1-D tensor,\n # so the values in indices are in [0, N x C x D1 x ... 
x Dn).\n # To convert the indices to the same format used by Pytorch,\n # we first execute a maxpool with a kernel and stride of 1 on the same input.\n # This will result in a tensor of indices in which each index will have it's own value.\n # Using this tensor as a reference, we extract the first index of each axis and subtract\n # it from each index of this axis in the indices to convert.\n # This step will result in a tensor were each dimension has values of indices within\n # the dimension it is in.\n # For more information :\n # https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407\n if return_indices:\n r, indices = g.op(\"MaxPool\", input, outputs=2, **kwargs)\n _, flattened_indices = g.op(\"MaxPool\", input, outputs=2,\n kernel_shape_i=[1 for _ in range(ndims)],\n strides_i=[1 for _ in range(ndims)])\n # convert indices to have non-flattened indices values\n from torch.onnx.symbolic_opset9 import sub\n s = sym_help._slice_helper(g, flattened_indices, axes=[2 + i for i in range(ndims)],\n starts=tuple_fn(0), ends=tuple_fn(1))\n indices = sub(g, indices, s)\n return r, indices\n else:\n r = g.op(\"MaxPool\", input, outputs=1, **kwargs)\n return r\n\n return symbolic_fn\n\n\nmax_pool1d = _max_pool(\"max_pool1d\", _single, 1, return_indices=False)\nmax_pool2d = _max_pool(\"max_pool2d\", _pair, 2, return_indices=False)\nmax_pool3d = _max_pool(\"max_pool3d\", _triple, 3, return_indices=False)\nmax_pool1d_with_indices = _max_pool(\"max_pool1d_with_indices\", _single, 1, return_indices=True)\nmax_pool2d_with_indices = _max_pool(\"max_pool2d_with_indices\", _pair, 2, return_indices=True)\nmax_pool3d_with_indices = _max_pool(\"max_pool3d_with_indices\", _triple, 3, return_indices=True)\n\n\ndef _avg_pool(name, tuple_fn):\n @parse_args(\"v\", \"is\", \"is\", \"is\", \"i\", \"i\", \"none\")\n def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):\n if not stride:\n stride = kernel_size\n padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)\n if count_include_pad:\n input = g.op(\"Pad\", input,\n pads_i=((0,) * 2 + padding) * 2,\n mode_s=\"constant\",\n value_f=0.)\n padding = (0,) * len(padding)\n output = g.op(\"AveragePool\", input,\n kernel_shape_i=tuple_fn(kernel_size),\n strides_i=tuple_fn(stride),\n pads_i=padding * 2,\n ceil_mode_i=ceil_mode)\n return output\n return symbolic_fn\n\n\navg_pool1d = _avg_pool(\"avg_pool1d\", _single)\navg_pool2d = _avg_pool(\"avg_pool2d\", _pair)\navg_pool3d = _avg_pool(\"avg_pool3d\", _triple)\n\n\ndef _interpolate(name, dim, interpolate_mode):\n def symbolic_fn(g, input, output_size, *args):\n scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)\n sym_help._interpolate_warning(interpolate_mode)\n align_corners = sym_help._maybe_get_scalar(align_corners)\n if align_corners:\n return _unimplemented(name, \"align_corners == True\")\n if scales is None:\n scales = sym_help._interpolate_size_to_scales(g, input, output_size, dim)\n return g.op(\"Resize\", input, scales, mode_s=interpolate_mode)\n return symbolic_fn\n\n\nupsample_nearest1d = _interpolate(\"upsample_nearest1d\", 3, \"nearest\")\nupsample_nearest2d = _interpolate(\"upsample_nearest2d\", 4, \"nearest\")\nupsample_nearest3d = _interpolate(\"upsample_nearest3d\", 5, \"nearest\")\nupsample_linear1d = _interpolate(\"upsample_linear1d\", 3, \"linear\")\nupsample_bilinear2d = _interpolate(\"upsample_bilinear2d\", 4, \"linear\")\nupsample_trilinear3d = 
_interpolate(\"upsample_trilinear3d\", 5, \"linear\")\n\ndef __interpolate(g, input, size, scale_factor, mode , align_corners, recompute_scale_factor):\n scales, mode = sym_help._interpolate_get_scales_and_mode(g, input, size, scale_factor,\n mode , align_corners)\n return g.op(\"Resize\", input, scales, mode_s=mode)\n\n\ndef _slice(g, input, axes, starts, ends, steps=None, dynamic_slice=False):\n if dynamic_slice:\n starts = sym_help._unsqueeze_helper(g, starts, [0])\n ends = sym_help._unsqueeze_helper(g, ends, [0])\n if isinstance(axes, int):\n axes = g.op(\"Constant\", value_t=torch.tensor(axes))\n axes = sym_help._unsqueeze_helper(g, axes, [0])\n else:\n assert len(starts) == len(ends)\n assert len(starts) == len(axes)\n assert steps is None or len(starts) == len(steps)\n if len(starts) == 1 and starts[0] == 0 and ends[0] == 9223372036854775807\\\n and (steps is None or (len(steps) == 1 and steps[0] == 1)):\n return input\n axes = g.op(\"Constant\", value_t=torch.tensor(axes))\n starts = g.op(\"Constant\", value_t=torch.tensor(starts))\n ends = g.op(\"Constant\", value_t=torch.tensor(ends))\n if steps is None:\n return g.op(\"Slice\", input, starts, ends, axes)\n steps = g.op(\"Constant\", value_t=torch.tensor(steps))\n return g.op(\"Slice\", input, starts, ends, axes, steps)\n\n\ndef slice(g, self, *args):\n if len(args) == 4:\n # aten::slice(Tensor self, int dim, int? start=None, int? end=None, int step=1) -> Tensor\n dim, start, end, step = args\n elif len(args) == 3:\n # aten::slice(t[] l, int? start=None, int? end=None, int step=1) -> t[]\n start, end, step = args\n dim = 0\n else:\n raise NotImplementedError(\"Unknown aten::slice signature\")\n is_start_none = start.node().kind() == \"prim::Constant\" and start.type().kind() == 'NoneType'\n is_end_none = end.node().kind() == \"prim::Constant\" and end.type().kind() == 'NoneType'\n is_start_onnx_const = start.node().kind() == 'onnx::Constant'\n is_end_onnx_const = end.node().kind() == 'onnx::Constant'\n step = sym_help._parse_arg(step, 'i')\n if (not is_start_none and not is_start_onnx_const) or \\\n (not isinstance(end, int) and not is_end_none and not is_end_onnx_const) or \\\n (not isinstance(dim, int) and dim.node().kind() != 'onnx::Constant'):\n dynamic_slice = True\n if is_start_none:\n start = g.op(\"Constant\", value_t=torch.tensor(0))\n if is_end_none:\n end = g.op(\"Constant\", value_t=torch.tensor(9223372036854775807))\n else:\n start = [0 if is_start_none else sym_help._parse_arg(start, 'i')]\n end = [9223372036854775807 if is_end_none else sym_help._parse_arg(end, 'i')]\n dim = [sym_help._parse_arg(dim, 'i')]\n dynamic_slice = False\n return sym_help._slice_helper(g, self, axes=dim, starts=start, ends=end, steps=[step], dynamic_slice=dynamic_slice)\n\n\n@parse_args(\"v\", \"is\")\ndef flip(g, input, dims):\n return sym_help._slice_helper(g, input, axes=dims,\n starts=[-1] * len(dims),\n ends=[-9223372036854775807] * len(dims),\n steps=[-1] * len(dims))\n\n\ndef fmod(g, input, other):\n return g.op(\"Mod\", input, other, fmod_i=1)\n\n\n@parse_args(\"v\", \"v\", \"v\", \"i\", \"i\", \"i\", \"v\", \"i\", \"i\")\ndef embedding_bag(g,\n embedding_matrix,\n indices,\n offsets,\n scale_grad_by_freq,\n mode,\n sparse,\n per_sample_weights,\n include_last_offset,\n padding_idx):\n if scale_grad_by_freq and sym_help._training_mode:\n return sym_help._onnx_unsupported(\"embedding_bag with scale_grad_by_freq for training mode\")\n if padding_idx is not None and padding_idx >= 0:\n raise RuntimeError(\"embedding_bag with 
padding_idx\")\n from torch.onnx.symbolic_opset9 import select\n import warnings\n warnings.warn(\"Export of embedding_bag with dynamic input/offsets shape is not supported in opset 10. \"\n \"Please use opset 11 or higher to export model for dynamic input shape.'\")\n offsets_dim_0 = sym_help._get_tensor_dim_size(offsets, 0)\n if offsets_dim_0 is not None:\n if include_last_offset:\n offset_len = offsets_dim_0 - 1\n offsets_extended = offsets\n else:\n offset_len = offsets_dim_0\n offsets_extended = [offsets, g.op(\"Constant\", value_t=torch.tensor([maxsize]))]\n offsets_extended = g.op(\"Concat\", *offsets_extended, axis_i=0)\n list_ = []\n for i in range(offset_len):\n start_ = sym_help._unsqueeze_helper(g, select(g, offsets_extended, torch.tensor(0), torch.tensor(i)), [0])\n end_ = sym_help._unsqueeze_helper(g, select(g, offsets_extended, torch.tensor(0), torch.tensor(i + 1)), [0])\n axes_ = g.op(\"Constant\", value_t=torch.tensor([0]))\n indices_row = g.op(\"Slice\", indices, start_, end_, axes_)\n\n embeddings = g.op(\"Gather\", embedding_matrix, indices_row)\n if not sym_help._is_none(per_sample_weights):\n per_sample_weights_row = g.op(\"Slice\", per_sample_weights, start_, end_, axes_)\n per_sample_weights_row = sym_help._unsqueeze_helper(g, per_sample_weights_row, [1])\n embeddings = g.op(\"Mul\", embeddings, per_sample_weights_row)\n if mode == 0:\n embeddings = sym_help._reducesum_helper(g, embeddings, axes_i=[0], keepdims_i=0)\n elif mode == 1:\n embeddings = g.op(\"ReduceMean\", embeddings, axes_i=[0], keepdims_i=0)\n else:\n embeddings = g.op(\"ReduceMax\", embeddings, axes_i=[0], keepdims_i=0)\n\n embeddings = sym_help._unsqueeze_helper(g, embeddings, [0])\n list_.append(embeddings)\n\n output = g.op(\"Concat\", *list_, axis_i=0)\n # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.\n # But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.\n return output, None, None, None\n else:\n return sym_help._onnx_unsupported(\"embedding_bag with unknown shape of offsets for opset 10 is not supported. \"\n \"please use opset 11 or higher.\")\n\n\n@parse_args(\"v\", \"t\", \"i\", \"i\", \"i\")\ndef fake_quantize_per_tensor_affine(g, inputs, scale, zero_point, quant_min=-128, quant_max=127):\n if quant_min not in [0, -128] or quant_max not in [127, 255]:\n raise RuntimeError(\n \"ONNX defines [0, 255] for quint8 and [-128, 127] for qint8, got [{}, {}]\".format(quant_min, quant_max))\n scale = scale.float().data # Avoid exporter generating double type\n zero_point_dtype = torch.int8 if quant_min == -128 else torch.uint8\n zero_point = torch.tensor(zero_point, dtype=zero_point_dtype) # ONNX requires zero_point to be tensor\n return g.op(\"DequantizeLinear\", g.op(\"QuantizeLinear\", inputs, scale, zero_point), scale, zero_point)\n\n\ndef isinf(g, input):\n from torch.onnx.symbolic_opset9 import _cast_Double # type: ignore[attr-defined]\n return g.op(\"IsInf\", _cast_Double(g, input, False))\n\n\ndef isfinite(g, input):\n from torch.onnx.symbolic_opset9 import isnan, __not_, __or_\n inf_node = isinf(g, input)\n nan_node = isnan(g, input)\n return __not_(g, __or_(g, inf_node, nan_node))\n" ]
[ [ "torch.testing._internal.common_utils.run_tests" ], [ "torch.onnx.symbolic_helper._avgpool_helper", "torch.onnx.symbolic_helper._parse_arg", "torch.onnx.symbolic_helper._get_tensor_dim_size", "torch.onnx.symbolic_helper.parse_args", "torch.onnx.symbolic_helper._onnx_unsupported", "torch.onnx.symbolic_helper._topk_helper", "torch.onnx.symbolic_helper._unsqueeze_helper", "torch.onnx.symbolic_helper._maybe_get_scalar", "torch.onnx.symbolic_helper._sort_helper", "torch.onnx.symbolic_helper._interpolate_get_scales_and_mode", "torch.onnx.symbolic_opset9.sub", "torch.onnx.symbolic_opset9.__or_", "torch.tensor", "torch.onnx.symbolic_helper._interpolate_size_to_scales", "torch.onnx.symbolic_helper._is_fp", "torch.onnx.symbolic_helper._is_none", "torch.onnx.symbolic_helper._reducesum_helper", "torch.onnx.symbolic_helper._slice_helper", "torch.onnx.symbolic_opset9.isnan", "torch.onnx.symbolic_opset9._cast_Double", "torch.onnx.symbolic_opset9._div_rounding_mode", "torch.onnx.symbolic_opset9.true_divide", "torch.onnx.symbolic_helper._interpolate_warning", "torch.onnx.symbolic_helper._get_interpolate_attributes", "torch.onnx.symbolic_helper._unimplemented" ] ]
Shyonokaze/FCN_STEM
[ "5ffb4f4bcea12646694e48246b7c2b0566cc120a" ]
[ "FCN.py" ]
[ "from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\n\nimport TensorflowUtils as utils\nimport read_MITSceneParsingData as scene_parsing\nimport datetime\nimport BatchDatsetReader as dataset\nfrom six.moves import xrange\n\nimport os.path as osp\n\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_integer(\"batch_size\", \"2\", \"batch size for training\")\ntf.flags.DEFINE_string(\"logs_dir\", r\"E:\\work\\01-Myproject\\imag_division\\FCN.tensorflow-master\\logs\", \"path to logs directory\")\ntf.flags.DEFINE_string(\"data_dir\", r\"E:\\work\\01-Myproject\\imag_division\\FCN.tensorflow-master\\Data_zoo\\STEM\", \"path to dataset\")\ntf.flags.DEFINE_float(\"learning_rate\", \"1e-4\", \"Learning rate for Adam Optimizer\")\ntf.flags.DEFINE_string(\"model_dir\", r\"E:\\work\\01-Myproject\\imag_division\\FCN.tensorflow-master\\Model_zoo\", \"Path to vgg model mat\")\ntf.flags.DEFINE_bool('debug', \"False\", \"Debug mode: True/ False\")\ntf.flags.DEFINE_string('mode', \"train\", \"Mode train/ test/ visualize\")\n\nMODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'\n\nMAX_ITERATION = 100 #最大步数\nNUM_OF_CLASSESS = 3 #分类数目\nIMAGE_SIZE = 2048 #图像大小\n\n\ndef vgg_net(weights, image):\n layers = (\n 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n\n 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',\n\n 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',\n 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',\n\n 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',\n 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',\n\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n 'relu5_3', 'conv5_4', 'relu5_4'\n )\n\n net = {}\n current = image\n for i, name in enumerate(layers):\n kind = name[:4]\n if kind == 'conv':\n kernels, bias = weights[i][0][0][0][0]\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias)\n elif kind == 'relu':\n current = tf.nn.relu(current, name=name)\n if FLAGS.debug:\n utils.add_activation_summary(current)\n elif kind == 'pool':\n current = utils.avg_pool_2x2(current)\n net[name] = current\n\n return net\n\n\ndef inference(image, keep_prob):\n \"\"\"\n Semantic segmentation network definition\n :param image: input image. 
Should have values in range 0-255\n :param keep_prob:\n :return:\n \"\"\"\n print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n processed_image = utils.process_image(image, mean_pixel)\n\n with tf.variable_scope(\"inference\"):\n image_net = vgg_net(weights, processed_image)\n conv_final_layer = image_net[\"conv5_3\"]\n\n pool5 = utils.max_pool_2x2(conv_final_layer)\n\n W6 = utils.weight_variable([7, 7, 512, 4096], name=\"W6\")\n b6 = utils.bias_variable([4096], name=\"b6\")\n conv6 = utils.conv2d_basic(pool5, W6, b6)\n relu6 = tf.nn.relu(conv6, name=\"relu6\")\n if FLAGS.debug:\n utils.add_activation_summary(relu6)\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\n\n W7 = utils.weight_variable([1, 1, 4096, 4096], name=\"W7\")\n b7 = utils.bias_variable([4096], name=\"b7\")\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\n if FLAGS.debug:\n utils.add_activation_summary(relu7)\n relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\n\n W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name=\"W8\")\n b8 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b8\")\n conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)\n # annotation_pred1 = tf.argmax(conv8, dimension=3, name=\"prediction1\")\n\n # now to upscale to actual image size\n deconv_shape1 = image_net[\"pool4\"].get_shape()\n W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name=\"W_t1\")\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\n conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net[\"pool4\"]))\n fuse_1 = tf.add(conv_t1, image_net[\"pool4\"], name=\"fuse_1\")\n\n deconv_shape2 = image_net[\"pool3\"].get_shape()\n W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name=\"W_t2\")\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net[\"pool3\"]))\n fuse_2 = tf.add(conv_t2, image_net[\"pool3\"], name=\"fuse_2\")\n\n shape = tf.shape(image)\n deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])\n W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name=\"W_t3\")\n b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b_t3\")\n conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)\n\n annotation_pred = tf.argmax(conv_t3, dimension=3, name=\"prediction\")\n\n return tf.expand_dims(annotation_pred, dim=3), conv_t3\n\n\ndef train(loss_val, var_list):\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n grads = optimizer.compute_gradients(loss_val, var_list=var_list)\n if FLAGS.debug:\n # print(len(var_list))\n for grad, var in grads:\n utils.add_gradient_summary(grad, var)\n return optimizer.apply_gradients(grads)\n\n\ndef main(argv=None):\n keep_probability = tf.placeholder(tf.float32, name=\"keep_probabilty\")\n image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=\"input_image\")\n\n#debug\n annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"annotation\")\n# annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=\"annotation\")\n\n pred_annotation, 
logits = inference(image, keep_probability)\n tf.summary.image(\"input_image\", image, max_outputs=2)\n tf.summary.image(\"ground_truth\", tf.cast(annotation, tf.uint8), max_outputs=2)\n tf.summary.image(\"pred_annotation\", tf.cast(pred_annotation, tf.uint8), max_outputs=2)\n\n#debug\n loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,\n labels=tf.squeeze(annotation, squeeze_dims=[3]),\n name=\"entropy\")))\n\n# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,\n# labels=annotation,\n# name=\"entropy\")))\n\n loss_summary = tf.summary.scalar(\"entropy\", loss)\n\n trainable_var = tf.trainable_variables()\n if FLAGS.debug:\n for var in trainable_var: utils.add_to_regularization_and_summary(var)\n train_op = train(loss, trainable_var)\n\n print(\"Setting up summary op...\")\n summary_op = tf.summary.merge_all()\n\n print(\"Setting up image reader...\")\n train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)\n print(len(train_records))\n print(len(valid_records))\n\n print(\"Setting up dataset reader\")\n image_options = {'resize': False, 'resize_size': IMAGE_SIZE} #不要resize,否则label.png会变成图片形式\n if FLAGS.mode == 'train':\n train_dataset_reader = dataset.BatchDatset(train_records, image_options)\n validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)\n\n sess = tf.Session()\n\n print(\"Setting up Saver...\")\n saver = tf.train.Saver()\n\n # create two summary writers to show training loss and validation loss in the same graph\n # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir\n train_writer = tf.summary.FileWriter(osp.join(FLAGS.logs_dir , 'train'), sess.graph)\n validation_writer = tf.summary.FileWriter(osp.join(FLAGS.logs_dir , 'validation'))\n\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"Model restored...\")\n\n if FLAGS.mode == \"train\":\n for itr in xrange(MAX_ITERATION):\n train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)\n feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85}\n\n sess.run(train_op, feed_dict=feed_dict)\n \n if itr % 10 == 0:\n train_loss, summary_str = sess.run([loss, loss_summary], feed_dict=feed_dict)\n print(\"Step: %d, Train_loss:%g\" % (itr, train_loss))\n train_writer.add_summary(summary_str, itr)\n\n if itr % 500 == 0:\n valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)\n valid_loss, summary_sva = sess.run([loss, loss_summary], feed_dict={image: valid_images, annotation: valid_annotations,\n keep_probability: 1.0})\n print(\"%s ---> Validation_loss: %g\" % (datetime.datetime.now(), valid_loss))\n\n # add validation loss to TensorBoard\n validation_writer.add_summary(summary_sva, itr)\n saver.save(sess, FLAGS.logs_dir + \"model.ckpt\", itr)\n \n \n elif FLAGS.mode == \"visualize\":\n valid_images, valid_annotations = validation_dataset_reader.get_random_batch(FLAGS.batch_size)\n pred = sess.run(pred_annotation, feed_dict={image: valid_images, annotation: valid_annotations,\n keep_probability: 1.0})\n valid_annotations = np.squeeze(valid_annotations, axis=3)\n pred = np.squeeze(pred, axis=3)\n\n for itr in range(FLAGS.batch_size):\n utils.save_image(valid_images[itr].astype(np.uint8), FLAGS.logs_dir, name=\"inp_\" + str(5+itr))\n 
utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name=\"gt_\" + str(5+itr))\n utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name=\"pred_\" + str(5+itr))\n print(\"Saved image: %d\" % itr)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "numpy.mean", "tensorflow.flags.DEFINE_integer", "tensorflow.stack", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.trainable_variables", "tensorflow.shape", "tensorflow.flags.DEFINE_string", "tensorflow.argmax", "tensorflow.train.Saver", "tensorflow.flags.DEFINE_float", "tensorflow.variable_scope", "numpy.transpose", "tensorflow.squeeze", "tensorflow.app.run", "tensorflow.flags.DEFINE_bool", "tensorflow.add", "tensorflow.nn.dropout", "tensorflow.train.AdamOptimizer", "tensorflow.nn.relu", "tensorflow.summary.scalar", "tensorflow.expand_dims", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.summary.merge_all", "numpy.squeeze", "tensorflow.summary.image" ] ]
anandmy/PyBaMM
[ "dd8e5ebf85dc4324e163adad274ccb56c88f3698" ]
[ "tests/unit/test_simulation.py" ]
[ "import pybamm\nimport numpy as np\nimport pandas as pd\nimport os\nimport unittest\n\n\nclass TestSimulation(unittest.TestCase):\n def test_basic_ops(self):\n\n model = pybamm.lithium_ion.SPM()\n sim = pybamm.Simulation(model)\n\n self.assertEqual(model.__class__, sim._model_class)\n\n # check that the model is unprocessed\n self.assertEqual(sim._mesh, None)\n self.assertEqual(sim._disc, None)\n for val in list(sim.model.rhs.values()):\n self.assertTrue(val.has_symbol_of_classes(pybamm.Parameter))\n self.assertFalse(val.has_symbol_of_classes(pybamm.Matrix))\n\n sim.set_parameters()\n self.assertEqual(sim._mesh, None)\n self.assertEqual(sim._disc, None)\n for val in list(sim.model_with_set_params.rhs.values()):\n self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))\n self.assertFalse(val.has_symbol_of_classes(pybamm.Matrix))\n # Make sure model is unchanged\n self.assertNotEqual(sim.model, model)\n for val in list(model.rhs.values()):\n self.assertTrue(val.has_symbol_of_classes(pybamm.Parameter))\n self.assertFalse(val.has_symbol_of_classes(pybamm.Matrix))\n\n sim.build()\n self.assertFalse(sim._mesh is None)\n self.assertFalse(sim._disc is None)\n for val in list(sim.built_model.rhs.values()):\n self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))\n # skip test for scalar variables (e.g. discharge capacity)\n if val.size > 1:\n self.assertTrue(val.has_symbol_of_classes(pybamm.Matrix))\n\n def test_specs_deprecated(self):\n model = pybamm.lithium_ion.SPM()\n sim = pybamm.Simulation(model)\n with self.assertRaisesRegex(NotImplementedError, \"specs\"):\n sim.specs()\n\n def test_solve(self):\n\n sim = pybamm.Simulation(pybamm.lithium_ion.SPM())\n sim.solve([0, 600])\n self.assertFalse(sim._solution is None)\n for val in list(sim.built_model.rhs.values()):\n self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))\n # skip test for scalar variables (e.g. discharge capacity)\n if val.size > 1:\n self.assertTrue(val.has_symbol_of_classes(pybamm.Matrix))\n\n # test solve without check\n sim = pybamm.Simulation(pybamm.lithium_ion.SPM())\n sim.solve(t_eval=[0, 600], check_model=False)\n for val in list(sim.built_model.rhs.values()):\n self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))\n # skip test for scalar variables (e.g. 
discharge capacity)\n if val.size > 1:\n self.assertTrue(val.has_symbol_of_classes(pybamm.Matrix))\n\n def test_solve_non_battery_model(self):\n\n model = pybamm.BaseModel()\n v = pybamm.Variable(\"v\")\n model.rhs = {v: -v}\n model.initial_conditions = {v: 1}\n model.variables = {\"v\": v}\n sim = pybamm.Simulation(\n model, solver=pybamm.ScipySolver(rtol=1e-10, atol=1e-10)\n )\n\n sim.solve(np.linspace(0, 1, 100))\n np.testing.assert_array_equal(sim.solution.t, np.linspace(0, 1, 100))\n np.testing.assert_array_almost_equal(\n sim.solution[\"v\"].entries, np.exp(-np.linspace(0, 1, 100))\n )\n\n def test_solve_already_partially_processed_model(self):\n\n model = pybamm.lithium_ion.SPM()\n\n # Process model manually\n geometry = model.default_geometry\n param = model.default_parameter_values\n param.process_model(model)\n param.process_geometry(geometry)\n # Let simulation take over\n sim = pybamm.Simulation(model)\n sim.solve([0, 600])\n\n # Discretised manually\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n # Let simulation take over\n sim = pybamm.Simulation(model)\n sim.solve([0, 600])\n\n def test_reuse_commands(self):\n\n sim = pybamm.Simulation(pybamm.lithium_ion.SPM())\n\n sim.set_parameters()\n sim.set_parameters()\n\n sim.build()\n sim.build()\n\n sim.solve([0, 600])\n sim.solve([0, 600])\n\n sim.build()\n sim.solve([0, 600])\n sim.set_parameters()\n\n def test_set_crate(self):\n model = pybamm.lithium_ion.SPM()\n current_1C = model.default_parameter_values[\"Current function [A]\"]\n sim = pybamm.Simulation(model, C_rate=2)\n self.assertEqual(sim.parameter_values[\"Current function [A]\"], 2 * current_1C)\n self.assertEqual(sim.C_rate, 2)\n\n def test_get_variable_array(self):\n\n sim = pybamm.Simulation(pybamm.lithium_ion.SPM())\n sim.solve([0, 600])\n\n phi_s_n = sim.get_variable_array(\"Negative electrode potential\")\n\n self.assertIsInstance(phi_s_n, np.ndarray)\n\n c_s_n_surf, c_e = sim.get_variable_array(\n \"Negative particle surface concentration\", \"Electrolyte concentration\"\n )\n\n self.assertIsInstance(c_s_n_surf, np.ndarray)\n self.assertIsInstance(c_e, np.ndarray)\n\n def test_set_external_variable(self):\n model_options = {\n \"thermal\": \"lumped\",\n \"external submodels\": [\"thermal\", \"negative particle\"],\n }\n model = pybamm.lithium_ion.SPMe(model_options)\n sim = pybamm.Simulation(model)\n\n var = pybamm.standard_spatial_vars\n Nr = model.default_var_pts[var.r_n]\n\n T_av = 0\n c_s_n_av = np.ones((Nr, 1)) * 0.5\n external_variables = {\n \"Volume-averaged cell temperature\": T_av,\n \"X-averaged negative particle concentration\": c_s_n_av,\n }\n\n # Step\n dt = 0.1\n for _ in range(5):\n sim.step(dt, external_variables=external_variables)\n sim.plot(testing=True)\n\n # Solve\n t_eval = np.linspace(0, 3600)\n sim.solve(t_eval, external_variables=external_variables)\n sim.plot(testing=True)\n\n def test_step(self):\n\n dt = 0.001\n model = pybamm.lithium_ion.SPM()\n sim = pybamm.Simulation(model)\n sim.step(dt) # 1 step stores first two points\n tau = sim.model.timescale.evaluate()\n self.assertEqual(sim.solution.t.size, 2)\n self.assertEqual(sim.solution.y[0, :].size, 2)\n self.assertEqual(sim.solution.t[0], 0)\n self.assertEqual(sim.solution.t[1], dt / tau)\n sim.step(dt) # automatically append the next step\n self.assertEqual(sim.solution.t.size, 3)\n self.assertEqual(sim.solution.y[0, :].size, 3)\n 
self.assertEqual(sim.solution.t[0], 0)\n self.assertEqual(sim.solution.t[1], dt / tau)\n self.assertEqual(sim.solution.t[2], 2 * dt / tau)\n sim.step(dt, save=False) # now only store the two end step points\n self.assertEqual(sim.solution.t.size, 2)\n self.assertEqual(sim.solution.y[0, :].size, 2)\n self.assertEqual(sim.solution.t[0], 2 * dt / tau)\n self.assertEqual(sim.solution.t[1], 3 * dt / tau)\n\n def test_solve_with_inputs(self):\n model = pybamm.lithium_ion.SPM()\n param = model.default_parameter_values\n param.update({\"Current function [A]\": \"[input]\"})\n sim = pybamm.Simulation(model, parameter_values=param)\n sim.solve(t_eval=[0, 600], inputs={\"Current function [A]\": 1})\n np.testing.assert_array_equal(sim.solution.inputs[\"Current function [A]\"], 1)\n\n def test_step_with_inputs(self):\n dt = 0.001\n model = pybamm.lithium_ion.SPM()\n param = model.default_parameter_values\n param.update({\"Current function [A]\": \"[input]\"})\n sim = pybamm.Simulation(model, parameter_values=param)\n sim.step(\n dt, inputs={\"Current function [A]\": 1}\n ) # 1 step stores first two points\n tau = sim.model.timescale.evaluate()\n self.assertEqual(sim.solution.t.size, 2)\n self.assertEqual(sim.solution.y[0, :].size, 2)\n self.assertEqual(sim.solution.t[0], 0)\n self.assertEqual(sim.solution.t[1], dt / tau)\n np.testing.assert_array_equal(sim.solution.inputs[\"Current function [A]\"], 1)\n sim.step(\n dt, inputs={\"Current function [A]\": 2}\n ) # automatically append the next step\n self.assertEqual(sim.solution.t.size, 3)\n self.assertEqual(sim.solution.y[0, :].size, 3)\n self.assertEqual(sim.solution.t[0], 0)\n self.assertEqual(sim.solution.t[1], dt / tau)\n self.assertEqual(sim.solution.t[2], 2 * dt / tau)\n np.testing.assert_array_equal(\n sim.solution.inputs[\"Current function [A]\"], np.array([[1, 1, 2]])\n )\n\n def test_save_load(self):\n model = pybamm.lead_acid.LOQS()\n model.use_jacobian = True\n sim = pybamm.Simulation(model)\n\n sim.save(\"test.pickle\")\n sim_load = pybamm.load_sim(\"test.pickle\")\n self.assertEqual(sim.model.name, sim_load.model.name)\n\n # save after solving\n sim.solve([0, 600])\n sim.save(\"test.pickle\")\n sim_load = pybamm.load_sim(\"test.pickle\")\n self.assertEqual(sim.model.name, sim_load.model.name)\n\n # with python formats\n model.convert_to_format = None\n sim = pybamm.Simulation(model)\n sim.solve([0, 600])\n sim.save(\"test.pickle\")\n model.convert_to_format = \"python\"\n sim = pybamm.Simulation(model)\n sim.solve([0, 600])\n with self.assertRaisesRegex(\n NotImplementedError, \"Cannot save simulation if model format is python\"\n ):\n sim.save(\"test.pickle\")\n\n def test_save_load_dae(self):\n model = pybamm.lead_acid.LOQS({\"surface form\": \"algebraic\"})\n model.use_jacobian = True\n sim = pybamm.Simulation(model)\n\n # save after solving\n sim.solve([0, 600])\n sim.save(\"test.pickle\")\n sim_load = pybamm.load_sim(\"test.pickle\")\n self.assertEqual(sim.model.name, sim_load.model.name)\n\n # with python format\n model.convert_to_format = None\n sim = pybamm.Simulation(model)\n sim.solve([0, 600])\n sim.save(\"test.pickle\")\n\n # with Casadi solver\n model.convert_to_format = \"casadi\"\n sim = pybamm.Simulation(model, solver=pybamm.CasadiSolver())\n sim.solve([0, 600])\n sim.save(\"test.pickle\")\n sim_load = pybamm.load_sim(\"test.pickle\")\n self.assertEqual(sim.model.name, sim_load.model.name)\n\n def test_plot(self):\n sim = pybamm.Simulation(pybamm.lithium_ion.SPM())\n\n # test exception if not solved\n with 
self.assertRaises(ValueError):\n sim.plot()\n\n # now solve and plot\n t_eval = np.linspace(0, 100, 5)\n sim.solve(t_eval=t_eval)\n sim.plot(testing=True)\n\n # test quick_plot_vars deprecation error\n with self.assertRaisesRegex(NotImplementedError, \"'quick_plot_vars'\"):\n sim.plot(quick_plot_vars=[\"var\"])\n\n def test_drive_cycle_data(self):\n model = pybamm.lithium_ion.SPM()\n param = model.default_parameter_values\n param[\"Current function [A]\"] = \"[current data]US06\"\n\n with self.assertRaisesRegex(NotImplementedError, \"Drive cycle from data\"):\n pybamm.Simulation(model, parameter_values=param)\n\n def test_drive_cycle_interpolant(self):\n model = pybamm.lithium_ion.SPM()\n param = model.default_parameter_values\n # Import drive cycle from file\n drive_cycle = pd.read_csv(\n pybamm.get_parameters_filepath(\n os.path.join(\"input\", \"drive_cycles\", \"US06.csv\")\n ),\n comment=\"#\",\n skip_blank_lines=True,\n header=None,\n )\n\n timescale = param.evaluate(model.timescale)\n current_interpolant = pybamm.Interpolant(\n drive_cycle.to_numpy(), timescale * pybamm.t\n )\n\n param[\"Current function [A]\"] = current_interpolant\n\n time_data = drive_cycle.values[:, 0]\n\n sim = pybamm.Simulation(model, parameter_values=param)\n\n # check solution is returned at the times in the data\n sim.solve()\n tau = sim.model.timescale.evaluate()\n np.testing.assert_array_almost_equal(sim.solution.t, time_data / tau)\n\n # check warning raised if the largest gap in t_eval is bigger than the\n # smallest gap in the data\n with self.assertWarns(pybamm.SolverWarning):\n sim.solve(t_eval=np.linspace(0, 1, 100))\n\n # check warning raised if t_eval doesnt contain time_data , but has a finer\n # resolution (can still solve, but good for users to know they dont have\n # the solution returned at the data points)\n with self.assertWarns(pybamm.SolverWarning):\n sim.solve(t_eval=np.linspace(0, time_data[-1], 800))\n\n def test_discontinuous_current(self):\n def car_current(t):\n current = (\n 1 * (t >= 0) * (t <= 1000)\n - 0.5 * (1000 < t) * (t <= 2000)\n + 0.5 * (2000 < t)\n )\n return current\n\n model = pybamm.lithium_ion.DFN()\n param = model.default_parameter_values\n param[\"Current function [A]\"] = car_current\n\n sim = pybamm.Simulation(\n model, parameter_values=param, solver=pybamm.CasadiSolver(mode=\"fast\")\n )\n sim.solve([0, 3600])\n current = sim.solution[\"Current [A]\"]\n self.assertEqual(current(0), 1)\n self.assertEqual(current(1500), -0.5)\n self.assertEqual(current(3000), 0.5)\n\n def test_t_eval(self):\n model = pybamm.lithium_ion.SPM()\n sim = pybamm.Simulation(model)\n\n # test no t_eval\n with self.assertRaisesRegex(pybamm.SolverError, \"'t_eval' must be provided\"):\n sim.solve()\n\n # test t_eval list of length != 2\n with self.assertRaisesRegex(pybamm.SolverError, \"'t_eval' can be provided\"):\n sim.solve(t_eval=[0, 1, 2])\n\n # tets list gets turned into np.linspace(t0, tf, 100)\n sim.solve(t_eval=[0, 10])\n np.testing.assert_array_almost_equal(sim.t_eval, np.linspace(0, 10, 100))\n\n def test_battery_model_with_input_height(self):\n # load model\n model = pybamm.lithium_ion.SPM()\n # load parameter values and process model and geometry\n param = model.default_parameter_values\n param.update({\"Electrode height [m]\": \"[input]\"})\n # solve model for 1 minute\n t_eval = np.linspace(0, 60, 11)\n inputs = {\"Electrode height [m]\": 0.2}\n sim = pybamm.Simulation(model=model, parameter_values=param)\n sim.solve(t_eval=t_eval, inputs=inputs)\n\n\nif __name__ == 
\"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.testing.assert_array_equal", "numpy.ones", "numpy.testing.assert_array_almost_equal", "numpy.linspace" ] ]
GuyTevet/MotionCLIP
[ "c2b9f40b0e721e42981f3e8b58133a1c51fde715" ]
[ "src/datasets/dataset.py" ]
[ "import random\n\nimport numpy as np\nimport torch\nfrom src.utils.action_label_to_idx import action_label_to_idx\nfrom ..utils.tensors import collate\nfrom ..utils.misc import to_torch\nimport src.utils.rotation_conversions as geometry\nUNSUPERVISED_BABEL_ACTION_CAT_LABELS_IDXS = [48, 50, 28, 38, 52, 11, 29, 19, 51, 22, 14, 21, 26, 10, 24]\n\nPOSE_REPS = [\"xyz\", \"rotvec\", \"rotmat\", \"rotquat\", \"rot6d\"]\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, num_frames=1, sampling=\"conseq\", sampling_step=1, split=\"train\",\n pose_rep=\"rot6d\", translation=True, glob=True, max_len=-1, min_len=-1, num_seq_max=-1, **kwargs):\n self.num_frames = num_frames\n self.sampling = sampling\n self.sampling_step = sampling_step\n self.split = split\n self.pose_rep = pose_rep\n self.translation = translation\n self.glob = glob\n self.max_len = max_len\n self.min_len = min_len\n self.num_seq_max = num_seq_max\n\n self.align_pose_frontview = kwargs.get('align_pose_frontview', False)\n self.use_action_cat_as_text_labels = kwargs.get('use_action_cat_as_text_labels', False)\n self.only_60_classes = kwargs.get('only_60_classes', False)\n self.leave_out_15_classes = kwargs.get('leave_out_15_classes', False)\n self.use_only_15_classes = kwargs.get('use_only_15_classes', False)\n\n if self.split not in [\"train\", \"val\", \"test\"]:\n raise ValueError(f\"{self.split} is not a valid split\")\n\n super().__init__()\n\n # to remove shuffling\n self._original_train = None\n self._original_test = None\n\n # TBD\n # self._actions\n # self._train/self._test\n # self._num_frames_in_video[data_index]\n # self._action_to_label[action]\n # self._label_to_action[label]\n # self._load_pose(data_index, frame_ix)\n # self._actions[ind] # => carefull changed here\n # self._action_classes[action]\n\n def action_to_label(self, action):\n return self._action_to_label[action]\n\n def label_to_action(self, label):\n import numbers\n if isinstance(label, numbers.Integral):\n return self._label_to_action[label]\n else: # if it is one hot vector\n label = np.argmax(label)\n return self._label_to_action[label]\n\n def get_pose_data(self, data_index, frame_ix):\n pose = self._load(data_index, frame_ix)\n label = self.get_label(data_index)\n return pose, label\n\n def get_clip_image(self, ind):\n clip_image = self._clip_images[ind]\n return clip_image\n\n def get_clip_image_emb(self, ind):\n clip_image = self._clip_images_emb[ind]\n return clip_image\n\n def get_clip_path(self, ind):\n clip_path = self._clip_pathes[ind]\n return clip_path\n\n def get_clip_text(self, ind, frame_ix):\n clip_text = self._clip_texts[ind][frame_ix]\n return clip_text\n\n def get_clip_action_cat(self, ind, frame_ix):\n actions_cat = self._actions_cat[ind][frame_ix]\n return actions_cat\n\n def get_label(self, ind):\n action = self.get_action(ind)\n return self.action_to_label(action)\n\n def get_action(self, ind):\n return self._actions[ind]\n\n def action_to_action_name(self, action):\n return self._action_classes[action]\n\n def label_to_action_name(self, label):\n action = self.label_to_action(label)\n return self.action_to_action_name(action)\n\n def __getitem__(self, index):\n if self.split == 'train':\n data_index = self._train[index]\n else:\n data_index = self._test[index]\n\n # inp, target = self._get_item_data_index(data_index)\n # return inp, target\n return self._get_item_data_index(data_index)\n\n def _load(self, ind, frame_ix):\n pose_rep = self.pose_rep\n if pose_rep == \"xyz\" or self.translation:\n if getattr(self, 
\"_load_joints3D\", None) is not None:\n # Locate the root joint of initial pose at origin\n joints3D = self._load_joints3D(ind, frame_ix)\n joints3D = joints3D - joints3D[0, 0, :]\n ret = to_torch(joints3D)\n if self.translation:\n ret_tr = ret[:, 0, :]\n else:\n if pose_rep == \"xyz\":\n raise ValueError(\"This representation is not possible.\")\n if getattr(self, \"_load_translation\") is None:\n raise ValueError(\"Can't extract translations.\")\n ret_tr = self._load_translation(ind, frame_ix)\n ret_tr = to_torch(ret_tr - ret_tr[0])\n\n if pose_rep != \"xyz\":\n if getattr(self, \"_load_rotvec\", None) is None:\n raise ValueError(\"This representation is not possible.\")\n else:\n pose = self._load_rotvec(ind, frame_ix)\n if not self.glob:\n pose = pose[:, 1:, :]\n pose = to_torch(pose)\n if self.align_pose_frontview:\n first_frame_root_pose_matrix = geometry.axis_angle_to_matrix(pose[0][0])\n all_root_poses_matrix = geometry.axis_angle_to_matrix(pose[:, 0, :])\n aligned_root_poses_matrix = torch.matmul(torch.transpose(first_frame_root_pose_matrix, 0, 1),\n all_root_poses_matrix)\n pose[:, 0, :] = geometry.matrix_to_axis_angle(aligned_root_poses_matrix)\n\n if self.translation:\n ret_tr = torch.matmul(torch.transpose(first_frame_root_pose_matrix, 0, 1).float(),\n torch.transpose(ret_tr, 0, 1))\n ret_tr = torch.transpose(ret_tr, 0, 1)\n\n if pose_rep == \"rotvec\":\n ret = pose\n elif pose_rep == \"rotmat\":\n ret = geometry.axis_angle_to_matrix(pose).view(*pose.shape[:2], 9)\n elif pose_rep == \"rotquat\":\n ret = geometry.axis_angle_to_quaternion(pose)\n elif pose_rep == \"rot6d\":\n ret = geometry.matrix_to_rotation_6d(geometry.axis_angle_to_matrix(pose))\n if pose_rep != \"xyz\" and self.translation:\n padded_tr = torch.zeros((ret.shape[0], ret.shape[2]), dtype=ret.dtype)\n padded_tr[:, :3] = ret_tr\n ret = torch.cat((ret, padded_tr[:, None]), 1)\n ret = ret.permute(1, 2, 0).contiguous()\n return ret.float()\n\n def _get_item_data_index(self, data_index):\n nframes = self._num_frames_in_video[data_index]\n\n if self.num_frames == -1 and (self.max_len == -1 or nframes <= self.max_len):\n frame_ix = np.arange(nframes)\n else:\n if self.num_frames == -2:\n if self.min_len <= 0:\n raise ValueError(\"You should put a min_len > 0 for num_frames == -2 mode\")\n if self.max_len != -1:\n max_frame = min(nframes, self.max_len)\n else:\n max_frame = nframes\n\n num_frames = random.randint(self.min_len, max(max_frame, self.min_len))\n else:\n num_frames = self.num_frames if self.num_frames != -1 else self.max_len\n # sampling goal: input: ----------- 11 nframes\n # o--o--o--o- 4 ninputs\n #\n # step number is computed like that: [(11-1)/(4-1)] = 3\n # [---][---][---][-\n # So step = 3, and we take 0 to step*ninputs+1 with steps\n # [o--][o--][o--][o-]\n # then we can randomly shift the vector\n # -[o--][o--][o--]o\n # If there are too much frames required\n if num_frames > nframes:\n fair = False # True\n if fair:\n # distills redundancy everywhere\n choices = np.random.choice(range(nframes),\n num_frames,\n replace=True)\n frame_ix = sorted(choices)\n else:\n # adding the last frame until done\n ntoadd = max(0, num_frames - nframes)\n lastframe = nframes - 1\n padding = lastframe * np.ones(ntoadd, dtype=int)\n frame_ix = np.concatenate((np.arange(0, nframes),\n padding))\n\n elif self.sampling in [\"conseq\", \"random_conseq\"]:\n step_max = (nframes - 1) // (num_frames - 1)\n if self.sampling == \"conseq\":\n if self.sampling_step == -1 or self.sampling_step * (num_frames - 1) >= nframes:\n 
step = step_max\n else:\n step = self.sampling_step\n elif self.sampling == \"random_conseq\":\n step = random.randint(1, step_max)\n\n lastone = step * (num_frames - 1)\n shift_max = nframes - lastone - 1\n shift = random.randint(0, max(0, shift_max - 1))\n frame_ix = shift + np.arange(0, lastone + 1, step)\n\n elif self.sampling == \"random\":\n choices = np.random.choice(range(nframes),\n num_frames,\n replace=False)\n frame_ix = sorted(choices)\n\n else:\n raise ValueError(\"Sampling not recognized.\")\n\n inp, target = self.get_pose_data(data_index, frame_ix)\n\n\n output = {'inp': inp, 'target': target}\n if hasattr(self, 'db') and 'clip_images' in self.db.keys():\n output['clip_image'] = self.get_clip_image(data_index)\n\n if hasattr(self, 'db') and 'clip_images_emb' in self.db.keys():\n output['clip_images_emb'] = self.get_clip_image_emb(data_index)\n\n if hasattr(self, 'db') and 'clip_pathes' in self.db.keys():\n output['clip_path'] = self.get_clip_path(data_index)\n\n if hasattr(self, 'db') and self.clip_label_text in self.db.keys():\n text_labels = self.get_clip_text(data_index, frame_ix)\n text_labels = \" and \".join(list(np.unique(text_labels)))\n output['clip_text'] = text_labels\n\n if hasattr(self, 'db') and 'action_cat' in self.db.keys() and self.use_action_cat_as_text_labels:\n categories = self.get_clip_action_cat(data_index, frame_ix)\n unique_cats = np.unique(categories)\n all_valid_cats = []\n for multi_cats in unique_cats:\n for cat in multi_cats.split(\",\"):\n if cat not in action_label_to_idx:\n continue\n cat_idx = action_label_to_idx[cat]\n if (cat_idx >= 120) or (self.only_60_classes and cat_idx >= 60) or (self.leave_out_15_classes and cat_idx in UNSUPERVISED_BABEL_ACTION_CAT_LABELS_IDXS):\n continue\n if self.use_only_15_classes and (cat_idx not in UNSUPERVISED_BABEL_ACTION_CAT_LABELS_IDXS):\n continue\n all_valid_cats.extend([cat])\n\n if len(all_valid_cats) == 0: # No valid category available\n return None\n\n choosen_cat = np.random.choice(all_valid_cats, size=1)[0]\n # Replace clip text\n output['clip_text'] = choosen_cat\n output['all_categories'] = all_valid_cats\n return output\n\n def get_label_sample(self, label, n=1, return_labels=False, return_index=False):\n if self.split == 'train':\n index = self._train\n else:\n index = self._test\n\n action = self.label_to_action(label)\n choices = np.argwhere(np.array(self._actions)[index] == action).squeeze(1)\n\n if n == 1:\n data_index = index[np.random.choice(choices)]\n data = self._get_item_data_index(data_index)\n x, y = data['inp'], data['target']\n assert (label == y)\n y = label\n else:\n data_index = np.random.choice(choices, n)\n x = np.stack([self._get_item_data_index(index[di])['inp'] for di in data_index])\n y = label * np.ones(n, dtype=int)\n if return_labels:\n if return_index:\n return x, y, data_index\n return x, y\n else:\n if return_index:\n return x, data_index\n return x\n\n def get_label_sample_batch(self, labels):\n samples = [self.get_label_sample(label, n=1, return_labels=True, return_index=False) for label in labels]\n samples = [{'inp': x[0], 'target': x[1]} for x in samples] # Fix this to adapt new collate func\n batch = collate(samples)\n x = batch[\"x\"]\n mask = batch[\"mask\"]\n lengths = mask.sum(1)\n return x, mask, lengths\n\n def get_mean_length_label(self, label):\n if self.num_frames != -1:\n return self.num_frames\n\n if self.split == 'train':\n index = self._train\n else:\n index = self._test\n\n action = self.label_to_action(label)\n choices = 
np.argwhere(self._actions[index] == action).squeeze(1)\n lengths = self._num_frames_in_video[np.array(index)[choices]]\n\n if self.max_len == -1:\n return np.mean(lengths)\n else:\n # make the lengths less than max_len\n lengths[lengths > self.max_len] = self.max_len\n return np.mean(lengths)\n\n def get_stats(self):\n if self.split == 'train':\n index = self._train\n else:\n index = self._test\n\n numframes = self._num_frames_in_video[index]\n allmeans = np.array([self.get_mean_length_label(x) for x in range(self.num_classes)])\n\n stats = {\"name\": self.dataname,\n \"number of classes\": self.num_classes,\n \"number of sequences\": len(self),\n \"duration: min\": int(numframes.min()),\n \"duration: max\": int(numframes.max()),\n \"duration: mean\": int(numframes.mean()),\n \"duration mean/action: min\": int(allmeans.min()),\n \"duration mean/action: max\": int(allmeans.max()),\n \"duration mean/action: mean\": int(allmeans.mean())}\n return stats\n\n def __len__(self):\n num_seq_max = getattr(self, \"num_seq_max\", -1)\n if num_seq_max == -1:\n from math import inf\n num_seq_max = inf\n\n if self.split == 'train':\n return min(len(self._train), num_seq_max)\n else:\n return min(len(self._test), num_seq_max)\n\n def __repr__(self):\n return f\"{self.dataname} dataset: ({len(self)}, _, ..)\"\n\n def update_parameters(self, parameters):\n for i in range(self.__len__()):\n inp = self[i]\n if inp is not None and inp['inp'] is not None:\n self.njoints, self.nfeats, _ = inp['inp'].shape\n parameters[\"num_classes\"] = self.num_classes\n parameters[\"nfeats\"] = self.nfeats\n parameters[\"njoints\"] = self.njoints\n break\n\n def shuffle(self):\n if self.split == 'train':\n random.shuffle(self._train)\n else:\n random.shuffle(self._test)\n\n def reset_shuffle(self):\n if self.split == 'train':\n if self._original_train is None:\n self._original_train = self._train\n else:\n self._train = self._original_train\n else:\n if self._original_test is None:\n self._original_test = self._test\n else:\n self._test = self._original_test\n\n" ]
[ [ "torch.zeros", "torch.cat", "numpy.array", "numpy.random.choice", "numpy.ones", "numpy.mean", "numpy.argmax", "numpy.arange", "numpy.argwhere", "torch.transpose", "numpy.unique" ] ]
akshitj1/mavsim_template_files
[ "25c646ed274385c13492ed64d9821c1b8033c8ef" ]
[ "mavsim_python/parameters/planner_parameters.py" ]
[ "import sys\nsys.path.append('..')\nimport numpy as np\nimport parameters.aerosonde_parameters as MAV\n\n# size of the waypoint array used for the path planner. This is the\n# maximum number of waypoints that might be transmitted to the path\n# manager.\nsize_waypoint_array = 100\n\n# airspeed commanded by planner\nVa0 = MAV.u0\n\n# max possible roll angle\nphi_max = np.radians(20)\n\n# minimum turn radius\nR_min = Va0**2 / MAV.gravity / np.tan(phi_max)\n\n# # create random city map\n# city_width = 2000; # the city is of size (width)x(width)\n# building_height = 300; # maximum height of buildings\n# #building_height = 1; # maximum height of buildings (for camera)\n# num_blocks = 5; # number of blocks in city\n# street_width = .8; # percent of block that is street.\n# P.map = createWorld(city_width, building_height, num_blocks, street_width);\n\n\n\n" ]
[ [ "numpy.radians", "numpy.tan" ] ]
emiliano-f/time-series-forecasting-crypto
[ "918da08105cdf784fdd7ca76e9707b226b3ebfd3" ]
[ "code/lstm.py" ]
[ "from keras.models import Sequential\nfrom keras.layers import Activation, Dense, Dropout, LSTM\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n\ndef train_test_split(df, test_size=0.2):\n split_row = len(df) - int(test_size * len(df))\n train_data = df.iloc[:split_row]\n test_data = df.iloc[split_row:]\n return (train_data, test_data)\n\ndef line_plot(line1, line2, label1=None, label2=None, title='', lw=2):\n fig, ax = plt.subplots(1, figsize=(13, 7))\n ax.plot(line1, label=label1, linewidth=lw)\n ax.plot(line2, label=label2, linewidth=lw)\n ax.set_ylabel('USD', fontsize=14)\n ax.set_xlabel('Date', fontsize=14)\n ax.set_title(title, fontsize=16)\n ax.legend(loc='best', fontsize=16);\n \ndef normalise_zero_base(df):\n return df / df.iloc[0] - 1\n\ndef normalise_min_max(df):\n return (df - df.min()) / (df.max() - df.min())\n\ndef extract_window_data(df, window_len=5, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_zero_base(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)\n\ndef prepare_data(df, target_col, window_len=10, zero_base=True, test_size=0.2):\n datasets = train_test_split(df, test_size=test_size)\n train_data = datasets[0]\n test_data = datasets[1]\n X_train = extract_window_data(train_data, window_len, zero_base)\n X_test = extract_window_data(test_data, window_len, zero_base)\n y_train = train_data[target_col][window_len:].values\n y_test = test_data[target_col][window_len:].values\n if zero_base:\n y_train = y_train / train_data[target_col][:-window_len].values - 1\n y_test = y_test / test_data[target_col][:-window_len].values - 1\n\n return train_data, test_data, X_train, X_test, y_train, y_test\n\ndef build_lstm_model(input_data, output_size, neurons=100, activ_func='linear',\n dropout=0.2, loss='mse', optimizer='adam'):\n model = Sequential()\n model.add(LSTM(neurons, input_shape=(input_data.shape[1], input_data.shape[2])))\n model.add(Dropout(dropout))\n model.add(Dense(units=output_size))\n model.add(Activation(activ_func))\n\n model.compile(loss=loss, optimizer=optimizer)\n return model\n\nnames = [\"btc\", \"eth\", \"ada\"]\ntimes = [\"1d\", \"30m\"]\ntarget_col = 'Adj Close'\n\nnp.random.seed(42)\nwindow_len = 5\ntest_size = 0.2\nzero_base = True\nlstm_neurons = 100\nepochs = 20\nbatch_size = 32\nloss = 'mse'\ndropout = 0.2\noptimizer = 'adam'\n\nfor coin in names:\n for time in times:\n # Load\n path = '../data/' + coin + \"_\" + time + \".csv\" \n hist = pd.read_csv(path)\n hist = hist.set_index('Date')\n hist.index = pd.to_datetime(hist.index, yearfirst=True)\n \n # Plot\n datasets = train_test_split(hist, test_size=0.2)\n train = datasets[0]\n test = datasets[1]\n title = \"Historical Price \" + coin.upper() + \" \" + time \n line_plot(train[target_col], test[target_col], 'Training', 'Test', title)\n\n # Preparing data\n train, test, X_train, X_test, y_train, y_test = prepare_data(\n hist, target_col, window_len=window_len, zero_base=zero_base, test_size=test_size)\n\n # Create model\n model = build_lstm_model(\n X_train, output_size=1, neurons=lstm_neurons, dropout=dropout, loss=loss,\n optimizer=optimizer)\n \n # Fit\n history = model.fit(\n X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=1, shuffle=True)\n\n\n # Plot LSTM\n 
#plt.plot(history.history['loss'],'r',linewidth=2, label='Train loss')\n #plt.plot(history.history['val_loss'], 'g',linewidth=2, label='Validation loss')\n #plt.title('LSTM')\n #plt.xlabel('Epochs')\n #plt.ylabel('MSE')\n #plt.show()\n\n # Predict and evaluate on the normalised targets\n targets = test[target_col][window_len:]\n preds = model.predict(X_test).squeeze()\n\n MAE = mean_absolute_error(preds, y_test)\n MSE = mean_squared_error(preds, y_test)\n R2 = r2_score(y_test, preds)\n print(\"MAE: %.6f MSE: %.6f R2: %.4f\" % (MAE, MSE, R2))\n\n # rescale predictions back to price space and plot against actual prices\n preds = test[target_col].values[:-window_len] * (preds + 1)\n preds = pd.Series(index=targets.index, data=preds)\n line_plot(targets, preds, 'Actual', 'Prediction', title='Comparison: Actual and Predicted Prices', lw=3)\n #input(\"Next?\")" ]
[ [ "pandas.to_datetime", "numpy.array", "sklearn.metrics.mean_squared_error", "numpy.random.seed", "matplotlib.pyplot.subplots", "sklearn.metrics.mean_absolute_error", "pandas.Series", "sklearn.metrics.r2_score", "pandas.read_csv" ] ]
clementchadebec/benchmark_VAE
[ "943e231f9e5dfa40b4eec14d4536f1c229ad9be1" ]
[ "tests/test_VQVAE.py" ]
[ "import os\nfrom copy import deepcopy\n\nimport pytest\nimport torch\nfrom torch.optim import SGD, Adadelta, Adagrad, Adam, RMSprop\n\nfrom pythae.customexception import BadInheritanceError\nfrom pythae.models.base.base_utils import ModelOutput\nfrom pythae.models import VQVAE, VQVAEConfig\n\nfrom pythae.trainers import BaseTrainer, BaseTrainerConfig\nfrom pythae.pipelines import TrainingPipeline\nfrom tests.data.custom_architectures import (\n Decoder_AE_Conv,\n Encoder_AE_Conv,\n NetBadInheritance,\n)\n\nPATH = os.path.dirname(os.path.abspath(__file__))\n\n\n@pytest.fixture(params=[VQVAEConfig(), VQVAEConfig(latent_dim=4)])\ndef model_configs_no_input_dim(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n VQVAEConfig(\n input_dim=(1, 28, 28), latent_dim=4, num_embeddings=10\n ), # ! Needs squared latent_dim !\n VQVAEConfig(\n input_dim=(1, 28, 28),\n beta=0.02,\n latent_dim=4,\n ),\n ]\n)\ndef model_configs(request):\n return request.param\n\n\n@pytest.fixture\ndef custom_encoder(model_configs):\n return Encoder_AE_Conv(model_configs)\n\n\n@pytest.fixture\ndef custom_decoder(model_configs):\n return Decoder_AE_Conv(model_configs)\n\n\nclass Test_Model_Building:\n @pytest.fixture()\n def bad_net(self):\n return NetBadInheritance()\n\n def test_build_model(self, model_configs):\n model = VQVAE(model_configs)\n assert all(\n [\n model.input_dim == model_configs.input_dim,\n model.latent_dim == model_configs.latent_dim,\n ]\n )\n\n def test_raises_bad_inheritance(self, model_configs, bad_net):\n with pytest.raises(BadInheritanceError):\n model = VQVAE(model_configs, encoder=bad_net)\n\n with pytest.raises(BadInheritanceError):\n model = VQVAE(model_configs, decoder=bad_net)\n\n def test_raises_no_input_dim(\n self, model_configs_no_input_dim, custom_encoder, custom_decoder\n ):\n with pytest.raises(AttributeError):\n model = VQVAE(model_configs_no_input_dim)\n\n with pytest.raises(AttributeError):\n model = VQVAE(model_configs_no_input_dim, encoder=custom_encoder)\n\n with pytest.raises(AttributeError):\n model = VQVAE(model_configs_no_input_dim, decoder=custom_decoder)\n\n def test_build_custom_arch(self, model_configs, custom_encoder, custom_decoder):\n\n model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)\n\n assert model.encoder == custom_encoder\n assert not model.model_config.uses_default_encoder\n assert model.decoder == custom_decoder\n assert not model.model_config.uses_default_decoder\n\n model = VQVAE(model_configs, encoder=custom_encoder)\n\n assert model.encoder == custom_encoder\n assert not model.model_config.uses_default_encoder\n assert model.model_config.uses_default_decoder\n\n model = VQVAE(model_configs, decoder=custom_decoder)\n\n assert model.model_config.uses_default_encoder\n assert model.decoder == custom_decoder\n assert not model.model_config.uses_default_decoder\n\n\nclass Test_Model_Saving:\n def test_default_model_saving(self, tmpdir, model_configs):\n\n tmpdir.mkdir(\"dummy_folder\")\n dir_path = dir_path = os.path.join(tmpdir, \"dummy_folder\")\n\n model = VQVAE(model_configs)\n\n model.state_dict()[\"encoder.layers.0.0.weight\"][0] = 0\n\n model.save(dir_path=dir_path)\n\n assert set(os.listdir(dir_path)) == set([\"model_config.json\", \"model.pt\"])\n\n # reload model\n model_rec = VQVAE.load_from_folder(dir_path)\n\n # check configs are the same\n assert model_rec.model_config.__dict__ == model.model_config.__dict__\n\n assert all(\n [\n torch.equal(model_rec.state_dict()[key], model.state_dict()[key])\n for key 
in model.state_dict().keys()\n ]\n )\n\n def test_custom_encoder_model_saving(self, tmpdir, model_configs, custom_encoder):\n\n tmpdir.mkdir(\"dummy_folder\")\n dir_path = dir_path = os.path.join(tmpdir, \"dummy_folder\")\n\n model = VQVAE(model_configs, encoder=custom_encoder)\n\n model.state_dict()[\"encoder.layers.0.0.weight\"][0] = 0\n\n model.save(dir_path=dir_path)\n\n assert set(os.listdir(dir_path)) == set(\n [\"model_config.json\", \"model.pt\", \"encoder.pkl\"]\n )\n\n # reload model\n model_rec = VQVAE.load_from_folder(dir_path)\n\n # check configs are the same\n assert model_rec.model_config.__dict__ == model.model_config.__dict__\n\n assert all(\n [\n torch.equal(model_rec.state_dict()[key], model.state_dict()[key])\n for key in model.state_dict().keys()\n ]\n )\n\n def test_custom_decoder_model_saving(self, tmpdir, model_configs, custom_decoder):\n\n tmpdir.mkdir(\"dummy_folder\")\n dir_path = dir_path = os.path.join(tmpdir, \"dummy_folder\")\n\n model = VQVAE(model_configs, decoder=custom_decoder)\n\n model.state_dict()[\"encoder.layers.0.0.weight\"][0] = 0\n\n model.save(dir_path=dir_path)\n\n assert set(os.listdir(dir_path)) == set(\n [\"model_config.json\", \"model.pt\", \"decoder.pkl\"]\n )\n\n # reload model\n model_rec = VQVAE.load_from_folder(dir_path)\n\n # check configs are the same\n assert model_rec.model_config.__dict__ == model.model_config.__dict__\n\n assert all(\n [\n torch.equal(model_rec.state_dict()[key], model.state_dict()[key])\n for key in model.state_dict().keys()\n ]\n )\n\n def test_full_custom_model_saving(\n self, tmpdir, model_configs, custom_encoder, custom_decoder\n ):\n\n tmpdir.mkdir(\"dummy_folder\")\n dir_path = dir_path = os.path.join(tmpdir, \"dummy_folder\")\n\n model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)\n\n model.state_dict()[\"encoder.layers.0.0.weight\"][0] = 0\n\n model.save(dir_path=dir_path)\n\n assert set(os.listdir(dir_path)) == set(\n [\"model_config.json\", \"model.pt\", \"encoder.pkl\", \"decoder.pkl\"]\n )\n\n # reload model\n model_rec = VQVAE.load_from_folder(dir_path)\n\n # check configs are the same\n assert model_rec.model_config.__dict__ == model.model_config.__dict__\n\n assert all(\n [\n torch.equal(model_rec.state_dict()[key], model.state_dict()[key])\n for key in model.state_dict().keys()\n ]\n )\n\n def test_raises_missing_files(\n self, tmpdir, model_configs, custom_encoder, custom_decoder\n ):\n\n tmpdir.mkdir(\"dummy_folder\")\n dir_path = dir_path = os.path.join(tmpdir, \"dummy_folder\")\n\n model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)\n\n model.state_dict()[\"encoder.layers.0.0.weight\"][0] = 0\n\n model.save(dir_path=dir_path)\n\n os.remove(os.path.join(dir_path, \"decoder.pkl\"))\n\n # check raises decoder.pkl is missing\n with pytest.raises(FileNotFoundError):\n model_rec = VQVAE.load_from_folder(dir_path)\n\n os.remove(os.path.join(dir_path, \"encoder.pkl\"))\n\n # check raises encoder.pkl is missing\n with pytest.raises(FileNotFoundError):\n model_rec = VQVAE.load_from_folder(dir_path)\n\n os.remove(os.path.join(dir_path, \"model.pt\"))\n\n # check raises encoder.pkl is missing\n with pytest.raises(FileNotFoundError):\n model_rec = VQVAE.load_from_folder(dir_path)\n\n os.remove(os.path.join(dir_path, \"model_config.json\"))\n\n # check raises encoder.pkl is missing\n with pytest.raises(FileNotFoundError):\n model_rec = VQVAE.load_from_folder(dir_path)\n\n\nclass Test_Model_forward:\n @pytest.fixture\n def demo_data(self):\n data = 
torch.load(os.path.join(PATH, \"data/mnist_clean_train_dataset_sample\"))[\n :\n ]\n return data # This is an extract of 3 data from MNIST (unnormalized) used to test custom architecture\n\n @pytest.fixture\n def vae(self, model_configs, demo_data):\n model_configs.input_dim = tuple(demo_data[\"data\"][0].shape)\n return VQVAE(model_configs)\n\n def test_model_train_output(self, vae, demo_data):\n\n vae.train()\n\n out = vae(demo_data)\n\n assert isinstance(out, ModelOutput)\n\n assert set([\"loss\", \"recon_loss\", \"vq_loss\", \"recon_x\", \"z\"]) == set(out.keys())\n\n assert out.z.shape[0] == demo_data[\"data\"].shape[0]\n assert out.recon_x.shape == demo_data[\"data\"].shape\n\n\n@pytest.mark.slow\nclass Test_VQVAETraining:\n @pytest.fixture\n def train_dataset(self):\n return torch.load(os.path.join(PATH, \"data/mnist_clean_train_dataset_sample\"))\n\n @pytest.fixture(\n params=[BaseTrainerConfig(num_epochs=3, steps_saving=2, learning_rate=1e-5)]\n )\n def training_configs(self, tmpdir, request):\n tmpdir.mkdir(\"dummy_folder\")\n dir_path = os.path.join(tmpdir, \"dummy_folder\")\n request.param.output_dir = dir_path\n return request.param\n\n @pytest.fixture(\n params=[\n torch.rand(1),\n torch.rand(1),\n torch.rand(1),\n torch.rand(1),\n torch.rand(1),\n ]\n )\n def vae(self, model_configs, custom_encoder, custom_decoder, request):\n # randomized\n\n alpha = request.param\n\n if alpha < 0.25:\n model = VQVAE(model_configs)\n\n elif 0.25 <= alpha < 0.5:\n model = VQVAE(model_configs, encoder=custom_encoder)\n\n elif 0.5 <= alpha < 0.75:\n model = VQVAE(model_configs, decoder=custom_decoder)\n\n else:\n model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)\n\n return model\n\n @pytest.fixture(params=[None, Adagrad, Adam, Adadelta, SGD, RMSprop])\n def optimizers(self, request, vae, training_configs):\n if request.param is not None:\n optimizer = request.param(\n vae.parameters(), lr=training_configs.learning_rate\n )\n\n else:\n optimizer = None\n\n return optimizer\n\n def test_vae_train_step(self, vae, train_dataset, training_configs, optimizers):\n trainer = BaseTrainer(\n model=vae,\n train_dataset=train_dataset,\n training_config=training_configs,\n optimizer=optimizers,\n )\n\n start_model_state_dict = deepcopy(trainer.model.state_dict())\n\n step_1_loss = trainer.train_step(epoch=1)\n\n step_1_model_state_dict = deepcopy(trainer.model.state_dict())\n\n # check that weights were updated\n assert not all(\n [\n torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])\n for key in start_model_state_dict.keys()\n ]\n )\n\n def test_vae_eval_step(self, vae, train_dataset, training_configs, optimizers):\n trainer = BaseTrainer(\n model=vae,\n train_dataset=train_dataset,\n eval_dataset=train_dataset,\n training_config=training_configs,\n optimizer=optimizers,\n )\n\n start_model_state_dict = deepcopy(trainer.model.state_dict())\n\n step_1_loss = trainer.eval_step(epoch=1)\n\n step_1_model_state_dict = deepcopy(trainer.model.state_dict())\n\n # check that weights were updated\n assert all(\n [\n torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])\n for key in start_model_state_dict.keys()\n ]\n )\n\n def test_vae_main_train_loop(\n self, tmpdir, vae, train_dataset, training_configs, optimizers\n ):\n\n trainer = BaseTrainer(\n model=vae,\n train_dataset=train_dataset,\n eval_dataset=train_dataset,\n training_config=training_configs,\n optimizer=optimizers,\n )\n\n start_model_state_dict = deepcopy(trainer.model.state_dict())\n\n 
trainer.train()\n\n step_1_model_state_dict = deepcopy(trainer.model.state_dict())\n\n # check that weights were updated\n assert not all(\n [\n torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])\n for key in start_model_state_dict.keys()\n ]\n )\n\n def test_checkpoint_saving(\n self, tmpdir, vae, train_dataset, training_configs, optimizers\n ):\n\n dir_path = training_configs.output_dir\n\n trainer = BaseTrainer(\n model=vae,\n train_dataset=train_dataset,\n training_config=training_configs,\n optimizer=optimizers,\n )\n\n # Make a training step\n step_1_loss = trainer.train_step(epoch=1)\n\n model = deepcopy(trainer.model)\n optimizer = deepcopy(trainer.optimizer)\n\n trainer.save_checkpoint(dir_path=dir_path, epoch=0, model=model)\n\n checkpoint_dir = os.path.join(dir_path, \"checkpoint_epoch_0\")\n\n assert os.path.isdir(checkpoint_dir)\n\n files_list = os.listdir(checkpoint_dir)\n\n assert set([\"model.pt\", \"optimizer.pt\", \"training_config.json\"]).issubset(\n set(files_list)\n )\n\n # check pickled custom decoder\n if not vae.model_config.uses_default_decoder:\n assert \"decoder.pkl\" in files_list\n\n else:\n assert not \"decoder.pkl\" in files_list\n\n # check pickled custom encoder\n if not vae.model_config.uses_default_encoder:\n assert \"encoder.pkl\" in files_list\n\n else:\n assert not \"encoder.pkl\" in files_list\n\n model_rec_state_dict = torch.load(os.path.join(checkpoint_dir, \"model.pt\"))[\n \"model_state_dict\"\n ]\n\n assert all(\n [\n torch.equal(\n model_rec_state_dict[key].cpu(), model.state_dict()[key].cpu()\n )\n for key in model.state_dict().keys()\n ]\n )\n\n # check reload full model\n model_rec = VQVAE.load_from_folder(os.path.join(checkpoint_dir))\n\n assert all(\n [\n torch.equal(\n model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()\n )\n for key in model.state_dict().keys()\n ]\n )\n\n assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())\n assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())\n\n optim_rec_state_dict = torch.load(os.path.join(checkpoint_dir, \"optimizer.pt\"))\n\n assert all(\n [\n dict_rec == dict_optimizer\n for (dict_rec, dict_optimizer) in zip(\n optim_rec_state_dict[\"param_groups\"],\n optimizer.state_dict()[\"param_groups\"],\n )\n ]\n )\n\n assert all(\n [\n dict_rec == dict_optimizer\n for (dict_rec, dict_optimizer) in zip(\n optim_rec_state_dict[\"state\"], optimizer.state_dict()[\"state\"]\n )\n ]\n )\n\n def test_checkpoint_saving_during_training(\n self, tmpdir, vae, train_dataset, training_configs, optimizers\n ):\n #\n target_saving_epoch = training_configs.steps_saving\n\n dir_path = training_configs.output_dir\n\n trainer = BaseTrainer(\n model=vae,\n train_dataset=train_dataset,\n training_config=training_configs,\n optimizer=optimizers,\n )\n\n model = deepcopy(trainer.model)\n\n trainer.train()\n\n training_dir = os.path.join(\n dir_path, f\"VQVAE_training_{trainer._training_signature}\"\n )\n assert os.path.isdir(training_dir)\n\n checkpoint_dir = os.path.join(\n training_dir, f\"checkpoint_epoch_{target_saving_epoch}\"\n )\n\n assert os.path.isdir(checkpoint_dir)\n\n files_list = os.listdir(checkpoint_dir)\n\n # check files\n assert set([\"model.pt\", \"optimizer.pt\", \"training_config.json\"]).issubset(\n set(files_list)\n )\n\n # check pickled custom decoder\n if not vae.model_config.uses_default_decoder:\n assert \"decoder.pkl\" in files_list\n\n else:\n assert not \"decoder.pkl\" in files_list\n\n # check pickled custom encoder\n if not 
vae.model_config.uses_default_encoder:\n assert \"encoder.pkl\" in files_list\n\n else:\n assert not \"encoder.pkl\" in files_list\n\n model_rec_state_dict = torch.load(os.path.join(checkpoint_dir, \"model.pt\"))[\n \"model_state_dict\"\n ]\n\n assert not all(\n [\n torch.equal(model_rec_state_dict[key], model.state_dict()[key])\n for key in model.state_dict().keys()\n ]\n )\n\n def test_final_model_saving(\n self, tmpdir, vae, train_dataset, training_configs, optimizers\n ):\n\n dir_path = training_configs.output_dir\n\n trainer = BaseTrainer(\n model=vae,\n train_dataset=train_dataset,\n training_config=training_configs,\n optimizer=optimizers,\n )\n\n trainer.train()\n\n model = deepcopy(trainer._best_model)\n\n training_dir = os.path.join(\n dir_path, f\"VQVAE_training_{trainer._training_signature}\"\n )\n assert os.path.isdir(training_dir)\n\n final_dir = os.path.join(training_dir, f\"final_model\")\n assert os.path.isdir(final_dir)\n\n files_list = os.listdir(final_dir)\n\n assert set([\"model.pt\", \"model_config.json\", \"training_config.json\"]).issubset(\n set(files_list)\n )\n\n # check pickled custom decoder\n if not vae.model_config.uses_default_decoder:\n assert \"decoder.pkl\" in files_list\n\n else:\n assert not \"decoder.pkl\" in files_list\n\n # check pickled custom encoder\n if not vae.model_config.uses_default_encoder:\n assert \"encoder.pkl\" in files_list\n\n else:\n assert not \"encoder.pkl\" in files_list\n\n # check reload full model\n model_rec = VQVAE.load_from_folder(os.path.join(final_dir))\n\n assert all(\n [\n torch.equal(\n model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()\n )\n for key in model.state_dict().keys()\n ]\n )\n\n assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())\n assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())\n\n def test_vae_training_pipeline(self, tmpdir, vae, train_dataset, training_configs):\n\n dir_path = training_configs.output_dir\n\n # build pipeline\n pipeline = TrainingPipeline(model=vae, training_config=training_configs)\n\n # Launch Pipeline\n pipeline(\n train_data=train_dataset.data, # gives tensor to pipeline\n eval_data=train_dataset.data, # gives tensor to pipeline\n )\n\n model = deepcopy(pipeline.trainer._best_model)\n\n training_dir = os.path.join(\n dir_path, f\"VQVAE_training_{pipeline.trainer._training_signature}\"\n )\n assert os.path.isdir(training_dir)\n\n final_dir = os.path.join(training_dir, f\"final_model\")\n assert os.path.isdir(final_dir)\n\n files_list = os.listdir(final_dir)\n\n assert set([\"model.pt\", \"model_config.json\", \"training_config.json\"]).issubset(\n set(files_list)\n )\n\n # check pickled custom decoder\n if not vae.model_config.uses_default_decoder:\n assert \"decoder.pkl\" in files_list\n\n else:\n assert not \"decoder.pkl\" in files_list\n\n # check pickled custom encoder\n if not vae.model_config.uses_default_encoder:\n assert \"encoder.pkl\" in files_list\n\n else:\n assert not \"encoder.pkl\" in files_list\n\n # check reload full model\n model_rec = VQVAE.load_from_folder(os.path.join(final_dir))\n\n assert all(\n [\n torch.equal(\n model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()\n )\n for key in model.state_dict().keys()\n ]\n )\n\n assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())\n assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())\n" ]
[ [ "torch.rand", "torch.equal" ] ]
tommyreddad/tommy2tommy
[ "c634bedbc8b498abd272eecb27ca8dd2d013cdc8" ]
[ "tommy2tommy/optimizers/radam_test.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom tommy2tommy.optimizers import radam\n\n\nclass RAdamTest(tf.test.TestCase):\n pass\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
njuaplusplus/AmI
[ "b5b93afdae135dd60df78cb7276b49ba82a924b4" ]
[ "src/utils.py" ]
[ "import cv2\nimport numpy as np\nimport re\n\n\ndef read_list(f):\n l = []\n for line in open(f, 'r'):\n l.append(line.strip())\n return l\n\n\ndef get_identity(img_name, names):\n indices = [i.start() for i in re.finditer('_', img_name)]\n name = img_name[:indices[len(indices)-5]]\n if name in names:\n return names.index(name)\n\n\ndef get_vgg_data(img_path):\n averageImg = [129.1863, 104.7624, 93.5940]\n img = cv2.imread(img_path)\n if img.shape[0] != 224:\n img = cv2.resize(img, (224,224), interpolation=cv2.INTER_CUBIC)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n data = np.float32(np.rollaxis(img, 2)[::-1])\n data[0] -= averageImg[2]\n data[1] -= averageImg[1]\n data[2] -= averageImg[0]\n return np.array([data])\n\n\ndef get_data(img_path):\n if '.npy' in img_path:\n return np.load(img_path)\n else:\n return get_vgg_data(img_path)\n\n\ndef get_prob(net, img_path):\n net.blobs['data'].data[...] = get_data(img_path)\n net.forward()\n return net.blobs['prob'].data[0].copy()\n\n\ndef get_layers(net):\n layers = []\n for layer in net.blobs:\n layers.append(layer)\n return layers\n\n\ndef get_layer_size(net, layer):\n return len(net.params[layer][0].data)" ]
[ [ "numpy.array", "numpy.load", "numpy.rollaxis" ] ]
antonmattsson/diabetic_retinopathy
[ "6aaa0ab3631d1cb0075d3acf2778822d62d1b532" ]
[ "src/data_separator.py" ]
[ "import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom numpy.core.defchararray import add, replace\n\n# Separate the data set into test and train data\n\n# Read filenames from a text file listing all the images\nfilenames = np.genfromtxt('../data/train_list.txt', dtype=str)\n\n# Read in the labels of images\nall_labels = np.genfromtxt('../data/trainLabels.csv', skip_header=1, dtype=str, delimiter=',')\n\n# Plot the distribution of classes in the original data\nclasses, counts = np.unique(all_labels[:,1], return_counts=True)\nplt.figure(figsize=(8,6))\nplt.bar(classes, counts)\nplt.title('Distribution of retinopathy severity grades in the complete data set')\nplt.xlabel('Grade')\nplt.ylabel('Count')\nplt.savefig('../results/class_distribution.png')\n\n# Save class distribution in original data\nclass_dist = np.asarray((classes, counts), dtype=np.int).T\nnp.savetxt(fname='../results/class_distribution.csv', X=class_dist, delimiter=',')\n\n# Take a random sample of 3000 images aside as the test set\nnp.random.seed(38)\nnp.random.shuffle(all_labels)\ntest_labels = all_labels[:3000, :]\ntrain_labels = all_labels[3000:, :]\n\n# Plot and save distribution of test data\nclasses, counts = np.unique(test_labels[:,1], return_counts=True)\nplt.figure()\nplt.bar(classes, counts)\nplt.title('Distribution of retinopathy severity grades in test data')\nplt.xlabel('Grade')\nplt.ylabel('Count')\nplt.savefig('../results/class_distribution_test.png')\n\nclass_dist = np.asarray((classes, counts), dtype=np.int).T\nnp.savetxt(fname='../results/class_distribution_test.csv', X=class_dist, delimiter=',')\n\n# PLot and save distribution of train data\nclasses, counts = np.unique(train_labels[:,1], return_counts=True)\nplt.figure()\nplt.bar(classes, counts)\nplt.title('Distribution of retinopathy severity grades in train data')\nplt.xlabel('Grade')\nplt.ylabel('Count')\nplt.savefig('../results/class_distribution_train.png')\n\nclass_dist = np.asarray((classes, counts), dtype=np.int).T\nnp.savetxt(fname='../results/class_distribution_train.csv', X=class_dist, delimiter=',')\n\n# Save filenames separately\ntest_filenames = add(test_labels[:,0], np.full(shape=test_labels[:,0].shape, fill_value='.jpeg'))\nnp.savetxt(fname='../data/test_filenames.txt', X=test_filenames, delimiter='', fmt='%s')\ntrain_filenames = add(train_labels[:,0], np.full(shape=train_labels[:,0].shape, fill_value='.jpeg'))\nnp.savetxt(fname='../data/train_filenames.txt', X=train_filenames, delimiter='', fmt='%s')" ]
[ [ "matplotlib.use", "numpy.full", "numpy.savetxt", "numpy.asarray", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "numpy.genfromtxt", "matplotlib.pyplot.title", "numpy.random.shuffle", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.bar", "numpy.unique" ] ]
venushong667/rasa
[ "dc0af420818e263fb4ef97c0d7f1c65e1da83bd1" ]
[ "rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py" ]
[ "from __future__ import annotations\nimport numpy as np\nimport logging\n\nfrom typing import Any, Optional, Text, List, Dict, Tuple\n\nfrom rasa.engine.graph import ExecutionContext, GraphComponent\nfrom rasa.engine.storage.resource import Resource\nfrom rasa.engine.storage.storage import ModelStorage\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.nlu.featurizers.dense_featurizer.dense_featurizer import DenseFeaturizer2\nfrom rasa.nlu.tokenizers.tokenizer import Token\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.nlu.constants import (\n DENSE_FEATURIZABLE_ATTRIBUTES,\n SEQUENCE_FEATURES,\n SENTENCE_FEATURES,\n NO_LENGTH_RESTRICTION,\n NUMBER_OF_SUB_TOKENS,\n TOKENS_NAMES,\n)\nfrom rasa.shared.nlu.constants import (\n TEXT,\n ACTION_TEXT,\n)\nfrom rasa.utils import train_utils\nfrom rasa.nlu.featurizers.dense_featurizer._lm_featurizer import LanguageModelFeaturizer\n\nlogger = logging.getLogger(__name__)\n\n# TODO: remove after all references to old featurizer have been removed\nLanguageModelFeaturizer = LanguageModelFeaturizer\n\nMAX_SEQUENCE_LENGTHS = {\n \"bert\": 512,\n \"gpt\": 512,\n \"gpt2\": 512,\n \"xlnet\": NO_LENGTH_RESTRICTION,\n \"distilbert\": 512,\n \"roberta\": 512,\n}\n\n\nclass LanguageModelFeaturizerGraphComponent(DenseFeaturizer2, GraphComponent):\n \"\"\"A featurizer that uses transformer-based language models.\n\n This component loads a pre-trained language model\n from the Transformers library (https://github.com/huggingface/transformers)\n including BERT, GPT, GPT-2, xlnet, distilbert, and roberta.\n It also tokenizes and featurizes the featurizable dense attributes of\n each message.\n \"\"\"\n\n def __init__(\n self, config: Dict[Text, Any], execution_context: ExecutionContext,\n ) -> None:\n \"\"\"Initializes the featurizer with the model in the config.\"\"\"\n super(LanguageModelFeaturizerGraphComponent, self).__init__(\n execution_context.node_name, config\n )\n self._load_model_metadata()\n self._load_model_instance()\n\n @staticmethod\n def get_default_config() -> Dict[Text, Any]:\n \"\"\"Returns LanguageModelFeaturizer's default config.\"\"\"\n return {\n **DenseFeaturizer2.get_default_config(),\n # name of the language model to load.\n \"model_name\": \"bert\",\n # Pre-Trained weights to be loaded(string)\n \"model_weights\": None,\n # an optional path to a specific directory to download\n # and cache the pre-trained model weights.\n \"cache_dir\": None,\n }\n\n @classmethod\n def validate_config(cls, config: Dict[Text, Any]) -> None:\n \"\"\"Validates the configuration.\"\"\"\n pass\n\n @classmethod\n def create(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> LanguageModelFeaturizerGraphComponent:\n \"\"\"Creates a LanguageModelFeaturizer.\n\n Loads the model specified in the config.\n \"\"\"\n return cls(config, execution_context)\n\n @staticmethod\n def required_packages() -> List[Text]:\n \"\"\"Returns the extra python dependencies required.\"\"\"\n return [\"transformers\"]\n\n def _load_model_metadata(self) -> None:\n \"\"\"Loads the metadata for the specified model and set them as properties.\n\n This includes the model name, model weights, cache directory and the\n maximum sequence length the model can handle.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_class_dict,\n model_weights_defaults,\n )\n\n self.model_name = 
self._config[\"model_name\"]\n\n if self.model_name not in model_class_dict:\n raise KeyError(\n f\"'{self.model_name}' not a valid model name. Choose from \"\n f\"{str(list(model_class_dict.keys()))} or create\"\n f\"a new class inheriting from this class to support your model.\"\n )\n\n self.model_weights = self._config[\"model_weights\"]\n self.cache_dir = self._config[\"cache_dir\"]\n\n if not self.model_weights:\n logger.info(\n f\"Model weights not specified. Will choose default model \"\n f\"weights: {model_weights_defaults[self.model_name]}\"\n )\n self.model_weights = model_weights_defaults[self.model_name]\n\n self.max_model_sequence_length = MAX_SEQUENCE_LENGTHS[self.model_name]\n\n def _load_model_instance(self) -> None:\n \"\"\"Tries to load the model instance.\n\n Model loading should be skipped in unit tests.\n See unit tests for examples.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_class_dict,\n model_tokenizer_dict,\n )\n\n logger.debug(f\"Loading Tokenizer and Model for {self.model_name}\")\n\n self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained(\n self.model_weights, cache_dir=self.cache_dir\n )\n self.model = model_class_dict[self.model_name].from_pretrained(\n self.model_weights, cache_dir=self.cache_dir\n )\n\n # Use a universal pad token since all transformer architectures do not have a\n # consistent token. Instead of pad_token_id we use unk_token_id because\n # pad_token_id is not set for all architectures. We can't add a new token as\n # well since vocabulary resizing is not yet supported for TF classes.\n # Also, this does not hurt the model predictions since we use an attention mask\n # while feeding input.\n self.pad_token_id = self.tokenizer.unk_token_id\n\n def _lm_tokenize(self, text: Text) -> Tuple[List[int], List[Text]]:\n \"\"\"Passes the text through the tokenizer of the language model.\n\n Args:\n text: Text to be tokenized.\n\n Returns: List of token ids and token strings.\n \"\"\"\n split_token_ids = self.tokenizer.encode(text, add_special_tokens=False)\n\n split_token_strings = self.tokenizer.convert_ids_to_tokens(split_token_ids)\n\n return split_token_ids, split_token_strings\n\n def _add_lm_specific_special_tokens(\n self, token_ids: List[List[int]]\n ) -> List[List[int]]:\n \"\"\"Adds the language and model-specific tokens used during training.\n\n Args:\n token_ids: List of token ids for each example in the batch.\n\n Returns: Augmented list of token ids for each example in the batch.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_special_tokens_pre_processors,\n )\n\n augmented_tokens = [\n model_special_tokens_pre_processors[self.model_name](example_token_ids)\n for example_token_ids in token_ids\n ]\n return augmented_tokens\n\n def _lm_specific_token_cleanup(\n self, split_token_ids: List[int], token_strings: List[Text]\n ) -> Tuple[List[int], List[Text]]:\n \"\"\"Cleans up special chars added by tokenizers of language models.\n\n Many language models add a special char in front/back of (some) words. 
We clean\n up those chars as they are not\n needed once the features are already computed.\n\n Args:\n split_token_ids: List of token ids received as output from the language\n model specific tokenizer.\n token_strings: List of token strings received as output from the language\n model specific tokenizer.\n\n Returns: Cleaned up token ids and token strings.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import model_tokens_cleaners\n\n return model_tokens_cleaners[self.model_name](split_token_ids, token_strings)\n\n def _post_process_sequence_embeddings(\n self, sequence_embeddings: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Computes sentence and sequence level representations for relevant tokens.\n\n Args:\n sequence_embeddings: Sequence level dense features received as output from\n language model.\n\n Returns: Sentence and sequence level representations.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_embeddings_post_processors,\n )\n\n sentence_embeddings = []\n post_processed_sequence_embeddings = []\n\n for example_embedding in sequence_embeddings:\n (\n example_sentence_embedding,\n example_post_processed_embedding,\n ) = model_embeddings_post_processors[self.model_name](example_embedding)\n\n sentence_embeddings.append(example_sentence_embedding)\n post_processed_sequence_embeddings.append(example_post_processed_embedding)\n\n return (\n np.array(sentence_embeddings),\n np.array(post_processed_sequence_embeddings),\n )\n\n def _tokenize_example(\n self, message: Message, attribute: Text\n ) -> Tuple[List[Token], List[int]]:\n \"\"\"Tokenizes a single message example.\n\n Many language models add a special char in front of (some) words and split\n words into sub-words. To ensure the entity start and end values matches the\n token values, use the tokens produced by the Tokenizer component. 
If\n individual tokens are split up into multiple tokens, we add this information\n to the respected token.\n\n Args:\n message: Single message object to be processed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n\n Returns: List of token strings and token ids for the corresponding\n attribute of the message.\n \"\"\"\n tokens_in = message.get(TOKENS_NAMES[attribute])\n tokens_out = []\n\n token_ids_out = []\n\n for token in tokens_in:\n # use lm specific tokenizer to further tokenize the text\n split_token_ids, split_token_strings = self._lm_tokenize(token.text)\n\n if not split_token_ids:\n # fix the situation that `token.text` only contains whitespace or other\n # special characters, which cause `split_token_ids` and\n # `split_token_strings` be empty, finally cause\n # `self._lm_specific_token_cleanup()` to raise an exception\n continue\n\n (split_token_ids, split_token_strings) = self._lm_specific_token_cleanup(\n split_token_ids, split_token_strings\n )\n\n token_ids_out += split_token_ids\n\n token.set(NUMBER_OF_SUB_TOKENS, len(split_token_strings))\n\n tokens_out.append(token)\n\n return tokens_out, token_ids_out\n\n def _get_token_ids_for_batch(\n self, batch_examples: List[Message], attribute: Text\n ) -> Tuple[List[List[Token]], List[List[int]]]:\n \"\"\"Computes token ids and token strings for each example in batch.\n\n A token id is the id of that token in the vocabulary of the language model.\n\n Args:\n batch_examples: Batch of message objects for which tokens need to be\n computed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n\n Returns: List of token strings and token ids for each example in the batch.\n \"\"\"\n batch_token_ids = []\n batch_tokens = []\n for example in batch_examples:\n\n example_tokens, example_token_ids = self._tokenize_example(\n example, attribute\n )\n batch_tokens.append(example_tokens)\n batch_token_ids.append(example_token_ids)\n\n return batch_tokens, batch_token_ids\n\n @staticmethod\n def _compute_attention_mask(\n actual_sequence_lengths: List[int], max_input_sequence_length: int\n ) -> np.ndarray:\n \"\"\"Computes a mask for padding tokens.\n\n This mask will be used by the language model so that it does not attend to\n padding tokens.\n\n Args:\n actual_sequence_lengths: List of length of each example without any\n padding.\n max_input_sequence_length: Maximum length of a sequence that will be\n present in the input batch. This is\n after taking into consideration the maximum input sequence the model\n can handle. 
Hence it can never be\n greater than self.max_model_sequence_length in case the model\n applies length restriction.\n\n Returns: Computed attention mask, 0 for padding and 1 for non-padding\n tokens.\n \"\"\"\n attention_mask = []\n\n for actual_sequence_length in actual_sequence_lengths:\n # add 1s for present tokens, fill up the remaining space up to max\n # sequence length with 0s (non-existing tokens)\n padded_sequence = [1] * min(\n actual_sequence_length, max_input_sequence_length\n ) + [0] * (\n max_input_sequence_length\n - min(actual_sequence_length, max_input_sequence_length)\n )\n attention_mask.append(padded_sequence)\n\n attention_mask = np.array(attention_mask).astype(np.float32)\n return attention_mask\n\n def _extract_sequence_lengths(\n self, batch_token_ids: List[List[int]]\n ) -> Tuple[List[int], int]:\n \"\"\"Extracts the sequence length for each example and maximum sequence length.\n\n Args:\n batch_token_ids: List of token ids for each example in the batch.\n\n Returns:\n Tuple consisting of: the actual sequence lengths for each example,\n and the maximum input sequence length (taking into account the\n maximum sequence length that the model can handle.\n \"\"\"\n # Compute max length across examples\n max_input_sequence_length = 0\n actual_sequence_lengths = []\n\n for example_token_ids in batch_token_ids:\n sequence_length = len(example_token_ids)\n actual_sequence_lengths.append(sequence_length)\n max_input_sequence_length = max(\n max_input_sequence_length, len(example_token_ids)\n )\n\n # Take into account the maximum sequence length the model can handle\n max_input_sequence_length = (\n max_input_sequence_length\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION\n else min(max_input_sequence_length, self.max_model_sequence_length)\n )\n\n return actual_sequence_lengths, max_input_sequence_length\n\n def _add_padding_to_batch(\n self, batch_token_ids: List[List[int]], max_sequence_length_model: int\n ) -> List[List[int]]:\n \"\"\"Adds padding so that all examples in the batch are of the same length.\n\n Args:\n batch_token_ids: Batch of examples where each example is a non-padded list\n of token ids.\n max_sequence_length_model: Maximum length of any input sequence in the batch\n to be fed to the model.\n\n Returns:\n Padded batch with all examples of the same length.\n \"\"\"\n padded_token_ids = []\n\n # Add padding according to max_sequence_length\n # Some models don't contain pad token, we use unknown token as padding token.\n # This doesn't affect the computation since we compute an attention mask\n # anyways.\n for example_token_ids in batch_token_ids:\n\n # Truncate any longer sequences so that they can be fed to the model\n if len(example_token_ids) > max_sequence_length_model:\n example_token_ids = example_token_ids[:max_sequence_length_model]\n\n padded_token_ids.append(\n example_token_ids\n + [self.pad_token_id]\n * (max_sequence_length_model - len(example_token_ids))\n )\n return padded_token_ids\n\n @staticmethod\n def _extract_nonpadded_embeddings(\n embeddings: np.ndarray, actual_sequence_lengths: List[int]\n ) -> np.ndarray:\n \"\"\"Extracts embeddings for actual tokens.\n\n Use pre-computed non-padded lengths of each example to extract embeddings\n for non-padding tokens.\n\n Args:\n embeddings: sequence level representations for each example of the batch.\n actual_sequence_lengths: non-padded lengths of each example of the batch.\n\n Returns:\n Sequence level embeddings for only non-padding tokens of the batch.\n \"\"\"\n 
nonpadded_sequence_embeddings = []\n for index, embedding in enumerate(embeddings):\n unmasked_embedding = embedding[: actual_sequence_lengths[index]]\n nonpadded_sequence_embeddings.append(unmasked_embedding)\n\n return np.array(nonpadded_sequence_embeddings)\n\n def _compute_batch_sequence_features(\n self, batch_attention_mask: np.ndarray, padded_token_ids: List[List[int]]\n ) -> np.ndarray:\n \"\"\"Feeds the padded batch to the language model.\n\n Args:\n batch_attention_mask: Mask of 0s and 1s which indicate whether the token\n is a padding token or not.\n padded_token_ids: Batch of token ids for each example. The batch is padded\n and hence can be fed at once.\n\n Returns:\n Sequence level representations from the language model.\n \"\"\"\n model_outputs = self.model(\n np.array(padded_token_ids), attention_mask=np.array(batch_attention_mask)\n )\n\n # sequence hidden states is always the first output from all models\n sequence_hidden_states = model_outputs[0]\n\n sequence_hidden_states = sequence_hidden_states.numpy()\n return sequence_hidden_states\n\n def _validate_sequence_lengths(\n self,\n actual_sequence_lengths: List[int],\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> None:\n \"\"\"Validates sequence length.\n\n Checks if sequence lengths of inputs are less than\n the max sequence length the model can handle.\n\n This method should throw an error during training, and log a debug\n message during inference if any of the input examples have a length\n greater than maximum sequence length allowed.\n\n Args:\n actual_sequence_lengths: original sequence length of all inputs\n batch_examples: all message instances in the batch\n attribute: attribute of message object to be processed\n inference_mode: whether this is during training or inference\n \"\"\"\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:\n # There is no restriction on sequence length from the model\n return\n\n for sequence_length, example in zip(actual_sequence_lengths, batch_examples):\n if sequence_length > self.max_model_sequence_length:\n if not inference_mode:\n raise RuntimeError(\n f\"The sequence length of '{example.get(attribute)[:20]}...' \"\n f\"is too long({sequence_length} tokens) for the \"\n f\"model chosen {self.model_name} which has a maximum \"\n f\"sequence length of {self.max_model_sequence_length} tokens. \"\n f\"Either shorten the message or use a model which has no \"\n f\"restriction on input sequence length like XLNet.\"\n )\n logger.debug(\n f\"The sequence length of '{example.get(attribute)[:20]}...' \"\n f\"is too long({sequence_length} tokens) for the \"\n f\"model chosen {self.model_name} which has a maximum \"\n f\"sequence length of {self.max_model_sequence_length} tokens. 
\"\n f\"Downstream model predictions may be affected because of this.\"\n )\n\n def _add_extra_padding(\n self, sequence_embeddings: np.ndarray, actual_sequence_lengths: List[int]\n ) -> np.ndarray:\n \"\"\"Adds extra zero padding to match the original sequence length.\n\n This is only done if the input was truncated during the batch\n preparation of input for the model.\n Args:\n sequence_embeddings: Embeddings returned from the model\n actual_sequence_lengths: original sequence length of all inputs\n\n Returns:\n Modified sequence embeddings with padding if necessary\n \"\"\"\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:\n # No extra padding needed because there wouldn't have been any\n # truncation in the first place\n return sequence_embeddings\n\n reshaped_sequence_embeddings = []\n for index, embedding in enumerate(sequence_embeddings):\n embedding_size = embedding.shape[-1]\n if actual_sequence_lengths[index] > self.max_model_sequence_length:\n embedding = np.concatenate(\n [\n embedding,\n np.zeros(\n (\n actual_sequence_lengths[index]\n - self.max_model_sequence_length,\n embedding_size,\n ),\n dtype=np.float32,\n ),\n ]\n )\n reshaped_sequence_embeddings.append(embedding)\n\n return np.array(reshaped_sequence_embeddings)\n\n def _get_model_features_for_batch(\n self,\n batch_token_ids: List[List[int]],\n batch_tokens: List[List[Token]],\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Computes dense features of each example in the batch.\n\n We first add the special tokens corresponding to each language model. Next, we\n add appropriate padding and compute a mask for that padding so that it doesn't\n affect the feature computation. The padded batch is next fed to the language\n model and token level embeddings are computed. 
Using the pre-computed mask,\n embeddings for non-padding tokens are extracted and subsequently sentence\n level embeddings are computed.\n\n Args:\n batch_token_ids: List of token ids of each example in the batch.\n batch_tokens: List of token objects for each example in the batch.\n batch_examples: List of examples in the batch.\n attribute: attribute of the Message object to be processed.\n inference_mode: Whether the call is during training or during inference.\n\n Returns:\n Sentence and token level dense representations.\n \"\"\"\n # Let's first add tokenizer specific special tokens to all examples\n batch_token_ids_augmented = self._add_lm_specific_special_tokens(\n batch_token_ids\n )\n\n # Compute sequence lengths for all examples\n (\n actual_sequence_lengths,\n max_input_sequence_length,\n ) = self._extract_sequence_lengths(batch_token_ids_augmented)\n\n # Validate that all sequences can be processed based on their sequence\n # lengths and the maximum sequence length the model can handle\n self._validate_sequence_lengths(\n actual_sequence_lengths, batch_examples, attribute, inference_mode\n )\n\n # Add padding so that whole batch can be fed to the model\n padded_token_ids = self._add_padding_to_batch(\n batch_token_ids_augmented, max_input_sequence_length\n )\n\n # Compute attention mask based on actual_sequence_length\n batch_attention_mask = self._compute_attention_mask(\n actual_sequence_lengths, max_input_sequence_length\n )\n\n # Get token level features from the model\n sequence_hidden_states = self._compute_batch_sequence_features(\n batch_attention_mask, padded_token_ids\n )\n\n # Extract features for only non-padding tokens\n sequence_nonpadded_embeddings = self._extract_nonpadded_embeddings(\n sequence_hidden_states, actual_sequence_lengths\n )\n\n # Extract sentence level and post-processed features\n (\n sentence_embeddings,\n sequence_embeddings,\n ) = self._post_process_sequence_embeddings(sequence_nonpadded_embeddings)\n\n # Pad zeros for examples which were truncated in inference mode.\n # This is intentionally done after sentence embeddings have been\n # extracted so that they are not affected\n sequence_embeddings = self._add_extra_padding(\n sequence_embeddings, actual_sequence_lengths\n )\n\n # shape of matrix for all sequence embeddings\n batch_dim = len(sequence_embeddings)\n seq_dim = max(e.shape[0] for e in sequence_embeddings)\n feature_dim = sequence_embeddings[0].shape[1]\n shape = (batch_dim, seq_dim, feature_dim)\n\n # align features with tokens so that we have just one vector per token\n # (don't include sub-tokens)\n sequence_embeddings = train_utils.align_token_features(\n batch_tokens, sequence_embeddings, shape\n )\n\n # sequence_embeddings is a padded numpy array\n # remove the padding, keep just the non-zero vectors\n sequence_final_embeddings = []\n for embeddings, tokens in zip(sequence_embeddings, batch_tokens):\n sequence_final_embeddings.append(embeddings[: len(tokens)])\n sequence_final_embeddings = np.array(sequence_final_embeddings)\n\n return sentence_embeddings, sequence_final_embeddings\n\n def _get_docs_for_batch(\n self,\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> List[Dict[Text, Any]]:\n \"\"\"Computes language model docs for all examples in the batch.\n\n Args:\n batch_examples: Batch of message objects for which language model docs\n need to be computed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n inference_mode: Whether the call is 
during inference or during training.\n\n\n Returns:\n List of language model docs for each message in batch.\n \"\"\"\n batch_tokens, batch_token_ids = self._get_token_ids_for_batch(\n batch_examples, attribute\n )\n\n (\n batch_sentence_features,\n batch_sequence_features,\n ) = self._get_model_features_for_batch(\n batch_token_ids, batch_tokens, batch_examples, attribute, inference_mode\n )\n\n # A doc consists of\n # {'sequence_features': ..., 'sentence_features': ...}\n batch_docs = []\n for index in range(len(batch_examples)):\n doc = {\n SEQUENCE_FEATURES: batch_sequence_features[index],\n SENTENCE_FEATURES: np.reshape(batch_sentence_features[index], (1, -1)),\n }\n batch_docs.append(doc)\n\n return batch_docs\n\n def process_training_data(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> TrainingData:\n \"\"\"Computes tokens and dense features for each message in training data.\n\n Args:\n training_data: NLU training data to be tokenized and featurized\n config: NLU pipeline config consisting of all components.\n \"\"\"\n batch_size = 64\n\n for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:\n\n non_empty_examples = list(\n filter(lambda x: x.get(attribute), training_data.training_examples)\n )\n\n batch_start_index = 0\n\n while batch_start_index < len(non_empty_examples):\n\n batch_end_index = min(\n batch_start_index + batch_size, len(non_empty_examples)\n )\n # Collect batch examples\n batch_messages = non_empty_examples[batch_start_index:batch_end_index]\n\n # Construct a doc with relevant features\n # extracted(tokens, dense_features)\n batch_docs = self._get_docs_for_batch(batch_messages, attribute)\n\n for index, ex in enumerate(batch_messages):\n self._set_lm_features(batch_docs[index], ex, attribute)\n batch_start_index += batch_size\n\n return training_data\n\n def process(self, messages: List[Message]) -> List[Message]:\n \"\"\"Processes messages by computing tokens and dense features.\"\"\"\n for message in messages:\n self._process_message(message)\n return messages\n\n def _process_message(self, message: Message) -> Message:\n \"\"\"Processes a message by computing tokens and dense features.\"\"\"\n # processing featurizers operates only on TEXT and ACTION_TEXT attributes,\n # because all other attributes are labels which are featurized during\n # training and their features are stored by the model itself.\n for attribute in {TEXT, ACTION_TEXT}:\n if message.get(attribute):\n self._set_lm_features(\n self._get_docs_for_batch(\n [message], attribute=attribute, inference_mode=True\n )[0],\n message,\n attribute,\n )\n return message\n\n def _set_lm_features(\n self, doc: Dict[Text, Any], message: Message, attribute: Text = TEXT\n ) -> None:\n \"\"\"Adds the precomputed word vectors to the messages features.\"\"\"\n sequence_features = doc[SEQUENCE_FEATURES]\n sentence_features = doc[SENTENCE_FEATURES]\n\n self.add_features_to_message(\n sequence=sequence_features,\n sentence=sentence_features,\n attribute=attribute,\n message=message,\n )\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.zeros" ] ]
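Note on the LanguageModelFeaturizer record above: before a batch is fed to the transformer, the featurizer truncates over-long token-id sequences, pads the rest, and builds a 0/1 attention mask so padding tokens are ignored. A minimal standalone sketch of that step, assuming plain numpy; pad_and_mask, pad_token_id and max_model_len are illustrative names and defaults, not Rasa's actual API:

import numpy as np
from typing import List, Tuple


def pad_and_mask(batch_token_ids: List[List[int]],
                 pad_token_id: int = 0,
                 max_model_len: int = 512) -> Tuple[np.ndarray, np.ndarray]:
    """Truncate/pad a batch of token-id lists and build the matching attention mask."""
    # longest sequence in the batch, capped by what the model can handle
    max_len = min(max(len(ids) for ids in batch_token_ids), max_model_len)
    padded, mask = [], []
    for ids in batch_token_ids:
        ids = ids[:max_len]                      # truncate sequences that are too long
        n_real = len(ids)
        padded.append(ids + [pad_token_id] * (max_len - n_real))
        mask.append([1] * n_real + [0] * (max_len - n_real))
    return np.array(padded), np.array(mask, dtype=np.float32)


token_ids, attention_mask = pad_and_mask([[101, 7592, 102], [101, 102]])
# token_ids      -> [[101, 7592, 102], [101, 102, 0]]
# attention_mask -> [[1., 1., 1.], [1., 1., 0.]]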
RuibingS/cimcb
[ "382f7d8fff30d3d276f18ac8c7dc686e0e643fa9" ]
[ "cimcb_lite/bootstrap/BC.py" ]
[ "import numpy as np\nimport scipy\nfrom scipy.stats import norm\nfrom .BaseBootstrap import BaseBootstrap\nfrom ..utils import nested_getattr\n\n\nclass BC(BaseBootstrap):\n \"\"\" Returns bootstrap confidence intervals using the bias-corrected boostrap interval.\n\n Parameters\n ----------\n model : object\n This object is assumed to store bootlist attributes in .model (e.g. modelPLS.model.x_scores_).\n\n X : array-like, shape = [n_samples, n_features]\n Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.\n\n Y : array-like, shape = [n_samples, 1]\n Response variables, where n_samples is the number of samples.\n\n bootlist : array-like, shape = [n_bootlist, 1]\n List of attributes to calculate and return bootstrap confidence intervals.\n\n bootnum : a positive integer, (default 100)\n The number of bootstrap samples used in the computation.\n\n seed: integer or None (default None)\n Used to seed the generator for the resample with replacement.\n\n Returns\n -------\n bootci : dict of arrays\n Keys correspond to attributes in bootlist.\n Each array contains 95% confidence intervals.\n To return bootci, initalise then use method run().\n \"\"\"\n\n def __init__(self, model, X, Y, bootlist, bootnum=100, seed=None):\n super().__init__(model=model, X=X, Y=Y, bootlist=bootlist, bootnum=bootnum, seed=seed)\n self.stat = {}\n\n def calc_stat(self):\n \"\"\"Stores selected attributes (from self.bootlist) for the original model.\"\"\"\n self.stat = {}\n for i in self.bootlist:\n self.stat[i] = nested_getattr(self.model, i)\n\n def calc_bootidx(self):\n super().calc_bootidx()\n\n def calc_bootstat(self):\n super().calc_bootstat()\n\n def calc_bootci(self):\n self.bootci = {}\n for i in self.bootlist:\n self.bootci[i] = self.bootci_method(self.bootstat[i], self.stat[i])\n\n def run(self):\n self.calc_stat()\n self.calc_bootidx()\n self.calc_bootstat()\n self.calc_bootci()\n return self.bootci\n\n @staticmethod\n def bootci_method(bootstat, stat):\n \"\"\"Calculates bootstrap confidence intervals using the bias-corrected bootstrap interval.\"\"\"\n if stat.ndim == 1:\n nboot = len(bootstat)\n zalpha = norm.ppf(0.05 / 2)\n obs = stat # Observed mean\n meansum = np.zeros((1, len(obs))).flatten()\n for i in range(len(obs)):\n for j in range(len(bootstat)):\n if bootstat[j][i] >= obs[i]:\n meansum[i] = meansum[i] + 1\n prop = meansum / nboot # Proportion of times boot mean > obs mean\n z0 = -norm.ppf(prop)\n \n # new alpha\n pct1 = 100 * norm.cdf((2 * z0 + zalpha))\n pct2 = 100 * norm.cdf((2 * z0 - zalpha))\n boot_ci = []\n for i in range(len(pct1)):\n bootstat_i = [item[i] for item in bootstat]\n append_low = np.percentile(bootstat_i, pct1[i])\n append_upp = np.percentile(bootstat_i, pct2[i])\n boot_ci.append([append_low, append_upp])\n boot_ci = np.array(boot_ci)\n\n # Recursive component (to get ndim = 1, and append)\n else:\n ncomp = stat.shape[1]\n boot_ci = []\n for k in range(ncomp):\n bootstat_k = []\n for j in range(len(bootstat)):\n bootstat_k.append(bootstat[j][:, k])\n boot_ci_k = BC.bootci_method(bootstat_k, stat[:, k])\n boot_ci.append(boot_ci_k)\n boot_ci = np.array(boot_ci)\n return boot_ci\n" ]
[ [ "numpy.percentile", "numpy.array", "scipy.stats.norm.ppf", "scipy.stats.norm.cdf" ] ]
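Note on the cimcb_lite BC record above: the heart of the bias-corrected (BC) bootstrap interval is a bias term z0, estimated from how often the bootstrap statistic falls at or above the observed statistic, which then shifts the percentile cut-offs of the bootstrap distribution. A minimal sketch for a single scalar statistic, using only numpy/scipy; bc_interval and the demo data are illustrative and not part of cimcb:

import numpy as np
from scipy.stats import norm


def bc_interval(bootstat, observed, alpha=0.05):
    """Bias-corrected bootstrap confidence interval for one scalar statistic."""
    bootstat = np.asarray(bootstat)
    z_alpha = norm.ppf(alpha / 2)                        # about -1.96 for a 95% interval
    # bias correction: proportion of bootstrap replicates at or above the observed value
    prop_above = np.clip(np.mean(bootstat >= observed), 1e-6, 1 - 1e-6)  # avoid +/-inf
    z0 = -norm.ppf(prop_above)
    # adjusted percentiles of the bootstrap distribution
    pct_low = 100 * norm.cdf(2 * z0 + z_alpha)
    pct_upp = 100 * norm.cdf(2 * z0 - z_alpha)
    return np.percentile(bootstat, pct_low), np.percentile(bootstat, pct_upp)


rng = np.random.default_rng(0)
sample = rng.normal(loc=1.0, scale=2.0, size=200)
boot_means = [rng.choice(sample, size=sample.size, replace=True).mean() for _ in range(1000)]
print(bc_interval(boot_means, sample.mean()))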
f6v/pandas
[ "bc65fe6c12dc78679ba8584eee83c6e3e243b5b9" ]
[ "pandas/core/indexes/period.py" ]
[ "from datetime import datetime, timedelta\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._libs import index as libindex\nfrom pandas._libs.tslibs import NaT, frequencies as libfrequencies, iNaT, resolution\nfrom pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\n\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n pandas_dtype,\n)\n\nfrom pandas.core import common as com\nfrom pandas.core.accessor import delegate_names\nfrom pandas.core.algorithms import unique1d\nfrom pandas.core.arrays.period import PeriodArray, period_array, validate_dtype_freq\nfrom pandas.core.base import _shared_docs\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import _index_shared_docs, ensure_index\nfrom pandas.core.indexes.datetimelike import (\n DatetimeIndexOpsMixin,\n DatetimelikeDelegateMixin,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex, Index, Int64Index\nfrom pandas.core.missing import isna\nfrom pandas.core.ops import get_op_result_name\nfrom pandas.core.tools.datetimes import DateParseError, parse_time_string\n\nfrom pandas.tseries import frequencies\nfrom pandas.tseries.offsets import DateOffset, Tick\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(dict(target_klass=\"PeriodIndex or list of Periods\"))\n\n\n# --- Period index sketch\n\n\ndef _new_PeriodIndex(cls, **d):\n # GH13277 for unpickling\n values = d.pop(\"data\")\n if values.dtype == \"int64\":\n freq = d.pop(\"freq\", None)\n values = PeriodArray(values, freq=freq)\n return cls._simple_new(values, **d)\n else:\n return cls(values, **d)\n\n\nclass PeriodDelegateMixin(DatetimelikeDelegateMixin):\n \"\"\"\n Delegate from PeriodIndex to PeriodArray.\n \"\"\"\n\n _delegate_class = PeriodArray\n _delegated_properties = PeriodArray._datetimelike_ops\n _delegated_methods = set(PeriodArray._datetimelike_methods) | {\n \"_addsub_int_array\",\n \"strftime\",\n }\n _raw_properties = {\"is_leap_year\"}\n\n\n@delegate_names(PeriodArray, PeriodDelegateMixin._delegated_properties, typ=\"property\")\n@delegate_names(\n PeriodArray, PeriodDelegateMixin._delegated_methods, typ=\"method\", overwrite=True\n)\nclass PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin):\n \"\"\"\n Immutable ndarray holding ordinal values indicating regular periods in\n time such as particular years, quarters, months, etc.\n\n Index keys are boxed to Period objects which carries the metadata (eg,\n frequency information).\n\n Parameters\n ----------\n data : array-like (1d integer np.ndarray or PeriodArray), optional\n Optional period-like data to construct index with\n copy : bool\n Make a copy of input ndarray\n freq : string or period object, optional\n One of pandas period strings or corresponding objects\n start : starting value, period-like, optional\n If data is None, used as the start point in generating regular\n period data.\n\n .. deprecated:: 0.24.0\n\n periods : int, optional, > 0\n Number of periods to generate, if generating index. Takes precedence\n over end argument\n\n .. deprecated:: 0.24.0\n\n end : end value, period-like, optional\n If periods is none, generated index will extend to first conforming\n period on or just past end argument\n\n .. 
deprecated:: 0.24.0\n\n year : int, array, or Series, default None\n month : int, array, or Series, default None\n quarter : int, array, or Series, default None\n day : int, array, or Series, default None\n hour : int, array, or Series, default None\n minute : int, array, or Series, default None\n second : int, array, or Series, default None\n tz : object, default None\n Timezone for converting datetime64 data to Periods\n dtype : str or PeriodDtype, default None\n\n Attributes\n ----------\n day\n dayofweek\n dayofyear\n days_in_month\n daysinmonth\n end_time\n freq\n freqstr\n hour\n is_leap_year\n minute\n month\n quarter\n qyear\n second\n start_time\n week\n weekday\n weekofyear\n year\n\n Methods\n -------\n asfreq\n strftime\n to_timestamp\n\n See Also\n --------\n Index : The base pandas Index type.\n Period : Represents a period of time.\n DatetimeIndex : Index with datetime64 data.\n TimedeltaIndex : Index of timedelta64 data.\n period_range : Create a fixed-frequency PeriodIndex.\n\n Notes\n -----\n Creating a PeriodIndex based on `start`, `periods`, and `end` has\n been deprecated in favor of :func:`period_range`.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(year=year_arr, quarter=q_arr)\n \"\"\"\n\n _typ = \"periodindex\"\n _attributes = [\"name\", \"freq\"]\n\n # define my properties & methods for delegation\n _is_numeric_dtype = False\n _infer_as_myclass = True\n\n _data = None\n\n _engine_type = libindex.PeriodEngine\n _supports_partial_string_indexing = True\n\n # ------------------------------------------------------------------------\n # Index Constructors\n\n def __new__(\n cls,\n data=None,\n ordinal=None,\n freq=None,\n start=None,\n end=None,\n periods=None,\n tz=None,\n dtype=None,\n copy=False,\n name=None,\n **fields\n ):\n\n valid_field_set = {\n \"year\",\n \"month\",\n \"day\",\n \"quarter\",\n \"hour\",\n \"minute\",\n \"second\",\n }\n\n if not set(fields).issubset(valid_field_set):\n raise TypeError(\n \"__new__() got an unexpected keyword argument {}\".format(\n list(set(fields) - valid_field_set)[0]\n )\n )\n\n if name is None and hasattr(data, \"name\"):\n name = data.name\n\n if data is None and ordinal is None:\n # range-based.\n data, freq2 = PeriodArray._generate_range(start, end, periods, freq, fields)\n # PeriodArray._generate range does validate that fields is\n # empty when really using the range-based constructor.\n if not fields:\n msg = (\n \"Creating a PeriodIndex by passing range \"\n \"endpoints is deprecated. Use \"\n \"`pandas.period_range` instead.\"\n )\n # period_range differs from PeriodIndex for cases like\n # start=\"2000\", periods=4\n # PeriodIndex interprets that as A-DEC freq.\n # period_range interprets it as 'D' freq.\n cond = freq is None and (\n (start and not isinstance(start, Period))\n or (end and not isinstance(end, Period))\n )\n if cond:\n msg += (\n \" Note that the default `freq` may differ. Pass \"\n \"'freq=\\\"{}\\\"' to ensure the same output.\"\n ).format(freq2.freqstr)\n warnings.warn(msg, FutureWarning, stacklevel=2)\n freq = freq2\n\n data = PeriodArray(data, freq=freq)\n else:\n freq = validate_dtype_freq(dtype, freq)\n\n # PeriodIndex allow PeriodIndex(period_index, freq=different)\n # Let's not encourage that kind of behavior in PeriodArray.\n\n if freq and isinstance(data, cls) and data.freq != freq:\n # TODO: We can do some of these with no-copy / coercion?\n # e.g. 
D -> 2D seems to be OK\n data = data.asfreq(freq)\n\n if data is None and ordinal is not None:\n # we strangely ignore `ordinal` if data is passed.\n ordinal = np.asarray(ordinal, dtype=np.int64)\n data = PeriodArray(ordinal, freq)\n else:\n # don't pass copy here, since we copy later.\n data = period_array(data=data, freq=freq)\n\n if copy:\n data = data.copy()\n\n return cls._simple_new(data, name=name)\n\n @classmethod\n def _simple_new(cls, values, name=None, freq=None, **kwargs):\n \"\"\"\n Create a new PeriodIndex.\n\n Parameters\n ----------\n values : PeriodArray, PeriodIndex, Index[int64], ndarray[int64]\n Values that can be converted to a PeriodArray without inference\n or coercion.\n\n \"\"\"\n # TODO: raising on floats is tested, but maybe not useful.\n # Should the callers know not to pass floats?\n # At the very least, I think we can ensure that lists aren't passed.\n if isinstance(values, list):\n values = np.asarray(values)\n if is_float_dtype(values):\n raise TypeError(\"PeriodIndex._simple_new does not accept floats.\")\n if freq:\n freq = Period._maybe_convert_freq(freq)\n values = PeriodArray(values, freq=freq)\n\n if not isinstance(values, PeriodArray):\n raise TypeError(\"PeriodIndex._simple_new only accepts PeriodArray\")\n result = object.__new__(cls)\n result._data = values\n # For groupby perf. See note in indexes/base about _index_data\n result._index_data = values._data\n result.name = name\n result._reset_identity()\n return result\n\n # ------------------------------------------------------------------------\n # Data\n\n @property\n def values(self):\n return np.asarray(self)\n\n @property\n def freq(self):\n return self._data.freq\n\n @freq.setter\n def freq(self, value):\n value = Period._maybe_convert_freq(value)\n # TODO: When this deprecation is enforced, PeriodIndex.freq can\n # be removed entirely, and we'll just inherit.\n msg = (\n \"Setting {cls}.freq has been deprecated and will be \"\n \"removed in a future version; use {cls}.asfreq instead. \"\n \"The {cls}.freq setter is not guaranteed to work.\"\n )\n warnings.warn(msg.format(cls=type(self).__name__), FutureWarning, stacklevel=2)\n # PeriodArray._freq isn't actually mutable. 
We set the private _freq\n # here, but people shouldn't be doing this anyway.\n self._data._freq = value\n\n def _shallow_copy(self, values=None, **kwargs):\n # TODO: simplify, figure out type of values\n if values is None:\n values = self._data\n\n if isinstance(values, type(self)):\n values = values._values\n\n if not isinstance(values, PeriodArray):\n if isinstance(values, np.ndarray) and is_integer_dtype(values.dtype):\n values = PeriodArray(values, freq=self.freq)\n else:\n # in particular, I would like to avoid period_array here.\n # Some people seem to be calling use with unexpected types\n # Index.difference -> ndarray[Period]\n # DatetimelikeIndexOpsMixin.repeat -> ndarray[ordinal]\n # I think that once all of Datetime* are EAs, we can simplify\n # this quite a bit.\n values = period_array(values, freq=self.freq)\n\n # We don't allow changing `freq` in _shallow_copy.\n validate_dtype_freq(self.dtype, kwargs.get(\"freq\"))\n attributes = self._get_attributes_dict()\n\n attributes.update(kwargs)\n if not len(values) and \"dtype\" not in kwargs:\n attributes[\"dtype\"] = self.dtype\n return self._simple_new(values, **attributes)\n\n def _shallow_copy_with_infer(self, values=None, **kwargs):\n \"\"\" we always want to return a PeriodIndex \"\"\"\n return self._shallow_copy(values=values, **kwargs)\n\n @property\n def _box_func(self):\n \"\"\"Maybe box an ordinal or Period\"\"\"\n # TODO(DatetimeArray): Avoid double-boxing\n # PeriodArray takes care of boxing already, so we need to check\n # whether we're given an ordinal or a Period. It seems like some\n # places outside of indexes/period.py are calling this _box_func,\n # but passing data that's already boxed.\n def func(x):\n if isinstance(x, Period) or x is NaT:\n return x\n else:\n return Period._from_ordinal(ordinal=x, freq=self.freq)\n\n return func\n\n def _maybe_convert_timedelta(self, other):\n \"\"\"\n Convert timedelta-like input to an integer multiple of self.freq\n\n Parameters\n ----------\n other : timedelta, np.timedelta64, DateOffset, int, np.ndarray\n\n Returns\n -------\n converted : int, np.ndarray[int64]\n\n Raises\n ------\n IncompatibleFrequency : if the input cannot be written as a multiple\n of self.freq. 
Note IncompatibleFrequency subclasses ValueError.\n \"\"\"\n if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):\n offset = frequencies.to_offset(self.freq.rule_code)\n if isinstance(offset, Tick):\n # _check_timedeltalike_freq_compat will raise if incompatible\n delta = self._data._check_timedeltalike_freq_compat(other)\n return delta\n elif isinstance(other, DateOffset):\n freqstr = other.rule_code\n base = libfrequencies.get_base_alias(freqstr)\n if base == self.freq.rule_code:\n return other.n\n\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr\n )\n raise IncompatibleFrequency(msg)\n elif is_integer(other):\n # integer is passed to .shift via\n # _add_datetimelike_methods basically\n # but ufunc may pass integer to _add_delta\n return other\n\n # raise when input doesn't have freq\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__, own_freq=self.freqstr, other_freq=None\n )\n raise IncompatibleFrequency(msg)\n\n # ------------------------------------------------------------------------\n # Rendering Methods\n\n def _format_native_types(self, na_rep=\"NaT\", quoting=None, **kwargs):\n # just dispatch, return ndarray\n return self._data._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return self.astype(object).values\n\n @property\n def _formatter_func(self):\n return self.array._formatter(boxed=False)\n\n # ------------------------------------------------------------------------\n # Indexing\n\n @cache_readonly\n def _engine(self):\n # To avoid a reference cycle, pass a weakref of self to _engine_type.\n period = weakref.ref(self)\n return self._engine_type(period, len(self))\n\n @Appender(_index_shared_docs[\"contains\"])\n def __contains__(self, key):\n if isinstance(key, Period):\n if key.freq != self.freq:\n return False\n else:\n return key.ordinal in self._engine\n else:\n try:\n self.get_loc(key)\n return True\n except Exception:\n return False\n\n @cache_readonly\n def _int64index(self):\n return Int64Index._simple_new(self.asi8, name=self.name)\n\n # ------------------------------------------------------------------------\n # Index Methods\n\n def _coerce_scalar_to_index(self, item):\n \"\"\"\n we need to coerce a scalar to a compat for our index type\n\n Parameters\n ----------\n item : scalar item to coerce\n \"\"\"\n return PeriodIndex([item], **self._get_attributes_dict())\n\n def __array__(self, dtype=None):\n if is_integer_dtype(dtype):\n return self.asi8\n else:\n return self.astype(object).values\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc. 
Needs additional handling as\n PeriodIndex stores internal data as int dtype\n\n Replace this to __numpy_ufunc__ in future version\n \"\"\"\n if isinstance(context, tuple) and len(context) > 0:\n func = context[0]\n if func is np.add:\n pass\n elif func is np.subtract:\n name = self.name\n left = context[1][0]\n right = context[1][1]\n if isinstance(left, PeriodIndex) and isinstance(right, PeriodIndex):\n name = left.name if left.name == right.name else None\n return Index(result, name=name)\n elif isinstance(left, Period) or isinstance(right, Period):\n return Index(result, name=name)\n elif isinstance(func, np.ufunc):\n if \"M->M\" not in func.types:\n msg = \"ufunc '{0}' not supported for the PeriodIndex\"\n # This should be TypeError, but TypeError cannot be raised\n # from here because numpy catches.\n raise ValueError(msg.format(func.__name__))\n\n if is_bool_dtype(result):\n return result\n # the result is object dtype array of Period\n # cannot pass _simple_new as it is\n return type(self)(result, freq=self.freq, name=self.name)\n\n def asof_locs(self, where, mask):\n \"\"\"\n where : array of timestamps\n mask : array of booleans where data is not NA\n\n \"\"\"\n where_idx = where\n if isinstance(where_idx, DatetimeIndex):\n where_idx = PeriodIndex(where_idx.values, freq=self.freq)\n\n locs = self._ndarray_values[mask].searchsorted(\n where_idx._ndarray_values, side=\"right\"\n )\n\n locs = np.where(locs > 0, locs - 1, 0)\n result = np.arange(len(self))[mask].take(locs)\n\n first = mask.argmax()\n result[\n (locs == 0) & (where_idx._ndarray_values < self._ndarray_values[first])\n ] = -1\n\n return result\n\n @Appender(_index_shared_docs[\"astype\"])\n def astype(self, dtype, copy=True, how=\"start\"):\n dtype = pandas_dtype(dtype)\n\n if is_datetime64_any_dtype(dtype):\n # 'how' is index-specific, isn't part of the EA interface.\n tz = getattr(dtype, \"tz\", None)\n return self.to_timestamp(how=how).tz_localize(tz)\n\n # TODO: should probably raise on `how` here, so we don't ignore it.\n return super().astype(dtype, copy=copy)\n\n @Substitution(klass=\"PeriodIndex\")\n @Appender(_shared_docs[\"searchsorted\"])\n def searchsorted(self, value, side=\"left\", sorter=None):\n if isinstance(value, Period):\n if value.freq != self.freq:\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__,\n own_freq=self.freqstr,\n other_freq=value.freqstr,\n )\n raise IncompatibleFrequency(msg)\n value = value.ordinal\n elif isinstance(value, str):\n try:\n value = Period(value, freq=self.freq).ordinal\n except DateParseError:\n raise KeyError(\"Cannot interpret '{}' as period\".format(value))\n\n return self._ndarray_values.searchsorted(value, side=side, sorter=sorter)\n\n @property\n def is_all_dates(self):\n return True\n\n @property\n def is_full(self):\n \"\"\"\n Returns True if this PeriodIndex is range-like in that all Periods\n between start and end are present, in order.\n \"\"\"\n if len(self) == 0:\n return True\n if not self.is_monotonic:\n raise ValueError(\"Index is not monotonic\")\n values = self.asi8\n return ((values[1:] - values[:-1]) < 2).all()\n\n @property\n def inferred_type(self):\n # b/c data is represented as ints make sure we can't have ambiguous\n # indexing\n return \"period\"\n\n def get_value(self, series, key):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray. 
Only use this if you\n know what you're doing\n \"\"\"\n s = com.values_from_object(series)\n try:\n return com.maybe_box(self, super().get_value(s, key), series, key)\n except (KeyError, IndexError):\n try:\n asdt, parsed, reso = parse_time_string(key, self.freq)\n grp = resolution.Resolution.get_freq_group(reso)\n freqn = resolution.get_freq_group(self.freq)\n\n vals = self._ndarray_values\n\n # if our data is higher resolution than requested key, slice\n if grp < freqn:\n iv = Period(asdt, freq=(grp, 1))\n ord1 = iv.asfreq(self.freq, how=\"S\").ordinal\n ord2 = iv.asfreq(self.freq, how=\"E\").ordinal\n\n if ord2 < vals[0] or ord1 > vals[-1]:\n raise KeyError(key)\n\n pos = np.searchsorted(self._ndarray_values, [ord1, ord2])\n key = slice(pos[0], pos[1] + 1)\n return series[key]\n elif grp == freqn:\n key = Period(asdt, freq=self.freq).ordinal\n return com.maybe_box(\n self, self._int64index.get_value(s, key), series, key\n )\n else:\n raise KeyError(key)\n except TypeError:\n pass\n\n period = Period(key, self.freq)\n key = period.value if isna(period) else period.ordinal\n return com.maybe_box(self, self._int64index.get_value(s, key), series, key)\n\n @Appender(_index_shared_docs[\"get_indexer\"] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n target = ensure_index(target)\n\n if hasattr(target, \"freq\") and target.freq != self.freq:\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__,\n own_freq=self.freqstr,\n other_freq=target.freqstr,\n )\n raise IncompatibleFrequency(msg)\n\n if isinstance(target, PeriodIndex):\n target = target.asi8\n\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, target)\n return Index.get_indexer(self._int64index, target, method, limit, tolerance)\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n target = ensure_index(target)\n\n if isinstance(target, PeriodIndex):\n target = target.asi8\n if hasattr(target, \"freq\") and target.freq != self.freq:\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__,\n own_freq=self.freqstr,\n other_freq=target.freqstr,\n )\n raise IncompatibleFrequency(msg)\n\n indexer, missing = self._int64index.get_indexer_non_unique(target)\n return ensure_platform_int(indexer), missing\n\n def _get_unique_index(self, dropna=False):\n \"\"\"\n wrap Index._get_unique_index to handle NaT\n \"\"\"\n res = super()._get_unique_index(dropna=dropna)\n if dropna:\n res = res.dropna()\n return res\n\n @Appender(Index.unique.__doc__)\n def unique(self, level=None):\n # override the Index.unique method for performance GH#23083\n if level is not None:\n # this should never occur, but is retained to make the signature\n # match Index.unique\n self._validate_index_level(level)\n\n values = self._ndarray_values\n result = unique1d(values)\n return self._shallow_copy(result)\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label\n\n Returns\n -------\n loc : int\n \"\"\"\n try:\n return self._engine.get_loc(key)\n except KeyError:\n if is_integer(key):\n raise\n\n try:\n asdt, parsed, reso = parse_time_string(key, self.freq)\n key = asdt\n except TypeError:\n pass\n except DateParseError:\n # A string with invalid format\n raise KeyError(\"Cannot interpret '{}' as period\".format(key))\n\n try:\n key = Period(key, freq=self.freq)\n except ValueError:\n # we cannot construct the Period\n # as we have an invalid type\n raise 
KeyError(key)\n\n try:\n ordinal = iNaT if key is NaT else key.ordinal\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, np.asarray(key))\n return self._int64index.get_loc(ordinal, method, tolerance)\n\n except KeyError:\n raise KeyError(key)\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n \"\"\"\n If label is a string or a datetime, cast it to Period.ordinal according\n to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n bound : Period or object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n \"\"\"\n assert kind in [\"ix\", \"loc\", \"getitem\"]\n\n if isinstance(label, datetime):\n return Period(label, freq=self.freq)\n elif isinstance(label, str):\n try:\n _, parsed, reso = parse_time_string(label, self.freq)\n bounds = self._parsed_string_to_bounds(reso, parsed)\n return bounds[0 if side == \"left\" else 1]\n except Exception:\n raise KeyError(label)\n elif is_integer(label) or is_float(label):\n self._invalid_indexer(\"slice\", label)\n\n return label\n\n def _parsed_string_to_bounds(self, reso, parsed):\n if reso == \"year\":\n t1 = Period(year=parsed.year, freq=\"A\")\n elif reso == \"month\":\n t1 = Period(year=parsed.year, month=parsed.month, freq=\"M\")\n elif reso == \"quarter\":\n q = (parsed.month - 1) // 3 + 1\n t1 = Period(year=parsed.year, quarter=q, freq=\"Q-DEC\")\n elif reso == \"day\":\n t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day, freq=\"D\")\n elif reso == \"hour\":\n t1 = Period(\n year=parsed.year,\n month=parsed.month,\n day=parsed.day,\n hour=parsed.hour,\n freq=\"H\",\n )\n elif reso == \"minute\":\n t1 = Period(\n year=parsed.year,\n month=parsed.month,\n day=parsed.day,\n hour=parsed.hour,\n minute=parsed.minute,\n freq=\"T\",\n )\n elif reso == \"second\":\n t1 = Period(\n year=parsed.year,\n month=parsed.month,\n day=parsed.day,\n hour=parsed.hour,\n minute=parsed.minute,\n second=parsed.second,\n freq=\"S\",\n )\n else:\n raise KeyError(reso)\n return (t1.asfreq(self.freq, how=\"start\"), t1.asfreq(self.freq, how=\"end\"))\n\n def _get_string_slice(self, key):\n if not self.is_monotonic:\n raise ValueError(\"Partial indexing only valid for ordered time series\")\n\n key, parsed, reso = parse_time_string(key, self.freq)\n grp = resolution.Resolution.get_freq_group(reso)\n freqn = resolution.get_freq_group(self.freq)\n if reso in [\"day\", \"hour\", \"minute\", \"second\"] and not grp < freqn:\n raise KeyError(key)\n\n t1, t2 = self._parsed_string_to_bounds(reso, parsed)\n return slice(\n self.searchsorted(t1.ordinal, side=\"left\"),\n self.searchsorted(t2.ordinal, side=\"right\"),\n )\n\n def _convert_tolerance(self, tolerance, target):\n tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance, target)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError(\"list-like tolerance size must match target index size\")\n return self._maybe_convert_timedelta(tolerance)\n\n def insert(self, loc, item):\n if not isinstance(item, Period) or self.freq != item.freq:\n return self.astype(object).insert(loc, item)\n\n idx = np.concatenate(\n (self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8)\n )\n return self._shallow_copy(idx)\n\n def join(self, other, how=\"left\", level=None, return_indexers=False, sort=False):\n \"\"\"\n See Index.join\n \"\"\"\n self._assert_can_do_setop(other)\n\n if not isinstance(other, PeriodIndex):\n return 
self.astype(object).join(\n other, how=how, level=level, return_indexers=return_indexers, sort=sort\n )\n\n result = Int64Index.join(\n self,\n other,\n how=how,\n level=level,\n return_indexers=return_indexers,\n sort=sort,\n )\n\n if return_indexers:\n result, lidx, ridx = result\n return self._apply_meta(result), lidx, ridx\n return self._apply_meta(result)\n\n @Appender(Index.intersection.__doc__)\n def intersection(self, other, sort=False):\n return Index.intersection(self, other, sort=sort)\n\n def _assert_can_do_setop(self, other):\n super()._assert_can_do_setop(other)\n\n # *Can't* use PeriodIndexes of different freqs\n # *Can* use PeriodIndex/DatetimeIndex\n if isinstance(other, PeriodIndex) and self.freq != other.freq:\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr\n )\n raise IncompatibleFrequency(msg)\n\n def _wrap_setop_result(self, other, result):\n name = get_op_result_name(self, other)\n result = self._apply_meta(result)\n result.name = name\n return result\n\n def _apply_meta(self, rawarr):\n if not isinstance(rawarr, PeriodIndex):\n rawarr = PeriodIndex._simple_new(rawarr, freq=self.freq, name=self.name)\n return rawarr\n\n def __setstate__(self, state):\n \"\"\"Necessary for making this object picklable\"\"\"\n\n if isinstance(state, dict):\n super().__setstate__(state)\n\n elif isinstance(state, tuple):\n\n # < 0.15 compat\n if len(state) == 2:\n nd_state, own_state = state\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n\n # backcompat\n freq = Period._maybe_convert_freq(own_state[1])\n\n else: # pragma: no cover\n data = np.empty(state)\n np.ndarray.__setstate__(self, state)\n freq = None # ?\n\n data = PeriodArray(data, freq=freq)\n self._data = data\n\n else:\n raise Exception(\"invalid pickle state\")\n\n _unpickle_compat = __setstate__\n\n @property\n def flags(self):\n \"\"\" return the ndarray.flags for the underlying data \"\"\"\n warnings.warn(\n \"{obj}.flags is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return self._ndarray_values.flags\n\n def item(self):\n \"\"\"\n return the first element of the underlying data as a python\n scalar\n\n .. 
deprecated:: 0.25.0\n\n \"\"\"\n warnings.warn(\n \"`item` has been deprecated and will be removed in a future version\",\n FutureWarning,\n stacklevel=2,\n )\n # TODO(DatetimeArray): remove\n if len(self) == 1:\n return self[0]\n else:\n # TODO: is this still necessary?\n # copy numpy's message here because Py26 raises an IndexError\n raise ValueError(\"can only convert an array of size 1 to a Python scalar\")\n\n @property\n def data(self):\n \"\"\" return the data pointer of the underlying data \"\"\"\n warnings.warn(\n \"{obj}.data is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return np.asarray(self._data).data\n\n @property\n def base(self):\n \"\"\" return the base object if the memory of the underlying data is\n shared\n \"\"\"\n warnings.warn(\n \"{obj}.base is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return np.asarray(self._data)\n\n def memory_usage(self, deep=False):\n result = super().memory_usage(deep=deep)\n if hasattr(self, \"_cache\") and \"_int64index\" in self._cache:\n result += self._int64index.memory_usage(deep=deep)\n return result\n\n\nPeriodIndex._add_comparison_ops()\nPeriodIndex._add_numeric_methods_disabled()\nPeriodIndex._add_logical_methods_disabled()\nPeriodIndex._add_datetimelike_methods()\n\n\ndef period_range(start=None, end=None, periods=None, freq=None, name=None):\n \"\"\"\n Return a fixed frequency PeriodIndex, with day (calendar) as the default\n frequency.\n\n Parameters\n ----------\n start : string or period-like, default None\n Left bound for generating periods\n end : string or period-like, default None\n Right bound for generating periods\n periods : integer, default None\n Number of periods to generate\n freq : string or DateOffset, optional\n Frequency alias. By default the freq is taken from `start` or `end`\n if those are Period objects. Otherwise, the default is ``\"D\"`` for\n daily frequency.\n\n name : string, default None\n Name of the resulting PeriodIndex\n\n Returns\n -------\n prng : PeriodIndex\n\n Notes\n -----\n Of the three parameters: ``start``, ``end``, and ``periods``, exactly two\n must be specified.\n\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')\n PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',\n '2017-06', '2017-06', '2017-07', '2017-08', '2017-09',\n '2017-10', '2017-11', '2017-12', '2018-01'],\n dtype='period[M]', freq='M')\n\n If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor\n endpoints for a ``PeriodIndex`` with frequency matching that of the\n ``period_range`` constructor.\n\n >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),\n ... 
end=pd.Period('2017Q2', freq='Q'), freq='M')\n PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],\n dtype='period[M]', freq='M')\n \"\"\"\n if com.count_not_none(start, end, periods) != 2:\n raise ValueError(\n \"Of the three parameters: start, end, and periods, \"\n \"exactly two must be specified\"\n )\n if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):\n freq = \"D\"\n\n data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})\n data = PeriodArray(data, freq=freq)\n return PeriodIndex(data, name=name)\n" ]
[ [ "pandas._libs.tslibs.period.Period._maybe_convert_freq", "pandas._libs.tslibs.period.Period._from_ordinal", "pandas.core.accessor.delegate_names", "pandas.core.arrays.period.PeriodArray", "pandas.core.dtypes.common.is_datetime64_any_dtype", "pandas._libs.tslibs.period.IncompatibleFrequency", "numpy.where", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.ops.get_op_result_name", "pandas.core.indexes.datetimes.Int64Index.join", "pandas.core.dtypes.common.ensure_platform_int", "pandas._libs.tslibs.resolution.Resolution.get_freq_group", "numpy.empty", "pandas._libs.tslibs.resolution.get_freq_group", "pandas.core.indexes.datetimes.Index.intersection", "numpy.ndarray.__setstate__", "pandas.core.missing.isna", "pandas.core.dtypes.common.pandas_dtype", "pandas._libs.tslibs.period.Period", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.tools.datetimes.parse_time_string", "pandas.util._decorators.Appender", "pandas.util._decorators.Substitution", "pandas.core.indexes.base.ensure_index", "pandas._libs.tslibs.frequencies.get_base_alias", "numpy.array", "pandas.core.common.values_from_object", "pandas.core.indexes.datetimes.Index.get_indexer", "pandas.core.dtypes.common.is_integer", "pandas.core.common.count_not_none", "pandas.core.arrays.period.PeriodArray._generate_range", "pandas.core.algorithms.unique1d", "numpy.searchsorted", "pandas.core.arrays.period.period_array", "pandas.tseries.frequencies.to_offset", "numpy.asarray", "pandas.core.indexes.datetimes.Index", "pandas.core.indexes.datetimes.Int64Index._simple_new", "pandas.core.indexes.datetimelike.DatetimeIndexOpsMixin._convert_tolerance", "pandas.core.dtypes.common.is_float", "pandas.core.arrays.period.validate_dtype_freq", "pandas.core.dtypes.common.is_bool_dtype" ] ]
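Note on the pandas PeriodIndex record above: the constructor path that takes start/end/periods is deprecated there in favour of pandas.period_range. A short usage sketch of the recommended construction (illustrative only; exact reprs depend on the installed pandas version):

import pandas as pd

# build a monthly PeriodIndex with period_range instead of PeriodIndex(start=..., end=...)
monthly = pd.period_range(start="2017-01", end="2017-06", freq="M")
print(monthly)                     # PeriodIndex(['2017-01', ..., '2017-06'], dtype='period[M]')

# Periods are labelled time spans; asfreq/to_timestamp convert between representations
print(monthly.asfreq("Q")[0])      # Period('2017Q1', 'Q-DEC')
print(monthly.to_timestamp()[0])   # Timestamp('2017-01-01 00:00:00')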
johncollinsai/post-high-frequency-data
[ "88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4", "88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4", "88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4", "88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4", "88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4" ]
[ "venv/lib/python3.8/site-packages/statsmodels/stats/rates.py", "venv/lib/python3.8/site-packages/statsmodels/genmod/families/links.py", "venv/lib/python3.8/site-packages/statsmodels/tsa/regime_switching/tests/test_markov_autoregression.py", "venv/lib/python3.8/site-packages/pandas/tests/dtypes/test_inference.py", "venv/lib/python3.8/site-packages/pandas/tests/io/excel/test_writers.py" ]
[ "'''Test for ratio of Poisson intensities in two independent samples\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\n'''\n\n\nimport numpy as np\nfrom scipy import stats\n\nfrom statsmodels.stats.base import HolderTuple\nfrom statsmodels.stats.weightstats import _zstat_generic2\n\n\ndef test_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1,\n method='score', alternative='two-sided',\n etest_kwds=None):\n '''test for ratio of two sample Poisson intensities\n\n If the two Poisson rates are g1 and g2, then the Null hypothesis is\n\n - H0: g1 / g2 = ratio_null\n\n against one of the following alternatives\n\n - H1_2-sided: g1 / g2 != ratio_null\n - H1_larger: g1 / g2 > ratio_null\n - H1_smaller: g1 / g2 < ratio_null\n\n Parameters\n ----------\n count1 : int\n Number of events in first sample.\n exposure1 : float\n Total exposure (time * subjects) in first sample.\n count2 : int\n Number of events in second sample.\n exposure2 : float\n Total exposure (time * subjects) in second sample.\n ratio: float\n ratio of the two Poisson rates under the Null hypothesis. Default is 1.\n method : string\n Method for the test statistic and the p-value. Defaults to `'score'`.\n Current Methods are based on Gu et. al 2008.\n Implemented are 'wald', 'score' and 'sqrt' based asymptotic normal\n distribution, and the exact conditional test 'exact-cond', and its\n mid-point version 'cond-midp'. method='etest' and method='etest-wald'\n provide pvalues from `etest_poisson_2indep` using score or wald\n statistic respectively.\n see Notes.\n alternative : string\n The alternative hypothesis, H1, has to be one of the following\n\n - 'two-sided': H1: ratio of rates is not equal to ratio_null (default)\n - 'larger' : H1: ratio of rates is larger than ratio_null\n - 'smaller' : H1: ratio of rates is smaller than ratio_null\n etest_kwds: dictionary\n Additional parameters to be passed to the etest_poisson_2indep\n function, namely ygrid.\n\n Returns\n -------\n results : instance of HolderTuple class\n The two main attributes are test statistic `statistic` and p-value\n `pvalue`.\n\n Notes\n -----\n - 'wald': method W1A, wald test, variance based on separate estimates\n - 'score': method W2A, score test, variance based on estimate under Null\n - 'wald-log': W3A\n - 'score-log' W4A\n - 'sqrt': W5A, based on variance stabilizing square root transformation\n - 'exact-cond': exact conditional test based on binomial distribution\n - 'cond-midp': midpoint-pvalue of exact conditional test\n - 'etest': etest with score test statistic\n - 'etest-wald': etest with wald test statistic\n\n References\n ----------\n Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,\n Biometrical Journal 50 (2008) 2, 2008\n\n See Also\n --------\n tost_poisson_2indep\n etest_poisson_2indep\n '''\n\n # shortcut names\n y1, n1, y2, n2 = count1, exposure1, count2, exposure2\n d = n2 / n1\n r = ratio_null\n r_d = r / d\n\n if method in ['score']:\n stat = (y1 - y2 * r_d) / np.sqrt((y1 + y2) * r_d)\n dist = 'normal'\n elif method in ['wald']:\n stat = (y1 - y2 * r_d) / np.sqrt(y1 + y2 * r_d**2)\n dist = 'normal'\n elif method in ['sqrt']:\n stat = 2 * (np.sqrt(y1 + 3 / 8.) - np.sqrt((y2 + 3 / 8.) 
* r_d))\n stat /= np.sqrt(1 + r_d)\n dist = 'normal'\n elif method in ['exact-cond', 'cond-midp']:\n from statsmodels.stats import proportion\n bp = r_d / (1 + r_d)\n y_total = y1 + y2\n stat = None\n # TODO: why y2 in here and not y1, check definition of H1 \"larger\"\n pvalue = proportion.binom_test(y1, y_total, prop=bp,\n alternative=alternative)\n if method in ['cond-midp']:\n # not inplace in case we still want binom pvalue\n pvalue = pvalue - 0.5 * stats.binom.pmf(y1, y_total, bp)\n\n dist = 'binomial'\n elif method.startswith('etest'):\n if method.endswith('wald'):\n method_etest = 'wald'\n else:\n method_etest = 'score'\n if etest_kwds is None:\n etest_kwds = {}\n\n stat, pvalue = etest_poisson_2indep(\n count1, exposure1, count2, exposure2, ratio_null=ratio_null,\n method=method_etest, alternative=alternative, **etest_kwds)\n\n dist = 'poisson'\n else:\n raise ValueError('method not recognized')\n\n if dist == 'normal':\n stat, pvalue = _zstat_generic2(stat, 1, alternative)\n\n rates = (y1 / n1, y2 / n2)\n ratio = rates[0] / rates[1]\n res = HolderTuple(statistic=stat,\n pvalue=pvalue,\n distribution=dist,\n method=method,\n alternative=alternative,\n rates=rates,\n ratio=ratio,\n ratio_null=ratio_null)\n return res\n\n\ndef etest_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1,\n method='score', alternative='2-sided', ygrid=None):\n \"\"\"E-test for ratio of two sample Poisson rates\n\n If the two Poisson rates are g1 and g2, then the Null hypothesis is\n\n - H0: g1 / g2 = ratio_null\n\n against one of the following alternatives\n\n - H1_2-sided: g1 / g2 != ratio_null\n - H1_larger: g1 / g2 > ratio_null\n - H1_smaller: g1 / g2 < ratio_null\n\n Parameters\n ----------\n count1 : int\n Number of events in first sample\n exposure1 : float\n Total exposure (time * subjects) in first sample\n count2 : int\n Number of events in first sample\n exposure2 : float\n Total exposure (time * subjects) in first sample\n ratio : float\n ratio of the two Poisson rates under the Null hypothesis. Default is 1.\n method : {\"score\", \"wald\"}\n Method for the test statistic that defines the rejection region.\n alternative : string\n The alternative hypothesis, H1, has to be one of the following\n\n 'two-sided': H1: ratio of rates is not equal to ratio_null (default)\n 'larger' : H1: ratio of rates is larger than ratio_null\n 'smaller' : H1: ratio of rates is smaller than ratio_null\n\n ygrid : None or 1-D ndarray\n Grid values for counts of the Poisson distribution used for computing\n the pvalue. By default truncation is based on an upper tail Poisson\n quantiles.\n\n Returns\n -------\n stat_sample : float\n test statistic for the sample\n pvalue : float\n\n References\n ----------\n Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,\n Biometrical Journal 50 (2008) 2, 2008\n\n \"\"\"\n y1, n1, y2, n2 = count1, exposure1, count2, exposure2\n d = n2 / n1\n r = ratio_null\n r_d = r / d\n\n eps = 1e-20 # avoid zero division in stat_func\n\n if method in ['score']:\n def stat_func(x1, x2):\n return (x1 - x2 * r_d) / np.sqrt((x1 + x2) * r_d + eps)\n # TODO: do I need these? 
return_results ?\n # rate2_cmle = (y1 + y2) / n2 / (1 + r_d)\n # rate1_cmle = rate2_cmle * r\n # rate1 = rate1_cmle\n # rate2 = rate2_cmle\n elif method in ['wald']:\n def stat_func(x1, x2):\n return (x1 - x2 * r_d) / np.sqrt(x1 + x2 * r_d**2 + eps)\n # rate2_mle = y2 / n2\n # rate1_mle = y1 / n1\n # rate1 = rate1_mle\n # rate2 = rate2_mle\n else:\n raise ValueError('method not recognized')\n\n # The sampling distribution needs to be based on the null hypotheis\n # use constrained MLE from 'score' calculation\n rate2_cmle = (y1 + y2) / n2 / (1 + r_d)\n rate1_cmle = rate2_cmle * r\n rate1 = rate1_cmle\n rate2 = rate2_cmle\n mean1 = n1 * rate1\n mean2 = n2 * rate2\n\n stat_sample = stat_func(y1, y2)\n\n # The following uses a fixed truncation for evaluating the probabilities\n # It will currently only work for small counts, so that sf at truncation\n # point is small\n # We can make it depend on the amount of truncated sf.\n # Some numerical optimization or checks for large means need to be added.\n if ygrid is None:\n threshold = stats.poisson.isf(1e-13, max(mean1, mean2))\n threshold = max(threshold, 100) # keep at least 100\n y_grid = np.arange(threshold + 1)\n pdf1 = stats.poisson.pmf(y_grid, mean1)\n pdf2 = stats.poisson.pmf(y_grid, mean2)\n\n stat_space = stat_func(y_grid[:, None], y_grid[None, :]) # broadcasting\n eps = 1e-15 # correction for strict inequality check\n\n if alternative in ['two-sided', '2-sided', '2s']:\n mask = np.abs(stat_space) >= np.abs(stat_sample) - eps\n elif alternative in ['larger', 'l']:\n mask = stat_space >= stat_sample - eps\n elif alternative in ['smaller', 's']:\n mask = stat_space <= stat_sample + eps\n else:\n raise ValueError('invalid alternative')\n\n pvalue = ((pdf1[:, None] * pdf2[None, :])[mask]).sum()\n return stat_sample, pvalue\n\n\ndef tost_poisson_2indep(count1, exposure1, count2, exposure2, low, upp,\n method='score'):\n '''Equivalence test based on two one-sided `test_proportions_2indep`\n\n This assumes that we have two independent binomial samples.\n\n The Null and alternative hypothesis for equivalence testing are\n\n - H0: g1 / g2 <= low or upp <= g1 / g2\n - H1: low < g1 / g2 < upp\n\n where g1 and g2 are the Poisson rates.\n\n Parameters\n ----------\n count1 : int\n Number of events in first sample\n exposure1 : float\n Total exposure (time * subjects) in first sample\n count2 : int\n Number of events in second sample\n exposure2 : float\n Total exposure (time * subjects) in second sample\n low, upp :\n equivalence margin for the ratio of Poisson rates\n method: string\n Method for the test statistic and the p-value. Defaults to `'score'`.\n Current Methods are based on Gu et. 
al 2008\n Implemented are 'wald', 'score' and 'sqrt' based asymptotic normal\n distribution, and the exact conditional test 'exact-cond', and its\n mid-point version 'cond-midp', see Notes\n\n Returns\n -------\n results : instance of HolderTuple class\n The two main attributes are test statistic `statistic` and p-value\n `pvalue`.\n\n Notes\n -----\n - 'wald': method W1A, wald test, variance based on separate estimates\n - 'score': method W2A, score test, variance based on estimate under Null\n - 'wald-log': W3A not implemented\n - 'score-log' W4A not implemented\n - 'sqrt': W5A, based on variance stabilizing square root transformation\n - 'exact-cond': exact conditional test based on binomial distribution\n - 'cond-midp': midpoint-pvalue of exact conditional test\n\n The latter two are only verified for one-sided example.\n\n References\n ----------\n Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,\n Biometrical Journal 50 (2008) 2, 2008\n\n See Also\n --------\n test_poisson_2indep\n '''\n\n tt1 = test_poisson_2indep(count1, exposure1, count2, exposure2,\n ratio_null=low, method=method,\n alternative='larger')\n tt2 = test_poisson_2indep(count1, exposure1, count2, exposure2,\n ratio_null=upp, method=method,\n alternative='smaller')\n\n idx_max = 0 if tt1.pvalue < tt2.pvalue else 1\n res = HolderTuple(statistic=[tt1.statistic, tt2.statistic][idx_max],\n pvalue=[tt1.pvalue, tt2.pvalue][idx_max],\n method=method,\n results_larger=tt1,\n results_smaller=tt2,\n title=\"Equivalence test for 2 independent Poisson rates\"\n )\n\n return res\n", "'''\nDefines the link functions to be used with GLM and GEE families.\n'''\n\nimport numpy as np\nimport scipy.stats\nFLOAT_EPS = np.finfo(float).eps\n\n\nclass Link(object):\n \"\"\"\n A generic link function for one-parameter exponential family.\n\n `Link` does nothing, but lays out the methods expected of any subclass.\n \"\"\"\n\n def __call__(self, p):\n \"\"\"\n Return the value of the link function. This is just a placeholder.\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g(p) : array_like\n The value of the link function g(p) = z\n \"\"\"\n return NotImplementedError\n\n def inverse(self, z):\n \"\"\"\n Inverse of the link function. Just a placeholder.\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor of the transformed variable\n in the IRLS algorithm for GLM.\n\n Returns\n -------\n g^(-1)(z) : ndarray\n The value of the inverse of the link function g^(-1)(z) = p\n \"\"\"\n return NotImplementedError\n\n def deriv(self, p):\n \"\"\"\n Derivative of the link function g'(p). 
Just a placeholder.\n\n Parameters\n ----------\n p : array_like\n\n Returns\n -------\n g'(p) : ndarray\n The value of the derivative of the link function g'(p)\n \"\"\"\n return NotImplementedError\n\n def deriv2(self, p):\n \"\"\"Second derivative of the link function g''(p)\n\n implemented through numerical differentiation\n \"\"\"\n from statsmodels.tools.numdiff import _approx_fprime_cs_scalar\n return _approx_fprime_cs_scalar(p, self.deriv)\n\n def inverse_deriv(self, z):\n \"\"\"\n Derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the derivative of the inverse of the link function\n\n Notes\n -----\n This reference implementation gives the correct result but is\n inefficient, so it can be overridden in subclasses.\n \"\"\"\n return 1 / self.deriv(self.inverse(z))\n\n def inverse_deriv2(self, z):\n \"\"\"\n Second derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the second derivative of the inverse of the link\n function\n\n Notes\n -----\n This reference implementation gives the correct result but is\n inefficient, so it can be overridden in subclasses.\n \"\"\"\n iz = self.inverse(z)\n return -self.deriv2(iz) / self.deriv(iz)**3\n\n\nclass Logit(Link):\n \"\"\"\n The logit transform\n\n Notes\n -----\n call and derivative use a private method _clean to make trim p by\n machine epsilon so that p is in (0,1)\n\n Alias of Logit:\n logit = Logit()\n \"\"\"\n\n def _clean(self, p):\n \"\"\"\n Clip logistic values to range (eps, 1-eps)\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n pclip : ndarray\n Clipped probabilities\n \"\"\"\n return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)\n\n def __call__(self, p):\n \"\"\"\n The logit transform\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n z : ndarray\n Logit transform of `p`\n\n Notes\n -----\n g(p) = log(p / (1 - p))\n \"\"\"\n p = self._clean(p)\n return np.log(p / (1. - p))\n\n def inverse(self, z):\n \"\"\"\n Inverse of the logit transform\n\n Parameters\n ----------\n z : array_like\n The value of the logit transform at `p`\n\n Returns\n -------\n p : ndarray\n Probabilities\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(1+exp(z))\n \"\"\"\n z = np.asarray(z)\n t = np.exp(-z)\n return 1. / (1. + t)\n\n def deriv(self, p):\n \"\"\"\n Derivative of the logit transform\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g'(p) : ndarray\n Value of the derivative of logit transform at `p`\n\n Notes\n -----\n g'(p) = 1 / (p * (1 - p))\n\n Alias for `Logit`:\n logit = Logit()\n \"\"\"\n p = self._clean(p)\n return 1. 
/ (p * (1 - p))\n\n def inverse_deriv(self, z):\n \"\"\"\n Derivative of the inverse of the logit transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the derivative of the inverse of the logit function\n \"\"\"\n t = np.exp(z)\n return t/(1 + t)**2\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the logit function.\n\n Parameters\n ----------\n p : array_like\n probabilities\n\n Returns\n -------\n g''(z) : ndarray\n The value of the second derivative of the logit function\n \"\"\"\n v = p * (1 - p)\n return (2*p - 1) / v**2\n\n\nclass logit(Logit):\n pass\n\n\nclass Power(Link):\n \"\"\"\n The power transform\n\n Parameters\n ----------\n power : float\n The exponent of the power transform\n\n Notes\n -----\n Aliases of Power:\n inverse = Power(power=-1)\n sqrt = Power(power=.5)\n inverse_squared = Power(power=-2.)\n identity = Power(power=1.)\n \"\"\"\n\n def __init__(self, power=1.):\n self.power = power\n\n def __call__(self, p):\n \"\"\"\n Power transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : array_like\n Power transform of x\n\n Notes\n -----\n g(p) = x**self.power\n \"\"\"\n if self.power == 1:\n return p\n else:\n return np.power(p, self.power)\n\n def inverse(self, z):\n \"\"\"\n Inverse of the power transform link function\n\n Parameters\n ----------\n `z` : array_like\n Value of the transformed mean parameters at `p`\n\n Returns\n -------\n `p` : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(z`) = `z`**(1/`power`)\n \"\"\"\n if self.power == 1:\n return z\n else:\n return np.power(z, 1. / self.power)\n\n def deriv(self, p):\n \"\"\"\n Derivative of the power transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n Derivative of power transform of `p`\n\n Notes\n -----\n g'(`p`) = `power` * `p`**(`power` - 1)\n \"\"\"\n if self.power == 1:\n return np.ones_like(p)\n else:\n return self.power * np.power(p, self.power - 1)\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the power transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n Second derivative of the power transform of `p`\n\n Notes\n -----\n g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)\n \"\"\"\n if self.power == 1:\n return np.zeros_like(p)\n else:\n return self.power * (self.power - 1) * np.power(p, self.power - 2)\n\n def inverse_deriv(self, z):\n \"\"\"\n Derivative of the inverse of the power transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the power transform\n function\n \"\"\"\n if self.power == 1:\n return np.ones_like(z)\n else:\n return np.power(z, (1 - self.power)/self.power) / self.power\n\n def inverse_deriv2(self, z):\n \"\"\"\n Second derivative of the inverse of the power transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the power transform\n function\n \"\"\"\n if self.power == 1:\n return np.zeros_like(z)\n else:\n return ((1 - self.power) *\n np.power(z, (1 - 2*self.power)/self.power) / self.power**2)\n\n\nclass inverse_power(Power):\n \"\"\"\n The 
inverse transform\n\n Notes\n -----\n g(p) = 1/p\n\n Alias of statsmodels.family.links.Power(power=-1.)\n \"\"\"\n def __init__(self):\n super(inverse_power, self).__init__(power=-1.)\n\n\nclass sqrt(Power):\n \"\"\"\n The square-root transform\n\n Notes\n -----\n g(`p`) = sqrt(`p`)\n\n Alias of statsmodels.family.links.Power(power=.5)\n \"\"\"\n def __init__(self):\n super(sqrt, self).__init__(power=.5)\n\n\nclass inverse_squared(Power):\n r\"\"\"\n The inverse squared transform\n\n Notes\n -----\n g(`p`) = 1/(`p`\\*\\*2)\n\n Alias of statsmodels.family.links.Power(power=2.)\n \"\"\"\n def __init__(self):\n super(inverse_squared, self).__init__(power=-2.)\n\n\nclass identity(Power):\n \"\"\"\n The identity transform\n\n Notes\n -----\n g(`p`) = `p`\n\n Alias of statsmodels.family.links.Power(power=1.)\n \"\"\"\n def __init__(self):\n super(identity, self).__init__(power=1.)\n\n\nclass Log(Link):\n \"\"\"\n The log transform\n\n Notes\n -----\n call and derivative call a private method _clean to trim the data by\n machine epsilon so that p is in (0,1). log is an alias of Log.\n \"\"\"\n\n def _clean(self, x):\n return np.clip(x, FLOAT_EPS, np.inf)\n\n def __call__(self, p, **extra):\n \"\"\"\n Log transform link function\n\n Parameters\n ----------\n x : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n log(x)\n\n Notes\n -----\n g(p) = log(p)\n \"\"\"\n x = self._clean(p)\n return np.log(x)\n\n def inverse(self, z):\n \"\"\"\n Inverse of log transform link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n p : ndarray\n The mean probabilities given the value of the inverse `z`\n\n Notes\n -----\n g^{-1}(z) = exp(z)\n \"\"\"\n return np.exp(z)\n\n def deriv(self, p):\n \"\"\"\n Derivative of log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n derivative of log transform of x\n\n Notes\n -----\n g'(x) = 1/x\n \"\"\"\n p = self._clean(p)\n return 1. / p\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n Second derivative of log transform of x\n\n Notes\n -----\n g''(x) = -1/x^2\n \"\"\"\n p = self._clean(p)\n return -1. 
/ p**2\n\n def inverse_deriv(self, z):\n \"\"\"\n Derivative of the inverse of the log transform link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the log function,\n the exponential function\n \"\"\"\n return np.exp(z)\n\n\nclass log(Log):\n \"\"\"\n The log transform\n\n Notes\n -----\n log is a an alias of Log.\n \"\"\"\n pass\n\n\n# TODO: the CDFLink is untested\nclass CDFLink(Logit):\n \"\"\"\n The use the CDF of a scipy.stats distribution\n\n CDFLink is a subclass of logit in order to use its _clean method\n for the link and its derivative.\n\n Parameters\n ----------\n dbn : scipy.stats distribution\n Default is dbn=scipy.stats.norm\n\n Notes\n -----\n The CDF link is untested.\n \"\"\"\n\n def __init__(self, dbn=scipy.stats.norm):\n self.dbn = dbn\n\n def __call__(self, p):\n \"\"\"\n CDF link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n (ppf) inverse of CDF transform of p\n\n Notes\n -----\n g(`p`) = `dbn`.ppf(`p`)\n \"\"\"\n p = self._clean(p)\n return self.dbn.ppf(p)\n\n def inverse(self, z):\n \"\"\"\n The inverse of the CDF link\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean probabilities. The value of the inverse of CDF link of `z`\n\n Notes\n -----\n g^(-1)(`z`) = `dbn`.cdf(`z`)\n \"\"\"\n return self.dbn.cdf(z)\n\n def deriv(self, p):\n \"\"\"\n Derivative of CDF link\n\n Parameters\n ----------\n p : array_like\n mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of CDF transform at `p`\n\n Notes\n -----\n g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))\n \"\"\"\n p = self._clean(p)\n return 1. 
/ self.dbn.pdf(self.dbn.ppf(p))\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the link function g''(p)\n\n implemented through numerical differentiation\n \"\"\"\n p = self._clean(p)\n linpred = self.dbn.ppf(p)\n return - self.inverse_deriv2(linpred) / self.dbn.pdf(linpred)**3\n\n def deriv2_numdiff(self, p):\n \"\"\"\n Second derivative of the link function g''(p)\n\n implemented through numerical differentiation\n \"\"\"\n from statsmodels.tools.numdiff import _approx_fprime_scalar\n p = np.atleast_1d(p)\n # Note: special function for norm.ppf does not support complex\n return _approx_fprime_scalar(p, self.deriv, centered=True)\n\n def inverse_deriv(self, z):\n \"\"\"\n Derivative of the inverse link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the logit function.\n This is just the pdf in a CDFLink,\n \"\"\"\n return self.dbn.pdf(z)\n\n def inverse_deriv2(self, z):\n \"\"\"\n Second derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)''(z) : ndarray\n The value of the second derivative of the inverse of the link\n function\n\n Notes\n -----\n This method should be overwritten by subclasses.\n\n The inherited method is implemented through numerical differentiation.\n \"\"\"\n from statsmodels.tools.numdiff import _approx_fprime_scalar\n z = np.atleast_1d(z)\n\n # Note: special function for norm.ppf does not support complex\n return _approx_fprime_scalar(z, self.inverse_deriv, centered=True)\n\n\nclass probit(CDFLink):\n \"\"\"\n The probit (standard normal CDF) transform\n\n Notes\n -----\n g(p) = scipy.stats.norm.ppf(p)\n\n probit is an alias of CDFLink.\n \"\"\"\n\n def inverse_deriv2(self, z):\n \"\"\"\n Second derivative of the inverse link function\n\n This is the derivative of the pdf in a CDFLink\n\n \"\"\"\n return - z * self.dbn.pdf(z)\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the link function g''(p)\n\n \"\"\"\n p = self._clean(p)\n linpred = self.dbn.ppf(p)\n return linpred / self.dbn.pdf(linpred)**2\n\n\nclass cauchy(CDFLink):\n \"\"\"\n The Cauchy (standard Cauchy CDF) transform\n\n Notes\n -----\n g(p) = scipy.stats.cauchy.ppf(p)\n\n cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy\n \"\"\"\n\n def __init__(self):\n super(cauchy, self).__init__(dbn=scipy.stats.cauchy)\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the Cauchy link function.\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g''(p) : ndarray\n Value of the second derivative of Cauchy link function at `p`\n \"\"\"\n p = self._clean(p)\n a = np.pi * (p - 0.5)\n d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3\n return d2\n\n def inverse_deriv2(self, z):\n\n return - 2 * z / (np.pi * (z**2 + 1)**2)\n\n\nclass CLogLog(Logit):\n \"\"\"\n The complementary log-log transform\n\n CLogLog inherits from Logit in order to have access to its _clean method\n for the link and its derivative.\n\n Notes\n -----\n CLogLog is untested.\n \"\"\"\n def __call__(self, p):\n \"\"\"\n C-Log-Log transform link function\n\n Parameters\n ----------\n p : ndarray\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The CLogLog transform of `p`\n\n Notes\n -----\n g(p) = log(-log(1-p))\n \"\"\"\n p = self._clean(p)\n return np.log(-np.log(1 - p))\n\n def inverse(self, z):\n \"\"\"\n 
Inverse of C-Log-Log transform link function\n\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the CLogLog link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(`z`) = 1-exp(-exp(`z`))\n \"\"\"\n return 1 - np.exp(-np.exp(z))\n\n def deriv(self, p):\n \"\"\"\n Derivative of C-Log-Log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the CLogLog transform link function\n\n Notes\n -----\n g'(p) = - 1 / ((p-1)*log(1-p))\n \"\"\"\n p = self._clean(p)\n return 1. / ((p - 1) * (np.log(1 - p)))\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the C-Log-Log ink function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n The second derivative of the CLogLog link function\n \"\"\"\n p = self._clean(p)\n fl = np.log(1 - p)\n d2 = -1 / ((1 - p)**2 * fl)\n d2 *= 1 + 1 / fl\n return d2\n\n def inverse_deriv(self, z):\n \"\"\"\n Derivative of the inverse of the C-Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the CLogLog link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The derivative of the inverse of the CLogLog link function\n \"\"\"\n return np.exp(z - np.exp(z))\n\n\nclass cloglog(CLogLog):\n \"\"\"\n The CLogLog transform link function.\n\n Notes\n -----\n g(`p`) = log(-log(1-`p`))\n\n cloglog is an alias for CLogLog\n cloglog = CLogLog()\n \"\"\"\n pass\n\n\nclass LogLog(Logit):\n \"\"\"\n The log-log transform\n\n LogLog inherits from Logit in order to have access to its _clean method\n for the link and its derivative.\n \"\"\"\n def __call__(self, p):\n \"\"\"\n Log-Log transform link function\n\n Parameters\n ----------\n p : ndarray\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The LogLog transform of `p`\n\n Notes\n -----\n g(p) = -log(-log(p))\n \"\"\"\n p = self._clean(p)\n return -np.log(-np.log(p))\n\n def inverse(self, z):\n \"\"\"\n Inverse of Log-Log transform link function\n\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(`z`) = exp(-exp(-`z`))\n \"\"\"\n return np.exp(-np.exp(-z))\n\n def deriv(self, p):\n \"\"\"\n Derivative of Log-Log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the LogLog transform link function\n\n Notes\n -----\n g'(p) = - 1 /(p * log(p))\n \"\"\"\n p = self._clean(p)\n return -1. 
/ (p * (np.log(p)))\n\n def deriv2(self, p):\n \"\"\"\n Second derivative of the Log-Log link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n The second derivative of the LogLog link function\n \"\"\"\n p = self._clean(p)\n d2 = (1 + np.log(p)) / (p * (np.log(p)))**2\n return d2\n\n def inverse_deriv(self, z):\n \"\"\"\n Derivative of the inverse of the Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The derivative of the inverse of the LogLog link function\n \"\"\"\n return np.exp(-np.exp(-z) - z)\n\n def inverse_deriv2(self, z):\n \"\"\"\n Second derivative of the inverse of the Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n g^(-1)''(z) : ndarray\n The second derivative of the inverse of the LogLog link function\n \"\"\"\n return self.inverse_deriv(z) * (np.exp(-z) - 1)\n\n\nclass loglog(LogLog):\n \"\"\"\n The LogLog transform link function.\n\n Notes\n -----\n g(`p`) = -log(-log(`p`))\n\n loglog is an alias for LogLog\n loglog = LogLog()\n \"\"\"\n pass\n\n\nclass NegativeBinomial(Link):\n '''\n The negative binomial link function\n\n Parameters\n ----------\n alpha : float, optional\n Alpha is the ancillary parameter of the Negative Binomial link\n function. It is assumed to be nonstochastic. The default value is 1.\n Permissible values are usually assumed to be in (.01, 2).\n '''\n\n def __init__(self, alpha=1.):\n self.alpha = alpha\n\n def _clean(self, x):\n return np.clip(x, FLOAT_EPS, np.inf)\n\n def __call__(self, p):\n '''\n Negative Binomial transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The negative binomial transform of `p`\n\n Notes\n -----\n g(p) = log(p/(p + 1/alpha))\n '''\n p = self._clean(p)\n return np.log(p/(p + 1/self.alpha))\n\n def inverse(self, z):\n '''\n Inverse of the negative binomial transform\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the negative binomial link at `p`.\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))\n '''\n return -1/(self.alpha * (1 - np.exp(-z)))\n\n def deriv(self, p):\n '''\n Derivative of the negative binomial transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the negative binomial transform link function\n\n Notes\n -----\n g'(x) = 1/(x+alpha*x^2)\n '''\n return 1/(p + self.alpha * p**2)\n\n def deriv2(self, p):\n '''\n Second derivative of the negative binomial link function.\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n The second derivative of the negative binomial transform link\n function\n\n Notes\n -----\n g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2\n '''\n numer = -(1 + 2 * self.alpha * p)\n denom = (p + self.alpha * p**2)**2\n return numer / denom\n\n def inverse_deriv(self, z):\n '''\n Derivative of the inverse of the negative binomial transform\n\n Parameters\n ----------\n z : array_like\n Usually the linear predictor for a GLM or GEE model\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the negative\n binomial link\n '''\n t = np.exp(z)\n return t / 
(self.alpha * (1-t)**2)\n\n\nclass nbinom(NegativeBinomial):\n \"\"\"\n The negative binomial link function.\n\n Notes\n -----\n g(p) = log(p/(p + 1/alpha))\n\n nbinom is an alias of NegativeBinomial.\n nbinom = NegativeBinomial(alpha=1.)\n \"\"\"\n pass\n", "\"\"\"\nTests for Markov Autoregression models\n\nAuthor: Chad Fulton\nLicense: BSD-3\n\"\"\"\n\nimport warnings\nimport os\n\nimport numpy as np\nfrom numpy.testing import assert_equal, assert_allclose\nimport pandas as pd\nimport pytest\n\nfrom statsmodels.tools import add_constant\nfrom statsmodels.tsa.regime_switching import markov_autoregression\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\n\n\nrgnp = [2.59316421, 2.20217133, 0.45827562, 0.9687438,\n -0.24130757, 0.89647478, 2.05393219, 1.73353648,\n 0.93871289, -0.46477833, -0.80983406, -1.39763689,\n -0.39886093, 1.1918416, 1.45620048, 2.11808228,\n 1.08957863, 1.32390273, 0.87296367, -0.19773273,\n 0.45420215, 0.07221876, 1.1030364, 0.82097489,\n -0.05795795, 0.58447772, -1.56192672, -2.05041027,\n 0.53637183, 2.33676839, 2.34014559, 1.2339263,\n 1.8869648, -0.45920792, 0.84940469, 1.70139849,\n -0.28756312, 0.09594627, -0.86080289, 1.03447127,\n 1.23685944, 1.42004502, 2.22410631, 1.30210173,\n 1.03517699, 0.9253425, -0.16559951, 1.3444382,\n 1.37500131, 1.73222184, 0.71605635, 2.21032143,\n 0.85333031, 1.00238776, 0.42725441, 2.14368343,\n 1.43789184, 1.57959926, 2.27469826, 1.95962656,\n 0.25992399, 1.01946914, 0.49016398, 0.5636338,\n 0.5959546, 1.43082857, 0.56230122, 1.15388393,\n 1.68722844, 0.77438205, -0.09647045, 1.39600146,\n 0.13646798, 0.55223715, -0.39944872, -0.61671102,\n -0.08722561, 1.2101835, -0.90729755, 2.64916158,\n -0.0080694, 0.51111895, -0.00401437, 2.16821432,\n 1.92586732, 1.03504717, 1.85897219, 2.32004929,\n 0.25570789, -0.09855274, 0.89073682, -0.55896485,\n 0.28350255, -1.31155407, -0.88278776, -1.97454941,\n 1.01275265, 1.68264723, 1.38271284, 1.86073637,\n 0.4447377, 0.41449001, 0.99202275, 1.36283576,\n 1.59970522, 1.98845816, -0.25684232, 0.87786949,\n 3.1095655, 0.85324478, 1.23337317, 0.00314302,\n -0.09433369, 0.89883322, -0.19036628, 0.99772376,\n -2.39120054, 0.06649673, 1.26136017, 1.91637838,\n -0.3348029, 0.44207108, -1.40664911, -1.52129889,\n 0.29919869, -0.80197448, 0.15204792, 0.98585027,\n 2.13034606, 1.34397924, 1.61550522, 2.70930099,\n 1.24461412, 0.50835466, 0.14802167]\n\nrec = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n\n\ndef test_predict():\n # AR(1) without mean, k_regimes=2\n endog = np.ones(10)\n with pytest.warns(FutureWarning):\n markov_autoregression.MarkovAutoregression(\n endog,\n k_regimes=2,\n order=1,\n trend='nc'\n )\n mod = markov_autoregression.MarkovAutoregression(\n endog, k_regimes=2, order=1, trend='n')\n assert_equal(mod.nobs, 9)\n assert_equal(mod.endog, np.ones(9))\n\n params = np.r_[0.5, 0.5, 1., 0.1, 0.5]\n mod_resid = mod._resid(params)\n resids = np.zeros((2, 2, mod.nobs))\n # Resids when: S_{t} = 0\n resids[0, :, :] = np.ones(9) - 0.1 * np.ones(9)\n assert_allclose(mod_resid[0, :, :], resids[0, :, :])\n # Resids when: S_{t} = 1\n resids[1, :, :] = np.ones(9) - 0.5 * np.ones(9)\n assert_allclose(mod_resid[1, :, :], 
resids[1, :, :])\n\n # AR(1) with mean, k_regimes=2\n endog = np.arange(10)\n mod = markov_autoregression.MarkovAutoregression(\n endog, k_regimes=2, order=1)\n assert_equal(mod.nobs, 9)\n assert_equal(mod.endog, np.arange(1, 10))\n\n params = np.r_[0.5, 0.5, 2., 3., 1., 0.1, 0.5]\n mod_resid = mod._resid(params)\n resids = np.zeros((2, 2, mod.nobs))\n # Resids when: S_t = 0, S_{t-1} = 0\n resids[0, 0, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 2.)\n assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])\n # Resids when: S_t = 0, S_{t-1} = 1\n resids[0, 1, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 3.)\n assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])\n # Resids when: S_t = 1, S_{t-1} = 0\n resids[1, 0, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 2.)\n assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])\n # Resids when: S_t = 1, S_{t-1} = 1\n resids[1, 1, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 3.)\n assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])\n\n # AR(2) with mean, k_regimes=3\n endog = np.arange(10)\n mod = markov_autoregression.MarkovAutoregression(\n endog, k_regimes=3, order=2)\n assert_equal(mod.nobs, 8)\n assert_equal(mod.endog, np.arange(2, 10))\n\n params = np.r_[[0.3] * 6, 2., 3., 4, 1., 0.1, 0.5, 0.8, -0.05, -0.25, -0.4]\n mod_resid = mod._resid(params)\n resids = np.zeros((3, 3, 3, mod.nobs))\n # Resids when: S_t = 0, S_{t-1} = 0, S_{t-2} = 0\n resids[0, 0, 0, :] = (\n (np.arange(2, 10) - 2.) -\n 0.1 * (np.arange(1, 9) - 2.) -\n (-0.05) * (np.arange(8) - 2.))\n assert_allclose(mod_resid[0, 0, 0, :], resids[0, 0, 0, :])\n\n # Resids when: S_t = 1, S_{t-1} = 0, S_{t-2} = 0\n resids[1, 0, 0, :] = (\n (np.arange(2, 10) - 3.) -\n 0.5 * (np.arange(1, 9) - 2.) -\n (-0.25) * (np.arange(8) - 2.))\n assert_allclose(mod_resid[1, 0, 0, :], resids[1, 0, 0, :])\n\n # Resids when: S_t = 0, S_{t-1} = 2, S_{t-2} = 1\n resids[0, 2, 1, :] = (\n (np.arange(2, 10) - 2.) -\n 0.1 * (np.arange(1, 9) - 4.) -\n (-0.05) * (np.arange(8) - 3.))\n assert_allclose(mod_resid[0, 2, 1, :], resids[0, 2, 1, :])\n\n # AR(1) with mean + non-switching exog\n endog = np.arange(10)\n exog = np.r_[0.4, 5, 0.2, 1.2, -0.3, 2.5, 0.2, -0.7, 2., -1.1]\n mod = markov_autoregression.MarkovAutoregression(\n endog, k_regimes=2, order=1, exog=exog)\n assert_equal(mod.nobs, 9)\n assert_equal(mod.endog, np.arange(1, 10))\n\n params = np.r_[0.5, 0.5, 2., 3., 1.5, 1., 0.1, 0.5]\n mod_resid = mod._resid(params)\n resids = np.zeros((2, 2, mod.nobs))\n # Resids when: S_t = 0, S_{t-1} = 0\n resids[0, 0, :] = (\n (np.arange(1, 10) - 2. - 1.5 * exog[1:]) -\n 0.1 * (np.arange(9) - 2. - 1.5 * exog[:-1]))\n assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])\n # Resids when: S_t = 0, S_{t-1} = 1\n resids[0, 1, :] = (\n (np.arange(1, 10) - 2. - 1.5 * exog[1:]) -\n 0.1 * (np.arange(9) - 3. - 1.5 * exog[:-1]))\n assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])\n # Resids when: S_t = 1, S_{t-1} = 0\n resids[1, 0, :] = (\n (np.arange(1, 10) - 3. - 1.5 * exog[1:]) -\n 0.5 * (np.arange(9) - 2. - 1.5 * exog[:-1]))\n assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])\n # Resids when: S_t = 1, S_{t-1} = 1\n resids[1, 1, :] = (\n (np.arange(1, 10) - 3. - 1.5 * exog[1:]) -\n 0.5 * (np.arange(9) - 3. 
- 1.5 * exog[:-1]))\n assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])\n\n\ndef test_conditional_loglikelihoods():\n # AR(1) without mean, k_regimes=2, non-switching variance\n endog = np.ones(10)\n mod = markov_autoregression.MarkovAutoregression(\n endog, k_regimes=2, order=1)\n assert_equal(mod.nobs, 9)\n assert_equal(mod.endog, np.ones(9))\n\n params = np.r_[0.5, 0.5, 2., 3., 2., 0.1, 0.5]\n resid = mod._resid(params)\n conditional_likelihoods = (\n np.exp(-0.5 * resid**2 / 2) / np.sqrt(2 * np.pi * 2))\n assert_allclose(mod._conditional_loglikelihoods(params),\n np.log(conditional_likelihoods))\n\n # AR(1) without mean, k_regimes=3, switching variance\n endog = np.ones(10)\n mod = markov_autoregression.MarkovAutoregression(\n endog, k_regimes=3, order=1, switching_variance=True)\n assert_equal(mod.nobs, 9)\n assert_equal(mod.endog, np.ones(9))\n\n params = np.r_[[0.3]*6, 2., 3., 4., 1.5, 3., 4.5, 0.1, 0.5, 0.8]\n mod_conditional_loglikelihoods = mod._conditional_loglikelihoods(params)\n conditional_likelihoods = mod._resid(params)\n\n # S_t = 0\n conditional_likelihoods[0, :, :] = (\n np.exp(-0.5 * conditional_likelihoods[0, :, :]**2 / 1.5) /\n np.sqrt(2 * np.pi * 1.5))\n assert_allclose(mod_conditional_loglikelihoods[0, :, :],\n np.log(conditional_likelihoods[0, :, :]))\n # S_t = 1\n conditional_likelihoods[1, :, :] = (\n np.exp(-0.5 * conditional_likelihoods[1, :, :]**2 / 3.) /\n np.sqrt(2 * np.pi * 3.))\n assert_allclose(mod_conditional_loglikelihoods[1, :, :],\n np.log(conditional_likelihoods[1, :, :]))\n # S_t = 2\n conditional_likelihoods[2, :, :] = (\n np.exp(-0.5 * conditional_likelihoods[2, :, :]**2 / 4.5) /\n np.sqrt(2 * np.pi * 4.5))\n assert_allclose(mod_conditional_loglikelihoods[2, :, :],\n np.log(conditional_likelihoods[2, :, :]))\n\n\nclass MarkovAutoregression(object):\n @classmethod\n def setup_class(cls, true, endog, atol=1e-5, rtol=1e-7, **kwargs):\n cls.model = markov_autoregression.MarkovAutoregression(endog, **kwargs)\n cls.true = true\n cls.result = cls.model.smooth(cls.true['params'])\n cls.atol = atol\n cls.rtol = rtol\n\n def test_llf(self):\n assert_allclose(self.result.llf, self.true['llf'], atol=self.atol,\n rtol=self.rtol)\n\n def test_fit(self, **kwargs):\n # Test fitting against Stata\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n res = self.model.fit(disp=False, **kwargs)\n assert_allclose(res.llf, self.true['llf_fit'], atol=self.atol,\n rtol=self.rtol)\n\n @pytest.mark.smoke\n def test_fit_em(self, **kwargs):\n # Test EM fitting (smoke test)\n res_em = self.model._fit_em(**kwargs)\n assert_allclose(res_em.llf, self.true['llf_fit_em'], atol=self.atol,\n rtol=self.rtol)\n\n\nhamilton_ar2_short_filtered_joint_probabilities = np.array([\n [[[4.99506987e-02, 6.44048275e-04, 6.22227140e-05,\n 4.45756755e-06, 5.26645567e-07, 7.99846146e-07,\n 1.19425705e-05, 6.87762063e-03],\n [1.95930395e-02, 3.25884335e-04, 1.12955091e-04,\n 3.38537103e-04, 9.81927968e-06, 2.71696750e-05,\n 5.83828290e-03, 7.64261509e-02]],\n\n [[1.97113193e-03, 9.50372207e-05, 1.98390978e-04,\n 1.88188953e-06, 4.83449400e-07, 1.14872860e-05,\n 4.02918239e-06, 4.35015431e-04],\n [2.24870443e-02, 1.27331172e-03, 9.62155856e-03,\n 4.04178695e-03, 2.75516282e-04, 1.18179572e-02,\n 5.99778157e-02, 1.48149567e-01]]],\n\n\n [[[6.70912859e-02, 1.84223872e-02, 2.55621792e-04,\n 4.48500688e-05, 7.80481515e-05, 2.73734559e-06,\n 7.59835896e-06, 1.42930726e-03],\n [2.10053328e-02, 7.44036383e-03, 3.70388879e-04,\n 2.71878370e-03, 1.16152088e-03, 7.42182691e-05,\n 
2.96490192e-03, 1.26774695e-02]],\n\n [[8.09335679e-02, 8.31016518e-02, 2.49149080e-02,\n 5.78825626e-04, 2.19019941e-03, 1.20179130e-03,\n 7.83659430e-05, 2.76363377e-03],\n [7.36967899e-01, 8.88697316e-01, 9.64463954e-01,\n 9.92270877e-01, 9.96283886e-01, 9.86863839e-01,\n 9.31117063e-01, 7.51241236e-01]]]])\n\n\nhamilton_ar2_short_predicted_joint_probabilities = np.array([[\n [[[1.20809334e-01, 3.76964436e-02, 4.86045844e-04,\n 4.69578023e-05, 3.36400588e-06, 3.97445190e-07,\n 6.03622290e-07, 9.01273552e-06],\n [3.92723623e-02, 1.47863379e-02, 2.45936108e-04,\n 8.52441571e-05, 2.55484811e-04, 7.41034525e-06,\n 2.05042201e-05, 4.40599447e-03]],\n\n [[4.99131230e-03, 1.48756005e-03, 7.17220245e-05,\n 1.49720314e-04, 1.42021122e-06, 3.64846209e-07,\n 8.66914462e-06, 3.04071516e-06],\n [4.70476003e-02, 1.69703652e-02, 9.60933974e-04,\n 7.26113047e-03, 3.05022748e-03, 2.07924699e-04,\n 8.91869322e-03, 4.52636381e-02]]],\n\n\n [[[4.99131230e-03, 6.43506069e-03, 1.76698327e-03,\n 2.45179642e-05, 4.30179435e-06, 7.48598845e-06,\n 2.62552503e-07, 7.28796600e-07],\n [1.62256192e-03, 2.01472650e-03, 7.13642497e-04,\n 3.55258493e-05, 2.60772139e-04, 1.11407276e-04,\n 7.11864528e-06, 2.84378568e-04]],\n\n [[5.97950448e-03, 7.76274317e-03, 7.97069493e-03,\n 2.38971340e-03, 5.55180599e-05, 2.10072977e-04,\n 1.15269812e-04, 7.51646942e-06],\n [5.63621989e-02, 7.06862760e-02, 8.52394030e-02,\n 9.25065601e-02, 9.51736612e-02, 9.55585689e-02,\n 9.46550451e-02, 8.93080931e-02]]]],\n\n\n\n [[[[3.92723623e-02, 1.22542551e-02, 1.58002431e-04,\n 1.52649118e-05, 1.09356167e-06, 1.29200377e-07,\n 1.96223855e-07, 2.92983500e-06],\n [1.27665503e-02, 4.80670161e-03, 7.99482261e-05,\n 2.77109335e-05, 8.30522919e-05, 2.40893443e-06,\n 6.66545485e-06, 1.43228843e-03]],\n\n [[1.62256192e-03, 4.83571884e-04, 2.33151963e-05,\n 4.86706634e-05, 4.61678312e-07, 1.18603191e-07,\n 2.81814142e-06, 9.88467229e-07],\n [1.52941031e-02, 5.51667911e-03, 3.12377744e-04,\n 2.36042810e-03, 9.91559466e-04, 6.75915830e-05,\n 2.89926399e-03, 1.47141776e-02]]],\n\n\n [[[4.70476003e-02, 6.06562252e-02, 1.66554040e-02,\n 2.31103828e-04, 4.05482745e-05, 7.05621631e-05,\n 2.47479309e-06, 6.86956236e-06],\n [1.52941031e-02, 1.89906063e-02, 6.72672133e-03,\n 3.34863029e-04, 2.45801156e-03, 1.05011361e-03,\n 6.70996238e-05, 2.68052335e-03]],\n\n [[5.63621989e-02, 7.31708248e-02, 7.51309569e-02,\n 2.25251946e-02, 5.23307566e-04, 1.98012644e-03,\n 1.08652148e-03, 7.08494735e-05],\n [5.31264334e-01, 6.66281623e-01, 8.03457913e-01,\n 8.71957394e-01, 8.97097216e-01, 9.00725317e-01,\n 8.92208794e-01, 8.41808970e-01]]]]])\n\n\nhamilton_ar2_short_smoothed_joint_probabilities = np.array([\n [[[1.29898189e-02, 1.66298475e-04, 1.29822987e-05,\n 9.95268382e-07, 1.84473346e-07, 7.18761267e-07,\n 1.69576494e-05, 6.87762063e-03],\n [5.09522472e-03, 8.41459714e-05, 2.35672254e-05,\n 7.55872505e-05, 3.43949612e-06, 2.44153330e-05,\n 8.28997024e-03, 7.64261509e-02]],\n\n [[5.90021731e-04, 2.55342733e-05, 4.50698224e-05,\n 5.30734135e-07, 1.80741761e-07, 1.11483792e-05,\n 5.98539007e-06, 4.35015431e-04],\n [6.73107901e-03, 3.42109009e-04, 2.18579464e-03,\n 1.13987259e-03, 1.03004157e-04, 1.14692946e-02,\n 8.90976350e-02, 1.48149567e-01]]],\n\n\n [[[6.34648123e-02, 1.79187451e-02, 2.37462147e-04,\n 3.55542558e-05, 7.63980455e-05, 2.90520820e-06,\n 8.17644492e-06, 1.42930726e-03],\n [1.98699352e-02, 7.23695477e-03, 3.44076057e-04,\n 2.15527721e-03, 1.13696383e-03, 7.87695658e-05,\n 3.19047276e-03, 1.26774695e-02]],\n\n [[8.81925054e-02, 8.33092133e-02, 
2.51106301e-02,\n 5.81007470e-04, 2.19065072e-03, 1.20221350e-03,\n 7.56893839e-05, 2.76363377e-03],\n [8.03066603e-01, 8.90916999e-01, 9.72040418e-01,\n 9.96011175e-01, 9.96489179e-01, 9.87210535e-01,\n 8.99315113e-01, 7.51241236e-01]]]])\n\n\nclass TestHamiltonAR2Short(MarkovAutoregression):\n # This is just a set of regression tests\n @classmethod\n def setup_class(cls):\n true = {\n 'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,\n np.exp(-0.262658)**2, 0.013486, -0.057521],\n 'llf': -10.14066,\n 'llf_fit': -4.0523073,\n 'llf_fit_em': -8.885836\n }\n super(TestHamiltonAR2Short, cls).setup_class(\n true, rgnp[-10:], k_regimes=2, order=2, switching_ar=False)\n\n def test_fit_em(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n super(TestHamiltonAR2Short, self).test_fit_em()\n\n def test_filter_output(self, **kwargs):\n res = self.result\n\n # Filtered\n assert_allclose(res.filtered_joint_probabilities,\n hamilton_ar2_short_filtered_joint_probabilities)\n\n # Predicted\n desired = hamilton_ar2_short_predicted_joint_probabilities\n if desired.ndim > res.predicted_joint_probabilities.ndim:\n desired = desired.sum(axis=-2)\n assert_allclose(res.predicted_joint_probabilities, desired)\n\n def test_smoother_output(self, **kwargs):\n res = self.result\n\n # Filtered\n assert_allclose(res.filtered_joint_probabilities,\n hamilton_ar2_short_filtered_joint_probabilities)\n\n # Predicted\n desired = hamilton_ar2_short_predicted_joint_probabilities\n if desired.ndim > res.predicted_joint_probabilities.ndim:\n desired = desired.sum(axis=-2)\n assert_allclose(res.predicted_joint_probabilities, desired)\n\n # Smoothed, entry-by-entry\n assert_allclose(\n res.smoothed_joint_probabilities[..., -1],\n hamilton_ar2_short_smoothed_joint_probabilities[..., -1])\n assert_allclose(\n res.smoothed_joint_probabilities[..., -2],\n hamilton_ar2_short_smoothed_joint_probabilities[..., -2])\n assert_allclose(\n res.smoothed_joint_probabilities[..., -3],\n hamilton_ar2_short_smoothed_joint_probabilities[..., -3])\n assert_allclose(\n res.smoothed_joint_probabilities[..., :-3],\n hamilton_ar2_short_smoothed_joint_probabilities[..., :-3])\n\n\nhamilton_ar4_filtered = [\n 0.776712, 0.949192, 0.996320, 0.990258, 0.940111, 0.537442,\n 0.140001, 0.008942, 0.048480, 0.614097, 0.910889, 0.995463,\n 0.979465, 0.992324, 0.984561, 0.751038, 0.776268, 0.522048,\n 0.814956, 0.821786, 0.472729, 0.673567, 0.029031, 0.001556,\n 0.433276, 0.985463, 0.995025, 0.966067, 0.998445, 0.801467,\n 0.960997, 0.996431, 0.461365, 0.199357, 0.027398, 0.703626,\n 0.946388, 0.985321, 0.998244, 0.989567, 0.984510, 0.986811,\n 0.793788, 0.973675, 0.984848, 0.990418, 0.918427, 0.998769,\n 0.977647, 0.978742, 0.927635, 0.998691, 0.988934, 0.991654,\n 0.999288, 0.999073, 0.918636, 0.987710, 0.966876, 0.910015,\n 0.826150, 0.969451, 0.844049, 0.941525, 0.993363, 0.949978,\n 0.615206, 0.970915, 0.787585, 0.707818, 0.200476, 0.050835,\n 0.140723, 0.809850, 0.086422, 0.990344, 0.785963, 0.817425,\n 0.659152, 0.996578, 0.992860, 0.948501, 0.996883, 0.999712,\n 0.906694, 0.725013, 0.963690, 0.386960, 0.241302, 0.009078,\n 0.015789, 0.000896, 0.541530, 0.928686, 0.953704, 0.992741,\n 0.935877, 0.918958, 0.977316, 0.987941, 0.987300, 0.996769,\n 0.645469, 0.921285, 0.999917, 0.949335, 0.968914, 0.886025,\n 0.777141, 0.904381, 0.368277, 0.607429, 0.002491, 0.227610,\n 0.871284, 0.987717, 0.288705, 0.512124, 0.030329, 0.005177,\n 0.256183, 0.020955, 0.051620, 0.549009, 0.991715, 0.987892,\n 0.995377, 0.999833, 0.993756, 
0.956164, 0.927714]\n\nhamilton_ar4_smoothed = [\n 0.968096, 0.991071, 0.998559, 0.958534, 0.540652, 0.072784,\n 0.010999, 0.006228, 0.172144, 0.898574, 0.989054, 0.998293,\n 0.986434, 0.993248, 0.976868, 0.858521, 0.847452, 0.675670,\n 0.596294, 0.165407, 0.035270, 0.127967, 0.007414, 0.004944,\n 0.815829, 0.998128, 0.998091, 0.993227, 0.999283, 0.921100,\n 0.977171, 0.971757, 0.124680, 0.063710, 0.114570, 0.954701,\n 0.994852, 0.997302, 0.999345, 0.995817, 0.996218, 0.994580,\n 0.933990, 0.996054, 0.998151, 0.996976, 0.971489, 0.999786,\n 0.997362, 0.996755, 0.993053, 0.999947, 0.998469, 0.997987,\n 0.999830, 0.999360, 0.953176, 0.992673, 0.975235, 0.938121,\n 0.946784, 0.986897, 0.905792, 0.969755, 0.995379, 0.914480,\n 0.772814, 0.931385, 0.541742, 0.394596, 0.063428, 0.027829,\n 0.124527, 0.286105, 0.069362, 0.995950, 0.961153, 0.962449,\n 0.945022, 0.999855, 0.998943, 0.980041, 0.999028, 0.999838,\n 0.863305, 0.607421, 0.575983, 0.013300, 0.007562, 0.000635,\n 0.001806, 0.002196, 0.803550, 0.972056, 0.984503, 0.998059,\n 0.985211, 0.988486, 0.994452, 0.994498, 0.998873, 0.999192,\n 0.870482, 0.976282, 0.999961, 0.984283, 0.973045, 0.786176,\n 0.403673, 0.275418, 0.115199, 0.257560, 0.004735, 0.493936,\n 0.907360, 0.873199, 0.052959, 0.076008, 0.001653, 0.000847,\n 0.062027, 0.021257, 0.219547, 0.955654, 0.999851, 0.997685,\n 0.998324, 0.999939, 0.996858, 0.969209, 0.927714]\n\n\nclass TestHamiltonAR4(MarkovAutoregression):\n @classmethod\n def setup_class(cls):\n # Results from E-views:\n # Dependent variable followed by a list of switching regressors:\n # rgnp c\n # List of non-switching regressors:\n # ar(1) ar(2) ar(3) ar(4)\n # Do not check \"Regime specific error variances\"\n # Switching type: Markov\n # Number of Regimes: 2\n # Probability regressors:\n # c\n # Method SWITCHREG\n # Sample 1951q1 1984q4\n true = {\n 'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,\n np.exp(-0.262658)**2, 0.013486, -0.057521,\n -0.246983, -0.212923],\n 'llf': -181.26339,\n 'llf_fit': -181.26339,\n 'llf_fit_em': -183.85444,\n 'bse_oim': np.r_[.0965189, .0377362, .2645396, .0745187, np.nan,\n .1199942, .137663, .1069103, .1105311, ]\n }\n super(TestHamiltonAR4, cls).setup_class(\n true, rgnp, k_regimes=2, order=4, switching_ar=False)\n\n def test_filtered_regimes(self):\n res = self.result\n assert_equal(len(res.filtered_marginal_probabilities[:, 1]),\n self.model.nobs)\n assert_allclose(res.filtered_marginal_probabilities[:, 1],\n hamilton_ar4_filtered, atol=1e-5)\n\n def test_smoothed_regimes(self):\n res = self.result\n assert_equal(len(res.smoothed_marginal_probabilities[:, 1]),\n self.model.nobs)\n assert_allclose(res.smoothed_marginal_probabilities[:, 1],\n hamilton_ar4_smoothed, atol=1e-5)\n\n def test_bse(self):\n # Cannot compare middle element of bse because we estimate sigma^2\n # rather than sigma\n bse = self.result.cov_params_approx.diagonal()**0.5\n assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-6)\n assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-6)\n\n\nclass TestHamiltonAR2Switch(MarkovAutoregression):\n # Results from Stata, see http://www.stata.com/manuals14/tsmswitch.pdf\n @classmethod\n def setup_class(cls):\n path = os.path.join(current_path, 'results',\n 'results_predict_rgnp.csv')\n results = pd.read_csv(path)\n\n true = {\n 'params': np.r_[.3812383, .3564492, -.0055216, 1.195482,\n .6677098**2, .3710719, .4621503, .7002937,\n -.3206652],\n 'llf': -179.32354,\n 'llf_fit': -179.38684,\n 'llf_fit_em': -184.99606,\n 'bse_oim': np.r_[.1424841, 
.0994742, .2057086, .1225987, np.nan,\n .1754383, .1652473, .187409, .1295937],\n 'smoothed0': results.iloc[3:]['switchar2_sm1'],\n 'smoothed1': results.iloc[3:]['switchar2_sm2'],\n 'predict0': results.iloc[3:]['switchar2_yhat1'],\n 'predict1': results.iloc[3:]['switchar2_yhat2'],\n 'predict_predicted': results.iloc[3:]['switchar2_pyhat'],\n 'predict_filtered': results.iloc[3:]['switchar2_fyhat'],\n 'predict_smoothed': results.iloc[3:]['switchar2_syhat'],\n }\n super(TestHamiltonAR2Switch, cls).setup_class(\n true, rgnp, k_regimes=2, order=2)\n\n def test_smoothed_marginal_probabilities(self):\n assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],\n self.true['smoothed0'], atol=1e-6)\n assert_allclose(self.result.smoothed_marginal_probabilities[:, 1],\n self.true['smoothed1'], atol=1e-6)\n\n def test_predict(self):\n # Smoothed\n actual = self.model.predict(\n self.true['params'], probabilities='smoothed')\n assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)\n actual = self.model.predict(\n self.true['params'], probabilities=None)\n assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)\n\n actual = self.result.predict(probabilities='smoothed')\n assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)\n actual = self.result.predict(probabilities=None)\n assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)\n\n def test_bse(self):\n # Cannot compare middle element of bse because we estimate sigma^2\n # rather than sigma\n bse = self.result.cov_params_approx.diagonal()**0.5\n assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-7)\n assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-7)\n\n\nhamilton_ar1_switch_filtered = [\n 0.840288, 0.730337, 0.900234, 0.596492, 0.921618, 0.983828,\n 0.959039, 0.898366, 0.477335, 0.251089, 0.049367, 0.386782,\n 0.942868, 0.965632, 0.982857, 0.897603, 0.946986, 0.916413,\n 0.640912, 0.849296, 0.778371, 0.954420, 0.929906, 0.723930,\n 0.891196, 0.061163, 0.004806, 0.977369, 0.997871, 0.977950,\n 0.896580, 0.963246, 0.430539, 0.906586, 0.974589, 0.514506,\n 0.683457, 0.276571, 0.956475, 0.966993, 0.971618, 0.987019,\n 0.916670, 0.921652, 0.930265, 0.655554, 0.965858, 0.964981,\n 0.976790, 0.868267, 0.983240, 0.852052, 0.919150, 0.854467,\n 0.987868, 0.935840, 0.958138, 0.979535, 0.956541, 0.716322,\n 0.919035, 0.866437, 0.899609, 0.914667, 0.976448, 0.867252,\n 0.953075, 0.977850, 0.884242, 0.688299, 0.968461, 0.737517,\n 0.870674, 0.559413, 0.380339, 0.582813, 0.941311, 0.240020,\n 0.999349, 0.619258, 0.828343, 0.729726, 0.991009, 0.966291,\n 0.899148, 0.970798, 0.977684, 0.695877, 0.637555, 0.915824,\n 0.434600, 0.771277, 0.113756, 0.144002, 0.008466, 0.994860,\n 0.993173, 0.961722, 0.978555, 0.789225, 0.836283, 0.940383,\n 0.968368, 0.974473, 0.980248, 0.518125, 0.904086, 0.993023,\n 0.802936, 0.920906, 0.685445, 0.666524, 0.923285, 0.643861,\n 0.938184, 0.008862, 0.945406, 0.990061, 0.991500, 0.486669,\n 0.805039, 0.089036, 0.025067, 0.863309, 0.352784, 0.733295,\n 0.928710, 0.984257, 0.926597, 0.959887, 0.984051, 0.872682,\n 0.824375, 0.780157]\n\nhamilton_ar1_switch_smoothed = [\n 0.900074, 0.758232, 0.914068, 0.637248, 0.901951, 0.979905,\n 0.958935, 0.888641, 0.261602, 0.148761, 0.056919, 0.424396,\n 0.932184, 0.954962, 0.983958, 0.895595, 0.949519, 0.923473,\n 0.678898, 0.848793, 0.807294, 0.958868, 0.942936, 0.809137,\n 0.960892, 0.032947, 0.007127, 0.967967, 0.996551, 0.979278,\n 0.896181, 0.987462, 0.498965, 0.908803, 0.986893, 0.488720,\n 0.640492, 0.325552, 0.951996, 
0.959703, 0.960914, 0.986989,\n 0.916779, 0.924570, 0.935348, 0.677118, 0.960749, 0.958966,\n 0.976974, 0.838045, 0.986562, 0.847774, 0.908866, 0.821110,\n 0.984965, 0.915302, 0.938196, 0.976518, 0.973780, 0.744159,\n 0.922006, 0.873292, 0.904035, 0.917547, 0.978559, 0.870915,\n 0.948420, 0.979747, 0.884791, 0.711085, 0.973235, 0.726311,\n 0.828305, 0.446642, 0.411135, 0.639357, 0.973151, 0.141707,\n 0.999805, 0.618207, 0.783239, 0.672193, 0.987618, 0.964655,\n 0.877390, 0.962437, 0.989002, 0.692689, 0.699370, 0.937934,\n 0.522535, 0.824567, 0.058746, 0.146549, 0.009864, 0.994072,\n 0.992084, 0.956945, 0.984297, 0.795926, 0.845698, 0.935364,\n 0.963285, 0.972767, 0.992168, 0.528278, 0.826349, 0.996574,\n 0.811431, 0.930873, 0.680756, 0.721072, 0.937977, 0.731879,\n 0.996745, 0.016121, 0.951187, 0.989820, 0.996968, 0.592477,\n 0.889144, 0.036015, 0.040084, 0.858128, 0.418984, 0.746265,\n 0.907990, 0.980984, 0.900449, 0.934741, 0.986807, 0.872818,\n 0.812080, 0.780157]\n\n\nclass TestHamiltonAR1Switch(MarkovAutoregression):\n @classmethod\n def setup_class(cls):\n # Results from E-views:\n # Dependent variable followed by a list of switching regressors:\n # rgnp c ar(1)\n # List of non-switching regressors: <blank>\n # Do not check \"Regime specific error variances\"\n # Switching type: Markov\n # Number of Regimes: 2\n # Probability regressors:\n # c\n # Method SWITCHREG\n # Sample 1951q1 1984q4\n true = {\n 'params': np.r_[0.85472458, 0.53662099, 1.041419, -0.479157,\n np.exp(-0.231404)**2, 0.243128, 0.713029],\n 'llf': -186.7575,\n 'llf_fit': -186.7575,\n 'llf_fit_em': -189.25446\n }\n super(TestHamiltonAR1Switch, cls).setup_class(\n true, rgnp, k_regimes=2, order=1)\n\n def test_filtered_regimes(self):\n assert_allclose(self.result.filtered_marginal_probabilities[:, 0],\n hamilton_ar1_switch_filtered, atol=1e-5)\n\n def test_smoothed_regimes(self):\n assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],\n hamilton_ar1_switch_smoothed, atol=1e-5)\n\n def test_expected_durations(self):\n expected_durations = [6.883477, 1.863513]\n assert_allclose(self.result.expected_durations, expected_durations,\n atol=1e-5)\n\n\nhamilton_ar1_switch_tvtp_filtered = [\n 0.999996, 0.999211, 0.999849, 0.996007, 0.999825, 0.999991,\n 0.999981, 0.999819, 0.041745, 0.001116, 1.74e-05, 0.000155,\n 0.999976, 0.999958, 0.999993, 0.999878, 0.999940, 0.999791,\n 0.996553, 0.999486, 0.998485, 0.999894, 0.999765, 0.997657,\n 0.999619, 0.002853, 1.09e-05, 0.999884, 0.999996, 0.999997,\n 0.999919, 0.999987, 0.989762, 0.999807, 0.999978, 0.050734,\n 0.010660, 0.000217, 0.006174, 0.999977, 0.999954, 0.999995,\n 0.999934, 0.999867, 0.999824, 0.996783, 0.999941, 0.999948,\n 0.999981, 0.999658, 0.999994, 0.999753, 0.999859, 0.999330,\n 0.999993, 0.999956, 0.999970, 0.999996, 0.999991, 0.998674,\n 0.999869, 0.999432, 0.999570, 0.999600, 0.999954, 0.999499,\n 0.999906, 0.999978, 0.999712, 0.997441, 0.999948, 0.998379,\n 0.999578, 0.994745, 0.045936, 0.006816, 0.027384, 0.000278,\n 1.000000, 0.996382, 0.999541, 0.998130, 0.999992, 0.999990,\n 0.999860, 0.999986, 0.999997, 0.998520, 0.997777, 0.999821,\n 0.033353, 0.011629, 6.95e-05, 4.52e-05, 2.04e-06, 0.999963,\n 0.999977, 0.999949, 0.999986, 0.999240, 0.999373, 0.999858,\n 0.999946, 0.999972, 0.999991, 0.994039, 0.999817, 0.999999,\n 0.999715, 0.999924, 0.997763, 0.997944, 0.999825, 0.996592,\n 0.695147, 0.000161, 0.999665, 0.999928, 0.999988, 0.992742,\n 0.374214, 0.001569, 2.16e-05, 0.000941, 4.32e-05, 0.000556,\n 0.999955, 0.999993, 0.999942, 0.999973, 
0.999999, 0.999919,\n 0.999438, 0.998738]\n\nhamilton_ar1_switch_tvtp_smoothed = [\n 0.999997, 0.999246, 0.999918, 0.996118, 0.999740, 0.999990,\n 0.999984, 0.999783, 0.035454, 0.000958, 1.53e-05, 0.000139,\n 0.999973, 0.999939, 0.999994, 0.999870, 0.999948, 0.999884,\n 0.997243, 0.999668, 0.998424, 0.999909, 0.999860, 0.998037,\n 0.999559, 0.002533, 1.16e-05, 0.999801, 0.999993, 0.999997,\n 0.999891, 0.999994, 0.990096, 0.999753, 0.999974, 0.048495,\n 0.009289, 0.000542, 0.005991, 0.999974, 0.999929, 0.999995,\n 0.999939, 0.999880, 0.999901, 0.996221, 0.999937, 0.999935,\n 0.999985, 0.999450, 0.999995, 0.999768, 0.999897, 0.998930,\n 0.999992, 0.999949, 0.999954, 0.999995, 0.999994, 0.998687,\n 0.999902, 0.999547, 0.999653, 0.999538, 0.999966, 0.999485,\n 0.999883, 0.999982, 0.999831, 0.996940, 0.999968, 0.998678,\n 0.999780, 0.993895, 0.055372, 0.020421, 0.022913, 0.000127,\n 1.000000, 0.997072, 0.999715, 0.996893, 0.999990, 0.999991,\n 0.999811, 0.999978, 0.999998, 0.999100, 0.997866, 0.999787,\n 0.034912, 0.009932, 5.91e-05, 3.99e-05, 1.77e-06, 0.999954,\n 0.999976, 0.999932, 0.999991, 0.999429, 0.999393, 0.999845,\n 0.999936, 0.999961, 0.999995, 0.994246, 0.999570, 1.000000,\n 0.999702, 0.999955, 0.998611, 0.998019, 0.999902, 0.998486,\n 0.673991, 0.000205, 0.999627, 0.999902, 0.999994, 0.993707,\n 0.338707, 0.001359, 2.36e-05, 0.000792, 4.47e-05, 0.000565,\n 0.999932, 0.999993, 0.999931, 0.999950, 0.999999, 0.999940,\n 0.999626, 0.998738]\n\nexpected_durations = [\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],\n [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [1.223309, 1864.084], [1.223309, 1864.084],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],\n [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],\n [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [1.223309, 
1864.084], [1.223309, 1864.084], [1.223309, 1864.084],\n [1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],\n [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],\n [710.7573, 1.000391], [710.7573, 1.000391]]\n\n\nclass TestHamiltonAR1SwitchTVTP(MarkovAutoregression):\n @classmethod\n def setup_class(cls):\n # Results from E-views:\n # Dependent variable followed by a list of switching regressors:\n # rgnp c ar(1)\n # List of non-switching regressors: <blank>\n # Do not check \"Regime specific error variances\"\n # Switching type: Markov\n # Number of Regimes: 2\n # Probability regressors:\n # c recession\n # Method SWITCHREG\n # Sample 1951q1 1984q4\n true = {\n 'params': np.r_[6.564923, 7.846371, -8.064123, -15.37636,\n 1.027190, -0.719760,\n np.exp(-0.217003)**2, 0.161489, 0.022536],\n 'llf': -163.914049,\n 'llf_fit': -161.786477,\n 'llf_fit_em': -163.914049\n }\n exog_tvtp = np.c_[np.ones(len(rgnp)), rec]\n super(TestHamiltonAR1SwitchTVTP, cls).setup_class(\n true, rgnp, k_regimes=2, order=1, exog_tvtp=exog_tvtp)\n\n @pytest.mark.skip # TODO(ChadFulton): give reason for skip\n def test_fit_em(self):\n pass\n\n def test_filtered_regimes(self):\n assert_allclose(self.result.filtered_marginal_probabilities[:, 0],\n hamilton_ar1_switch_tvtp_filtered, atol=1e-5)\n\n def test_smoothed_regimes(self):\n assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],\n hamilton_ar1_switch_tvtp_smoothed, atol=1e-5)\n\n def test_expected_durations(self):\n assert_allclose(self.result.expected_durations, expected_durations,\n rtol=1e-5, atol=1e-7)\n\n\nclass TestFilardo(MarkovAutoregression):\n @classmethod\n def setup_class(cls):\n path = os.path.join(current_path, 'results', 'mar_filardo.csv')\n cls.mar_filardo = pd.read_csv(path)\n true = {\n 'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,\n 0.517298, -0.865888,\n np.exp(-0.362469)**2,\n 0.189474, 0.079344, 0.110944, 0.122251],\n 'llf': -586.5718,\n 'llf_fit': -586.5718,\n 'llf_fit_em': -586.5718\n }\n endog = cls.mar_filardo['dlip'].iloc[1:].values\n exog_tvtp = add_constant(\n cls.mar_filardo['dmdlleading'].iloc[:-1].values)\n super(TestFilardo, cls).setup_class(\n true, endog, k_regimes=2, order=4, switching_ar=False,\n exog_tvtp=exog_tvtp)\n\n @pytest.mark.skip # TODO(ChadFulton): give reason for skip\n def test_fit(self, **kwargs):\n pass\n\n @pytest.mark.skip # TODO(ChadFulton): give reason for skip\n def test_fit_em(self):\n pass\n\n def test_filtered_regimes(self):\n assert_allclose(self.result.filtered_marginal_probabilities[:, 0],\n self.mar_filardo['filtered_0'].iloc[5:], atol=1e-5)\n\n def test_smoothed_regimes(self):\n assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],\n self.mar_filardo['smoothed_0'].iloc[5:], atol=1e-5)\n\n def 
test_expected_durations(self):\n assert_allclose(self.result.expected_durations,\n self.mar_filardo[['duration0', 'duration1']].iloc[5:],\n rtol=1e-5, atol=1e-7)\n\n\nclass TestFilardoPandas(MarkovAutoregression):\n @classmethod\n def setup_class(cls):\n path = os.path.join(current_path, 'results', 'mar_filardo.csv')\n cls.mar_filardo = pd.read_csv(path)\n cls.mar_filardo.index = pd.date_range('1948-02-01', '1991-04-01',\n freq='MS')\n true = {\n 'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,\n 0.517298, -0.865888,\n np.exp(-0.362469)**2,\n 0.189474, 0.079344, 0.110944, 0.122251],\n 'llf': -586.5718,\n 'llf_fit': -586.5718,\n 'llf_fit_em': -586.5718\n }\n endog = cls.mar_filardo['dlip'].iloc[1:]\n exog_tvtp = add_constant(\n cls.mar_filardo['dmdlleading'].iloc[:-1])\n super(TestFilardoPandas, cls).setup_class(\n true, endog, k_regimes=2, order=4, switching_ar=False,\n exog_tvtp=exog_tvtp)\n\n @pytest.mark.skip # TODO(ChadFulton): give reason for skip\n def test_fit(self, **kwargs):\n pass\n\n @pytest.mark.skip # TODO(ChadFulton): give reason for skip\n def test_fit_em(self):\n pass\n\n def test_filtered_regimes(self):\n assert_allclose(self.result.filtered_marginal_probabilities[0],\n self.mar_filardo['filtered_0'].iloc[5:], atol=1e-5)\n\n def test_smoothed_regimes(self):\n assert_allclose(self.result.smoothed_marginal_probabilities[0],\n self.mar_filardo['smoothed_0'].iloc[5:], atol=1e-5)\n\n def test_expected_durations(self):\n assert_allclose(self.result.expected_durations,\n self.mar_filardo[['duration0', 'duration1']].iloc[5:],\n rtol=1e-5, atol=1e-7)\n", "\"\"\"\nThese the test the public routines exposed in types/common.py\nrelated to inference and not otherwise tested in types/test_common.py\n\n\"\"\"\nimport collections\nfrom collections import namedtuple\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nfrom decimal import Decimal\nfrom fractions import Fraction\nfrom io import StringIO\nimport itertools\nfrom numbers import Number\nimport re\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs import (\n lib,\n missing as libmissing,\n ops as libops,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes import inference\nfrom pandas.core.dtypes.common import (\n ensure_int32,\n is_bool,\n is_complex,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_float,\n is_integer,\n is_number,\n is_scalar,\n is_scipy_sparse,\n is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n)\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n DateOffset,\n DatetimeIndex,\n Index,\n Interval,\n Period,\n PeriodIndex,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n BooleanArray,\n FloatingArray,\n IntegerArray,\n)\n\n\n@pytest.fixture(params=[True, False], ids=str)\ndef coerce(request):\n return request.param\n\n\nclass MockNumpyLikeArray:\n \"\"\"\n A class which is numpy-like (e.g. Pint's Quantity) but not actually numpy\n\n The key is that it is not actually a numpy array so\n ``util.is_array(mock_numpy_like_array_instance)`` returns ``False``. Other\n important properties are that the class defines a :meth:`__iter__` method\n (so that ``isinstance(abc.Iterable)`` returns ``True``) and has a\n :meth:`ndim` property, as pandas special-cases 0-dimensional arrays in some\n cases.\n\n We expect pandas to behave with respect to such duck arrays exactly as\n with real numpy arrays. 
In particular, a 0-dimensional duck array is *NOT*\n a scalar (`is_scalar(np.array(1)) == False`), but it is not list-like either.\n \"\"\"\n\n def __init__(self, values):\n self._values = values\n\n def __iter__(self):\n iter_values = iter(self._values)\n\n def it_outer():\n yield from iter_values\n\n return it_outer()\n\n def __len__(self):\n return len(self._values)\n\n def __array__(self, t=None):\n return np.asarray(self._values, dtype=t)\n\n @property\n def ndim(self):\n return self._values.ndim\n\n @property\n def dtype(self):\n return self._values.dtype\n\n @property\n def size(self):\n return self._values.size\n\n @property\n def shape(self):\n return self._values.shape\n\n\n# collect all objects to be tested for list-like-ness; use tuples of objects,\n# whether they are list-like or not (special casing for sets), and their ID\nll_params = [\n ([1], True, \"list\"),\n ([], True, \"list-empty\"),\n ((1,), True, \"tuple\"),\n ((), True, \"tuple-empty\"),\n ({\"a\": 1}, True, \"dict\"),\n ({}, True, \"dict-empty\"),\n ({\"a\", 1}, \"set\", \"set\"),\n (set(), \"set\", \"set-empty\"),\n (frozenset({\"a\", 1}), \"set\", \"frozenset\"),\n (frozenset(), \"set\", \"frozenset-empty\"),\n (iter([1, 2]), True, \"iterator\"),\n (iter([]), True, \"iterator-empty\"),\n ((x for x in [1, 2]), True, \"generator\"),\n ((_ for _ in []), True, \"generator-empty\"),\n (Series([1]), True, \"Series\"),\n (Series([], dtype=object), True, \"Series-empty\"),\n (Series([\"a\"]).str, True, \"StringMethods\"),\n (Series([], dtype=\"O\").str, True, \"StringMethods-empty\"),\n (Index([1]), True, \"Index\"),\n (Index([]), True, \"Index-empty\"),\n (DataFrame([[1]]), True, \"DataFrame\"),\n (DataFrame(), True, \"DataFrame-empty\"),\n (np.ndarray((2,) * 1), True, \"ndarray-1d\"),\n (np.array([]), True, \"ndarray-1d-empty\"),\n (np.ndarray((2,) * 2), True, \"ndarray-2d\"),\n (np.array([[]]), True, \"ndarray-2d-empty\"),\n (np.ndarray((2,) * 3), True, \"ndarray-3d\"),\n (np.array([[[]]]), True, \"ndarray-3d-empty\"),\n (np.ndarray((2,) * 4), True, \"ndarray-4d\"),\n (np.array([[[[]]]]), True, \"ndarray-4d-empty\"),\n (np.array(2), False, \"ndarray-0d\"),\n (MockNumpyLikeArray(np.ndarray((2,) * 1)), True, \"duck-ndarray-1d\"),\n (MockNumpyLikeArray(np.array([])), True, \"duck-ndarray-1d-empty\"),\n (MockNumpyLikeArray(np.ndarray((2,) * 2)), True, \"duck-ndarray-2d\"),\n (MockNumpyLikeArray(np.array([[]])), True, \"duck-ndarray-2d-empty\"),\n (MockNumpyLikeArray(np.ndarray((2,) * 3)), True, \"duck-ndarray-3d\"),\n (MockNumpyLikeArray(np.array([[[]]])), True, \"duck-ndarray-3d-empty\"),\n (MockNumpyLikeArray(np.ndarray((2,) * 4)), True, \"duck-ndarray-4d\"),\n (MockNumpyLikeArray(np.array([[[[]]]])), True, \"duck-ndarray-4d-empty\"),\n (MockNumpyLikeArray(np.array(2)), False, \"duck-ndarray-0d\"),\n (1, False, \"int\"),\n (b\"123\", False, \"bytes\"),\n (b\"\", False, \"bytes-empty\"),\n (\"123\", False, \"string\"),\n (\"\", False, \"string-empty\"),\n (str, False, \"string-type\"),\n (object(), False, \"object\"),\n (np.nan, False, \"NaN\"),\n (None, False, \"None\"),\n]\nobjs, expected, ids = zip(*ll_params)\n\n\n@pytest.fixture(params=zip(objs, expected), ids=ids)\ndef maybe_list_like(request):\n return request.param\n\n\ndef test_is_list_like(maybe_list_like):\n obj, expected = maybe_list_like\n expected = True if expected == \"set\" else expected\n assert inference.is_list_like(obj) == expected\n\n\ndef test_is_list_like_disallow_sets(maybe_list_like):\n obj, expected = maybe_list_like\n expected = False if 
expected == \"set\" else expected\n assert inference.is_list_like(obj, allow_sets=False) == expected\n\n\ndef test_is_list_like_recursion():\n # GH 33721\n # interpreter would crash with SIGABRT\n def foo():\n inference.is_list_like([])\n foo()\n\n with tm.external_error_raised(RecursionError):\n foo()\n\n\ndef test_is_list_like_iter_is_none():\n # GH 43373\n # is_list_like was yielding false positives with __iter__ == None\n class NotListLike:\n def __getitem__(self, item):\n return self\n\n __iter__ = None\n\n assert not inference.is_list_like(NotListLike())\n\n\ndef test_is_sequence():\n is_seq = inference.is_sequence\n assert is_seq((1, 2))\n assert is_seq([1, 2])\n assert not is_seq(\"abcd\")\n assert not is_seq(np.int64)\n\n class A:\n def __getitem__(self):\n return 1\n\n assert not is_seq(A())\n\n\ndef test_is_array_like():\n assert inference.is_array_like(Series([], dtype=object))\n assert inference.is_array_like(Series([1, 2]))\n assert inference.is_array_like(np.array([\"a\", \"b\"]))\n assert inference.is_array_like(Index([\"2016-01-01\"]))\n assert inference.is_array_like(np.array([2, 3]))\n assert inference.is_array_like(MockNumpyLikeArray(np.array([2, 3])))\n\n class DtypeList(list):\n dtype = \"special\"\n\n assert inference.is_array_like(DtypeList())\n\n assert not inference.is_array_like([1, 2, 3])\n assert not inference.is_array_like(())\n assert not inference.is_array_like(\"foo\")\n assert not inference.is_array_like(123)\n\n\n@pytest.mark.parametrize(\n \"inner\",\n [\n [],\n [1],\n (1,),\n (1, 2),\n {\"a\": 1},\n {1, \"a\"},\n Series([1]),\n Series([], dtype=object),\n Series([\"a\"]).str,\n (x for x in range(5)),\n ],\n)\n@pytest.mark.parametrize(\"outer\", [list, Series, np.array, tuple])\ndef test_is_nested_list_like_passes(inner, outer):\n result = outer([inner for _ in range(5)])\n assert inference.is_list_like(result)\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n \"abc\",\n [],\n [1],\n (1,),\n [\"a\"],\n \"a\",\n {\"a\"},\n [1, 2, 3],\n Series([1]),\n DataFrame({\"A\": [1]}),\n ([1, 2] for _ in range(5)),\n ],\n)\ndef test_is_nested_list_like_fails(obj):\n assert not inference.is_nested_list_like(obj)\n\n\n@pytest.mark.parametrize(\"ll\", [{}, {\"A\": 1}, Series([1]), collections.defaultdict()])\ndef test_is_dict_like_passes(ll):\n assert inference.is_dict_like(ll)\n\n\n@pytest.mark.parametrize(\n \"ll\",\n [\n \"1\",\n 1,\n [1, 2],\n (1, 2),\n range(2),\n Index([1]),\n dict,\n collections.defaultdict,\n Series,\n ],\n)\ndef test_is_dict_like_fails(ll):\n assert not inference.is_dict_like(ll)\n\n\n@pytest.mark.parametrize(\"has_keys\", [True, False])\n@pytest.mark.parametrize(\"has_getitem\", [True, False])\n@pytest.mark.parametrize(\"has_contains\", [True, False])\ndef test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):\n class DictLike:\n def __init__(self, d):\n self.d = d\n\n if has_keys:\n\n def keys(self):\n return self.d.keys()\n\n if has_getitem:\n\n def __getitem__(self, key):\n return self.d.__getitem__(key)\n\n if has_contains:\n\n def __contains__(self, key) -> bool:\n return self.d.__contains__(key)\n\n d = DictLike({1: 2})\n result = inference.is_dict_like(d)\n expected = has_keys and has_getitem and has_contains\n\n assert result is expected\n\n\ndef test_is_file_like():\n class MockFile:\n pass\n\n is_file = inference.is_file_like\n\n data = StringIO(\"data\")\n assert is_file(data)\n\n # No read / write attributes\n # No iterator attributes\n m = MockFile()\n assert not is_file(m)\n\n MockFile.write = lambda self: 0\n\n # Write 
attribute but not an iterator\n m = MockFile()\n assert not is_file(m)\n\n # gh-16530: Valid iterator just means we have the\n # __iter__ attribute for our purposes.\n MockFile.__iter__ = lambda self: self\n\n # Valid write-only file\n m = MockFile()\n assert is_file(m)\n\n del MockFile.write\n MockFile.read = lambda self: 0\n\n # Valid read-only file\n m = MockFile()\n assert is_file(m)\n\n # Iterator but no read / write attributes\n data = [1, 2, 3]\n assert not is_file(data)\n\n\ntest_tuple = collections.namedtuple(\"test_tuple\", [\"a\", \"b\", \"c\"])\n\n\n@pytest.mark.parametrize(\"ll\", [test_tuple(1, 2, 3)])\ndef test_is_names_tuple_passes(ll):\n assert inference.is_named_tuple(ll)\n\n\n@pytest.mark.parametrize(\"ll\", [(1, 2, 3), \"a\", Series({\"pi\": 3.14})])\ndef test_is_names_tuple_fails(ll):\n assert not inference.is_named_tuple(ll)\n\n\ndef test_is_hashable():\n\n # all new-style classes are hashable by default\n class HashableClass:\n pass\n\n class UnhashableClass1:\n __hash__ = None\n\n class UnhashableClass2:\n def __hash__(self):\n raise TypeError(\"Not hashable\")\n\n hashable = (1, 3.14, np.float64(3.14), \"a\", (), (1,), HashableClass())\n not_hashable = ([], UnhashableClass1())\n abc_hashable_not_really_hashable = (([],), UnhashableClass2())\n\n for i in hashable:\n assert inference.is_hashable(i)\n for i in not_hashable:\n assert not inference.is_hashable(i)\n for i in abc_hashable_not_really_hashable:\n assert not inference.is_hashable(i)\n\n # numpy.array is no longer collections.abc.Hashable as of\n # https://github.com/numpy/numpy/pull/5326, just test\n # is_hashable()\n assert not inference.is_hashable(np.array([]))\n\n\n@pytest.mark.parametrize(\"ll\", [re.compile(\"ad\")])\ndef test_is_re_passes(ll):\n assert inference.is_re(ll)\n\n\n@pytest.mark.parametrize(\"ll\", [\"x\", 2, 3, object()])\ndef test_is_re_fails(ll):\n assert not inference.is_re(ll)\n\n\n@pytest.mark.parametrize(\n \"ll\", [r\"a\", \"x\", r\"asdf\", re.compile(\"adsf\"), r\"\\u2233\\s*\", re.compile(r\"\")]\n)\ndef test_is_recompilable_passes(ll):\n assert inference.is_re_compilable(ll)\n\n\n@pytest.mark.parametrize(\"ll\", [1, [], object()])\ndef test_is_recompilable_fails(ll):\n assert not inference.is_re_compilable(ll)\n\n\nclass TestInference:\n @pytest.mark.parametrize(\n \"arr\",\n [\n np.array(list(\"abc\"), dtype=\"S1\"),\n np.array(list(\"abc\"), dtype=\"S1\").astype(object),\n [b\"a\", np.nan, b\"c\"],\n ],\n )\n def test_infer_dtype_bytes(self, arr):\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"bytes\"\n\n @pytest.mark.parametrize(\n \"value, expected\",\n [\n (float(\"inf\"), True),\n (np.inf, True),\n (-np.inf, False),\n (1, False),\n (\"a\", False),\n ],\n )\n def test_isposinf_scalar(self, value, expected):\n # GH 11352\n result = libmissing.isposinf_scalar(value)\n assert result is expected\n\n @pytest.mark.parametrize(\n \"value, expected\",\n [\n (float(\"-inf\"), True),\n (-np.inf, True),\n (np.inf, False),\n (1, False),\n (\"a\", False),\n ],\n )\n def test_isneginf_scalar(self, value, expected):\n result = libmissing.isneginf_scalar(value)\n assert result is expected\n\n @pytest.mark.parametrize(\n \"convert_to_masked_nullable, exp\",\n [\n (\n True,\n BooleanArray(\n np.array([True, False], dtype=\"bool\"), np.array([False, True])\n ),\n ),\n (False, np.array([True, np.nan], dtype=\"object\")),\n ],\n )\n def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp):\n # GH 40687\n arr = np.array([True, np.NaN], dtype=object)\n 
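# --- Editor's note: standalone illustration, not part of the original test ---
# The internal maybe_convert_bool(..., convert_to_masked_nullable=True) call
# exercised here mirrors what the public nullable "boolean" dtype does with
# object data containing NaN: True/False are preserved and NaN becomes pd.NA.
# A minimal public-API sketch, assuming only that the nullable BooleanDtype is
# available in the installed pandas:

import numpy as np
import pandas as pd

raw = np.array([True, np.nan], dtype=object)
nullable = pd.array(raw, dtype="boolean")  # BooleanArray: [True, <NA>]
assert nullable[1] is pd.NA
assert raw.dtype == object  # without the masked path the data stays object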
result = libops.maybe_convert_bool(\n arr, set(), convert_to_masked_nullable=convert_to_masked_nullable\n )\n if convert_to_masked_nullable:\n tm.assert_extension_array_equal(BooleanArray(*result), exp)\n else:\n result = result[0]\n tm.assert_numpy_array_equal(result, exp)\n\n @pytest.mark.parametrize(\"convert_to_masked_nullable\", [True, False])\n @pytest.mark.parametrize(\"coerce_numeric\", [True, False])\n @pytest.mark.parametrize(\n \"infinity\", [\"inf\", \"inF\", \"iNf\", \"Inf\", \"iNF\", \"InF\", \"INf\", \"INF\"]\n )\n @pytest.mark.parametrize(\"prefix\", [\"\", \"-\", \"+\"])\n def test_maybe_convert_numeric_infinities(\n self, coerce_numeric, infinity, prefix, convert_to_masked_nullable\n ):\n # see gh-13274\n result, _ = lib.maybe_convert_numeric(\n np.array([prefix + infinity], dtype=object),\n na_values={\"\", \"NULL\", \"nan\"},\n coerce_numeric=coerce_numeric,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n expected = np.array([np.inf if prefix in [\"\", \"+\"] else -np.inf])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\"convert_to_masked_nullable\", [True, False])\n def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable):\n msg = \"Unable to parse string\"\n with pytest.raises(ValueError, match=msg):\n lib.maybe_convert_numeric(\n np.array([\"foo_inf\"], dtype=object),\n na_values={\"\", \"NULL\", \"nan\"},\n coerce_numeric=False,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n\n @pytest.mark.parametrize(\"convert_to_masked_nullable\", [True, False])\n def test_maybe_convert_numeric_post_floatify_nan(\n self, coerce, convert_to_masked_nullable\n ):\n # see gh-13314\n data = np.array([\"1.200\", \"-999.000\", \"4.500\"], dtype=object)\n expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)\n nan_values = {-999, -999.0}\n\n out = lib.maybe_convert_numeric(\n data,\n nan_values,\n coerce,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n if convert_to_masked_nullable:\n expected = FloatingArray(expected, np.isnan(expected))\n tm.assert_extension_array_equal(expected, FloatingArray(*out))\n else:\n out = out[0]\n tm.assert_numpy_array_equal(out, expected)\n\n def test_convert_infs(self):\n arr = np.array([\"inf\", \"inf\", \"inf\"], dtype=\"O\")\n result, _ = lib.maybe_convert_numeric(arr, set(), False)\n assert result.dtype == np.float64\n\n arr = np.array([\"-inf\", \"-inf\", \"-inf\"], dtype=\"O\")\n result, _ = lib.maybe_convert_numeric(arr, set(), False)\n assert result.dtype == np.float64\n\n def test_scientific_no_exponent(self):\n # See PR 12215\n arr = np.array([\"42E\", \"2E\", \"99e\", \"6e\"], dtype=\"O\")\n result, _ = lib.maybe_convert_numeric(arr, set(), False, True)\n assert np.all(np.isnan(result))\n\n def test_convert_non_hashable(self):\n # GH13324\n # make sure that we are handing non-hashables\n arr = np.array([[10.0, 2], 1.0, \"apple\"], dtype=object)\n result, _ = lib.maybe_convert_numeric(arr, set(), False, True)\n tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))\n\n def test_convert_numeric_uint64(self):\n arr = np.array([2 ** 63], dtype=object)\n exp = np.array([2 ** 63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)\n\n arr = np.array([str(2 ** 63)], dtype=object)\n exp = np.array([2 ** 63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)\n\n arr = np.array([np.uint64(2 ** 63)], dtype=object)\n exp = np.array([2 ** 63], 
dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)\n\n @pytest.mark.parametrize(\n \"arr\",\n [\n np.array([2 ** 63, np.nan], dtype=object),\n np.array([str(2 ** 63), np.nan], dtype=object),\n np.array([np.nan, 2 ** 63], dtype=object),\n np.array([np.nan, str(2 ** 63)], dtype=object),\n ],\n )\n def test_convert_numeric_uint64_nan(self, coerce, arr):\n expected = arr.astype(float) if coerce else arr.copy()\n result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize(\"convert_to_masked_nullable\", [True, False])\n def test_convert_numeric_uint64_nan_values(\n self, coerce, convert_to_masked_nullable\n ):\n arr = np.array([2 ** 63, 2 ** 63 + 1], dtype=object)\n na_values = {2 ** 63}\n\n expected = (\n np.array([np.nan, 2 ** 63 + 1], dtype=float) if coerce else arr.copy()\n )\n result = lib.maybe_convert_numeric(\n arr,\n na_values,\n coerce_numeric=coerce,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n if convert_to_masked_nullable and coerce:\n expected = IntegerArray(\n np.array([0, 2 ** 63 + 1], dtype=\"u8\"),\n np.array([True, False], dtype=\"bool\"),\n )\n result = IntegerArray(*result)\n else:\n result = result[0] # discard mask\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"case\",\n [\n np.array([2 ** 63, -1], dtype=object),\n np.array([str(2 ** 63), -1], dtype=object),\n np.array([str(2 ** 63), str(-1)], dtype=object),\n np.array([-1, 2 ** 63], dtype=object),\n np.array([-1, str(2 ** 63)], dtype=object),\n np.array([str(-1), str(2 ** 63)], dtype=object),\n ],\n )\n @pytest.mark.parametrize(\"convert_to_masked_nullable\", [True, False])\n def test_convert_numeric_int64_uint64(\n self, case, coerce, convert_to_masked_nullable\n ):\n expected = case.astype(float) if coerce else case.copy()\n result, _ = lib.maybe_convert_numeric(\n case,\n set(),\n coerce_numeric=coerce,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize(\"convert_to_masked_nullable\", [True, False])\n def test_convert_numeric_string_uint64(self, convert_to_masked_nullable):\n # GH32394\n result = lib.maybe_convert_numeric(\n np.array([\"uint64\"], dtype=object),\n set(),\n coerce_numeric=True,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n if convert_to_masked_nullable:\n result = FloatingArray(*result)\n else:\n result = result[0]\n assert np.isnan(result)\n\n @pytest.mark.parametrize(\"value\", [-(2 ** 63) - 1, 2 ** 64])\n def test_convert_int_overflow(self, value):\n # see gh-18584\n arr = np.array([value], dtype=object)\n result = lib.maybe_convert_objects(arr)\n tm.assert_numpy_array_equal(arr, result)\n\n def test_maybe_convert_objects_uint64(self):\n # see gh-4471\n arr = np.array([2 ** 63], dtype=object)\n exp = np.array([2 ** 63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n # NumPy bug: can't compare uint64 to int64, as that\n # results in both casting to float64, so we should\n # make sure that this function is robust against it\n arr = np.array([np.uint64(2 ** 63)], dtype=object)\n exp = np.array([2 ** 63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n arr = np.array([2, -1], dtype=object)\n exp = np.array([2, -1], dtype=np.int64)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n arr = np.array([2 ** 63, -1], dtype=object)\n exp = 
np.array([2 ** 63, -1], dtype=object)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n def test_maybe_convert_objects_datetime(self):\n # GH27438\n arr = np.array(\n [np.datetime64(\"2000-01-01\"), np.timedelta64(1, \"s\")], dtype=object\n )\n exp = arr.copy()\n out = lib.maybe_convert_objects(\n arr, convert_datetime=True, convert_timedelta=True\n )\n tm.assert_numpy_array_equal(out, exp)\n\n arr = np.array([pd.NaT, np.timedelta64(1, \"s\")], dtype=object)\n exp = np.array([np.timedelta64(\"NaT\"), np.timedelta64(1, \"s\")], dtype=\"m8[ns]\")\n out = lib.maybe_convert_objects(\n arr, convert_datetime=True, convert_timedelta=True\n )\n tm.assert_numpy_array_equal(out, exp)\n\n # with convert_timedelta=True, the nan is a valid NA value for td64\n arr = np.array([np.timedelta64(1, \"s\"), np.nan], dtype=object)\n exp = exp[::-1]\n out = lib.maybe_convert_objects(\n arr, convert_datetime=True, convert_timedelta=True\n )\n tm.assert_numpy_array_equal(out, exp)\n\n def test_maybe_convert_objects_dtype_if_all_nat(self):\n arr = np.array([pd.NaT, pd.NaT], dtype=object)\n out = lib.maybe_convert_objects(\n arr, convert_datetime=True, convert_timedelta=True\n )\n # no dtype_if_all_nat passed -> we dont guess\n tm.assert_numpy_array_equal(out, arr)\n\n out = lib.maybe_convert_objects(\n arr,\n convert_datetime=True,\n convert_timedelta=True,\n dtype_if_all_nat=np.dtype(\"timedelta64[ns]\"),\n )\n exp = np.array([\"NaT\", \"NaT\"], dtype=\"timedelta64[ns]\")\n tm.assert_numpy_array_equal(out, exp)\n\n out = lib.maybe_convert_objects(\n arr,\n convert_datetime=True,\n convert_timedelta=True,\n dtype_if_all_nat=np.dtype(\"datetime64[ns]\"),\n )\n exp = np.array([\"NaT\", \"NaT\"], dtype=\"datetime64[ns]\")\n tm.assert_numpy_array_equal(out, exp)\n\n def test_maybe_convert_objects_dtype_if_all_nat_invalid(self):\n # we accept datetime64[ns], timedelta64[ns], and EADtype\n arr = np.array([pd.NaT, pd.NaT], dtype=object)\n\n with pytest.raises(ValueError, match=\"int64\"):\n lib.maybe_convert_objects(\n arr,\n convert_datetime=True,\n convert_timedelta=True,\n dtype_if_all_nat=np.dtype(\"int64\"),\n )\n\n @pytest.mark.parametrize(\"dtype\", [\"datetime64[ns]\", \"timedelta64[ns]\"])\n def test_maybe_convert_objects_datetime_overflow_safe(self, dtype):\n stamp = datetime(2363, 10, 4) # Enterprise-D launch date\n if dtype == \"timedelta64[ns]\":\n stamp = stamp - datetime(1970, 1, 1)\n arr = np.array([stamp], dtype=object)\n\n out = lib.maybe_convert_objects(\n arr, convert_datetime=True, convert_timedelta=True\n )\n # no OutOfBoundsDatetime/OutOfBoundsTimedeltas\n tm.assert_numpy_array_equal(out, arr)\n\n def test_maybe_convert_objects_mixed_datetimes(self):\n ts = Timestamp(\"now\")\n vals = [ts, ts.to_pydatetime(), ts.to_datetime64(), pd.NaT, np.nan, None]\n\n for data in itertools.permutations(vals):\n data = np.array(list(data), dtype=object)\n expected = DatetimeIndex(data)._data._ndarray\n result = lib.maybe_convert_objects(data, convert_datetime=True)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_maybe_convert_objects_timedelta64_nat(self):\n obj = np.timedelta64(\"NaT\", \"ns\")\n arr = np.array([obj], dtype=object)\n assert arr[0] is obj\n\n result = lib.maybe_convert_objects(arr, convert_timedelta=True)\n\n expected = np.array([obj], dtype=\"m8[ns]\")\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"exp\",\n [\n IntegerArray(np.array([2, 0], dtype=\"i8\"), np.array([False, True])),\n IntegerArray(np.array([2, 0], 
dtype=\"int64\"), np.array([False, True])),\n ],\n )\n def test_maybe_convert_objects_nullable_integer(self, exp):\n # GH27335\n arr = np.array([2, np.NaN], dtype=object)\n result = lib.maybe_convert_objects(arr, convert_to_nullable_integer=True)\n\n tm.assert_extension_array_equal(result, exp)\n\n @pytest.mark.parametrize(\n \"convert_to_masked_nullable, exp\",\n [\n (True, IntegerArray(np.array([2, 0], dtype=\"i8\"), np.array([False, True]))),\n (False, np.array([2, np.nan], dtype=\"float64\")),\n ],\n )\n def test_maybe_convert_numeric_nullable_integer(\n self, convert_to_masked_nullable, exp\n ):\n # GH 40687\n arr = np.array([2, np.NaN], dtype=object)\n result = lib.maybe_convert_numeric(\n arr, set(), convert_to_masked_nullable=convert_to_masked_nullable\n )\n if convert_to_masked_nullable:\n result = IntegerArray(*result)\n tm.assert_extension_array_equal(result, exp)\n else:\n result = result[0]\n tm.assert_numpy_array_equal(result, exp)\n\n @pytest.mark.parametrize(\n \"convert_to_masked_nullable, exp\",\n [\n (\n True,\n FloatingArray(\n np.array([2.0, 0.0], dtype=\"float64\"), np.array([False, True])\n ),\n ),\n (False, np.array([2.0, np.nan], dtype=\"float64\")),\n ],\n )\n def test_maybe_convert_numeric_floating_array(\n self, convert_to_masked_nullable, exp\n ):\n # GH 40687\n arr = np.array([2.0, np.nan], dtype=object)\n result = lib.maybe_convert_numeric(\n arr, set(), convert_to_masked_nullable=convert_to_masked_nullable\n )\n if convert_to_masked_nullable:\n tm.assert_extension_array_equal(FloatingArray(*result), exp)\n else:\n result = result[0]\n tm.assert_numpy_array_equal(result, exp)\n\n def test_maybe_convert_objects_bool_nan(self):\n # GH32146\n ind = Index([True, False, np.nan], dtype=object)\n exp = np.array([True, False, np.nan], dtype=object)\n out = lib.maybe_convert_objects(ind.values, safe=1)\n tm.assert_numpy_array_equal(out, exp)\n\n @pytest.mark.parametrize(\n \"data0\",\n [\n True,\n 1,\n 1.0,\n 1.0 + 1.0j,\n np.int8(1),\n np.int16(1),\n np.int32(1),\n np.int64(1),\n np.float16(1),\n np.float32(1),\n np.float64(1),\n np.complex64(1),\n np.complex128(1),\n ],\n )\n @pytest.mark.parametrize(\n \"data1\",\n [\n True,\n 1,\n 1.0,\n 1.0 + 1.0j,\n np.int8(1),\n np.int16(1),\n np.int32(1),\n np.int64(1),\n np.float16(1),\n np.float32(1),\n np.float64(1),\n np.complex64(1),\n np.complex128(1),\n ],\n )\n def test_maybe_convert_objects_itemsize(self, data0, data1):\n # GH 40908\n data = [data0, data1]\n arr = np.array(data, dtype=\"object\")\n\n common_kind = np.find_common_type(\n [type(data0), type(data1)], scalar_types=[]\n ).kind\n kind0 = \"python\" if not hasattr(data0, \"dtype\") else data0.dtype.kind\n kind1 = \"python\" if not hasattr(data1, \"dtype\") else data1.dtype.kind\n if kind0 != \"python\" and kind1 != \"python\":\n kind = common_kind\n itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize)\n elif is_bool(data0) or is_bool(data1):\n kind = \"bool\" if (is_bool(data0) and is_bool(data1)) else \"object\"\n itemsize = \"\"\n elif is_complex(data0) or is_complex(data1):\n kind = common_kind\n itemsize = 16\n else:\n kind = common_kind\n itemsize = 8\n\n expected = np.array(data, dtype=f\"{kind}{itemsize}\")\n result = lib.maybe_convert_objects(arr)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_mixed_dtypes_remain_object_array(self):\n # GH14956\n arr = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)\n result = lib.maybe_convert_objects(arr, convert_datetime=True)\n tm.assert_numpy_array_equal(result, arr)\n\n 
@pytest.mark.parametrize(\n \"idx\",\n [\n pd.IntervalIndex.from_breaks(range(5), closed=\"both\"),\n pd.period_range(\"2016-01-01\", periods=3, freq=\"D\"),\n ],\n )\n def test_maybe_convert_objects_ea(self, idx):\n\n result = lib.maybe_convert_objects(\n np.array(idx, dtype=object),\n convert_period=True,\n convert_interval=True,\n )\n tm.assert_extension_array_equal(result, idx._data)\n\n\nclass TestTypeInference:\n\n # Dummy class used for testing with Python objects\n class Dummy:\n pass\n\n def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):\n # see pandas/conftest.py\n inferred_dtype, values = any_skipna_inferred_dtype\n\n # make sure the inferred dtype of the fixture is as requested\n assert inferred_dtype == lib.infer_dtype(values, skipna=True)\n\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_length_zero(self, skipna):\n result = lib.infer_dtype(np.array([], dtype=\"i4\"), skipna=skipna)\n assert result == \"integer\"\n\n result = lib.infer_dtype([], skipna=skipna)\n assert result == \"empty\"\n\n # GH 18004\n arr = np.array([np.array([], dtype=object), np.array([], dtype=object)])\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == \"empty\"\n\n def test_integers(self):\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"integer\"\n\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5), \"foo\"], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"mixed-integer\"\n\n arr = np.array([1, 2, 3, 4, 5], dtype=\"i4\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"integer\"\n\n @pytest.mark.parametrize(\n \"arr, skipna\",\n [\n (np.array([1, 2, np.nan, np.nan, 3], dtype=\"O\"), False),\n (np.array([1, 2, np.nan, np.nan, 3], dtype=\"O\"), True),\n (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype=\"O\"), False),\n (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype=\"O\"), True),\n ],\n )\n def test_integer_na(self, arr, skipna):\n # GH 27392\n result = lib.infer_dtype(arr, skipna=skipna)\n expected = \"integer\" if skipna else \"integer-na\"\n assert result == expected\n\n def test_infer_dtype_skipna_default(self):\n # infer_dtype `skipna` default deprecated in GH#24050,\n # changed to True in GH#29876\n arr = np.array([1, 2, 3, np.nan], dtype=object)\n\n result = lib.infer_dtype(arr)\n assert result == \"integer\"\n\n def test_bools(self):\n arr = np.array([True, False, True, True, True], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"boolean\"\n\n arr = np.array([np.bool_(True), np.bool_(False)], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"boolean\"\n\n arr = np.array([True, False, True, \"foo\"], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"mixed\"\n\n arr = np.array([True, False, True], dtype=bool)\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"boolean\"\n\n arr = np.array([True, np.nan, False], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"boolean\"\n\n result = lib.infer_dtype(arr, skipna=False)\n assert result == \"mixed\"\n\n def test_floats(self):\n arr = np.array([1.0, 2.0, 3.0, np.float64(4), np.float32(5)], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"floating\"\n\n arr = np.array([1, 2, 3, np.float64(4), np.float32(5), \"foo\"], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result 
== \"mixed-integer\"\n\n arr = np.array([1, 2, 3, 4, 5], dtype=\"f4\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"floating\"\n\n arr = np.array([1, 2, 3, 4, 5], dtype=\"f8\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"floating\"\n\n def test_decimals(self):\n # GH15690\n arr = np.array([Decimal(1), Decimal(2), Decimal(3)])\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"decimal\"\n\n arr = np.array([1.0, 2.0, Decimal(3)])\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"mixed\"\n\n result = lib.infer_dtype(arr[::-1], skipna=True)\n assert result == \"mixed\"\n\n arr = np.array([Decimal(1), Decimal(\"NaN\"), Decimal(3)])\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"decimal\"\n\n arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"decimal\"\n\n # complex is compatible with nan, so skipna has no effect\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_complex(self, skipna):\n # gets cast to complex on array construction\n arr = np.array([1.0, 2.0, 1 + 1j])\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == \"complex\"\n\n arr = np.array([1.0, 2.0, 1 + 1j], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == \"mixed\"\n\n result = lib.infer_dtype(arr[::-1], skipna=skipna)\n assert result == \"mixed\"\n\n # gets cast to complex on array construction\n arr = np.array([1, np.nan, 1 + 1j])\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == \"complex\"\n\n arr = np.array([1.0, np.nan, 1 + 1j], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == \"mixed\"\n\n # complex with nans stays complex\n arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == \"complex\"\n\n # test smaller complex dtype; will pass through _try_infer_map fastpath\n arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == \"complex\"\n\n def test_string(self):\n pass\n\n def test_unicode(self):\n arr = [\"a\", np.nan, \"c\"]\n result = lib.infer_dtype(arr, skipna=False)\n # This currently returns \"mixed\", but it's not clear that's optimal.\n # This could also return \"string\" or \"mixed-string\"\n assert result == \"mixed\"\n\n arr = [\"a\", np.nan, \"c\"]\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"string\"\n\n arr = [\"a\", \"c\"]\n result = lib.infer_dtype(arr, skipna=False)\n assert result == \"string\"\n\n @pytest.mark.parametrize(\n \"dtype, missing, skipna, expected\",\n [\n (float, np.nan, False, \"floating\"),\n (float, np.nan, True, \"floating\"),\n (object, np.nan, False, \"floating\"),\n (object, np.nan, True, \"empty\"),\n (object, None, False, \"mixed\"),\n (object, None, True, \"empty\"),\n ],\n )\n @pytest.mark.parametrize(\"box\", [Series, np.array])\n def test_object_empty(self, box, missing, dtype, skipna, expected):\n # GH 23421\n arr = box([missing, missing], dtype=dtype)\n\n result = lib.infer_dtype(arr, skipna=skipna)\n assert result == expected\n\n def test_datetime(self):\n\n dates = [datetime(2012, 1, x) for x in range(1, 20)]\n index = Index(dates)\n assert index.inferred_type == \"datetime64\"\n\n def test_infer_dtype_datetime64(self):\n arr = np.array(\n [np.datetime64(\"2011-01-01\"), np.datetime64(\"2011-01-01\")], dtype=object\n )\n assert lib.infer_dtype(arr, 
skipna=True) == \"datetime64\"\n\n @pytest.mark.parametrize(\"na_value\", [pd.NaT, np.nan])\n def test_infer_dtype_datetime64_with_na(self, na_value):\n # starts with nan\n arr = np.array([na_value, np.datetime64(\"2011-01-02\")])\n assert lib.infer_dtype(arr, skipna=True) == \"datetime64\"\n\n arr = np.array([na_value, np.datetime64(\"2011-01-02\"), na_value])\n assert lib.infer_dtype(arr, skipna=True) == \"datetime64\"\n\n @pytest.mark.parametrize(\n \"arr\",\n [\n np.array(\n [np.timedelta64(\"nat\"), np.datetime64(\"2011-01-02\")], dtype=object\n ),\n np.array(\n [np.datetime64(\"2011-01-02\"), np.timedelta64(\"nat\")], dtype=object\n ),\n np.array([np.datetime64(\"2011-01-01\"), Timestamp(\"2011-01-02\")]),\n np.array([Timestamp(\"2011-01-02\"), np.datetime64(\"2011-01-01\")]),\n np.array([np.nan, Timestamp(\"2011-01-02\"), 1.1]),\n np.array([np.nan, \"2011-01-01\", Timestamp(\"2011-01-02\")], dtype=object),\n np.array([np.datetime64(\"nat\"), np.timedelta64(1, \"D\")], dtype=object),\n np.array([np.timedelta64(1, \"D\"), np.datetime64(\"nat\")], dtype=object),\n ],\n )\n def test_infer_datetimelike_dtype_mixed(self, arr):\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n def test_infer_dtype_mixed_integer(self):\n arr = np.array([np.nan, Timestamp(\"2011-01-02\"), 1])\n assert lib.infer_dtype(arr, skipna=True) == \"mixed-integer\"\n\n @pytest.mark.parametrize(\n \"arr\",\n [\n np.array([Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")]),\n np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]),\n np.array([datetime(2011, 1, 1), Timestamp(\"2011-01-02\")]),\n ],\n )\n def test_infer_dtype_datetime(self, arr):\n assert lib.infer_dtype(arr, skipna=True) == \"datetime\"\n\n @pytest.mark.parametrize(\"na_value\", [pd.NaT, np.nan])\n @pytest.mark.parametrize(\n \"time_stamp\", [Timestamp(\"2011-01-01\"), datetime(2011, 1, 1)]\n )\n def test_infer_dtype_datetime_with_na(self, na_value, time_stamp):\n # starts with nan\n arr = np.array([na_value, time_stamp])\n assert lib.infer_dtype(arr, skipna=True) == \"datetime\"\n\n arr = np.array([na_value, time_stamp, na_value])\n assert lib.infer_dtype(arr, skipna=True) == \"datetime\"\n\n @pytest.mark.parametrize(\n \"arr\",\n [\n np.array([Timedelta(\"1 days\"), Timedelta(\"2 days\")]),\n np.array([np.timedelta64(1, \"D\"), np.timedelta64(2, \"D\")], dtype=object),\n np.array([timedelta(1), timedelta(2)]),\n ],\n )\n def test_infer_dtype_timedelta(self, arr):\n assert lib.infer_dtype(arr, skipna=True) == \"timedelta\"\n\n @pytest.mark.parametrize(\"na_value\", [pd.NaT, np.nan])\n @pytest.mark.parametrize(\n \"delta\", [Timedelta(\"1 days\"), np.timedelta64(1, \"D\"), timedelta(1)]\n )\n def test_infer_dtype_timedelta_with_na(self, na_value, delta):\n # starts with nan\n arr = np.array([na_value, delta])\n assert lib.infer_dtype(arr, skipna=True) == \"timedelta\"\n\n arr = np.array([na_value, delta, na_value])\n assert lib.infer_dtype(arr, skipna=True) == \"timedelta\"\n\n def test_infer_dtype_period(self):\n # GH 13664\n arr = np.array([Period(\"2011-01\", freq=\"D\"), Period(\"2011-02\", freq=\"D\")])\n assert lib.infer_dtype(arr, skipna=True) == \"period\"\n\n # non-homogeneous freqs -> mixed\n arr = np.array([Period(\"2011-01\", freq=\"D\"), Period(\"2011-02\", freq=\"M\")])\n assert lib.infer_dtype(arr, skipna=True) == \"mixed\"\n\n @pytest.mark.parametrize(\"klass\", [pd.array, Series, Index])\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_infer_dtype_period_array(self, klass, skipna):\n # 
https://github.com/pandas-dev/pandas/issues/23553\n values = klass(\n [\n Period(\"2011-01-01\", freq=\"D\"),\n Period(\"2011-01-02\", freq=\"D\"),\n pd.NaT,\n ]\n )\n assert lib.infer_dtype(values, skipna=skipna) == \"period\"\n\n # periods but mixed freq\n values = klass(\n [\n Period(\"2011-01-01\", freq=\"D\"),\n Period(\"2011-01-02\", freq=\"M\"),\n pd.NaT,\n ]\n )\n # with pd.array this becomes PandasArray which ends up as \"unknown-array\"\n exp = \"unknown-array\" if klass is pd.array else \"mixed\"\n assert lib.infer_dtype(values, skipna=skipna) == exp\n\n def test_infer_dtype_period_mixed(self):\n arr = np.array(\n [Period(\"2011-01\", freq=\"M\"), np.datetime64(\"nat\")], dtype=object\n )\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n arr = np.array(\n [np.datetime64(\"nat\"), Period(\"2011-01\", freq=\"M\")], dtype=object\n )\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n @pytest.mark.parametrize(\"na_value\", [pd.NaT, np.nan])\n def test_infer_dtype_period_with_na(self, na_value):\n # starts with nan\n arr = np.array([na_value, Period(\"2011-01\", freq=\"D\")])\n assert lib.infer_dtype(arr, skipna=True) == \"period\"\n\n arr = np.array([na_value, Period(\"2011-01\", freq=\"D\"), na_value])\n assert lib.infer_dtype(arr, skipna=True) == \"period\"\n\n @pytest.mark.parametrize(\n \"data\",\n [\n [datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],\n [Timestamp(\"20170612\"), Timestamp(\"20170311\")],\n [\n Timestamp(\"20170612\", tz=\"US/Eastern\"),\n Timestamp(\"20170311\", tz=\"US/Eastern\"),\n ],\n [date(2017, 6, 12), Timestamp(\"20170311\", tz=\"US/Eastern\")],\n [np.datetime64(\"2017-06-12\"), np.datetime64(\"2017-03-11\")],\n [np.datetime64(\"2017-06-12\"), datetime(2017, 3, 11, 1, 15)],\n ],\n )\n def test_infer_datetimelike_array_datetime(self, data):\n assert lib.infer_datetimelike_array(data) == (\"datetime\", False)\n\n @pytest.mark.parametrize(\n \"data\",\n [\n [timedelta(2017, 6, 12), timedelta(2017, 3, 11)],\n [timedelta(2017, 6, 12), date(2017, 3, 11)],\n [np.timedelta64(2017, \"D\"), np.timedelta64(6, \"s\")],\n [np.timedelta64(2017, \"D\"), timedelta(2017, 3, 11)],\n ],\n )\n def test_infer_datetimelike_array_timedelta(self, data):\n assert lib.infer_datetimelike_array(data) == (\"timedelta\", False)\n\n def test_infer_datetimelike_array_date(self):\n arr = [date(2017, 6, 12), date(2017, 3, 11)]\n assert lib.infer_datetimelike_array(arr) == (\"date\", False)\n\n @pytest.mark.parametrize(\n \"data\",\n [\n [\"2017-06-12\", \"2017-03-11\"],\n [20170612, 20170311],\n [20170612.5, 20170311.8],\n [Dummy(), Dummy()],\n [Timestamp(\"20170612\"), Timestamp(\"20170311\", tz=\"US/Eastern\")],\n [Timestamp(\"20170612\"), 20170311],\n [timedelta(2017, 6, 12), Timestamp(\"20170311\", tz=\"US/Eastern\")],\n ],\n )\n def test_infer_datetimelike_array_mixed(self, data):\n assert lib.infer_datetimelike_array(data)[0] == \"mixed\"\n\n @pytest.mark.parametrize(\n \"first, expected\",\n [\n [[None], \"mixed\"],\n [[np.nan], \"mixed\"],\n [[pd.NaT], \"nat\"],\n [[datetime(2017, 6, 12, 19, 30), pd.NaT], \"datetime\"],\n [[np.datetime64(\"2017-06-12\"), pd.NaT], \"datetime\"],\n [[date(2017, 6, 12), pd.NaT], \"date\"],\n [[timedelta(2017, 6, 12), pd.NaT], \"timedelta\"],\n [[np.timedelta64(2017, \"D\"), pd.NaT], \"timedelta\"],\n ],\n )\n @pytest.mark.parametrize(\"second\", [None, np.nan])\n def test_infer_datetimelike_array_nan_nat_like(self, first, second, expected):\n first.append(second)\n assert lib.infer_datetimelike_array(first) == 
(expected, False)\n\n def test_infer_dtype_all_nan_nat_like(self):\n arr = np.array([np.nan, np.nan])\n assert lib.infer_dtype(arr, skipna=True) == \"floating\"\n\n # nan and None mix are result in mixed\n arr = np.array([np.nan, np.nan, None])\n assert lib.infer_dtype(arr, skipna=True) == \"empty\"\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n arr = np.array([None, np.nan, np.nan])\n assert lib.infer_dtype(arr, skipna=True) == \"empty\"\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n # pd.NaT\n arr = np.array([pd.NaT])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime\"\n\n arr = np.array([pd.NaT, np.nan])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime\"\n\n arr = np.array([np.nan, pd.NaT])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime\"\n\n arr = np.array([np.nan, pd.NaT, np.nan])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime\"\n\n arr = np.array([None, pd.NaT, None])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime\"\n\n # np.datetime64(nat)\n arr = np.array([np.datetime64(\"nat\")])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime64\"\n\n for n in [np.nan, pd.NaT, None]:\n arr = np.array([n, np.datetime64(\"nat\"), n])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime64\"\n\n arr = np.array([pd.NaT, n, np.datetime64(\"nat\"), n])\n assert lib.infer_dtype(arr, skipna=False) == \"datetime64\"\n\n arr = np.array([np.timedelta64(\"nat\")], dtype=object)\n assert lib.infer_dtype(arr, skipna=False) == \"timedelta\"\n\n for n in [np.nan, pd.NaT, None]:\n arr = np.array([n, np.timedelta64(\"nat\"), n])\n assert lib.infer_dtype(arr, skipna=False) == \"timedelta\"\n\n arr = np.array([pd.NaT, n, np.timedelta64(\"nat\"), n])\n assert lib.infer_dtype(arr, skipna=False) == \"timedelta\"\n\n # datetime / timedelta mixed\n arr = np.array([pd.NaT, np.datetime64(\"nat\"), np.timedelta64(\"nat\"), np.nan])\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n arr = np.array([np.timedelta64(\"nat\"), np.datetime64(\"nat\")], dtype=object)\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n def test_is_datetimelike_array_all_nan_nat_like(self):\n arr = np.array([np.nan, pd.NaT, np.datetime64(\"nat\")])\n assert lib.is_datetime_array(arr)\n assert lib.is_datetime64_array(arr)\n assert not lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, pd.NaT, np.timedelta64(\"nat\")])\n assert not lib.is_datetime_array(arr)\n assert not lib.is_datetime64_array(arr)\n assert lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, pd.NaT, np.datetime64(\"nat\"), np.timedelta64(\"nat\")])\n assert not lib.is_datetime_array(arr)\n assert not lib.is_datetime64_array(arr)\n assert not lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, pd.NaT])\n assert lib.is_datetime_array(arr)\n assert lib.is_datetime64_array(arr)\n assert lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, np.nan], dtype=object)\n assert not lib.is_datetime_array(arr)\n assert not lib.is_datetime64_array(arr)\n assert not lib.is_timedelta_or_timedelta64_array(arr)\n\n assert lib.is_datetime_with_singletz_array(\n np.array(\n [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130102\", tz=\"US/Eastern\"),\n ],\n dtype=object,\n )\n )\n assert not lib.is_datetime_with_singletz_array(\n np.array(\n [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130102\", tz=\"CET\"),\n ],\n dtype=object,\n )\n )\n\n @pytest.mark.parametrize(\n \"func\",\n [\n 
\"is_datetime_array\",\n \"is_datetime64_array\",\n \"is_bool_array\",\n \"is_timedelta_or_timedelta64_array\",\n \"is_date_array\",\n \"is_time_array\",\n \"is_interval_array\",\n ],\n )\n def test_other_dtypes_for_array(self, func):\n func = getattr(lib, func)\n arr = np.array([\"foo\", \"bar\"])\n assert not func(arr)\n assert not func(arr.reshape(2, 1))\n\n arr = np.array([1, 2])\n assert not func(arr)\n assert not func(arr.reshape(2, 1))\n\n def test_date(self):\n\n dates = [date(2012, 1, day) for day in range(1, 20)]\n index = Index(dates)\n assert index.inferred_type == \"date\"\n\n dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]\n result = lib.infer_dtype(dates, skipna=False)\n assert result == \"mixed\"\n\n result = lib.infer_dtype(dates, skipna=True)\n assert result == \"date\"\n\n @pytest.mark.parametrize(\n \"values\",\n [\n [date(2020, 1, 1), Timestamp(\"2020-01-01\")],\n [Timestamp(\"2020-01-01\"), date(2020, 1, 1)],\n [date(2020, 1, 1), pd.NaT],\n [pd.NaT, date(2020, 1, 1)],\n ],\n )\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_infer_dtype_date_order_invariant(self, values, skipna):\n # https://github.com/pandas-dev/pandas/issues/33741\n result = lib.infer_dtype(values, skipna=skipna)\n assert result == \"date\"\n\n def test_is_numeric_array(self):\n\n assert lib.is_float_array(np.array([1, 2.0]))\n assert lib.is_float_array(np.array([1, 2.0, np.nan]))\n assert not lib.is_float_array(np.array([1, 2]))\n\n assert lib.is_integer_array(np.array([1, 2]))\n assert not lib.is_integer_array(np.array([1, 2.0]))\n\n def test_is_string_array(self):\n\n assert lib.is_string_array(np.array([\"foo\", \"bar\"]))\n assert not lib.is_string_array(\n np.array([\"foo\", \"bar\", pd.NA], dtype=object), skipna=False\n )\n assert lib.is_string_array(\n np.array([\"foo\", \"bar\", pd.NA], dtype=object), skipna=True\n )\n # NaN is not valid for string array, just NA\n assert not lib.is_string_array(\n np.array([\"foo\", \"bar\", np.nan], dtype=object), skipna=True\n )\n\n assert not lib.is_string_array(np.array([1, 2]))\n\n def test_to_object_array_tuples(self):\n r = (5, 6)\n values = [r]\n lib.to_object_array_tuples(values)\n\n # make sure record array works\n record = namedtuple(\"record\", \"x y\")\n r = record(5, 6)\n values = [r]\n lib.to_object_array_tuples(values)\n\n def test_object(self):\n\n # GH 7431\n # cannot infer more than this as only a single element\n arr = np.array([None], dtype=\"O\")\n result = lib.infer_dtype(arr, skipna=False)\n assert result == \"mixed\"\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"empty\"\n\n def test_to_object_array_width(self):\n # see gh-13320\n rows = [[1, 2, 3], [4, 5, 6]]\n\n expected = np.array(rows, dtype=object)\n out = lib.to_object_array(rows)\n tm.assert_numpy_array_equal(out, expected)\n\n expected = np.array(rows, dtype=object)\n out = lib.to_object_array(rows, min_width=1)\n tm.assert_numpy_array_equal(out, expected)\n\n expected = np.array(\n [[1, 2, 3, None, None], [4, 5, 6, None, None]], dtype=object\n )\n out = lib.to_object_array(rows, min_width=5)\n tm.assert_numpy_array_equal(out, expected)\n\n def test_is_period(self):\n assert lib.is_period(Period(\"2011-01\", freq=\"M\"))\n assert not lib.is_period(PeriodIndex([\"2011-01\"], freq=\"M\"))\n assert not lib.is_period(Timestamp(\"2011-01\"))\n assert not lib.is_period(1)\n assert not lib.is_period(np.nan)\n\n def test_categorical(self):\n\n # GH 8974\n arr = Categorical(list(\"abc\"))\n result = lib.infer_dtype(arr, 
skipna=True)\n assert result == \"categorical\"\n\n result = lib.infer_dtype(Series(arr), skipna=True)\n assert result == \"categorical\"\n\n arr = Categorical(list(\"abc\"), categories=[\"cegfab\"], ordered=True)\n result = lib.infer_dtype(arr, skipna=True)\n assert result == \"categorical\"\n\n result = lib.infer_dtype(Series(arr), skipna=True)\n assert result == \"categorical\"\n\n @pytest.mark.parametrize(\"asobject\", [True, False])\n def test_interval(self, asobject):\n idx = pd.IntervalIndex.from_breaks(range(5), closed=\"both\")\n if asobject:\n idx = idx.astype(object)\n\n inferred = lib.infer_dtype(idx, skipna=False)\n assert inferred == \"interval\"\n\n inferred = lib.infer_dtype(idx._data, skipna=False)\n assert inferred == \"interval\"\n\n inferred = lib.infer_dtype(Series(idx, dtype=idx.dtype), skipna=False)\n assert inferred == \"interval\"\n\n @pytest.mark.parametrize(\"value\", [Timestamp(0), Timedelta(0), 0, 0.0])\n def test_interval_mismatched_closed(self, value):\n\n first = Interval(value, value, closed=\"left\")\n second = Interval(value, value, closed=\"right\")\n\n # if closed match, we should infer \"interval\"\n arr = np.array([first, first], dtype=object)\n assert lib.infer_dtype(arr, skipna=False) == \"interval\"\n\n # if closed dont match, we should _not_ get \"interval\"\n arr2 = np.array([first, second], dtype=object)\n assert lib.infer_dtype(arr2, skipna=False) == \"mixed\"\n\n def test_interval_mismatched_subtype(self):\n first = Interval(0, 1, closed=\"left\")\n second = Interval(Timestamp(0), Timestamp(1), closed=\"left\")\n third = Interval(Timedelta(0), Timedelta(1), closed=\"left\")\n\n arr = np.array([first, second])\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n arr = np.array([second, third])\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n arr = np.array([first, third])\n assert lib.infer_dtype(arr, skipna=False) == \"mixed\"\n\n # float vs int subdtype are compatible\n flt_interval = Interval(1.5, 2.5, closed=\"left\")\n arr = np.array([first, flt_interval], dtype=object)\n assert lib.infer_dtype(arr, skipna=False) == \"interval\"\n\n @pytest.mark.parametrize(\"klass\", [pd.array, Series])\n @pytest.mark.parametrize(\"skipna\", [True, False])\n @pytest.mark.parametrize(\"data\", [[\"a\", \"b\", \"c\"], [\"a\", \"b\", pd.NA]])\n def test_string_dtype(self, data, skipna, klass, nullable_string_dtype):\n # StringArray\n val = klass(data, dtype=nullable_string_dtype)\n inferred = lib.infer_dtype(val, skipna=skipna)\n assert inferred == \"string\"\n\n @pytest.mark.parametrize(\"klass\", [pd.array, Series])\n @pytest.mark.parametrize(\"skipna\", [True, False])\n @pytest.mark.parametrize(\"data\", [[True, False, True], [True, False, pd.NA]])\n def test_boolean_dtype(self, data, skipna, klass):\n # BooleanArray\n val = klass(data, dtype=\"boolean\")\n inferred = lib.infer_dtype(val, skipna=skipna)\n assert inferred == \"boolean\"\n\n\nclass TestNumberScalar:\n def test_is_number(self):\n\n assert is_number(True)\n assert is_number(1)\n assert is_number(1.1)\n assert is_number(1 + 3j)\n assert is_number(np.int64(1))\n assert is_number(np.float64(1.1))\n assert is_number(np.complex128(1 + 3j))\n assert is_number(np.nan)\n\n assert not is_number(None)\n assert not is_number(\"x\")\n assert not is_number(datetime(2011, 1, 1))\n assert not is_number(np.datetime64(\"2011-01-01\"))\n assert not is_number(Timestamp(\"2011-01-01\"))\n assert not is_number(Timestamp(\"2011-01-01\", tz=\"US/Eastern\"))\n assert not 
is_number(timedelta(1000))\n assert not is_number(Timedelta(\"1 days\"))\n\n # questionable\n assert not is_number(np.bool_(False))\n assert is_number(np.timedelta64(1, \"D\"))\n\n def test_is_bool(self):\n assert is_bool(True)\n assert is_bool(False)\n assert is_bool(np.bool_(False))\n\n assert not is_bool(1)\n assert not is_bool(1.1)\n assert not is_bool(1 + 3j)\n assert not is_bool(np.int64(1))\n assert not is_bool(np.float64(1.1))\n assert not is_bool(np.complex128(1 + 3j))\n assert not is_bool(np.nan)\n assert not is_bool(None)\n assert not is_bool(\"x\")\n assert not is_bool(datetime(2011, 1, 1))\n assert not is_bool(np.datetime64(\"2011-01-01\"))\n assert not is_bool(Timestamp(\"2011-01-01\"))\n assert not is_bool(Timestamp(\"2011-01-01\", tz=\"US/Eastern\"))\n assert not is_bool(timedelta(1000))\n assert not is_bool(np.timedelta64(1, \"D\"))\n assert not is_bool(Timedelta(\"1 days\"))\n\n def test_is_integer(self):\n assert is_integer(1)\n assert is_integer(np.int64(1))\n\n assert not is_integer(True)\n assert not is_integer(1.1)\n assert not is_integer(1 + 3j)\n assert not is_integer(False)\n assert not is_integer(np.bool_(False))\n assert not is_integer(np.float64(1.1))\n assert not is_integer(np.complex128(1 + 3j))\n assert not is_integer(np.nan)\n assert not is_integer(None)\n assert not is_integer(\"x\")\n assert not is_integer(datetime(2011, 1, 1))\n assert not is_integer(np.datetime64(\"2011-01-01\"))\n assert not is_integer(Timestamp(\"2011-01-01\"))\n assert not is_integer(Timestamp(\"2011-01-01\", tz=\"US/Eastern\"))\n assert not is_integer(timedelta(1000))\n assert not is_integer(Timedelta(\"1 days\"))\n assert not is_integer(np.timedelta64(1, \"D\"))\n\n def test_is_float(self):\n assert is_float(1.1)\n assert is_float(np.float64(1.1))\n assert is_float(np.nan)\n\n assert not is_float(True)\n assert not is_float(1)\n assert not is_float(1 + 3j)\n assert not is_float(False)\n assert not is_float(np.bool_(False))\n assert not is_float(np.int64(1))\n assert not is_float(np.complex128(1 + 3j))\n assert not is_float(None)\n assert not is_float(\"x\")\n assert not is_float(datetime(2011, 1, 1))\n assert not is_float(np.datetime64(\"2011-01-01\"))\n assert not is_float(Timestamp(\"2011-01-01\"))\n assert not is_float(Timestamp(\"2011-01-01\", tz=\"US/Eastern\"))\n assert not is_float(timedelta(1000))\n assert not is_float(np.timedelta64(1, \"D\"))\n assert not is_float(Timedelta(\"1 days\"))\n\n def test_is_datetime_dtypes(self):\n\n ts = pd.date_range(\"20130101\", periods=3)\n tsa = pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\n\n assert is_datetime64_dtype(\"datetime64\")\n assert is_datetime64_dtype(\"datetime64[ns]\")\n assert is_datetime64_dtype(ts)\n assert not is_datetime64_dtype(tsa)\n\n assert not is_datetime64_ns_dtype(\"datetime64\")\n assert is_datetime64_ns_dtype(\"datetime64[ns]\")\n assert is_datetime64_ns_dtype(ts)\n assert is_datetime64_ns_dtype(tsa)\n\n assert is_datetime64_any_dtype(\"datetime64\")\n assert is_datetime64_any_dtype(\"datetime64[ns]\")\n assert is_datetime64_any_dtype(ts)\n assert is_datetime64_any_dtype(tsa)\n\n assert not is_datetime64tz_dtype(\"datetime64\")\n assert not is_datetime64tz_dtype(\"datetime64[ns]\")\n assert not is_datetime64tz_dtype(ts)\n assert is_datetime64tz_dtype(tsa)\n\n for tz in [\"US/Eastern\", \"UTC\"]:\n dtype = f\"datetime64[ns, {tz}]\"\n assert not is_datetime64_dtype(dtype)\n assert is_datetime64tz_dtype(dtype)\n assert is_datetime64_ns_dtype(dtype)\n assert is_datetime64_any_dtype(dtype)\n\n def 
test_is_timedelta(self):\n assert is_timedelta64_dtype(\"timedelta64\")\n assert is_timedelta64_dtype(\"timedelta64[ns]\")\n assert not is_timedelta64_ns_dtype(\"timedelta64\")\n assert is_timedelta64_ns_dtype(\"timedelta64[ns]\")\n\n tdi = TimedeltaIndex([1e14, 2e14], dtype=\"timedelta64[ns]\")\n assert is_timedelta64_dtype(tdi)\n assert is_timedelta64_ns_dtype(tdi)\n assert is_timedelta64_ns_dtype(tdi.astype(\"timedelta64[ns]\"))\n\n # Conversion to Int64Index:\n assert not is_timedelta64_ns_dtype(tdi.astype(\"timedelta64\"))\n assert not is_timedelta64_ns_dtype(tdi.astype(\"timedelta64[h]\"))\n\n\nclass TestIsScalar:\n def test_is_scalar_builtin_scalars(self):\n assert is_scalar(None)\n assert is_scalar(True)\n assert is_scalar(False)\n assert is_scalar(Fraction())\n assert is_scalar(0.0)\n assert is_scalar(1)\n assert is_scalar(complex(2))\n assert is_scalar(float(\"NaN\"))\n assert is_scalar(np.nan)\n assert is_scalar(\"foobar\")\n assert is_scalar(b\"foobar\")\n assert is_scalar(datetime(2014, 1, 1))\n assert is_scalar(date(2014, 1, 1))\n assert is_scalar(time(12, 0))\n assert is_scalar(timedelta(hours=1))\n assert is_scalar(pd.NaT)\n assert is_scalar(pd.NA)\n\n def test_is_scalar_builtin_nonscalars(self):\n assert not is_scalar({})\n assert not is_scalar([])\n assert not is_scalar([1])\n assert not is_scalar(())\n assert not is_scalar((1,))\n assert not is_scalar(slice(None))\n assert not is_scalar(Ellipsis)\n\n def test_is_scalar_numpy_array_scalars(self):\n assert is_scalar(np.int64(1))\n assert is_scalar(np.float64(1.0))\n assert is_scalar(np.int32(1))\n assert is_scalar(np.complex64(2))\n assert is_scalar(np.object_(\"foobar\"))\n assert is_scalar(np.str_(\"foobar\"))\n assert is_scalar(np.unicode_(\"foobar\"))\n assert is_scalar(np.bytes_(b\"foobar\"))\n assert is_scalar(np.datetime64(\"2014-01-01\"))\n assert is_scalar(np.timedelta64(1, \"h\"))\n\n def test_is_scalar_numpy_zerodim_arrays(self):\n for zerodim in [\n np.array(1),\n np.array(\"foobar\"),\n np.array(np.datetime64(\"2014-01-01\")),\n np.array(np.timedelta64(1, \"h\")),\n np.array(np.datetime64(\"NaT\")),\n ]:\n assert not is_scalar(zerodim)\n assert is_scalar(lib.item_from_zerodim(zerodim))\n\n @pytest.mark.filterwarnings(\"ignore::PendingDeprecationWarning\")\n def test_is_scalar_numpy_arrays(self):\n for a in [\n np.array([]),\n np.array([[]]),\n np.matrix(\"1; 2\"),\n ]:\n assert not is_scalar(a)\n assert not is_scalar(MockNumpyLikeArray(a))\n\n def test_is_scalar_pandas_scalars(self):\n assert is_scalar(Timestamp(\"2014-01-01\"))\n assert is_scalar(Timedelta(hours=1))\n assert is_scalar(Period(\"2014-01-01\"))\n assert is_scalar(Interval(left=0, right=1))\n assert is_scalar(DateOffset(days=1))\n assert is_scalar(pd.offsets.Minute(3))\n\n def test_is_scalar_pandas_containers(self):\n assert not is_scalar(Series(dtype=object))\n assert not is_scalar(Series([1]))\n assert not is_scalar(DataFrame())\n assert not is_scalar(DataFrame([[1]]))\n assert not is_scalar(Index([]))\n assert not is_scalar(Index([1]))\n assert not is_scalar(Categorical([]))\n assert not is_scalar(DatetimeIndex([])._data)\n assert not is_scalar(TimedeltaIndex([])._data)\n assert not is_scalar(DatetimeIndex([])._data.to_period(\"D\"))\n assert not is_scalar(pd.array([1, 2, 3]))\n\n def test_is_scalar_number(self):\n # Number() is not recognied by PyNumber_Check, so by extension\n # is not recognized by is_scalar, but instances of non-abstract\n # subclasses are.\n\n class Numeric(Number):\n def __init__(self, value):\n self.value = value\n\n 
def __int__(self):\n return self.value\n\n num = Numeric(1)\n assert is_scalar(num)\n\n\ndef test_datetimeindex_from_empty_datetime64_array():\n for unit in [\"ms\", \"us\", \"ns\"]:\n idx = DatetimeIndex(np.array([], dtype=f\"datetime64[{unit}]\"))\n assert len(idx) == 0\n\n\ndef test_nan_to_nat_conversions():\n\n df = DataFrame(\n {\"A\": np.asarray(range(10), dtype=\"float64\"), \"B\": Timestamp(\"20010101\")}\n )\n df.iloc[3:6, :] = np.nan\n result = df.loc[4, \"B\"]\n assert result is pd.NaT\n\n s = df[\"B\"].copy()\n s[8:9] = np.nan\n assert s[8] is pd.NaT\n\n\n@td.skip_if_no_scipy\n@pytest.mark.filterwarnings(\"ignore::PendingDeprecationWarning\")\ndef test_is_scipy_sparse(spmatrix):\n assert is_scipy_sparse(spmatrix([[0, 1]]))\n assert not is_scipy_sparse(np.array([1]))\n\n\ndef test_ensure_int32():\n values = np.arange(10, dtype=np.int32)\n result = ensure_int32(values)\n assert result.dtype == np.int32\n\n values = np.arange(10, dtype=np.int64)\n result = ensure_int32(values)\n assert result.dtype == np.int32\n", "from datetime import (\n date,\n datetime,\n timedelta,\n)\nfrom functools import partial\nfrom io import BytesIO\nimport os\nimport re\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n get_option,\n set_option,\n)\nimport pandas._testing as tm\n\nfrom pandas.io.excel import (\n ExcelFile,\n ExcelWriter,\n _OpenpyxlWriter,\n _XlsxWriter,\n _XlwtWriter,\n register_writer,\n)\n\n\n@pytest.fixture\ndef path(ext):\n \"\"\"\n Fixture to open file for use in each test case.\n \"\"\"\n with tm.ensure_clean(ext) as file_path:\n yield file_path\n\n\n@pytest.fixture\ndef set_engine(engine, ext):\n \"\"\"\n Fixture to set engine for use in each test case.\n\n Rather than requiring `engine=...` to be provided explicitly as an\n argument in each test, this fixture sets a global option to dictate\n which engine should be used to write Excel files. 
After executing\n the test it rolls back said change to the global option.\n \"\"\"\n option_name = f\"io.excel.{ext.strip('.')}.writer\"\n prev_engine = get_option(option_name)\n set_option(option_name, engine)\n yield\n set_option(option_name, prev_engine) # Roll back option change\n\n\n@pytest.mark.parametrize(\n \"ext\",\n [\n pytest.param(\".xlsx\", marks=[td.skip_if_no(\"openpyxl\"), td.skip_if_no(\"xlrd\")]),\n pytest.param(\".xlsm\", marks=[td.skip_if_no(\"openpyxl\"), td.skip_if_no(\"xlrd\")]),\n pytest.param(\".xls\", marks=[td.skip_if_no(\"xlwt\"), td.skip_if_no(\"xlrd\")]),\n pytest.param(\n \".xlsx\", marks=[td.skip_if_no(\"xlsxwriter\"), td.skip_if_no(\"xlrd\")]\n ),\n pytest.param(\".ods\", marks=td.skip_if_no(\"odf\")),\n ],\n)\nclass TestRoundTrip:\n @pytest.mark.parametrize(\n \"header,expected\",\n [(None, DataFrame([np.nan] * 4)), (0, DataFrame({\"Unnamed: 0\": [np.nan] * 3}))],\n )\n def test_read_one_empty_col_no_header(self, ext, header, expected):\n # xref gh-12292\n filename = \"no_header\"\n df = DataFrame([[\"\", 1, 100], [\"\", 2, 200], [\"\", 3, 300], [\"\", 4, 400]])\n\n with tm.ensure_clean(ext) as path:\n df.to_excel(path, filename, index=False, header=False)\n result = pd.read_excel(\n path, sheet_name=filename, usecols=[0], header=header\n )\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"header,expected\",\n [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))],\n )\n def test_read_one_empty_col_with_header(self, ext, header, expected):\n filename = \"with_header\"\n df = DataFrame([[\"\", 1, 100], [\"\", 2, 200], [\"\", 3, 300], [\"\", 4, 400]])\n\n with tm.ensure_clean(ext) as path:\n df.to_excel(path, \"with_header\", index=False, header=True)\n result = pd.read_excel(\n path, sheet_name=filename, usecols=[0], header=header\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_set_column_names_in_parameter(self, ext):\n # GH 12870 : pass down column names associated with\n # keyword argument names\n refdf = DataFrame([[1, \"foo\"], [2, \"bar\"], [3, \"baz\"]], columns=[\"a\", \"b\"])\n\n with tm.ensure_clean(ext) as pth:\n with ExcelWriter(pth) as writer:\n refdf.to_excel(writer, \"Data_no_head\", header=False, index=False)\n refdf.to_excel(writer, \"Data_with_head\", index=False)\n\n refdf.columns = [\"A\", \"B\"]\n\n with ExcelFile(pth) as reader:\n xlsdf_no_head = pd.read_excel(\n reader, sheet_name=\"Data_no_head\", header=None, names=[\"A\", \"B\"]\n )\n xlsdf_with_head = pd.read_excel(\n reader,\n sheet_name=\"Data_with_head\",\n index_col=None,\n names=[\"A\", \"B\"],\n )\n\n tm.assert_frame_equal(xlsdf_no_head, refdf)\n tm.assert_frame_equal(xlsdf_with_head, refdf)\n\n def test_creating_and_reading_multiple_sheets(self, ext):\n # see gh-9450\n #\n # Test reading multiple sheets, from a runtime\n # created Excel file with multiple sheets.\n def tdf(col_sheet_name):\n d, i = [11, 22, 33], [1, 2, 3]\n return DataFrame(d, i, columns=[col_sheet_name])\n\n sheets = [\"AAA\", \"BBB\", \"CCC\"]\n\n dfs = [tdf(s) for s in sheets]\n dfs = dict(zip(sheets, dfs))\n\n with tm.ensure_clean(ext) as pth:\n with ExcelWriter(pth) as ew:\n for sheetname, df in dfs.items():\n df.to_excel(ew, sheetname)\n\n dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0)\n\n for s in sheets:\n tm.assert_frame_equal(dfs[s], dfs_returned[s])\n\n def test_read_excel_multiindex_empty_level(self, ext):\n # see gh-12453\n with tm.ensure_clean(ext) as path:\n df = DataFrame(\n {\n (\"One\", \"x\"): {0: 1},\n (\"Two\", 
\"X\"): {0: 3},\n (\"Two\", \"Y\"): {0: 7},\n (\"Zero\", \"\"): {0: 0},\n }\n )\n\n expected = DataFrame(\n {\n (\"One\", \"x\"): {0: 1},\n (\"Two\", \"X\"): {0: 3},\n (\"Two\", \"Y\"): {0: 7},\n (\"Zero\", \"Unnamed: 4_level_1\"): {0: 0},\n }\n )\n\n df.to_excel(path)\n actual = pd.read_excel(path, header=[0, 1], index_col=0)\n tm.assert_frame_equal(actual, expected)\n\n df = DataFrame(\n {\n (\"Beg\", \"\"): {0: 0},\n (\"Middle\", \"x\"): {0: 1},\n (\"Tail\", \"X\"): {0: 3},\n (\"Tail\", \"Y\"): {0: 7},\n }\n )\n\n expected = DataFrame(\n {\n (\"Beg\", \"Unnamed: 1_level_1\"): {0: 0},\n (\"Middle\", \"x\"): {0: 1},\n (\"Tail\", \"X\"): {0: 3},\n (\"Tail\", \"Y\"): {0: 7},\n }\n )\n\n df.to_excel(path)\n actual = pd.read_excel(path, header=[0, 1], index_col=0)\n tm.assert_frame_equal(actual, expected)\n\n @pytest.mark.parametrize(\"c_idx_names\", [True, False])\n @pytest.mark.parametrize(\"r_idx_names\", [True, False])\n @pytest.mark.parametrize(\"c_idx_levels\", [1, 3])\n @pytest.mark.parametrize(\"r_idx_levels\", [1, 3])\n def test_excel_multindex_roundtrip(\n self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels, request\n ):\n # see gh-4679\n with tm.ensure_clean(ext) as pth:\n if (c_idx_levels == 1 and c_idx_names) and not (\n r_idx_levels == 3 and not r_idx_names\n ):\n mark = pytest.mark.xfail(\n reason=\"Column index name cannot be serialized unless \"\n \"it's a MultiIndex\"\n )\n request.node.add_marker(mark)\n\n # Empty name case current read in as\n # unnamed levels, not Nones.\n check_names = r_idx_names or r_idx_levels <= 1\n\n df = tm.makeCustomDataframe(\n 5, 5, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels\n )\n df.to_excel(pth)\n\n act = pd.read_excel(\n pth,\n index_col=list(range(r_idx_levels)),\n header=list(range(c_idx_levels)),\n )\n tm.assert_frame_equal(df, act, check_names=check_names)\n\n df.iloc[0, :] = np.nan\n df.to_excel(pth)\n\n act = pd.read_excel(\n pth,\n index_col=list(range(r_idx_levels)),\n header=list(range(c_idx_levels)),\n )\n tm.assert_frame_equal(df, act, check_names=check_names)\n\n df.iloc[-1, :] = np.nan\n df.to_excel(pth)\n act = pd.read_excel(\n pth,\n index_col=list(range(r_idx_levels)),\n header=list(range(c_idx_levels)),\n )\n tm.assert_frame_equal(df, act, check_names=check_names)\n\n def test_read_excel_parse_dates(self, ext):\n # see gh-11544, gh-12051\n df = DataFrame(\n {\"col\": [1, 2, 3], \"date_strings\": pd.date_range(\"2012-01-01\", periods=3)}\n )\n df2 = df.copy()\n df2[\"date_strings\"] = df2[\"date_strings\"].dt.strftime(\"%m/%d/%Y\")\n\n with tm.ensure_clean(ext) as pth:\n df2.to_excel(pth)\n\n res = pd.read_excel(pth, index_col=0)\n tm.assert_frame_equal(df2, res)\n\n res = pd.read_excel(pth, parse_dates=[\"date_strings\"], index_col=0)\n tm.assert_frame_equal(df, res)\n\n date_parser = lambda x: datetime.strptime(x, \"%m/%d/%Y\")\n res = pd.read_excel(\n pth, parse_dates=[\"date_strings\"], date_parser=date_parser, index_col=0\n )\n tm.assert_frame_equal(df, res)\n\n def test_multiindex_interval_datetimes(self, ext):\n # GH 30986\n midx = MultiIndex.from_arrays(\n [\n range(4),\n pd.interval_range(\n start=pd.Timestamp(\"2020-01-01\"), periods=4, freq=\"6M\"\n ),\n ]\n )\n df = DataFrame(range(4), index=midx)\n with tm.ensure_clean(ext) as pth:\n df.to_excel(pth)\n result = pd.read_excel(pth, index_col=[0, 1])\n expected = DataFrame(\n range(4),\n MultiIndex.from_arrays(\n [\n range(4),\n [\n \"(2020-01-31, 2020-07-31]\",\n \"(2020-07-31, 2021-01-31]\",\n \"(2021-01-31, 2021-07-31]\",\n \"(2021-07-31, 
2022-01-31]\",\n ],\n ]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n \"engine,ext\",\n [\n pytest.param(\n \"openpyxl\",\n \".xlsx\",\n marks=[td.skip_if_no(\"openpyxl\"), td.skip_if_no(\"xlrd\")],\n ),\n pytest.param(\n \"openpyxl\",\n \".xlsm\",\n marks=[td.skip_if_no(\"openpyxl\"), td.skip_if_no(\"xlrd\")],\n ),\n pytest.param(\n \"xlwt\", \".xls\", marks=[td.skip_if_no(\"xlwt\"), td.skip_if_no(\"xlrd\")]\n ),\n pytest.param(\n \"xlsxwriter\",\n \".xlsx\",\n marks=[td.skip_if_no(\"xlsxwriter\"), td.skip_if_no(\"xlrd\")],\n ),\n pytest.param(\"odf\", \".ods\", marks=td.skip_if_no(\"odf\")),\n ],\n)\n@pytest.mark.usefixtures(\"set_engine\")\nclass TestExcelWriter:\n def test_excel_sheet_size(self, path):\n\n # GH 26080\n breaking_row_count = 2 ** 20 + 1\n breaking_col_count = 2 ** 14 + 1\n # purposely using two arrays to prevent memory issues while testing\n row_arr = np.zeros(shape=(breaking_row_count, 1))\n col_arr = np.zeros(shape=(1, breaking_col_count))\n row_df = DataFrame(row_arr)\n col_df = DataFrame(col_arr)\n\n msg = \"sheet is too large\"\n with pytest.raises(ValueError, match=msg):\n row_df.to_excel(path)\n\n with pytest.raises(ValueError, match=msg):\n col_df.to_excel(path)\n\n def test_excel_sheet_by_name_raise(self, path, engine):\n gt = DataFrame(np.random.randn(10, 2))\n gt.to_excel(path)\n\n with ExcelFile(path) as xl:\n df = pd.read_excel(xl, sheet_name=0, index_col=0)\n\n tm.assert_frame_equal(gt, df)\n\n msg = \"Worksheet named '0' not found\"\n with pytest.raises(ValueError, match=msg):\n pd.read_excel(xl, \"0\")\n\n def test_excel_writer_context_manager(self, frame, path):\n with ExcelWriter(path) as writer:\n frame.to_excel(writer, \"Data1\")\n frame2 = frame.copy()\n frame2.columns = frame.columns[::-1]\n frame2.to_excel(writer, \"Data2\")\n\n with ExcelFile(path) as reader:\n found_df = pd.read_excel(reader, sheet_name=\"Data1\", index_col=0)\n found_df2 = pd.read_excel(reader, sheet_name=\"Data2\", index_col=0)\n\n tm.assert_frame_equal(found_df, frame)\n tm.assert_frame_equal(found_df2, frame2)\n\n def test_roundtrip(self, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # test roundtrip\n frame.to_excel(path, \"test1\")\n recons = pd.read_excel(path, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"test1\", index=False)\n recons = pd.read_excel(path, sheet_name=\"test1\", index_col=None)\n recons.index = frame.index\n tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"test1\", na_rep=\"NA\")\n recons = pd.read_excel(path, sheet_name=\"test1\", index_col=0, na_values=[\"NA\"])\n tm.assert_frame_equal(frame, recons)\n\n # GH 3611\n frame.to_excel(path, \"test1\", na_rep=\"88\")\n recons = pd.read_excel(path, sheet_name=\"test1\", index_col=0, na_values=[\"88\"])\n tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"test1\", na_rep=\"88\")\n recons = pd.read_excel(\n path, sheet_name=\"test1\", index_col=0, na_values=[88, 88.0]\n )\n tm.assert_frame_equal(frame, recons)\n\n # GH 6573\n frame.to_excel(path, \"Sheet1\")\n recons = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"0\")\n recons = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(frame, recons)\n\n # GH 8825 Pandas Series 
should provide to_excel method\n s = frame[\"A\"]\n s.to_excel(path)\n recons = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(s.to_frame(), recons)\n\n def test_mixed(self, frame, path):\n mixed_frame = frame.copy()\n mixed_frame[\"foo\"] = \"bar\"\n\n mixed_frame.to_excel(path, \"test1\")\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(mixed_frame, recons)\n\n def test_ts_frame(self, tsframe, path):\n df = tsframe\n\n # freq doesn't round-trip\n index = pd.DatetimeIndex(np.asarray(df.index), freq=None)\n df.index = index\n\n df.to_excel(path, \"test1\")\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(df, recons)\n\n def test_basics_with_nan(self, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n @pytest.mark.parametrize(\"np_type\", [np.int8, np.int16, np.int32, np.int64])\n def test_int_types(self, np_type, path):\n # Test np.int values read come back as int\n # (rather than float which is Excel's format).\n df = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np_type)\n df.to_excel(path, \"test1\")\n\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n\n int_frame = df.astype(np.int64)\n tm.assert_frame_equal(int_frame, recons)\n\n recons2 = pd.read_excel(path, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(int_frame, recons2)\n\n # Test with convert_float=False comes back as float.\n float_frame = df.astype(float)\n float_frame.columns = float_frame.columns.astype(float)\n float_frame.index = float_frame.index.astype(float)\n with tm.assert_produces_warning(\n FutureWarning, match=\"convert_float is deprecated\"\n ):\n recons = pd.read_excel(\n path, sheet_name=\"test1\", convert_float=False, index_col=0\n )\n tm.assert_frame_equal(recons, float_frame)\n\n @pytest.mark.parametrize(\"np_type\", [np.float16, np.float32, np.float64])\n def test_float_types(self, np_type, path):\n # Test np.float values read come back as float.\n df = DataFrame(np.random.random_sample(10), dtype=np_type)\n df.to_excel(path, \"test1\")\n\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0).astype(\n np_type\n )\n\n tm.assert_frame_equal(df, recons)\n\n @pytest.mark.parametrize(\"np_type\", [np.bool8, np.bool_])\n def test_bool_types(self, np_type, path):\n # Test np.bool8 and np.bool_ values read come back as float.\n df = DataFrame([1, 0, True, False], dtype=np_type)\n df.to_excel(path, \"test1\")\n\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0).astype(\n np_type\n )\n\n tm.assert_frame_equal(df, recons)\n\n def test_inf_roundtrip(self, path):\n df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])\n df.to_excel(path, \"test1\")\n\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n\n tm.assert_frame_equal(df, recons)\n\n def test_sheets(self, frame, tsframe, path):\n\n # freq doesn't round-trip\n index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)\n tsframe.index = index\n\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", 
\"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # Test writing to separate sheets\n with ExcelWriter(path) as writer:\n frame.to_excel(writer, \"test1\")\n tsframe.to_excel(writer, \"test2\")\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(frame, recons)\n recons = pd.read_excel(reader, sheet_name=\"test2\", index_col=0)\n tm.assert_frame_equal(tsframe, recons)\n assert 2 == len(reader.sheet_names)\n assert \"test1\" == reader.sheet_names[0]\n assert \"test2\" == reader.sheet_names[1]\n\n def test_colaliases(self, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # column aliases\n col_aliases = Index([\"AA\", \"X\", \"Y\", \"Z\"])\n frame.to_excel(path, \"test1\", header=col_aliases)\n with ExcelFile(path) as reader:\n rs = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n xp = frame.copy()\n xp.columns = col_aliases\n tm.assert_frame_equal(xp, rs)\n\n def test_roundtrip_indexlabels(self, merge_cells, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # test index_label\n df = DataFrame(np.random.randn(10, 2)) >= 0\n df.to_excel(path, \"test1\", index_label=[\"test\"], merge_cells=merge_cells)\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0).astype(\n np.int64\n )\n df.index.names = [\"test\"]\n assert df.index.names == recons.index.names\n\n df = DataFrame(np.random.randn(10, 2)) >= 0\n df.to_excel(\n path,\n \"test1\",\n index_label=[\"test\", \"dummy\", \"dummy2\"],\n merge_cells=merge_cells,\n )\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0).astype(\n np.int64\n )\n df.index.names = [\"test\"]\n assert df.index.names == recons.index.names\n\n df = DataFrame(np.random.randn(10, 2)) >= 0\n df.to_excel(path, \"test1\", index_label=\"test\", merge_cells=merge_cells)\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0).astype(\n np.int64\n )\n df.index.names = [\"test\"]\n tm.assert_frame_equal(df, recons.astype(bool))\n\n frame.to_excel(\n path,\n \"test1\",\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=False,\n merge_cells=merge_cells,\n )\n # take 'A' and 'B' as indexes (same row as cols 'C', 'D')\n df = frame.copy()\n df = df.set_index([\"A\", \"B\"])\n\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=[0, 1])\n tm.assert_frame_equal(df, recons)\n\n def test_excel_roundtrip_indexname(self, merge_cells, path):\n df = DataFrame(np.random.randn(10, 4))\n df.index.name = \"foo\"\n\n df.to_excel(path, merge_cells=merge_cells)\n\n with ExcelFile(path) as xf:\n result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0)\n\n tm.assert_frame_equal(result, df)\n assert result.index.name == \"foo\"\n\n def test_excel_roundtrip_datetime(self, merge_cells, tsframe, path):\n # datetime.date, not sure what to test here exactly\n\n # freq does not round-trip\n index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)\n tsframe.index 
= index\n\n tsf = tsframe.copy()\n\n tsf.index = [x.date() for x in tsframe.index]\n tsf.to_excel(path, \"test1\", merge_cells=merge_cells)\n\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n\n tm.assert_frame_equal(tsframe, recons)\n\n def test_excel_date_datetime_format(self, engine, ext, path):\n # see gh-4133\n #\n # Excel output format strings\n df = DataFrame(\n [\n [date(2014, 1, 31), date(1999, 9, 24)],\n [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],\n ],\n index=[\"DATE\", \"DATETIME\"],\n columns=[\"X\", \"Y\"],\n )\n df_expected = DataFrame(\n [\n [datetime(2014, 1, 31), datetime(1999, 9, 24)],\n [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],\n ],\n index=[\"DATE\", \"DATETIME\"],\n columns=[\"X\", \"Y\"],\n )\n\n with tm.ensure_clean(ext) as filename2:\n with ExcelWriter(path) as writer1:\n df.to_excel(writer1, \"test1\")\n\n with ExcelWriter(\n filename2,\n date_format=\"DD.MM.YYYY\",\n datetime_format=\"DD.MM.YYYY HH-MM-SS\",\n ) as writer2:\n df.to_excel(writer2, \"test1\")\n\n with ExcelFile(path) as reader1:\n rs1 = pd.read_excel(reader1, sheet_name=\"test1\", index_col=0)\n\n with ExcelFile(filename2) as reader2:\n rs2 = pd.read_excel(reader2, sheet_name=\"test1\", index_col=0)\n\n tm.assert_frame_equal(rs1, rs2)\n\n # Since the reader returns a datetime object for dates,\n # we need to use df_expected to check the result.\n tm.assert_frame_equal(rs2, df_expected)\n\n def test_to_excel_interval_no_labels(self, path):\n # see gh-19242\n #\n # Test writing Interval without labels.\n df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)\n expected = df.copy()\n\n df[\"new\"] = pd.cut(df[0], 10)\n expected[\"new\"] = pd.cut(expected[0], 10).astype(str)\n\n df.to_excel(path, \"test1\")\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(expected, recons)\n\n def test_to_excel_interval_labels(self, path):\n # see gh-19242\n #\n # Test writing Interval with labels.\n df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)\n expected = df.copy()\n intervals = pd.cut(\n df[0], 10, labels=[\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"]\n )\n df[\"new\"] = intervals\n expected[\"new\"] = pd.Series(list(intervals))\n\n df.to_excel(path, \"test1\")\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(expected, recons)\n\n def test_to_excel_timedelta(self, path):\n # see gh-19242, gh-9155\n #\n # Test writing timedelta to xls.\n df = DataFrame(\n np.random.randint(-10, 10, size=(20, 1)), columns=[\"A\"], dtype=np.int64\n )\n expected = df.copy()\n\n df[\"new\"] = df[\"A\"].apply(lambda x: timedelta(seconds=x))\n expected[\"new\"] = expected[\"A\"].apply(\n lambda x: timedelta(seconds=x).total_seconds() / 86400\n )\n\n df.to_excel(path, \"test1\")\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(expected, recons)\n\n def test_to_excel_periodindex(self, tsframe, path):\n xp = tsframe.resample(\"M\", kind=\"period\").mean()\n\n xp.to_excel(path, \"sht1\")\n\n with ExcelFile(path) as reader:\n rs = pd.read_excel(reader, sheet_name=\"sht1\", index_col=0)\n tm.assert_frame_equal(xp, rs.to_period(\"M\"))\n\n def test_to_excel_multiindex(self, merge_cells, frame, path):\n arrays = np.arange(len(frame.index) * 
2).reshape(2, -1)\n new_index = MultiIndex.from_arrays(arrays, names=[\"first\", \"second\"])\n frame.index = new_index\n\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n\n # round trip\n frame.to_excel(path, \"test1\", merge_cells=merge_cells)\n with ExcelFile(path) as reader:\n df = pd.read_excel(reader, sheet_name=\"test1\", index_col=[0, 1])\n tm.assert_frame_equal(frame, df)\n\n # GH13511\n def test_to_excel_multiindex_nan_label(self, merge_cells, path):\n df = DataFrame({\"A\": [None, 2, 3], \"B\": [10, 20, 30], \"C\": np.random.sample(3)})\n df = df.set_index([\"A\", \"B\"])\n\n df.to_excel(path, merge_cells=merge_cells)\n df1 = pd.read_excel(path, index_col=[0, 1])\n tm.assert_frame_equal(df, df1)\n\n # Test for Issue 11328. If column indices are integers, make\n # sure they are handled correctly for either setting of\n # merge_cells\n def test_to_excel_multiindex_cols(self, merge_cells, frame, path):\n arrays = np.arange(len(frame.index) * 2).reshape(2, -1)\n new_index = MultiIndex.from_arrays(arrays, names=[\"first\", \"second\"])\n frame.index = new_index\n\n new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)])\n frame.columns = new_cols_index\n header = [0, 1]\n if not merge_cells:\n header = 0\n\n # round trip\n frame.to_excel(path, \"test1\", merge_cells=merge_cells)\n with ExcelFile(path) as reader:\n df = pd.read_excel(\n reader, sheet_name=\"test1\", header=header, index_col=[0, 1]\n )\n if not merge_cells:\n fm = frame.columns.format(sparsify=False, adjoin=False, names=False)\n frame.columns = [\".\".join(map(str, q)) for q in zip(*fm)]\n tm.assert_frame_equal(frame, df)\n\n def test_to_excel_multiindex_dates(self, merge_cells, tsframe, path):\n # try multiindex with dates\n new_index = [tsframe.index, np.arange(len(tsframe.index))]\n tsframe.index = MultiIndex.from_arrays(new_index)\n\n tsframe.index.names = [\"time\", \"foo\"]\n tsframe.to_excel(path, \"test1\", merge_cells=merge_cells)\n with ExcelFile(path) as reader:\n recons = pd.read_excel(reader, sheet_name=\"test1\", index_col=[0, 1])\n\n tm.assert_frame_equal(tsframe, recons)\n assert recons.index.names == (\"time\", \"foo\")\n\n def test_to_excel_multiindex_no_write_index(self, path):\n # Test writing and re-reading a MI without the index. 
GH 5616.\n\n # Initial non-MI frame.\n frame1 = DataFrame({\"a\": [10, 20], \"b\": [30, 40], \"c\": [50, 60]})\n\n # Add a MI.\n frame2 = frame1.copy()\n multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])\n frame2.index = multi_index\n\n # Write out to Excel without the index.\n frame2.to_excel(path, \"test1\", index=False)\n\n # Read it back in.\n with ExcelFile(path) as reader:\n frame3 = pd.read_excel(reader, sheet_name=\"test1\")\n\n # Test that it is the same as the initial frame.\n tm.assert_frame_equal(frame1, frame3)\n\n def test_to_excel_float_format(self, path):\n df = DataFrame(\n [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n df.to_excel(path, \"test1\", float_format=\"%.2f\")\n\n with ExcelFile(path) as reader:\n result = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n\n expected = DataFrame(\n [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_to_excel_output_encoding(self, ext):\n # Avoid mixed inferred_type.\n df = DataFrame(\n [[\"\\u0192\", \"\\u0193\", \"\\u0194\"], [\"\\u0195\", \"\\u0196\", \"\\u0197\"]],\n index=[\"A\\u0192\", \"B\"],\n columns=[\"X\\u0193\", \"Y\", \"Z\"],\n )\n\n with tm.ensure_clean(\"__tmp_to_excel_float_format__.\" + ext) as filename:\n df.to_excel(filename, sheet_name=\"TestSheet\", encoding=\"utf8\")\n result = pd.read_excel(filename, sheet_name=\"TestSheet\", index_col=0)\n tm.assert_frame_equal(result, df)\n\n def test_to_excel_unicode_filename(self, ext, path):\n with tm.ensure_clean(\"\\u0192u.\" + ext) as filename:\n try:\n f = open(filename, \"wb\")\n except UnicodeEncodeError:\n pytest.skip(\"No unicode file names on this system\")\n finally:\n f.close()\n\n df = DataFrame(\n [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n df.to_excel(filename, \"test1\", float_format=\"%.2f\")\n\n with ExcelFile(filename) as reader:\n result = pd.read_excel(reader, sheet_name=\"test1\", index_col=0)\n\n expected = DataFrame(\n [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"use_headers\", [True, False])\n @pytest.mark.parametrize(\"r_idx_nlevels\", [1, 2, 3])\n @pytest.mark.parametrize(\"c_idx_nlevels\", [1, 2, 3])\n def test_excel_010_hemstring(\n self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path\n ):\n def roundtrip(data, header=True, parser_hdr=0, index=True):\n data.to_excel(path, header=header, merge_cells=merge_cells, index=index)\n\n with ExcelFile(path) as xf:\n return pd.read_excel(\n xf, sheet_name=xf.sheet_names[0], header=parser_hdr\n )\n\n # Basic test.\n parser_header = 0 if use_headers else None\n res = roundtrip(DataFrame([0]), use_headers, parser_header)\n\n assert res.shape == (1, 2)\n assert res.iloc[0, 0] is not np.nan\n\n # More complex tests with multi-index.\n nrows = 5\n ncols = 3\n\n # ensure limited functionality in 0.10\n # override of gh-2370 until sorted out in 0.11\n\n df = tm.makeCustomDataframe(\n nrows, ncols, r_idx_nlevels=r_idx_nlevels, c_idx_nlevels=c_idx_nlevels\n )\n\n # This if will be removed once multi-column Excel writing\n # is implemented. 
For now fixing gh-9794.\n if c_idx_nlevels > 1:\n msg = (\n \"Writing to Excel with MultiIndex columns and no index \"\n \"\\\\('index'=False\\\\) is not yet implemented.\"\n )\n with pytest.raises(NotImplementedError, match=msg):\n roundtrip(df, use_headers, index=False)\n else:\n res = roundtrip(df, use_headers)\n\n if use_headers:\n assert res.shape == (nrows, ncols + r_idx_nlevels)\n else:\n # First row taken as columns.\n assert res.shape == (nrows - 1, ncols + r_idx_nlevels)\n\n # No NaNs.\n for r in range(len(res.index)):\n for c in range(len(res.columns)):\n assert res.iloc[r, c] is not np.nan\n\n def test_duplicated_columns(self, path):\n # see gh-5235\n df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=[\"A\", \"B\", \"B\"])\n df.to_excel(path, \"test1\")\n expected = DataFrame(\n [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=[\"A\", \"B\", \"B.1\"]\n )\n\n # By default, we mangle.\n result = pd.read_excel(path, sheet_name=\"test1\", index_col=0)\n tm.assert_frame_equal(result, expected)\n\n # Explicitly, we pass in the parameter.\n result = pd.read_excel(\n path, sheet_name=\"test1\", index_col=0, mangle_dupe_cols=True\n )\n tm.assert_frame_equal(result, expected)\n\n # see gh-11007, gh-10970\n df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=[\"A\", \"B\", \"A\", \"B\"])\n df.to_excel(path, \"test1\")\n\n result = pd.read_excel(path, sheet_name=\"test1\", index_col=0)\n expected = DataFrame(\n [[1, 2, 3, 4], [5, 6, 7, 8]], columns=[\"A\", \"B\", \"A.1\", \"B.1\"]\n )\n tm.assert_frame_equal(result, expected)\n\n # see gh-10982\n df.to_excel(path, \"test1\", index=False, header=False)\n result = pd.read_excel(path, sheet_name=\"test1\", header=None)\n\n expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])\n tm.assert_frame_equal(result, expected)\n\n msg = \"Setting mangle_dupe_cols=False is not supported yet\"\n with pytest.raises(ValueError, match=msg):\n pd.read_excel(path, sheet_name=\"test1\", header=None, mangle_dupe_cols=False)\n\n def test_swapped_columns(self, path):\n # Test for issue #5427.\n write_frame = DataFrame({\"A\": [1, 1, 1], \"B\": [2, 2, 2]})\n write_frame.to_excel(path, \"test1\", columns=[\"B\", \"A\"])\n\n read_frame = pd.read_excel(path, sheet_name=\"test1\", header=0)\n\n tm.assert_series_equal(write_frame[\"A\"], read_frame[\"A\"])\n tm.assert_series_equal(write_frame[\"B\"], read_frame[\"B\"])\n\n def test_invalid_columns(self, path):\n # see gh-10982\n write_frame = DataFrame({\"A\": [1, 1, 1], \"B\": [2, 2, 2]})\n\n with pytest.raises(KeyError, match=\"Not all names specified\"):\n write_frame.to_excel(path, \"test1\", columns=[\"B\", \"C\"])\n\n with pytest.raises(\n KeyError, match=\"'passes columns are not ALL present dataframe'\"\n ):\n write_frame.to_excel(path, \"test1\", columns=[\"C\", \"D\"])\n\n @pytest.mark.parametrize(\n \"to_excel_index,read_excel_index_col\",\n [\n (True, 0), # Include index in write to file\n (False, None), # Dont include index in write to file\n ],\n )\n def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):\n # GH 31677\n write_frame = DataFrame({\"A\": [1, 1, 1], \"B\": [2, 2, 2], \"C\": [3, 3, 3]})\n write_frame.to_excel(\n path, \"col_subset_bug\", columns=[\"A\", \"B\"], index=to_excel_index\n )\n\n expected = write_frame[[\"A\", \"B\"]]\n read_frame = pd.read_excel(\n path, sheet_name=\"col_subset_bug\", index_col=read_excel_index_col\n )\n\n tm.assert_frame_equal(expected, read_frame)\n\n def test_comment_arg(self, path):\n # see gh-18735\n #\n # Test the comment argument 
functionality to pd.read_excel.\n\n # Create file to read in.\n df = DataFrame({\"A\": [\"one\", \"#one\", \"one\"], \"B\": [\"two\", \"two\", \"#two\"]})\n df.to_excel(path, \"test_c\")\n\n # Read file without comment arg.\n result1 = pd.read_excel(path, sheet_name=\"test_c\", index_col=0)\n\n result1.iloc[1, 0] = None\n result1.iloc[1, 1] = None\n result1.iloc[2, 1] = None\n\n result2 = pd.read_excel(path, sheet_name=\"test_c\", comment=\"#\", index_col=0)\n tm.assert_frame_equal(result1, result2)\n\n def test_comment_default(self, path):\n # Re issue #18735\n # Test the comment argument default to pd.read_excel\n\n # Create file to read in\n df = DataFrame({\"A\": [\"one\", \"#one\", \"one\"], \"B\": [\"two\", \"two\", \"#two\"]})\n df.to_excel(path, \"test_c\")\n\n # Read file with default and explicit comment=None\n result1 = pd.read_excel(path, sheet_name=\"test_c\")\n result2 = pd.read_excel(path, sheet_name=\"test_c\", comment=None)\n tm.assert_frame_equal(result1, result2)\n\n def test_comment_used(self, path):\n # see gh-18735\n #\n # Test the comment argument is working as expected when used.\n\n # Create file to read in.\n df = DataFrame({\"A\": [\"one\", \"#one\", \"one\"], \"B\": [\"two\", \"two\", \"#two\"]})\n df.to_excel(path, \"test_c\")\n\n # Test read_frame_comment against manually produced expected output.\n expected = DataFrame({\"A\": [\"one\", None, \"one\"], \"B\": [\"two\", None, None]})\n result = pd.read_excel(path, sheet_name=\"test_c\", comment=\"#\", index_col=0)\n tm.assert_frame_equal(result, expected)\n\n def test_comment_empty_line(self, path):\n # Re issue #18735\n # Test that pd.read_excel ignores commented lines at the end of file\n\n df = DataFrame({\"a\": [\"1\", \"#2\"], \"b\": [\"2\", \"3\"]})\n df.to_excel(path, index=False)\n\n # Test that all-comment lines at EoF are ignored\n expected = DataFrame({\"a\": [1], \"b\": [2]})\n result = pd.read_excel(path, comment=\"#\")\n tm.assert_frame_equal(result, expected)\n\n def test_datetimes(self, path):\n\n # Test writing and reading datetimes. For issue #9139. 
(xref #9185)\n datetimes = [\n datetime(2013, 1, 13, 1, 2, 3),\n datetime(2013, 1, 13, 2, 45, 56),\n datetime(2013, 1, 13, 4, 29, 49),\n datetime(2013, 1, 13, 6, 13, 42),\n datetime(2013, 1, 13, 7, 57, 35),\n datetime(2013, 1, 13, 9, 41, 28),\n datetime(2013, 1, 13, 11, 25, 21),\n datetime(2013, 1, 13, 13, 9, 14),\n datetime(2013, 1, 13, 14, 53, 7),\n datetime(2013, 1, 13, 16, 37, 0),\n datetime(2013, 1, 13, 18, 20, 52),\n ]\n\n write_frame = DataFrame({\"A\": datetimes})\n write_frame.to_excel(path, \"Sheet1\")\n if path.endswith(\"xlsx\") or path.endswith(\"xlsm\"):\n pytest.skip(\n \"Defaults to openpyxl and fails with floating point error on \"\n \"datetimes; may be fixed on newer versions of openpyxl - GH #38644\"\n )\n read_frame = pd.read_excel(path, sheet_name=\"Sheet1\", header=0)\n\n tm.assert_series_equal(write_frame[\"A\"], read_frame[\"A\"])\n\n def test_bytes_io(self, engine):\n # see gh-7074\n with BytesIO() as bio:\n df = DataFrame(np.random.randn(10, 2))\n\n # Pass engine explicitly, as there is no file path to infer from.\n with ExcelWriter(bio, engine=engine) as writer:\n df.to_excel(writer)\n\n bio.seek(0)\n reread_df = pd.read_excel(bio, index_col=0)\n tm.assert_frame_equal(df, reread_df)\n\n def test_write_lists_dict(self, path):\n # see gh-8188.\n df = DataFrame(\n {\n \"mixed\": [\"a\", [\"b\", \"c\"], {\"d\": \"e\", \"f\": 2}],\n \"numeric\": [1, 2, 3.0],\n \"str\": [\"apple\", \"banana\", \"cherry\"],\n }\n )\n df.to_excel(path, \"Sheet1\")\n read = pd.read_excel(path, sheet_name=\"Sheet1\", header=0, index_col=0)\n\n expected = df.copy()\n expected.mixed = expected.mixed.apply(str)\n expected.numeric = expected.numeric.astype(\"int64\")\n\n tm.assert_frame_equal(read, expected)\n\n def test_render_as_column_name(self, path):\n # see gh-34331\n df = DataFrame({\"render\": [1, 2], \"data\": [3, 4]})\n df.to_excel(path, \"Sheet1\")\n read = pd.read_excel(path, \"Sheet1\", index_col=0)\n expected = df\n tm.assert_frame_equal(read, expected)\n\n def test_true_and_false_value_options(self, path):\n # see gh-13347\n df = DataFrame([[\"foo\", \"bar\"]], columns=[\"col1\", \"col2\"])\n expected = df.replace({\"foo\": True, \"bar\": False})\n\n df.to_excel(path)\n read_frame = pd.read_excel(\n path, true_values=[\"foo\"], false_values=[\"bar\"], index_col=0\n )\n tm.assert_frame_equal(read_frame, expected)\n\n def test_freeze_panes(self, path):\n # see gh-15160\n expected = DataFrame([[1, 2], [3, 4]], columns=[\"col1\", \"col2\"])\n expected.to_excel(path, \"Sheet1\", freeze_panes=(1, 1))\n\n result = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(result, expected)\n\n def test_path_path_lib(self, engine, ext):\n df = tm.makeDataFrame()\n writer = partial(df.to_excel, engine=engine)\n\n reader = partial(pd.read_excel, index_col=0)\n result = tm.round_trip_pathlib(writer, reader, path=f\"foo{ext}\")\n tm.assert_frame_equal(result, df)\n\n def test_path_local_path(self, engine, ext):\n df = tm.makeDataFrame()\n writer = partial(df.to_excel, engine=engine)\n\n reader = partial(pd.read_excel, index_col=0)\n result = tm.round_trip_localpath(writer, reader, path=f\"foo{ext}\")\n tm.assert_frame_equal(result, df)\n\n def test_merged_cell_custom_objects(self, merge_cells, path):\n # see GH-27006\n mi = MultiIndex.from_tuples(\n [\n (pd.Period(\"2018\"), pd.Period(\"2018Q1\")),\n (pd.Period(\"2018\"), pd.Period(\"2018Q2\")),\n ]\n )\n expected = DataFrame(np.ones((2, 2)), columns=mi)\n expected.to_excel(path)\n with tm.assert_produces_warning(\n FutureWarning, 
match=\"convert_float is deprecated\"\n ):\n result = pd.read_excel(\n path, header=[0, 1], index_col=0, convert_float=False\n )\n # need to convert PeriodIndexes to standard Indexes for assert equal\n expected.columns = expected.columns.set_levels(\n [[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]],\n level=[0, 1],\n )\n expected.index = expected.index.astype(np.float64)\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path):\n # GH 27008, GH 7056\n tz = tz_aware_fixture\n data = pd.Timestamp(\"2019\", tz=tz)\n df = DataFrame([data], dtype=dtype)\n with pytest.raises(ValueError, match=\"Excel does not support\"):\n df.to_excel(path)\n\n data = data.to_pydatetime()\n df = DataFrame([data], dtype=dtype)\n with pytest.raises(ValueError, match=\"Excel does not support\"):\n df.to_excel(path)\n\n def test_excel_duplicate_columns_with_names(self, path):\n # GH#39695\n df = DataFrame({\"A\": [0, 1], \"B\": [10, 11]})\n df.to_excel(path, columns=[\"A\", \"B\", \"A\"], index=False)\n\n result = pd.read_excel(path)\n expected = DataFrame([[0, 10, 0], [1, 11, 1]], columns=[\"A\", \"B\", \"A.1\"])\n tm.assert_frame_equal(result, expected)\n\n def test_if_sheet_exists_raises(self, ext):\n # GH 40230\n msg = \"if_sheet_exists is only valid in append mode (mode='a')\"\n\n with tm.ensure_clean(ext) as f:\n with pytest.raises(ValueError, match=re.escape(msg)):\n ExcelWriter(f, if_sheet_exists=\"replace\")\n\n\nclass TestExcelWriterEngineTests:\n @pytest.mark.parametrize(\n \"klass,ext\",\n [\n pytest.param(_XlsxWriter, \".xlsx\", marks=td.skip_if_no(\"xlsxwriter\")),\n pytest.param(_OpenpyxlWriter, \".xlsx\", marks=td.skip_if_no(\"openpyxl\")),\n pytest.param(_XlwtWriter, \".xls\", marks=td.skip_if_no(\"xlwt\")),\n ],\n )\n def test_ExcelWriter_dispatch(self, klass, ext):\n with tm.ensure_clean(ext) as path:\n with ExcelWriter(path) as writer:\n if ext == \".xlsx\" and td.safe_import(\"xlsxwriter\"):\n # xlsxwriter has preference over openpyxl if both installed\n assert isinstance(writer, _XlsxWriter)\n else:\n assert isinstance(writer, klass)\n\n def test_ExcelWriter_dispatch_raises(self):\n with pytest.raises(ValueError, match=\"No engine\"):\n ExcelWriter(\"nothing\")\n\n def test_register_writer(self):\n # some awkward mocking to test out dispatch and such actually works\n called_save = []\n called_write_cells = []\n\n class DummyClass(ExcelWriter):\n called_save = False\n called_write_cells = False\n supported_extensions = [\"xlsx\", \"xls\"]\n engine = \"dummy\"\n\n def save(self):\n called_save.append(True)\n\n def write_cells(self, *args, **kwargs):\n called_write_cells.append(True)\n\n def check_called(func):\n func()\n assert len(called_save) >= 1\n assert len(called_write_cells) >= 1\n del called_save[:]\n del called_write_cells[:]\n\n with pd.option_context(\"io.excel.xlsx.writer\", \"dummy\"):\n path = \"something.xlsx\"\n with tm.ensure_clean(path) as filepath:\n register_writer(DummyClass)\n with ExcelWriter(filepath) as writer:\n assert isinstance(writer, DummyClass)\n df = tm.makeCustomDataframe(1, 1)\n check_called(lambda: df.to_excel(filepath))\n with tm.ensure_clean(\"something.xls\") as filepath:\n check_called(lambda: df.to_excel(filepath, engine=\"dummy\"))\n\n @pytest.mark.parametrize(\n \"ext\",\n [\n pytest.param(\".xlsx\", marks=td.skip_if_no(\"xlsxwriter\")),\n pytest.param(\".xlsx\", marks=td.skip_if_no(\"openpyxl\")),\n pytest.param(\".ods\", 
marks=td.skip_if_no(\"odf\")),\n ],\n )\n def test_engine_kwargs_and_kwargs_raises(self, ext):\n # GH 40430\n msg = re.escape(\"Cannot use both engine_kwargs and **kwargs\")\n with pytest.raises(ValueError, match=msg):\n with ExcelWriter(\"\", engine_kwargs={\"a\": 1}, b=2):\n pass\n\n\n@td.skip_if_no(\"xlrd\")\n@td.skip_if_no(\"openpyxl\")\nclass TestFSPath:\n def test_excelfile_fspath(self):\n with tm.ensure_clean(\"foo.xlsx\") as path:\n df = DataFrame({\"A\": [1, 2]})\n df.to_excel(path)\n with ExcelFile(path) as xl:\n result = os.fspath(xl)\n assert result == path\n\n def test_excelwriter_fspath(self):\n with tm.ensure_clean(\"foo.xlsx\") as path:\n with ExcelWriter(path) as writer:\n assert os.fspath(writer) == str(path)\n" ]
[ [ "scipy.stats.poisson.pmf", "numpy.arange", "numpy.abs", "numpy.sqrt", "scipy.stats.binom.pmf" ], [ "numpy.zeros_like", "numpy.ones_like", "numpy.sin", "numpy.asarray", "numpy.log", "numpy.exp", "numpy.finfo", "numpy.atleast_1d", "numpy.power", "numpy.cos", "numpy.clip" ], [ "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.testing.assert_equal", "numpy.log", "pandas.date_range", "numpy.ones", "numpy.exp", "numpy.arange", "numpy.sqrt", "pandas.read_csv" ], [ "pandas._libs.lib.infer_datetimelike_array", "numpy.int8", "pandas._testing.external_error_raised", "pandas.core.dtypes.common.ensure_int32", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas.core.dtypes.inference.is_re_compilable", "pandas._libs.lib.infer_dtype", "pandas.core.dtypes.common.is_number", "pandas.DataFrame", "numpy.object_", "pandas.PeriodIndex", "pandas.core.dtypes.common.is_bool", "numpy.bytes_", "pandas.Period", "numpy.float64", "numpy.complex64", "pandas.core.dtypes.common.is_timedelta64_ns_dtype", "pandas.Index", "pandas._testing.assert_numpy_array_equal", "pandas.DatetimeIndex", "pandas.core.dtypes.inference.is_dict_like", "pandas.period_range", "pandas.Interval", "pandas.core.dtypes.common.is_datetime64tz_dtype", "numpy.arange", "pandas._libs.lib.maybe_convert_numeric", "numpy.array", "numpy.matrix", "pandas.core.dtypes.inference.is_re", "pandas._libs.lib.is_period", "pandas.date_range", "pandas.Series", "pandas.core.dtypes.common.is_timedelta64_dtype", "numpy.float16", "pandas.core.dtypes.common.is_datetime64_any_dtype", "pandas.Timedelta", "pandas._testing.assert_extension_array_equal", "pandas.DateOffset", "pandas._libs.missing.isneginf_scalar", "pandas._libs.lib.item_from_zerodim", "pandas.core.arrays.IntegerArray", "pandas.core.dtypes.common.is_scalar", "pandas.array", "pandas.core.dtypes.inference.is_list_like", "pandas.core.dtypes.common.is_integer", "numpy.ndarray", "numpy.float32", "pandas._libs.lib.to_object_array", "pandas.core.dtypes.inference.is_nested_list_like", "numpy.datetime64", "numpy.asarray", "pandas.core.arrays.FloatingArray", "pandas._testing.assert_almost_equal", "pandas.core.dtypes.common.is_complex", "numpy.uint64", "pandas.core.dtypes.common.is_float", "pandas._libs.lib.to_object_array_tuples", "pandas.offsets.Minute", "numpy.unicode_", "pandas.core.dtypes.common.is_datetime64_ns_dtype", "pandas.Timestamp", "pandas.core.dtypes.inference.is_hashable", "numpy.dtype", "numpy.int16", "pandas._libs.missing.isposinf_scalar", "pandas.core.dtypes.inference.is_array_like", "pandas._libs.lib.is_datetime_array", "pandas.TimedeltaIndex", "numpy.int32", "pandas._libs.lib.is_datetime64_array", "pandas._libs.lib.maybe_convert_objects", "numpy.str_", "numpy.complex128", "pandas.core.dtypes.inference.is_named_tuple", "numpy.bool_", "numpy.timedelta64", "numpy.isnan", "pandas.core.arrays.BooleanArray", "pandas.Categorical", "numpy.int64", "pandas._libs.lib.is_timedelta_or_timedelta64_array" ], [ "pandas.io.excel.ExcelFile", "pandas.read_excel", "pandas.Timestamp", "pandas._testing.assert_series_equal", "pandas.util._test_decorators.safe_import", "pandas.set_option", "pandas.io.excel.register_writer", "pandas.DataFrame", "numpy.random.randint", "numpy.random.sample", "pandas._testing.round_trip_pathlib", "pandas.io.excel.ExcelWriter", "pandas.Period", "numpy.zeros", "pandas.MultiIndex.from_tuples", "numpy.random.randn", "pandas._testing.assert_frame_equal", "pandas._testing.makeCustomDataframe", "pandas.MultiIndex.from_arrays", "numpy.random.random_sample", "pandas.get_option", 
"pandas.Index", "pandas._testing.makeDataFrame", "pandas.cut", "numpy.asarray", "pandas._testing.assert_produces_warning", "pandas.date_range", "numpy.ones", "pandas._testing.ensure_clean", "pandas.option_context", "pandas.util._test_decorators.skip_if_no", "pandas._testing.round_trip_localpath" ] ]
tommytanaka00/IML.HUJI
[ "156382fad84026ce069c6be70fa389cda32f3501" ]
[ "IMLearn/learners/regressors/linear_regression.py" ]
[ "from __future__ import annotations\nfrom typing import NoReturn\nfrom IMLearn.base import BaseEstimator\nimport numpy as np\nfrom numpy.linalg import pinv\nfrom IMLearn.metrics.loss_functions import mean_square_error\n\n\n\n\nclass LinearRegression(BaseEstimator):\n \"\"\"\n Linear Regression Estimator\n\n Solving Ordinary Least Squares optimization problem\n \"\"\"\n\n def __init__(self, include_intercept: bool = True):# -> LinearRegression:\n \"\"\"\n Instantiate a linear regression estimator\n\n Parameters\n ----------\n include_intercept: bool, default=True\n Should fitted model include an intercept or not\n\n Attributes\n ----------\n include_intercept_: bool\n Should fitted model include an intercept or not\n\n coefs_: ndarray of shape (n_features,) (without intercept) or (n_features+1,) (with intercept)\n Coefficients vector fitted by linear regression. To be set in\n `LinearRegression.fit` function.\n \"\"\"\n super().__init__()\n self.include_intercept_, self.coefs_ = include_intercept, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to given samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n\n Notes\n -----\n Fits model with or without an intercept depending on value of `self.include_intercept_`\n \"\"\"\n if self.include_intercept_:\n X = np.insert(X, 0, values=1, axis=1)\n psuedo_inverse_X = np.linalg.pinv(X.T).T\n self.coefs_ = psuedo_inverse_X @ y # the w\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n if self.include_intercept_:\n X = np.insert(X, 0, values=1, axis=1)\n return X @ self.coefs_\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n y_hat = self.predict(X)\n return mean_square_error(y, y_hat)\n\n\n\n\n\n" ]
[ [ "numpy.linalg.pinv", "numpy.insert" ] ]
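The IMLearn/learners/regressors/linear_regression.py entry above solves ordinary least squares through the Moore-Penrose pseudo-inverse (note that np.linalg.pinv(X.T).T is mathematically identical to np.linalg.pinv(X)). What follows is only an illustrative, self-contained sketch of that closed-form fit; the data, random seed, and coefficient values are invented for the example and are not taken from the repository.

import numpy as np

# Hypothetical data drawn from y = 3 + 2*x plus a little Gaussian noise.
rng = np.random.default_rng(0)
x = rng.uniform(-5.0, 5.0, size=50)
y = 3.0 + 2.0 * x + rng.normal(scale=0.1, size=50)

# Prepend a column of ones, as the include_intercept_ branch does via np.insert.
X = np.column_stack([np.ones_like(x), x])

# Closed-form OLS solution: w = pinv(X) @ y.
w = np.linalg.pinv(X) @ y
print(w)                            # approximately [3.0, 2.0]

# Mean squared error of the fit, the metric used by the _loss method.
print(np.mean((X @ w - y) ** 2))    # roughly the noise variance, ~0.01

Using the pseudo-inverse rather than solving the normal equations directly also keeps the fit well defined when the design matrix is rank-deficient.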
rshilliday/sfm
[ "ca0eead387ba68582bf166c9a0780adcd99e3bf3" ]
[ "bundle_adjustment.py" ]
[ "import numpy as np\nimport cv2\nfrom scipy.optimize import least_squares\nfrom scipy.sparse import lil_matrix\n\ndef bundle_adjustment_sparsity(n_cameras, n_points, camera_indices, point_indices):\n \"\"\"\n\n :param n_cameras Integer. Number of cameras/images currently resected\n :param n_points: number of distinct 3D points that have been triangulated\n :param camera_indices: List. Value at ith position is index of camera that sees ith 2D point\n :param point_indices: List. Value at ith position is index of 3D point that sees ith 2D point\n \"\"\"\n m = camera_indices.size * 2\n n = n_cameras * 12 + n_points * 3\n A = lil_matrix((m, n), dtype=int)\n\n i = np.arange(camera_indices.size)\n for s in range(12):\n A[2 * i, camera_indices * 12 + s] = 1\n A[2 * i + 1, camera_indices * 12 + s] = 1\n\n for s in range(3):\n A[2 * i, n_cameras * 12 + point_indices * 3 + s] = 1\n A[2 * i + 1, n_cameras * 12 + point_indices * 3 + s] = 1\n\n return A\n\ndef project(points, camera_params, K):\n \"\"\"\n Projects 3D points onto camera coordinates\n\n :param points: N x 3 List of 3D point coordinates\n :param camera_params: N x 12 List of 12D camera parameters (r1, ... r9, t1, t2, t3)\n :param K: Intrinsics matrix\n \"\"\"\n points_proj = []\n\n for idx in range(len(camera_params)): # idx applies to both points and cam_params, they are = length vectors\n R = camera_params[idx][:9].reshape(3,3)\n rvec, _ = cv2.Rodrigues(R)\n t = camera_params[idx][9:]\n pt = points[idx]\n pt = np.expand_dims(pt, axis=0)\n pt, _ = cv2.projectPoints(pt, rvec, t, K, distCoeffs=np.array([]))\n pt = np.squeeze(np.array(pt))\n points_proj.append(pt)\n\n return points_proj\n\ndef fun(params, n_cameras, n_points, camera_indices, point_indices, points_2d, K):\n \"\"\"Compute residuals (ie reprojection error) for Bundle Adjustment.\n\n :param params: List of all parameters. First n_cameras*12 parameters are (r1, ..., r9, t1, t2, t3)\n for each resected camera. Remaining n_points*3 paramaters are (x, y, z) coord of each triangulated point\n :param n_cameras: Integer. # of resected cameras\n :param n_points: Integer. # of triangulated points\n :param camera_indices: List of indices of cameras viewing each 2D observation\n :param point_indices: List of indices of 3D points corresponding to each 2D observation\n :points_2d: 2D pixel coordinates of each observation by a camera of a 3D point\n :param K: Intrinsics matrix\n \"\"\"\n camera_params = params[:n_cameras * 12].reshape((n_cameras, 12))\n points_3d = params[n_cameras * 12:].reshape((n_points, 3))\n points_proj = project(points_3d[point_indices], camera_params[camera_indices], K)\n return (points_proj - points_2d).ravel()\n\ndef do_BA(points3d_with_views, R_mats, t_vecs, resected_imgs, keypoints, K, ftol):\n \"\"\"\n Perform Bundle Adjustment on all currently resected images and all triangulated 3D points. Return updated\n values for camera poses and 3D point coordinates.\n\n :param points3d_with_views: List of Point3D_with_views objects.\n :param R_mats: Dict mapping index of resected cameras to their Rotation matrix\n :param t_vecs: Dict mapping index of resected cameras to their translation vector\n :param resected_imgs: List of indices of resected images\n :param keypoints: List of lists of cv2.Keypoint objects. keypoints[i] is list for image i.\n :param ftol: Tolerance for change in total reprojection error. 
Used so scipy.optimize.least_squares knows\n when to stop adjusting\n \"\"\"\n point_indices = []\n points_2d = []\n camera_indices = []\n points_3d = []\n camera_params = []\n BA_cam_idxs = {} # maps from true cam indices to 'normalized' (i.e 11, 23, 31 maps to -> 0, 1, 2)\n cam_count = 0\n\n for r in resected_imgs:\n BA_cam_idxs[r] = cam_count\n camera_params.append(np.hstack((R_mats[r].ravel(), t_vecs[r].ravel())))\n cam_count += 1\n\n for pt3d_idx in range(len(points3d_with_views)):\n points_3d.append(points3d_with_views[pt3d_idx].point3d)\n for cam_idx, kpt_idx in points3d_with_views[pt3d_idx].source_2dpt_idxs.items():\n if cam_idx not in resected_imgs: continue\n point_indices.append(pt3d_idx)\n camera_indices.append(BA_cam_idxs[cam_idx])#append normalized cam idx\n points_2d.append(keypoints[cam_idx][kpt_idx].pt)\n if len(points_3d[0]) == 3: points_3d = np.expand_dims(points_3d, axis=0)\n\n point_indices = np.array(point_indices)\n points_2d = np.array(points_2d)\n camera_indices = np.array(camera_indices)\n points_3d = np.squeeze(points_3d)\n camera_params = np.array(camera_params)\n\n n_cameras = camera_params.shape[0]\n n_points = points_3d.shape[0]\n x0 = np.hstack((camera_params.ravel(), points_3d.ravel()))\n A = bundle_adjustment_sparsity(n_cameras, n_points, camera_indices, point_indices)\n\n res = least_squares(fun, x0, jac_sparsity=A, verbose=2, x_scale='jac', loss='linear', ftol=ftol, xtol=1e-12, method='trf',\n args=(n_cameras, n_points, camera_indices, point_indices, points_2d, K))\n\n adjusted_camera_params = res.x[:n_cameras * 12].reshape(n_cameras, 12)\n adjusted_points_3d = res.x[n_cameras * 12:].reshape(n_points, 3)\n adjusted_R_mats = {}\n adjusted_t_vecs = {}\n for true_idx, norm_idx in BA_cam_idxs.items():\n adjusted_R_mats[true_idx] = adjusted_camera_params[norm_idx][:9].reshape(3,3)\n adjusted_t_vecs[true_idx] = adjusted_camera_params[norm_idx][9:].reshape(3,1)\n R_mats = adjusted_R_mats\n t_vecs = adjusted_t_vecs\n for pt3d_idx in range(len(points3d_with_views)):\n points3d_with_views[pt3d_idx].point3d = np.expand_dims(adjusted_points_3d[pt3d_idx], axis=0)\n\n return points3d_with_views, R_mats, t_vecs\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.expand_dims", "scipy.sparse.lil_matrix", "numpy.squeeze", "scipy.optimize.least_squares" ] ]
ncoman32/promise12ProstateCancerUNETSegmentation
[ "20060a8c118a4bf4ae1f702d73782d86d8276e68" ]
[ "mhd_to_jpg.py" ]
[ "# code scris urat in cazul in care ne mai trebuie conversia\nimport os\nfrom os import listdir\n\nimport SimpleITK as sitk\nimport matplotlib.pylab as plt\n\n\ndef to_images(file, path):\n print(\"Processing file: \" + file)\n input_path = path + file\n output_path = path\n\n file_name = os.path.splitext(file)[0]\n\n if 'segment' in file_name:\n output_path = output_path + 'Label\\\\' + file_name\n else:\n output_path = output_path + 'Input\\\\' + file_name\n \n ct_scans = sitk.GetArrayFromImage(sitk.ReadImage(input_path, sitk.sitkFloat32))\n no_imgs = ct_scans.shape[0]\n\n for i in range(0, no_imgs):\n plt.imshow(ct_scans[i])\n plt.axis('off')\n sub_name = output_path + '_'+str(i)+'.jpeg'\n plt.savefig(sub_name, bbox_inches='tight', pad_inches=0)\n\n\nimage_folder = \"C:\\\\Users\\\\ncoma\\\\Desktop\\\\ICA\\\\bunastareSociala\\\\UNET-TGS\\\\Train_After\\\\train\\\\\"\n\nfiles = [f for f in listdir(image_folder) if '.mhd' in f]\nfor f in files:\n to_images(f, image_folder)\n" ]
[ [ "matplotlib.pylab.savefig", "matplotlib.pylab.axis", "matplotlib.pylab.imshow" ] ]
malshaV/sar_transformer
[ "b3ac845f96f2332aa4f1af94b455f71630978b17" ]
[ "test.py" ]
[ "# Code for testing on real SAR images \n# Author: Malsha Perera\nimport argparse\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torchvision.transforms import functional as F\nimport os\nimport numpy as np\nimport torch\nfrom transform_main import TransSAR, TransSARV2, TransSARV3\nimport cv2\n\n\n\nparser = argparse.ArgumentParser(description='TransSAR')\n\nparser.add_argument('--cuda', default=\"on\", type=str, \n help='switch on/off cuda option (default: off)')\n\nparser.add_argument('--load', default='default', type=str,\n help='turn on img augmentation (default: default)')\nparser.add_argument('--save_path', required=True , type=str,\n help='turn on img augmentation (default: default)')\nparser.add_argument('--model', type=str,\n help='model name')\nparser.add_argument('--crop', type=int, default=None)\nparser.add_argument('--device', default='cuda', type=str)\nparser.add_argument('--loadmodel', default='load', type=str)\n\n\n\nargs = parser.parse_args()\n\n\nmodelname = args.model\nloaddirec = args.loadmodel\nsave_path = args.save_path\n\n\n\n\ndevice = torch.device(\"cuda\")\n\n\n\nmodel = TransSARV2()\n\n\nif torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model,device_ids=[0,1]).cuda()\nmodel.to(device)\n\n\nmodel.load_state_dict(torch.load(loaddirec))\nmodel.eval()\n\n\n\nif not os.path.isdir(save_path):\n \n os.makedirs(save_path)\n\n\n\n\nim_file = './test_images/test_01.png'\n\nimg = cv2.imread(im_file,0) \nnoisy_im = (np.float32(img)+1.0)/256.0\n\n\n\nx = np.float32(noisy_im)\nx = F.to_tensor(x)\nx = x.unsqueeze(0)\n\n\npred_im = model(x)\ntmp = pred_im.detach().cpu().numpy()\n\ntmp = tmp.squeeze()\ntmp = tmp*256 -1\n\nfilename_out = 'test_01_results.png'\nfilepath_out = save_path + filename_out\n\ncv2.imwrite(filepath_out,tmp)\n\n\nprint('done')\n\n" ]
[ [ "torch.device", "torch.cuda.device_count", "numpy.float32", "torch.load", "torch.nn.DataParallel" ] ]
santinoacco/multiclass_cnn
[ "a5ce73c065f69b0025f2969a9ed2dc13789304a3" ]
[ "src/preprocess/load_data.py" ]
[ "#!/usr/bin/env python3\n\nfrom ..common.parser import set_parser\nimport tensorflow as tf\n\ndef load_data(data_dir, img_height=180, img_width=180, batch_size=32):\n train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"training\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size,\n color_mode=\"rgb\"\n )\n\n val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size,\n color_mode=\"rgb\"\n )\n return train_ds, val_ds\n" ]
[ [ "tensorflow.keras.preprocessing.image_dataset_from_directory" ] ]
icedream2/DAVAR-Lab-OCR
[ "c8b82f45516850eeadcab2739fb2a4292f2fdca1", "c8b82f45516850eeadcab2739fb2a4292f2fdca1", "c8b82f45516850eeadcab2739fb2a4292f2fdca1", "c8b82f45516850eeadcab2739fb2a4292f2fdca1" ]
[ "davarocr/davarocr/davar_common/apis/test.py", "davarocr/davarocr/davar_videotext/apis/test.py", "davarocr/davarocr/davar_rcg/models/transformations/tps_transformation.py", "davarocr/davarocr/davar_rcg/models/sequence_heads/warpctc_head.py" ]
[ "\"\"\"\n##################################################################################################\n# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.\n# Filename : test.py\n# Abstract : The common testing api for davarocr, used in online/offline validation\n Support for DETECTOR, RECOGNIZOR, SPOTTER, INFO_EXTRACTOR, etc.\n\n# Current Version: 1.0.0\n# Date : 2021-05-20\n##################################################################################################\n\"\"\"\nimport os.path as osp\n\nimport time\n\nimport mmcv\nimport torch\n\nfrom mmcv.image import tensor2imgs\nfrom mmcv.runner import get_dist_info\n\nfrom mmdet.core import encode_mask_results\nfrom mmdet.apis.test import collect_results_cpu, collect_results_gpu\n\n\ndef single_gpu_test(model,\n data_loader,\n show=False,\n out_dir=None,\n show_score_thr=0.3,\n model_type=\"DETECTOR\"):\n \"\"\" Test model with single GPU, used for visualization.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n show (boolean): whether to show visualization\n out_dir (str): visualization results saved path\n show_score_thr (float): the threshold to show visualization.\n model_type(float): model type indicator, used to formalize final results.\n Returns:\n dict: test results\n \"\"\"\n\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for _, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n\n batch_size = len(result)\n\n # Format results according to different model types\n if model_type == \"DETECTOR\":\n if show or out_dir:\n if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):\n img_tensor = data['img'][0]\n else:\n img_tensor = data['img'][0].data[0]\n img_metas = data['img_metas'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n\n for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):\n height, width, _ = img_meta['img_shape']\n img_show = img[:height, :width, :]\n\n ori_h, ori_w = img_meta['ori_shape'][:-1]\n img_show = mmcv.imresize(img_show, (ori_w, ori_h))\n\n if out_dir:\n out_file = osp.join(out_dir, img_meta['ori_filename'])\n else:\n out_file = None\n\n model.module.show_result(\n img_show,\n result[i],\n show=show,\n out_file=out_file,\n score_thr=show_score_thr)\n\n # encode mask results\n if isinstance(result[0], tuple):\n result = [(bbox_results, encode_mask_results(mask_results))\n for bbox_results, mask_results in result]\n elif model_type == \"RECOGNIZOR\":\n if \"prob\" in result:\n result = result[\"text\"]\n elif \"length\" in result and \"text\" not in result:\n result = result[\"length\"]\n elif \"length\" in result and \"text\" in result:\n result = list(zip(result[\"text\"], result[\"length\"]))\n else:\n result = result[\"text\"]\n batch_size = len(result)\n elif model_type == \"SPOTTER\":\n pass\n # if isinstance(result[0], dict):\n # # Remove useless key\n # useless_keys = []\n # for res in result:\n # for key in res.keys():\n # if key not in ['points', 'texts', 'confidence']:\n # useless_keys.append(key)\n # for key in useless_keys:\n # del res[key]\n results.extend(result)\n\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef multi_gpu_test(model,\n data_loader,\n tmpdir=None,\n gpu_collect=False,\n model_type=\"DETECTOR\"):\n \"\"\"Test model with multiple gpus.\n\n This 
method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n it encodes results to gpu tensors and use gpu communication for results\n collection. On cpu mode it saves the results on different gpus to 'tmpdir'\n and collects them by the rank 0 worker.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n tmpdir (str): Path of directory to save the temporary results from\n different gpus under cpu mode.\n gpu_collect (bool): Option to use either gpu or cpu to collect results.\n\n Returns:\n list(dict): The prediction results.\n \"\"\"\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n time.sleep(2) # This line can prevent deadlock problem in some cases.\n for _, data in enumerate(data_loader):\n\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n\n # Format results according to different model types\n if model_type == \"DETECTOR\":\n # Encode mask results\n if isinstance(result[0], tuple):\n result = [(bbox_results, encode_mask_results(mask_results))\n for bbox_results, mask_results in result]\n elif model_type == \"RECOGNIZOR\":\n if \"prob\" in result:\n result = result[\"text\"]\n elif \"length\" in result and \"text\" not in result:\n result = result[\"length\"]\n elif \"length\" in result and \"text\" in result:\n result = list(zip(result[\"text\"], result[\"length\"]))\n else:\n result = result[\"text\"]\n elif model_type == \"SPOTTER\":\n pass\n # if isinstance(result[0], dict):\n # # Remove useless key\n # useless_keys = []\n # for res in result:\n # for key in res.keys():\n # if key not in ['points', 'texts', 'confidence']:\n # useless_keys.append(key)\n # for key in useless_keys:\n # del res[key]\n\n results.extend(result)\n\n if rank == 0:\n batch_size = len(result)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # Collect results from all ranks\n if gpu_collect:\n results = collect_results_gpu(results, len(dataset))\n else:\n results = collect_results_cpu(results, len(dataset), tmpdir)\n return results\n", "\"\"\"\n##################################################################################################\n# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. 
All rights reserved.\n# Filename : test.py\n# Abstract : The common testing api for video text recognition, track, quality score\n\n# Current Version: 1.0.0\n# Date : 2021-06-02\n##################################################################################################\n\"\"\"\nimport numpy as np\n\nimport mmcv\nimport torch\n\n\ndef single_gpu_test(model,\n data_loader):\n \"\"\" Test model with single GPU, used for visualization.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n\n Returns:\n dict: test results\n \"\"\"\n\n model.eval()\n results = dict()\n results['texts'] = []\n results['img_info'] = []\n results['glimpses'] = []\n results['scores'] = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n texts = result['text']\n glimpses = result['glimpses']\n glimpses = glimpses.cpu().numpy()\n img_infos = result['img_info']\n scores = result['scores']\n scores = scores.cpu().numpy()\n scores = scores.reshape(-1)\n batch_size = len(texts)\n results['texts'].extend(texts)\n results['img_info'].extend(img_infos)\n results['glimpses'].extend(glimpses)\n results['scores'].extend(scores)\n for _ in range(batch_size):\n prog_bar.update()\n new_glimpse = np.stack(results['glimpses'])\n results['glimpses'] = new_glimpse\n return results\n", "\"\"\"\n##################################################################################################\n# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.\n# Filename : tps_transformation.py\n# Abstract : Implementations of the TPS transformation\n\n# Current Version: 1.0.0\n# Date : 2021-03-07\n# Thanks to : We borrow the released code from http://gitbug.com/clovaai/deep-text-recognition-benchmark\n for the TPS transformer.\n##################################################################################################\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import TRANSFORMATIONS\n\n\n@TRANSFORMATIONS.register_module()\nclass TPS_SpatialTransformer(nn.Module):\n \"\"\" Rectification Network of RARE, namely TPS-based STN\n\n Ref: [1] Spatial Transformer Network, NIPS-2016\n [2] Robust scene text recognition with automatic rectification. CVPR-2016. (short in RARE)\n [3] ASTER: An Attentional Scene Text Recognizer with Flexible Rectification. 
T-PAMI 2018.\n\n Usage Example:\n transformation=dict(\n type='TPS_SpatialTransformer',\n F=20,\n I_size=(32, 100),\n I_r_size=(32, 100),\n I_channel_num=3),\n \"\"\"\n\n def __init__(self,\n F,\n I_size,\n I_r_size,\n I_channel_num=1):\n \"\"\"\n\n Args:\n F (int): number of fiducial points (default 20 following the paper)\n I_size (tuple): size of input images\n I_r_size (tuple): size of rectified images\n I_channel_num (int): the number of channels of the input image I\n \"\"\"\n\n super(TPS_SpatialTransformer, self).__init__()\n self.F = F\n self.I_size = I_size\n self.I_r_size = I_r_size\n self.I_channel_num = I_channel_num\n\n self.LocalizationNetwork = LocalizationNetwork(self.F, self.I_channel_num)\n self.GridGenerator = GridGenerator(self.F,\n self.I_r_size)\n\n def forward(self, batch_I):\n \"\"\"\n Args:\n batch_I (tensor): batch of input images [batch_size x I_channel_num x I_r_height x I_r_width]\n\n Returns:\n np.array: the image of rectified images\n\n \"\"\"\n\n batch_C_prime = self.LocalizationNetwork(batch_I) # batch_size x K x 2\n build_P_prime = self.GridGenerator.build_P_prime(batch_C_prime) # batch_size x n (= I_r_width x I_r_height) x 2\n build_P_prime_reshape = build_P_prime.reshape([build_P_prime.size(0),\n self.I_r_size[0],\n self.I_r_size[1],\n 2])\n batch_I_r = F.grid_sample(batch_I,\n build_P_prime_reshape,\n padding_mode='border')\n\n return batch_I_r\n\n def init_weights(self, pretrained=None):\n \"\"\"\n\n Args:\n pretrained (str): save path of pretrained model\n\n Returns:\n\n \"\"\"\n return\n\n\nclass LocalizationNetwork(nn.Module):\n \"\"\" Localization Network of RARE,\n which predicts C' (K x 2) from I (I_width x I_height) \"\"\"\n\n def __init__(self, F, I_channel_num):\n \"\"\"\n\n Args:\n F (int): number of fiducial points (default 20 following the paper)\n I_channel_num (int): the number of channels of the input image I\n \"\"\"\n super(LocalizationNetwork, self).__init__()\n self.F = F\n self.I_channel_num = I_channel_num\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels=self.I_channel_num,\n out_channels=64, kernel_size=3,\n stride=1, padding=1,\n bias=False),\n nn.BatchNorm2d(64), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 64 x I_height/2 x I_width/2\n nn.Conv2d(64, 128, 3, 1, 1,\n bias=False),\n nn.BatchNorm2d(128), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 128 x I_height/4 x I_width/4\n nn.Conv2d(128, 256, 3, 1, 1,\n bias=False),\n nn.BatchNorm2d(256), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 256 x I_height/8 x I_width/8\n nn.Conv2d(256, 512, 3, 1, 1,\n bias=False),\n nn.BatchNorm2d(512), nn.ReLU(True),\n nn.AdaptiveAvgPool2d(1) # batch_size x 512\n )\n\n self.localization_fc1 = nn.Sequential(nn.Linear(512, 256),\n nn.ReLU(True))\n self.localization_fc2 = nn.Linear(256, self.F * 2)\n\n # Init fc2 in LocalizationNetwork\n self.localization_fc2.weight.data.fill_(0)\n\n # see RARE paper Fig. 
6 (a)\n ctrl_pts_x = np.linspace(-1.0, 1.0,\n int(F / 2))\n ctrl_pts_y_top = np.linspace(0.0, -1.0,\n num=int(F / 2))\n ctrl_pts_y_bottom = np.linspace(1.0, 0.0,\n num=int(F / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top],\n axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom],\n axis=1)\n initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom],\n axis=0)\n self.localization_fc2.bias.data = torch.from_numpy(initial_bias).float().view(-1)\n\n def forward(self, batch_I):\n\n \"\"\"\n Args:\n x (tensor): input image featuremaps [batch_size x I_channel_num x I_height x I_width]\n\n Returns:\n torch.Tensor: Predicted coordinates of fiducial points for input batch [batch_size x F x 2]\n \"\"\"\n batch_size = batch_I.size(0)\n features = self.conv(batch_I).view(batch_size, -1)\n batch_C_prime = self.localization_fc2(self.localization_fc1(features)). \\\n view(batch_size, self.F, 2)\n return batch_C_prime\n\n\nclass GridGenerator(nn.Module):\n \"\"\" Grid Generator of RARE,\n which produces P_prime by multipling T with P\n\n \"\"\"\n\n def __init__(self, F, I_r_size):\n \"\"\"\n Args:\n F (int): number of fiducial points (default 20 following the paper)\n I_r_size (tuple): size of rectified images\n \"\"\"\n \"\"\" Generate P_hat and inv_delta_C for later \"\"\"\n super(GridGenerator, self).__init__()\n self.eps = 1e-6\n self.I_r_height, self.I_r_width = I_r_size\n self.F = F\n self.C = self._build_C(self.F) # F x 2\n self.P = self._build_P(self.I_r_width, self.I_r_height)\n self.register_buffer(\"inv_delta_C\", # F+3 x F+3\n torch.tensor(self._build_inv_delta_C(self.F, self.C)).float())\n\n self.register_buffer(\"P_hat\",\n torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float()) # n x F+3\n\n def _build_C(self, F):\n \"\"\"\n Args:\n F (int): number of fiducial points (default 20 following the paper)\n\n Returns:\n np.array: coordinates of fiducial points in I_r [batch_size x F x 2]\n\n \"\"\"\n \"\"\" Return coordinates of fiducial points in I_r; C \"\"\"\n ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))\n ctrl_pts_y_top = -1 * np.ones(int(F / 2))\n ctrl_pts_y_bottom = np.ones(int(F / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top],\n axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom],\n axis=1)\n C = np.concatenate([ctrl_pts_top, ctrl_pts_bottom],\n axis=0)\n return C\n\n def _build_inv_delta_C(self, F, C):\n \"\"\"\n\n Args:\n F (int): number of fiducial points (default 20 following the paper)\n C (np.array): coordinates of fiducial points\n\n Returns:\n np.array: TPS transformation matrix [(F+3) x (F+3)]\n\n \"\"\"\n # Return inv_delta_C which is needed to calculate T\n hat_C = np.zeros((F, F), dtype=float) # F x F\n for i in range(0, F):\n for j in range(i, F):\n r = np.linalg.norm(C[i] - C[j])\n hat_C[i, j] = r\n hat_C[j, i] = r\n np.fill_diagonal(hat_C, 1)\n hat_C = (hat_C ** 2) * np.log(hat_C)\n\n delta_C = np.concatenate( # F+3 x F+3\n [\n np.concatenate([np.ones((F, 1)),\n C, hat_C], axis=1), # F x F+3\n np.concatenate([np.zeros((2, 3)),\n np.transpose(C)], axis=1), # 2 x F+3\n np.concatenate([np.zeros((1, 3)),\n np.ones((1, F))], axis=1) # 1 x F+3\n ],\n axis=0\n )\n inv_delta_C = np.linalg.inv(delta_C)\n return inv_delta_C # F+3 x F+3\n\n def _build_P(self, I_r_width, I_r_height):\n \"\"\"\n Args:\n I_r_width (torch.Tensor): width of rectified images\n I_r_height (torch.Tensor): height of rectified images\n\n Returns:\n np.array: generated P [(I_r_width x I_r_height) x 2]\n\n \"\"\"\n\n I_r_grid_x = 
(np.arange(-I_r_width, I_r_width, 2) + 1.0) / I_r_width # self.I_r_width\n I_r_grid_y = (np.arange(-I_r_height, I_r_height, 2) + 1.0) / I_r_height # self.I_r_height\n\n P = np.stack( # self.I_r_width x self.I_r_height x 2\n np.meshgrid(I_r_grid_x,\n I_r_grid_y), axis=2)\n return P.reshape([-1, 2]) # n (= self.I_r_width x self.I_r_height) x 2\n\n def _build_P_hat(self, F, C, P):\n \"\"\"\n\n Args:\n F (int): number of fiducial points\n C (np.array): coordinates of fiducial points\n\n P (np.array): the generated sampling grid P on I\n\n Returns:\n\n \"\"\"\n n = P.shape[0] # n (= self.I_r_width x self.I_r_height)\n P_tile = np.tile(np.expand_dims(P, axis=1),\n (1, F, 1)) # n x 2 -> n x 1 x 2 -> n x F x 2\n C_tile = np.expand_dims(C, axis=0) # 1 x F x 2\n P_diff = P_tile - C_tile # n x F x 2\n rbf_norm = np.linalg.norm(P_diff,\n ord=2,\n axis=2,\n keepdims=False) # n x F\n rbf = np.multiply(np.square(rbf_norm),\n np.log(rbf_norm + self.eps)) # n x F\n P_hat = np.concatenate([np.ones((n, 1)), P, rbf], axis=1)\n return P_hat # n x F+3\n\n def build_P_prime(self, batch_C_prime):\n \"\"\"\n\n Args:\n batch_C_prime (tensor):\n\n Returns:\n torch.Tensor: generated grid [batch_size x F x 2]\n\n \"\"\"\n # Generate Grid from batch_C_prime [batch_size x F x 2]\n batch_size = batch_C_prime.size(0)\n batch_inv_delta_C = self.inv_delta_C.repeat(batch_size, 1, 1)\n batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)\n batch_C_prime_with_zeros = torch.cat((batch_C_prime,\n torch.zeros(batch_size, 3, 2).float().cuda()),\n dim=1) # batch_size x\n # (F+3) x 2\n batch_T = torch.bmm(batch_inv_delta_C,\n batch_C_prime_with_zeros) # batch_size x F+3 x 2\n batch_P_prime = torch.bmm(batch_P_hat,\n batch_T) # batch_size x n x 2\n return batch_P_prime # batch_size x n x 2\n", "\"\"\"\n##################################################################################################\n# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. 
All rights reserved.\n# Filename : warpctc_head.py\n# Abstract : Implementations of the warp-ctc prediction layer, loss calculation and result converter\n\n# Current Version: 1.0.0\n# Date : 2021-05-01\n##################################################################################################\n\"\"\"\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\nfrom mmcv.runner import load_checkpoint\n\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.builder import build_loss\n\nfrom ...core.converter import build_converter\n\n\n@HEADS.register_module()\nclass WarpCTCHead(nn.Module):\n \"\"\"WarpCTC Recognition Head\"\"\"\n def __init__(self,\n input_size,\n converter=dict(\n type='CTCLabelConverter',\n character='0123456789abcdefghijklmnopqrstuvwxyz'),\n loss_ctc=dict(\n type='WarpCTCLoss',\n blank=0,\n size_average=False,\n length_average=False,\n loss_weight=1.0),\n use_1x1conv=False,\n ):\n \"\"\"\n Args:\n input_size (int): input feature dim\n loss_ctc (dict): loss function parameter\n converter (dict): converter parameter\n use_1x1conv (bool): whether to use 1*1 convolution\n \"\"\"\n\n super(WarpCTCHead, self).__init__()\n self.input_size = input_size\n\n # build the converter\n self.converter = build_converter(converter)\n self.num_classes = len(self.converter.character) # + 1 num_classes\n self.use_1x1conv = use_1x1conv\n\n # whether to use convolution or linear to realize classification\n if self.use_1x1conv:\n self.fc_logits = nn.Conv2d(self.input_size, self.num_classes,\n kernel_size=1, stride=1,\n padding=0, bias=False)\n else:\n self.fc_logits = nn.Linear(self.input_size, self.num_classes)\n\n # build the loss\n self.loss_ctc = build_loss(loss_ctc)\n\n def init_weights(self, pretrained=None):\n \"\"\"\n\n Args:\n pretrained (str): model path of the pre_trained model\n\n Returns:\n\n \"\"\"\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n logger.info(\"WarpCTCHead:\")\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for name, param in self.named_parameters():\n if 'bias' in name:\n init.constant_(param, 0.0)\n elif 'weight' in name:\n init.kaiming_normal_(param)\n\n def forward(self, x, text=None, is_train=None):\n \"\"\"\n\n Args:\n x (Torch.Tensor): input feature\n text (Torch.Tensor): label information\n is_train (bool): whether is training state or test state\n\n Returns:\n Torch.Tensor: model output feature\n\n \"\"\"\n pred = self.fc_logits(x)\n return pred\n\n def convert(self, text):\n \"\"\"\n Args:\n text (str): text format information\n\n Returns:\n Torch.Tensor: label information after converter\n\n \"\"\"\n return self.converter(text)\n\n def get_target(self, gt_texts):\n \"\"\"\n Args:\n gt_texts (str): text format label information\n\n Returns:\n Torch.Tensor: vector transformed by text label\n Returns:\n Torch.Tensor: vector transformed by text length\n \"\"\"\n if gt_texts is not None:\n text, length = self.converter.encode(gt_texts)\n return text, length\n return None, None\n\n def loss(self, pred, target):\n \"\"\"\n\n Args:\n pred (Torch.Tensor): model prediction\n target (Torch.Tensor): label information\n\n Returns:\n Torch.Tensor: model training loss\n\n \"\"\"\n gt_text, text_length = target\n loss = dict()\n if len(pred.shape) == 4:\n if self.use_1x1conv:\n pred = pred.permute(0, 2, 3, 1) # nchw -> nhwc\n pred = pred.squeeze(1) # n h w c\n prob = pred.log_softmax(2)\n prob_size = torch.IntTensor([prob.size(1)] * prob.size(0))\n prob = 
prob.permute(1, 0, 2)\n\n # torch.backends.cudnn.enabled=False\n loss_ctc = self.loss_ctc(prob, gt_text, prob_size, text_length)\n # torch.backends.cudnn.enabled=True\n loss['loss_warpctc'] = loss_ctc\n return loss\n\n def get_pred_text(self, pred, batch_max_length, beam_search=False, beam_size=2):\n \"\"\"\n\n Args:\n pred (Torch.Tensor): model output feature\n batch_max_length (int): max output text length\n beam_search (bool): whether to use beam search to decode\n beam_size (int): beam size\n\n Returns:\n list(str): true text format prediction transformed by the converter\n\n \"\"\"\n\n if len(pred.shape) == 4:\n if self.use_1x1conv:\n pred = pred.permute(0, 2, 3, 1) # nchw -> nhwc\n pred = pred.squeeze(1)\n\n # whether use beam search\n if beam_search:\n pred = pred.log_softmax(2)\n batch_size = pred.size(0)\n preds_str = list()\n for b in range(batch_size):\n # transfer the model prediction to text\n beam_result = self.converter.ctc_beam_search_decoder(\n pred[b, :, :], beam_size=beam_size)\n preds_str.append(beam_result)\n\n else:\n batch_size = pred.size(0)\n batch_max_length = pred.size(1)\n length_for_pred = torch.cuda.IntTensor([batch_max_length] *\n batch_size)\n\n _, preds_index = pred.max(2)\n preds_index = preds_index.contiguous().view(-1)\n\n # transfer the model prediction to text\n preds_str = self.converter.decode(preds_index,\n length_for_pred,\n get_before_decode=True)\n\n return preds_str\n" ]
[ [ "torch.no_grad" ], [ "torch.no_grad", "numpy.stack" ], [ "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.bmm", "numpy.concatenate", "numpy.linalg.norm", "numpy.log", "torch.nn.MaxPool2d", "numpy.arange", "numpy.transpose", "numpy.linalg.inv", "numpy.expand_dims", "numpy.square", "torch.zeros", "numpy.zeros", "torch.nn.ReLU", "torch.nn.Conv2d", "numpy.stack", "numpy.fill_diagonal", "numpy.ones", "torch.from_numpy", "torch.nn.functional.grid_sample", "torch.nn.AdaptiveAvgPool2d", "numpy.meshgrid" ], [ "torch.nn.Linear", "torch.nn.init.constant_", "torch.cuda.IntTensor", "torch.nn.init.kaiming_normal_", "torch.nn.Conv2d" ] ]
samirgadkari/companies
[ "f683a3d077ec3d9b7241e9c91e6393b290f80b2e", "f683a3d077ec3d9b7241e9c91e6393b290f80b2e" ]
[ "ml/tokens.py", "ml/validation_test_split.py" ]
[ "import os\nimport re\nimport string\nimport numpy as np\nfrom ml.clean_tables import tag_actions\nfrom utils.environ import cleaned_tags_dir, generated_data_dir\nfrom utils.file import remove_files, get_json_from_file, \\\n write_json_to_file\n\ntokenize_attr_names = ['rowspan', 'colspan', 'style', 'align', 'width']\ntokenize_attr_subnames = ['text-align']\n\n\nclass Tokens():\n\n def __init__(self):\n self.all_chars = self.set_of_all_chars_in_data()\n\n self.regex_number_token = re.compile(r'^num\\_\\d+$')\n self.MIN_DATA_SIZE = 5\n self.MAX_DATA_SIZE = 20\n\n self.NUM_TOKENS = 1000\n\n self.tokens_fn = os.path.join(generated_data_dir(), 'tokens')\n if os.path.exists(self.tokens_fn):\n self.tokens = get_json_from_file(self.tokens_fn)\n else:\n self.tokens = self.create_tokens()\n\n\n def set_of_all_chars_in_data(self):\n\n all_chars = list(string.ascii_lowercase)\n all_chars.extend(string.ascii_uppercase)\n all_chars.extend(string.digits)\n\n # We convert it to a set first to ensure that there are no\n # duplicate characters\n all_chars = list(set(all_chars))\n return all_chars\n\n\n def special_tokens(self):\n # Numbers from 0 through 100 are used for percentages/widths.\n tokens = list(map(str, np.arange(101)))\n tokens.append('-')\n return tokens\n\n\n def html_structure_tokens(self):\n html_tokens = list(filter(lambda x: x is not None, tag_actions.keys()))\n html_tokens.append('document')\n html_end_tokens = ['end_' + x for x in html_tokens]\n html_tokens.extend(html_end_tokens)\n html_tokens.extend(tokenize_attr_names)\n html_tokens.extend(tokenize_attr_subnames)\n return html_tokens\n\n\n def json_structure_tokens(self):\n json_tokens = list('{}[]:\",')\n json_tokens.extend(['name', 'values', 'header', 'table_data'])\n return json_tokens\n\n\n def create_tokens(self):\n\n lengths = np.random.randint(self.MIN_DATA_SIZE,\n self.MAX_DATA_SIZE + 1,\n self.NUM_TOKENS)\n\n all_tokens = ['<sos>', '<pad>', '<eos>']\n all_tokens.extend(self.special_tokens())\n all_tokens.extend(self.html_structure_tokens())\n all_tokens.extend(self.json_structure_tokens())\n\n all_tokens.extend([''.join(np.random.choice(self.all_chars, length))\n for length in lengths])\n\n all_tokens = [x.strip() for x in all_tokens]\n write_json_to_file(self.tokens_fn, all_tokens)\n\n return all_tokens\n\n\n\n\n\n# TODO: These are left-over from earlier implementations.\n# Make sure to remove all the code that uses them\n# as well as these functions (from here to end of the file).\n\ndef clean_tokens(tokens):\n return \\\n list(filter(lambda token:\n False if token.startswith('num_') else True,\n tokens))\n\n\ndef write_tokens_file(tokens, tokens_filename, start_token_num):\n # This function is called when we're finding all tokens.\n # At this time, all 'nums_*' tokens are saved to the\n # table's individual file. 
We should not use 'nums_*' from\n # the global tokens file, so we remove them.\n tokens = clean_tokens(tokens)\n\n with open(tokens_filename, 'w') as f:\n for idx, token in enumerate(tokens):\n f.write(f'{idx+start_token_num}: {token}\\n')\n\n\ndef read_tokens_file(filename):\n tokens = {}\n with open(filename, 'r') as f:\n for line in f:\n try:\n # Don't know why this is happening, but we should\n # protect our code against it.\n if ':' not in line:\n continue\n\n idx = line.index(':')\n key, value = line[:idx], line[idx+1:]\n except ValueError as e:\n print(f'line: {line}\\nerror: {e}')\n raise e\n value = value.strip()\n tokens[key] = value\n\n # Start value of an encoded number can equal a token value.\n # Calculate the shift to be the maximum token value + 1\n # if len(tokens) > 0:\n # token_keys_as_integers = map(int, tokens.keys())\n # encoded_num_start_value_shift = max(token_keys_as_integers) + 1\n # return tokens, encoded_num_start_value_shift\n # else:\n # return tokens, None\n return tokens\n\n\ndef get_token_values(tokens):\n return set([value for value in tokens.values()])\n\n\ndef remove_all_tokens_files():\n # You want to process all of these files at once\n # to ensure that the set of tokens takes all\n # files into consideration. This is why we\n # make sure that all token files are removed\n # before starting the process.\n print('Removing all token files ...', end=' ')\n remove_files(cleaned_tags_dir(), '**', 'tokens')\n\n\ndef get_tokens_filename(filing_filename, company_dir_idx,\n company_dir, tokens_filename):\n return os.path.join(filing_filename[:company_dir_idx],\n company_dir, tokens_filename)\n\n\ndef is_number_token(token):\n return False if regex_number_token.match(token) is None else True\n\n\ndef flip_tokens_keys_values(tokens):\n return {v: k for k, v in tokens.items()}\n\n\n# def compare_tokens(t1, t2):\n# if not isinstance(t1, np.ndarray) or \\\n# not isinstance(t2, np.ndarray):\n# raise ValueError('Pass in numpy arrays for speedy comparison.')\n# num_diff = np.abs(len(t1) - len(t2))\n# check_len = min(len(t1), len(t2))\n# num_diff += np.sum(t1[:check_len] != t2[:check_len])\n# return num_diff\n", "'''\nWe need a consistent way of creating testing/validation sets.\nTraining sets will be generated from samples in separate code,\nand will be split with weights of [100, 0] which ensures all\nfiles in the directory will be part of the training set.\n\nAlgorithm:\n0. If the file containing the validation/testing information\n exists, load it. If not, continue to step 1.\n1. Get the list of filenames matching the search pattern.\n Since glob is not consistent in the order of filenames returned,\n sort the filenames.\n2. Create a list the same size as the filenames list,\n but with 0/1 values.\n 0 => file is selected for validation\n 1 => file is selected for testing\n Weights will be provided to specify the number of files used\n for validation, and those for testing.\n Training sets will be generated from files in this directory,\n but only a few will be used to generate huge sets.\n The training files will not be removed when creating the\n validation/test sets.\n3. Save the filename and selector lists in a single dictionary.\n4. When asked, return the list of validation/testing files\n5. 
Save the dictionary in a JSON file inside the directory\n where the files were obtained.\n'''\nimport os\nimport random\nimport numpy as np\nfrom utils.file import get_filenames, get_json_from_file, \\\n write_json_to_file\nfrom utils.environ import cleaned_tags_dir\n\nFILETYPE_TRAINING = 0\nFILETYPE_VALIDATION = 1\nFILETYPE_TESTING = 2\nVALIDATION_FILE_PERCENT = 80.0\nTEST_FILE_PERCENT = 20.0\n\n\n# Initialize the random number generator to ensure\n# we will get the same results each time it's\n# functions are called. This gives you repeatability,\n# which helps when debugging.\ndef init_rng():\n random.seed(32)\n\n\n# cum_weights = cumulative weights for the validation and testing sets.\n# If the cum_weights are [80, 20], then 80% of the filenames\n# will be used for validation, and 20% for testing.\ndef validation_test_selectors(num_filenames,\n weights=[VALIDATION_FILE_PERCENT,\n TEST_FILE_PERCENT]):\n return random.choices([FILETYPE_VALIDATION,\n FILETYPE_TESTING], weights=weights, k=num_filenames)\n\n\ndef training_selectors(num_filenames):\n return random.choices([FILETYPE_TRAINING], weights=[100], k=num_filenames)\n\n\ndef select_filenames(filenames, selectors, filename_type):\n names = np.array(filenames)\n select_names = np.array(selectors)\n return names[select_names == filename_type]\n\n\ndef selectors_contain_filename_type(selectors, filename_type):\n return any(x == filename_type for x in selectors)\n\n\ndef matching_filenames(saved_filenames_path,\n all_filename_paths,\n filename_type=0,\n selector_weights=[VALIDATION_FILE_PERCENT,\n TEST_FILE_PERCENT]):\n '''\n selector_weights: For training, selector weights will be [100, 0].\n This is so we can use all the files for training. Our training\n files are not the original ones - each will be generated.\n For validation/testing, we want selector weights to be [80, 20].\n This means we will validate on 80% of our actual files,\n and test on 20%.\n '''\n\n init_rng() # Initialize the random number generator.\n\n try:\n names = get_json_from_file(saved_filenames_path)\n\n # This will allow us to regenerate the filenames list\n # for the new filename type that is passed in.\n if not selectors_contain_filename_type(names['selectors'],\n filename_type):\n raise FileNotFoundError\n\n return select_filenames(names['filenames'],\n names['selectors'],\n filename_type)\n except FileNotFoundError:\n all_filenames = []\n for paths in all_filename_paths:\n all_filenames.extend(get_filenames(paths))\n\n # Some of our directories will have files which have been processed.\n # Ignore those files by filtering them out.\n all_filenames = [fn for fn in all_filenames if\n fn.endswith(('html', 'json',\n 'expected_json',\n 'table-extracted',\n 'unescaped'))]\n all_filenames.sort()\n\n if filename_type == FILETYPE_TRAINING:\n selectors = training_selectors(len(all_filenames))\n else:\n selectors = validation_test_selectors(len(all_filenames),\n selector_weights)\n names = {'filename_type': filename_type,\n 'filenames': all_filenames,\n 'selectors': selectors}\n write_json_to_file(saved_filenames_path, names)\n return select_filenames(names['filenames'],\n names['selectors'],\n filename_type)\n\n\ndef test_matching_filenames(training):\n paths = [os.path.join(cleaned_tags_dir(),\n '*', '10-k', '*', '*', '*.unescaped')]\n saved_filenames_path = os.path.join(cleaned_tags_dir(),\n 'validation_test_split')\n if int(training) == FILETYPE_TRAINING:\n print('Training test')\n training_filenames = matching_filenames(saved_filenames_path,\n paths,\n 
FILETYPE_TRAINING)\n print(f'len(training_filenames): {len(training_filenames)}')\n else:\n print('Validation/testing test')\n validation_filenames = matching_filenames(saved_filenames_path,\n paths,\n FILETYPE_VALIDATION)\n test_filenames = matching_filenames(saved_filenames_path,\n paths,\n FILETYPE_TESTING)\n\n if len(set(validation_filenames) & set(test_filenames)) != 0:\n print(f'Error !! Some filenames in validation also in test.')\n\n print(f'len(validation_filenames): {len(validation_filenames)}')\n print(f'len(test_filenames): {len(test_filenames)}')\n\n num_validation_files = len(validation_filenames)\n num_test_files = len(test_filenames)\n total_num_files = num_validation_files + num_test_files\n\n validation_file_percent = num_validation_files/total_num_files * 100.\n test_file_percent = num_test_files/total_num_files * 100.\n if abs(validation_file_percent - VALIDATION_FILE_PERCENT) < 0.1 and \\\n abs(test_file_percent - TEST_FILE_PERCENT) < 0.1:\n print(f'Correct validation/test ratio of files selected')\n else:\n print(f'Error !! Incorrect validation/test ratio '\n f'of files selected')\n print('validation_file_percent: {:4.1f}'\n .format(validation_file_percent))\n print('test_file_percent: {:4.1f}'.format(test_file_percent))\n" ]
[ [ "numpy.arange", "numpy.random.randint", "numpy.random.choice" ], [ "numpy.array" ] ]
aldro61/microbiome-summer-school-2017
[ "5f7fa384b66ea776db0d6e9c397f3d143254389b" ]
[ "exercises/code/basics.model.complexity.py" ]
[ "\"\"\"\nUnderfitting vs overfitting interactive example\n\nAuthor: Alexandre Drouin\nInspired by http://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom matplotlib.widgets import Slider\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split, cross_val_score, KFold\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\nMAX_DEGREE = 20\n\nclass DiscreteSlider(Slider):\n \"\"\"\n A matplotlib slider widget with discrete steps.\n\n Source: https://stackoverflow.com/questions/13656387/can-i-make-matplotlib-sliders-more-discrete\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"Identical to Slider.__init__, except for the \"increment\" kwarg.\n \"increment\" specifies the step size that the slider will be discritized\n to.\"\"\"\n self.inc = kwargs.pop('increment', 0.5)\n Slider.__init__(self, *args, **kwargs)\n\n def set_val(self, val):\n discrete_val = int(val / self.inc) * self.inc\n # We can't just call Slider.set_val(self, discrete_val), because this\n # will prevent the slider from updating properly (it will get stuck at\n # the first step and not \"slide\"). Instead, we'll keep track of the\n # the continuous value as self.val and pass in the discrete value to\n # everything else.\n xy = self.poly.xy\n xy[2] = discrete_val, 1\n xy[3] = discrete_val, 0\n self.poly.xy = xy\n self.valtext.set_text(self.valfmt % discrete_val)\n if self.drawon:\n self.ax.figure.canvas.draw()\n self.val = val\n if not self.eventson:\n return\n for cid, func in self.observers.iteritems():\n func(discrete_val)\n\n\ndef fit_linear_regression(X, y, degree):\n return Pipeline([(\"polynomial_features\", PolynomialFeatures(degree=degree,\n include_bias=False)),\n (\"linear_regression\", LinearRegression())]\n ).fit(X, y)\n\n\nclass ModelSelectionPlot(object):\n def __init__(self, n_samples, random_state):\n self.inc = 1.0\n\n self.fig, (self.ax1, self.ax2) = plt.subplots(ncols=2)\n self.sliderax = self.fig.add_axes([0.2, 0.02, 0.6, 0.03], facecolor=\"lightgray\")\n\n self.slider = DiscreteSlider(self.sliderax, 'Degree', 1, MAX_DEGREE,\n increment=self.inc, valinit=self.inc)\n self.slider.on_changed(self.update)\n self.slider.drawon = False\n\n # Generate training and testing data\n true_fun = lambda X: np.cos(1.5 * np.pi * X)\n X = random_state.rand(n_samples)\n y = true_fun(X) + random_state.randn(n_samples) * 0.1\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, train_size=0.5,\n random_state=random_state)\n self.X_train = self.X_train.reshape(-1, 1)\n self.X_test = self.X_test.reshape(-1, 1)\n\n # Plot the data\n self.ax1.plot(self.X_train, self.y_train, 'bo', markersize=5, label=\"Train\")\n self.ax1.plot(self.X_test, self.y_test, 'ro', markersize=5, label=\"Test\")\n x_draw_sampling = np.linspace(0, 1, 100)\n self.ax1.plot(x_draw_sampling, true_fun(x_draw_sampling), label=\"True function\")\n\n # Train the learning algorithm using degree = 1\n estimator = fit_linear_regression(self.X_train, self.y_train, degree=1)\n x_draw_sampling = np.linspace(0, 1, 100).reshape(-1, 1)\n self.model_plot, = self.ax1.plot(x_draw_sampling, estimator.predict(x_draw_sampling), label=\"Model\")\n\n # Plot the accuracy of the learned model\n self.train_score_plot, = self.ax2.plot([1], [estimator.score(self.X_train, self.y_train)], label=\"Training set\", \n markersize=5)\n self.test_score_plot, 
= self.ax2.plot([1], [estimator.score(self.X_test, self.y_test)], label=\"Testing set\", \n markersize=5)\n self.degree_marker = self.ax2.axvline(1, linestyle=\"--\", color=\"red\")\n\n # Left subplot formatting\n self.ax1.set_xlabel(\"X\")\n self.ax1.set_ylabel(\"y\")\n self.ax1.set_title(\"Model\")\n self.ax1.legend()\n\n # Right subplot formatting\n self.ax2.set_xlabel(\"Degree hyperparameter\")\n self.ax2.set_ylabel(\"Coefficient of determination ($r^2$)\")\n self.ax2.set_xlim([1, MAX_DEGREE])\n self.ax2.set_ylim([0.5, 1])\n self.ax2.set_title(\"Accuracy\")\n self.ax2.legend()\n\n # Main plot formatting\n plt.suptitle(\"Use the slider to explore different values of the degree hyperparameter\")\n\n\n def update(self, degree):\n # Train the algorithm with the specified degree and plot its predictions\n estimator = fit_linear_regression(self.X_train, self.y_train, degree=int(degree))\n x_draw = np.linspace(0, 1, 100)\n self.model_plot.set_data(x_draw, estimator.predict(x_draw.reshape(-1, 1)))\n \n # Update the score plots\n def _update_score_plot(score_plot, new_score):\n t1, t2 = score_plot.get_data()\n t1 = np.hstack((t1, [degree]))\n t2 = np.hstack((t2, [new_score]))\n sorter = t1.argsort()\n t1 = t1[sorter]\n t2 = t2[sorter]\n score_plot.set_data(t1, t2)\n _update_score_plot(self.train_score_plot, estimator.score(self.X_train, self.y_train))\n _update_score_plot(self.test_score_plot, estimator.score(self.X_test, self.y_test))\n \n # Place the vertical marker at the current degree\n self.degree_marker.set_data([degree, degree], self.degree_marker.get_data()[1])\n \n # Update the slider's text and redraw the figure\n self.slider.valtext.set_text('{}'.format(degree))\n self.fig.canvas.draw()\n\n def show(self):\n plt.show()\n\n\nif __name__ == \"__main__\":\n ModelSelectionPlot(n_samples=50, random_state=np.random.RandomState(1)).show()" ]
[ [ "sklearn.linear_model.LinearRegression", "numpy.random.RandomState", "matplotlib.pyplot.suptitle", "numpy.hstack", "sklearn.preprocessing.PolynomialFeatures", "matplotlib.pyplot.subplots", "numpy.cos", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.widgets.Slider.__init__" ] ]
acl21/deep-active-learning-pytorch
[ "637fd507235632903bcf84ed841ff524d847b94e" ]
[ "pycls/models/alexnet.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nfrom torch.utils.model_zoo import load_url as load_state_dict_from_url\r\nfrom typing import Any\r\n\r\n\r\n__all__ = ['AlexNet', 'alexnet']\r\n\r\n\r\nmodel_urls = {\r\n 'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',\r\n}\r\n\r\n\r\nclass AlexNet(nn.Module):\r\n '''\r\n AlexNet modified (features) for CIFAR10. Source: https://github.com/icpm/pytorch-cifar10/blob/master/models/AlexNet.py. \r\n '''\r\n def __init__(self, num_classes: int = 1000, use_dropout=False) -> None:\r\n super(AlexNet, self).__init__()\r\n self.use_dropout = use_dropout\r\n self.features = nn.Sequential(\r\n nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2),\r\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2),\r\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2),\r\n )\r\n # self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\r\n self.fc_block = nn.Sequential(\r\n nn.Linear(256 * 2 * 2, 4096, bias=False),\r\n nn.BatchNorm1d(4096),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(4096, 4096, bias=False),\r\n nn.BatchNorm1d(4096),\r\n nn.ReLU(inplace=True),\r\n )\r\n self.classifier = nn.Sequential(\r\n nn.Linear(4096, num_classes),\r\n )\r\n self.penultimate_active = False\r\n self.drop = nn.Dropout(p=0.5)\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n x = self.features(x)\r\n # x = self.avgpool(x)\r\n z = torch.flatten(x, 1)\r\n if self.use_dropout:\r\n x = self.drop(x)\r\n z = self.fc_block(z)\r\n x = self.classifier(z)\r\n if self.penultimate_active:\r\n return z, x\r\n return x\r\n\r\n\r\ndef alexnet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> AlexNet:\r\n r\"\"\"AlexNet model architecture from the\r\n `\"One weird trick...\" <https://arxiv.org/abs/1404.5997>`_ paper.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n progress (bool): If True, displays a progress bar of the download to stderr\r\n \"\"\"\r\n model = AlexNet(**kwargs)\r\n if pretrained:\r\n state_dict = load_state_dict_from_url(model_urls['alexnet'],\r\n progress=progress)\r\n model.load_state_dict(state_dict)\r\n return model" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.MaxPool2d", "torch.utils.model_zoo.load_url", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.flatten" ] ]
anguswilliams91/jbc-turing-rss-nowcasting
[ "8c91e568dcf0dfcdf48e03cac86ad01bc47f8dcc" ]
[ "models/distributions.py" ]
[ "import numpy as np\nimport scipy.stats\nimport torch as t\nfrom torch.distributions import Normal, Poisson\nimport math\n\nfrom .utils import betaln, binomln, kappa_marginal_moments\n\n\n# flat gaussian mixture\nclass GaussianMixture:\n def __init__(self, mus, sigs):\n \"\"\"\n Args:\n - mus: (t.tensor) vector of component means [shape (n_particles,)]\n - sigs: (t.tensor) vector of component variances [shape (n_particles,)]\n \"\"\"\n self._kernel = Normal(loc=mus, scale=sigs)\n self._n_particles = len(mus)\n\n def log_prob(self, lam, keepdims=False):\n \"\"\"\n Log probability of scalar poisson rate under\n the Gaussian Mixture.\n\n Args:\n - lam: (scalar/size 0 tensor) Poisson rate\n \"\"\"\n if type(lam) == float:\n lam = t.tensor([lam])\n elif len(lam.size()) == 0:\n lam = lam.view(1)\n\n log_p = t.logsumexp(\n self._kernel.log_prob(lam[:, None]), dim=1, keepdims=keepdims\n )\n normalize = math.log(self._n_particles)\n return log_p - normalize\n\n def sample(self, n_samples):\n return self._kernel.sample(n_samples)\n\n\nclass PoissonMixture:\n def __init__(self, rates):\n \"\"\"\n Args:\n - rates: (t.tensor) vector of component means [shape (n_particles,)]\n \"\"\"\n self._kernel = Poisson(rates)\n self._n_particles = len(rates)\n self.support = (0, np.inf)\n\n def log_prob(self, x, keepdims=False):\n \"\"\"\n Log probability of scalar count under\n the Poisson Mixture.\n\n Args:\n - x: (scalar/size 0 tensor) count\n \"\"\"\n if type(x) == float:\n x = t.tensor([x])\n elif len(x.size()) == 0:\n x = x.view(1)\n\n log_p = t.logsumexp(self._kernel.log_prob(x), dim=1, keepdims=keepdims)\n normalize = math.log(self._n_particles)\n return log_p - normalize\n\n def sample(self, n_samples):\n return self._kernel.sample(n_samples)\n\n\nclass LaggedBetaBinomial:\n def __init__(self, ys, alphas, betas, prior):\n \"\"\"\n beta-binomial emission distribution\n\n Args:\n - ys: (t.tensor) [shape (n_lag_steps,)]\n - alphas (t.tensor) first beta shape parameters for thinning prior [shape (n_lag_steps,)]\n - alphas (t.tensor) second beta shape parameters for thinning prior [shape (n_lag_steps,)]\n - prior_x (distribution) prior distribution on the true count. 
Must implement log_prob\n \"\"\"\n # support of the true count for latent marginalization\n try:\n lower = prior.support[0]\n except:\n lower = prior.support.lower_bound\n\n self._support_lower = max(lower, int(ys.max()))\n\n self._ys = ys\n self._alphas = alphas\n self._betas = betas\n self._prior = prior\n\n self._left = self._ys + self._alphas # (n_lag_steps,)\n self._right = betas - ys # (n_lag_steps,)\n self._log_denom = betaln(alphas, betas) # (n_lag_steps,)\n\n def log_prob(self, x, support_check=True):\n if support_check:\n if x < self._support_lower:\n return t.tensor([-np.inf])\n\n if type(x) == float:\n x = t.tensor([x])\n elif len(x.size()) == 0:\n x = x.view(1)\n\n right = x[None, :] + self._right[:, None] # (n_lag_steps, x_dim)\n log_num = betaln(self._left[:, None], right) # (n_lag_steps, x_dim)\n log_binom = binomln(x[None, :], self._ys[:, None]) # (n_lag_steps, x_dim)\n log_prior = self._prior.log_prob(x[:, None]) # [:, None]) # (x_dim, prior_dim)\n\n log_beta_binom = (\n log_binom[:, :, None]\n + log_num[:, :, None]\n - self._log_denom[:, None, None]\n + log_prior[None, :]\n ) # (n_lag_steps, x_dim, prior_dim)\n\n return log_beta_binom.sum(dim=0) # (x_dim, prior_dim)\n\n def log_marginal(self, support_max=100):\n\n try:\n upper = prior.support[1]\n except:\n upper = support_max\n\n alpha_f, beta_f = self._alphas[-1], self._betas[-1]\n mu = alpha_f / (alpha_f + beta_f) # 0.5, var = 0.5\n sig = np.sqrt(\n alpha_f * beta_f / ((alpha_f + beta_f) ** 2 * (alpha_f + beta_f + 1))\n )\n\n xs = t.arange(self._support_lower, support_max).float()\n\n return self.log_prob(xs, support_check=False).logsumexp(dim=0) # (prior_dim,)\n\n\nclass PoissonBetaBinomial:\n def __init__(self, ys, alphas, betas, prior_x, prior_lam):\n \"\"\"\n Args:\n - ys: (t.tensor) vector of reported counts [shape (n_lag_steps,)]\n - alphas: (t.tensor) vector of beta prior shape parameters [shape (n_lag_steps,)]\n - betas: (t.tensor) vector of beta prior shape parameters [shape (n_lag_steps,)]\n - prior_x: (distribution) prior distribution on true count [must implement log_prob]\n - prior_lam: (distribution) prior distribution on poisson rate [must implement log_prob]\n \"\"\"\n\n # support of the true count for latent marginalization\n support_lower = max(prior_x.support[0], int(ys.max()))\n self._xs = t.arange(support_lower, prior_x.support[1]).float()\n\n # set prior\n self._prior_lam = prior_lam\n\n self._log_beta_binom = self._beta_binom(ys, alphas, betas, prior_x)\n self._poisson_log_norm = t.lgamma(self._xs + 1)\n\n # utils for sampler\n self.is_continuous = True\n self.support = (0, np.inf)\n\n def log_prob(self, lam, support_check=True):\n \"\"\"\n log-probability of scalar poisson rate under\n the Poisson beta-binomial emission model.\n\n Args:\n - lam: (float or 0-dim tensor) poisson rate\n \"\"\"\n support_lower, support_upper = self.support\n if support_check:\n if not self._support_check(lam):\n return -np.inf\n\n if type(lam) == float:\n lam = t.tensor([lam])\n elif len(lam.size()) == 0:\n lam = lam.view(1)\n\n log_poisson = (\n self._xs[:, None] * np.log(lam)[None, :]\n - lam[None, :]\n - self._poisson_log_norm[:, None]\n ) # (x_dim, lam_dim)\n log_series = (log_poisson[None, :] + self._log_beta_binom[:, :, None]).sum(\n axis=0\n ) # (x_dim, lam_dim)\n if self._prior_lam is None:\n log_prior_lam = 0.\n else:\n log_prior_lam = self._prior_lam.log_prob(lam) # (lam_dim)\n log_prob_lam = t.logsumexp(log_series, dim=0) + log_prior_lam # (lam_dim)\n\n return log_prob_lam\n\n def _beta_binom(self, ys, 
alphas, betas, prior_x):\n \"\"\"\n beta-binomial emission distribution\n\n Args:\n - ys: (t.tensor) [shape (n_lag_steps,)]\n - alphas (t.tensor) first beta shape parameters for thinning prior [shape (n_lag_steps,)]\n - alphas (t.tensor) second beta shape parameters for thinning prior [shape (n_lag_steps,)]\n - prior_x (distribution) prior distribution on the true count. Must implement log_prob\n \"\"\"\n xs = self._xs\n\n left = (ys + alphas)[:, None] # (n_lag_steps, 1)\n right = xs - ys[:, None] + betas[:, None] # (n_lag_steps, x_dim)\n log_num = betaln(left, right) # (n_lag_steps, x_dim)\n log_binom = binomln(xs[None, :], ys[:, None]) # (n_lag_steps, x_dim)\n log_denom = betaln(alphas, betas)[:, None] # (n_lag_steps, 1)\n log_prior_x = prior_x.log_prob(xs) # (x_dim)\n\n log_beta_binom = (\n log_binom + log_num - log_denom + log_prior_x\n ) # (n_lag_steps, x_dim)\n\n return log_beta_binom\n\n def _support_check(self, lam):\n return self.support[0] <= lam <= self.support[1]\n\n\nclass CountSmoothingDistribution:\n def __init__(self, ys, a, b, lambda_smoothing_particles):\n\n prior = Poisson(lambda_smoothing_particles)\n self._emission = LaggedBetaBinomial(ys, a, b, prior)\n _n_particles = len(lambda_smoothing_particles)\n self._log_normalizer = math.log(_n_particles)\n\n def log_prob(self, x):\n weights = self._emission.log_prob(\n x, support_check=False\n ) - self._emission.log_marginal(support_max=x.max())\n lp = t.logsumexp(weights, dim=1) - self._log_normalizer\n return lp\n\n\nclass AdditiveDriftDistribution:\n def __init__(self, kappa_sigma, prior_particles, emission_dist):\n\n self._prior_lambdas = prior_particles[:, 0].squeeze().numpy()\n self._prior_kappas = prior_particles[:, 1].squeeze().numpy()\n self._kappa_sigma = kappa_sigma\n self.y_likelihood = emission_dist\n\n def sample(self, length_scale, burn_in=100, thin=100, n_samples=500):\n\n n_steps = n_samples * thin + burn_in\n\n kappa_proposal_dist = Normal(0, scale=length_scale)\n deltas = kappa_proposal_dist.sample([n_steps]).squeeze()\n\n # init\n kap = np.mean(self._prior_kappas)\n lam_idx = np.random.choice(np.arange(len(self._prior_lambdas)))\n lam = (t.tensor([self._prior_lambdas[lam_idx]]) + kap).abs()\n\n ll = self.y_likelihood.log_prob(lam) + Normal(\n self._prior_kappas[lam_idx], self._kappa_sigma\n ).log_prob(kap)\n samples = []\n\n for i, delta in enumerate(deltas):\n # sampler_pbar.update()\n kap_p = kap + delta\n lam_idx = np.random.choice(np.arange(len(self._prior_lambdas)))\n lam_p = kap_p + self._prior_lambdas[lam_idx]\n\n weight = sum(self._prior_lambdas == self._prior_lambdas[lam_idx]).item()\n\n # component likelihood\n lam_p_ll = self.y_likelihood.log_prob(lam_p)\n kap_p_ll = Normal(self._prior_kappas[lam_idx], self._kappa_sigma).log_prob(\n kap_p\n )\n p_ll = lam_p_ll + kap_p_ll + np.log(weight)\n\n log_prob_accept = p_ll - ll\n\n if log_prob_accept > 0:\n accept = True\n else:\n p = t.exp(log_prob_accept).item()\n accept = np.random.choice([True, False], p=[p, 1 - p])\n\n if accept:\n kap = kap_p\n lam = lam_p\n ll = p_ll\n\n samples.append(t.tensor([lam, kap]))\n\n return t.stack(samples[burn_in:][::thin])\n\n\nclass DriftSmoothingDistribution:\n def __init__(\n self,\n lambda_filtering_particles,\n lambda_smoothing_particles,\n prior_kappa_loc,\n prior_kappa_scale,\n random_walk_scale,\n ):\n\n # required for kappa log probability\n self._filtering = lambda_filtering_particles\n self._smoothing = lambda_smoothing_particles\n self._rw_scale = random_walk_scale\n self._prior_kappa = 
Normal(loc=prior_kappa_loc, scale=prior_kappa_scale)\n\n # Marginal normalizer is a gaussian mixture\n mixture_locs, mixture_scales = kappa_marginal_moments(\n prior_kappa_loc,\n prior_kappa_scale,\n random_walk_scale,\n lambda_filtering_particles,\n )\n normalizer = GaussianMixture(mixture_locs, mixture_scales)\n self._row_norm = normalizer.log_prob(lambda_smoothing_particles, keepdims=True)\n\n def log_prob(self, kappa):\n\n # prior probability of kappa\n log_prior = self._prior_kappa.log_prob(kappa)\n\n # likelihood function for kappa marginalized over the filtering and smoothing distributions\n transition_log_proba = self.particle_transition_matrix(kappa)\n marginal_log_likelihood = t.logsumexp(\n transition_log_proba - self._row_norm, dim=(0, 1)\n )\n\n # smoothing probability for kappa\n particle_norm = math.log(self._smoothing.shape[0]) + math.log(\n self._filtering.shape[0]\n )\n lp = log_prior + marginal_log_likelihood - particle_norm\n\n return lp\n\n def particle_transition_matrix(self, kappa):\n tm_loc = kappa * self._filtering\n tm_scale = self._rw_scale\n transition_dist = Normal(loc=tm_loc, scale=tm_scale)\n transition_log_prob_matrix = transition_dist.log_prob(self._smoothing[:, None])\n return transition_log_prob_matrix\n\n\n# This lets me sample lam\nclass CorrectedPoissonBetaBinomial:\n def __init__(\n self, ys, alphas, betas, prior_x, prior_lam, prior_correction, multidim=False\n ):\n self._pbb = PoissonBetaBinomial(ys, alphas, betas, prior_x, prior_lam=None)\n self._prior_lam = prior_lam\n self._prior_correction = prior_correction\n self._multidim = multidim\n\n def log_prob(self, lam, support_check=True):\n\n if not self._multidim:\n if support_check:\n if lam < 0:\n return -np.inf\n\n # LAM MUST BE SCALAR HERE\n effective_lam = lam * self._prior_correction.values\n\n if self._prior_lam is None:\n prior_lam_term = 0.\n else:\n prior_lam_term = self._prior_lam.log_prob(lam) # (lam_dim)\n\n lp = t.logsumexp(\n self._pbb.log_prob(effective_lam, support_check=False)\n + self._prior_correction.log_probas,\n axis=0,\n )\n lp = lp + prior_lam_term\n\n else:\n effective_lam = (\n lam[:, None] * self._prior_correction.values[None, :]\n ) # (lam_dim, z_dim)\n if self._prior_lam is None:\n prior_lam_term = 0.\n else:\n prior_lam_term = self._prior_lam.log_prob(lam) # (lam_dim)\n\n pbb_proba = self._pbb.log_prob(effective_lam.view(-1), support_check=False)\n lp = t.logsumexp(\n pbb_proba.view(effective_lam.shape) + self._prior_correction.log_probas,\n axis=1,\n )\n lp = lp + prior_lam_term\n\n return lp\n\n\nclass DiscreteDistribution:\n def __init__(self, values, log_probas):\n self.values = values\n self.log_probas = log_probas\n\n\nclass EmpiricalDistribution:\n def __init__(self, support, probas):\n \"\"\"\n Args:\n - support: (tuple) edges of support. Support assumed to exist on all integers between. [shape (2,)]\n - probas: (t.tensor) probabilities for each element of support. [shape (support[1] - support[0],)]\n \"\"\"\n self.support = support\n self.probas = probas\n\n self._xs = t.arange(support[0], support[1]).float()\n\n def log_prob(self, x):\n return self.probas[x.int() - self.support[0]]\n\n def sample(self, size):\n idxs = np.arange(0, len(self._xs))\n sample_idxs = np.random.choice(idxs, p=self.probas, size=size)\n samples = self._xs[sample_idxs]\n\n return samples\n" ]
[ [ "torch.stack", "numpy.random.choice", "torch.arange", "numpy.log", "torch.distributions.Normal", "numpy.mean", "torch.logsumexp", "torch.tensor", "numpy.sqrt", "torch.distributions.Poisson", "torch.lgamma", "torch.exp" ] ]
ka-ryo/M2Det
[ "d947f135e7aad996da43f5fe3a350eeead237fd0" ]
[ "layers/modules/multibox_loss.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom utils.box_utils import match, log_sum_exp\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap,\n encode_target):\n super(MultiBoxLoss, self).__init__()\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = [0.1, 0.2]\n\n def forward(self, predictions, priors, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n ground_truth (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n\n loc_data, conf_data = predictions\n priors = priors\n num = loc_data.size(0)\n num_priors = (priors.size(0))\n num_classes = self.num_classes\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.zeros((num, num_priors, 4), dtype=torch.float32)\n conf_t = torch.zeros((num, num_priors), dtype=torch.int64)\n for idx in range(num):\n truths = targets[idx][:, :-1].data\n labels = targets[idx][:, -1].data\n defaults = priors.data\n match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)\n if torch.cuda.is_available():\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n\n pos = conf_t > 0\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now\n loss_c = loss_c.view(num, -1)\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive 
and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos + neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = max(num_pos.data.sum().float(), 1)\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n" ]
[ [ "torch.zeros", "torch.nn.functional.cross_entropy", "torch.cuda.is_available", "torch.nn.functional.smooth_l1_loss" ] ]
jermwatt/blog
[ "3dd0d464d7a17c1c7a6508f714edc938dc3c03e9" ]
[ "posts/markov_chains/library/char_level_markov_model.py" ]
[ "# custom imports\nfrom . import text_parsing_utils as util\n\n# standard imporrts\nimport numpy as np\n\nclass Markov:\n def __init__(self,csvname):\n # preprocess input text (remove bad characters, make all lowercase, etc.,)\n self.text = util.load_preprocess(csvname)\n \n # parse into individual words\n self.tokens,self.keys,self.chars_to_keys,self.keys_to_chars = util.parse_chars(self.text)\n \n # generate starting index of generative model - do this here so different order models\n # can be more easily compared\n self.starter_ind = np.random.permutation(len(self.tokens))[0]\n self.starter_char = self.tokens[self.starter_ind]\n self.starter_key = self.chars_to_keys[self.starter_char]\n \n # make transition probabilities based on discrete count of input text\n def make_transition_probabilities(self,order):\n # get unique keys - for dimension of transition matrix \n unique_keys = np.unique(self.keys)\n num_unique_words = len(unique_keys)\n num_words = len(self.tokens)\n\n # generate initial zeros order O transition matrix\n # use a dictionary - or else this for sure won't scale \n # to any order > 1\n transition_matrix = {}\n\n # sweep through tokens list, update each individual distribution\n # as you go - each one a column\n for i in range(order,num_words):\n # grab current key, and previous order keys\n next_key = self.keys[i]\n prev_keys = tuple(self.keys[i-order:i])\n\n ## update transition matrix\n # we've seen current key already\n if prev_keys in transition_matrix.keys():\n if next_key in transition_matrix[prev_keys].keys():\n transition_matrix[prev_keys][next_key] += 1\n else:\n transition_matrix[prev_keys][next_key] = 1\n else: # we haven't seen key already, so create new subdict\n transition_matrix[prev_keys] = {}\n transition_matrix[prev_keys][next_key] = 1\n \n # assign to global\n self.order = order\n self.transition_matrix = transition_matrix\n \n def generate_text(self,num_chars):\n # use transition matrix to generate sentence of desired length\n # starting at randomly chosen word (all of this is done using\n # the associated keys, then re-translated into words)\n generated_chars = self.tokens[self.starter_ind:self.starter_ind +self.order]\n generated_keys = [self.chars_to_keys[s] for s in generated_chars]\n\n # produce next keys / words\n for i in range(num_chars):\n # get current key\n prev_keys = tuple(generated_keys[i:i+self.order])\n\n # use maximum index of this distribution in transition matrix\n # to get next key\n stats = self.transition_matrix[prev_keys]\n next_key = max(stats, key=lambda key: stats[key])\n\n # store next key\n generated_keys.append(next_key)\n \n # translate generated keys back into words and print\n for n in range(self.order,len(generated_keys)):\n key = generated_keys[n] \n char = self.keys_to_chars[key]\n generated_chars.append(char)\n \n # return predictions\n sentence = ''.join(generated_chars)\n \n # seperate seed from generated component\n seed = generated_chars[:self.order]\n self.seed = ''.join(seed)\n generated = generated_chars[self.order:]\n self.generated = ''.join(generated)\n \n # print true text\n print ('-------- TRUE TEXT -------')\n true_text = [self.tokens[s] for s in range(self.starter_ind,self.starter_ind + self.order + num_chars)]\n true_text = ''.join(true_text)\n print (true_text)\n print ('\\n')\n \n # print seed and generated component\n print ('-------- ORDER = ' + str(self.order) + ' MODEL TEXT -------')\n print('\\x1b[31m' + self.seed + '\\x1b[0m' + '' + '\\x1b[34m' + self.generated + '\\x1b[0m')" ]
[ [ "numpy.unique" ] ]
l-Imoon/jcvi
[ "db70bb98c7969bb0cc7b9941a2cc2dc8c5d1b783" ]
[ "jcvi/apps/ks.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\nCalculation of synonymous substitutions (Ks).\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport os.path as op\nimport csv\nimport logging\n\nimport numpy as np\n\nfrom math import log, sqrt, pi, exp\nfrom itertools import product, combinations\nfrom functools import partial\n\nfrom Bio import SeqIO\nfrom Bio import AlignIO\nfrom Bio.Align.Applications import ClustalwCommandline, MuscleCommandline\n\nfrom jcvi.formats.base import LineFile, must_open\nfrom jcvi.graphics.base import plt, savefig, AbstractLayout, markup\nfrom jcvi.utils.table import write_csv\nfrom jcvi.utils.cbook import gene_name\nfrom jcvi.apps.base import OptionParser, ActionDispatcher, mkdir, sh, \\\n Popen, getpath, iglob\n\n\nCLUSTALW_BIN = partial(getpath, name=\"CLUSTALW2\", warn=\"warn\")\nMUSCLE_BIN = partial(getpath, name=\"MUSCLE\", warn=\"warn\")\nPAL2NAL_BIN = partial(getpath, name=\"PAL2NAL\", warn=\"warn\")\nPAML_BIN = partial(getpath, name=\"PAML\", warn=\"warn\")\n\n\nclass AbstractCommandline:\n\n def run(self):\n r = Popen(str(self))\n return r.communicate()\n\n\nclass YnCommandline(AbstractCommandline):\n \"\"\"Little commandline for yn00.\n \"\"\"\n def __init__(self, ctl_file, command=PAML_BIN(\"yn00\")):\n self.ctl_file = ctl_file\n self.parameters = []\n self.command = command\n\n def __str__(self):\n return self.command + \" %s >/dev/null\" % self.ctl_file\n\n\nclass MrTransCommandline(AbstractCommandline):\n \"\"\"Simple commandline faker.\n \"\"\"\n def __init__(self, prot_align_file, nuc_file, output_file, outfmt=\"paml\",\n command=PAL2NAL_BIN(\"pal2nal.pl\")):\n self.prot_align_file = prot_align_file\n self.nuc_file = nuc_file\n self.output_file = output_file\n self.outfmt = outfmt\n self.command = command\n\n self.parameters = []\n\n def __str__(self):\n return self.command + \" %s %s -output %s > %s\" % \\\n (self.prot_align_file, self.nuc_file, self.outfmt, self.output_file)\n\n\ndef main():\n\n actions = (\n ('batch', 'compute ks for a set of anchors file'),\n ('fromgroups', 'flatten the gene families into pairs'),\n ('prepare', 'prepare pairs of sequences'),\n ('calc', 'calculate Ks between pairs of sequences'),\n ('subset', 'subset pre-calculated Ks according to pairs file'),\n ('gc3', 'filter the Ks results to remove high GC3 genes'),\n ('report', 'generate a distribution of Ks values'),\n ('multireport', 'generate several Ks value distributions in same figure'),\n )\n p = ActionDispatcher(actions)\n p.dispatch(globals())\n\n\ndef batch(args):\n \"\"\"\n %prog batch all.cds *.anchors\n\n Compute Ks values for a set of anchors file. This will generate a bunch of\n work directories for each comparisons. 
The anchorsfile should be in the form\n of specie1.species2.anchors.\n \"\"\"\n from jcvi.apps.grid import MakeManager\n\n p = OptionParser(batch.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) < 2:\n sys.exit(not p.print_help())\n\n cdsfile = args[0]\n anchors = args[1:]\n workdirs = [\".\".join(op.basename(x).split(\".\")[:2]) for x in anchors]\n for wd in workdirs:\n mkdir(wd)\n\n mm = MakeManager()\n for wd, ac in zip(workdirs, anchors):\n pairscdsfile = wd + \".cds.fasta\"\n cmd = \"python -m jcvi.apps.ks prepare {} {} -o {}\".\\\n format(ac, cdsfile, pairscdsfile)\n mm.add((ac, cdsfile), pairscdsfile, cmd)\n ksfile = wd + \".ks\"\n cmd = \"python -m jcvi.apps.ks calc {} -o {} --workdir {}\".\\\n format(pairscdsfile, ksfile, wd)\n mm.add(pairscdsfile, ksfile, cmd)\n mm.write()\n\n\nclass LayoutLine (object):\n\n def __init__(self, row, delimiter=','):\n args = row.rstrip().split(delimiter)\n args = [x.strip() for x in args]\n self.ksfile = args[0]\n self.components = int(args[1])\n self.label = args[2]\n self.color = args[3]\n self.marker = args[4]\n\n def __str__(self):\n return \", \".join(str(x) for x in (self.ksfile, self.components,\n self.label, self.color, self.marker))\n\n\nclass Layout (AbstractLayout):\n\n def __init__(self, filename, delimiter=','):\n super(Layout, self).__init__(filename)\n if not op.exists(filename):\n ksfiles = iglob(\".\", \"*.ks\")\n header = \"Ks file|ncomponents|label|color|marker\".split(\"|\")\n contents = []\n for ksfile in ksfiles:\n leg = op.basename(ksfile).rsplit(\".\", 1)[0]\n if leg.count(\".\") == 1:\n leg = leg.replace(\".\", \" *vs.* \")\n contents.append((ksfile, \"1\", leg, \"\", \"\"))\n write_csv(header, contents, comment=True, filename=filename)\n\n fp = open(filename)\n for row in fp:\n if row[0] == '#':\n continue\n self.append(LayoutLine(row, delimiter=delimiter))\n\n self.assign_colors()\n self.assign_markers()\n\n\nclass KsPlot (object):\n\n def __init__(self, ax, ks_max, bins, legendp='upper left'):\n\n self.ax = ax\n self.ks_max = ks_max\n self.interval = ks_max / bins\n self.legendp = legendp\n self.lines = []\n self.labels = []\n\n def add_data(self, data, components=1, label=\"Ks\",\n color='r', marker='.', fill=False, fitted=True):\n\n ax = self.ax\n ks_max = self.ks_max\n interval = self.interval\n\n line, line_mixture = plot_ks_dist(ax, data, interval, components, ks_max,\n color=color, marker=marker,\n fill=fill, fitted=fitted)\n self.lines.append(line)\n self.labels.append(label)\n\n if fitted:\n self.lines.append(line_mixture)\n self.labels.append(label + \" (fitted)\")\n\n def draw(self, title=\"*Ks* distribution\", filename=\"Ks_plot.pdf\"):\n\n ax = self.ax\n ks_max = self.ks_max\n lines = self.lines\n labels = [markup(x) for x in self.labels]\n legendp = self.legendp\n if len(lines) > 1:\n leg = ax.legend(lines, labels, loc=legendp,\n shadow=True, fancybox=True, prop={\"size\": 10})\n leg.get_frame().set_alpha(.5)\n\n ax.set_xlim((0, ks_max - self.interval))\n ylim = ax.get_ylim()[-1]\n ax.set_ylim(0, ylim)\n ax.set_title(markup(title), fontweight=\"bold\")\n ax.set_xlabel(markup('Synonymous substitutions per site (*Ks*)'))\n ax.set_ylabel('Percentage of gene pairs')\n\n ax.set_xticklabels(ax.get_xticks(), family='Helvetica')\n ax.set_yticklabels(ax.get_yticks(), family='Helvetica')\n\n savefig(filename, dpi=300)\n\n\ndef multireport(args):\n \"\"\"\n %prog multireport layoutfile\n\n Generate several Ks value distributions in the same figure. 
If the layout\n file is missing then a template file listing all ks files will be written.\n\n The layout file contains the Ks file, number of components, colors, and labels:\n\n # Ks file, ncomponents, label, color, marker\n LAP.sorghum.ks, 1, LAP-sorghum, r, o\n SES.sorghum.ks, 1, SES-sorghum, g, +\n MOL.sorghum.ks, 1, MOL-sorghum, m, ^\n\n If color or marker is missing, then a random one will be assigned.\n \"\"\"\n p = OptionParser(multireport.__doc__)\n p.set_outfile(outfile=\"Ks_plot.pdf\")\n add_plot_options(p)\n opts, args, iopts = p.set_image_options(args, figsize=\"6x6\")\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n layoutfile, = args\n ks_min = opts.vmin\n ks_max = opts.vmax\n bins = opts.bins\n fill = opts.fill\n layout = Layout(layoutfile)\n print(layout, file=sys.stderr)\n\n fig = plt.figure(1, (iopts.w, iopts.h))\n ax = fig.add_axes([.12, .1, .8, .8])\n kp = KsPlot(ax, ks_max, bins, legendp=opts.legendp)\n for lo in layout:\n data = KsFile(lo.ksfile)\n data = [x.ng_ks for x in data]\n data = [x for x in data if ks_min <= x <= ks_max]\n kp.add_data(data, lo.components, label=lo.label, \\\n color=lo.color, marker=lo.marker,\n fill=fill, fitted=opts.fit)\n\n kp.draw(title=opts.title, filename=opts.outfile)\n\n\ndef get_GC3(cdsfile):\n from jcvi.formats.fasta import Fasta\n\n f = Fasta(cdsfile, lazy=True)\n GC3 = {}\n for name, rec in f.iteritems_ordered():\n positions = rec.seq[2::3].upper()\n gc_counts = sum(1 for x in positions if x in \"GC\")\n gc_ratio = gc_counts * 1. / len(positions)\n GC3[name] = gc_ratio\n\n return GC3\n\n\ndef plot_GC3(GC3, cdsfile, fill=\"white\"):\n from jcvi.graphics.histogram import histogram\n\n numberfile = \"{0}.gc3\".format(cdsfile)\n fw = must_open(numberfile, \"w\")\n fw.write(\"\\n\".join(map(str, GC3.values())))\n fw.close()\n histogram(numberfile, vmin=0, vmax=1, xlabel=\"GC3\", title=cdsfile,\n bins=50, skip=0, ascii=False, log=0, fill=fill)\n\n logging.debug(\"{0} GC3 values plotted to {1}.pdf\".\\\n format(len(GC3), numberfile))\n\n\ndef gc3(args):\n \"\"\"\n %prog gc3 ksfile cdsfile [cdsfile2] -o newksfile\n\n Filter the Ks results to remove high GC3 genes. High GC3 genes are\n problematic in Ks calculation - see Tang et al. 2010 PNAS. Specifically, the\n two calculation methods produce drastically different results for these\n pairs. Therefore we advise to remoeve these high GC3 genes. 
This is often\n the case for studying cereal genes.\n\n If 2 genomes are involved, the cdsfile of the 2nd genome can be provided\n concatenated or separated.\n \"\"\"\n p = OptionParser(gc3.__doc__)\n p.add_option(\"--plot\", default=False, action=\"store_true\",\n help=\"Also plot the GC3 histogram [default: %default]\")\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n\n outfile = opts.outfile\n plot = opts.plot\n\n if not 1 < len(args) < 4:\n sys.exit(not p.print_help())\n\n ks_file, cdsfile = args[:2]\n GC3 = get_GC3(cdsfile)\n if plot:\n plot_GC3(GC3, cdsfile, fill=\"green\")\n\n if len(args) == 3:\n cdsfile2 = args[2]\n GC3_2 = get_GC3(cdsfile2)\n GC3.update(GC3_2)\n if plot:\n plot_GC3(GC3_2, cdsfile2, fill=\"lightgreen\")\n\n data = KsFile(ks_file)\n noriginals = len(data)\n\n fw = must_open(outfile, \"w\")\n writer = csv.writer(fw)\n writer.writerow(fields.split(\",\"))\n nlines = 0\n cutoff = .75\n for d in data:\n a, b = d.name.split(\";\")\n aratio, bratio = GC3[a], GC3[b]\n if (aratio + bratio) / 2 > cutoff:\n continue\n writer.writerow(d)\n nlines += 1\n logging.debug(\"{0} records written (from {1}).\".format(nlines, noriginals))\n\n\ndef extract_pairs(abed, bbed, groups):\n \"\"\"\n Called by fromgroups(), extract pairs specific to a pair of species.\n \"\"\"\n agenome = op.basename(abed.filename).split(\".\")[0]\n bgenome = op.basename(bbed.filename).split(\".\")[0]\n aorder = abed.order\n border = bbed.order\n pairsfile = \"{0}.{1}.pairs\".format(agenome, bgenome)\n fw = open(pairsfile, \"w\")\n\n is_self = abed.filename == bbed.filename\n npairs = 0\n for group in groups:\n iter = combinations(group, 2) if is_self \\\n else product(group, repeat=2)\n\n for a, b in iter:\n if a not in aorder or b not in border:\n continue\n\n print(\"\\t\".join((a, b)), file=fw)\n npairs += 1\n\n logging.debug(\"File `{0}` written with {1} pairs.\".format(pairsfile, npairs))\n\n\ndef fromgroups(args):\n \"\"\"\n %prog fromgroups groupsfile a.bed b.bed ...\n\n Flatten the gene familes into pairs, the groupsfile is a file with each line\n containing the members, separated by comma. The commands also require\n several bed files in order to sort the pairs into different piles (e.g.\n pairs of species in comparison.\n \"\"\"\n from jcvi.formats.bed import Bed\n\n p = OptionParser(fromgroups.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) < 2:\n sys.exit(not p.print_help())\n\n groupsfile = args[0]\n bedfiles = args[1:]\n beds = [Bed(x) for x in bedfiles]\n fp = open(groupsfile)\n groups = [row.strip().split(\",\") for row in fp]\n for b1, b2 in product(beds, repeat=2):\n extract_pairs(b1, b2, groups)\n\n\ndef find_first_isoform(a, f):\n if a in f:\n return a\n for i in range(100):\n ia = \".\".join((a, str(i)))\n if ia in f:\n return ia\n return a\n\n\ndef prepare(args):\n \"\"\"\n %prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta\n\n Pick sequences from cdsfile to form pairs, ready to be calculated. The\n pairsfile can be generated from formats.blast.cscore(). 
The first two\n columns contain the pair.\n \"\"\"\n from jcvi.formats.fasta import Fasta\n\n p = OptionParser(prepare.__doc__)\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n outfile = opts.outfile\n\n if len(args) == 2:\n pairsfile, cdsfile = args\n pepfile = None\n elif len(args) == 3:\n pairsfile, cdsfile, pepfile = args\n else:\n sys.exit(not p.print_help())\n\n f = Fasta(cdsfile)\n fp = open(pairsfile)\n fw = must_open(outfile, \"w\")\n if pepfile:\n assert outfile != \"stdout\", \"Please specify outfile name.\"\n f2 = Fasta(pepfile)\n fw2 = must_open(outfile + \".pep\", \"w\")\n for row in fp:\n if row[0] == '#':\n continue\n a, b = row.split()[:2]\n if a == b:\n logging.debug(\"Self pairs found: {0} - {1}. Ignored\".format(a, b))\n continue\n\n if a not in f:\n a = find_first_isoform(a, f)\n assert a, a\n if b not in f:\n b = find_first_isoform(b, f)\n assert b, b\n\n acds = f[a]\n bcds = f[b]\n SeqIO.write((acds, bcds), fw, \"fasta\")\n if pepfile:\n apep = f2[a]\n bpep = f2[b]\n SeqIO.write((apep, bpep), fw2, \"fasta\")\n fw.close()\n if pepfile:\n fw2.close()\n\n\ndef calc(args):\n \"\"\"\n %prog calc [prot.fasta] cds.fasta > out.ks\n\n Protein file is optional. If only one file is given, it is assumed to\n be CDS sequences with correct frame (frame 0). Results will be written to\n stdout. Both protein file and nucleotide file are assumed to be Fasta format,\n with adjacent records as the pairs to compare.\n\n Author: Haibao Tang <bao@uga.edu>, Brad Chapman, Jingping Li\n Calculate synonymous mutation rates for gene pairs\n\n This does the following:\n 1. Fetches a protein pair.\n 2. Aligns the protein pair with clustalw (default) or muscle.\n 3. Convert the output to Fasta format.\n 4. Use this alignment info to align gene sequences using PAL2NAL\n 5. Run PAML yn00 to calculate synonymous mutation rates.\n \"\"\"\n from jcvi.formats.fasta import translate\n\n p = OptionParser(calc.__doc__)\n p.add_option(\"--longest\", action=\"store_true\",\n help=\"Get longest ORF, only works if no pep file, \"\\\n \"e.g. 
ESTs [default: %default]\")\n p.add_option(\"--msa\", default=\"clustalw\", choices=(\"clustalw\", \"muscle\"),\n help=\"software used to align the proteins [default: %default]\")\n p.add_option(\"--workdir\", default=os.getcwd(), help=\"Work directory\")\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n\n if len(args) == 1:\n protein_file, dna_file = None, args[0]\n elif len(args) == 2:\n protein_file, dna_file = args\n else:\n print(\"Incorrect arguments\", file=sys.stderr)\n sys.exit(not p.print_help())\n\n output_h = must_open(opts.outfile, \"w\")\n print(fields, file=output_h)\n work_dir = op.join(opts.workdir, \"syn_analysis\")\n mkdir(work_dir)\n\n if not protein_file:\n protein_file = dna_file + \".pep\"\n translate_args = [dna_file, \"--outfile=\" + protein_file]\n if opts.longest:\n translate_args += [\"--longest\"]\n dna_file, protein_file = translate(translate_args)\n\n prot_iterator = SeqIO.parse(open(protein_file), \"fasta\")\n dna_iterator = SeqIO.parse(open(dna_file), \"fasta\")\n for p_rec_1, p_rec_2, n_rec_1, n_rec_2 in \\\n zip(prot_iterator, prot_iterator, dna_iterator, dna_iterator):\n\n print(\"--------\", p_rec_1.name, p_rec_2.name, file=sys.stderr)\n if opts.msa == \"clustalw\":\n align_fasta = clustal_align_protein((p_rec_1, p_rec_2), work_dir)\n elif opts.msa == \"muscle\":\n align_fasta = muscle_align_protein((p_rec_1, p_rec_2), work_dir)\n mrtrans_fasta = run_mrtrans(align_fasta, (n_rec_1, n_rec_2), work_dir)\n if mrtrans_fasta:\n ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng = \\\n find_synonymous(mrtrans_fasta, work_dir)\n if ds_subs_yn is not None:\n pair_name = \"%s;%s\" % (p_rec_1.name, p_rec_2.name)\n output_h.write(\"%s\\n\" % (\",\".join(str(x) for x in (pair_name,\n ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng))))\n output_h.flush()\n\n # Clean-up\n sh(\"rm -rf 2YN.t 2YN.dN 2YN.dS rst rub rst1 syn_analysis\")\n\n\ndef find_synonymous(input_file, work_dir):\n \"\"\"Run yn00 to find the synonymous subsitution rate for the alignment.\n \"\"\"\n cwd = os.getcwd()\n os.chdir(work_dir)\n # create the .ctl file\n ctl_file = \"yn-input.ctl\"\n output_file = \"nuc-subs.yn\"\n ctl_h = open(ctl_file, \"w\")\n ctl_h.write(\"seqfile = %s\\noutfile = %s\\nverbose = 0\\n\" %\n (op.basename(input_file), output_file))\n ctl_h.write(\"icode = 0\\nweighting = 0\\ncommonf3x4 = 0\\n\")\n ctl_h.close()\n\n cl = YnCommandline(ctl_file)\n print(\"\\tyn00:\", cl, file=sys.stderr)\n r, e = cl.run()\n ds_value_yn = None\n ds_value_ng = None\n dn_value_yn = None\n dn_value_ng = None\n\n # Nei-Gojobori\n output_h = open(output_file)\n row = output_h.readline()\n while row:\n if row.find(\"Nei & Gojobori\") >=0:\n for x in range(5):\n row = next(output_h)\n dn_value_ng, ds_value_ng = row.split('(')[1].split(')')[0].split()\n break\n row = output_h.readline()\n output_h.close()\n\n # Yang\n output_h = open(output_file)\n for line in output_h:\n if line.find(\"+-\") >= 0 and line.find(\"dS\") == -1:\n parts = line.split(\" +-\")\n ds_value_yn = extract_subs_value(parts[1])\n dn_value_yn = extract_subs_value(parts[0])\n\n if ds_value_yn is None or ds_value_ng is None:\n h = open(output_file)\n print(\"yn00 didn't work: \\n%s\" % h.read(), file=sys.stderr)\n\n os.chdir(cwd)\n return ds_value_yn, dn_value_yn, ds_value_ng, dn_value_ng\n\n\ndef extract_subs_value(text):\n \"\"\"Extract a subsitution value from a line of text.\n\n This is just a friendly function to grab a float value for Ks and Kn\n values from the junk I get from the last line of the yn00 file.\n\n Line:\n 2 1 
52.7 193.3 2.0452 0.8979 0.0193 0.0573 +- 0.0177\n 2.9732 +- 3.2002\n\n Parts:\n [' 2 1 52.7 193.3 2.0452 0.8979 0.0193 0.0573',\n ' 0.0177 2.9732', ' 3.2002\\n']\n\n So we want 0.0573 for Kn and 2.9732 for Ks.\n \"\"\"\n parts = text.split()\n value = float(parts[-1])\n\n return value\n\n\ndef run_mrtrans(align_fasta, recs, work_dir, outfmt=\"paml\"):\n \"\"\"Align nucleotide sequences with mrtrans and the protein alignment.\n \"\"\"\n align_file = op.join(work_dir, \"prot-align.fasta\")\n nuc_file = op.join(work_dir, \"nuc.fasta\")\n output_file = op.join(work_dir, \"nuc-align.mrtrans\")\n\n # make the prot_align file and nucleotide file\n align_h0 = open(align_file + \"0\", \"w\")\n align_h0.write(str(align_fasta))\n align_h0.close()\n prot_seqs = {}\n i = 0\n for rec in SeqIO.parse(align_h0.name, \"fasta\"):\n prot_seqs[i] = rec.seq\n i += 1\n align_h = open(align_file, \"w\")\n for i, rec in enumerate(recs):\n if len(rec.id) > 30:\n rec.id = rec.id[:28] + \"_\" + str(i)\n rec.description = \"\"\n print(\">{0}\\n{1}\".format(rec.id, prot_seqs[i]), file=align_h)\n align_h.close()\n SeqIO.write(recs, open(nuc_file, \"w\"), \"fasta\")\n\n # run the program\n cl = MrTransCommandline(align_file, nuc_file, output_file, outfmt=outfmt)\n r, e = cl.run()\n if e is None:\n print(\"\\tpal2nal:\", cl, file=sys.stderr)\n return output_file\n elif e.read().find(\"could not translate\") >= 0:\n print(\"***pal2nal could not translate\", file=sys.stderr)\n return None\n\n\ndef clustal_align_protein(recs, work_dir, outfmt=\"fasta\"):\n \"\"\"\n Align given proteins with clustalw.\n recs are iterable of Biopython SeqIO objects\n \"\"\"\n fasta_file = op.join(work_dir, \"prot-start.fasta\")\n align_file = op.join(work_dir, \"prot.aln\")\n SeqIO.write(recs, open(fasta_file, \"w\"), \"fasta\")\n\n clustal_cl = ClustalwCommandline(cmd=CLUSTALW_BIN(\"clustalw2\"),\n infile=fasta_file, outfile=align_file, outorder=\"INPUT\",\n type=\"PROTEIN\")\n stdout, stderr = clustal_cl()\n\n aln_file = open(clustal_cl.outfile)\n alignment = AlignIO.read(aln_file, \"clustal\")\n print(\"\\tDoing clustalw alignment: %s\" % clustal_cl, file=sys.stderr)\n if outfmt == \"fasta\":\n return alignment.format(\"fasta\")\n if outfmt == \"clustal\":\n return alignment\n\n\ndef muscle_align_protein(recs, work_dir, outfmt=\"fasta\", inputorder=True):\n \"\"\"\n Align given proteins with muscle.\n recs are iterable of Biopython SeqIO objects\n \"\"\"\n fasta_file = op.join(work_dir, \"prot-start.fasta\")\n align_file = op.join(work_dir, \"prot.aln\")\n SeqIO.write(recs, open(fasta_file, \"w\"), \"fasta\")\n\n muscle_cl = MuscleCommandline(cmd=MUSCLE_BIN(\"muscle\"),\n input=fasta_file, out=align_file, seqtype=\"protein\",\n clwstrict=True)\n stdout, stderr = muscle_cl()\n alignment = AlignIO.read(muscle_cl.out, \"clustal\")\n\n if inputorder:\n try:\n muscle_inputorder(muscle_cl.input, muscle_cl.out)\n except ValueError:\n return \"\"\n alignment = AlignIO.read(muscle_cl.out, \"fasta\")\n\n print(\"\\tDoing muscle alignment: %s\" % muscle_cl, file=sys.stderr)\n if outfmt == \"fasta\":\n return alignment.format(\"fasta\")\n if outfmt == \"clustal\":\n return alignment.format(\"clustal\")\n\n\ndef muscle_inputorder(inputfastafile, alnfile, trunc_name=True):\n \"\"\"\n Fix for muscle -stable option according to here:\n http://drive5.com/muscle/stable.html\n \"\"\"\n sh(\"cp {0} {0}.old\".format(alnfile), log=False)\n maxi = 30 if trunc_name else 1000\n\n aa = AlignIO.read(alnfile, \"clustal\")\n alignment = dict((a.id[:maxi], a) for a in 
aa)\n if trunc_name and len(alignment) < len(aa):\n raise ValueError\\\n (\"ERROR: The first 30 chars of your seq names are not unique\")\n\n fw = must_open(alnfile, \"w\")\n for rec in SeqIO.parse(inputfastafile, \"fasta\"):\n a = alignment[rec.id[:maxi]]\n fw.write(\">{0}\\n{1}\\n\".format(a.id[:maxi], a.seq))\n\n fw.close()\n sh(\"rm {0}.old\".format(alnfile), log=False)\n\n\ndef subset(args):\n \"\"\"\n %prog subset pairsfile ksfile1 ksfile2 ... -o pairs.ks\n\n Subset some pre-calculated ks ka values (in ksfile) according to pairs\n in tab delimited pairsfile/anchorfile.\n \"\"\"\n p = OptionParser(subset.__doc__)\n p.add_option(\"--noheader\", action=\"store_true\",\n help=\"don't write ksfile header line [default: %default]\")\n p.add_option(\"--block\", action=\"store_true\",\n help=\"preserve block structure in input [default: %default]\")\n p.set_stripnames()\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n pairsfile, ksfiles = args[0], args[1:]\n noheader = opts.noheader\n block = opts.block\n if block:\n noheader = True\n outfile = opts.outfile\n\n ksvals = {}\n for ksfile in ksfiles:\n ksvals.update(dict((line.name, line) for line in \\\n KsFile(ksfile, strip_names=opts.strip_names)))\n\n fp = open(pairsfile)\n fw = must_open(outfile, \"w\")\n\n if not noheader:\n print(fields, file=fw)\n\n i = j = 0\n for row in fp:\n if row[0] == '#':\n if block:\n print(row.strip(), file=fw)\n continue\n a, b = row.split()[:2]\n name = \";\".join((a, b))\n if name not in ksvals:\n name = \";\".join((b, a))\n if name not in ksvals:\n j += 1\n print(\"\\t\".join((a, b, \".\", \".\")), file=fw)\n continue\n ksline = ksvals[name]\n if block:\n print(\"\\t\".join(str(x) for x in (a, b, ksline.ks)), file=fw)\n else:\n ksline.name = \";\".join((a, b))\n print(ksline, file=fw)\n i += 1\n fw.close()\n\n logging.debug(\"{0} pairs not found in ksfiles\".format(j))\n logging.debug(\"{0} ks records written to `{1}`\".format(i, outfile))\n return outfile\n\n\nfields = \"name,yn_ks,yn_ka,ng_ks,ng_ka\"\ndescriptions = {\n 'name': 'Gene pair',\n 'yn_ks': 'Yang-Nielson Ks estimate',\n 'yn_ka': 'Yang-Nielson Ka estimate',\n 'ng_ks': 'Nei-Gojobori Ks estimate',\n 'ng_ka': 'Nei-Gojobori Ka estimate'}\n\n\nclass KsLine:\n\n def __init__(self, row, strip_names=False):\n args = row.strip().split(\",\")\n self.name = args[0]\n self.yn_ks = self.get_float(args[1])\n self.yn_ka = self.get_float(args[2])\n self.ng_ks = self.get_float(args[3])\n self.ng_ka = self.get_float(args[4])\n self.ks = self.ng_ks\n if \";\" in self.name:\n self.gene_a, self.gene_b = self.name.split(\";\")\n if strip_names:\n self.gene_a = gene_name(self.gene_a)\n self.gene_b = gene_name(self.gene_b)\n\n def get_float(self, x):\n try:\n x = float(x)\n except:\n x = -1\n return x\n\n def __str__(self):\n return \",\".join(str(x) for x in (self.name, self.yn_ks, self.yn_ka,\n self.ng_ks, self.ng_ka))\n\n @property\n def anchorline(self):\n return \"\\t\".join((gene_name(self.gene_a), gene_name(self.gene_b),\n \"{:.3f}\".format(self.ks)))\n\n\nclass KsFile(LineFile):\n\n def __init__(self, filename, strip_names=False):\n super(KsFile, self).__init__(filename)\n\n fp = open(filename)\n for row in fp:\n ksline = KsLine(row, strip_names=strip_names)\n if ksline.name == \"name\": # header\n continue\n self.append(ksline)\n\n logging.debug('File `{0}` contains a total of {1} gene pairs'.\\\n format(filename, len(self)))\n\n def print_to_anchors(self, outfile):\n fw = must_open(outfile, 
\"w\")\n for row in self:\n print(row.anchorline, file=fw)\n fw.close()\n\n\ndef my_hist(ax, l, interval, max_r, color='g', marker='.', fill=False):\n if not l:\n return\n\n n, p = [], []\n total_len = len(l)\n for i in np.arange(0, max_r, interval):\n xmin, xmax = i - .5 * interval, i + .5 * interval\n nx = [x for x in l if xmin <= x < xmax]\n n.append(i)\n p.append(len(nx) * 100. / total_len)\n\n if fill:\n from pylab import poly_between\n\n xs, ys = poly_between(n, 0, p)\n line = ax.fill(xs, ys, fc=color, alpha=.5)\n\n else:\n line = ax.plot(n, p, color=color, lw=2, ms=3,\n marker=marker, mfc=\"w\", mec=color, mew=2)\n\n return line\n\n\ndef lognormpdf(bins, mu, sigma):\n return np.exp(-(np.log(bins) - mu) ** 2 / (2 * sigma ** 2)) / \\\n (bins * sigma * sqrt(2 * pi))\n\n\ndef lognormpdf_mix(bins, probs, mus, sigmas, interval=.1):\n y = 0\n for prob, mu, sigma in zip(probs, mus, sigmas):\n y += prob * lognormpdf(bins, mu, sigma)\n y *= 100 * interval # Percentage\n\n return y\n\n\ndef get_mixture(data, components):\n \"\"\"\n probs = [.476, .509]\n mus = [.69069, -.15038]\n variances = [.468982e-1, .959052e-1]\n \"\"\"\n from jcvi.apps.base import popen\n\n probs, mus, sigmas = [], [], []\n fw = must_open(\"tmp\", \"w\")\n log_data = [log(x) for x in data if x > .05]\n data = \"\\n\".join([\"%.4f\" % x for x in log_data]).replace(\"inf\\n\", \"\")\n fw.write(data)\n fw.close()\n\n cmd = \"gmm-bic {0} {1} {2}\".format(components, len(log_data), fw.name)\n pipe = popen(cmd)\n\n for row in pipe:\n if row[0] != '#':\n continue\n\n atoms = row.split(\",\")\n a, b, c = atoms[1:4]\n a = float(a)\n b = float(b)\n c = float(c)\n\n mus.append(a)\n sigmas.append(b)\n probs.append(c)\n\n os.remove(fw.name)\n return probs, mus, sigmas\n\n\ndef plot_ks_dist(ax, data, interval, components, ks_max,\n color='r', marker='.', fill=False, fitted=True):\n\n line, = my_hist(ax, data, interval, ks_max,\n color=color, marker=marker, fill=fill)\n logging.debug(\"Total {0} pairs after filtering.\".format(len(data)))\n\n line_mixture = None\n if fitted:\n probs, mus, variances = get_mixture(data, components)\n\n iv = .001\n bins = np.arange(iv, ks_max, iv)\n y = lognormpdf_mix(bins, probs, mus, variances, interval)\n\n line_mixture, = ax.plot(bins, y, ':', color=color, lw=3)\n\n for i in range(components):\n peak_val = exp(mus[i])\n mixline = lognormpdf_mix(peak_val, probs, mus, variances, interval)\n ax.text(peak_val, mixline, \"Ks=%.2f\" % peak_val, \\\n color=\"w\", size=10, bbox=dict(ec='w',fc=color, \\\n alpha=.6, boxstyle='round'))\n\n return line, line_mixture\n\n\ndef add_plot_options(p):\n p.add_option(\"--fit\", default=False, action=\"store_true\",\n help=\"Plot fitted lines\")\n p.add_option(\"--vmin\", default=0., type=\"float\",\n help=\"Minimum value, inclusive [default: %default]\")\n p.add_option(\"--vmax\", default=2., type=\"float\",\n help=\"Maximum value, inclusive [default: %default]\")\n p.add_option(\"--bins\", default=40, type=\"int\",\n help=\"Number of bins to plot in the histogram [default: %default]\")\n p.add_option(\"--legendp\", default=\"upper right\",\n help=\"Place of the legend [default: %default]\")\n p.add_option(\"--nofill\", dest=\"fill\", default=True, action=\"store_false\",\n help=\"Do not fill the histogram area\")\n p.add_option(\"--title\", default=\"*Ks* distribution\",\n help=\"Title of the plot [default: %default]\")\n\n\ndef report(args):\n '''\n %prog report ksfile\n\n generate a report given a Ks result file (as produced by synonymous_calc.py).\n describe the 
median Ks, Ka values, as well as the distribution in stem-leaf plot\n '''\n from jcvi.utils.cbook import SummaryStats\n from jcvi.graphics.histogram import stem_leaf_plot\n\n p = OptionParser(report.__doc__)\n p.add_option(\"--pdf\", default=False, action=\"store_true\",\n help=\"Generate graphic output for the histogram [default: %default]\")\n p.add_option(\"--components\", default=1, type=\"int\",\n help=\"Number of components to decompose peaks [default: %default]\")\n add_plot_options(p)\n opts, args, iopts = p.set_image_options(args, figsize=\"5x5\")\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n ks_file, = args\n data = KsFile(ks_file)\n ks_min = opts.vmin\n ks_max = opts.vmax\n bins = opts.bins\n\n for f in fields.split(\",\")[1:]:\n columndata = [getattr(x, f) for x in data]\n ks = (\"ks\" in f)\n if not ks:\n continue\n\n columndata = [x for x in columndata if ks_min <= x <= ks_max]\n\n st = SummaryStats(columndata)\n title = \"{0} ({1}): \".format(descriptions[f], ks_file)\n title += \"Median:{0:.3f} (1Q:{1:.3f}|3Q:{2:.3f}||\".\\\n format(st.median, st.firstq, st.thirdq)\n title += \"Mean:{0:.3f}|Std:{1:.3f}||N:{2})\".\\\n format(st.mean, st.sd, st.size)\n\n tbins = (0, ks_max, bins) if ks else (0, .6, 10)\n digit = 2 if (ks_max * 1. / bins) < .1 else 1\n stem_leaf_plot(columndata, *tbins, digit=digit, title=title)\n\n if not opts.pdf:\n return\n\n components = opts.components\n data = [x.ng_ks for x in data]\n data = [x for x in data if ks_min <= x <= ks_max]\n\n fig = plt.figure(1, (iopts.w, iopts.h))\n ax = fig.add_axes([.12, .1, .8, .8])\n kp = KsPlot(ax, ks_max, opts.bins, legendp=opts.legendp)\n kp.add_data(data, components, fill=opts.fill, fitted=opts.fit)\n kp.draw(title=opts.title)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.arange", "numpy.log" ] ]
milyiyo/nlu
[ "d209ed11c6a84639c268f08435552248391c5573" ]
[ "tests/test_utils.py" ]
[ "import nlu\nimport pandas as pd\nimport sparknlp\n\n\n\ndef get_sample_pdf():\n data = {\"text\": ['This day sucks but tomorrow will be better ! ', 'I love this day', 'I dont like Sami']}\n text_df = pd.DataFrame(data)\n return text_df\n\n\ndef get_sample_pdf_with_labels():\n data = {\"text\": ['This day sucks', 'I love this day', 'I dont like Sami'], \"sentiment_label\": [1, 1, 0]}\n text_df = pd.DataFrame(data)\n return text_df\n\n\ndef get_sample_sdf():\n nlu.spark = sparknlp.start()\n nlu.spark_started = True\n return nlu.spark.createDataFrame(get_sample_pdf())\n\n\ndef get_sample_pdf_with_extra_cols():\n data = {\"text\": ['This day sucks', 'I love this day', 'I dont like Sami'], \"random_feature1\": [1, 1, 0], \"random_feature2\": ['d','a' , '3']}\n text_df = pd.DataFrame(data)\n return text_df\n\ndef get_sample_pdf_with_no_text_col():\n data = {\"schmext\": ['This day sucks', 'I love this day', 'I dont like Sami'], \"random_feature1\": [1, 1, 0], \"random_feature2\": ['d','a' , '3']}\n text_df = pd.DataFrame(data)\n return text_df\n\ndef get_sample_spark_dataframe():\n data = {\"text\": ['This day sucks', 'I love this day', 'I dont like Sami'], \"random_feature1\": [1, 1, 0], \"random_feature2\": ['d','a' , '3']}\n text_df = pd.DataFrame(data)\n return text_df\n\ndef get_sample_pdf_with_extra_cols_and_entities():\n data = {\"text\": ['Pater says this day sucks. He lives in America. He likes Angela Merkel from Germany', 'I love burgers from Burger King', 'I dont like Sami, he lives in Asia'], \"random_feature1\": [1, 1, 0], \"random_feature2\": ['d','a' , '3']}\n text_df = pd.DataFrame(data)\n return text_df\n\n\nfrom os.path import expanduser\n\nimport os\n\ndef download_dataset(data_url,output_file_name,output_folder,):\n import urllib.request\n import os\n download_path = create_dataset_dir_if_not_exist_and_get_path() + output_folder + output_file_name\n\n #Check if dir exists, if not create it\n # create_path_if_not_exist(data_dir )\n create_path_if_not_exist(create_dataset_dir_if_not_exist_and_get_path() + output_folder)\n\n\n from pathlib import Path\n #Check if file exists, if not download it\n if not Path(download_path).is_file():\n urllib.request.urlretrieve(data_url, download_path )\n\n print('Downloaded dataset to ',download_path)\n return download_path\n\n\ndef create_dataset_dir_if_not_exist_and_get_path():\n root = expanduser('~')\n dataset_path = root + '/nlu_test_datasets/'\n if not os.path.exists(dataset_path):\n print('Creating dir',dataset_path)\n os.mkdir(dataset_path)\n return dataset_path\n\ndef create_model_dir_if_not_exist_and_get_path():\n root = expanduser('~')\n dataset_path = root + '/nlu_test_models/'\n if not os.path.exists(dataset_path):\n print('Creating dir',dataset_path)\n os.mkdir(dataset_path)\n return dataset_path\n\n\ndef create_path_if_not_exist(path):\n #Check if dir exists, if not create it\n import os\n if not os.path.exists(path):\n print('Creating dir',path)\n os.mkdir(path)" ]
[ [ "pandas.DataFrame" ] ]
jnez71/misc
[ "397ac0b8e3bccec4daa73db8963bb0510eebebfe" ]
[ "geometry/bezier_surface.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nEfficient implementation of a Bezier surface and its differential geometry.\n\n\"\"\"\nfrom __future__ import division\nimport numpy as np\n\n################################################## CORE\n\nclass Bezier(object):\n \"\"\"\n Bezier manifold of dimension 2 embedded in Euclidean space of dimension 3.\n\n \"\"\"\n def __init__(self, knots=None):\n if knots is None:\n # Default to identity patch\n n = 4\n knots = np.zeros((n, n, 3), dtype=np.float64)\n for i in range(n):\n for j in range(n):\n knots[i, j] = np.float64((i, j, 0)) / (n-1)\n self.set_knots(knots)\n\n def set_knots(self, knots):\n \"\"\"\n Provide the control knots in an array with the first two\n dimensions indexing which knot and the third dimension\n holding the Euclidean coordinates of each knot.\n\n \"\"\"\n self.knots = np.array(knots, dtype=np.float64)\n self.degree_x = self.knots.shape[0] - 1\n self.degree_y = self.knots.shape[1] - 1\n self.dimension = self.knots.shape[2] - 1\n assert self.degree_x > 0\n assert self.degree_y > 0\n assert self.dimension == 2\n self.dknots_x = self.degree_x * np.diff(self.knots, axis=0)\n self.dknots_y = self.degree_y * np.diff(self.knots, axis=1)\n\n def evaluate(self, x, y):\n \"\"\"\n De Casteljau's algorithm is used to map the given surface coordinates\n (each from 0.0 to 1.0) to their corresponding location in Euclidean space.\n\n \"\"\"\n lerps_x = np.zeros((self.degree_x+1, self.dimension+1), dtype=np.float64)\n for i in range(len(lerps_x)):\n lerps_y = self.knots[i].copy()\n for j in range(self.degree_y):\n for k in range(self.degree_y - j):\n lerps_y[k] = (1.0-y)*lerps_y[k] + y*lerps_y[k+1]\n lerps_x[i] = lerps_y[0]\n for i in range(self.degree_x):\n for k in range(self.degree_x - i):\n lerps_x[k] = (1.0-x)*lerps_x[k] + x*lerps_x[k+1]\n return lerps_x[0]\n\n def jacobian(self, x, y):\n \"\"\"\n Returns the 2by3 Jacobian matrix of the `evaluate` function\n at the given argument. 
The Grammian of this is the metric tensor.\n\n \"\"\"\n return np.column_stack((Bezier(self.dknots_x).evaluate(x, y),\n Bezier(self.dknots_y).evaluate(x, y)))\n\n def metric(self, x, y):\n \"\"\"\n Returns the 2by2 metric tensor at the given surface coordinates.\n\n \"\"\"\n J = self.jacobian(x, y)\n return J.T.dot(J)\n\n def orientation(self, x, y, q=0.0):\n \"\"\"\n Returns a rotation matrix describing the orientation of the normal\n coordinates at [`x`, `y`] with yaw angle `q` in radians.\n\n \"\"\"\n J = self.jacobian(x, y)\n rx, ry = (J / np.linalg.norm(J, axis=0)).T\n normal = np.cross(rx, ry)\n ncrossx = np.cross(normal, rx) # must be re-unitized to mitigate roundoff error\n tangent = np.cos(q)*rx + np.sin(q)*(ncrossx / np.linalg.norm(ncrossx))\n binormal = np.cross(normal, tangent)\n R = np.column_stack((tangent, binormal, normal))\n return R / np.linalg.norm(R, axis=0) # must be re-unitized to mitigate roundoff error\n\n def plot(self, n=40, block=True):\n \"\"\"\n Plots this surface discretized by the given grid size `n`.\n Also shows the control knots and the central normal coordinate system.\n\n \"\"\"\n from matplotlib import pyplot\n from mpl_toolkits.mplot3d import Axes3D\n mesh = np.linspace(0.0, 1.0, n)\n points = np.transpose([self.evaluate(x, y) for x in mesh for y in mesh])\n quiver_origins = np.transpose([self.evaluate(mesh[n//2], mesh[n//2])]*3)\n quiver_arrows = self.orientation(mesh[n//2], mesh[n//2])\n fig = pyplot.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_title(\"bezier\", fontsize=12)\n ax.set_xlabel(\"rx\", fontsize=12)\n ax.set_ylabel(\"ry\", fontsize=12)\n ax.set_zlabel(\"rz\", fontsize=12)\n ax.scatter(*self.knots.reshape(-1, 3).T, c='r', s=80)\n ax.scatter(*points, c=points[-1, :], s=60, marker='o', edgecolors=None)\n ax.quiver(quiver_origins[0], quiver_origins[1], quiver_origins[2],\n quiver_arrows[0], quiver_arrows[1], quiver_arrows[2],\n length=0.25, color=(1.0, 0.5, 0.0), lw=2.5)\n ax.axis(\"equal\")\n pyplot.show(block=block)\n\n################################################## TEST\n\nif __name__ == \"__main__\":\n\n # Initialize a flat set of knots\n knots = np.zeros((5, 4, 3), dtype=np.float64)\n for i in range(knots.shape[0]):\n for j in range(knots.shape[1]):\n knots[i, j] = np.float64((i, j, 0))\n\n # Mess with the knots to make them more interesting\n knots[:, :, 0] *= -1.0\n knots[1:3, 1:3, 2] = -1.0\n knots[1:3, 0, 2] = (0.25, 0.5)\n knots[-1, -1, :] = (-4/2, 3/2, 0.5)\n\n # Construct the Bezier surface\n bezier = Bezier(knots)\n\n # Verify the analytical Jacobian against finite-differences at a random location\n x, y = np.random.sample(2)\n r = bezier.evaluate(x, y)\n d = 1e-6\n drdx = (bezier.evaluate(x+d, y) - r) / d\n drdy = (bezier.evaluate( x, y+d) - r) / d\n assert np.allclose(np.column_stack((drdx, drdy)), bezier.jacobian(x, y), atol=10*d)\n\n # Verify that the metric tensor computation is consistent with finite-differences\n assert np.allclose([[drdx.dot(drdx), drdx.dot(drdy)],\n [drdy.dot(drdx), drdy.dot(drdy)]], bezier.metric(x, y), atol=10*d)\n\n # Verify that the orientation calculation returns an orthonormal matrix\n R = bezier.orientation(x, y, 2*np.pi*np.random.sample())\n assert np.allclose(R.dot(R.T), np.eye(3))\n\n # Plot the corresponding Bezier surface to visually inspect\n bezier.plot()\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.column_stack", "numpy.sin", "numpy.zeros", "numpy.linspace", "matplotlib.pyplot.figure", "numpy.eye", "numpy.diff", "numpy.float64", "numpy.cos", "matplotlib.pyplot.show", "numpy.random.sample", "numpy.cross" ] ]
israelem/aceptaelreto.github.io
[ "91ac0586ef504cf4b1dd05eda32def6c39fbb34c" ]
[ "codes/2017-11-20-ardilla.py" ]
[ "import numpy as np\nfrom itertools import product\n\n\nclass Punto:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __eq__(self, punto):\n return self.x == punto.x and self.y == punto.y\n\n\ndef hay_camino(mapa, salto, inicio=Punto(0, 0)):\n if inicio.x == mapa.shape[0] - 1 and inicio.y == mapa.shape[1] - 1:\n solución = True\n else:\n lista_puntos = [Punto(x, y) for x, y in\n list(product(range(inicio.x, inicio.x + salto + 1),\n range(inicio.x, inicio.x + salto + 1)))\n if x <= mapa.shape[0] - 1 and y <= mapa.shape[1] - 1][1:]\n posición = 0\n while posición < len(lista_puntos) and mapa[lista_puntos[posición].x, lista_puntos[posición].y] == '':\n posición += 1\n if posición == len(lista_puntos):\n solución = False\n else:\n solución = hay_camino(mapa, salto, lista_puntos[posición])\n return solución\n\n\nif __name__ == '__main__':\n soluciones = []\n filas, columnas, salto, árboles = [int(x) for x in input().split()]\n while filas and columnas:\n filas += 1\n columnas += 1\n mapa = np.empty([filas, columnas], str)\n mapa[0, 0] = mapa[filas - 1, columnas - 1] = 'A'\n cortados = []\n for _ in range(árboles):\n fila, columna = [int(x) for x in input().split()]\n cortados.append(Punto(fila, columna))\n mapa[fila, columna] = 'A'\n cortado = None\n while hay_camino(mapa, salto) and cortados:\n cortado = cortados[0]\n cortados = cortados[1:]\n mapa[cortado.x, cortado.y] = ''\n if not cortados or not cortado:\n soluciones.append('NUNCA SE PUDO')\n else:\n soluciones.append(cortado)\n filas, columnas, salto, árboles = [int(x) for x in input().split()]\n for solución in soluciones:\n print(solución)\n" ]
[ [ "numpy.empty" ] ]
Ureimu/weather-robot
[ "7634195af388538a566ccea9f8a8534c5fb0f4b6" ]
[ "python_code/botcode/plugins/fun_pic_plug/point_of_intersection.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nx1 = np.array([712,653,625,605,617,635,677,762,800,872,947,1025,1111,1218,1309, 500])\ny1 = np.array([2022,1876,1710,1544,1347,1309,1025,995,850,723,705,710,761,873,1050, 2000])\n\nx_start = np.min(x1)\nx_end = np.max(x1)+1\n\nx_line = x1.copy()\ny_line = x_line * 0.9 + 500\n\ny=y1-y_line\nnLen=len(x1)\nxzero=np.zeros((nLen,))\nyzero=np.zeros((nLen,))\nfor i in range(nLen-1):\n if np.dot(y[i], y[i+1]) == 0:# %等于0的情况\n if y[i]==0:\n xzero[i]=i\n yzero[i]=0\n if y[i+1] == 0:\n xzero[i+1]=i+1\n yzero[i+1]=0\n elif np.dot(y[i],y[i+1]) < 0:# %一定有交点,用一次插值\n yzero[i] = np.dot(abs(y[i]) * y_line[i+1] + abs(y[i+1])*y_line[i], 1/(abs(y[i+1])+abs(y[i])))\n xzero[i] = (yzero[i]-500)/0.9\n else:\n pass\n\nfor i in range(nLen):\n if xzero[i]==0 and (yzero[i]==0):# %除掉不是交点的部分\n xzero[i]=np.nan\n yzero[i]=np.nan\n\nprint(xzero)\nprint(yzero)\n\nplt.plot(x1, y1, 'o-')\nplt.plot(x_line,y_line,xzero,yzero,'o')\nplt.show()\n" ]
[ [ "numpy.max", "numpy.array", "numpy.dot", "numpy.zeros", "matplotlib.pyplot.plot", "numpy.min", "matplotlib.pyplot.show" ] ]
antoniomezzacapo/qiskit-acqua
[ "102743203266ccbb18fef6d337c160246195e313", "102743203266ccbb18fef6d337c160246195e313" ]
[ "qiskit_acqua/utils/jsonutils.py", "qiskit_acqua/qpe/qpe.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Utilities for dict and json convertion.\"\"\"\n\nimport numpy\n\ndef convert_dict_to_json(in_item):\n \"\"\"\n Combs recursively through a list/dictionary and finds any non-json\n compatible elements and converts them. E.g. complex ndarray's are\n converted to lists of strings. Assume that all such elements are\n stored in dictionaries!\n Arg:\n in_item (dict or list): the input dict/list\n Returns:\n Result in_item possibly modified\n \"\"\"\n\n key_list = []\n for (item_index, item_iter) in enumerate(in_item):\n if isinstance(in_item, list):\n curkey = item_index\n else:\n curkey = item_iter\n\n if isinstance(in_item[curkey], (list, dict)):\n # go recursively through nested list/dictionaries\n convert_dict_to_json(in_item[curkey])\n elif isinstance(in_item[curkey], numpy.ndarray):\n # ndarray's are not json compatible. Save the key.\n key_list.append(curkey)\n\n # convert ndarray's to lists\n # split complex arrays into two lists because complex values are not\n # json compatible\n for curkey in key_list:\n if in_item[curkey].dtype == 'complex':\n in_item[curkey + '_ndarray_imag'] = numpy.imag(\n in_item[curkey]).tolist()\n in_item[curkey + '_ndarray_real'] = numpy.real(\n in_item[curkey]).tolist()\n in_item.pop(curkey)\n else:\n in_item[curkey] = in_item[curkey].tolist()\n \n return in_item\n \ndef convert_json_to_dict(in_item):\n \"\"\"Combs recursively through a list/dictionary that was loaded from json\n and finds any lists that were converted from ndarray and converts them back\n Arg:\n in_item (dict or list): the input dict/list\n Returns:\n Result in_item possibly modified\n \"\"\"\n\n key_list = []\n for (item_index, item_iter) in enumerate(in_item):\n if isinstance(in_item, list):\n curkey = item_index\n else:\n curkey = item_iter\n\n # flat these lists so that we can recombine back into a complex\n # number\n if '_ndarray_real' in curkey:\n key_list.append(curkey)\n continue\n\n if isinstance(in_item[curkey], (list, dict)):\n convert_json_to_dict(in_item[curkey])\n\n for curkey in key_list:\n curkey_root = curkey[0:-13]\n in_item[curkey_root] = numpy.array(in_item[curkey])\n in_item.pop(curkey)\n if curkey_root + '_ndarray_imag' in in_item:\n in_item[curkey_root] = in_item[curkey_root] + 1j * numpy.array(\n in_item[curkey_root + '_ndarray_imag'])\n in_item.pop(curkey_root + '_ndarray_imag')\n \n return in_item\n\n", "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"\nThe Quantum Phase Estimation Algorithm.\n\"\"\"\n\nimport logging\n\nfrom functools import reduce\nimport numpy as np\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\nfrom qiskit.tools.qi.pauli import Pauli\nfrom qiskit_acqua import Operator, QuantumAlgorithm, AlgorithmError\nfrom qiskit_acqua import get_initial_state_instance, get_iqft_instance\n\nlogger = logging.getLogger(__name__)\n\n\nclass QPE(QuantumAlgorithm):\n \"\"\"The Quantum Phase Estimation algorithm.\"\"\"\n\n PROP_NUM_TIME_SLICES = 'num_time_slices'\n PROP_PAULIS_GROUPING = 'paulis_grouping'\n PROP_EXPANSION_MODE = 'expansion_mode'\n PROP_EXPANSION_ORDER = 'expansion_order'\n PROP_NUM_ANCILLAE = 'num_ancillae'\n\n QPE_CONFIGURATION = {\n 'name': 'QPE',\n 'description': 'Quantum Phase Estimation for Quantum Systems',\n 'input_schema': {\n '$schema': 'http://json-schema.org/schema#',\n 'id': 'qpe_schema',\n 'type': 'object',\n 'properties': {\n PROP_NUM_TIME_SLICES: {\n 'type': 'integer',\n 'default': 1,\n 'minimum': 0\n },\n PROP_PAULIS_GROUPING: {\n 'type': 'string',\n 'default': 'random',\n 'oneOf': [\n {'enum': [\n 'random',\n 'default'\n ]}\n ]\n },\n PROP_EXPANSION_MODE: {\n 'type': 'string',\n 'default': 'suzuki',\n 'oneOf': [\n {'enum': [\n 'suzuki',\n 'trotter'\n ]}\n ]\n },\n PROP_EXPANSION_ORDER: {\n 'type': 'integer',\n 'default': 2,\n 'minimum': 1\n },\n PROP_NUM_ANCILLAE: {\n 'type': 'integer',\n 'default': 1,\n 'minimum': 1\n }\n },\n 'additionalProperties': False\n },\n 'problems': ['energy'],\n 'depends': ['initial_state', 'iqft'],\n 'defaults': {\n 'initial_state': {\n 'name': 'ZERO'\n },\n 'iqft': {\n 'name': 'STANDARD'\n }\n }\n }\n\n def __init__(self, configuration=None):\n super().__init__(configuration or self.QPE_CONFIGURATION.copy())\n self._operator = None\n self._state_in = None\n self._num_time_slices = 0\n self._paulis_grouping = None\n self._expansion_mode = None\n self._expansion_order = None\n self._num_ancillae = 0\n self._ancilla_phase_coef = 1\n self._circuit = None\n self._ret = {}\n\n def init_params(self, params, algo_input):\n \"\"\"\n Initialize via parameters dictionary and algorithm input instance\n Args:\n params: parameters dictionary\n algo_input: EnergyInput instance\n \"\"\"\n if algo_input is None:\n raise AlgorithmError(\"EnergyInput instance is required.\")\n\n operator = algo_input.qubit_op\n\n qpe_params = params.get(QuantumAlgorithm.SECTION_KEY_ALGORITHM)\n num_time_slices = qpe_params.get(QPE.PROP_NUM_TIME_SLICES)\n paulis_grouping = qpe_params.get(QPE.PROP_PAULIS_GROUPING)\n expansion_mode = qpe_params.get(QPE.PROP_EXPANSION_MODE)\n expansion_order = qpe_params.get(QPE.PROP_EXPANSION_ORDER)\n num_ancillae = qpe_params.get(QPE.PROP_NUM_ANCILLAE)\n\n # Set up initial state, we need to add computed num qubits to params\n init_state_params = params.get(QuantumAlgorithm.SECTION_KEY_INITIAL_STATE)\n init_state_params['num_qubits'] = operator.num_qubits\n init_state = get_initial_state_instance(init_state_params['name'])\n init_state.init_params(init_state_params)\n\n # Set up iqft, we need to add num qubits to params which is our num_ancillae bits here\n iqft_params = params.get(QuantumAlgorithm.SECTION_KEY_IQFT)\n iqft_params['num_qubits'] = num_ancillae\n iqft = get_iqft_instance(iqft_params['name'])\n iqft.init_params(iqft_params)\n\n 
self.init_args(\n operator, init_state, iqft, num_time_slices, num_ancillae,\n paulis_grouping=paulis_grouping, expansion_mode=expansion_mode,\n expansion_order=expansion_order)\n\n def init_args(\n self, operator, state_in, iqft, num_time_slices, num_ancillae,\n paulis_grouping='random', expansion_mode='trotter', expansion_order=1):\n if self._backend.find('statevector') >= 0:\n raise ValueError('Selected backend does not support measurements.')\n self._operator = operator\n self._state_in = state_in\n self._iqft = iqft\n self._num_time_slices = num_time_slices\n self._num_ancillae = num_ancillae\n self._paulis_grouping = paulis_grouping\n self._expansion_mode = expansion_mode\n self._expansion_order = expansion_order\n self._ret = {}\n\n def _construct_qpe_evolution(self):\n \"\"\"Implement the Quantum Phase Estimation algorithm\"\"\"\n\n a = QuantumRegister(self._num_ancillae, name='a')\n c = ClassicalRegister(self._num_ancillae, name='c')\n q = QuantumRegister(self._operator.num_qubits, name='q')\n qc = QuantumCircuit(a, q, c)\n\n # initialize state_in\n qc.data += self._state_in.construct_circuit('circuit', q).data\n\n # Put all ancillae in uniform superposition\n qc.u2(0, np.pi, a)\n\n # phase kickbacks via dynamics\n pauli_list = self._operator.reorder_paulis(grouping=self._paulis_grouping)\n if len(pauli_list) == 1:\n slice_pauli_list = pauli_list\n else:\n if self._expansion_mode == 'trotter':\n slice_pauli_list = pauli_list\n elif self._expansion_mode == 'suzuki':\n slice_pauli_list = Operator._suzuki_expansion_slice_pauli_list(\n pauli_list,\n 1,\n self._expansion_order\n )\n else:\n raise ValueError('Unrecognized expansion mode {}.'.format(self._expansion_mode))\n for i in range(self._num_ancillae):\n qc.data += self._operator.construct_evolution_circuit(\n slice_pauli_list, -2 * np.pi, self._num_time_slices, q, a, ctl_idx=i\n ).data\n # global phase shift for the ancilla due to the identity pauli term\n qc.u1(2 * np.pi * self._ancilla_phase_coef * (2 ** i), a[i])\n\n # inverse qft on ancillae\n self._iqft.construct_circuit('circuit', a, qc)\n\n # measuring ancillae\n qc.measure(a, c)\n\n self._circuit = qc\n\n def _setup_qpe(self):\n self._operator._check_representation('paulis')\n self._ret['translation'] = sum([abs(p[0]) for p in self._operator.paulis])\n self._ret['stretch'] = 0.5 / self._ret['translation']\n\n # translate the operator\n self._operator._simplify_paulis()\n translation_op = Operator([\n [\n self._ret['translation'],\n Pauli(\n np.zeros(self._operator.num_qubits),\n np.zeros(self._operator.num_qubits)\n )\n ]\n ])\n translation_op._simplify_paulis()\n self._operator += translation_op\n\n # stretch the operator\n for p in self._operator._paulis:\n p[0] = p[0] * self._ret['stretch']\n\n # check for identify paulis to get its coef for applying global phase shift on ancillae later\n num_identities = 0\n for p in self._operator.paulis:\n if np.all(p[1].v == 0) and np.all(p[1].w == 0):\n num_identities += 1\n if num_identities > 1:\n raise RuntimeError('Multiple identity pauli terms are present.')\n self._ancilla_phase_coef = p[0].real if isinstance(p[0], complex) else p[0]\n\n self._construct_qpe_evolution()\n logger.info('QPE circuit qasm length is roughly {}.'.format(\n len(self._circuit.qasm().split('\\n'))\n ))\n\n def _compute_energy(self):\n if self._circuit is None:\n self._setup_qpe()\n result = self.execute(self._circuit)\n\n rd = result.get_counts(self._circuit)\n rets = sorted([(rd[k], k) for k in rd])[::-1]\n ret = rets[0][-1][::-1]\n retval = 
sum([t[0] * t[1] for t in zip(\n [1 / 2 ** p for p in range(1, self._num_ancillae + 1)],\n [int(n) for n in ret]\n )])\n\n self._ret['measurements'] = rets\n self._ret['top_measurement_label'] = ret\n self._ret['top_measurement_decimal'] = retval\n self._ret['energy'] = retval / self._ret['stretch'] - self._ret['translation']\n\n def run(self):\n self._compute_energy()\n return self._ret\n" ]
[ [ "numpy.imag", "numpy.array", "numpy.real" ], [ "numpy.all", "numpy.zeros" ] ]
lzb863/Structured-Knowledge-Distillation-for-Dense-Prediction
[ "56db01645a0925b53ad8e9d81816858d4b5b8d78", "56db01645a0925b53ad8e9d81816858d4b5b8d78" ]
[ "libs/net/pytorchcvNets/bninception.py", "libs/net/UperNet/mobilenet.py" ]
[ "# encoding: utf-8\n\"\"\"\n BN-Inception for ImageNet-1K, implemented in PyTorch.\n Original paper: 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,'\n https://arxiv.org/abs/1502.03167.\n\"\"\"\n\n__all__ = ['BNInception', 'bninception']\n\nimport os\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom .common import conv1x1_block, conv3x3_block, conv7x7_block, Concurrent\n\nclass Inception3x3Branch(nn.Module):\n \"\"\"\n BN-Inception 3x3 branch block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n mid_channels : int\n Number of intermediate channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the second convolution.\n bias : bool, default True\n Whether the convolution layer uses a bias vector.\n use_bn : bool, default True\n Whether to use BatchNorm layers.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n mid_channels,\n stride=1,\n bias=True,\n use_bn=True):\n super(Inception3x3Branch, self).__init__()\n self.conv1 = conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels,\n bias=bias,\n use_bn=use_bn)\n self.conv2 = conv3x3_block(\n in_channels=mid_channels,\n out_channels=out_channels,\n stride=stride,\n bias=bias,\n use_bn=use_bn)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass InceptionDouble3x3Branch(nn.Module):\n \"\"\"\n BN-Inception double 3x3 branch block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n mid_channels : int\n Number of intermediate channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the second convolution.\n bias : bool, default True\n Whether the convolution layer uses a bias vector.\n use_bn : bool, default True\n Whether to use BatchNorm layers.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n mid_channels,\n stride=1,\n bias=True,\n use_bn=True):\n super(InceptionDouble3x3Branch, self).__init__()\n self.conv1 = conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels,\n bias=bias,\n use_bn=use_bn)\n self.conv2 = conv3x3_block(\n in_channels=mid_channels,\n out_channels=out_channels,\n bias=bias,\n use_bn=use_bn)\n self.conv3 = conv3x3_block(\n in_channels=out_channels,\n out_channels=out_channels,\n stride=stride,\n bias=bias,\n use_bn=use_bn)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n\nclass InceptionPoolBranch(nn.Module):\n \"\"\"\n BN-Inception avg-pool branch block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n avg_pool : bool\n Whether use average pooling or max pooling.\n bias : bool\n Whether the convolution layer uses a bias vector.\n use_bn : bool\n Whether to use BatchNorm layers.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n avg_pool,\n bias,\n use_bn):\n super(InceptionPoolBranch, self).__init__()\n if avg_pool:\n self.pool = nn.AvgPool2d(\n kernel_size=3,\n stride=1,\n padding=1,\n ceil_mode=True,\n count_include_pad=True)\n else:\n self.pool = nn.MaxPool2d(\n kernel_size=3,\n stride=1,\n padding=1,\n ceil_mode=True)\n self.conv = conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels,\n bias=bias,\n use_bn=use_bn)\n\n def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n return x\n\n\nclass StemBlock(nn.Module):\n \"\"\"\n 
BN-Inception stem block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n mid_channels : int\n Number of intermediate channels.\n bias : bool\n Whether the convolution layer uses a bias vector.\n use_bn : bool\n Whether to use BatchNorm layers.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n mid_channels,\n bias,\n use_bn):\n super(StemBlock, self).__init__()\n self.conv1 = conv7x7_block(\n in_channels=in_channels,\n out_channels=mid_channels,\n stride=2,\n bias=bias,\n use_bn=use_bn)\n self.pool1 = nn.MaxPool2d(\n kernel_size=3,\n stride=2,\n padding=0,\n ceil_mode=True)\n self.conv2 = Inception3x3Branch(\n in_channels=mid_channels,\n out_channels=out_channels,\n mid_channels=mid_channels)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n return x\n\n\nclass InceptionBlock(nn.Module):\n \"\"\"\n BN-Inception unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n mid1_channels_list : list of int\n Number of pre-middle channels for branches.\n mid2_channels_list : list of int\n Number of middle channels for branches.\n avg_pool : bool\n Whether use average pooling or max pooling.\n bias : bool\n Whether the convolution layer uses a bias vector.\n use_bn : bool\n Whether to use BatchNorm layers.\n \"\"\"\n def __init__(self,\n in_channels,\n mid1_channels_list,\n mid2_channels_list,\n avg_pool,\n bias,\n use_bn):\n super(InceptionBlock, self).__init__()\n assert (len(mid1_channels_list) == 2)\n assert (len(mid2_channels_list) == 4)\n\n self.branches = Concurrent()\n self.branches.add_module(\"branch1\", conv1x1_block(\n in_channels=in_channels,\n out_channels=mid2_channels_list[0],\n bias=bias,\n use_bn=use_bn))\n self.branches.add_module(\"branch2\", Inception3x3Branch(\n in_channels=in_channels,\n out_channels=mid2_channels_list[1],\n mid_channels=mid1_channels_list[0],\n bias=bias,\n use_bn=use_bn))\n self.branches.add_module(\"branch3\", InceptionDouble3x3Branch(\n in_channels=in_channels,\n out_channels=mid2_channels_list[2],\n mid_channels=mid1_channels_list[1],\n bias=bias,\n use_bn=use_bn))\n self.branches.add_module(\"branch4\", InceptionPoolBranch(\n in_channels=in_channels,\n out_channels=mid2_channels_list[3],\n avg_pool=avg_pool,\n bias=bias,\n use_bn=use_bn))\n\n def forward(self, x):\n x = self.branches(x)\n return x\n\n\nclass ReductionBlock(nn.Module):\n \"\"\"\n BN-Inception reduction block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n mid1_channels_list : list of int\n Number of pre-middle channels for branches.\n mid2_channels_list : list of int\n Number of middle channels for branches.\n bias : bool\n Whether the convolution layer uses a bias vector.\n use_bn : bool\n Whether to use BatchNorm layers.\n \"\"\"\n def __init__(self,\n in_channels,\n mid1_channels_list,\n mid2_channels_list,\n bias,\n use_bn):\n super(ReductionBlock, self).__init__()\n assert (len(mid1_channels_list) == 2)\n assert (len(mid2_channels_list) == 4)\n\n self.branches = Concurrent()\n self.branches.add_module(\"branch1\", Inception3x3Branch(\n in_channels=in_channels,\n out_channels=mid2_channels_list[1],\n mid_channels=mid1_channels_list[0],\n stride=2,\n bias=bias,\n use_bn=use_bn))\n self.branches.add_module(\"branch2\", InceptionDouble3x3Branch(\n in_channels=in_channels,\n out_channels=mid2_channels_list[2],\n mid_channels=mid1_channels_list[1],\n stride=2,\n bias=bias,\n use_bn=use_bn))\n 
self.branches.add_module(\"branch3\", nn.MaxPool2d(\n kernel_size=3,\n stride=2,\n padding=0,\n ceil_mode=True))\n\n def forward(self, x):\n x = self.branches(x)\n return x\n\n\nclass BNInception(nn.Module):\n \"\"\"\n BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate\n Shift,' https://arxiv.org/abs/1502.03167.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels_list : list of int\n Number of output channels for the initial unit.\n mid1_channels_list : list of list of list of int\n Number of pre-middle channels for each unit.\n mid2_channels_list : list of list of list of int\n Number of middle channels for each unit.\n bias : bool, default True\n Whether the convolution layer uses a bias vector.\n use_bn : bool, default True\n Whether to use BatchNorm layers.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n num_classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels_list,\n mid1_channels_list,\n mid2_channels_list,\n bias=True,\n use_bn=True,\n in_channels=3,\n in_size=(224, 224),\n num_classes=1000):\n super(BNInception, self).__init__()\n self.in_size = in_size\n self.num_classes = num_classes\n\n # self.features = nn.Sequential()\n self.eval_list = []\n self.init_block = StemBlock(\n in_channels=in_channels,\n out_channels=init_block_channels_list[1],\n mid_channels=init_block_channels_list[0],\n bias=bias,\n use_bn=use_bn)\n in_channels = init_block_channels_list[-1]\n self.pool2 = nn.MaxPool2d(\n kernel_size=3,\n stride=2,\n padding=0,\n ceil_mode=True)\n for i, channels_per_stage in enumerate(channels):\n mid1_channels_list_i = mid1_channels_list[i]\n mid2_channels_list_i = mid2_channels_list[i]\n stage = nn.Sequential()\n for j, out_channels in enumerate(channels_per_stage):\n if (j == 0) and (i != 0):\n stage.add_module(\"unit{}\".format(j + 1), ReductionBlock(\n in_channels=in_channels,\n mid1_channels_list=mid1_channels_list_i[j],\n mid2_channels_list=mid2_channels_list_i[j],\n bias=bias,\n use_bn=use_bn))\n else:\n avg_pool = (i != len(channels) - 1) or (j != len(channels_per_stage) - 1)\n stage.add_module(\"unit{}\".format(j + 1), InceptionBlock(\n in_channels=in_channels,\n mid1_channels_list=mid1_channels_list_i[j],\n mid2_channels_list=mid2_channels_list_i[j],\n avg_pool=avg_pool,\n bias=bias,\n use_bn=use_bn))\n in_channels = out_channels\n self.eval_list.append(\"stage{}\".format(i + 1))\n self.add_module(\"stage{}\".format(i + 1), stage)\n self.eval_list.append(\"final_pool\")\n self.add_module(\"final_pool\", nn.AvgPool2d(\n kernel_size=7,\n stride=1))\n\n self.outdim_16 = 608\n self.outdim_4 = 192\n\n self.output = nn.Linear(\n in_features=in_channels,\n out_features=num_classes)\n\n self._init_params()\n\n def _init_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv2d):\n init.kaiming_uniform_(module.weight)\n if module.bias is not None:\n init.constant_(module.bias, 0)\n\n def forward(self, x):\n self.layers = []\n x = self.init_block(x)\n self.layers.append(x)\n x = self.pool2(x)\n x = self.stage1(x)\n self.layers.append(x)\n x = self.stage2(x)\n self.layers.append(x)\n return x\n\n # def forward(self, x):\n # self.layers = []\n # for eval_elem in self.eval_list:\n # x = eval(\"self.\"+eval_elem+\"(x)\")\n # # x = 
self.features(x)\n # x = x.view(x.size(0), -1)\n # x = self.output(x)\n # return x\n\n\ndef get_bninception(model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".torch\", \"models\"),\n **kwargs):\n \"\"\"\n Create BN-Inception model with specific parameters.\n\n Parameters:\n ----------\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n init_block_channels_list = [64, 192]\n channels = [[256, 320], [576, 576, 576, 608, 608], [1056, 1024, 1024]]\n mid1_channels_list = [\n [[64, 64],\n [64, 64]],\n [[128, 64], # 3c\n [64, 96], # 4a\n [96, 96], # 4a\n [128, 128], # 4c\n [128, 160]], # 4d\n [[128, 192], # 4e\n [192, 160], # 5a\n [192, 192]],\n ]\n mid2_channels_list = [\n [[64, 64, 96, 32],\n [64, 96, 96, 64]],\n [[0, 160, 96, 0], # 3c\n [224, 96, 128, 128], # 4a\n [192, 128, 128, 128], # 4b\n [160, 160, 160, 128], # 4c\n [96, 192, 192, 128]], # 4d\n [[0, 192, 256, 0], # 4e\n [352, 320, 224, 128], # 5a\n [352, 320, 224, 128]],\n ]\n\n net = BNInception(\n channels=channels,\n init_block_channels_list=init_block_channels_list,\n mid1_channels_list=mid1_channels_list,\n mid2_channels_list=mid2_channels_list,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef bninception(**kwargs):\n \"\"\"/\n BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate\n Shift,' https://arxiv.org/abs/1502.03167.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_bninception(model_name=\"bninception\", **kwargs)\n\n\ndef _calc_width(net):\n import numpy as np\n net_params = filter(lambda p: p.requires_grad, net.parameters())\n weight_count = 0\n for param in net_params:\n weight_count += np.prod(param.size())\n return weight_count\n\n\ndef _test():\n import torch\n\n pretrained = False\n\n models = [\n bninception,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n # net.train()\n net.eval()\n weight_count = _calc_width(net)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != bninception or weight_count == 11295240)\n print(net)\n\n # x = torch.randn(1, 3, 224, 224)\n # y = net(x)\n # print(y)\n # y.sum().backward()\n # assert (tuple(y.size()) == (1, 1000))\n\n\nif __name__ == \"__main__\":\n import sys\n sys.path.insert(0,'../')\n from pytorchcvNets.common import conv1x1_block, conv3x3_block, conv7x7_block, Concurrent\n from pytorchcvNets.model_store import download_model\n _test()", "\"\"\"\nThis MobileNetV2 implementation is modified from the following repository:\nhttps://github.com/tonylins/pytorch-mobilenet-v2\n\"\"\"\n\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\nimport math\nfrom libs.net.sync_batchnorm import SynchronizedBatchNorm2d\n\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n from urllib.request import urlretrieve\n\n\n__all__ = ['mobilenetv2']\n\n\nmodel_urls = {\n 'mobilenetv2': 
'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/mobilenet_v2.pth.tar',\n}\n\n\ndef conv_bn(inp, oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n SynchronizedBatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\n\ndef conv_1x1_bn(inp, oup):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n SynchronizedBatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = round(inp * expand_ratio)\n self.use_res_connect = self.stride == 1 and inp == oup\n\n if expand_ratio == 1:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n SynchronizedBatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n SynchronizedBatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n SynchronizedBatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n SynchronizedBatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n SynchronizedBatchNorm2d(oup),\n )\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, n_class=1000, input_size=224, width_mult=1.):\n super(MobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 32\n last_channel = 1280\n interverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n\n # building first layer\n assert input_size % 32 == 0\n input_channel = int(input_channel * width_mult)\n self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel\n self.features = [conv_bn(3, input_channel, 2)]\n # building inverted residual blocks\n for t, c, n, s in interverted_residual_setting:\n output_channel = int(c * width_mult)\n for i in range(n):\n if i == 0:\n self.features.append(block(input_channel, output_channel, s, expand_ratio=t))\n else:\n self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n # building last several layers\n self.features.append(conv_1x1_bn(input_channel, self.last_channel))\n # make it nn.Sequential\n self.features = nn.Sequential(*self.features)\n\n # building classifier\n self.classifier = nn.Sequential(\n nn.Dropout(0.2),\n nn.Linear(self.last_channel, n_class),\n )\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.mean(3).mean(2)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef mobilenetv2(pretrained=False, **kwargs):\n \"\"\"Constructs a MobileNet_V2 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = MobileNetV2(n_class=1000, **kwargs)\n if pretrained:\n model.load_state_dict(load_url(model_urls['mobilenetv2']), strict=False)\n return model\n\n\ndef load_url(url, model_dir='./pretrained', map_location=None):\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n filename = url.split('/')[-1]\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n sys.stderr.write('Downloading: \"{}\" to {}\\n'.format(url, cached_file))\n urlretrieve(url, cached_file)\n return torch.load(cached_file, map_location=map_location)\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.init.kaiming_uniform_", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.init.constant_" ], [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Sequential", "torch.nn.ReLU6", "torch.nn.Conv2d", "torch.load" ] ]
lucasdavid/tf-experiment
[ "0c6e6b52c91f498dd0cb5a13beafadfeab627429" ]
[ "experiments/evaluate.py" ]
[ "# Copyright 2021 Lucas Oliveira David\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\n\n# Evaluate Baseline Experiment.\n\n## Experiment Summary\n\nTFDS dataset → images → CNN → predictions → evaluation → report\n\nExecuting precedures detailing:\n\n 1. Experiment Setup [core.experiment.setup]\n GPUs and mixed precision mechanisms are setup;\n logging engines (such as Wandb) are connected.\n \n 2. Load TFDS dataset [core.datasets.tfds.load_and_prepare]\n A tfds dataset is load according to its name, and (maybe) shuffled (?);\n the dataset is filtered according to {data.prepare.validation} (?);\n the task {data.prepare.task} is extracted from the tfrecords entries;\n samples are (maybe augmented) using [none|simple|randaug] aug strategy;\n \n 2.1 if [data.prepare.augmentation.over] == samples\n the samples in the dataset are augmented;\n the samples are batched;\n 2.2 else\n the samples are batched;\n the samples in the dataset are augmented;\n \n the number of batches in the dataset is limited by {data.prepare.take} (?);\n the batches in the dataset are preprocessed using {data.prepare.preprocess_fn} (?);\n the batches are cast to tf.float32.\n \n 3. Analysis [core.datasets.save_image_samples]\n samples are saved in disk, for ad-hoc inspection.\n\n 4. Model restoring [core.models.classification.restore]\n a {model} in saved_model format is restored from the disk, and re-compiled if necessary.\n\n 6. Model evaluation [core.testing.evaluate]\n the model is evaluated with respect to the task {evaluation.task};\n the evaluation report is saved at {paths.valid_report}\n\n 7. Teardown [ex.finish]\n wandb logs are synched with the server's copy\n threads from the `tf.distribute.MirrorStrategy` are collected\n\"\"\"\n\nimport tensorflow as tf\n\nfrom sacred import Experiment\nfrom sacred.utils import apply_backspaces_and_linefeeds\n\nimport core\nfrom core.utils import dig\n\n\nex = Experiment(save_git_info=False)\nex.captured_out_filter = apply_backspaces_and_linefeeds\n\n\n@ex.main\ndef run(setup, dataset, model, training, evaluation, _log, _run):\n _log.info(__doc__)\n\n ex = core.experiment.setup({\n 'setup': setup,\n 'dataset': dataset,\n 'model': model,\n 'evaluation': evaluation\n }, _run, **setup)\n\n train, valid, test, info = core.datasets.tfds.load_and_prepare(dataset)\n\n ex.log_examples({'train/samples': train, 'valid/samples': valid, 'test/samples': test})\n\n with ex.distributed_strategy.scope():\n model = tf.keras.models.load_model(ex.paths['export'], custom_objects=core.custom_objects)\n core.models.summary(model)\n\n if not model._is_compiled: # type: ignore\n print(f'Model {model.name} is not compiled. 
It will be recompiled '\n 'using the loss and metrics defined in the configuration.')\n\n core.training.compile_distributed(\n model,\n loss=training['loss'],\n scale_loss=training['scale_loss'],\n optimizer=training['finetune']['optimizer'],\n metrics=training['metrics'],\n distributed=ex.distributed_strategy,\n )\n\n classes = core.datasets.tfds.classes(info)\n evaluations = core.testing.evaluate(\n model,\n test,\n classes,\n **evaluation\n )\n\n layer = model.get_layer('head/logits')\n weights = getattr(layer, 'regularized_kernel', layer.kernel)\n\n (ex.log_evaluations(evaluations)\n .log_weights(classes, weights)\n .finish())\n\n\nif __name__ == '__main__':\n ex.run_commandline()\n" ]
[ [ "tensorflow.keras.models.load_model" ] ]
milankarunarathne/performance-predictor
[ "b0df42631ebd81d4b3f177ebb2346a5a54c00ee9" ]
[ "svr_regression.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\nsummary_data = 'resources/wso2apimanagerperformanceresults.csv'\nx_select_columns = [0, 1, 2, 3] # select columns to x (features)\ny_select_column_throughput = 5\ny_select_column_latency = 4\ntest_size = 0.33 # percentage for testing\nn_rows = 117 # total rows\nrow_start = 25 # testing rows at start\n\n# read the file\ndatasetno = pd.read_csv(summary_data, thousands=\",\", usecols=[0, 1, 2, 3, 7, 13],)\n# replace Echo API and Mediation API with 1 and 2\ndatapd = pd.DataFrame.replace(datasetno, to_replace=['Echo API', 'Mediation API'], value=[1, 2])\ndata = np.array(datapd, copy=True, )\n\n\ndef svr_regression_throughput(dataset, r):\n dataset_row_n = dataset[0:r, :] # select specific number of rows\n x = preprocessing.scale(dataset_row_n[:, x_select_columns]) # machine learning to be in a range of -1 to 1.\n # This may do nothing, but it usually speeds up processing and can also help with accuracy.\n # Because this range is so popularly used\n y = dataset_row_n[:, y_select_column_throughput]\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=42)\n svr_rbf = SVR(kernel='rbf', C=1e5, gamma=0.1, epsilon=0.01)\n svr_rbf.fit(x_train, y_train)\n confidence_throughput_score = svr_rbf.score(x_test, y_test)\n return confidence_throughput_score\n# #############################################################################\n\n\ndef svr_regression_latency(dataset, r):\n dataset_row_n = dataset[0:r, :] # select specific number of rows\n x = preprocessing.scale(dataset_row_n[:, x_select_columns]) # machine learning to be in a range of -1 to 1.\n # This may do nothing, but it usually speeds up processing and can also help with accuracy.\n # Because this range is so popularly used\n y = dataset_row_n[:, y_select_column_latency]\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=42)\n # print (y_train)\n svr_rbf = SVR(kernel='rbf', C=1e5, gamma=0.1, epsilon=0.01)\n svr_rbf.fit(x_train, y_train)\n confidence_latency = svr_rbf.score(x_test, y_test)\n return confidence_latency\n# ################################################################################\n\n\nconfidence_results_throughput = np.array([], dtype='float64')\n\nfor i in range(row_start, n_rows):\n confidence_results_throughput = np.append(confidence_results_throughput, svr_regression_throughput(data, i))\n\n\nconfidence_results_latency = np.array([], dtype='float64')\n\nfor i in range(row_start, n_rows):\n confidence_results_latency = np.append(confidence_results_latency, svr_regression_latency(data, i))\n###########################################################################\n\n\n\nlw = 2\nplt.plot(confidence_results_throughput, color='navy', lw=lw, label='Thr')\nplt.xlim(row_start, n_rows)\nplt.title('SVR_RBF')\n# plt.xlabel('total rows')\nplt.ylabel('success score (1 is best)')\n# plt.legend()\n# plt.show()\n\nlw = 2\nplt.plot(confidence_results_latency, color='red', lw=lw, label='Lat')\nplt.xlim(row_start, n_rows)\nplt.xlabel('number of rows (use ML by increasing volume of data)')\nplt.legend()\nplt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "sklearn.preprocessing.scale", "sklearn.svm.SVR", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show", "pandas.read_csv", "pandas.DataFrame.replace" ] ]
SinaKhorami/visage
[ "8dbbe6397bc20ed33a041bd07fa2887351f0e099" ]
[ "src/model/models.py" ]
[ "import os\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.optimizers import Adadelta\nfrom keras.losses import categorical_crossentropy\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.models import model_from_yaml\n\n\nclass FERGModel(object):\n \"\"\"\n FERG-db Model (https://grail.cs.washington.edu/projects/deepexpr/ferg-db.html)\n\n - 96*96 pixel grayscale images\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n self.config = cfg\n self.model = None\n \n def get_model_structure(self):\n model = Sequential()\n model.add(Conv2D(\n 32,\n kernel_size=(3, 3),\n activation='relu',\n input_shape=(self.config['input_dim'][0], self.config['input_dim'][1], 1)\n ))\n model.add(Conv2D(\n 64,\n kernel_size=(3, 3),\n activation='relu'\n ))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(\n 128,\n activation='relu'\n ))\n model.add(Dropout(0.5))\n model.add(Dense(\n len(self.config['classes']),\n activation='softmax'\n ))\n model.compile(\n optimizer=Adadelta(),\n loss=categorical_crossentropy,\n metrics=['accuracy']\n )\n\n return model\n\n def load_model_weights(self):\n self.model = self.get_model_structure()\n self.model.load_weights(os.path.join(os.path.dirname(__file__), self.config['file_name']))\n\n def get_face_emotion(self, face_image):\n out = np.asarray(face_image.resize((self.config['input_dim'][0], self.config['input_dim'][1])), dtype='float32')\n out /= 255\n out = out.reshape((1, self.config['input_dim'][0], self.config['input_dim'][1], 1))\n predicted_class_index = self.model.predict(out)\n\n return self.config['classes'][np.argmax(predicted_class_index)]\n" ]
[ [ "numpy.argmax" ] ]
freewym/lhotse
[ "66e9bbaf25b75011388ab00189baa162c3c1d435" ]
[ "test/cut/test_padding_cut.py" ]
[ "from tempfile import NamedTemporaryFile\n\nimport numpy as np\nimport pytest\n\nfrom lhotse.audio import AudioSource, Recording\nfrom lhotse.cut import Cut, CutSet, PaddingCut\nfrom lhotse.features import Features\nfrom lhotse.utils import EPSILON, LOG_EPSILON\n\nPADDING_ENERGY = EPSILON\nPADDING_LOG_ENERGY = LOG_EPSILON\n\n\n@pytest.fixture\ndef padding_cut():\n return PaddingCut(\n id='padding-1',\n duration=10.0,\n num_frames=1000,\n num_features=40,\n sampling_rate=16000,\n num_samples=160000,\n use_log_energy=True\n )\n\n\n@pytest.mark.parametrize(\n ['use_log_energy', 'expected_value'],\n [\n (True, PADDING_LOG_ENERGY),\n (False, PADDING_ENERGY)\n ]\n)\ndef test_load_features_log(padding_cut, use_log_energy, expected_value):\n padding_cut.use_log_energy = use_log_energy\n feats = padding_cut.load_features()\n assert feats.shape[0] == 1000\n assert feats.shape[1] == 40\n np.testing.assert_almost_equal(feats, expected_value)\n\n\ndef test_frame_shift(padding_cut):\n assert padding_cut.frame_shift == 0.01\n\n\ndef test_load_audio(padding_cut):\n samples = padding_cut.load_audio()\n assert samples.shape[0] == 1 # single channel\n assert samples.shape[1] == 160000\n np.testing.assert_equal(samples, 0.0)\n\n\n@pytest.mark.parametrize(\n ['offset', 'duration', 'expected_duration', 'expected_num_frames', 'expected_num_samples'],\n [\n (0.0, None, 10.0, 1000, 160000),\n (0.0, 5.0, 5.0, 500, 80000),\n (5.0, None, 5.0, 500, 80000),\n (5.0, 5.0, 5.0, 500, 80000),\n (5.0, 2.0, 2.0, 200, 32000),\n ]\n)\ndef test_truncate(padding_cut, offset, duration, expected_duration, expected_num_frames, expected_num_samples):\n cut = padding_cut.truncate(offset=offset, duration=duration, preserve_id=True)\n # Invariants\n assert cut.frame_shift == padding_cut.frame_shift\n assert cut.num_features == padding_cut.num_features\n assert cut.sampling_rate == padding_cut.sampling_rate\n assert cut.use_log_energy == padding_cut.use_log_energy\n assert cut.id == padding_cut.id\n # Variants\n assert cut.duration == expected_duration\n assert cut.num_frames == expected_num_frames\n assert cut.load_features().shape == (expected_num_frames, 40)\n assert cut.load_audio().shape == (1, expected_num_samples)\n\n\n@pytest.fixture\ndef libri_cut():\n return Cut(\n channel=0,\n duration=16.04,\n features=Features(\n channels=0,\n duration=16.04,\n num_features=40,\n num_frames=1604,\n recording_id='recording-1',\n sampling_rate=16000,\n start=0.0,\n storage_path='test/fixtures/libri/storage',\n storage_key='30c2440c-93cb-4e83-b382-f2a59b3859b4.llc',\n storage_type='lilcom_files',\n type='fbank',\n ),\n recording=Recording(\n id='recording-1',\n sources=[\n AudioSource(\n type='file',\n channels=[0],\n source='test/fixtures/libri/libri-1088-134315-0000.wav',\n )],\n sampling_rate=16000,\n num_samples=256640,\n duration=1604,\n ),\n id='849e13d8-61a2-4d09-a542-dac1aee1b544',\n start=0.0,\n supervisions=[],\n )\n\n\ndef test_mix_in_the_middle(libri_cut, padding_cut):\n mixed = libri_cut.mix(padding_cut)\n\n # Invariants\n assert mixed.duration == 16.04\n assert mixed.num_features == 40\n assert mixed.num_frames == 1604\n\n # Check that the actual feature shapes and values did not change\n pre_mixed_feats = libri_cut.load_features()\n mixed_feats = mixed.load_features()\n assert mixed_feats.shape == pre_mixed_feats.shape\n np.testing.assert_allclose(pre_mixed_feats, mixed_feats, rtol=1e-2)\n\n\ndef test_mix_pad_right(libri_cut, padding_cut):\n mixed = libri_cut.mix(padding_cut, offset_other_by=10.0)\n\n assert mixed.duration == 
20.0\n assert mixed.num_frames == 2000\n\n mixed_feats = mixed.load_features()\n assert mixed_feats.shape == (2000, 40)\n np.testing.assert_allclose(mixed_feats[1604:, :], PADDING_LOG_ENERGY, atol=0.7) # Only padding after 16.04s\n np.testing.assert_array_less(PADDING_LOG_ENERGY, mixed_feats[1603, :]) # Padding didn't start before 16.04s\n\n pre_mixed_feats = libri_cut.load_features()\n np.testing.assert_allclose(pre_mixed_feats, mixed_feats[:1604, :], rtol=1e-2)\n\n\ndef test_mix_pad_left(libri_cut, padding_cut):\n mixed = padding_cut.mix(libri_cut, offset_other_by=3.96)\n\n assert mixed.duration == 20.0\n assert mixed.num_frames == 2000\n\n mixed_feats = mixed.load_features()\n assert mixed_feats.shape == (2000, 40)\n np.testing.assert_allclose(mixed_feats[:396, :], PADDING_LOG_ENERGY, atol=0.7) # Only padding before 3.96s\n np.testing.assert_array_less(PADDING_LOG_ENERGY, mixed_feats[396, :]) # No padding after 3.96s\n\n pre_mixed_feats = libri_cut.load_features()\n np.testing.assert_allclose(pre_mixed_feats, mixed_feats[396:, :], rtol=1e-2)\n\n\n@pytest.fixture\ndef mixed_libri_cut(libri_cut):\n return libri_cut.mix(libri_cut)\n\n\ndef test_mix_mixed_cut_with_padding_in_the_middle(mixed_libri_cut, padding_cut):\n mixed = mixed_libri_cut.mix(padding_cut)\n\n # Invariants\n assert mixed.duration == 16.04\n assert mixed.num_features == 40\n assert mixed.num_frames == 1604\n\n # Check that the actual feature shapes and values did not change\n pre_mixed_feats = mixed_libri_cut.load_features()\n mixed_feats = mixed.load_features()\n assert mixed_feats.shape == pre_mixed_feats.shape\n np.testing.assert_allclose(pre_mixed_feats, mixed_feats, rtol=1e-2)\n\n\ndef test_mix_mixed_cut_with_padding_on_the_right(mixed_libri_cut, padding_cut):\n mixed = mixed_libri_cut.mix(padding_cut, offset_other_by=10.0)\n\n assert mixed.duration == 20.0\n assert mixed.num_frames == 2000\n\n mixed_feats = mixed.load_features()\n assert mixed_feats.shape == (2000, 40)\n\n np.testing.assert_allclose(mixed_feats[1604:, :], PADDING_LOG_ENERGY, atol=0.8) # Only padding after 16.04s\n np.testing.assert_array_less(PADDING_LOG_ENERGY, mixed_feats[1603, :]) # Padding didn't start before 16.04s\n\n pre_mixed_feats = mixed_libri_cut.load_features()\n np.testing.assert_allclose(pre_mixed_feats, mixed_feats[:1604, :], rtol=1e-1)\n\n\ndef test_mix_mixed_cut_with_padding_on_the_left(mixed_libri_cut, padding_cut):\n mixed = padding_cut.mix(mixed_libri_cut, offset_other_by=3.96)\n\n assert mixed.duration == 20.0\n assert mixed.num_frames == 2000\n\n mixed_feats = mixed.load_features()\n assert mixed_feats.shape == (2000, 40)\n np.testing.assert_allclose(mixed_feats[:396, :], PADDING_LOG_ENERGY, atol=0.8) # Only padding before 3.96s\n np.testing.assert_array_less(PADDING_LOG_ENERGY, mixed_feats[396, :]) # No padding after 3.96s\n\n pre_mixed_feats = mixed_libri_cut.load_features()\n np.testing.assert_allclose(pre_mixed_feats, mixed_feats[396:, :], rtol=1e-1)\n\n\ndef test_append(libri_cut, padding_cut):\n appended = libri_cut.append(padding_cut)\n\n assert appended.duration == 26.04\n assert appended.num_frames == 2604\n\n appended_feats = appended.load_features()\n assert appended_feats.shape == (2604, 40)\n np.testing.assert_allclose(appended_feats[1604:, :], PADDING_LOG_ENERGY, atol=0.8) # Only padding after 16.04s\n np.testing.assert_array_less(PADDING_LOG_ENERGY, appended_feats[1603, :]) # Padding didn't start before 16.04s\n\n original_feats = libri_cut.load_features()\n np.testing.assert_allclose(original_feats, 
appended_feats[:1604, :], rtol=1e-2)\n\n\ndef test_pad_simple_cut(libri_cut):\n padded = libri_cut.pad(duration=20.0)\n\n assert padded.duration == 20.0\n assert padded.num_frames == 2000\n\n mixed_feats = padded.load_features()\n assert mixed_feats.shape == (2000, 40)\n np.testing.assert_allclose(mixed_feats[1604:, :], PADDING_LOG_ENERGY, atol=0.8) # Only padding after 16.04s\n np.testing.assert_array_less(PADDING_LOG_ENERGY, mixed_feats[1603, :]) # Padding didn't start before 16.04s\n\n pre_mixed_feats = libri_cut.load_features()\n np.testing.assert_almost_equal(pre_mixed_feats, mixed_feats[:1604, :], decimal=5)\n\n\ndef test_pad_simple_cut_audio_only(libri_cut):\n libri_cut.features = None\n padded = libri_cut.pad(duration=20.0)\n\n assert padded.duration == 20.0\n assert padded.num_samples == 20 * 16000\n\n mixed_audio = padded.load_audio()\n assert mixed_audio.shape == (1, padded.num_samples)\n\n pre_mixed_audio = libri_cut.load_audio()\n assert pre_mixed_audio.shape == (1, libri_cut.num_samples)\n\n\ndef test_pad_mixed_cut(mixed_libri_cut):\n padded = mixed_libri_cut.pad(duration=20.0)\n\n assert padded.duration == 20.0\n assert padded.num_frames == 2000\n\n mixed_feats = padded.load_features()\n assert mixed_feats.shape == (2000, 40)\n np.testing.assert_allclose(mixed_feats[1604:, :], PADDING_LOG_ENERGY, atol=0.8) # Only padding after 16.04s\n np.testing.assert_array_less(PADDING_LOG_ENERGY, mixed_feats[1603, :]) # Padding didn't start before 16.04s\n\n pre_mixed_feats = mixed_libri_cut.load_features()\n np.testing.assert_almost_equal(pre_mixed_feats, mixed_feats[:1604, :], decimal=2)\n\n\ndef test_pad_cut_set(cut_set):\n # cut_set fixture is defined in test/cut/conftest.py\n padded_cut_set = cut_set.pad(60.1)\n assert all(cut.duration == 60.1 for cut in padded_cut_set)\n\n\ndef test_serialize_padded_cut_set(cut_set):\n # cut_set fixture is defined in test/cut/conftest.py\n padded_cut_set = cut_set.pad(60.1)\n with NamedTemporaryFile() as f:\n padded_cut_set.to_json(f.name)\n restored = CutSet.from_json(f.name)\n assert padded_cut_set == restored\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.testing.assert_allclose", "numpy.testing.assert_array_less", "numpy.testing.assert_equal" ] ]
abailoni/manual_annotation_spacem
[ "53a38bcb5107c9f9804f2c3a61251d9b01b5b1ef" ]
[ "traincellpose/core.py" ]
[ "import math\nimport os\nimport shutil\nfrom copy import deepcopy\nfrom shutil import copyfile\n\nimport numpy as np\nimport pandas\nimport tifffile\nimport yaml\nfrom pathlib import Path\n\nfrom speedrun import BaseExperiment, locate\nfrom speedrun.yaml_utils import recursive_update\nfrom .cellpose_training.start_training import start_cellpose_training\n\nfrom .gui_widgets.main_gui import StartingGUI\nfrom .io.images import read_uint8_img, write_image_to_file, write_ome_tiff\nfrom .io.hdf5 import readHDF5, writeHDF5\nfrom .preprocessing.utils import apply_preprocessing_to_image\nfrom .qupath import update_qupath_proj as qupath_utils\nfrom .qupath.save_labels import export_labels_from_qupath\nfrom .io.various import yaml2dict, get_path_components, open_path\nfrom .qupath.update_qupath_proj import add_image_to_project\n\n\nclass BaseAnnotationExperiment(BaseExperiment):\n def __init__(self, experiment_directory):\n self._main_window = None\n assert isinstance(experiment_directory, str)\n super(BaseAnnotationExperiment, self).__init__(experiment_directory)\n\n # Simulate sys.argv, so that configuration is loaded from the experiment directory:\n self._simulated_sys_argv = [\"script.py\", experiment_directory]\n # Check if this is a new project or if we should load the previous config:\n config_path = os.path.join(experiment_directory, \"Configurations/main_config.yml\")\n load_prev_experiment = os.path.exists(config_path)\n if load_prev_experiment:\n old_config_path = os.path.join(experiment_directory, \"Configurations/main_config_BAK.yml\")\n copyfile(config_path, old_config_path)\n self._simulated_sys_argv += [\"--inherit\", old_config_path]\n\n # Load config and setup:\n self.auto_setup(update_git_revision=False)\n\n # Set default values:\n if not load_prev_experiment:\n self.set(\"max_nb_extra_channels\", 2)\n self.set(\"extra_channels_names\", [\"Extra ch. 1\", \"Extra ch. 
2\"])\n self.set(\"labeling_tool\", \"QuPath\")\n self.set_default_training_args()\n self.set_default_preprocessing_config()\n\n # Initialize or load dataframes:\n self._rois_df = None\n self._input_images_df = None\n self._init_rois()\n self._init_input_images_df()\n\n self.dump_configuration()\n\n def run(self):\n self.show_start_page()\n\n def show_start_page(self):\n self.main_window.show()\n self.dump_configuration()\n\n @property\n def main_window(self):\n if self._main_window is None:\n # self._main_window = widgets.Container(widgets=[StartWindow(self)])\n self._main_window = StartingGUI(self)\n # self._main_window.max_width = 30\n self._main_window.show(run=True)\n return self._main_window\n\n # --------------------------------------------\n # ROIs:\n # --------------------------------------------\n\n def update_rois_image(self, image_id, new_napari_rois):\n if isinstance(new_napari_rois, list):\n new_napari_rois = np.array(new_napari_rois)\n\n # Get IDs of previous ROIs:\n prev_roi_ids = self._get_roi_ids_by_image_id(image_id)\n current_max_roi_id = self._napari_rois.shape[0]\n prev_napari_rois = self._napari_rois[prev_roi_ids]\n\n # Check if no new napari rois were passed:\n if new_napari_rois.size == 0:\n # Delete any previous ROIs:\n self._delete_training_images(prev_roi_ids)\n self._delete_roi_ids(prev_roi_ids)\n else:\n assert new_napari_rois.ndim == 3\n assert new_napari_rois.shape[1] == 4 and new_napari_rois.shape[\n 2] == 2, \"ROI array does not have the correct shape\"\n\n # Check what is there:\n check_rois = np.array([[np.allclose(new_roi, old_roi) for old_roi in prev_napari_rois]\n for new_roi in new_napari_rois])\n # Add new ROIs:\n rois_not_already_in_project = ~ np.any(check_rois, axis=1)\n self._napari_rois = np.concatenate([self._napari_rois, new_napari_rois[rois_not_already_in_project]])\n for i in range(current_max_roi_id, current_max_roi_id + rois_not_already_in_project.sum()):\n self._rois_df.loc[i] = [i, image_id]\n self._create_training_images([i])\n\n # Remove ROIs that are not present anymore:\n old_rois_to_be_deleted = ~ np.any(check_rois, axis=0)\n old_rois_to_be_deleted = list(np.array(prev_roi_ids)[old_rois_to_be_deleted])\n self._delete_training_images(old_rois_to_be_deleted)\n self._delete_roi_ids(old_rois_to_be_deleted)\n\n # Update saved files:\n self.dump_rois()\n\n def get_list_rois_per_image(self):\n \"\"\"\n Return a list of tuples, such that:\n output_list[index_input_image] = (path_main_image, nb_rois)\n \"\"\"\n out_list = []\n for id_image in range(self.nb_input_images):\n selected_rows = self._input_images_df.loc[self._input_images_df[\"image_id\"] == id_image]\n assert len(selected_rows) == 1\n nb_rois = len(self._get_roi_ids_by_image_id(id_image))\n out_list.append((selected_rows[\"main_path\"].item(), nb_rois))\n return out_list\n\n def get_napari_roi_by_image_id(self, image_id):\n rois_ids = self._get_roi_ids_by_image_id(image_id)\n # Check if there are ROIs at all:\n if len(rois_ids):\n return [roi for roi in self._napari_rois[rois_ids]]\n else:\n return None\n\n def get_image_id_from_roi_id(self, roi_id):\n df = self._rois_df\n image_id = df.loc[df[\"roi_id\"] == roi_id, \"image_id\"].tolist()\n assert len(image_id) == 1\n return image_id[0]\n\n def _get_roi_ids_by_image_id(self, image_id):\n df = self._rois_df\n rois_ids = df.loc[df[\"image_id\"] == image_id, \"roi_id\"].tolist()\n\n return rois_ids\n\n def _delete_roi_ids(self, roi_ids):\n # TODO: Currently, ROIs are actually not deleted from the hdf5 file,\n # but only from the 
dataframe (to avoid reordering)\n # When done, uncomment assert to check consistency csv/hdf5\n df = self._rois_df\n self._rois_df = df[~df['roi_id'].isin(roi_ids)]\n\n # TODO Delete also files!\n\n def _init_rois(self):\n if self._rois_df is None:\n rois_csv_path = os.path.join(self.experiment_directory, \"ROIs/rois.csv\")\n rois_hdf5_path = os.path.join(self.experiment_directory, \"ROIs/rois.hdf5\")\n if os.path.exists(rois_csv_path):\n self._rois_df = pandas.read_csv(rois_csv_path)\n assert os.path.exists(rois_hdf5_path), \"ROIs hdf5 file not found!\"\n self._napari_rois = readHDF5(rois_hdf5_path, \"data\")\n rois_shape = self._napari_rois.shape\n assert rois_shape[1] == 4 and rois_shape[2] == 2\n # assert len(self._rois_df) == rois_shape[0], \"ROIs csv and hdf5 files do not match!\"\n else:\n # Create empty a dataframe and array:\n self._rois_df = pandas.DataFrame(columns=[\"roi_id\", \"image_id\"])\n self._napari_rois = np.empty((0, 4, 2), dtype=\"float64\")\n\n def dump_rois(self):\n # Get paths:\n proj_dir = self.experiment_directory\n rois_dir_path = os.path.join(proj_dir, \"ROIs\")\n roi_csv_path = os.path.join(rois_dir_path, \"rois.csv\")\n rois_hdf5_path = os.path.join(rois_dir_path, \"rois.hdf5\")\n\n # Write data to file:\n writeHDF5(self._napari_rois, rois_hdf5_path, \"data\")\n self._rois_df.to_csv(roi_csv_path, index=False)\n\n # Dump general configuration:\n self.dump_configuration()\n\n # --------------------------------------------\n # Input images:\n # --------------------------------------------\n def set_extra_channels_names(self, channels_names):\n # TODO: deprecate\n if not isinstance(channels_names, list):\n assert isinstance(channels_names, str)\n channels_names = [channels_names]\n assert len(channels_names) <= self.get(\"max_nb_extra_channels\")\n new_names = self.get(\"extra_channels_names\")\n for i, ch_name in enumerate(channels_names):\n new_names[i] = ch_name\n self.set(\"extra_channels_names\", new_names)\n\n def get_input_image_id_from_path(self, main_image_path):\n df = self._input_images_df\n\n # If image is in proj dir, then get relative path:\n if os.path.isabs(main_image_path):\n is_in_exp_dir, main_image_path = self.is_path_in_exp_dir(main_image_path)\n\n image_id = df.loc[df[\"main_path\"] == main_image_path, \"image_id\"].tolist()\n assert len(image_id) == 1\n return image_id[0]\n\n def is_path_in_exp_dir(self, path):\n if path is not None:\n path = path if isinstance(path, Path) else Path(path)\n is_in_exp_dir = Path(self.experiment_directory) in path.parents\n path = os.path.relpath(path, self.experiment_directory) if is_in_exp_dir else path\n return is_in_exp_dir, str(path)\n else:\n return False, None\n\n def get_image_paths(self, image_id):\n \"\"\"\n Return a dictionary with the paths for each channel. 
The key of the dictionary is the channel name.\n \"\"\"\n if isinstance(image_id, str):\n image_id = self.get_input_image_id_from_path(image_id)\n assert image_id < self.nb_input_images, \"Image ID not present in project\"\n image_data = self._input_images_df.loc[self._input_images_df[\"image_id\"] == image_id]\n ch_names = [\"Main channel\", \"DAPI\"] + self.get(\"extra_channels_names\")\n out_dict = {}\n for i in range(2 + self.get(\"max_nb_extra_channels\")):\n path = image_data.iloc[0, i + 1]\n if isinstance(path, str):\n # If image is in the proj dir, then construct the absolute path:\n if not os.path.isabs(path):\n path = os.path.join(self.experiment_directory, path)\n out_dict[ch_names[i]] = path\n return out_dict\n\n def add_input_image(self,\n main_image_path,\n main_image_filter=None,\n dapi_path=None,\n dapi_filter=None,\n extra_ch_1_path=None,\n extra_ch_1_filter=None,\n extra_ch_2_path=None,\n extra_ch_2_filter=None,\n id_input_image_to_rewrite=None,\n **extra_channels_kwargs\n ):\n \"\"\"\n # TODO: add option to remove input image? In that case, I need to update self.nb_input_images\n \"\"\"\n # TODO: generalize to multiple extra channels\n\n assert len(extra_channels_kwargs) == 0, \"Extra channels are not supported yet\"\n\n # Validate main image path:\n assert os.path.isfile(main_image_path), \"'{}' is not a file!\"\n\n # Convert to relative, if in proj_directory:\n _, main_image_path = self.is_path_in_exp_dir(main_image_path)\n\n def validate_ch_paths(ch_path, name_filter):\n ch_path = None if ch_path == \"\" else ch_path\n name_filter = None if name_filter == \"\" else name_filter\n if ch_path is not None:\n assert os.path.isfile(ch_path), \"'{}' is not a file!\"\n # Convert to relative, if in proj_directory:\n _, ch_path = self.is_path_in_exp_dir(ch_path)\n else:\n print(\"WARNING: filename filters outdated. No support for relative paths in proj dir\")\n if name_filter is not None:\n assert isinstance(main_image_filter,\n str) and main_image_filter != \"\", \"Please insert a proper filter string for main image\"\n assert isinstance(name_filter,\n str) and name_filter != \"\", \"Wrong format for filter '{}'\".format(name_filter)\n ch_path = main_image_path.replace(main_image_filter, name_filter)\n assert os.path.isfile(ch_path), \"'{}' is not a file!\"\n return ch_path\n\n # Validate DAPI image:\n dapi_image_path = validate_ch_paths(dapi_path, dapi_filter)\n\n # If present, then set up the training to use it (cellpose can still train fine if some of the images do\n # not have DAPI channel):\n if dapi_image_path is not None:\n self.use_dapi_channel_for_segmentation = True\n\n # Validate extra channels:\n extra_ch_1_path = validate_ch_paths(extra_ch_1_path, extra_ch_1_filter)\n extra_ch_2_path = validate_ch_paths(extra_ch_2_path, extra_ch_2_filter)\n\n # Add new image:\n image_info = [main_image_path, dapi_image_path, extra_ch_1_path, extra_ch_2_path]\n nb_input_images = self.nb_input_images\n\n # Check if main image has already been added:\n matching_images = self._input_images_df.index[self._input_images_df[\"main_path\"] == main_image_path].tolist()\n assert len(matching_images) <= 1\n if len(matching_images) == 1:\n print(\"The added image was already present in the project. 
Updating paths.\")\n id_input_image_to_rewrite = matching_images[0]\n\n if id_input_image_to_rewrite is not None:\n assert id_input_image_to_rewrite < nb_input_images\n added_image_id = nb_input_images if id_input_image_to_rewrite is None else id_input_image_to_rewrite\n self._input_images_df.loc[added_image_id] = [added_image_id] + image_info\n self.dump_input_images_info()\n\n # Refresh all the ROIs, if there were any:\n self._create_training_images(self._get_roi_ids_by_image_id(added_image_id))\n\n return added_image_id\n\n def dump_input_images_info(self):\n # Write data to file:\n proj_dir = self.experiment_directory\n rois_dir_path = os.path.join(proj_dir, \"ROIs\")\n input_images_csv_path = os.path.join(rois_dir_path, \"input_images.csv\")\n self._input_images_df.to_csv(input_images_csv_path, index=False)\n\n # Dump general configuration:\n self.dump_configuration()\n\n @property\n def nb_input_images(self):\n assert self._input_images_df is not None\n nb_input_images = self._input_images_df[\"image_id\"].max()\n return 0 if math.isnan(nb_input_images) else nb_input_images + 1\n\n def _init_input_images_df(self):\n if self._input_images_df is None:\n input_images_csv_path = os.path.join(self.experiment_directory, \"ROIs/input_images.csv\")\n columns_names = [\"image_id\",\n \"main_path\",\n \"DAPI_path\"]\n columns_names += [\"extra_ch_{}_path\".format(i) for i in range(self.get(\"max_nb_extra_channels\"))]\n if os.path.exists(input_images_csv_path):\n self._input_images_df = pandas.read_csv(input_images_csv_path, index_col=None)\n # TODO: remove image_id...?\n self._input_images_df.sort_values(\"image_id\")\n self._input_images_df.reset_index(drop=True)\n # Make sure that index and image ID are the same, otherwise adding images will not work properly:\n assert all([idx == row[\"image_id\"] for idx, row in self._input_images_df.iterrows()])\n else:\n self._input_images_df = pandas.DataFrame(columns=columns_names)\n\n def show_cellpose_input_folder(self):\n open_path(os.path.join(self.experiment_directory, \"ROIs/cellpose_input\"))\n\n def compress_qupath_proj_dir(self):\n shutil.make_archive(self.qupath_directory, 'zip', self.qupath_directory)\n open_path(self.experiment_directory)\n\n # --------------------------------------------\n # Image crops defined from ROIs and used for training:\n # --------------------------------------------\n def _create_training_images(self, list_roi_ids,\n update_single_channels=True,\n update_composite_images=True,\n update_cellpose_inputs=True\n ):\n \"\"\"\n Create the actual cropped images that will be used for training and for annotation.\n \"\"\"\n if not isinstance(list_roi_ids, (list, tuple)):\n list_roi_ids = [list_roi_ids]\n\n for roi_id in list_roi_ids:\n img_id = self.get_image_id_from_roi_id(roi_id)\n image_paths = self.get_image_paths(img_id)\n crop_slice = self.get_crop_slice_from_roi_id(roi_id)\n roi_paths = self.get_training_image_paths(roi_id)\n\n # Load channels and apply crops:\n ch_names = [\"Main channel\", \"DAPI\"] + self.get(\"extra_channels_names\")\n img_channels = [read_uint8_img(image_paths[ch_names[i]])[crop_slice] if ch_names[i] in image_paths else None\n for i in range(2 + self.get(\"max_nb_extra_channels\"))]\n\n # ----------------------------\n # Cellpose training image:\n # ----------------------------\n if update_cellpose_inputs:\n # Set green channel as main channel:\n cellpose_image = np.zeros_like(img_channels[0])\n cellpose_image[..., 1] = img_channels[0][..., 0]\n\n # Set red channel as DAPI:\n if 
self.use_dapi_channel_for_segmentation and img_channels[1] is not None:\n cellpose_image[..., 2] = img_channels[1][..., 0]\n\n # Check if I should apply any preprocessing:\n preproc_kwargs = self.get(\"preprocessing\")\n if preproc_kwargs is not None and self.apply_preprocessing:\n print(\"INFO: Preprocessing image...\")\n cellpose_image[..., 1] = apply_preprocessing_to_image(cellpose_image[..., 1], \"main_segm_ch\",\n preproc_kwargs)\n if self.use_dapi_channel_for_segmentation:\n cellpose_image[..., 2] = apply_preprocessing_to_image(cellpose_image[..., 2], \"DAPI\",\n preproc_kwargs)\n\n # Write image:\n # tifffile.imwrite(roi_paths[\"cellpose_training_input_image\"], cellpose_image)\n write_image_to_file(roi_paths[\"cellpose_training_input_image\"], cellpose_image)\n # write_ome_tiff(roi_paths[\"cellpose_training_input_image\"], cellpose_image, axes=\"YX\")\n\n # ----------------------------\n # Write composite and single-channel cropped images:\n # ----------------------------\n # image_shape = img_channels[0][..., [0]]\n # Get channel names and colors:\n # TODO: make general variable\n channel_colormaps = [\"gray\", \"red\", \"yellow\", \"cyan\"]\n\n if update_single_channels:\n for i, ch_image in enumerate(img_channels):\n if ch_image is not None:\n write_image_to_file(roi_paths[\"single_channels\"][ch_names[i]], ch_image)\n\n if update_composite_images:\n composite_image = np.stack([ch_image[..., 0] for ch_image in img_channels if ch_image is not None], axis=0)\n write_ome_tiff(roi_paths[\"composite_image\"], composite_image, axes=\"CYX\",\n channel_names=[ch_name for ch_name, ch in zip(ch_names, img_channels) if ch is not None],\n channel_colors=[ch_color for ch_color, ch in zip(channel_colormaps, img_channels) if\n ch is not None],\n )\n\n # Finally, add the image to the QuPath project:\n qupath_utils.add_image_to_project(self.qupath_directory,\n roi_paths[\"composite_image\"])\n\n def refresh_all_training_images(self, **kwargs):\n all_rois = self._rois_df[\"roi_id\"].tolist()\n self._create_training_images(all_rois, **kwargs)\n\n def _delete_training_images(self, list_roi_ids):\n \"\"\"\n Delete cropped images (apart from labels)\n \"\"\"\n if not isinstance(list_roi_ids, (list, tuple)):\n list_roi_ids = [list_roi_ids]\n\n for roi_id in list_roi_ids:\n roi_paths = self.get_training_image_paths(roi_id)\n\n # Remove single channels and cellpose inputs:\n os.remove(roi_paths[\"cellpose_training_input_image\"])\n for ch_name in roi_paths[\"single_channels\"]:\n if roi_paths[\"single_channels\"][ch_name] is not None:\n os.remove(roi_paths[\"single_channels\"][ch_name])\n\n # Delete image in QuPath:\n qupath_utils.delete_image_from_project(self.qupath_directory, int(roi_id))\n os.remove(roi_paths[\"composite_image\"])\n\n def get_crop_slice_from_roi_id(self, roi_id):\n self.assert_roi_id(roi_id)\n roi = self._napari_rois[roi_id]\n x_crop = slice(int(roi[:, 0].min()), int(roi[:, 0].max()))\n y_crop = slice(int(roi[:, 1].min()), int(roi[:, 1].max()))\n return (x_crop, y_crop)\n\n def assert_roi_id(self, roi_id):\n assert np.array(self._rois_df['roi_id'].isin([roi_id])).sum() == 1, \"ROI id not found: {}\".format(roi_id)\n\n def get_roi_list(self):\n roi_list = []\n for image_id in range(self.nb_input_images):\n rois_image = self._get_roi_ids_by_image_id(image_id)\n for i_roi, roi_id in enumerate(rois_image):\n out_roi_info = {}\n out_roi_info['roi_id'] = roi_id\n out_roi_info['image_id'] = image_id\n roi_info = self.get_training_image_paths(roi_id)\n out_roi_info['has_label'] = 
roi_info[\"has_labels\"]\n out_roi_info['roi_index_per_image'] = i_roi\n roi_list.append(out_roi_info)\n return roi_list\n\n def get_training_image_paths(self, roi_id):\n \"\"\"\n For a given ROI id, the function returns paths to the training image used by cellpose,\n the label file with created annotations, and cropped images (both in single-channel and composite versions)\n that are usually used for annotation.\n \"\"\"\n self.assert_roi_id(roi_id)\n filename_roi_id = \"{:04d}\".format(roi_id)\n\n base_ROI_dir = os.path.join(self.experiment_directory, \"ROIs\")\n label_image_path = self.get_napari_label_file_path(roi_id)\n # Add main paths to crop images:\n # TODO: fix cellpose input mess (ome vs tif, channels colors cellpose input)\n out_dict = {\n \"cellpose_training_input_image\": os.path.join(base_ROI_dir,\n \"cellpose_input/{}.tif\".format(filename_roi_id)),\n \"composite_image\": os.path.join(self.qupath_directory, \"input_images/{}.ome.tif\".format(filename_roi_id)),\n \"label_image\": label_image_path,\n \"has_labels\": os.path.exists(label_image_path),\n \"single_channels\": {}\n }\n\n # Add paths to single-channel crop images:\n image_id = self.get_image_id_from_roi_id(roi_id)\n ch_names = [\"Main channel\", \"DAPI\"] + self.get(\"extra_channels_names\")\n for i in range(2 + self.get(\"max_nb_extra_channels\")):\n path = self._input_images_df.iloc[image_id, i + 1]\n # Check if channel is present, then add:\n if isinstance(path, str):\n out_dict[\"single_channels\"][ch_names[i]] = \\\n os.path.join(base_ROI_dir, \"napari_data/roi_images/{}_ch{}.tif\".format(filename_roi_id, i))\n else:\n out_dict[\"single_channels\"][ch_names[i]] = None\n\n return out_dict\n\n def update_roi_labels(self, roi_id, roi_labels):\n roi_info = self.get_training_image_paths(roi_id)\n write_image_to_file(roi_info[\"label_image\"], roi_labels)\n\n def get_napari_label_file_path(self, roi_id):\n return os.path.join(self.experiment_directory, \"ROIs/napari_data/napari_annotations\",\n \"{:04d}_masks.tif\".format(roi_id))\n\n # --------------------------------------------\n # Cellpose training:\n # --------------------------------------------\n\n def setup_cellpose_training_data(self, model_name, show_training_folder=False):\n training_folder = os.path.join(self.experiment_directory, \"CellposeTraining\", model_name)\n training_images_dir = os.path.join(training_folder, \"training_images\")\n\n # Create dirs, if not already present:\n os.makedirs(training_folder, exist_ok=True)\n os.makedirs(training_images_dir, exist_ok=True)\n\n # Write training config to file\n training_config = deepcopy(self.get(\"training_config\"))\n training_config.pop(\"custom_model_path_GUI\")\n training_config.pop(\"pretrained_model_GUI\")\n training_config.pop(\"model_name\")\n # Specify relative training path:\n training_config[\"train_folder\"] = os.path.join(\"CellposeTraining\", model_name, \"training_images\")\n training_config_path = os.path.join(training_folder, \"train_config.yml\")\n existing_training_config = yaml2dict(training_config_path) if os.path.exists(training_config_path) else {}\n existing_training_config = recursive_update(existing_training_config, training_config)\n with open(training_config_path, 'w') as f:\n yaml.dump(existing_training_config, f)\n\n # Delete and recopy training images:\n shutil.rmtree(training_images_dir)\n cellpose_input_images_dir = os.path.join(self.experiment_directory, \"ROIs/cellpose_input\")\n shutil.copytree(cellpose_input_images_dir, training_images_dir)\n\n # Collect labels from 
QuPath or Napari:\n if self.get(\"labeling_tool\") == \"QuPath\":\n export_labels_from_qupath(self.qupath_directory, training_images_dir, filename_postfix=\"masks\")\n elif self.get(\"labeling_tool\") == \"Napari\":\n # TODO: Only copy actual existing ROIs\n # TODO: assert that all labels are present\n shutil.copytree(os.path.join(self.experiment_directory, \"ROIs/napari_data/napari_annotations\"),\n training_images_dir, dirs_exist_ok=True)\n else:\n raise ValueError(\"Labeling tool not recognized\")\n\n # Zip files and open the folder:\n shutil.make_archive(training_folder, 'zip', training_folder)\n if show_training_folder:\n open_path(os.path.join(self.experiment_directory, \"CellposeTraining\"))\n\n def run_cellpose_training(self, model_name):\n try:\n import cellpose\n except ImportError:\n return False, \"cellpose module is required to train a custom model\"\n\n # Assert that training data is present:\n training_folder = os.path.join(self.experiment_directory, \"CellposeTraining\", model_name)\n training_images_dir = os.path.join(training_folder, \"training_images\")\n training_config_path = os.path.join(training_folder, \"train_config.yml\")\n if not os.path.exists(training_folder) or not os.path.exists(training_images_dir) or not os.path.exists(\n training_config_path):\n self.setup_cellpose_training_data(model_name)\n\n # Load config:\n training_config = yaml2dict(training_config_path)\n\n # Temporary check:\n train_folder = os.path.join(self.experiment_directory, training_config.pop(\"train_folder\"))\n assert train_folder == training_images_dir\n\n training_was_successful, error_message = \\\n start_cellpose_training(train_folder,\n *training_config.get(\"cellpose_args\", []),\n # out_models_folder=os.path.split(train_folder)[0],\n **training_config.get(\"cellpose_kwargs\", {}))\n\n return training_was_successful, error_message\n\n def update_main_training_config(self,\n model_name,\n **GUI_training_kwargs\n ):\n \"\"\"\n Function called from magicgui widget to update training parameters set via the GUI\n \"\"\"\n # Prepare training config:\n training_config = {}\n training_config[\"model_name\"] = model_name\n training_config[\"cellpose_kwargs\"] = cellpose_kwargs = {}\n\n # Validate pretrained model kwargs:\n if \"pretrained_model\" not in GUI_training_kwargs:\n return False, \"No pretrained model was specified or recognized\"\n if \"custom_model_path\" not in GUI_training_kwargs:\n return False, \"No custom model path was specified\"\n training_config[\"pretrained_model_GUI\"] = GUI_training_kwargs[\"pretrained_model\"]\n training_config[\"custom_model_path_GUI\"] = GUI_training_kwargs[\"custom_model_path\"]\n\n if GUI_training_kwargs[\"pretrained_model\"] == \"None\":\n # TODO: Check if this works in config\n cellpose_kwargs[\"pretrained_model\"] = None\n elif GUI_training_kwargs[\"pretrained_model\"] == \"Custom model\":\n custom_model_path = GUI_training_kwargs[\"custom_model_path\"]\n if not os.path.isfile(custom_model_path):\n return False, \"The path of the custom model was not found\"\n cellpose_kwargs[\"pretrained_model\"] = custom_model_path\n else:\n # Set model to one of the default cellpose models:\n cellpose_kwargs[\"pretrained_model\"] = GUI_training_kwargs[\"pretrained_model\"]\n\n # Validate other kwargs from GUI:\n for kwarg in [\"batch_size\", \"n_epochs\", \"learning_rate\"]:\n if kwarg in GUI_training_kwargs:\n cellpose_kwargs[kwarg] = GUI_training_kwargs[kwarg]\n\n # Write to main config:\n old_training_config = self.get(\"training_config\", 
ensure_exists=True)\n old_training_config.update(training_config)\n self.set(\"training_config\", old_training_config)\n self.dump_configuration()\n\n return True, None\n\n def set_default_training_args(self):\n training_config = {\"model_name\": \"my_trained_model\",\n \"cellpose_args\": [\"no_npy\",\n # \"save_each\", # Save models at different epochs, based on \"save_every\"\n # \"dir_above\", # Only useful when saving images, not trained models\n \"verbose\"],\n \"pretrained_model_GUI\": \"cyto2\",\n \"custom_model_path_GUI\": \"\"}\n\n training_config[\"cellpose_kwargs\"] = cellpose_kwargs = {}\n\n cellpose_kwargs[\"pretrained_model\"] = \"cyto2\"\n cellpose_kwargs[\"save_every\"] = 10\n cellpose_kwargs[\"learning_rate\"] = 0.0002\n cellpose_kwargs[\"chan\"] = 2\n cellpose_kwargs[\"chan2\"] = 1\n cellpose_kwargs[\"n_epochs\"] = 2000\n cellpose_kwargs[\"batch_size\"] = 8\n cellpose_kwargs[\"mask_filter\"] = \"_masks\"\n\n self.set(\"training_config\", training_config)\n self.dump_configuration()\n\n def get_training_parameters_GUI(self):\n return self.get(\"training_config\")\n\n # --------------------------------------------\n # Internal methods:\n # --------------------------------------------\n @property\n def use_dapi_channel_for_segmentation(self):\n if self.get(\"training/use_dapi_channel_for_segmentation\") is None:\n self.use_dapi_channel_for_segmentation = True\n return self.get(\"training/use_dapi_channel_for_segmentation\")\n\n @use_dapi_channel_for_segmentation.setter\n def use_dapi_channel_for_segmentation(self, use_dapi_channel_for_segmentation):\n assert isinstance(use_dapi_channel_for_segmentation, bool)\n self.set(\"training/use_dapi_channel_for_segmentation\", use_dapi_channel_for_segmentation)\n self.dump_configuration()\n\n @property\n def apply_preprocessing(self):\n if self.get(\"apply_preprocessing\") is None:\n self.apply_preprocessing = False\n return self.get(\"apply_preprocessing\")\n\n @apply_preprocessing.setter\n def apply_preprocessing(self, apply_preprocessing):\n assert isinstance(apply_preprocessing, bool)\n self.set(\"apply_preprocessing\", apply_preprocessing)\n self.dump_configuration()\n\n\n def record_args(self):\n # Simulate sys.argv, so that configuration is loaded from the experiment directory:\n self._argv = self._simulated_sys_argv\n return self\n\n @property\n def experiment_directory(self):\n \"\"\"Directory for the experiment.\"\"\"\n return self._experiment_directory\n\n def set_default_preprocessing_config(self):\n preproc_config = {\"main_segm_ch\":\n [{\"function_kwargs\": {},\n \"function_name\": \"traincellpose.preprocessing.normalize_image\"\n }],\n \"DAPI\": []\n }\n\n self.set(\"preprocessing\", preproc_config)\n self.dump_configuration()\n\n @property\n def qupath_directory(self):\n return os.path.join(self.experiment_directory, 'QuPathProject')\n\n @experiment_directory.setter\n def experiment_directory(self, value):\n if value is not None:\n # Make directories\n os.makedirs(os.path.join(value, 'Configurations'), exist_ok=True)\n os.makedirs(os.path.join(value, 'local_input_images'), exist_ok=True)\n os.makedirs(os.path.join(value, 'ROIs'), exist_ok=True)\n os.makedirs(os.path.join(value, 'ROIs/napari_data/roi_images'), exist_ok=True)\n os.makedirs(os.path.join(value, 'ROIs/cellpose_input'), exist_ok=True)\n os.makedirs(os.path.join(value, 'ROIs/napari_data/napari_annotations'), exist_ok=True)\n os.makedirs(os.path.join(value, \"CellposeTraining\"), exist_ok=True)\n\n # Create QuPathProject:\n os.makedirs(os.path.join(value, 
'QuPathProject'), exist_ok=True)\n add_image_to_project(os.path.join(value, 'QuPathProject'))\n os.makedirs(os.path.join(value, 'QuPathProject/input_images'), exist_ok=True)\n\n self._experiment_directory = value\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros_like", "numpy.empty", "pandas.DataFrame", "numpy.any", "numpy.allclose", "numpy.stack", "pandas.read_csv" ] ]
ZziTaiLeo/encoder4editing
[ "9c72444a2c813bb46f6bab76caddbb566f32b5f6" ]
[ "inv_npy.py" ]
[ "import os\n\nimport cv2\nimport numpy\nimport numpy as np\nfrom skimage import transform\nimport PIL.Image\nimport torch\n\npath_to_images = r'./result/inference_inversions/'\npath_to_inv_params = r'result/npy/integrated_affine.npy'\ninv_M = np.load(path_to_inv_params)\nfiles = os.listdir(path_to_images)\nfiles.sort()\n#\nidx = 0\nfor file in files:\n cv_img = cv2.imread(path_to_images+file)\n new_img = cv2.warpAffine(cv_img, inv_M[idx,0:2,:],dsize=(720,1280))\n idx+=1\n result_path = 'result/inversion_recover/'\n cv2.imwrite(result_path+file, new_img)\n" ]
[ [ "numpy.load" ] ]
hz-ants/KerasPersonLab
[ "32d44dd1f33377128a87d6e074cf8214224f0174" ]
[ "train.py" ]
[ "import os\n\nfrom model import get_personlab\nfrom tf_data_generator import *\nfrom config import config\nfrom keras.models import load_model\nfrom keras.utils import multi_gpu_model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import LambdaCallback\nfrom keras import backend as KB\n\nfrom polyak_callback import PolyakMovingAverage\n\nfrom keras.backend.tensorflow_backend import set_session\nimport tensorflow as tf\n\n# Only allocate memory on GPUs as used\ntf_config = tf.ConfigProto()\ntf_config.gpu_options.allow_growth=True\nset_session(tf.Session(config=tf_config))\n\nLOAD_MODEL_FILE = config.LOAD_MODEL_PATH\nSAVE_MODEL_FILE = config.SAVE_MODEL_PATH\nH5_DATASET = config.H5_DATASET\n\nnum_gpus = config.NUM_GPUS\nbatch_size_per_gpu = config.BATCH_SIZE_PER_GPU\nbatch_size = num_gpus * batch_size_per_gpu\n\ninput_tensors = get_data_input_tensor(batch_size=batch_size)\nfor i in range(len(input_tensors)):\n input_tensors[i].set_shape((None,)+input_shapes[i])\n\nif num_gpus > 1:\n with tf.device('/cpu:0'):\n model = get_personlab(train=True, input_tensors=input_tensors, with_preprocess_lambda=True)\nelse:\n model = get_personlab(train=True, input_tensors=input_tensors, with_preprocess_lambda=True)\n\nif LOAD_MODEL_FILE is not None:\n model.load_weights(LOAD_MODEL_FILE, by_name=True)\n\nif num_gpus > 1:\n parallel_model = multi_gpu_model(model, num_gpus)\nelse:\n parallel_model = model\n\nfor loss in parallel_model.outputs:\n parallel_model.add_loss(loss)\n\ndef save_model(epoch, logs):\n model.save_weights(SAVE_MODEL_FILE)\n\n# In Keras, this metric will not be computed for this model, since the outputs have no targets.\n# Only by commenting out that restriction in the Keras code will allow the display of these metrics\n# which can be used to monitor the individual losses.\ndef identity_metric(y_true, y_pred):\n return KB.mean(y_pred)\n\n\ncallbacks = [LambdaCallback(on_epoch_end=save_model)]\n\nif config.POLYAK:\n def build_save_model():\n with tf.device('/cpu:0'):\n save_model = get_personlab(train=True, input_tensors=input_tensors, with_preprocess_lambda=True)\n return save_model\n polyak_save_path = '/'.join(config.SAVE_MODEL_FILE.split('/')[:-1]+['polyak_'+config.SAVE_MODEL_FILE.split('/')[-1]])\n polyak = PolyakMovingAverage(filepath=polyak_save_path, verbose=1, save_weights_only=True,\n build_model_func=build_save_model, parallel_model=True)\n\n callbacks.append(polyak)\n\n# The paper uses SGD optimizer with lr=0.0001\nparallel_model.compile(target_tensors=None, loss=None, optimizer=Adam(), metrics=[identity_metric])\nparallel_model.fit(steps_per_epoch=64115//batch_size,\n epochs=config.NUM_EPOCHS, callbacks=callbacks)\n" ]
[ [ "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.device" ] ]
stuarteberg/quilted
[ "11c6e38cbce95133ab4e9cfe0dd2afb14ae088f7" ]
[ "bin/export-to-hdf5.py" ]
[ "import sys\nimport logging\nimport argparse\nimport numpy as np\nfrom quilted.h5blockstore import H5BlockStore\n\ndef main():\n logger = logging.getLogger('quilted.h5blockstore')\n logger.setLevel(logging.INFO)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--crop-halo', type=int, default=0, help='Size of the halo to remove from all blocks before they are written.')\n parser.add_argument('blockstore_root_dir')\n parser.add_argument('output_file', help='Examples: myfile.h5 or myfile.h5/myvolume')\n args = parser.parse_args()\n \n filepath, dset_name = args.output_file.split('.h5')\n filepath += '.h5'\n if not dset_name:\n dset_name = 'data'\n\n def remove_halo(block_bounds):\n block_bounds = np.array(block_bounds)\n if block_bounds.shape[1] == 4:\n # FIXME: We assume that 4D volumes have a channel dimension that should not be considered with the halo\n block_bounds[0,:-1] += args.crop_halo\n block_bounds[1,:-1] -= args.crop_halo\n else:\n block_bounds[0] += args.crop_halo\n block_bounds[1] -= args.crop_halo\n \n return block_bounds\n \n blockstore = H5BlockStore(args.blockstore_root_dir, mode='r')\n blockstore.export_to_single_dset(filepath, dset_name, remove_halo)\n\nif __name__ == \"__main__\":\n # DEBUG\n #sys.argv += [\"--crop-halo=20\"]\n #sys.argv += [\"/groups/flyem/data/scratchspace/classifiers/fib25-multicut/segmentation-cache/prediter-0\"]\n #sys.argv += [\"/tmp/output.h5/predictions\"]\n \n main()\n" ]
[ [ "numpy.array" ] ]
JeffreyThiessen/staramr
[ "8550f231b7dc528b91a2c3665a5f99f0fa3d350b" ]
[ "staramr/blast/pointfinder/PointfinderDatabaseInfo.py" ]
[ "import logging\nfrom os import path\n\nimport pandas as pd\n\n\"\"\"\nA Class storing information about the specific PointFinder database.\n\"\"\"\n\nlogger = logging.getLogger('PointfinderDatabaseInfo')\n\n\nclass PointfinderDatabaseInfo:\n\n def __init__(self, database_info_dataframe, file=None):\n \"\"\"\n Creates a new PointfinderDatabaseInfo.\n :param database_info_dataframe: A pd.DataFrame containing the information in PointFinder.\n :param file: The file where the pointfinder database info originates from.\n \"\"\"\n self._pointfinder_info = database_info_dataframe\n self._file = file\n\n self._resistance_table_hacks(self._pointfinder_info)\n\n @classmethod\n def from_file(cls, file):\n \"\"\"\n Builds a new PointfinderDatabaseInfo from the passed file containing PointFinder information on drug resistance\n mutations.\n :param file: The file containing drug resistance mutations.\n :return: A new PointfinderDatabaseInfo.\n \"\"\"\n pointfinder_info = pd.read_csv(file, sep='\\t', index_col=False)\n return cls(pointfinder_info, file)\n\n @classmethod\n def from_pandas_table(cls, database_info_dataframe):\n \"\"\"\n Builds a new PointfinderDatabaseInfo from the passed pd.DataFrame.\n :param database_info_dataframe: A pd.DataFrame containing the information in PointFinder.\n :return: A new PointfinderDatabaseInfo.\n \"\"\"\n return cls(database_info_dataframe)\n\n def _resistance_table_hacks(self, table):\n \"\"\"\n A function implementing some hacks to try and fix mismatched strings in the pointfinder databases.\n These should be removed when the underlying database is corrected.\n :param table: The pointfinder resistance table to fix.\n :return: None, but modifies the passed table in place.\n \"\"\"\n if self._file and 'salmonella' in str(self._file) and path.exists(\n path.join(path.dirname(self._file), '16S_rrsD.fsa')):\n logger.debug(\"Replacing [16S] with [16S_rrsD] for pointfinder organism [salmonella]\")\n table[['#Gene_ID']] = table[['#Gene_ID']].replace('16S', '16S_rrsD')\n\n def _get_resistance_codon_match(self, gene, codon_mutation):\n table = self._pointfinder_info\n\n matches = table[(table['#Gene_ID'] == gene)\n & (table['Codon_pos'] == codon_mutation.get_mutation_position())\n & (table['Ref_codon'] == codon_mutation.get_database_amr_gene_mutation())\n & (table['Res_codon'].str.contains(codon_mutation.get_input_genome_mutation(), regex=False))]\n\n if len(matches.index) > 1:\n raise Exception(\"Error, multiple matches for gene=\" + str(gene) + \", codon_mutation=\" + str(codon_mutation))\n else:\n return matches\n\n def _get_resistance_nucleotide_match(self, gene, nucleotide_mutations):\n return self._get_resistance_codon_match(gene, nucleotide_mutations)\n\n def get_phenotype(self, gene, codon_mutation):\n \"\"\"\n Gets the phenotype for a given gene and codon mutation from PointFinder.\n :param gene: The gene.\n :param codon_mutation: The codon mutation.\n :return: A string describing the phenotype.\n \"\"\"\n match = self._get_resistance_codon_match(gene, codon_mutation)\n\n if len(match.index) > 0:\n return match['Resistance'].iloc[0]\n else:\n raise Exception(\"Error, no match for gene=\" + str(gene) + \", codon_mutation=\" + str(codon_mutation))\n\n def get_resistance_codons(self, gene, codon_mutations):\n \"\"\"\n Gets a list of resistance codons from the given gene and codon mutations.\n :param gene: The gene.\n :param codon_mutations: The codon mutations.\n :return: The resistance codons.\n \"\"\"\n resistance_mutations = []\n\n for codon_mutation in 
codon_mutations:\n match = self._get_resistance_codon_match(gene, codon_mutation)\n if len(match.index) > 0:\n resistance_mutations.append(codon_mutation)\n\n return resistance_mutations\n\n def get_resistance_nucleotides(self, gene, nucleotide_mutations):\n \"\"\"\n Gets a list of resistance nucleotides from the given gene and nucleotide mutations.\n :param gene: The gene.\n :param nucleotide_mutations: The nucleotide mutations.\n :return: The resistance nucleotides.\n \"\"\"\n resistance_mutations = []\n\n for nucleotide_mutation in nucleotide_mutations:\n match = self._get_resistance_nucleotide_match(gene, nucleotide_mutation)\n if len(match.index) > 0:\n resistance_mutations.append(nucleotide_mutation)\n\n return resistance_mutations\n" ]
[ [ "pandas.read_csv" ] ]
ccp5UK/dlpoly-py
[ "a7f2f83dd97b963248d706894dc1d12f7fec16d8" ]
[ "configbuilder/builder.py" ]
[ "'''\nCode to build CONFIG/FIELD from layound files\n'''\n\nimport copy\nimport random\nimport numpy as np\nfrom dlpoly.field import Field\nfrom dlpoly.config import Config\nfrom dlpoly.utility import parse_line, read_line\nfrom .cfgLoader import CFG\n\nclass System:\n keywords = ('cell', 'include', 'potential')\n def __init__(self):\n self.config = Config()\n self.config.level = 0\n self.field = Field()\n self.CFGs = {}\n\n self.defined = {key: False for key in System.keywords}\n\n def handle_structure(self, source):\n ''' Read a structure block and add each element to the system '''\n lastConfig = None\n while True:\n line = read_line(source)\n line = parse_line(line)\n while line.endswith('&'):\n line += read_line(source).strip('&')\n if line.lower() == 'end structure':\n break\n\n keyword, *args = line.split()\n keyword = keyword.lower()\n\n if keyword == 'include':\n filename, *args = args\n if filename not in self.CFGs:\n self.CFGs[filename] = CFG(filename)\n\n lastConfig = self._add_config(self.CFGs[filename], args)\n\n elif keyword == 'lattice':\n if self.config.level == 0:\n raise ValueError('Cannot generate lattice with no cell specified')\n\n shape, *args = args\n\n if shape in ('cubic'):\n spacing, *args = args\n\n elif keyword == 'repeat':\n nRepeat, *args = args\n for i in range(int(nRepeat)):\n lastConfig = self._add_config(lastConfig, args[:])\n print(self.field.molecules['Water molecule'].nMols)\n\n def _add_config(self, inConfig, args):\n ''' Add a config to the current system\n Returns: Last Config\n '''\n newConfig = copy.copy(inConfig)\n\n currMol = self.field.add_molecule(newConfig)\n for atom in newConfig.atoms:\n atom.molecule = currMol\n\n while args:\n keyword = args.pop(0).lower()\n if keyword == 'angle':\n alpha, beta, gamma, *args = args\n angle = tuple(ang if ang != 'rand' else random.uniform(0, 180.)\n for ang in (alpha, beta, gamma))\n newConfig.rotate(np.asarray(angle, dtype=float))\n elif keyword == 'pos':\n x, y, z, *args = args\n newConfig.translate(np.asarray((x, y, z), dtype=float))\n elif keyword == 'stretch':\n x, y, z, *args = args\n newConfig.stretch(np.asarray((x, y, z), dtype=float))\n else:\n raise IOError('Unrecognised keyword {} in {}'.format(keyword, 'include'))\n\n\n if 'replace' in args:\n newConfig.clear_config(newConfig)\n\n self.config.add_atoms(newConfig.atoms)\n return newConfig\n\n def _del_config(self, delMol):\n ''' Delete a configuration from system '''\n molName, molID = delMol\n fieldMol = self.field.molecules[molName]\n fieldMol.nMols -= 1\n if not fieldMol.nMols:\n del self.field.molecules[molName]\n\n self.config.atoms = [atom for atom in self.config.atoms\n if atom.molecule != delMol]\n\n def _clear_config(self, config, radius=1.):\n ''' Clear the space occupied by a molecule\n determined by deleting any molecules in a cylindrical radius around defined internal bonding\n Config : Configuration to check space of\n Radius : Radius in Angstroms of cylinders\n '''\n\n radiusSq = radius**2\n\n # Calculate current list of potential conflicts\n potentialConflicts = [atom for atom in self.config.atoms\n if any(atom.pos > config.bounds[0]-radius and\n atom.pos < config.bounds[1]+radius)]\n\n for constraintClass in ('bonds', 'constraints', 'rigid'):\n for pot in config.get_pot_by_class(constraintClass):\n atomi, atomj = config.atoms[pot.atoms[0]], config.atoms[pot.atoms[1]]\n rij = atomj.pos - atomi.pos\n modRijSq = np.dot(rij, rij)\n\n for trialAtom in potentialConflicts:\n riPt = trialAtom.pos - atomi.pos\n dot = np.dot(riPt, 
rij)\n if 0.0 < dot < modRijSq and np.dot(riPt, riPt) - dot**2/modRijSq < radiusSq:\n # delete molecule!\n self._del_config(trialAtom.molecule)\n\n\n\n def handle_cell(self, line):\n ''' Read a cell line and set corresponding pbcs '''\n key, *args = line.split()\n if self.defined['cell']:\n raise ValueError('{} multiply defined in {}'.format(key.capitalize(), line))\n self.config.cell = np.zeros((3, 3))\n if len(args) == 1: # Fill diagonal\n for i in range(3):\n self.config.cell[i, i] = args[0]\n self.config.pbc = 1\n elif len(args) == 3: # Assume diagonal\n for i in range(3):\n self.config.cell[i, i] = args[i]\n self.config.pbc = 2\n elif len(args) == 9: # Full matrix\n self.config.cell = np.asarray(args).reshape((3, 3))\n self.config.pbc = 3\n else:\n raise IOError('Cannot handle cell line: {}'.format(line))\n\n def handle_potential_block(self, source):\n ''' Read a potential block into the field '''\n for line in source:\n if line.lower() == 'end potential':\n break\n line = parse_line(line)\n potClass, nPots = line.split()\n nPots = int(nPots)\n self.field._read_block(source, potClass, nPots)\n else:\n raise IOError('Unended potential block')\n\n\ndef build(source):\n system = System()\n for line in source:\n line = parse_line(line).lower()\n if not line:\n continue\n key, *args = line.split()\n\n if key == 'structure':\n system.handle_structure(source)\n elif key == 'cell':\n system.handle_cell(line)\n elif key == 'potential':\n system.handle_potential_block(source)\n\n return system\n" ]
[ [ "numpy.dot", "numpy.asarray", "numpy.zeros" ] ]
SkanderGar/QuantMacro
[ "329eae290a34ca8cb794d4bbf05ecc2ae4ede8cd", "329eae290a34ca8cb794d4bbf05ecc2ae4ede8cd" ]
[ "Pset5_hand_in/Agent1_bis1.py", "Pset3_submit/Ex2/Q2_Tvio.py" ]
[ "import numpy as np\r\nfrom scipy.stats import norm\r\n\r\nfrom numpy import vectorize\r\n\r\n@vectorize\r\ndef U1(C, C_):\r\n if C <= 0:\r\n U = -np.inf\r\n else:\r\n U = -(1/2)*(C-C_)**2\r\n return U\r\n\r\n@vectorize\r\ndef U2(C, S):\r\n if C <= 0:\r\n U = -np.inf\r\n else:\r\n U = (C**(1-S) -1)/(1-S)\r\n return U\r\n\r\n@vectorize\r\ndef Up(C, S, C_, U2):\r\n if C<=0:\r\n Upr = np.inf\r\n else:\r\n if U2 == 1:\r\n Upr = C**(-S)\r\n else:\r\n Upr = -(C-C_)\r\n \r\n \r\n return Upr\r\n\r\n@vectorize\r\ndef Up_1(Mat, S, C_, U2):\r\n if U2 == 1:\r\n Inv = Mat**(-1/S)\r\n else:\r\n Inv = -Mat + C_ \r\n return Inv\r\n#@vectorize\r\n#def E_Up_Cp(Mesh_Cp, Tr, beta, r, S, C_, U2):\r\n \r\nclass agent1:\r\n \r\n def __init__(self, N_a, T, a0 = 0, N_s=2, rho = 0.06, Sig = 5, C_ = 100, r = 0.04, B=1, U2 = 1):\r\n self.beta = 1/(1+rho)\r\n self.Sig = Sig\r\n self.C_ = C_\r\n self.r = r\r\n self.U2 = 1\r\n self.N_s = N_s\r\n self.N_a = N_a \r\n self.T = T\r\n self.B = B\r\n self.a0 = a0\r\n \r\n def markov_Tr(self, N_s, Mu_y = 1, Sig_y = 0.5, gamma=0.7, m=1):\r\n rho = gamma\r\n Sig_eps = Sig_y*((1 -rho**2)**(1/2))\r\n max_y = Mu_y + m*Sig_y\r\n min_y = Mu_y - m*Sig_y\r\n Y_grid = np.linspace(min_y, max_y, N_s)\r\n Mu = Mu_y*(1-rho) \r\n w = np.abs(max_y-min_y)/(N_s-1)\r\n Tr = np.zeros((N_s,N_s))\r\n for i in range(N_s):\r\n for j in range(1,N_s-1):\r\n Tr[i,j] = norm.cdf((Y_grid[j] - Mu -rho*Y_grid[i] + w/2)/Sig_eps ) - norm.cdf((Y_grid[j] - Mu -rho*Y_grid[i]-w/2)/Sig_eps ) \r\n Tr[i,0] = norm.cdf((Y_grid[0] - Mu -rho*Y_grid[i]+w/2)/Sig_eps )\r\n Tr[i,N_s-1] = 1 - norm.cdf((Y_grid[N_s-1] - Mu -rho*Y_grid[i]-w/2)/Sig_eps)\r\n return Tr, Y_grid\r\n \r\n def exp_Up(self, Cp):\r\n E_Up_Cp = self.Tr@Up(Cp, self.Sig, self.C_, self.U2)\r\n return E_Up_Cp\r\n \r\n def update_c(self, Cp):\r\n E_Up_Cp = self.exp_Up(Cp)\r\n new_C = Up_1(self.beta*(1+self.r)*E_Up_Cp, self.Sig, self.C_, self.U2)\r\n return new_C\r\n \r\n def problem(self):\r\n Tr, Y_grid = self.markov_Tr(self.N_s)\r\n self.Tr = Tr\r\n self.Y_grid = Y_grid\r\n if self.B == 1:\r\n A_T = Y_grid[0]*(1/(1+self.r))\r\n else:\r\n A_T = 0\r\n \r\n ####need endog grid\r\n \r\n #max_a = Y_grid[-1]*(1/(1+self.r))\r\n max_a = self.a0*(1+self.r)**self.T + (1-(1+self.r)**self.T)/(1-(1+self.r))\r\n min_a = -A_T\r\n grid_a = np.linspace(min_a, max_a, self.N_a) \r\n Mesh_a = np.tile(grid_a, (len(Y_grid),1))\r\n Mesh_y = np.tile(Y_grid, (len(grid_a),1)).T\r\n \r\n ####### last period\r\n C_store = []\r\n a_store = []\r\n C_T = (1+self.r)*Mesh_a + Mesh_y\r\n a_store.append(Mesh_a)\r\n C_store.append(C_T)\r\n a_T = Mesh_a.copy()\r\n for i in range(self.T):\r\n if self.B == 1:\r\n A_T = -Y_grid[0]*(1/(1+self.r))**(i)\r\n else:\r\n A_T = 0\r\n max_a = self.a0*(1+self.r)**(self.T-(i+1)) + (1-(1+self.r)**(self.T-(i+1)))/(1-(1+self.r))\r\n C_T_1 = self.update_c(C_T)\r\n a_T_1 = (C_T_1 + a_T-Mesh_y)/(1+self.r)\r\n ax1 = (a_T_1 <= -A_T) ###endogenous grid method\r\n ax2 = (a_T_1 >= max_a) #want to make sure in period 0 people consume what they have\r\n ### getting consumption by inverting the U can give me savings that don't\r\n ## satisfy my constraint hence ax is the set of saving that are under \r\n ##my lower bound\r\n a_T_1[ax1] = -A_T\r\n a_T_1[ax2] = max_a\r\n # updated consumption if binding\r\n C_T_1 = a_T_1*(1+self.r) - a_T + Mesh_y\r\n \r\n C_store.append(C_T_1)\r\n a_store.append(a_T_1)\r\n \r\n C_T = C_T_1\r\n a_T = a_T_1\r\n \r\n return C_store, a_store\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ", "import numpy as np\r\nimport 
os\r\nos.chdir('C:/Users/DELL/Desktop/Quant_macro/PSET3')\r\nimport Agent_Tvio as A\r\nfrom numba import jit\r\nfrom Agent_Tvio import U\r\n\r\n################## parameters ##############\r\ni = 400\r\nn=100\r\n\r\nn_i = 100\r\na_max = 1\r\na_min = -1\r\nh_max = 1\r\nh_min = 0\r\n\r\npara = {}\r\npara['beta']=0.99\r\npara['eta_y']=[1, 1.5, 2.5, 3]\r\npara['eps_y']=0.05\r\npara['tho']=0\r\npara['T1'] = 0\r\npara['T2'] = 0\r\n##################\r\n\r\ny_0 = np.linspace(0.001, 0.009, 100)\r\nfor idx, y in enumerate(y_0):\r\n if y>=0.0055 and y<=0.0087:\r\n y_0[idx]=0.001 \r\ny_0 = np.hstack((y_0,y_0,y_0,y_0))\r\nOne = np.ones(n)\r\neta = para['eta_y'][0]*One\r\nfor idx in range(1,len(para['eta_y'])):\r\n eta = np.hstack((eta,para['eta_y'][idx]*One))\r\n \r\nstate = {}\r\nstate['y']=y_0\r\nstate['eta']=eta\r\n########### compute distribution nuber of people \r\nstate['dist'] = 1/400 *np.ones(i)\r\n\r\n#########################\r\n\r\ngrid_a = np.linspace(a_min, a_max, n_i)\r\ngrid_h = np.linspace(h_min, h_max, n_i)\r\nMa = np.tile(grid_a,(n_i,1)).T\r\nMh = np.tile(grid_h,(n_i,1)).T\r\nMhp = Mh.T \r\n\r\nr_int = np.linspace(-0.1, 0.5,10)\r\n\r\n@jit\r\ndef get_GE(Agents,Ma,Mh,Mhp):\r\n maximum = []\r\n maxi_pos = []\r\n h_ex_p = []\r\n Taxes = []\r\n for agent in Agents:\r\n maxi, max_idx, h_ex_post, Ts = agent.find(Ma,Mh,Mhp)\r\n maximum.append(maxi)\r\n maxi_pos.append(max_idx)\r\n h_ex_p.append(h_ex_post)\r\n Taxes.append(Ts)\r\n return maximum, maxi_pos, h_ex_p, Taxes\r\n\r\nTol = 1\r\nj = 1\r\nstore_m_c = []\r\nfor r in r_int:\r\n print('Iteration:',j)\r\n Agents = [A.Agent(state['y'][i], state['eta'][i], para['eps_y'], r) for i in range(len(state['eta']))]\r\n maximum, maxi_pos, h_ex_p, Taxes = get_GE(Agents,Ma,Mh,Mhp)\r\n asset = []\r\n for pos in maxi_pos:\r\n a = grid_a[pos[0]]\r\n asset.append(a) \r\n res = asset@state['dist']\r\n store_m_c.append([r,res])\r\n print(res)\r\n if np.abs(res) < Tol:\r\n Tol = np.abs(res)\r\n store_a = asset\r\n store_pos = maxi_pos\r\n store_r = r\r\n store_res = res\r\n store_h_ex_p = h_ex_p\r\n store_taxes = Taxes \r\n j = j+1\r\n\r\nstore_m_c = np.array(store_m_c)\r\n\r\nstore_h = []\r\nstore_h_p = []\r\nstore_h_p_bad = []\r\nstore_h_p_good = []\r\nfor idx, pos in enumerate(store_pos):\r\n store_h.append(grid_h[pos[0]])\r\n store_h_p.append(grid_h[pos[1]])\r\n store_h_p_bad.append(grid_h[store_h_ex_p[idx][0]])\r\n store_h_p_good.append(grid_h[store_h_ex_p[idx][1]])\r\n\r\nstore_a = np.array(store_a)\r\nstore_h = np.array(store_h)\r\nstore_h_p = np.array(store_h_p)\r\nstore_h_p_bad = np.array(store_h_p_bad)\r\nstore_h_p_good = np.array(store_h_p_good)\r\nstore_taxes = np.array(store_taxes)\r\n\r\nstore_c1 = (1-para['tho'])*state['eta']*store_h + state['y'] + para['T1'] - store_a\r\nstore_c2_pos = (1-para['tho'])*(state['eta']+para['eps_y'])*store_h_p + (1+store_r)*store_a + para['T2']\r\nstore_c2_neg = (1-para['tho'])*(state['eta']-para['eps_y'])*store_h_p + (1+store_r)*store_a + para['T2']\r\nstore_S_r = store_a/((state['y']+state['eta'])*store_h*(1-para['tho']))\r\nstore_ls = store_h*state['eta']*(1-para['tho'])/(store_h*state['eta']*(1-para['tho'])+state['y']+para['T1'])\r\nstore_ls_fu_pos = store_h_p_good*(state['eta']+para['eps_y'])*(1-para['tho'])/(store_h_p_good*(state['eta']+para['eps_y'])*(1-para['tho'])+(1+store_r)*store_a+para['T2'])\r\nstore_ls_fu_neg = store_h_p_bad*(state['eta']-para['eps_y'])*(1-para['tho'])/(store_h_p_bad*(state['eta']-para['eps_y'])*(1-para['tho'])+(1+store_r)*store_a+para['T2'])\r\nstore_E_gc = (0.5*store_c2_pos + 
store_c2_neg*0.5 - store_c1)/store_c1\r\nstore_E_gwh = (0.5*store_h_p_good*(state['eta']+para['eps_y'])+0.5*store_h_p_bad*(state['eta']-para['eps_y'])-state['eta']*store_h)/state['eta']*store_h\r\nstore_elas = store_E_gc/store_E_gwh\r\ngc_pos = (store_c2_pos-store_c1)/store_c1\r\ngc_neg = (store_c2_neg-store_c1)/store_c1\r\ngwh_pos = (store_h_p_good*(state['eta']+para['eps_y'])-store_h*state['eta'])/store_h*state['eta']\r\ngwh_neg = (store_h_p_bad*(state['eta']+para['eps_y'])-store_h*state['eta'])/store_h*state['eta']\r\nelas_ratio_pos = (gc_pos/gwh_pos)/(store_E_gc/store_E_gwh)\r\nelas_ratio_neg = (gc_neg/gwh_neg)/(store_E_gc/store_E_gwh)\r\n\r\n\r\nAv_T1 = store_taxes[:,0]@state['dist']\r\nAv_T2 = (0.5*store_taxes[:,1]+0.5*store_taxes[:,2])@state['dist']\r\nV = []\r\nfor agent in Agents:\r\n v1 = U(store_c1+Av_T1, store_h, agent.sigma, agent.kappa, agent.nu) \r\n v2 = agent.beta*(0.5*U(store_c2_pos+Av_T2, store_h_p, agent.sigma, agent.kappa, agent.nu)+ \r\n 0.5*U(store_c2_neg+Av_T2, store_h_p, agent.sigma, agent.kappa, agent.nu))\r\n V.append(v1+v2)\r\n\r\n\r\ndata = np.vstack((store_c1, store_c2_pos, store_c2_neg, store_a, store_h, store_h_p,\r\n state['dist'], state['y'], store_S_r, store_ls, store_h_p_bad,\r\n store_h_p_good, store_ls_fu_pos, store_ls_fu_neg, store_E_gc,\r\n store_E_gwh, store_elas, elas_ratio_pos, elas_ratio_neg, state['eta']*store_h,\r\n V)).T\r\n \r\nfinal_para = {}\r\nfinal_para['interest'] = store_r\r\nfinal_para['tho'] = para['tho']\r\nfinal_para['T1'] = para['T1']\r\nfinal_para['T2'] = para['T2']\r\nfinal_para['residual of assets market'] = store_res\r\nfinal_para['eta_y'] = para['eta_y']\r\nnames = list(final_para)\r\n\r\n\r\nfile = open('parameters.txt','w')\r\nfor name in names:\r\n file.write(f'{name}:{final_para[name]} \\n')\r\nfile.close()\r\n\r\nnp.savetxt('variables_Tvio.txt',data)\r\nnp.savetxt('Market_Tvio.txt',store_m_c)\r\n" ]
[ [ "numpy.linspace", "numpy.abs", "scipy.stats.norm.cdf", "numpy.zeros" ], [ "numpy.array", "numpy.savetxt", "numpy.ones", "numpy.tile", "numpy.abs", "numpy.hstack", "numpy.linspace", "numpy.vstack" ] ]
charlesblakemore/opt_lev_analysis
[ "704f174e9860907de349688ed82b5812bbb07c2d" ]
[ "scripts/mod_grav/process_to_aggdat_copy.py" ]
[ "import sys, re, os\n\nimport dill as pickle\n\nimport numpy as np\nimport pandas as pd\n\nimport scipy.interpolate as interpolate\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 14})\n\nimport grav_util_3 as gu\nimport bead_util as bu\nimport configuration as config\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nncore = 30\n# ncore = 20\n# ncore = 10\n# ncore = 1\n\n\ntheory_base = '/home/cblakemore/opt_lev_analysis/gravity_sim/results/'\ntheory_data_dir = os.path.join(theory_base, '7_6um-gbead_1um-unit-cells_master/')\n\n\n# data_dirs = ['/data/old_trap/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz', \\\n# '/data/old_trap/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz_elec-term', \\\n# #\\\n# '/data/old_trap/20180704/bead1/grav_data/shield', \\\n# '/data/old_trap/20180704/bead1/grav_data/shield_1s_1h', \\\n# #'/data/old_trap/20180704/bead1/grav_data/shield2', \\\n# #'/data/old_trap/20180704/bead1/grav_data/shield3', \\\n# #'/data/old_trap/20180704/bead1/grav_data/shield4', \\\n# #'/data/old_trap/20180704/no_bead/grav_data/shield', \\\n# #\\\n# #'/data/old_trap/20180808/bead4/grav_data/shield1'\n# ]\n\n\n\n# data_dirs = ['/data/new_trap/20191204/Bead1/Shaking/Shaking370/']\n# data_dirs = ['/data/new_trap/20200107/Bead3/Shaking/Shaking380/']\n# data_dirs = ['/data/new_trap/20200113/Bead1/Shaking/Shaking377/']\n# data_dirs = [#'/data/new_trap/20200210/Bead2/Shaking/Shaking382/', \\\n# '/data/new_trap/20200210/Bead2/Shaking/Shaking384/']\n\narg1 = str(sys.argv[1])\narg2 = str(sys.argv[2])\narg3 = int(sys.argv[3])\n\n# data_dirs = ['/data/new_trap/20200320/Bead1/Shaking/Shaking373/']\n# data_dirs = ['/data/new_trap/20200320/Bead1/Shaking/Shaking378/']\n# data_dirs = ['/data/new_trap_processed/mockfiles/20200320/output/noise/Batch3/{:s}/'.format(arg)]\n# data_dirs = ['/data/new_trap_processed/mockfiles/20200320/output/noise/SBiN_2a/{:s}/'.format(arg)]\n# data_dirs = ['/data/new_trap_processed/mockfiles/20200320/output/noise/bkg_simple/{:s}/'.format(arg)]\n# data_dirs = ['/data/new_trap_processed/mockfiles/20200320/output/noise/StBiN/{:s}/'.format(arg1)]\ndata_dirs = ['/data/new_trap_processed/mockfiles/20200320/output/noise/StBiN3/{:s}/'.format(arg1)]\n# data_dirs = ['/data/new_trap_processed/mockfiles/20200320/output/noise/StBiN3/{:s}/'.format(arg1)]\n# data_dirs = ['/data/new_trap_processed/mockfiles/20200320/raw/noise/']\n# data_dirs = ['/data/new_trap_processed/mockfiles/20200320/output/noise/chas_tests/77/']\nnew_trap = True\n\n\nsignal_injection_path = ''\n# signal_injection_path = '/home/cblakemore/tmp/signal_injection_batch3_discovery_unc_3.p'\n# signal_injection_path = '/home/cblakemore/tmp/signal_injection_batch3_no-sig_discovery_3.p'\n# signal_injection_path = '/home/cblakemore/tmp/signal_injection_batch3_conservative_3.p'\n# signal_injection_path = '/home/cblakemore/tmp/signal_injection_sbin_2a_discovery.p'\n# signal_injection_path = '/home/cblakemore/tmp/signal_injection_bkg_simple_discovery.p'\n# signal_injection_path = '/home/cblakemore/tmp/signal_injection_stbin2_discovery.p'\n# signal_injection_path = '/home/cblakemore/tmp/signal_injection_stbin3_discovery.p'\ntry:\n signal_injection_results = pickle.load(open(signal_injection_path, 'rb'))\nexcept:\n signal_injection_results = {}\n\ninj_key = arg1\n\n\n# binning_result_path = ''\n# binning_result_path = '/home/cblakemore/tmp/20200320_mod_grav_rand3_binning_2.p'\n# binning_result_path = 
'/home/cblakemore/tmp/20200320_mod_grav_far_rand1_binning.p'\n# binning_result_path = '/home/cblakemore/tmp/signal_injection_stbin3_{:s}_binning.p'.format(arg1)\nbinning_result_path = '/home/cblakemore/tmp/signal_injection_stbin3_{:s}_rand{:d}_binning.p'\\\n .format(arg1, arg3)\ntry:\n binning_results = pickle.load(open(binning_result_path, 'rb'))\nexcept:\n binning_results = {}\n\nbin_key = arg2\n\n\n# step_cal_drive_freq = 41.0\nstep_cal_drive_freq = 71.0\n\n# pardirs_in_name = 1\npardirs_in_name = 2\n\n# substr = ''\n# substr = 'Noise_add_3'\n# substr = 'NoShaking_1'\nsubstr = 'Noise_batch'\n# substr = 'Shaking0' # for 20200210/.../...382/\n# substr = 'Shaking3' # for 20200210/.../...384/ and 20200320/.../...378\n# substr = 'Shaking4' # for 20200320/.../...373\n\nuser_load_ext = '_discovery'\n# user_load_ext = '_no-discovery'\n\n# user_save_ext = '_discovery'\n# user_save_ext = '_no-discovery'\n# user_save_ext = '_no-discovery_sign-sum'\n# user_save_ext = '_no-discovery_binning-{:s}'.format(arg2)\nuser_save_ext = '_no-discovery_rand{:d}_binning-{:s}'.format(arg3, arg2)\n# user_save_ext = '_no-discovery-conservative'\n# user_save_ext = '_TEST'\n\n# Nfiles = 5\n# Nfiles = 50\nNfiles = 1000\n# Nfiles = 5000\n# Nfiles = 5500 # for far 20200320 dataset\n# Nfiles = 16000\n# Nfiles = 10000\n\nsuppress_off_diag = True\n\n# reprocess = True\n# save = True\nreprocess = False\nsave = False\n\n# redo_alpha_fit = True\nredo_likelihood_sum = True\nredo_alpha_fit = False\n# redo_likelihood_sum = False\n\nnalpha = 1001\n# file_chunking = 5500\n# file_chunking = 10000\nfile_chunking = int(arg2)\nshuffle_in_time = True\nif arg3 == 1:\n shuffle_seed = 123456 # rand1\nelif arg3 == 2:\n shuffle_seed = 7654321 # rand2\nelif arg3 == 3:\n shuffle_seed = 1029384756 # rand3\nelse:\n shuffle_seed = 999999\nfreq_pairing = 1\n# freq_pairing = 8\n# freq_pairing = 15\nno_discovery = True\nsum_by_sign = False\nconfidence_level = 0.95\n\nplot_harms = False\nplot_templates = False\nplot_basis = False\nplot_alpha_xyz = False\nplot_bad_alphas = False\n\nplot_mle_vs_time = True\nmle_vs_time_chunk_size = 50\nzoom_limits = ()\n# zoom_limits = (6.0, 6.5)\nplot_freqs = [6.0, 12.0, 33.0, 36.0]\nplot_alpha = 1.0\n\nplot_chunked_mle_vs_time = True\nplot_mle_histograms = False\nplot_likelihood_ratio_histograms = False\nplot_harmonic_likelihoods = True\nplot_final_likelihood = True\nplot_limit = True\n\nlambdas_to_plot = [10.0e-6]\n# lambdas_to_plot = [5.0e-6, 10.0e-6]\n# lambdas_to_plot = [5.0e-6, 10.0e-6, 12.0e-6, 18.0e-6, 31.0e-6]\n\nlimit_xlim = (5.0e-7, 1e-3)\nlimit_ylim = (5e6, 1e14)\n\nsave_hists = False\n\n### Position of bead relative to the attractor coordinate system\np0_bead_dict = {'20200320': [392.0, 199.7, 42.37]}\n\n# harms = [6]\n# harms = [3,4,5,6]\nharms = [2,4,6,7,10,11,12,13]\n# harms = [2,3,4,5,6,7,8,9,10,11,12,13]\n# harms = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]\n# harms = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\n# harms = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,21,22,23,24,25,26,27,28,29,30] # no 60 Hz\n# harms = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30] # no 51/60/63 Hz\n# harms = [2,3,4,5,6,7,8,9,10]\n# harms = []\n\n\nfake_attractor_data = True\nfake_attractor_data_freq = 3.0\nfake_attractor_data_amp = 0.5*202.11\nfake_attractor_data_dc = 194.92\nfake_attractor_data_axis = 1\n\nfix_sep = True\n# fix_sep_val = 11.1\nfix_sep_val = 13.9\n# fix_sep_val = 19.9\n\nfix_height = True\nfix_height_val = -15.23\n\n\n\nadd_fake_data = 
False\nfake_alpha = 5.0e10\n\n\n\n######################################################################\n######################################################################\nif no_discovery:\n ss = True\nelse:\n ss = False\n\n\nif plot_harms or plot_templates or plot_basis or plot_alpha_xyz:\n ncore = 1\n\n\n#opt_ext = 'TEST'\nopt_ext = '_harms'\nfor harm in harms:\n opt_ext += '-' + str(int(harm))\n\nopt_ext += '_first-{:d}'.format(Nfiles)\nif len(substr):\n opt_ext = '_{:s}{:s}'.format(substr, opt_ext)\n\nif len(user_save_ext):\n opt_ext += user_save_ext\n\n\nfor ddir in data_dirs:\n # Skip the ones I've already calculated\n #if ddir == data_dirs[0]:\n # continue\n print()\n\n aux_path_base = ddir.replace('/data/new_trap/', '/data/new_trap_processed/processed_files/')\n aux_path = os.path.join(aux_path_base, '{:s}_aux.pkl'.format(substr))\n try:\n aux_data = pickle.load( open(aux_path, 'rb') )\n except:\n print(\"Couldn't load auxiliary data file\")\n aux_data = []\n\n\n paths = gu.build_paths(ddir, opt_ext, pardirs_in_name=pardirs_in_name, new_trap=new_trap)\n agg_path = paths['agg_path']\n plot_dir = paths['plot_dir']\n p0_bead = p0_bead_dict[paths['date']]\n\n agg_load_path = agg_path.replace(user_save_ext, user_load_ext)\n\n print('----------------------------------')\n if reprocess:\n print('Loading files from:')\n print(' {:s}'.format(ddir))\n else:\n print('Loading aggregate data from:')\n print(' {:s}'.format(agg_load_path))\n\n print('----------------------------------')\n print('Will save to:')\n print(' {:s}'.format(agg_path))\n\n print('----------------------------------')\n print('Will save plots to:')\n print(' {:s}'.format(plot_dir))\n print('----------------------------------')\n print()\n\n if save:\n bu.make_all_pardirs(agg_path)\n\n\n if reprocess:\n\n datafiles, lengths = bu.find_all_fnames(ddir, ext=config.extensions['data'], \\\n substr=substr, sort_by_index=True, \\\n sort_time=False)\n datafiles = datafiles[:Nfiles]\n\n agg_dat = gu.AggregateData(datafiles, p0_bead=p0_bead, harms=harms, \\\n plot_harm_extraction=plot_harms, new_trap=new_trap, \\\n step_cal_drive_freq=71.0, ncore=ncore, noisebins=10, \\\n aux_data=aux_data, suppress_off_diag=suppress_off_diag, \\\n fake_attractor_data=fake_attractor_data, \\\n fake_attractor_data_amp=fake_attractor_data_amp, \\\n fake_attractor_data_dc=fake_attractor_data_dc, \\\n fake_attractor_data_freq=fake_attractor_data_freq, \\\n fake_attractor_data_axis=fake_attractor_data_axis)\n\n agg_dat.load_grav_funcs(theory_data_dir)\n\n if save:\n agg_dat.save(agg_path)\n\n agg_dat.bin_rough_stage_positions()\n #agg_dat.average_resp_by_coordinate()\n\n # agg_dat.plot_force_plane(resp=0, fig_ind=1, show=True)\n # agg_dat.plot_force_plane(resp=1, fig_ind=2, show=False)\n # agg_dat.plot_force_plane(resp=2, fig_ind=3, show=True)\n\n # agg_dat.find_alpha_xyz_from_templates(plot=plot_alpha_xyz, plot_basis=plot_basis, \\\n # ncore=ncore, plot_templates=plot_templates, \\\n # n_largest_harms=n_largest_harms, \\\n # # add_fake_data=True, fake_alpha=1e9,\\\n # )\n\n agg_dat.find_alpha_likelihoods_every_harm(plot=plot_alpha_xyz, plot_basis=plot_basis, \\\n ncore=ncore, plot_templates=plot_templates, \\\n add_fake_data=add_fake_data, \\\n fake_alpha=fake_alpha, fix_sep=fix_sep, \\\n fix_sep_val=fix_sep_val, fix_height=fix_height, \\\n fix_height_val=fix_height_val)\n\n if save:\n agg_dat.save(agg_path)\n\n agg_dat.sum_alpha_likelihoods(no_discovery=no_discovery, freq_pairing=freq_pairing, \\\n nalpha=nalpha, chunk_size=file_chunking, \\\n 
shuffle_in_time=shuffle_in_time, shuffle_seed=shuffle_seed, \\\n sum_by_sign=sum_by_sign)\n if save:\n agg_dat.save(agg_path)\n\n\n print('Plotting/saving MLE histograms and profile likelihoods...', end='')\n sys.stdout.flush()\n\n if plot_mle_vs_time:\n agg_dat.plot_mle_vs_time(show=False, save=True, plot_freqs=plot_freqs, basepath=plot_dir, \\\n plot_alpha=plot_alpha, chunk_size=mle_vs_time_chunk_size, \\\n zoom_limits=zoom_limits)\n\n if plot_chunked_mle_vs_time:\n agg_dat.plot_chunked_mle_vs_time(show=False, save=True, plot_freqs=plot_freqs, \\\n basepath=plot_dir, plot_alpha=plot_alpha)\n\n if plot_mle_histograms:\n agg_dat.plot_mle_histograms(show=False, save=True, bins=20, basepath=plot_dir)\n\n if plot_likelihood_ratio_histograms:\n for lambda_to_plot in lambdas_to_plot:\n agg_dat.plot_likelihood_ratio_histograms(show=False, save=True, basepath=plot_dir, \\\n yuklambda=lambda_to_plot)\n\n if plot_harmonic_likelihoods:\n for lambda_to_plot in lambdas_to_plot:\n agg_dat.plot_sum_likelihood_by_harm(show=False, save=True, basepath=plot_dir, \\\n include_limit=True, no_discovery=no_discovery, \\\n confidence_level=confidence_level, ss=ss, \\\n yuklambda=lambda_to_plot)\n\n if plot_final_likelihood:\n for lambda_to_plot in lambdas_to_plot:\n agg_dat.plot_sum_likelihood(show=False, save=True, basepath=plot_dir, \\\n include_limit=True, no_discovery=no_discovery, \\\n confidence_level=confidence_level, ss=ss, \\\n yuklambda=lambda_to_plot)\n\n if plot_limit:\n agg_dat.get_limit_from_likelihood_sum(confidence_level=confidence_level, \\\n no_discovery=no_discovery, ss=ss, \\\n xlim=limit_xlim, ylim=limit_ylim,\n show=False, save=True, basepath=plot_dir)\n print('Done!')\n\n\n # agg_dat.fit_alpha_xyz_vs_alldim()\n # agg_dat.fit_alpha_xyz_onepos_simple(resp=[2], verbose=False)\n\n if save:\n agg_dat.save(agg_path)\n\n\n\n\n else:\n agg_dat = gu.AggregateData([], p0_bead=p0_bead, harms=harms, new_trap=new_trap)\n agg_dat.load(agg_load_path)\n\n agg_dat.bin_rough_stage_positions()\n #agg_dat.average_resp_by_coordinate()\n\n if redo_alpha_fit: \n # agg_dat.find_alpha_xyz_from_templates(plot=plot_alpha_xyz, plot_basis=plot_basis, \\\n # ncore=ncore, plot_bad_alphas=plot_bad_alphas, \\\n # plot_templates=plot_templates, \\\n # n_largest_harms=n_largest_harms, \\\n # # add_fake_data=True, fake_alpha=1e9, \\\n # )\n\n agg_dat.find_alpha_likelihoods_every_harm(plot=plot_alpha_xyz, plot_basis=plot_basis, \\\n ncore=ncore, plot_templates=plot_templates, \\\n add_fake_data=add_fake_data, \\\n fake_alpha=fake_alpha, fix_sep=fix_sep, \\\n fix_sep_val=fix_sep_val, \\\n fix_height=fix_height, \\\n fix_height_val=fix_height_val)\n if save:\n agg_dat.save(agg_path)\n\n # agg_dat.gfuncs_class.reload_grav_funcs()\n # agg_dat.save(agg_path)\n\n if redo_likelihood_sum:\n agg_dat.sum_alpha_likelihoods(no_discovery=no_discovery, freq_pairing=freq_pairing, \\\n nalpha=nalpha, chunk_size=file_chunking, \\\n shuffle_in_time=shuffle_in_time, shuffle_seed=shuffle_seed, \\\n sum_by_sign=sum_by_sign)\n if save:\n agg_dat.save(agg_path)\n\n print('Plotting/saving MLE histograms and profile likelihoods...', end='')\n sys.stdout.flush()\n\n if plot_mle_vs_time:\n agg_dat.plot_mle_vs_time(show=False, save=True, plot_freqs=plot_freqs, basepath=plot_dir, \\\n plot_alpha=plot_alpha, chunk_size=mle_vs_time_chunk_size, \\\n zoom_limits=zoom_limits)\n \n if plot_chunked_mle_vs_time:\n agg_dat.plot_chunked_mle_vs_time(show=False, save=True, plot_freqs=plot_freqs, \\\n basepath=plot_dir, plot_alpha=plot_alpha)\n\n if 
plot_mle_histograms:\n agg_dat.plot_mle_histograms(show=False, save=True, bins=20, basepath=plot_dir)\n\n if plot_likelihood_ratio_histograms:\n for lambda_to_plot in lambdas_to_plot:\n agg_dat.plot_likelihood_ratio_histograms(show=False, save=True, basepath=plot_dir, \\\n yuklambda=lambda_to_plot)\n\n if plot_harmonic_likelihoods:\n for lambda_to_plot in lambdas_to_plot:\n agg_dat.plot_sum_likelihood_by_harm(show=False, save=True, basepath=plot_dir, \\\n include_limit=True, no_discovery=no_discovery, \\\n confidence_level=confidence_level, ss=ss, \\\n yuklambda=lambda_to_plot)\n\n if plot_final_likelihood:\n for lambda_to_plot in lambdas_to_plot:\n agg_dat.plot_sum_likelihood(show=False, save=True, basepath=plot_dir, \\\n include_limit=True, no_discovery=no_discovery, \\\n confidence_level=confidence_level, ss=ss, \\\n yuklambda=lambda_to_plot)\n if plot_limit:\n agg_dat.get_limit_from_likelihood_sum(confidence_level=confidence_level, \\\n no_discovery=no_discovery, ss=ss, \\\n xlim=limit_xlim, ylim=limit_ylim,\n show=False, save=True, basepath=plot_dir)\n print('Done!')\n\n if save:\n agg_dat.save(agg_path)\n\n # agg_dat.fit_alpha_xyz_onepos_simple(resp=[2], verbose=False)\n\n #agg_dat.plot_force_plane(resp=0, fig_ind=1, show=False)\n #agg_dat.plot_force_plane(resp=1, fig_ind=2, show=False)\n #agg_dat.plot_force_plane(resp=2, fig_ind=3, show=True)\n\n # agg_dat.find_alpha_xyz_from_templates(plot=plot_alpha_xyz, plot_basis=plot_basis, \\\n # ncore=ncore)\n # agg_dat.plot_alpha_xyz_dict(k=0)\n # agg_dat.plot_alpha_xyz_dict(k=1)\n # agg_dat.plot_alpha_xyz_dict(k=2)\n # agg_dat.plot_alpha_xyz_dict(lambind=10)\n # agg_dat.plot_alpha_xyz_dict(lambind=50)\n\n\n\n # sample_lambdas = np.array([5.0e-6, 10.0e-6, 25.0e-6])\n\n if len(signal_injection_path) or len(binning_result_path):\n\n obj = agg_dat.agg_dict[list(agg_dat.agg_dict.keys())[0]][agg_dat.ax0vec[0]][agg_dat.ax1vec[0]][0]\n freqs = np.fft.rfftfreq(obj.nsamp, d=1.0/obj.fsamp)[obj.ginds]\n\n sample_lambdas = np.array([10.0e-6])\n # sample_lambdas = np.array([5.0, 10.0, 12.0, 18.0, 20.0, 25.0, 31.0]) * 1e-6\n\n mle_arr = np.zeros( (3,len(sample_lambdas),2) )\n mle_arr_2 = np.zeros( (3,len(sample_lambdas),len(freqs)) )\n\n limit_arr = np.zeros( (3,len(sample_lambdas),2) )\n limit_arr_2 = np.zeros( (3,len(sample_lambdas),len(freqs),2) )\n\n inds = []\n for yuklambda in sample_lambdas:\n inds.append( np.argmin( np.abs(yuklambda - agg_dat.pos_limit[0]) ) )\n inds = np.array(inds)\n\n for resp in [0,1,2]:\n\n func1 = interpolate.interp1d(np.log(agg_dat.pos_limit[0]), \\\n np.log(agg_dat.pos_limit[resp+1]) )\n sample_posalphas = np.exp(func1(np.log(sample_lambdas)))\n\n # out_arr[resp,0] = sample_posalphas\n limit_arr[resp,:,0] = agg_dat.pos_limit[resp+1][inds]\n\n func2 = interpolate.interp1d(np.log(agg_dat.neg_limit[0]), \\\n np.log(agg_dat.neg_limit[resp+1]) )\n sample_negalphas = np.exp(func2(np.log(sample_lambdas)))\n\n # out_arr[resp,1] = sample_negalphas\n limit_arr[resp,:,1] = agg_dat.neg_limit[resp+1][inds]\n\n\n mle_arr[resp,:,0] = agg_dat.mle[resp+1][inds]\n mle_arr[resp,:,1] = np.mean(agg_dat.mle_unc[resp,:,:][:,inds], axis=0)\n\n for freqind, freq in enumerate(freqs):\n harm_mles = agg_dat.mles_by_harmonic[freq]\n mle_arr_2[resp,:,freqind] = harm_mles[resp,inds,0]\n\n for i, ind in enumerate(inds):\n prof_alpha, prof_val = agg_dat.likelihoods_sum_by_harmonic[freq][resp,ind]\n limit = bu.get_limit_from_general_profile(prof_alpha, prof_val, ss=ss,\\\n no_discovery=no_discovery, \\\n confidence_level=confidence_level)\n \n 
limit_arr_2[resp,i,freqind,0] = limit['upper_unc']\n limit_arr_2[resp,i,freqind,1] = limit['lower_unc']\n\n if len(signal_injection_path):\n signal_injection_results['freqs'] = freqs\n signal_injection_results['sample_lambdas'] = sample_lambdas\n signal_injection_results['key'] = 'MLE_array axes: coord-axis, sampled-lambda, (0)mle(1)unc\\n'\\\n + 'MLE_by_harm axes: coord-axis, sampled-lambda, freq\\n'\\\n + 'Limit axes: coord-axis, sampled-lambda, (0)pos-limit(1)neg-limit'\n\n signal_injection_results[inj_key+'_limit'] = limit_arr\n signal_injection_results[inj_key+'_limit_by_harm'] = limit_arr_2\n signal_injection_results[inj_key+'_mle'] = mle_arr\n signal_injection_results[inj_key+'_mle_by_harm'] = mle_arr_2\n pickle.dump(signal_injection_results, open(signal_injection_path, 'wb'))\n\n if len(binning_result_path):\n binning_results['freqs'] = freqs\n binning_results['sample_lambdas'] = sample_lambdas\n binning_results['key'] = 'MLE_array axes: coord-axis, sampled-lambda, (0)mle(1)unc\\n'\\\n + 'MLE_by_harm axes: coord-axis, sampled-lambda, freq\\n'\\\n + 'Limit axes: coord-axis, sampled-lambda, (0)pos-limit(1)neg-limit'\n\n binning_results[bin_key+'_limit'] = limit_arr\n binning_results[bin_key+'_limit_by_harm'] = limit_arr_2\n binning_results[bin_key+'_mle'] = mle_arr\n binning_results[bin_key+'_mle_by_harm'] = mle_arr_2\n pickle.dump(binning_results, open(binning_result_path, 'wb'))" ]
[ [ "matplotlib.pyplot.rcParams.update", "numpy.array", "numpy.log", "numpy.fft.rfftfreq", "numpy.mean", "numpy.abs" ] ]
Danbinabo/Mask_Rcnn
[ "a06b1d3c35fbc63e269b735729ecc6b2b84bf13e" ]
[ "pb_model_test.py" ]
[ "import tensorflow as tf\nimport cv2\nimport glob\nimport numpy as np\npb_path = 'landmark.pb' # pb模型\n\nsess = tf.Session()\nwith sess.as_default():\n with tf.gfile.FastGFile(pb_path, 'rb') as f:\n graph_def = sess.graph_def\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def,name='')\n\n# 测试图片\nim_list = glob.glob('images/*')\n\nlandmark = sess.graph.get_tensor_by_name('fully_connected_11/Relu:0')\nfor im_url in im_list:\n im_data = cv2.imread(im_url)\n im_data = cv2.resize(im_data,(128,128))\n pred = sess.run(landmark,{'Placeholder:0':np.expand_dims(im_data,0)}) # 图片给网络 -- 增加一维\n print(pred)\n # 反归一化\n pred = pred[0]\n for i in range(0,136,2):\n cv2.circle(im_data,(int(pred[i] * 128),int(pred[i+1] * 128)),2,(0,255,0),2)\n name = im_url.split('\\\\')[-1]\n cv2.imwrite('./test_result/%s' % name,im_data)\n cv2.imshow('11',im_data)\n cv2.waitKey(200)\n\n" ]
[ [ "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.gfile.FastGFile", "numpy.expand_dims" ] ]
houzeyu2683/VariationalAutoEncoder
[ "6774824d14a5b49de46fb7f8a6f9441eca6c77d5" ]
[ "script/initialization.py" ]
[ "\n##\n## The packages.\nimport pandas, os\nimport sklearn.model_selection\n\n\"\"\"\n影像資料儲存於資料夾,根據影像名稱以及路徑,建立資料表。\n\"\"\"\n\n##\n## Handle the link of image.\nroot = \"/media/houzeyu2683/120C3F2F0C3F0D6B/DataSetGroup/celebfacesattribute/\"\nfolder = os.path.join(root, \"jpg\")\ngroup = os.listdir(folder)\nlink = [os.path.join(folder, i) for i in group]\ntable = pandas.DataFrame({'image':group, \"link\":link, 'mode':\"undefine\"})\n\n##\n## Generate train, exam and test.\ntable = table.sample(len(table)).reset_index(drop=True).copy()\ntrain, other = sklearn.model_selection.train_test_split(table, test_size=0.2, random_state=0)\nexam, test = sklearn.model_selection.train_test_split(other, test_size=0.5, random_state=0)\ntrain['mode'] = \"train\"\nexam['mode'] = 'exam'\ntest['mode'] = 'test'\ntable = pandas.concat([train,exam, test])\n\n## \nrecord = os.path.join(root, 'csv')\nos.makedirs(record, exist_ok=True)\ntable.to_csv(os.path.join(record, 'index.csv'), index=False)\npass\n\n\n# train = pandas.read_csv(\"../#DATA#/SFDDD/CSV/ANNOTATION.csv\")\n# train['mode'] = 'train'\n# train['folder'] = \"../#DATA#/SFDDD/JPG/TRAIN/\"\n\n# test = pandas.DataFrame({\n# 'mode':'test', \n# \"img\":os.listdir(\"../#DATA#/SFDDD/JPG/TEST\"), \n# \"classname\":\"\", \n# 'folder':\"../#DATA#/SFDDD/JPG/TEST/\"\n# })\n\n# train['image'] = train['folder'] + train['classname'] + '/' + train['img']\n# test['image'] = test['folder'] + test['classname'] + test['img']\n\n# table = pandas.concat([train, test])\n# target = {\n# \"c0\":0,\n# \"c1\":1,\n# \"c2\":2,\n# \"c3\":3,\n# \"c4\":4,\n# \"c5\":5,\n# \"c6\":6,\n# \"c7\":7,\n# \"c8\":8,\n# \"c9\":9,\n# \"\":-1\n# }\n# table['target'] = table['classname'].replace(target)\n\n# path = \"SOURCE/CSV/ANNOTATION.csv\"\n# os.makedirs(os.path.dirname(path), exist_ok=True)\n# table.to_csv(path, index=False)\n\n\n# import matplotlib.pyplot as plt\n\n# from skimage.feature import hog\n# from skimage import data, exposure\n\n\n# image = data.astronaut()\n\n# fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16), cells_per_block=(1, 1), visualize=True)\n\n\n# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)\n\n# ax1.axis('off')\n# ax1.imshow(image, cmap=plt.cm.gray)\n# ax1.set_title('Input image')\n\n# # Rescale histogram for better display\n# hog_image\n# hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))\n# PIL.Image.fromarray(hog_image).convert(\"RGB\").save(\"demo.png\")\n# ax2.axis('off')\n# ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)\n# ax2.set_title('Histogram of Oriented Gradients')\n# plt.show()" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
larsmaaloee/BIVA
[ "e47201113d779c6ea1245875714101b2bbfcbdae" ]
[ "layers/_neural.py" ]
[ "\nimport tensorflow as tf\nimport warnings\n\n\ndef conv2d(x, dim=(32, [3, 3], [1, 1]), pad='SAME', scope=\"conv2d\", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)):\n num_filters, filter_size, stride = dim\n with tf.variable_scope(scope):\n V = tf.get_variable('V', shape=list(filter_size) + [int(x.get_shape()[-1]), num_filters], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05), trainable=True)\n\n g = tf.get_variable('g', shape=[num_filters], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,\n initializer=bias_initializer, trainable=True)\n\n def maybe_avg(v):\n if ema is not None and not init:\n v = tf.cond(training, lambda: v, lambda: ema.average(v))\n return v\n\n if init:\n x = tf.nn.conv2d(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2]), [1] + list(stride) + [1], pad)\n\n init_scale=.01\n m_init, v_init = tf.nn.moments(x, [0,1,2])\n scale_init = init_scale / tf.sqrt(v_init + 1e-10)\n with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):\n x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters]))\n else:\n V = maybe_avg(V)\n g = maybe_avg(g)\n b = maybe_avg(b)\n\n # use weight normalization (Salimans & Kingma, 2016)\n W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])\n\n # calculate convolutional layer output\n x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + list(stride) + [1], pad), b)\n\n return x\n\ndef gated_resnet(x, aux, dim=(32, [3, 3], [1, 1]), activation=tf.nn.elu, scope=\"gated_resnet\", residual=True, dropout=.0, conv=conv2d, training=True, ema=None, init=False):\n out = conv(activation(x), [dim[0], dim[1], [1, 1]], scope=\"%s_conv_in\"%scope, training=training, ema=ema, init=init)\n in_shp = x.get_shape().as_list()\n assert in_shp[1] == in_shp[2]\n\n\n if aux is not None:\n aux_shp = aux.get_shape().as_list()\n\n assert aux_shp[1] == aux_shp[2]\n if aux_shp[1:-1] > in_shp[1:-1]:\n aux = conv(activation(aux), [dim[0], dim[1], [aux_shp[1] // in_shp[1], aux_shp[2] // in_shp[2]]],\n scope=\"%s_conv_downsample_aux\" % scope, training=training, ema=ema, init=init)\n elif aux_shp[1:-1] < in_shp[1:-1]:\n aux = deconv2d(activation(aux), [dim[0], dim[1], [in_shp[1] // aux_shp[1], in_shp[2] // aux_shp[2]]],\n scope=\"%s_conv_upsample_aux\" % scope, training=training, ema=ema, init=init)\n else:\n aux = nin(activation(aux), dim[0], training=training, ema=ema, init=init, scope=\"%s_conv_aux\" % scope)\n\n out += aux\n\n out = activation(out)\n\n if dropout > 0:\n out = tf.layers.dropout(out, rate=dropout, training=training)\n\n out = conv(out, [2*dim[0], dim[1], dim[2]], scope=\"%s_conv_out\"%scope, training=training, ema=ema, init=init)\n h_stack1, h_stack2 = tf.split(out, 2, 3)\n sigmoid_out = tf.sigmoid(h_stack2)\n out = (h_stack1 * sigmoid_out)\n\n out_shp = out.get_shape().as_list()\n if out_shp[1:-1] < in_shp[1:-1]:\n x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1], strides=[1, dim[2][0], dim[2][1], 1], padding='SAME')\n elif out_shp[1:-1] > in_shp[1:-1]:\n warnings.warn(\"The height and width of the output are larger than the input. There will be no residual connection.\")\n residual = False\n\n if out_shp[-1] > in_shp[-1]:\n x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, int(dim[0] - in_shp[-1])]])\n elif out_shp[-1] < in_shp[-1]:\n warnings.warn(\"The input has more feature maps than the output. 
There will be no residual connection.\")\n residual = False\n\n if residual:\n out += x\n\n return out\n\n\ndef deconv2d(x, dim=(32, [3, 3], [1, 1]), pad='SAME', scope=\"deconv2d\", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)):\n num_filters, filter_size, stride = dim\n\n xs = x.get_shape().as_list()\n if pad=='SAME':\n target_shape = [tf.shape(x)[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]\n else:\n target_shape = [tf.shape(x)[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]\n\n with tf.variable_scope(scope):\n V = tf.get_variable(\"V\", shape=list(filter_size) + [num_filters, int(x.get_shape()[-1])], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.05), trainable=True)\n g = tf.get_variable(\"g\", shape=[num_filters], dtype=tf.float32, initializer=tf.constant_initializer(1.), trainable=True)\n b = tf.get_variable(\"b\", shape=[num_filters], dtype=tf.float32, initializer=bias_initializer, trainable=True)\n\n def maybe_avg(v):\n if ema is not None and not init:\n v = tf.cond(training, lambda: v, lambda: ema.average(v))\n return v\n\n if init:\n x = tf.nn.conv2d_transpose(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 3]), target_shape, [1] + list(stride) + [1], padding=pad)\n\n init_scale = .01\n m_init, v_init = tf.nn.moments(x, [0, 1, 2])\n scale_init = init_scale / tf.sqrt(v_init + 1e-10)\n with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):\n x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters]))\n\n else:\n V = maybe_avg(V)\n g = maybe_avg(g)\n b = maybe_avg(b)\n\n W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(V, [0, 1, 3])\n # calculate convolutional layer output\n x = tf.nn.conv2d_transpose(x, W, target_shape, [1] + list(stride) + [1], padding=pad)\n x = tf.nn.bias_add(x, b)\n\n return x\n\n\ndef transposed_gated_resnet(x, aux, dim=(32, [3, 3], [1, 1]), activation=tf.nn.elu, scope=\"transposed_gated_resnet\", residual=True, dropout=.0, conv=conv2d, training=True, ema=None, init=False):\n out = conv(activation(x), [dim[0], dim[1], [1, 1]], scope=\"%s_conv_in\" % scope, training=training, ema=ema, init=init)\n in_shp = x.get_shape().as_list()\n assert in_shp[1] == in_shp[2]\n\n if aux is not None:\n aux_shp = aux.get_shape().as_list()\n\n assert aux_shp[1] == aux_shp[2]\n\n if aux_shp[1:-1] > in_shp[1:-1]:\n aux = conv(activation(aux), [dim[0], dim[1], [aux_shp[1] // in_shp[1], aux_shp[2] // in_shp[2]]],\n scope=\"%s_conv_downsample_aux\" % scope, training=training, ema=ema, init=init)\n elif aux_shp[1:-1] < in_shp[1:-1]:\n aux = deconv2d(activation(aux), [dim[0], dim[1], [in_shp[1] // aux_shp[1], in_shp[2] // aux_shp[2]]],\n scope=\"%s_conv_upsample_aux\" % scope, training=training, ema=ema, init=init)\n else:\n aux = nin(activation(aux), dim[0], training=training, ema=ema, init=init, scope=\"%s_conv_aux\" % scope)\n\n out += aux\n\n out = activation(out)\n\n if dropout > 0:\n out = tf.layers.dropout(out, rate=dropout, training=training)\n\n if sum(dim[2]) > 2:\n out = deconv2d(out, [2*dim[0], dim[1], dim[2]], scope=\"%s_conv_out\"%scope, training=training, ema=ema, init=init)\n else:\n out = conv2d(out, [2*dim[0], dim[1], dim[2]], scope=\"%s_conv_out\"%scope, training=training, ema=ema, init=init)\n\n h_stack1, h_stack2 = tf.split(out, 2, 3)\n sigmoid_out = tf.sigmoid(h_stack2)\n out = (h_stack1 * sigmoid_out)\n\n out_shp = out.get_shape().as_list()\n if out_shp[1:-1] < 
in_shp[1:-1]:\n x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1], strides=[1, dim[2][0], dim[2][1], 1], padding='SAME')\n elif out_shp[1:-1] > in_shp[1:-1]:\n warnings.warn(\n \"The height and width of the output are larger than the input. There will be no residual connection.\")\n residual = False\n\n\n if out_shp[-1] > in_shp[-1]:\n x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, int(dim[0] - in_shp[-1])]])\n\n elif out_shp[-1] < in_shp[-1]:\n warnings.warn(\"The input has more feature maps than the output. There will be no residual connection.\")\n residual = False\n\n if residual:\n out += x\n\n return out\n\n\ndef nin(x, num_units, **kwargs):\n s = tf.shape(x)\n sh = x.get_shape().as_list()\n x = tf.reshape(x, [tf.reduce_prod(s[:-1]), sh[-1]])\n x = dense(x, num_units, **kwargs)\n return tf.reshape(x, [-1] + sh[1:-1] + [num_units])\n\n\ndef dense(x, num_units, scope=\"dense\", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)):\n with tf.variable_scope(scope):\n V = tf.get_variable('V', shape=[int(x.get_shape()[1]), num_units], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05), trainable=True)\n g = tf.get_variable('g', shape=[num_units], dtype=tf.float32,\n initializer=tf.constant_initializer(1.), trainable=True)\n b = tf.get_variable('b', shape=[num_units], dtype=tf.float32,\n initializer=bias_initializer, trainable=True)\n\n def maybe_avg(v):\n if ema is not None and not init:\n v = tf.cond(training, lambda: v, lambda: ema.average(v))\n return v\n\n if init:\n x = tf.matmul(x, tf.nn.l2_normalize(V.initialized_value(), 0))\n\n init_scale = .01\n m_init, v_init = tf.nn.moments(x, [0])\n scale_init = init_scale / tf.sqrt(v_init + 1e-10)\n with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):\n x = tf.reshape(scale_init, [1, num_units]) * (x - tf.reshape(m_init, [1, num_units]))\n\n else:\n V = maybe_avg(V)\n g = maybe_avg(g)\n b = maybe_avg(b)\n\n x = tf.matmul(x, V)\n scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))\n x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])\n\n\n return x\n\n\ndef sample_from_discretized_mix_logistic(l, nr_mix):\n \"\"\"\n This function is copied from https://github.com/openai/pixel-cnn/blob/master/pixel_cnn_pp/nn.py in reference to:\n See [Salimans et. al., 2017](https://arxiv.org/pdf/1701.05517)\n ([pdf](https://arxiv.org/pdf/1701.05517.pdf))\n\n log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval\n \"\"\"\n ls = [-1] + l.get_shape().as_list()[1:]\n xs = ls[:-1] + [3]\n # unpack parameters\n logit_probs = l[:, :, :, :nr_mix]\n l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])\n # sample mixture indicator from softmax\n sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(\n tf.shape(logit_probs), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)\n sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])\n # select logistic parameters\n means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)\n log_scales = tf.maximum(tf.reduce_sum(\n l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)\n coeffs = tf.reduce_sum(tf.nn.tanh(\n l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)\n # sample from logistic & clip to interval\n # we don't actually round to the nearest 8bit value when sampling\n u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)\n x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. 
- u))\n x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)\n x1 = tf.minimum(tf.maximum(\n x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)\n x2 = tf.minimum(tf.maximum(\n x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)\n return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)\n\n\n\n" ]
[ [ "tensorflow.exp", "tensorflow.constant_initializer", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.nn.moments", "tensorflow.sqrt", "tensorflow.nn.tanh", "tensorflow.nn.avg_pool", "tensorflow.random_normal_initializer", "tensorflow.shape", "tensorflow.sigmoid", "tensorflow.variable_scope", "tensorflow.split", "tensorflow.nn.bias_add", "tensorflow.reduce_prod", "tensorflow.log", "tensorflow.reduce_sum", "tensorflow.get_variable", "tensorflow.layers.dropout", "tensorflow.maximum", "tensorflow.square", "tensorflow.nn.l2_normalize" ] ]
HDFGroup/aiohstools
[ "394aae78375616b9084e6ee36106e0853344e746" ]
[ "aiohsload/utillib.py" ]
[ "##############################################################################\n# Copyright by The HDF Group. #\n# All rights reserved. #\n# #\n# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #\n# Utilities. The full HSDS copyright notice, including #\n# terms governing use, modification, and redistribution, is contained in #\n# the file COPYING, which can be found at the root of the source code #\n# distribution tree. If you do not have access to this file, you may #\n# request a copy from help@hdfgroup.org. #\n##############################################################################\n\nimport sys\nimport aiohttp\nimport logging\nimport asyncio\n\n \ntry:\n import h5py \n import h5pyd \n import numpy as np\nexcept ImportError as e:\n sys.stderr.write(\"ERROR : %s : install it to use this utility...\\n\" % str(e)) \n sys.exit(1)\n \nif __name__ == \"utillib\":\n from chunkiter import ChunkIterator\nelse:\n from .chunkiter import ChunkIterator\n\ndef dump_dtype(dt):\n if not isinstance(dt, np.dtype):\n raise TypeError(\"expected np.dtype, but got: {}\".format(type(dt)))\n if len(dt) > 0:\n out = \"{\"\n for name in dt.fields:\n subdt = dt.fields[name][0]\n out += \"{}: {} |\".format(name, dump_dtype(subdt))\n out = out[:-1] + \"}\"\n else:\n ref = h5py.check_dtype(ref=dt)\n if ref:\n out = str(ref)\n else:\n vlen = h5py.check_dtype(vlen=dt)\n if vlen:\n out = \"VLEN: \" + dump_dtype(vlen)\n else:\n out = str(dt)\n return out\n\n \ndef is_h5py(obj):\n # Return True if objref is a h5py object and False is not \n if isinstance(obj, object) and isinstance(obj.id.id, int):\n return True\n else:\n return False\n\ndef is_reference(val):\n try:\n if isinstance(val, object) and val.__class__.__name__ == \"Reference\":\n return True \n elif isinstance(val, type) and val.__name__ == \"Reference\":\n return True\n except AttributeError as ae:\n msg = \"is_reference for {} error: {}\".format(val, ae)\n logging.error(msg)\n \n return False\n\ndef is_regionreference(val):\n try:\n if isinstance(val, object) and val.__class__.__name__ == \"RegionReference\":\n return True \n elif isinstance(val, type) and val.__name__ == \"RegionReference\":\n return True\n except AttributeError as ae:\n msg = \"is_reference for {} error: {}\".format(val, ae)\n logging.error(msg)\n \n return False\n\ndef has_reference(dtype):\n has_ref = False\n if len(dtype) > 0:\n for name in dtype.fields:\n item = dtype.fields[name]\n if has_reference(item[0]):\n has_ref = True\n break\n elif dtype.metadata and 'ref' in dtype.metadata:\n basedt = dtype.metadata['ref']\n has_ref = is_reference(basedt)\n elif dtype.metadata and 'vlen' in dtype.metadata:\n basedt = dtype.metadata['vlen']\n has_ref = has_reference(basedt)\n return has_ref\n\n\ndef convert_dtype(srcdt, ctx):\n \"\"\" Return a dtype based on input dtype, converting any Reference types from \n h5py style to h5pyd and vice-versa.\n \"\"\"\n \n msg = \"convert dtype: {}, type: {},\".format(srcdt, type(srcdt))\n logging.info(msg)\n \n if len(srcdt) > 0:\n fields = []\n for name in srcdt.fields:\n item = srcdt.fields[name]\n # item is a tuple of dtype and integer offset\n field_dt = convert_dtype(item[0], ctx)\n fields.append((name, field_dt))\n tgt_dt = np.dtype(fields)\n else:\n # check if this a \"special dtype\"\n if srcdt.metadata and 'ref' in srcdt.metadata:\n ref = srcdt.metadata['ref']\n if is_reference(ref):\n if is_h5py(ctx['fout']):\n tgt_dt = h5py.special_dtype(ref=h5py.Reference)\n else:\n tgt_dt = 
h5pyd.special_dtype(ref=h5pyd.Reference)\n elif is_regionreference(ref):\n if is_h5py(ctx['fout']):\n tgt_dt = h5py.special_dtype(ref=h5py.RegionReference)\n else:\n tgt_dt = h5py.special_dtype(ref=h5py.RegionReference)\n else:\n msg = \"Unexpected ref type: {}\".format(srcdt)\n logging.error(msg)\n raise TypeError(msg)\n elif srcdt.metadata and 'vlen' in srcdt.metadata:\n src_vlen = srcdt.metadata['vlen']\n if isinstance(src_vlen, np.dtype):\n tgt_base = convert_dtype(src_vlen, ctx)\n else:\n tgt_base = src_vlen\n if is_h5py(ctx['fout']):\n tgt_dt = h5py.special_dtype(vlen=tgt_base)\n else:\n tgt_dt = h5pyd.special_dtype(vlen=tgt_base)\n else:\n tgt_dt = srcdt\n return tgt_dt\n\n#----------------------------------------------------------------------------------\ndef copy_element(val, src_dt, tgt_dt, ctx):\n logging.debug(\"copy_element, val: \" + str(val) + \" val type: \" + str(type(val)) + \"src_dt: \" + dump_dtype(src_dt) + \" tgt_dt: \" + dump_dtype(tgt_dt))\n \n fin = ctx[\"fin\"]\n fout = ctx[\"fout\"]\n out = None\n if len(src_dt) > 0:\n out_fields = []\n i = 0\n for name in src_dt.fields:\n field_src_dt = src_dt.fields[name][0]\n field_tgt_dt = tgt_dt.fields[name][0]\n field_val = val[i]\n i += 1\n out_field = copy_element(field_val, field_src_dt, field_tgt_dt, ctx)\n out_fields.append(out_field)\n out = tuple(out_fields)\n elif src_dt.metadata and 'ref' in src_dt.metadata:\n if not tgt_dt.metadata or 'ref' not in tgt_dt.metadata:\n raise TypeError(\"Expected tgt dtype to be ref, but got: {}\".format(tgt_dt))\n ref = tgt_dt.metadata['ref']\n if is_reference(ref):\n # initialize out to null ref\n if is_h5py(ctx['fout']):\n out = h5py.Reference() # null h5py ref\n else:\n out = '' # h5pyd refs are strings\n \n if ref:\n try:\n fin_obj = fin[val]\n except AttributeError as ae:\n msg = \"Unable able to get obj for ref value: {}\".format(ae)\n logging.error(msg)\n print(msg)\n return None\n\n # TBD - for hsget, the name property is not getting set\n h5path = fin_obj.name\n if not h5path:\n msg = \"No path found for ref object\"\n logging.warn(msg)\n if ctx[\"verbose\"]:\n print(msg)\n else:\n fout_obj = fout[h5path]\n if is_h5py(ctx['fout']):\n out = fout_obj.ref\n else:\n out = str(fout_obj.ref) # convert to string for JSON serialization\n \n \n elif is_regionreference(ref):\n out = \"tbd\"\n else:\n raise TypeError(\"Unexpected ref type: {}\".format(type(ref)))\n elif src_dt.metadata and 'vlen' in src_dt.metadata:\n logging.debug(\"copy_elment, got vlen element, dt: {}\".format(src_dt.metadata[\"vlen\"]))\n if not isinstance(val, np.ndarray):\n raise TypeError(\"Expecting ndarray or vlen element, but got: {}\".format(type(val)))\n if not tgt_dt.metadata or 'vlen' not in tgt_dt.metadata:\n raise TypeError(\"Expected tgt dtype to be vlen, but got: {}\".format(tgt_dt))\n src_vlen_dt = src_dt.metadata[\"vlen\"]\n tgt_vlen_dt = tgt_dt.metadata[\"vlen\"]\n if has_reference(src_vlen_dt):\n if len(val.shape) == 0:\n # scalar array\n e = val[()]\n v = copy_element(e, src_vlen_dt, tgt_vlen_dt, ctx)\n out = np.array(v, dtype=tgt_dt)\n else:\n out = np.zeros(val.shape, dtype=tgt_dt)\n for i in range(len(out)):\n e = val[i]\n out[i] = copy_element(e, src_vlen_dt, tgt_vlen_dt, ctx)\n else:\n # can just directly copy the array\n out = np.zeros(val.shape, dtype=tgt_dt)\n out[...] 
= val[...]\n else:\n out = val # can just copy as is\n return out\n\n\n\n#----------------------------------------------------------------------------------\ndef copy_array(src_arr, ctx):\n \"\"\" Copy the numpy array to a new array.\n Convert any reference type to point to item in the target's hierarchy.\n \"\"\"\n if not isinstance(src_arr, np.ndarray):\n raise TypeError(\"Expecting ndarray, but got: {}\".format(src_arr))\n tgt_dt = convert_dtype(src_arr.dtype, ctx)\n tgt_arr = np.zeros(src_arr.shape, dtype=tgt_dt)\n\n if has_reference(src_arr.dtype):\n # flatten array to simplify iteration\n count = np.product(src_arr.shape)\n tgt_arr_flat = tgt_arr.reshape((count,))\n src_arr_flat = src_arr.reshape((count,))\n for i in range(count):\n element = copy_element(src_arr_flat[i], src_arr.dtype, tgt_dt, ctx)\n tgt_arr_flat[i] = element\n tgt_arr = tgt_arr_flat.reshape(src_arr.shape)\n else:\n # can just copy the entire array\n tgt_arr[...] = src_arr[...]\n return tgt_arr\n\n\n#----------------------------------------------------------------------------------\ndef copy_attribute(desobj, name, srcobj, ctx):\n msg = \"creating attribute {} in {}\".format(name, srcobj.name)\n logging.debug(msg)\n if ctx[\"verbose\"]:\n print(msg)\n \n try:\n srcarr = srcobj.attrs[name]\n if isinstance(srcarr, np.ndarray):\n tgtarr = copy_array(srcarr, ctx)\n desobj.attrs.create(name, tgtarr)\n else:\n # scalars are just read as the native type\n desobj.attrs.create(name, srcarr)\n except (IOError, TypeError) as e:\n msg = \"ERROR: failed to create attribute {} of object {} -- {}\".format(name, desobj.name, str(e))\n logging.error(msg)\n print(msg)\n \n# copy_attribute\n \n#----------------------------------------------------------------------------------\ndef create_dataset(dobj, ctx):\n \"\"\" create a dataset using the properties of the passed in h5py dataset.\n If successful, proceed to copy attributes and data.\n \"\"\"\n msg = \"creating dataset {}, shape: {}, type: {}\".format(dobj.name, dobj.shape, dobj.dtype)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg) \n fout = ctx[\"fout\"]\n deflate = ctx[\"deflate\"]\n \n fillvalue = None\n try: \n # can trigger a runtime error if fillvalue is undefined\n fillvalue = dobj.fillvalue\n except RuntimeError:\n pass # ignore\n chunks=None\n if dobj.chunks:\n chunks = tuple(dobj.chunks)\n try:\n tgt_dtype = convert_dtype(dobj.dtype, ctx)\n compression_filter = dobj.compression\n compression_opts = dobj.compression_opts\n if deflate is not None and compression_filter is None:\n compression_filter = \"gzip\"\n compression_opts = deflate\n if ctx[\"verbose\"]:\n print(\"applying gzip filter with level: {}\".format(deflate))\n\n dset = fout.create_dataset( dobj.name, shape=dobj.shape, dtype=tgt_dtype, chunks=chunks, \\\n compression=compression_filter, shuffle=dobj.shuffle, \\\n fletcher32=dobj.fletcher32, maxshape=dobj.maxshape, \\\n compression_opts=compression_opts, fillvalue=fillvalue, \\\n scaleoffset=dobj.scaleoffset)\n msg = \"dataset created, uuid: {}, chunk_size: {}\".format(dset.id.id, str(dset.chunks)) \n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n except (IOError, TypeError, KeyError) as e:\n msg = \"ERROR: failed to create dataset: {}\".format(str(e))\n logging.error(msg)\n print(msg)\n return\n \n# create_dataset\n\n#----------------------------------------------------------------------------------\ndef write_dataset(src, tgt, ctx):\n \"\"\" write values from src dataset to target dataset.\n \"\"\"\n msg = \"write_dataset src: {} to tgt: 
{}, shape: {}, type: {}\".format(src.name, tgt.name, src.shape, src.dtype)\n logging.info(msg)\n domain = tgt.file.filename\n dsetid = tgt.id.id\n msg = f\"domain: {domain} dsetid: {dsetid}\"\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg) \n\n if src.shape is None:\n # null space dataset\n msg = \"no data for null space dataset: {}\".format(src.name)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n return # no data \n\n if len(src.shape) == 0:\n # scalar dataset\n x = src[()]\n msg = \"writing: {} for scalar dataset: {}\".format(x, src.name)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n tgt[()] = x\n return\n\n msg = \"iterating over chunks for {}\".format(src.name)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n url = f\"{ctx['endpoint']}/datasets/{dsetid}/value\"\n logging.debug(f\"url: {url}\")\n\n loop = ctx[\"loop\"]\n\n session = ctx[\"session\"]\n\n try:\n it = ChunkIterator(tgt)\n\n futures = []\n \n for s in it:\n msg = \"writing dataset data for slice: {}\".format(s)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n \n arr = src[s]\n selection = getSliceQueryParam(s)\n logging.debug(f\"select:{selection}\")\n params = {}\n params[\"domain\"] = domain\n params[\"select\"] = selection\n\n futures.append(write_chunk(session, url, params, arr))\n if len(futures) >= ctx[\"maxtasks\"]:\n loop.run_until_complete(asyncio.gather(*futures))\n futures = []\n # send off any remaining chnks\n loop.run_until_complete(asyncio.gather(*futures))\n except Exception as e:\n logging.error(f\"got Exception: {e}\")\n \n \n msg = \"done with dataload for {}\".format(src.name)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n# write_dataset\n\n# construct HSDS query param from selection\ndef getSliceQueryParam(sel):\n sel_param=\"[\"\n if isinstance(sel, tuple):\n for s in sel:\n if len(sel_param) > 1:\n sel_param += \",\"\n sel_param += f\"{s.start}:{s.stop}\"\n else:\n sel_param += f\"{sel.start}:{sel.stop}\"\n sel_param += \"]\"\n return sel_param\n \n\n\nasync def write_chunk(session, url, params, arr):\n # TBD: do normal h5yd write for vlen data\n msg = f\"writing chunk for slice: {params['select']}\"\n logging.info(msg)\n data = arr.tobytes()\n \n\n try:\n async with session.put(url, data=data, params=params) as rsp:\n logging.info(\"status: {}\".format(rsp.status))\n if rsp.status != 200:\n raise IOError(f\"expected status 200 but got {rsp.status}\")\n except ConnectionError as ce:\n logging.error(\"connection error: {}\".format(ce))\n raise IOError(\"Connection Error\")\n\n\n #logging.info(msg)\n #self.PUT(req, body=body, format=format, params=params)\n\n#----------------------------------------------------------------------------------\n\ndef create_links(gsrc, gdes, ctx):\n # add soft and external links\n if ctx[\"verbose\"]:\n print(\"create_links: {}\".format(gsrc.name))\n for title in gsrc:\n if ctx[\"verbose\"]:\n print(\"got link: {}\".format(title))\n lnk = gsrc.get(title, getlink=True)\n link_classname = lnk.__class__.__name__\n if link_classname == \"HardLink\":\n logging.debug(\"Got hardlink: {}\".format(title))\n # TBD: handle the case where multiple hardlinks point to same object\n elif link_classname == \"SoftLink\":\n msg = \"creating SoftLink({}) with title: {}\".format(lnk.path, title)\n if ctx[\"verbose\"]:\n print(msg)\n logging.info(msg)\n if is_h5py(gdes):\n soft_link = h5py.SoftLink(lnk.path)\n else:\n soft_link = h5pyd.SoftLink(lnk.path)\n gdes[title] = soft_link\n elif link_classname == \"ExternalLink\":\n msg = 
\"creating ExternalLink({}, {}) with title: {}\".format(lnk.filename, lnk.path, title)\n if ctx[\"verbose\"]:\n print(msg)\n logging.info(msg)\n if is_h5py(gdes):\n ext_link = h5py.ExternalLink(lnk.filename, lnk.path)\n else:\n ext_link = h5pyd.ExternalLink(lnk.filename, lnk.path)\n gdes[title] = ext_link\n else:\n msg = \"Unexpected link type: {}\".format(lnk.__class__.__name__)\n logging.warning(msg)\n if ctx[\"verbose\"]:\n print(msg)\n\ndef create_group(gobj, ctx):\n msg = \"creating group {}\".format(gobj.name)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n fout = ctx[\"fout\"]\n grp = fout.create_group(gobj.name)\n\n # create any soft/external links\n create_links(gobj, grp, ctx)\n \n \n# create_group\n\n#----------------------------------------------------------------------------------\ndef create_datatype(obj, ctx):\n msg = \"creating datatype {}\".format(obj.name)\n logging.info(msg)\n if ctx[\"verbose\"]:\n print(msg)\n fout = ctx[\"fout\"]\n fout[obj.name] = obj.dtype\n\n \n# create_datatype\n \n#----------------------------------------------------------------------------------\ndef load_file(fin, fout, verbose=False, nodata=False, deflate=None, endpoint=None, username=None, password=None, maxtasks=10):\n logging.info(\"input file: {}\".format(fin.filename)) \n logging.info(\"output file: {}\".format(fout.filename))\n\n print(f\"load_file, maxtasks: {maxtasks}\")\n \n # it would be nice to make a class out of these functions, but that \n # makes it heard to use visititems iterator.\n # instead, create a context object to pass arround common state\n ctx = {}\n ctx[\"fin\"] = fin\n ctx[\"fout\"] = fout\n ctx[\"verbose\"] = verbose\n ctx[\"nodata\"] = nodata\n ctx[\"deflate\"] = deflate\n ctx[\"endpoint\"] = endpoint\n ctx[\"username\"] = username\n ctx[\"password\"] = password\n ctx[\"maxtasks\"] = maxtasks\n\n loop = asyncio.get_event_loop()\n ctx[\"loop\"] = loop\n connector = aiohttp.TCPConnector(limit=maxtasks)\n\n auth = aiohttp.BasicAuth(login=username, password=password)\n headers = {'Content-Type': \"application/octet-stream\" }\n session = aiohttp.ClientSession(auth=auth, headers=headers, loop=loop, connector=connector)\n ctx[\"session\"] = session \n\n # create any root attributes\n for ga in fin.attrs:\n copy_attribute(fout, ga, fin, ctx)\n\n # create root soft/external links\n create_links(fin, fout, ctx)\n\n def object_create_helper(name, obj):\n class_name = obj.__class__.__name__\n \n if class_name == \"Dataset\":\n create_dataset(obj, ctx)\n elif class_name == \"Group\":\n create_group(obj, ctx)\n elif class_name == \"Datatype\":\n create_datatype(obj, ctx)\n else:\n logging.error(\"no handler for object class: {}\".format(type(obj)))\n \n def object_copy_helper(name, obj):\n class_name = obj.__class__.__name__\n if class_name == \"Dataset\":\n tgt = fout[obj.name]\n write_dataset(obj, tgt, ctx)\n elif class_name == \"Group\":\n logging.debug(\"skip copy for group: {}\".format(obj.name))\n elif class_name == \"Datatype\":\n logging.debug(\"skip copy for datatype: {}\".format(obj.name))\n else:\n logging.error(\"no handler for object class: {}\".format(type(obj)))\n\n def object_attribute_helper(name, obj):\n tgt = fout[obj.name]\n for ga in obj.attrs:\n copy_attribute(tgt, ga, obj, ctx)\n\n # build a rough map of the file using the internal function above\n fin.visititems(object_create_helper)\n\n # copy over any attributes\n fin.visititems(object_attribute_helper)\n\n if not nodata:\n # copy dataset data\n fin.visititems(object_copy_helper)\n \n # Fully 
flush the h5py handle. \n fout.close() \n \n # close up the source domain, see reason(s) for this below\n fin.close() \n\n if verbose:\n print(\"closing session\")\n loop.run_until_complete(session.close())\n\n if verbose:\n print(\"closing connector\")\n connector.close()\n\n if verbose:\n print(\"closing loop\")\n loop = ctx[\"loop\"]\n loop.close()\n \n msg=\"load_file complete\"\n logging.info(msg)\n if verbose:\n print(msg)\n \n return 0\n# load_file\n" ]
[ [ "numpy.product", "numpy.array", "numpy.dtype", "numpy.zeros" ] ]
HiDiHlabs/pygorich
[ "64b5a44b7c4040f3f5b91109494d0c8b290e89f8" ]
[ "pygorich/gsea.py" ]
[ "import json\nimport requests\nimport pandas as pnd\nfrom scipy.stats import hypergeom, fisher_exact, binom_test\nfrom statsmodels.stats.multitest import multipletests\nimport sys\nimport geanno\n\nclass Enricher():\n '''\n Class for managing gene sets and performing GSEA.\n\n ...\n\n Attributes\n ----------\n __enrichr_url : string\n URL of EnrichR API\n\n __genesets : pandas.DataFrame\n Stores information about library id, geneset id, and geneset\n\n __library_ids : list<string>\n List of available libraries IDs.\n\n __annotated_foreground : pandas.DataFrame\n Stores Information about the foreground regions.\n\n __annotated_background : pandas.DataFrame\n Stores information about the background regions.\n\n __annotation_database : pandas.DataFrame\n Stores information about the gene bed file, and enhancer-promoter\n link file used for annotating the foreground and the background \n genomic regions.\n\n Methods\n -------\n printLibraries()\n Method for printing available geneset libraries.\n\n loadAnnotationDatabase(genes_filename=None,\n enhancer_link_filename=None,\n max_distance_gene=1000000,\n name_col_gene=6,\n max_distance_enhancer=0,\n name_col_enhancer=15)\n Loads annotation database used for annotating foregound and\n background regions.\n\n loadRegions(self,\n foreground_bed_filename = None,\n background_bed_filename = None)\n Load and annotate foreground and background genomic regions.\n\n loadLibrary(library_id,\n library_filename=None,\n from_enrichr=True)\n Load genesets from library.\n\n getAnnotationDatabase()\n Returns annotation database.\n\n getAnnotatedForground()\n Returns annotated foreground.\n\n getAnnotatedBackground()\n Returns annotated background.\n\n getGeneSets()\n Returns genesets\n '''\n\n ##############\n # Constructors\n def __init__(self):\n '''Standard constructor. 
Creates an empty Enricher object.\n '''\n # Define private Attributes\n self.__enrichr_url = \"http://amp.pharm.mssm.edu/Enrichr\"\n\n self.__genesets = pnd.DataFrame(columns = [\"LIB.ID\", \n \"GENESET.ID\", \n \"GENE.LIST\"])\n\n self.__library_ids = None\n\n self.__annotated_foreground = None\n\n self.__annotated_background = None\n self.__load_genesets_from_enrichr()\n\n self.__annotation_database = None\n\n self.__enrichment_results = pnd.DataFrame(columns = \n [\"LIB.ID\",\n \"GENESET.ID\",\n \"n.FOREGROUND\",\n \"n.BACKGROUND\",\n \"n.FOREGROUND.IN.SET\",\n \"n.BACKGROUND.IN.SET\",\n \"p.FISHER\",\n \"q.FISHER\",\n \"odds.FISHER\",\n \"p.HYPERGEOMETRIC\",\n \"q.HYPERGEOMETRIC\",\n \"p.BINOMIAL\",\n \"q.BINOMIAL\",\n \"REGION.GENES.PAIRS\"])\n\n ################\n # Public Methods\n\n # Print Methods\n def printLibraries(self):\n '''Method for printing available geneset libraries.\n '''\n print(\"\\n\".join(self.__library_ids))\n\n # Getter Methods\n def getAnnotationDatabase(self):\n '''Returns annotation database.\n Returns\n -------\n annotation_database : pandas.DataFrame\n DataFrame storing information about the database used for\n annotation of foreground and background regions.\n '''\n return self.__annotation_database\n\n def getAnnotatedForeground(self):\n '''Returns annotated foreground.\n Returns\n -------\n annotated_foreground : pandas.DataFrame\n DataFrame storing the annotated foreground regions.\n '''\n return self.__annotated_foreground\n\n def getAnnotatedBackground(self):\n '''Returns annotated background.\n Returns\n -------\n annotated_background : pandas.DataFrame\n DataFrame storing the annotated background regions.\n '''\n return self.__annotated_background\n\n def getGeneSets(self):\n '''Returns genesets\n Returns\n -------\n genesets : pandas.DataFrame\n DataFrame storing loaded genesets\n '''\n return self.__genesets\n\n def getEnrichmentResults(self):\n '''Returns enrichment results.\n Returns\n -------\n enrichment_results : pandas.DataFrame\n DataFrame storing enrichment results.\n '''\n return self.__enrichment_results\n\n def getLibraryIDs(self):\n '''Return Library IDs\n Returns\n -------\n library_ids : list\n List of library IDs\n '''\n return self.__library_ids\n\n # Setter methods\n def resetEnrichmentResults(self):\n '''Reset enrichment results to empty DataFrame.\n '''\n self.__enrichment_results = pnd.DataFrame(columns = \n [\"LIB.ID\",\n \"GENESET.ID\",\n \"n.FOREGROUND\",\n \"n.BACKGROUND\",\n \"n.FOREGROUND.IN.SET\",\n \"n.BACKGROUND.IN.SET\",\n \"p.FISHER\",\n \"q.FISHER\",\n \"odds.FISHER\",\n \"p.HYPERGEOMETRIC\",\n \"q.HYPERGEOMETRIC\",\n \"p.BINOMIAL\",\n \"q.BINOMIAL\",\n \"REGION.GENES.PAIRS\"])\n\n\n # Data load Methods\n def loadAnnotationDatabase(self,\n genes_filename=None,\n enhancer_link_filename=None,\n max_distance_gene=1000000,\n name_col_gene=6,\n max_distance_enhancer=0,\n name_col_enhancer=15):\n '''Load Annotation Database for foreground and background regions.\n Parameters\n ----------\n genes_filename : string\n Path to bed file containing gene regions.\n enhancer_link_filename : string\n Path to bed file containing enhancer promoter interactions.\n max_distance_gene : int\n Maximal distance in base pairs to TSS for annotating a region\n to a gene.\n name_col_gene : int\n Column of gene name in genes_filename.\n max_distance_enhancer : int\n Maximal distance of region to enhancer for annotating a region via\n an enhancer to a gene.\n name_col_enhancer : int\n Column of gene name in enhancer promoter link file.\n '''\n\n 
self.__annotation_database = pnd.DataFrame(columns = [\"FILENAME\",\n \"REGION.TYPE\",\n \"SOURCE\",\n \"ANNOTATION.BY\",\n \"MAX.DISTANCE\",\n \"DISTANCE.TO\",\n \"N.HITS\",\n \"NAME.COL\"])\n if(genes_filename is not None):\n self.__annotation_database.loc[\"GENES\", :] = [genes_filename,\n \"genes\",\n \"genes\",\n \"NAME\",\n max_distance_gene,\n \"START\",\n 2,\n name_col_gene]\n if(enhancer_link_filename is not None):\n self.__annotation_database.loc[\"ENHANCER\",\n :] = [enhancer_link_filename,\n \"enhancer\",\n \"enhancer\",\n \"NAME\",\n max_distance_enhancer,\n \"REGION\",\n 1,\n name_col_enhancer]\n\n def loadRegions(self,\n foreground_bed_filename = None,\n background_bed_filename = None):\n '''Load and annotate foreground and background genomic regions.\n Parameters\n ----------\n foreground_bed_filename : string\n Path to bed file containing foreground regions. Important: Must\n contain header that starts with \"#chrom start end\".\n background_bed_filename : string\n Path to bed file containing background regions. Important: Must\n contain header that starts with \"#chrom start end\".\n '''\n # Load and annotated foreground regions\n # Create a new GenomicRegionAnnotator instance\n gra = geanno.Annotator.GenomicRegionAnnotator()\n\n # load base\n gra.load_base_from_file(foreground_bed_filename)\n\n # load database\n gra.load_database_from_dataframe(self.__annotation_database)\n\n # Annotate base against all database genomic region files\n gra.annotate()\n\n # Retrieve annotated base intervals as pandas.DataFrame instance\n self.__annotated_foreground = gra.get_base()\n\n self.__annotated_foreground.loc[:, \"GENESET\"] = [\n \";\".join([ gene.split(\"(\")[0] for gene in row[\"enhancer\"].split(\";\") ]) \n if row[\"enhancer\"] != \"NA\" \n else row[\"genes\"].split(\";\")[0].split(\"(\")[0] \n for i, row in self.__annotated_foreground.iterrows()]\n\n # Load and annotated background regions\n # Create a new GenomicRegionAnnotator instance\n gra = geanno.Annotator.GenomicRegionAnnotator()\n\n # load base\n gra.load_base_from_file(background_bed_filename)\n\n # load database\n gra.load_database_from_dataframe(self.__annotation_database)\n\n # Annotate base against all database genomic region files\n gra.annotate()\n\n # Retrieve annotated base intervals as pandas.DataFrame instance\n self.__annotated_background = gra.get_base()\n\n self.__annotated_background.loc[:, \"GENESET\"] = [\n \";\".join([ gene.split(\"(\")[0] for gene in row[\"enhancer\"].split(\";\") ])\n if row[\"enhancer\"] != \"NA\" \n else row[\"genes\"].split(\";\")[0].split(\"(\")[0] \n for i, row in self.__annotated_background.iterrows()]\n\n def loadLibrary(self,\n library_id,\n library_filename=None,\n from_enrichr=True):\n '''Load genesets from library\n Parameters\n ----------\n library_id : string\n Identifier of geneset library.\n library_filename : string\n Path to geneset library file. Library file must contain one header\n line! The columns are: 1. Library ID, 2. Geneset ID, 3. 
Semicolon\n separated list of gene ids.\n from_enrichr : boolean\n Whether geneset library shall be loaded from enrichr database or\n not.\n '''\n if(from_enrichr):\n # Retrieve genesets for each library and store in self.__genesets\n query_string=\"/geneSetLibrary?mode=text&libraryName=%s\"\n response = requests.get(self.__enrichr_url + \n query_string % library_id)\n if not response.ok:\n raise Exception('Error searching for terms')\n for geneset_string in response.text.split(\"\\n\"):\n geneset_list = geneset_string.split(\"\\t\")\n geneset_id = geneset_list[0]\n geneset = \";\".join([gene.split(\",\")[0] for \n gene in geneset_list[2:]])\n self.__genesets.loc[library_id+\n \"@\"+\n geneset_id, :] = [library_id,\n geneset_id,\n geneset]\n else:\n if(not library_filename is None):\n library_file = open(library_filename, \"r\")\n c = 0\n for line in library_file:\n if(c == 0):\n c += 1\n continue\n split_line = line.rstrip().split(\"\\t\")\n if(len(split_line) < 3):\n continue\n lib_id = split_line[0]\n if(not(lib_id == library_id)):\n continue\n geneset_id = split_line[1]\n genelist = split_line[2]\n self.__genesets.loc[lib_id+\n \"@\"+\n geneset_id, :] = [lib_id,\n geneset_id,\n genelist]\n\n\n # Enrichment Methods\n def enrich(self,\n method=\"all\"):\n '''Perform Enrichment Analysis.\n Parameters\n ----------\n method : string\n Statistical method used for enrichment analysis. Can be either of\n fisher, hypergeometric, binomial, all.\n '''\n geneset_list_foreground_regions = [ set(geneset.split(\";\")) for\n geneset in \n self.__annotated_foreground[\"GENESET\"]]\n foreground_region_ids = list(self.__annotated_foreground.index)\n geneset_list_background_regions = [ set(geneset.split(\";\")) for\n geneset in \n self.__annotated_background[\"GENESET\"]]\n\n n_foreground = len(self.__annotated_foreground.index)\n n_background = len(self.__annotated_background.index)\n\n for library_id in list(set(self.__genesets.loc[:, \"LIB.ID\"])):\n print(\"Calculate Enrichment for: \"+library_id)\n for geneset_id in self.__genesets[self.__genesets[\"LIB.ID\"] == \n library_id].loc[:, \"GENESET.ID\"]:\n if(not library_id+\"@\"+geneset_id in \n set(self.__enrichment_results.index)):\n# print(\"\\t\"+geneset_id)\n geneset = set(self.__genesets.loc[library_id+\n \"@\"+\n geneset_id,\n \"GENE.LIST\"].split(\";\"))\n (n_foreground_in_geneset, \n foreground_region_genes_pairs) = self.__calculateGenesetOverlaps(\n geneset_list_foreground_regions,\n geneset,\n region_id_list = foreground_region_ids)\n (n_background_in_geneset,\n background_region_genes_pairs) = self.__calculateGenesetOverlaps(\n geneset_list_background_regions,\n geneset)\n\n p_val_fisher = None\n odds_fisher = None\n p_val_hyper = None\n p_val_binom = None\n if(method == \"fisher\" or method == \"all\"):\n n_foreground_not_in_geneset = (n_foreground-\n n_foreground_in_geneset)\n n_background_not_in_geneset = (n_background-\n n_background_in_geneset)\n ct = [ [n_foreground_in_geneset,\n n_foreground_not_in_geneset], \n [n_background_in_geneset,\n n_background_not_in_geneset] ]\n odds_fisher, p_val_fisher = fisher_exact(ct, \n alternative = \"greater\")\n if(method == \"hypergeometric\" or method == \"all\"):\n M = n_background\n n = n_background_in_geneset\n N = n_foreground\n k = n_foreground_in_geneset\n p_val_hyper = 1.\n if(n > 0 and k > 0):\n p_val_hyper = 1.-hypergeom.cdf(k, M, n, N)\n if(method == \"binomial\" or method == \"all\"):\n p = float(n_background_in_geneset)/float(n_background)\n x = n_foreground_in_geneset\n n = 
n_foreground\n p_val_binom = binom_test(x, \n n = n, \n p = p, \n alternative=\"greater\")\n\n results = [library_id,\n geneset_id,\n n_foreground,\n n_background,\n n_foreground_in_geneset,\n n_background_in_geneset,\n p_val_fisher,\n None,\n odds_fisher,\n p_val_hyper,\n None,\n p_val_binom,\n None,\n \";\".join(foreground_region_genes_pairs)]\n self.__enrichment_results.loc[\n library_id+\"@\"+geneset_id, :] = results\n\n # Perform multiple testing correction\n indices = self.__enrichment_results[\n self.__enrichment_results[\"LIB.ID\"] == library_id].index\n\n if(method == \"fisher\" or method == \"all\"):\n p_values = self.__enrichment_results.loc[indices, \"p.FISHER\"]\n r, q_values, a_s, a_b = multipletests(p_values, \n method = \"fdr_bh\")\n self.__enrichment_results.loc[indices, \"q.FISHER\"] = q_values\n if(method == \"hypergeometric\" or method == \"all\"):\n p_values = self.__enrichment_results.loc[indices, \n \"p.HYPERGEOMETRIC\"]\n r, q_values, a_s, a_b = multipletests(p_values, \n method = \"fdr_bh\")\n self.__enrichment_results.loc[indices, \n \"q.HYPERGEOMETRIC\"] = q_values\n if(method == \"binomial\" or method == \"all\"):\n p_values = self.__enrichment_results.loc[indices, \"p.BINOMIAL\"]\n r, q_values, a_s, a_b = multipletests(p_values, \n method = \"fdr_bh\")\n self.__enrichment_results.loc[indices, \"q.BINOMIAL\"] = q_values\n\n #################\n # Private Methods\n def __load_genesets_from_enrichr(self):\n '''Load available gene set libraries from EnrichR.\n '''\n # Retrieve set of libraries\n lib_url = self.__enrichr_url+\"/datasetStatistics\"\n lib_json = json.loads(requests.get(lib_url).text)\n self.__library_ids = [lib[\"libraryName\"] for \n lib in lib_json[\"statistics\"]]\n\n def __calculateGenesetOverlaps(self,\n region_associated_geneset_list,\n geneset,\n region_id_list = None):\n '''Calculate number of regions in geneset\n Parameters\n ----------\n region_associated_geneset_list : list<set>\n List of genesets the regions are associated with\n geneset : set\n Set of genes\n\n Returns\n -------\n overlaps : int\n Number of regions in geneset\n '''\n overlaps = 0\n i = 0\n region_genes_pairs = []\n for region_associated_geneset in region_associated_geneset_list:\n overlap_genes = list(region_associated_geneset & geneset)\n if(len(overlap_genes) > 0):\n overlaps += 1\n if(not region_id_list is None):\n region_id = region_id_list[i]\n region_genes_pairs += [region_id+\n \"=\"+\n \",\".join(overlap_genes)]\n i += 1\n return overlaps, region_genes_pairs\n\n" ]
[ [ "scipy.stats.hypergeom.cdf", "pandas.DataFrame", "scipy.stats.binom_test", "scipy.stats.fisher_exact" ] ]
ast0815/likelihood-machine
[ "4b0ebd193253775c31539c4a0046b79cbec8fa2b" ]
[ "docs/examples/simple_experiment/vary_detector.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"Script to generate toy detector variations.\n\nThis modifies the output data of a MC simulation.\n\"\"\"\n\nimport argparse\nimport csv\n\nimport experiment\nimport numpy as np\nimport numpy.lib.recfunctions as rfn\n\nparser = argparse.ArgumentParser(\n description=\"Modify the reconstructed events of a simulation.\"\n)\nparser.add_argument(\"inputfilename\", help=\"where to get the data\")\nparser.add_argument(\"datafilename\", help=\"where to store the data\")\nargs = parser.parse_args()\n\n# Nominal detector\nnominal_detector = experiment.Detector()\n# Toy parameters\nnp.random.seed(1337) # Make sure the variations are always the same\nn_toys = 100\neff_slopes = np.abs(1.0 + 0.1 * np.random.randn(n_toys))\neff_offsets = 0.1 * np.random.randn(n_toys)\nsmear_sigmas = np.abs(1.0 + 0.1 * np.random.randn(n_toys))\nmax_effs = 1.0 - np.abs(0.1 + 0.03 * np.random.randn(n_toys))\n\n# Toy detectors\ntoy_detectors = []\nfor slope, offset, sigma, max_eff in zip(\n eff_slopes, eff_offsets, smear_sigmas, max_effs\n):\n toy_detectors.append(\n experiment.Detector(\n eff_slope=slope, eff_offset=offset, smear_sigma=sigma, max_eff=max_eff\n )\n )\n\nevents = np.genfromtxt(args.inputfilename, names=True, delimiter=\",\")\n\n# Calculate weights as efficiency ratio of nominal and toy detectors\nweights = []\nnominal = nominal_detector.efficiency(events)\nfor i, toy in enumerate(toy_detectors):\n weights.append(\n np.array(toy.efficiency(events) / nominal, dtype=[(\"weight_%i\" % (i,), float)])\n )\nweights = rfn.merge_arrays(weights, flatten=True, usemask=False)\n\n# Modify x smearing by sigma ratio\nreco_x = []\nnominal = 1.0\nfor i, toy in enumerate(smear_sigmas):\n tru = events[\"true_x\"]\n dif = events[\"reco_x\"] - tru\n new = toy / nominal * dif + tru\n reco_x.append(np.array(new, dtype=[(\"reco_x_%i\" % (i,), float)]))\nreco_x = rfn.merge_arrays(reco_x, flatten=True, usemask=False)\n\nevents = rfn.drop_fields(events, [\"reco_x\"])\nevents = rfn.merge_arrays([events, weights, reco_x], flatten=True, usemask=False)\n\ncsvfields = events.dtype.names\nwith open(args.datafilename, \"wt\") as f:\n writer = csv.DictWriter(f, csvfields, delimiter=\",\")\n writer.writerow({fn: fn for fn in csvfields}) # Write the field names\n for event in events:\n writer.writerow({k: event[k] for k in event.dtype.names})\n" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.genfromtxt", "numpy.random.randn", "numpy.lib.recfunctions.merge_arrays", "numpy.lib.recfunctions.drop_fields" ] ]
deep-spin/S7
[ "c987906b032eaa727c8bcbec53f48befb467e515", "c987906b032eaa727c8bcbec53f48befb467e515", "c987906b032eaa727c8bcbec53f48befb467e515" ]
[ "joeynmt/training.py", "joeynmt/sample_decoding.py", "test/unit/test_transformer_decoder.py" ]
[ "# coding: utf-8\n\n\"\"\"\nTraining module\n\"\"\"\n\nfrom itertools import count\nimport argparse\nimport time\nimport shutil\nfrom typing import List, Dict\nimport os\nfrom os.path import join\nimport queue\nfrom functools import partial\nimport random\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom torchtext.data import Dataset\n\nfrom entmax import Entmax15Loss, SparsemaxLoss, EntmaxBisectLoss\n\nfrom joeynmt.model import build_model\nfrom joeynmt.batch import Batch\nfrom joeynmt.helpers import log_data_info, load_config, log_cfg, \\\n store_attention_plots, load_checkpoint, make_model_dir, \\\n make_logger, set_seed, symlink_update, ConfigurationError, postprocess\nfrom joeynmt.model import Model\nfrom joeynmt.prediction import validate_on_data\nfrom joeynmt.loss import LabelSmoothingLoss, FYLabelSmoothingLoss\nfrom joeynmt.data import load_data, make_data_iter\nfrom joeynmt.builders import build_optimizer, build_scheduler, \\\n build_gradient_clipper\nfrom joeynmt.prediction import test\n\n\ndef _parse_hidden_size(model_config):\n if \"encoder\" in model_config:\n return model_config[\"encoder\"][\"hidden_size\"]\n\n encs_config = model_config[\"encoders\"]\n if \"encoder\" in encs_config:\n return encs_config[\"encoder\"][\"hidden_size\"]\n return next(cf[\"hidden_size\"] for cf in encs_config.values()\n if \"hidden_size\" in cf)\n\n\n# pylint: disable=too-many-instance-attributes\nclass TrainManager:\n \"\"\" Manages training loop, validations, learning rate scheduling\n and early stopping.\"\"\"\n\n def __init__(self, model: Model, config: dict) -> None:\n \"\"\"\n Creates a new TrainManager for a model, specified as in configuration.\n\n :param model: torch module defining the model\n :param config: dictionary containing the training configurations\n \"\"\"\n train_config = config[\"training\"]\n\n # files for logging and storing\n self.model_dir = train_config[\"model_dir\"]\n make_model_dir(\n self.model_dir, overwrite=train_config.get(\"overwrite\", False)\n )\n self.logger = make_logger(model_dir=self.model_dir)\n self.logging_freq = train_config.get(\"logging_freq\", 100)\n self.valid_report_file = join(self.model_dir, \"validations.txt\")\n self.tb_writer = SummaryWriter(\n log_dir=join(self.model_dir, \"tensorboard/\")\n )\n\n # model\n self.model = model\n self.pad_index = self.model.pad_index\n self._log_parameters_list()\n\n # objective\n objective = train_config.get(\"loss\", \"cross_entropy\")\n loss_alpha = train_config.get(\"loss_alpha\", 1.5)\n\n assert loss_alpha >= 1\n # maybe don't do the label smoothing thing here, instead have\n # nn.CrossEntropyLoss\n # then you look up the loss func, and you either use it directly or\n # wrap it in FYLabelSmoothingLoss\n if objective == \"softmax\":\n objective = \"cross_entropy\"\n loss_funcs = {\n \"cross_entropy\": nn.CrossEntropyLoss,\n \"entmax15\": partial(Entmax15Loss, k=512),\n \"sparsemax\": partial(SparsemaxLoss, k=512),\n \"entmax\": partial(EntmaxBisectLoss, alpha=loss_alpha, n_iter=30)\n }\n if objective not in loss_funcs:\n raise ConfigurationError(\"Unknown loss function\")\n\n loss_module = loss_funcs[objective]\n loss_func = loss_module(ignore_index=self.pad_index, reduction='sum')\n\n label_smoothing = train_config.get(\"label_smoothing\", 0.0)\n label_smoothing_type = train_config.get(\"label_smoothing_type\", \"fy\")\n assert label_smoothing_type in [\"fy\", \"szegedy\"]\n smooth_dist = 
train_config.get(\"smoothing_distribution\", \"uniform\")\n assert smooth_dist in [\"uniform\", \"unigram\"]\n if label_smoothing > 0:\n if label_smoothing_type == \"fy\":\n # label smoothing entmax loss\n if smooth_dist is not None:\n smooth_p = torch.FloatTensor(model.trg_vocab.frequencies)\n smooth_p /= smooth_p.sum()\n else:\n smooth_p = None\n loss_func = FYLabelSmoothingLoss(\n loss_func, smoothing=label_smoothing, smooth_p=smooth_p\n )\n else:\n assert objective == \"cross_entropy\"\n loss_func = LabelSmoothingLoss(\n ignore_index=self.pad_index,\n reduction=\"sum\",\n smoothing=label_smoothing\n )\n self.loss = loss_func\n\n self.norm_type = train_config.get(\"normalization\", \"batch\")\n if self.norm_type not in [\"batch\", \"tokens\"]:\n raise ConfigurationError(\"Invalid normalization. \"\n \"Valid options: 'batch', 'tokens'.\")\n\n # optimization\n self.learning_rate_min = train_config.get(\"learning_rate_min\", 1.0e-8)\n\n self.clip_grad_fun = build_gradient_clipper(config=train_config)\n self.optimizer = build_optimizer(\n config=train_config, parameters=model.parameters())\n\n # validation & early stopping\n self.validate_by_label = train_config.get(\"validate_by_label\", False)\n self.validation_freq = train_config.get(\"validation_freq\", 1000)\n self.log_valid_sents = train_config.get(\"print_valid_sents\", [0, 1, 2])\n self.plot_attention = train_config.get(\"plot_attention\", False)\n self.ckpt_queue = queue.Queue(\n maxsize=train_config.get(\"keep_last_ckpts\", 5))\n\n allowed = {'bleu', 'chrf', 'token_accuracy',\n 'sequence_accuracy', 'cer', \"wer\", \"levenshtein_distance\"}\n eval_metrics = train_config.get(\"eval_metric\", \"bleu\")\n if isinstance(eval_metrics, str):\n eval_metrics = [eval_metrics]\n if any(metric not in allowed for metric in eval_metrics):\n ok_metrics = \" \".join(allowed)\n raise ConfigurationError(\"Invalid setting for 'eval_metric', \"\n \"valid options: {}\".format(ok_metrics))\n self.eval_metrics = eval_metrics\n self.forced_sparsity = train_config.get(\"forced_sparsity\", False)\n\n early_stop_metric = train_config.get(\"early_stopping_metric\", \"loss\")\n allowed_early_stop = {\"ppl\", \"loss\"} | set(self.eval_metrics)\n if early_stop_metric not in allowed_early_stop:\n raise ConfigurationError(\n \"Invalid setting for 'early_stopping_metric', \"\n \"valid options: 'loss', 'ppl', and eval_metrics.\")\n self.early_stopping_metric = early_stop_metric\n min_metrics = {\"ppl\", \"loss\", \"cer\", \"wer\", \"levenshtein_distance\"}\n self.minimize_metric = early_stop_metric in min_metrics\n\n # learning rate scheduling\n hidden_size = _parse_hidden_size(config[\"model\"])\n self.scheduler, self.sched_incr = build_scheduler(\n config=train_config,\n scheduler_mode=\"min\" if self.minimize_metric else \"max\",\n optimizer=self.optimizer,\n hidden_size=hidden_size)\n\n # data & batch handling\n # src/trg magic\n if \"level\" in config[\"data\"]:\n self.src_level = self.trg_level = config[\"data\"][\"level\"]\n else:\n assert \"src_level\" in config[\"data\"]\n assert \"trg_level\" in config[\"data\"]\n self.src_level = config[\"data\"][\"src_level\"]\n self.trg_level = config[\"data\"][\"trg_level\"]\n\n self.shuffle = train_config.get(\"shuffle\", True)\n self.epochs = train_config[\"epochs\"]\n self.batch_size = train_config[\"batch_size\"]\n self.batch_type = train_config.get(\"batch_type\", \"sentence\")\n self.eval_batch_size = train_config.get(\"eval_batch_size\",\n self.batch_size)\n self.eval_batch_type = 
train_config.get(\"eval_batch_type\",\n self.batch_type)\n\n self.batch_multiplier = train_config.get(\"batch_multiplier\", 1)\n\n # generation\n self.max_output_length = train_config.get(\"max_output_length\", None)\n\n # CPU / GPU\n self.use_cuda = train_config[\"use_cuda\"]\n if self.use_cuda:\n self.model.cuda()\n self.loss.cuda()\n\n # initialize training statistics\n self.steps = 0\n # stop training if this flag is True by reaching learning rate minimum\n self.stop = False\n self.total_tokens = 0\n self.best_ckpt_iteration = 0\n # initial values for best scores\n self.best_ckpt_score = np.inf if self.minimize_metric else -np.inf\n\n mrt_schedule = train_config.get(\"mrt_schedule\", None)\n assert mrt_schedule is None or mrt_schedule in [\"warmup\", \"mix\", \"mtl\"]\n self.mrt_schedule = mrt_schedule\n self.mrt_p = train_config.get(\"mrt_p\", 0.0)\n self.mrt_lambda = train_config.get(\"mrt_lambda\", 1.0)\n assert 0 <= self.mrt_p <= 1\n assert 0 <= self.mrt_lambda <= 1\n self.mrt_start_steps = train_config.get(\"mrt_start_steps\", 0)\n self.mrt_samples = train_config.get(\"mrt_samples\", 1)\n self.mrt_alpha = train_config.get(\"mrt_alpha\", 1.0)\n self.mrt_strategy = train_config.get(\"mrt_strategy\", \"sample\")\n self.mrt_cost = train_config.get(\"mrt_cost\", \"levenshtein\")\n self.mrt_max_len = train_config.get(\"mrt_max_len\", 31) # hmm\n self.step_counter = count()\n\n assert self.mrt_alpha > 0\n assert self.mrt_strategy in [\"sample\", \"topk\"]\n assert self.mrt_cost in [\"levenshtein\", \"bleu\"]\n\n # model parameters\n if \"load_model\" in train_config.keys():\n model_load_path = train_config[\"load_model\"]\n reset_training = train_config.get(\"reset_training\", False)\n self.logger.info(\"Loading model from %s\", model_load_path)\n self.init_from_checkpoint(model_load_path, reset=reset_training)\n\n def is_best(self, score):\n return self.minimize_metric == (score < self.best_ckpt_score)\n\n def _save_checkpoint(self) -> None:\n \"\"\"\n Save the model's current parameters and the training state to a\n checkpoint.\n\n The training state contains the total number of training steps,\n the total number of training tokens,\n the best checkpoint score and iteration so far,\n and optimizer and scheduler states.\n\n \"\"\"\n ckpt_name = str(self.steps) + \".ckpt\"\n model_path = join(self.model_dir, ckpt_name)\n if self.scheduler is not None:\n scheduler_state = self.scheduler.state_dict()\n else:\n scheduler_state = None\n state = {\n \"steps\": self.steps,\n \"total_tokens\": self.total_tokens,\n \"best_ckpt_score\": self.best_ckpt_score,\n \"best_ckpt_iteration\": self.best_ckpt_iteration,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": self.optimizer.state_dict(),\n \"scheduler_state\": scheduler_state\n }\n torch.save(state, model_path)\n if self.ckpt_queue.full():\n to_delete = self.ckpt_queue.get() # delete oldest ckpt\n try:\n os.remove(to_delete)\n except FileNotFoundError:\n self.logger.warning(\"Wanted to delete old checkpoint %s but \"\n \"file does not exist.\", to_delete)\n\n self.ckpt_queue.put(model_path)\n\n # create/modify symbolic link for best checkpoint\n symlink_update(ckpt_name, join(self.model_dir, \"best.ckpt\"))\n\n def init_from_checkpoint(self, path: str, reset: bool = False):\n \"\"\"\n Initialize the trainer from a given checkpoint file.\n\n This checkpoint file contains not only model parameters, but also\n scheduler and optimizer states, see `self._save_checkpoint`.\n\n :param path: path to checkpoint\n \"\"\"\n model_checkpoint 
= load_checkpoint(path=path, use_cuda=self.use_cuda)\n\n # restore model and optimizer parameters\n self.model.load_state_dict(model_checkpoint[\"model_state\"])\n\n if not reset:\n self.optimizer.load_state_dict(model_checkpoint[\"optimizer_state\"])\n scheduler_state = model_checkpoint[\"scheduler_state\"]\n if scheduler_state is not None and self.scheduler is not None:\n self.scheduler.load_state_dict(scheduler_state)\n\n # restore counts\n self.steps = model_checkpoint[\"steps\"]\n self.total_tokens = model_checkpoint[\"total_tokens\"]\n self.best_ckpt_score = model_checkpoint[\"best_ckpt_score\"]\n self.best_ckpt_iteration = model_checkpoint[\"best_ckpt_iteration\"]\n\n # move parameters to cuda\n if self.use_cuda:\n self.model.cuda()\n\n def log_tensorboard(self, split, **kwargs):\n \"\"\"\n split: \"train\" or \"valid\"\n \"\"\"\n assert split in [\"train\", \"valid\"]\n prefix = \"{0}/{0}_\".format(split)\n for metric, value in kwargs.items():\n name = prefix + metric\n self.tb_writer.add_scalar(name, value, self.steps)\n\n def train_and_validate(self, train_data: Dataset, valid_data: Dataset):\n \"\"\"\n Train the model and validate it on the validation set.\n\n :param train_data: training data\n :param valid_data: validation data\n \"\"\"\n train_iter = make_data_iter(\n train_data,\n batch_size=self.batch_size,\n batch_type=self.batch_type,\n train=True,\n shuffle=self.shuffle,\n use_cuda=self.use_cuda\n )\n for epoch_no in range(1, self.epochs + 1):\n self.logger.info(\"EPOCH %d\", epoch_no)\n\n if self.sched_incr == \"epoch\":\n self.scheduler.step(epoch=epoch_no - 1) # 0-based indexing\n\n self.model.train()\n\n start = time.time()\n total_valid_duration = 0\n processed_tokens = self.total_tokens\n epoch_loss = 0\n\n for i, batch in enumerate(iter(train_iter), 1):\n # reactivate training\n self.model.train()\n # create a Batch object from torchtext batch\n batch = Batch(batch, self.pad_index)\n\n # only update every batch_multiplier batches\n update = i % self.batch_multiplier == 0\n batch_loss = self._train_batch(batch, update=update)\n\n self.log_tensorboard(\"train\", batch_loss=batch_loss)\n\n epoch_loss += batch_loss.cpu().numpy()\n\n if not update:\n continue\n\n if self.sched_incr == \"step\":\n self.scheduler.step()\n\n # log learning progress\n if self.steps % self.logging_freq == 0:\n elapsed = time.time() - start - total_valid_duration\n elapsed_tokens = self.total_tokens - processed_tokens\n self.logger.info(\n \"Epoch %3d Step: %8d Batch Loss: %12.6f \"\n \"Tokens per Sec: %8.0f, Lr: %.6f\",\n epoch_no, self.steps, batch_loss,\n elapsed_tokens / elapsed,\n self.optimizer.param_groups[0][\"lr\"])\n start = time.time()\n total_valid_duration = 0\n processed_tokens = self.total_tokens\n\n # validate on the entire dev set\n if self.steps % self.validation_freq == 0:\n valid_start_time = time.time()\n\n # it would be nice to include loss and ppl in valid_scores\n valid_scores, valid_references, valid_hypotheses, \\\n valid_hypotheses_raw, valid_attention_scores, \\\n scores_by_label = validate_on_data(\n batch_size=self.eval_batch_size,\n data=valid_data,\n eval_metrics=self.eval_metrics,\n trg_level=self.trg_level,\n model=self.model,\n use_cuda=self.use_cuda,\n max_output_length=self.max_output_length,\n loss_function=self.loss,\n beam_size=0, # greedy validations\n batch_type=self.eval_batch_type,\n save_attention=self.plot_attention,\n validate_by_label=self.validate_by_label,\n forced_sparsity=self.forced_sparsity\n )\n\n ckpt_score = 
valid_scores[self.early_stopping_metric]\n self.log_tensorboard(\"valid\", **valid_scores)\n\n new_best = False\n if self.is_best(ckpt_score):\n self.best_ckpt_score = ckpt_score\n self.best_ckpt_iteration = self.steps\n self.logger.info(\n 'Hooray! New best validation result [%s]!',\n self.early_stopping_metric)\n if self.ckpt_queue.maxsize > 0:\n self.logger.info(\"Saving new checkpoint.\")\n new_best = True\n self._save_checkpoint()\n\n if self.sched_incr == \"validation\":\n self.scheduler.step(ckpt_score)\n\n # append to validation report\n self._add_report(valid_scores, new_best=new_best)\n\n valid_sources_raw = {f: list(getattr(valid_data, f))\n for f in valid_data.fields\n if f != \"trg\"}\n\n self._log_examples(\n sources_raw=valid_sources_raw,\n hypotheses_raw=valid_hypotheses_raw,\n hypotheses=valid_hypotheses,\n references=valid_references\n )\n\n labeled_scores = sorted(valid_scores.items())\n eval_report = \", \".join(\"{}: {:.5f}\".format(n, v)\n for n, v in labeled_scores)\n\n valid_duration = time.time() - valid_start_time\n total_valid_duration += valid_duration\n\n self.logger.info(\n 'Validation result at epoch %3d, step %8d: %s, '\n 'duration: %.4fs',\n epoch_no, self.steps, eval_report, valid_duration)\n\n if scores_by_label is not None:\n for metric, scores in scores_by_label.items():\n # make a report\n label_report = [metric]\n numbers = sorted(scores.items())\n label_report.extend(\n [\"{}={}: {:.5f}\".format(l, n, v)\n for (l, n), v in numbers]\n )\n self.logger.info(\"\\n\\t\".join(label_report))\n\n # store validation set outputs\n self._store_outputs(valid_hypotheses)\n\n # store attention plots for selected valid sentences\n if valid_attention_scores and self.plot_attention:\n store_attention_plots(\n attentions=valid_attention_scores,\n sources=list(valid_data.src),\n targets=valid_hypotheses_raw,\n indices=self.log_valid_sents,\n model_dir=self.model_dir,\n tb_writer=self.tb_writer,\n steps=self.steps)\n\n if self.stop:\n break\n if self.stop:\n self.logger.info(\n 'Training ended because minimum lr %f was reached.',\n self.learning_rate_min)\n break\n\n self.logger.info(\n 'Epoch %3d: total training loss %.2f', epoch_no, epoch_loss)\n else:\n self.logger.info('Training ended after %3d epochs.', epoch_no)\n self.logger.info('Best validation result at step %8d: %6.2f %s.',\n self.best_ckpt_iteration, self.best_ckpt_score,\n self.early_stopping_metric)\n\n self.tb_writer.close() # close Tensorboard writer\n\n def _train_batch(self, batch: Batch, update: bool = True) -> Tensor:\n \"\"\"\n Train the model on one batch: Compute the loss, make a gradient step.\n\n :param batch: training batch\n :param update: if False, only store gradient. 
if True also make update\n :return: loss for batch (sum)\n \"\"\"\n times_called = next(self.step_counter)\n # when do you call get_risk_for batch?\n mrt_schedule = self.mrt_schedule\n mrt_steps = self.mrt_start_steps\n warmed_up = mrt_schedule == \"warmup\" and times_called >= mrt_steps\n mrt_drawn = mrt_schedule == \"mix\" and random.random() < self.mrt_p\n\n if mrt_schedule == \"mtl\":\n batch_loss = self.model.get_loss_and_risk_for_batch(\n batch,\n self.loss,\n n_samples=self.mrt_samples,\n alpha=self.mrt_alpha,\n strategy=self.mrt_strategy,\n max_len=self.mrt_max_len,\n cost=self.mrt_cost,\n level=self.trg_level,\n mrt_lambda=self.mrt_lambda\n )\n if warmed_up or mrt_drawn:\n batch_loss = self.mrt_lambda * self.model.get_risk_for_batch(\n batch,\n n_samples=self.mrt_samples,\n alpha=self.mrt_alpha,\n strategy=self.mrt_strategy,\n max_len=self.mrt_max_len,\n cost=self.mrt_cost,\n level=self.trg_level\n )\n else:\n batch_loss = self.model.get_loss_for_batch(batch, self.loss)\n\n norm = batch.nseqs if self.norm_type == \"batch\" else batch.ntokens\n\n norm_batch_loss = batch_loss / norm\n # division needed since loss.backward sums the gradients until updated\n norm_batch_multiply = norm_batch_loss / self.batch_multiplier\n\n # compute gradients\n norm_batch_multiply.backward()\n\n if self.clip_grad_fun is not None:\n self.clip_grad_fun(self.model.parameters()) # works in-place\n\n if update:\n # make gradient step\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n self.steps += 1\n\n # increment token counter\n self.total_tokens += batch.ntokens\n\n return norm_batch_loss.detach()\n\n def _add_report(self, valid_scores: dict, new_best: bool = False) -> None:\n \"\"\"\n Append a one-line report to validation logging file.\n\n :param valid_score: validation evaluation score [eval_metric]\n :param valid_ppl: validation perplexity\n :param valid_loss: validation loss (sum over whole validation set)\n :param eval_metric: evaluation metric, e.g. 
\"bleu\"\n :param new_best: whether this is a new best model\n \"\"\"\n current_lr = -1\n # ignores other param groups for now\n for param_group in self.optimizer.param_groups:\n current_lr = param_group['lr']\n\n if current_lr < self.learning_rate_min:\n self.stop = True # why does this happen inside _add_report?\n\n with open(self.valid_report_file, 'a') as opened_file:\n labeled_scores = sorted(valid_scores.items())\n eval_report = \"\\t\".join(\"{}: {:.5f}\".format(n, v)\n for n, v in labeled_scores)\n opened_file.write(\n \"Steps: {}\\t{}\\tLR: {:.8f}\\t{}\\n\".format(\n self.steps, eval_report,\n current_lr, \"*\" if new_best else \"\"))\n\n def _log_parameters_list(self) -> None:\n \"\"\"\n Write all model parameters (name, shape) to the log.\n \"\"\"\n model_parameters = filter(lambda p: p.requires_grad,\n self.model.parameters())\n n_params = sum(np.prod(p.size()) for p in model_parameters)\n self.logger.info(\"Total params: %d\", n_params)\n trainable_params = [n for (n, p) in self.model.named_parameters()\n if p.requires_grad]\n self.logger.info(\"Trainable parameters: %s\", sorted(trainable_params))\n assert trainable_params\n\n def _log_examples(\n self,\n sources_raw: Dict[str, List[str]],\n hypotheses: List[str],\n references: List[str],\n hypotheses_raw: List[List[str]] = None,\n references_raw: List[List[str]] = None) -> None:\n \"\"\"\n Log a the first `self.log_valid_sents` sentences from given examples.\n\n :param sources: decoded sources (dict of list of strings)\n :param hypotheses: decoded hypotheses (list of strings)\n :param references: decoded references (list of strings)\n :param sources_raw: raw sources (list of list of tokens)\n :param hypotheses_raw: raw hypotheses (list of list of tokens)\n :param references_raw: raw references (list of list of tokens)\n \"\"\"\n ix = self.log_valid_sents\n assert all(i < len(hypotheses) for i in ix)\n\n sources = {k: postprocess(v, self.src_level)\n for k, v in sources_raw.items()}\n for i in ix:\n self.logger.info(\"Example #{}\".format(i))\n\n for f, rs in sources_raw.items():\n self.logger.debug(\"\\t{}: {}\".format(f, rs[i]))\n if references_raw is not None:\n self.logger.debug(\"\\tRaw reference: %s\", references_raw[i])\n if hypotheses_raw is not None:\n self.logger.debug(\"\\tRaw hypothesis: %s\", hypotheses_raw[i])\n\n for f, srcs in sources.items():\n self.logger.info(\"\\t{}: {}\".format(f, srcs[i]))\n self.logger.info(\"\\tReference: %s\", references[i])\n self.logger.info(\"\\tHypothesis: %s\", hypotheses[i])\n\n def _store_outputs(self, hypotheses: List[str]) -> None:\n \"\"\"\n Write current validation outputs to file in `self.model_dir.`\n\n :param hypotheses: list of strings\n \"\"\"\n valid_output_file = join(self.model_dir, \"{}.hyps\".format(self.steps))\n with open(valid_output_file, 'w') as f:\n for hyp in hypotheses:\n f.write(\"{}\\n\".format(hyp))\n\n\ndef train(cfg_file: str) -> None:\n \"\"\"\n Main training function. After training, also test on test data if given.\n\n :param cfg_file: path to configuration yaml file\n \"\"\"\n cfg = load_config(cfg_file)\n train_cfg = cfg[\"training\"]\n data_cfg = cfg[\"data\"]\n\n # set the random seed\n set_seed(seed=train_cfg.get(\"random_seed\", 42))\n\n # load the data\n data = load_data(data_cfg)\n train_data = data[\"train_data\"]\n dev_data = data[\"dev_data\"]\n test_data = data[\"test_data\"]\n vocabs = data[\"vocabs\"]\n\n # build an encoder-decoder model\n model = build_model(cfg[\"model\"], vocabs=vocabs)\n\n # for training management, e.g. 
early stopping and model selection\n trainer = TrainManager(model=model, config=cfg)\n\n # store copy of original training config in model dir\n shutil.copy2(cfg_file, join(trainer.model_dir, \"config.yaml\"))\n\n # log all entries of config\n log_cfg(cfg, trainer.logger)\n\n log_data_info(\n train_data=train_data,\n valid_data=dev_data,\n test_data=test_data,\n vocabs=vocabs,\n logging_function=trainer.logger.info)\n\n trainer.logger.info(str(model))\n\n # store the vocabs\n model_dir = train_cfg[\"model_dir\"]\n for vocab_name, vocab in vocabs.items():\n vocab.to_file(join(model_dir, vocab_name + \"_vocab.txt\"))\n\n # train the model\n trainer.train_and_validate(train_data=train_data, valid_data=dev_data)\n\n # predict with the best model on validation and test\n # (if test data is available)\n ckpt = join(trainer.model_dir, str(trainer.best_ckpt_iteration) + \".ckpt\")\n output_name = \"{:08d}.hyps\".format(trainer.best_ckpt_iteration)\n output_path = join(trainer.model_dir, output_name)\n test(cfg_file, ckpt=ckpt, output_path=output_path, logger=trainer.logger)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser('Joey-NMT')\n parser.add_argument(\"config\", default=\"configs/default.yaml\", type=str,\n help=\"Training configuration file (yaml).\")\n args = parser.parse_args()\n train(cfg_file=args.config)\n", "from typing import Dict\n\nimport torch\nfrom torch import Tensor\nfrom torch.distributions.categorical import Categorical\n\nfrom joeynmt.helpers import tile\n\n\ndef sample_decode(\n model,\n size: int,\n encoder_output,\n masks: Dict[str, Tensor],\n max_output_length: int,\n labels: dict = None):\n \"\"\"\n Sample size sequences from the model\n\n In each decoding step, find the k most likely partial hypotheses.\n\n :param decoder:\n :param size: size of the beam\n :param encoder_output:\n :param masks:\n :param max_output_length:\n :return:\n - stacked_output: dim?,\n - scores: dim?\n \"\"\"\n\n # init\n transformer = model.is_transformer\n any_mask = next(iter(masks.values()))\n batch_size = any_mask.size(0)\n att_vectors = None # not used for Transformer\n device = encoder_output.device\n\n masks.pop(\"trg\", None) # mutating one of the inputs is not good\n\n # Recurrent models only: initialize RNN hidden state\n if not transformer and model.decoder.bridge_layer is not None:\n hidden = model.decoder.bridge_layer(encoder_output.hidden)\n else:\n hidden = None\n\n # tile encoder states and decoder initial states beam_size times\n if hidden is not None:\n # layers x batch*k x dec_hidden_size\n hidden = tile(hidden, size, dim=1)\n\n # encoder_output: batch*k x src_len x enc_hidden_size\n encoder_output.tile(size, dim=0)\n masks = {k: tile(v, size, dim=0) for k, v in masks.items()}\n\n # Transformer only: create target mask\n masks[\"trg\"] = any_mask.new_ones([1, 1, 1]) if transformer else None\n\n # the structure holding all batch_size * k partial hypotheses\n alive_seq = torch.full(\n (batch_size * size, 1),\n model.bos_index,\n dtype=torch.long,\n device=device\n )\n # the structure indicating, for each hypothesis, whether it has\n # encountered eos yet (if it has, stop updating the hypothesis\n # likelihood)\n is_finished = torch.zeros(\n batch_size * size, dtype=torch.bool, device=device\n )\n\n # for each (batch x size) sequence, there is a log probability\n seq_probs = torch.zeros(batch_size * size, device=device)\n\n for step in range(1, max_output_length + 1):\n dec_input = alive_seq if transformer else alive_seq[:, -1].view(-1, 1)\n\n # decode a 
step\n probs, hidden, att_scores, att_vectors = model.decode(\n trg_input=dec_input,\n encoder_output=encoder_output,\n masks=masks,\n decoder_hidden=hidden,\n prev_att_vector=att_vectors,\n unroll_steps=1,\n labels=labels,\n generate=\"true\"\n )\n\n # batch*k x trg_vocab\n # probs = model.decoder.gen_func(logits[:, -1], dim=-1).squeeze(1)\n\n next_ids = Categorical(probs).sample().unsqueeze(1) # batch*k x 1\n next_scores = probs.gather(1, next_ids).squeeze(1) # batch*k\n\n seq_probs = torch.where(\n is_finished, seq_probs, seq_probs + next_scores.log()\n )\n\n # append latest prediction\n # batch_size*k x hyp_len\n alive_seq = torch.cat([alive_seq, next_ids], -1)\n\n # update which hypotheses are finished\n is_finished = is_finished | next_ids.eq(model.eos_index).squeeze(1)\n\n if is_finished.all():\n break\n\n # final_outputs: batch x size x len\n final_outputs = alive_seq.view(batch_size, size, -1)\n seq_probs = seq_probs.view(batch_size, size)\n max_scores, max_ix = seq_probs.max(dim=-1)\n outs = []\n for b in range(final_outputs.size(0)):\n outs.append(final_outputs[b, max_ix[b]])\n best_outputs = torch.stack(outs) # hmm, maybe not as good as pad and stack\n # print(torch.index_select(final_outputs, 0, max_ix).size())\n #print(final_outputs[:, max_ix].size())\n #print(final_outputs[:, max_ix].size())\n\n return best_outputs, max_scores\n", "import torch\n\nfrom joeynmt.model import EncoderOutput\nfrom joeynmt.decoders import TransformerDecoder, TransformerDecoderLayer\nfrom .test_helpers import TensorTestCase\n\n\nclass TestTransformerDecoder(TensorTestCase):\n\n def setUp(self):\n self.emb_size = 12\n self.num_layers = 3\n self.hidden_size = 12\n self.ff_size = 24\n self.num_heads = 4\n self.dropout = 0.\n self.seed = 42\n\n def test_transformer_decoder_freeze(self):\n torch.manual_seed(self.seed)\n encoder = TransformerDecoder(freeze=True)\n for n, p in encoder.named_parameters():\n self.assertFalse(p.requires_grad)\n\n def test_transformer_decoder_output_size(self):\n\n vocab_size = 11\n decoder = TransformerDecoder(\n num_layers=self.num_layers, num_heads=self.num_heads,\n hidden_size=self.hidden_size, ff_size=self.ff_size,\n dropout=self.dropout, vocab_size=vocab_size)\n\n if not hasattr(decoder, \"output_size\"):\n self.fail(\"Missing output_size property.\")\n\n self.assertEqual(decoder.output_size, vocab_size)\n\n def test_transformer_decoder_forward(self):\n torch.manual_seed(self.seed)\n batch_size = 2\n src_time_dim = 4\n trg_time_dim = 5\n vocab_size = 7\n\n trg_embed = torch.rand(size=(batch_size, trg_time_dim, self.emb_size))\n\n decoder = TransformerDecoder(\n num_layers=self.num_layers, num_heads=self.num_heads,\n hidden_size=self.hidden_size, ff_size=self.ff_size,\n dropout=self.dropout, emb_dropout=self.dropout,\n vocab_size=vocab_size)\n\n encoder_output = EncoderOutput(\n torch.rand(size=(batch_size, src_time_dim, self.hidden_size)),\n None)\n\n for p in decoder.parameters():\n torch.nn.init.uniform_(p, -0.5, 0.5)\n\n src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1\n trg_mask = torch.ones(size=(batch_size, trg_time_dim, 1)) == 1\n\n output, states, _, _ = decoder(\n trg_embed, encoder_output, src_mask, trg_mask)\n\n output_target = torch.Tensor(\n [[[ 0.1765, 0.4578, 0.2345, -0.5303, 0.3862, 0.0964, 0.6882],\n [ 0.3363, 0.3907, 0.2210, -0.5414, 0.3770, 0.0748, 0.7344],\n [ 0.3275, 0.3729, 0.2797, -0.3519, 0.3341, 0.1605, 0.5403],\n [ 0.3081, 0.4513, 0.1900, -0.3443, 0.3072, 0.0570, 0.6652],\n [ 0.3253, 0.4315, 0.1227, -0.3371, 0.3339, 0.1129, 
0.6331]],\n\n [[ 0.3235, 0.4836, 0.2337, -0.4019, 0.2831, -0.0260, 0.7013],\n [ 0.2800, 0.5662, 0.0469, -0.4156, 0.4246, -0.1121, 0.8110],\n [ 0.2968, 0.4777, 0.0652, -0.2706, 0.3146, 0.0732, 0.5362],\n [ 0.3108, 0.4910, 0.0774, -0.2341, 0.2873, 0.0404, 0.5909],\n [ 0.2338, 0.4371, 0.1350, -0.1292, 0.0673, 0.1034, 0.5356]]]\n )\n self.assertEqual(output_target.shape, output.shape)\n self.assertTensorAlmostEqual(output_target, output)\n\n greedy_predictions = output.argmax(-1)\n expect_predictions = output_target.argmax(-1)\n self.assertTensorEqual(expect_predictions, greedy_predictions)\n\n states_target = torch.Tensor(\n [[[ 8.3742e-01, -1.3161e-01, 2.1876e-01, -1.3920e-01, -9.1572e-01,\n 2.3006e-01, 3.8328e-01, -1.6271e-01, 3.7370e-01, -1.2110e-01,\n -4.7549e-01, -4.0622e-01],\n [ 8.3609e-01, -2.9161e-02, 2.0583e-01, -1.3571e-01, -8.0510e-01,\n 2.7630e-01, 4.8219e-01, -1.8863e-01, 1.1977e-01, -2.0179e-01,\n -4.4314e-01, -4.1228e-01],\n [ 8.5478e-01, 1.1368e-01, 2.0400e-01, -1.3059e-01, -8.1042e-01,\n 1.6369e-01, 5.4244e-01, -2.9103e-01, 3.9919e-01, -3.3826e-01,\n -4.5423e-01, -4.2516e-01],\n [ 9.0388e-01, 1.1853e-01, 1.9927e-01, -1.1675e-01, -7.7208e-01,\n 2.0686e-01, 4.6024e-01, -9.1610e-02, 3.9778e-01, -2.6214e-01,\n -4.7688e-01, -4.0807e-01],\n [ 8.9476e-01, 1.3646e-01, 2.0298e-01, -1.0910e-01, -8.2137e-01,\n 2.8025e-01, 4.2538e-01, -1.1852e-01, 4.1497e-01, -3.7422e-01,\n -4.9212e-01, -3.9790e-01]],\n\n [[ 8.8745e-01, -2.5798e-02, 2.1483e-01, -1.8219e-01, -6.4821e-01,\n 2.6279e-01, 3.9598e-01, -1.0423e-01, 3.0726e-01, -1.1315e-01,\n -4.7201e-01, -3.6979e-01],\n [ 7.5528e-01, 6.8919e-02, 2.2486e-01, -1.6395e-01, -7.9692e-01,\n 3.7830e-01, 4.9367e-01, 2.4355e-02, 2.6674e-01, -1.1740e-01,\n -4.4945e-01, -3.6367e-01],\n [ 8.3467e-01, 1.7779e-01, 1.9504e-01, -1.6034e-01, -8.2783e-01,\n 3.2627e-01, 5.0045e-01, -1.0181e-01, 4.4797e-01, -4.8046e-01,\n -3.7264e-01, -3.7392e-01],\n [ 8.4359e-01, 2.2699e-01, 1.9721e-01, -1.5768e-01, -7.5897e-01,\n 3.3738e-01, 4.5559e-01, -1.0258e-01, 4.5782e-01, -3.8058e-01,\n -3.9275e-01, -3.8412e-01],\n [ 9.6349e-01, 1.6264e-01, 1.8207e-01, -1.6910e-01, -5.9304e-01,\n 1.4468e-01, 2.4968e-01, 6.4794e-04, 5.4930e-01, -3.8420e-01,\n -4.2137e-01, -3.8016e-01]]]\n )\n\n self.assertEqual(states_target.shape, states.shape)\n self.assertTensorAlmostEqual(states_target, states)\n\n def test_transformer_decoder_layers(self):\n\n torch.manual_seed(self.seed)\n batch_size = 2\n src_time_dim = 4\n trg_time_dim = 5\n vocab_size = 7\n\n decoder = TransformerDecoder(\n num_layers=self.num_layers, num_heads=self.num_heads,\n hidden_size=self.hidden_size, ff_size=self.ff_size,\n dropout=self.dropout, vocab_size=vocab_size)\n\n self.assertEqual(len(decoder.layers), self.num_layers)\n\n for layer in decoder.layers:\n self.assertTrue(isinstance(layer, TransformerDecoderLayer))\n self.assertTrue(hasattr(layer, \"src_trg_att\"))\n self.assertTrue(hasattr(layer, \"trg_trg_att\"))\n self.assertTrue(hasattr(layer, \"feed_forward\"))\n self.assertEqual(layer.size, self.hidden_size)\n self.assertEqual(\n layer.feed_forward.pwff_layer[0].in_features, self.hidden_size)\n self.assertEqual(\n layer.feed_forward.pwff_layer[0].out_features, self.ff_size)\n" ]
[ [ "torch.save", "torch.FloatTensor" ], [ "torch.zeros", "torch.cat", "torch.stack", "torch.distributions.categorical.Categorical", "torch.full" ], [ "torch.rand", "torch.ones", "torch.manual_seed", "torch.nn.init.uniform_", "torch.Tensor" ] ]
1jinwoo/YHack2018
[ "2cdb7961917daa7d6f592ac8bad81421d063638e" ]
[ "Models/multiple_layer_model.py" ]
[ "# libraries import\nfrom keras.models import Sequential\nfrom keras import layers\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import StratifiedKFold\nimport numpy as np\nimport pandas as pd\n\n# file import\nimport data_cleaner as dc\nimport model_helper as mh\n\ndf = dc.clean_item_data(0)\ndf = dc.cleanup_categoryid(df)[0]\n\n# fix random seed for reproducibility\nseed = 7\nnp.random.seed(seed)\n\n\n# define 5-fold cross validation test harness\nX, Y = dc.data_split(df, 1, 0, 0)\nkfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\ncvscores_train = []\ncvscores_test = []\nfor train_valid, test in kfold.split(X, Y):\n X_Y = pd.concat([X[train_valid], Y[train_valid]], axis=1)\n _X_train, _, _X_valid, Y_train, _, Y_valid = dc.data_split(X_Y, 0.8125, 0, 0.1875)\n vectorizer = CountVectorizer(encoding='latin1') # Allow different options (min_df, encoding)\n\n # convert pandas dataframes to list of strings\n x_train_list = []\n x_test_list = []\n x_valid_list = []\n for _, row in _X_train.iterrows():\n x_train_list.append(row[0])\n for _, row in _X_valid.iterrows():\n x_valid_list.append(row[0])\n\n vectorizer.fit(x_train_list)\n X_train = vectorizer.transform(x_train_list)\n X_test = vectorizer.transform(X[test])\n X_valid = vectorizer.transform(x_valid_list)\n Y_test = Y[test]\n\n # Neural Network\n print('X train shape: ' + str(X_train.shape[1]))\n input_dim = X_train.shape[1] # Number of features\n output_dim = df['categoryId'].nunique()\n model = Sequential()\n model.add(layers.Dense(330, input_dim=input_dim, activation='relu', use_bias=False))\n model.add(layers.Dropout(rate=0.6))\n # model.add(layers.Dense(100, activation='relu'))\n model.add(layers.Dropout(rate=0.6))\n model.add(layers.Dense(output_dim, activation='softmax'))\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n history = model.fit(X_train, Y_train,\n epochs=5,\n verbose=1,\n validation_data=(X_valid, Y_valid),\n batch_size=10)\n print(model.summary())\n\n loss, accuracy_train = model.evaluate(X_train, Y_train, verbose=False)\n print(\"Training Accuracy: {:.4f}\".format(accuracy_train))\n loss, accuracy_test = model.evaluate(X_test, Y_test, verbose=False)\n print(\"Testing Accuracy: {:.4f}\".format(accuracy_test))\n mh.plot_history(history)\n cvscores_train.append(accuracy_train * 100)\n cvscores_test.append(accuracy_test * 100)\n\nprint('5-fold cross validation metrics on training set')\nprint(\"%.2f%% (+/- %.2f%%)\" % (np.mean(cvscores_train), np.std(cvscores_train)))\nprint('5-fold cross validation metrics on testing set')\nprint(\"%.2f%% (+/- %.2f%%)\" % (np.mean(cvscores_test), np.std(cvscores_test)))\n\n\n\n" ]
[ [ "sklearn.model_selection.StratifiedKFold", "numpy.random.seed", "numpy.mean", "numpy.std", "sklearn.feature_extraction.text.CountVectorizer", "pandas.concat" ] ]
shubhscode/onePerceptron
[ "4c9bc540de2e2ff6a6f792b5e5d4cab184ea336e" ]
[ "and.py" ]
[ "from utils.model import Perceptron\nfrom utils.all_utils import prepare_data, save_model, save_plot\nimport pandas as pd\n\nimport logging\n\nlogging_str = \"[%(asctime)s: %(levelname)s: %(module)s] %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=logging_str)\n\n\ndef main(data, eta, epochs, filename, plotfilename):\n df = pd.DataFrame(data)\n\n print(df)\n\n X, y = prepare_data(df)\n\n model = Perceptron(eta=eta, epochs=epochs)\n model.fit(X, y)\n\n _ = model.total_loss()\n\n save_model(model, filename=filename)\n save_plot(df, plotfilename, model)\n\n\nif __name__ == '__main__':\n AND = {\n \"x1\": [0, 0, 1, 1],\n \"x2\": [0, 1, 0, 1],\n \"y\": [0, 0, 0, 1],\n }\n\n ETA = 0.3 # 0 and 1\n EPOCHS = 10\n\n main(data=AND, eta=ETA, epochs=EPOCHS, filename=\"and.modal\", plotfilename=\"and.png\")\n" ]
[ [ "pandas.DataFrame" ] ]
triskadecaepyon/pyworkout-toolkit
[ "8a93f9a086666508435a631774bbd6f97f3f7a52" ]
[ "pyworkout/parsers/tcxtools.py" ]
[ "\"\"\"\nTools to process TCX files,\nspecifically for parsing and\nconverting to other formats.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom lxml import objectify\nimport dateutil.parser\nimport logging\n\nTPXNS = \"{http://www.garmin.com/xmlschemas/ActivityExtension/v2}TPX\"\nLXNS = \"{http://www.garmin.com/xmlschemas/ActivityExtension/v2}LX\"\n\n\nclass TCXPandas(object):\n \"\"\"\n Class for Parsing .TCX files to Pandas DataFrames.\n\n Parameters\n ----------\n tcx_file : string, path object,\n the path to the tcx file\n\n \"\"\"\n\n def __init__(self, tcx_file, **kwds):\n self.__filehandle__ = tcx_file\n self.tcx = None\n self.activity = None\n self.dataframe = None\n\n logging.basicConfig(filename=\"TCXconversion.log\", level=logging.DEBUG)\n\n def parse(self):\n \"\"\"\n Parse specified TCX file into a DataFrame\n Return a Dataframe and sets Dataframe and sets\n the self.dataframe object in the TCXParser.\n \"\"\"\n\n self.tcx = objectify.parse(open(self.__filehandle__))\n self.activity = self.tcx.getroot().Activities.Activity\n self.dataframe = pd.DataFrame(self._traverse_laps_())\n return self.dataframe\n\n def get_activity_timestamp(self):\n \"\"\"\n Returns the TCX file timestamp if parsed\n \"\"\"\n if self.activity is None:\n return None\n else:\n return self.activity.Id\n\n def get_sport(self):\n \"\"\"\n Returns the specified sport of the TCX file\n \"\"\"\n if self.activity is None:\n return None\n else:\n return self.activity.attrib['Sport']\n\n def get_workout_startime(self):\n \"\"\"\n Returns the starting timestamp of the specified TCX file\n \"\"\"\n if self.activity is None:\n return None\n else:\n return self.activity.Lap.items()[0][1]\n\n def _traverse_laps_(self):\n\n # New iterator method to align with lxml standard\n return_array = []\n for laps in self.activity.Lap:\n for tracks in laps.Track:\n for trackingpoints in tracks.Trackpoint:\n return_dict = {}\n return_dict['time'] = dateutil.parser.parse(str(trackingpoints.Time))\n\n try:\n return_dict['latitude'] = \\\n np.float(trackingpoints.Position.LatitudeDegrees)\n except AttributeError:\n pass #TODO log this\n\n try:\n return_dict['longitude'] = \\\n np.float(trackingpoints.Position.LongitudeDegrees)\n except AttributeError:\n pass #TODO log this\n\n try:\n return_dict['altitude'] = np.float(trackingpoints.AltitudeMeters)\n except AttributeError:\n pass #TODO log this\n\n try:\n return_dict['distance'] = np.float(trackingpoints.DistanceMeters)\n except AttributeError:\n pass #TODO log this\n\n try:\n return_dict['hr'] = np.float(trackingpoints.HeartRateBpm.Value)\n except AttributeError:\n pass #TODO log this\n\n try:\n return_dict['speed'] = \\\n np.float(trackingpoints.Extensions[TPXNS].Speed)\n except AttributeError:\n pass #TODO log this\n\n if self.get_sport == 'Running':\n try:\n return_dict['cadence'] = \\\n np.float(trackingpoints.Extensions[TPXNS].RunCadence)\n except AttributeError:\n pass #TODO log this\n else: # self.activity.attrib['Sport'] == 'Biking':\n try:\n return_dict['cadence'] = np.float(trackingpoints.Cadence)\n except AttributeError:\n pass #TODO log this\n\n try:\n return_dict['power'] = \\\n np.float(trackingpoints.Extensions[TPXNS].Watts)\n except AttributeError:\n pass #TODO log this\n\n return_array.append(return_dict)\n return return_array\n" ]
[ [ "numpy.float" ] ]
eltoto1219/vltk
[ "e84c0efe9062eb864604d96345f71483816340aa" ]
[ "vltk/abc/processor.py" ]
[ "import torch\nfrom vltk.inspection import collect_args_to_func\n\n\nclass Processor:\n _type = None\n _keys = ()\n\n @property\n def keys(self):\n if isinstance(self._keys, str):\n return set([self._keys])\n return set(self._keys)\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def enable_padding(self):\n self.tokenizer.enable_padding(\n length=self.config.lang.max_seq_length,\n direction=self.config.lang.pad_direction,\n pad_id=self.tokenizer.token_to_id(self.tokenizer.pad_token),\n )\n\n def disable_padding(self):\n self.tokenizer.no_padding()\n\n @torch.no_grad()\n def __call__(self, inp, **kwargs):\n if isinstance(inp, dict):\n proc_keys = self.keys\n intersection = proc_keys.intersection(set(inp.keys()))\n assert (\n intersection == proc_keys\n ), f\"{type(self).__name__} requires {proc_keys} to be present within the input dictionary, but not all \\\n keys are present. the input dictionary only has: {inp.keys()}\"\n\n kwargs = collect_args_to_func(self.forward, kwargs)\n output = self.forward(inp, **kwargs)\n if not isinstance(output, dict):\n assert isinstance(\n output, torch.Tensor\n ), \"the outputs of any processor must be a torch tensor or a \\\n dictionary where the repective value(s) from the key(s) of interest, specified in the init method, \\\n must be a torch tensor aswell\"\n else:\n pass\n\n return output\n\n\nclass VisnProcessor(Processor):\n _type = \"visn\"\n\n\nclass LangProcessor(Processor):\n _type = \"lang\"\n\n\nclass VisnLangProcessor(Processor):\n _type = \"visnlang\"\n\n @torch.no_grad()\n def __call__(self, text_inp, visn_inp, **kwargs):\n\n text_inp, visn_inp = self.forward(text_inp, visn_inp, **kwargs)\n\n return text_inp, visn_inp\n" ]
[ [ "torch.no_grad" ] ]
retamia/tvm
[ "5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8", "5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8" ]
[ "web/tests/python/websock_rpc_test.py", "topi/python/topi/cuda/conv2d_nhwc_tensorcore.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Simple testcode to test Javascript RPC\n\nTo use it, start a rpc proxy with \"python -m tvm.exec.rpc_proxy\".\nConnect javascript end to the websocket port and connect to the RPC.\n\"\"\"\n\nimport tvm\nfrom tvm import te\nfrom tvm import rpc\nfrom tvm.contrib import util, emcc\nimport numpy as np\n\nproxy_host = \"localhost\"\nproxy_port = 9090\n\ndef test_rpc():\n if not tvm.runtime.enabled(\"rpc\"):\n return\n # generate the wasm library\n target = \"llvm -target=wasm32-unknown-unknown-wasm -system-lib\"\n if not tvm.runtime.enabled(target):\n raise RuntimeError(\"Target %s is not enbaled\" % target)\n n = te.var(\"n\")\n A = te.placeholder((n,), name='A')\n B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')\n s = te.create_schedule(B.op)\n\n fadd = tvm.build(s, [A, B], target, name=\"addone\")\n temp = util.tempdir()\n\n wasm_path = temp.relpath(\"addone.wasm\")\n fadd.export_library(wasm_path, emcc.create_tvmjs_wasm)\n\n wasm_binary = open(wasm_path, \"rb\").read()\n\n remote = rpc.connect(proxy_host, proxy_port, key=\"wasm\",\n session_constructor_args=[\"rpc.WasmSession\", wasm_binary])\n\n def check(remote):\n # basic function checks.\n faddone = remote.get_function(\"testing.asyncAddOne\")\n fecho = remote.get_function(\"testing.echo\")\n assert(faddone(100) == 101)\n assert(fecho(1, 2, 3) == 1)\n assert(fecho(1, 2, 3) == 1)\n assert(fecho(100, 2, 3) == 100)\n assert(fecho(\"xyz\") == \"xyz\")\n assert(bytes(fecho(bytearray(b\"123\"))) == b\"123\")\n\n # run the generated library.\n f1 = remote.system_lib()\n ctx = remote.cpu(0)\n a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)\n b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)\n # invoke the function\n addone = f1.get_function(\"addone\")\n addone(a, b)\n\n # time evaluator\n time_f = f1.time_evaluator(\"addone\", ctx, number=100, repeat=10)\n time_f(a, b)\n cost = time_f(a, b).mean\n print('%g secs/op' % cost)\n np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)\n\n check(remote)\n\ntest_rpc()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, too-many-locals, too-many-function-args\n# pylint: disable=too-many-statements, unused-argument, too-many-arguments\n\"\"\"Tensorcore template for cuda backend\"\"\"\nimport numpy as np\nimport tvm\nfrom tvm import te\nfrom tvm import autotvm\nfrom ..util import get_const_tuple, traverse_inline, simplify\nfrom ..nn.pad import pad\nfrom ..nn.util import get_pad_tuple\nfrom .tensor_intrin import intrin_wmma_load_matrix_A\nfrom .tensor_intrin import intrin_wmma_load_matrix_W\nfrom .tensor_intrin import intrin_wmma_store_matrix\nfrom .tensor_intrin import intrin_wmma_gemm\n\n\ndef nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):\n \"\"\"Compute declaration for tensorcore\"\"\"\n assert isinstance(stride, int) or len(stride) == 2\n assert isinstance(dilation, int) or len(dilation) == 2\n\n if isinstance(stride, int):\n stride_h = stride_w = stride\n else:\n stride_h, stride_w = stride\n\n if isinstance(dilation, int):\n dilation_h = dilation_w = dilation\n else:\n dilation_h, dilation_w = dilation\n\n batch, in_height, in_width, in_channel = get_const_tuple(Input.shape)\n kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)\n assert (batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0) or \\\n (batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0) or \\\n (batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0), \\\n \"The shape of (batch, in_channel, num_filter) \"\\\n \"must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now\"\n\n # compute the output shape\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n pad_top, pad_left, pad_down, pad_right = get_pad_tuple(\n padding, (dilated_kernel_h, dilated_kernel_w))\n out_channel = num_filter\n out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)\n out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)\n pad_before = [0, pad_top, pad_left, 0]\n pad_after = [0, pad_down, pad_right, 0]\n PaddedInput = pad(Input, pad_before, pad_after, name=\"PaddedInput\")\n rc = te.reduce_axis((0, in_channel), name='rc')\n ry = te.reduce_axis((0, kernel_h), name='ry')\n rx = te.reduce_axis((0, kernel_w), name='rx')\n # convert data type of input feature maps and weights\n TransPaddedInput = te.compute(\n PaddedInput.shape,\n lambda n, h, w, c: PaddedInput[n, h, w, c].astype('float16'))\n TransFilter = te.compute(\n Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype('float16'))\n Output = te.compute(\n (batch, out_height, out_width, out_channel),\n lambda nn, yy, xx, ff: te.sum(\n TransPaddedInput[nn, yy * stride_h + ry * dilation_h,\n xx * stride_w + rx * dilation_w, rc].astype(out_dtype) *\n TransFilter[ry, rx, rc, ff].astype(out_dtype), axis=[ry, rx, rc]),\n name=\"Conv2dOutput\", tag=\"conv2d_nhwc_tensorcore\")\n return Output\n\n\ndef schedule_nhwc_tensorcore_cuda(cfg, s, Conv):\n \"\"\"Schedule tensorcore template\"\"\"\n kh, kw, ic = s[Conv].op.reduce_axis\n out_dtype = Conv.dtype\n trans_paddata, kernel = s[Conv].op.input_tensors\n in_dtype = trans_paddata.dtype\n batch, _, _, _ = get_const_tuple(Conv.shape)\n _, _, _, out_channels = get_const_tuple(kernel.shape)\n paddata = s[trans_paddata].op.input_tensors\n\n # inline the pad and dtype transform\n s[trans_paddata].compute_inline()\n 
s[kernel].compute_inline()\n s[paddata[0]].compute_inline()\n\n # Designate the memory hierarchy\n AS = s.cache_read(trans_paddata, 'shared', [Conv])\n WS = s.cache_read(kernel, 'shared', [Conv])\n AF = s.cache_read(AS, 'wmma.matrix_a', [Conv])\n WF = s.cache_read(WS, 'wmma.matrix_b', [Conv])\n ConvF = s.cache_write(Conv, 'wmma.accumulator')\n\n if Conv.op in s.outputs:\n output = Conv\n ConvS = s.cache_read(ConvF, 'shared', [Conv])\n OL = ConvS\n else:\n output = s.outputs[0].output(0)\n s[Conv].set_scope('shared')\n OL = Conv\n\n # Schedule for autotvm\n cfg.define_knob(\"block_row_warps\", [1, 2, 4])\n cfg.define_knob(\"block_col_warps\", [1, 2, 4])\n cfg.define_knob(\"warp_row_tiles\", [1, 2, 4])\n cfg.define_knob(\"warp_col_tiles\", [1, 2, 4])\n cfg.define_knob(\"chunk\", [1, 2, 4, 8])\n cfg.define_knob(\"offset\", [0, 8])\n cfg.define_knob(\"vector_width\", [1, 2, 4, 8])\n\n if (batch % 16 == 0 and out_channels % 16 == 0):\n cfg.define_knob(\"wmma_m\", [16, 8, 32])\n elif (batch % 8 == 0 and out_channels % 32 == 0):\n cfg.define_knob(\"wmma_m\", [8, 16, 32])\n elif (batch % 32 == 0 and out_channels % 8 == 0):\n cfg.define_knob(\"wmma_m\", [32, 16, 8])\n\n # fallback support\n target = tvm.target.Target.current()\n if cfg.is_fallback:\n ref_log = autotvm.tophub.load_reference_log(\n target.target_name, target.model, 'conv2d_nhwc_tensorcore.cuda')\n cfg.fallback_with_reference_log(ref_log)\n\n block_row_warps = cfg[\"block_row_warps\"].val\n block_col_warps = cfg[\"block_col_warps\"].val\n warp_row_tiles = cfg[\"warp_row_tiles\"].val\n warp_col_tiles = cfg[\"warp_col_tiles\"].val\n chunk = cfg[\"chunk\"].val\n offset = cfg[\"offset\"].val\n wmma_m = cfg[\"wmma_m\"].val\n vector_width = cfg[\"vector_width\"].val\n\n wmma_k = 16\n if wmma_m == 16:\n wmma_n = 16\n elif wmma_m == 8:\n wmma_n = 32\n elif wmma_m == 32:\n wmma_n = 8\n\n warp_size = 32\n\n block_x = te.thread_axis('blockIdx.x')\n block_y = te.thread_axis('blockIdx.y')\n block_z = te.thread_axis('blockIdx.z')\n thread_x = te.thread_axis('threadIdx.x')\n thread_y = te.thread_axis('threadIdx.y')\n thread_z = te.thread_axis('threadIdx.z')\n\n # Define the intrin strides\n def get_strides(extents):\n return [np.prod(extents[i:]).tolist() for i in range(len(extents))]\n\n AS_align = chunk * wmma_k + offset\n WS_align = warp_col_tiles * block_col_warps * wmma_n + offset\n block_factor_n = wmma_m * warp_row_tiles * block_row_warps\n block_factor_o = wmma_n * warp_col_tiles * block_col_warps\n CS_align = block_factor_o + offset\n AS_strides = get_strides([1, 1, AS_align, 1])\n AL_strides = get_strides([1, 1, wmma_k, 1])\n WS_strides = get_strides([WS_align, 1])\n WL_strides = get_strides([wmma_n * warp_col_tiles, 1])\n CL_strides = get_strides([1, 1, wmma_n * warp_col_tiles, 1])\n CS_strides = get_strides([1, 1, CS_align, 1])\n\n # Schedule for output\n nc, hc, wc, oc = output.op.axis\n block_k = s[output].fuse(hc, wc)\n s[output].bind(block_k, block_z)\n block_i, nc = s[output].split(nc, factor=block_factor_n)\n block_j, oc = s[output].split(oc, factor=block_factor_o)\n s[output].reorder(block_k, block_i, block_j, nc, oc)\n t = s[output].fuse(nc, oc)\n t, ti = s[output].split(t, factor=vector_width)\n t, tx = s[output].split(t, factor=warp_size)\n t, ty = s[output].split(t, factor=block_row_warps)\n t, tz = s[output].split(t, factor=block_col_warps)\n s[output].bind(block_i, block_x)\n s[output].bind(block_j, block_y)\n s[output].bind(tz, thread_z)\n s[output].bind(ty, thread_y)\n s[output].bind(tx, thread_x)\n 
s[output].vectorize(ti)\n\n # Schedule wmma store\n s[OL].compute_at(s[output], block_j)\n nc, hc, wc, oc = OL.op.axis\n s[OL].reorder(hc, wc, nc, oc)\n s[OL].storage_align(wc, CS_align - 1, CS_align)\n oc, ooc = s[OL].split(oc, factor=wmma_n)\n oc, oci = s[OL].split(oc, factor=warp_col_tiles)\n _, oc = s[OL].split(oc, factor=block_col_warps)\n nc, nnc = s[OL].split(nc, factor=wmma_m)\n nc, nci = s[OL].split(nc, factor=warp_row_tiles)\n _, nc = s[OL].split(nc, factor=block_row_warps)\n s[OL].reorder(nc, oc, nci, oci, nnc, ooc)\n s[OL].bind(nc, thread_y)\n s[OL].bind(oc, thread_z)\n\n # Schedule wmma computation\n s[ConvF].compute_at(s[OL], oc)\n n, h, w, o = ConvF.op.axis\n n, nnf = s[ConvF].split(n, factor=wmma_m)\n o, oof = s[ConvF].split(o, factor=wmma_n)\n ic, ii = s[ConvF].split(ic, factor=wmma_k)\n ko, ki = s[ConvF].split(ic, factor=chunk)\n s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)\n\n s[AF].compute_at(s[ConvF], ki)\n s[WF].compute_at(s[ConvF], ki)\n\n # Schedule wmma load\n n, h, w, i = AF.op.axis\n n, nn = s[AF].split(n, factor=wmma_m)\n i, ii = s[AF].split(i, factor=wmma_k)\n s[AF].reorder(n, i, nn, ii)\n\n kh, kw, i, o = WF.op.axis\n i, ii = s[WF].split(i, factor=wmma_k)\n o, oo = s[WF].split(o, factor=wmma_n)\n s[WF].reorder(o, i, oo)\n s[WF].reorder(i, o, ii, oo)\n\n s[WS].compute_at(s[ConvF], ko)\n s[AS].compute_at(s[ConvF], ko)\n\n # Schedule for data's share memory\n n, h, w, i = AS.op.axis\n s[AS].reorder(h, w, n, i)\n s[AS].storage_align(w, AS_align - 1, AS_align)\n t = s[AS].fuse(n, i)\n t, ti = s[AS].split(t, factor=vector_width)\n t, tx = s[AS].split(t, factor=warp_size)\n t, ty = s[AS].split(t, factor=block_row_warps)\n _, tz = s[AS].split(t, factor=block_col_warps)\n s[AS].bind(ty, thread_y)\n s[AS].bind(tz, thread_z)\n s[AS].bind(tx, thread_x)\n s[AS].vectorize(ti)\n\n # Schedule for kernel's share memory\n kh, kw, ic, o = WS.op.axis\n t = s[WS].fuse(ic, o)\n s[WS].storage_align(ic, WS_align - 1, WS_align)\n t, ti = s[WS].split(t, factor=vector_width)\n t, tx = s[WS].split(t, factor=warp_size)\n t, ty = s[WS].split(t, factor=block_row_warps)\n _, tz = s[WS].split(t, factor=block_col_warps)\n s[WS].bind(ty, thread_y)\n s[WS].bind(tz, thread_z)\n s[WS].bind(tx, thread_x)\n s[WS].vectorize(ti)\n\n shape = (wmma_m, wmma_n, wmma_k)\n\n # tensorize the wmma process\n AS_shape = (wmma_m, 1, 1, wmma_k)\n AL_shape = (wmma_m, 1, 1, wmma_k)\n WS_shape = (wmma_k, wmma_n)\n WL_shape = (wmma_k, wmma_n)\n CL_shape = (wmma_m, 1, 1, wmma_n)\n CS_shape = (wmma_m, 1, 1, wmma_n)\n\n AL_gemm = te.placeholder(AL_shape, name='A', dtype=in_dtype)\n WL_gemm = te.placeholder(WL_shape, name='B', dtype=in_dtype)\n k_gemm = te.reduce_axis((0, wmma_k), name=\"k\")\n CL_compute = te.compute(CL_shape, lambda ii, t0, t1, jj:\n te.sum(AL_gemm[ii, t0, t1, k_gemm].astype(out_dtype) * \\\n WL_gemm[k_gemm, jj].astype(out_dtype), axis=k_gemm),\n name='C')\n\n s[AF].tensorize(nn, intrin_wmma_load_matrix_A(AL_strides, AS_strides, shape,\n \"row_major\", AS_shape, AL_shape, in_dtype))\n s[WF].tensorize(ii, intrin_wmma_load_matrix_W(WL_strides, WS_strides, shape,\n \"row_major\", WS_shape, WL_shape, in_dtype))\n s[OL].tensorize(nnc, intrin_wmma_store_matrix(CS_strides, CL_strides,\n shape, out_dtype, CL_shape, CS_shape))\n s[ConvF].tensorize(nnf, intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides,\n WL_strides, CL_strides, shape))\n\n N, OH, OW, CO = get_const_tuple(output.shape)\n KH, KW, CI, _ = get_const_tuple(kernel.shape)\n cfg.add_flop(2 * N * OH * OW * CO * CI * KH * 
KW)\n\n\n@autotvm.register_topi_compute(\"conv2d_nhwc_tensorcore.cuda\")\ndef conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):\n \"\"\"Compute conv2d with tensorcore for NCHW layout\"\"\"\n return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)\n\n\n@autotvm.register_topi_schedule(\"conv2d_nhwc_tensorcore.cuda\")\ndef schedule_conv2d_nhwc_tensorcore(cfg, outs):\n \"\"\"TOPI schedule callback\"\"\"\n s = te.create_schedule([x.op for x in outs])\n\n def _callback(op):\n if 'conv2d_nhwc_tensorcore' in op.tag:\n schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))\n\n traverse_inline(s, outs[0].op, _callback)\n return s\n" ]
[ [ "numpy.random.uniform", "numpy.zeros" ], [ "numpy.prod" ] ]
tyohei/examples
[ "38652f48aca2b668bcc116ba401795d4be2f8f18" ]
[ "mpi/python/bcast.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import print_function\nfrom mpi4py import MPI\nimport numpy\n\nimport common\n\n\ndef bcast(comm):\n n = 8192\n\n print_mpi = common.create_print_mpi(comm)\n\n # Allocate buffer and set value\n if comm.rank == 0:\n buf = numpy.arange(n).astype(numpy.float32)\n else:\n buf = numpy.empty(n).astype(numpy.float32)\n\n # Broadcast\n print_mpi('B: {}'.format(buf), 1)\n print_mpi('Bcast ...')\n comm.Bcast([buf, MPI.FLOAT], root=0)\n print_mpi('Bcast done')\n print_mpi('A: {}'.format(buf), 1)\n\n print_mpi('========', 0)\n\n if comm.rank == 0:\n buf = numpy.arange(n).astype(numpy.float32)\n else:\n buf = numpy.array([])\n\n # Broadcast\n print_mpi('B: {}'.format(buf), 1)\n print_mpi('Bcast ...')\n buf = comm.bcast(buf, root=0)\n print_mpi('Bcast done')\n print_mpi('A: {}'.format(buf), 1)\n\n\ndef main():\n comm = MPI.COMM_WORLD\n bcast(comm)\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.empty" ] ]
aclong/transx2gtfs
[ "36d5b87d425c5dd299a3fbc7e973aff91876c2ca" ]
[ "transx2gtfs/tests/test_calendar.py" ]
[ "from transx2gtfs.data import get_path\nimport pytest\n\n@pytest.fixture\ndef test_tfl_data():\n return get_path('test_tfl_format')\n\n\n@pytest.fixture\ndef test_txc21_data():\n return get_path('test_txc21_format')\n\n\n@pytest.fixture\ndef test_naptan_data():\n return get_path('naptan_stops')\n\n\ndef test_calendar_weekday_info_tfl(test_tfl_data):\n from transx2gtfs.calendar import get_service_operative_days_info\n import untangle\n\n data = untangle.parse(test_tfl_data)\n operative_days = get_service_operative_days_info(data)\n\n # Should return text\n assert isinstance(operative_days, str)\n\n # Should contain text 'Weekend'\n assert operative_days == 'Weekend'\n\n\ndef test_calendar_weekday_info_txc21(test_txc21_data):\n from transx2gtfs.calendar import get_service_operative_days_info\n import untangle\n\n data = untangle.parse(test_txc21_data)\n operative_days = get_service_operative_days_info(data)\n\n # Should return text\n assert isinstance(operative_days, str)\n\n # Should contain text 'Weekend'\n assert operative_days == 'Weekend'\n\n\ndef test_calendar_dataframe_tfl(test_tfl_data):\n from transx2gtfs.calendar import get_weekday_info, parse_day_range\n from pandas import DataFrame\n from pandas.testing import assert_frame_equal\n import untangle\n data = untangle.parse(test_tfl_data)\n\n # Get vehicle journeys\n vjourneys = data.TransXChange.VehicleJourneys.VehicleJourney\n\n correct_frames = {'Sunday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 0.0,\n 'sunday': 1.0, 'thursday': 0.0,\n 'tuesday': 0.0, 'wednesday': 0.0}, index=[0]),\n\n 'Saturday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 1.0,\n 'sunday': 0.0, 'thursday': 0.0,\n 'tuesday': 0.0, 'wednesday': 0.0}, index=[0])\n }\n\n\n for i, journey in enumerate(vjourneys):\n # Parse weekday operation times from VehicleJourney\n weekdays = get_weekday_info(journey)\n\n # Should return text\n assert isinstance(weekdays, str)\n\n # Should be either 'Sunday' or 'Saturday'\n assert weekdays in ['Sunday', 'Saturday']\n\n # Get a row of DataFrame\n calendar_info = parse_day_range(weekdays)\n\n assert_frame_equal(calendar_info, correct_frames[weekdays])\n\n\ndef test_calendar_dataframe_txc21(test_txc21_data):\n from transx2gtfs.calendar import get_weekday_info, parse_day_range\n from pandas import DataFrame\n from pandas.testing import assert_frame_equal\n import untangle\n data = untangle.parse(test_txc21_data)\n\n # Get vehicle journeys\n vjourneys = data.TransXChange.VehicleJourneys.VehicleJourney\n\n correct_frames = {'Sunday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 0.0,\n 'sunday': 1.0, 'thursday': 0.0,\n 'tuesday': 0.0, 'wednesday': 0.0}, index=[0]),\n\n 'Saturday': DataFrame({'friday': 0.0, 'monday': 0.0, 'saturday': 1.0,\n 'sunday': 0.0, 'thursday': 0.0,\n 'tuesday': 0.0, 'wednesday': 0.0}, index=[0])\n }\n\n\n for i, journey in enumerate(vjourneys):\n # Parse weekday operation times from VehicleJourney\n weekdays = get_weekday_info(journey)\n\n # Should return text\n assert isinstance(weekdays, str)\n\n # Should be either 'Sunday' or 'Saturday'\n assert weekdays in ['Sunday', 'Saturday']\n\n # Get a row of DataFrame\n calendar_info = parse_day_range(weekdays)\n\n assert_frame_equal(calendar_info, correct_frames[weekdays])\n\n\ndef test_get_calendar_tfl(test_tfl_data):\n from transx2gtfs.calendar import get_calendar\n from transx2gtfs.transxchange import get_gtfs_info\n from pandas import DataFrame\n from pandas.testing import assert_frame_equal\n import numpy as np\n import untangle\n data = 
untangle.parse(test_tfl_data)\n\n # Get gtfs info\n gtfs_info = get_gtfs_info(data)\n assert isinstance(gtfs_info, DataFrame)\n\n # Get GTFS calendar\n gtfs_calendar = get_calendar(gtfs_info)\n assert isinstance(gtfs_calendar, DataFrame)\n\n correct_frame = DataFrame({\n 'service_id': [\"1-HAM-_-y05-2675925_20190713_20190714_Sunday\",\n \"1-HAM-_-y05-2675925_20190713_20190714_Saturday\"],\n 'monday': np.int64([0, 0]), 'tuesday': np.int64([0, 0]), 'wednesday': np.int64([0, 0]),\n 'thursday': np.int64([0, 0]), 'friday': np.int64([0, 0]),\n 'saturday': np.int64([0, 1]), 'sunday': np.int64([1, 0]),\n 'start_date': [\"20190713\", \"20190713\"],\n 'end_date': [\"20190714\", \"20190714\"],\n }, index=[0, 1])\n\n try:\n # Check that the frames match\n assert_frame_equal(gtfs_calendar, correct_frame)\n\n except AssertionError as e:\n # Ignore the dtype int32/int64 difference\n if \"\"\"Attribute \"dtype\" are different\"\"\" in str(e):\n pass\n else:\n raise e\n\n\ndef test_get_calendar_txc21(test_txc21_data):\n from transx2gtfs.calendar import get_calendar\n from transx2gtfs.transxchange import get_gtfs_info\n from pandas import DataFrame\n from pandas.testing import assert_frame_equal\n import numpy as np\n import untangle\n data = untangle.parse(test_txc21_data)\n\n # Get gtfs info\n gtfs_info = get_gtfs_info(data)\n assert isinstance(gtfs_info, DataFrame)\n\n # Get GTFS calendar\n gtfs_calendar = get_calendar(gtfs_info)\n assert isinstance(gtfs_calendar, DataFrame)\n\n correct_frame = DataFrame({\n 'service_id': [\"99-PIC-B-y05-4_20200201_20200202_Sunday\",\n \"99-PIC-B-y05-4_20200201_20200202_Saturday\"],\n 'monday': np.int64([0, 0]), 'tuesday': np.int64([0, 0]), 'wednesday': np.int64([0, 0]),\n 'thursday': np.int64([0, 0]), 'friday': np.int64([0, 0]),\n 'saturday': np.int64([0, 1]), 'sunday': np.int64([1, 0]),\n 'start_date': [\"20200201\", \"20200201\"],\n 'end_date': [\"20200202\", \"20200202\"],\n }, index=[0, 1])\n\n\n try:\n # Check that the frames match\n assert_frame_equal(gtfs_calendar, correct_frame)\n \n except AssertionError as e:\n # Ignore the dtype int32/int64 difference\n if \"\"\"Attribute \"dtype\" are different\"\"\" in str(e):\n pass\n else:\n raise e\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal", "numpy.int64" ] ]
texasmichelle/open_spiel
[ "d9a9b8f9f1f44143867217fc3f6ff2db71b174b0" ]
[ "open_spiel/python/egt/visualization_test.py" ]
[ "# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for open_spiel.python.egt.visualization.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\nfrom absl.testing import absltest\n\n# pylint: disable=g-import-not-at-top\ntry:\n from matplotlib.figure import Figure\n from matplotlib.quiver import Quiver\n from matplotlib.streamplot import StreamplotSet\nexcept ImportError as e:\n logging.info(\"If your tests failed with the error 'ImportError: No module \"\n \"named functools_lru_cache', this is a known bug in matplotlib \"\n \"and there is a workaround (run sudo apt install \"\n \"python-backports.functools-lru-cache. See: \"\n \"https://github.com/matplotlib/matplotlib/issues/9344.\")\n raise ImportError(str(e))\n\nimport numpy as np\n\nfrom open_spiel.python.egt import dynamics\nfrom open_spiel.python.egt import utils\nfrom open_spiel.python.egt import visualization\nimport pyspiel\n\n\ndef _build_dynamics2x2():\n \"\"\"Build multi-population dynamics.\"\"\"\n game = pyspiel.load_game(\"matrix_pd\")\n payoff_tensor = utils.game_payoffs_array(game)\n return dynamics.MultiPopulationDynamics(payoff_tensor, dynamics.replicator)\n\n\ndef _build_dynamics3x3():\n \"\"\"Build single-population dynamics.\"\"\"\n game = pyspiel.load_game(\"matrix_rps\")\n payoff_tensor = utils.game_payoffs_array(game)\n return dynamics.SinglePopulationDynamics(payoff_tensor, dynamics.replicator)\n\n\ndef _identity_dynamics(x):\n \"\"\"Returns same input as output.\"\"\"\n return x\n\n\nclass VisualizationTest(absltest.TestCase):\n\n def test_meshgrid(self):\n n = 10\n payoff_tensor = np.ones(shape=(2, 2, 2))\n identity = lambda x, f: x\n allzero = lambda x, f: np.zeros(x.shape)\n dyn = dynamics.MultiPopulationDynamics(payoff_tensor, (identity, allzero))\n x, y, u, v = visualization._eval_dynamics_2x2_grid(dyn, n)\n np.testing.assert_allclose(x, u)\n np.testing.assert_allclose(v, np.zeros(shape=(n, n)))\n\n dyn = dynamics.MultiPopulationDynamics(payoff_tensor, (allzero, identity))\n x, y, u, v = visualization._eval_dynamics_2x2_grid(dyn, n)\n np.testing.assert_allclose(u, np.zeros(shape=(n, n)))\n np.testing.assert_allclose(y, v)\n\n def test_quiver2x2(self):\n \"\"\"Test 2x2 quiver plot.\"\"\"\n dyn = _build_dynamics2x2()\n fig = Figure(figsize=(4, 4))\n ax = fig.add_subplot(111, projection=\"2x2\")\n res = ax.quiver(dyn)\n self.assertIsInstance(res, Quiver)\n\n def test_streamplot2x2(self):\n \"\"\"Test 2x2 quiver plot.\"\"\"\n dyn = _build_dynamics2x2()\n fig = Figure(figsize=(4, 4))\n ax = fig.add_subplot(111, projection=\"2x2\")\n res = ax.streamplot(dyn)\n self.assertIsInstance(res, StreamplotSet)\n\n def test_quiver3x3(self):\n \"\"\"Test 3x3 quiver plot.\"\"\"\n dyn = _build_dynamics3x3()\n fig = Figure(figsize=(4, 4))\n ax = fig.add_subplot(111, projection=\"3x3\")\n res = ax.quiver(dyn)\n self.assertIsInstance(res, Quiver)\n\n def 
test_streamplot3x3(self):\n \"\"\"Test 3x3 quiver plot.\"\"\"\n dyn = _build_dynamics3x3()\n fig = Figure(figsize=(4, 4))\n ax = fig.add_subplot(111, projection=\"3x3\")\n res = ax.streamplot(dyn)\n self.assertIsInstance(res, visualization.SimplexStreamMask)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "matplotlib.figure.Figure", "numpy.testing.assert_allclose", "numpy.ones", "numpy.zeros" ] ]
ivivan/DualHeadSSIM
[ "6c4157873bfbbb7d16d2fa89c947eaf816c18653" ]
[ "utils/prepare_QLD.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nimport random, math, os, time\n\nfrom utils.VLSW import pad_all_cases\n# from VLSW import pad_all_cases\n# set the random seeds for reproducability\nSEED = 1234\nrandom.seed(SEED)\n\n\ndef series_to_superviesed(x_timeseries,\n y_timeseries,\n n_memory_step,\n n_forcast_step,\n split=None):\n '''\n x_timeseries: input time series data, numpy array, (time_step, features)\n y_timeseries: target time series data, numpy array, (time_step, features)\n n_memory_step: number of memory step in supervised learning, int\n n_forcast_step: number of forcase step in supervised learning, int\n split: portion of data to be used as train set, float, e.g. 0.8\n '''\n assert len(x_timeseries.shape\n ) == 2, 'x_timeseries must be shape of (time_step, features)'\n assert len(y_timeseries.shape\n ) == 2, 'y_timeseries must be shape of (time_step, features)'\n\n input_step, input_feature = x_timeseries.shape\n output_step, output_feature = y_timeseries.shape\n assert input_step == output_step, 'number of time_step of x_timeseries and y_timeseries are not consistent!'\n\n n_RNN_sample = input_step - n_forcast_step - n_memory_step + 1\n RNN_x = np.zeros((n_RNN_sample, n_memory_step, input_feature))\n RNN_y = np.zeros((n_RNN_sample, n_forcast_step, output_feature))\n\n for n in range(n_RNN_sample):\n RNN_x[n, :, :] = x_timeseries[n:n + n_memory_step, :]\n RNN_y[n, :, :] = y_timeseries[n + n_memory_step:n + n_memory_step +\n n_forcast_step, :]\n if split != None:\n assert (split <= 0.9) & (split >= 0.1), 'split not in reasonable range'\n return RNN_x[:int(split * len(RNN_x))], RNN_y[:int(split * len(RNN_x))], \\\n RNN_x[int(split * len(RNN_x)) + 1:], RNN_y[int(split * len(RNN_x)) + 1:]\n else:\n return RNN_x, RNN_y, None, None\n\n\ndef preprocess_df(df):\n \"\"\" The training and testing data are manually selected.\n :param df: dataframe with raw data\n :return:\n \"\"\"\n\n df.set_index('Timestamp', inplace=True)\n\n ## some variables are not used in training the model, based on the performance evaluation\n df.drop(['Dayofweek'], axis=1, inplace=True)\n df.drop(['Month'], axis=1, inplace=True)\n\n tw = df['NO3'].values.copy().reshape(-1, 1)\n\n # Standlization, use StandardScaler\n scaler_x = MinMaxScaler()\n scaler_x.fit(\n df[['Q', 'Conductivity', 'NO3', 'Temp', 'Turbidity','Level']])\n df[['Q', 'Conductivity', 'NO3', 'Temp', 'Turbidity','Level']] = scaler_x.transform(df[[\n 'Q', 'Conductivity', 'NO3', 'Temp', 'Turbidity','Level'\n ]])\n\n scaler_y = MinMaxScaler()\n scaler_y.fit(tw)\n y_all = scaler_y.transform(tw)\n\n # get data from 2014 and 2015\n # 6,7, 8, 9,10 as train; 11 as test\n\n df_train_one = df.loc['2019-01-01T00:00':'2019-09-30T23:00'].copy()\n # df_train_two = df.loc['2015-06-01T00:00':'2015-10-31T23:30'].copy()\n\n df_test_one = df.loc['2019-10-01T00:00':'2019-12-31T23:00'].copy()\n # df_test_two = df.loc['2015-11-01T00:00':'2015-11-30T23:30'].copy()\n\n\n # return df_train_one, df_train_two, df_test_one, df_test_two, scaler_x, scaler_y\n return df_train_one, df_test_one, scaler_x, scaler_y\n\n\n\n\ndef train_val_test_generate(dataframe, model_params):\n '''\n :param dataframe: processed dataframe\n :param model_params: for input dim\n :return: train_x, train_y, test_x, test_y with the same length (by padding zero)\n '''\n\n train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples = pad_all_cases(\n dataframe, 
dataframe['NO3'].values, model_params,\n model_params['min_before'], model_params['max_before'],\n model_params['min_after'], model_params['max_after'],\n model_params['output_length'])\n\n train_val_test_y = np.expand_dims(train_val_test_y, axis=2)\n\n return train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples\n\n\ndef train_test_split_SSIM(x, y, x_len, x_before_len, model_params, SEED):\n '''\n :param x: all x samples\n :param y: all y samples\n :param model_params: parameters\n :param SEED: random SEED\n :return: train set, test set\n '''\n\n ## check and remove samples with NaN (just incase)\n index_list = []\n for index, (x_s, y_s, len_s,\n len_before_s) in enumerate(zip(x, y, x_len, x_before_len)):\n if (np.isnan(x_s).any()) or (np.isnan(y_s).any()):\n index_list.append(index)\n\n x = np.delete(x, index_list, axis=0)\n y = np.delete(y, index_list, axis=0)\n x_len = np.delete(x_len, index_list, axis=0)\n x_before_len = np.delete(x_before_len, index_list, axis=0)\n\n print('x:{}'.format(x.shape))\n print('y:{}'.format(y.shape))\n\n # x_train, x_test, y_train, y_test = train_test_split(x,\n # y,\n # test_size=None,\n # random_state=SEED,\n # shuffle=False)\n\n # x_train_len, x_test_len = train_test_split(x_len,\n # test_size=None,\n # random_state=SEED,\n # shuffle=False)\n\n # x_train_before_len, x_test_before_len = train_test_split(x_before_len,\n # test_size=None,\n # random_state=SEED,\n # shuffle=False)\n\n\n\n # return x_train, y_train, x_train_len, x_train_before_len\n return x, y, x_len, x_before_len\n\n\ndef test_qld_single_station():\n train_sampling_params = {\n 'dim_in': 6,\n 'output_length': 6,\n 'min_before': 10,\n 'max_before': 10,\n 'min_after': 10,\n 'max_after': 10,\n 'file_path': '../data/QLD_nomiss.csv'\n }\n\n test_sampling_params = {\n 'dim_in': 6,\n 'output_length': 6,\n 'min_before': 10,\n 'max_before': 10,\n 'min_after': 10,\n 'max_after': 10,\n 'file_path': '../data/QLD_nomiss.csv'\n }\n\n filepath = 'data/QLD_nomiss.csv'\n\n df = pd.read_csv(filepath)\n\n df_train_one, df_test_one, scaler_x, scaler_y = preprocess_df(df)\n\n print('train_preprocess:{}'.format(df_train_one.shape))\n print('test_preprocess:{}'.format(df_test_one.shape))\n\n # df_train_one, df_train_two, df_test_one, df_test_two, scaler_x, scaler_y = preprocess_df(\n # df)\n\n # generate train/test samples seperately\n\n # train one\n x_samples, y_samples, x_len, x_before_len = train_val_test_generate(\n df_train_one, train_sampling_params)\n\n x_train_one, y_train_one, x_train_len_one, x_train_before_len_one = train_test_split_SSIM(\n x_samples, y_samples, x_len, x_before_len, train_sampling_params, SEED)\n\n # # train two\n # x_samples, y_samples, x_len, x_before_len = train_val_test_generate(\n # df_train_two, train_sampling_params)\n\n # x_train_two, y_train_two, x_train_len_two, x_train_before_len_two = train_test_split_SSIM(\n # x_samples, y_samples, x_len, x_before_len, train_sampling_params, SEED)\n\n # concate all train data\n\n # x_train = np.concatenate((x_train_one, x_train_two), axis=0)\n # y_train = np.concatenate((y_train_one, y_train_two), axis=0)\n\n x_train = x_train_one\n y_train = y_train_one\n\n #------------------------------#\n\n # test one\n\n x_samples, y_samples, x_len, x_before_len = train_val_test_generate(\n df_test_one, test_sampling_params)\n\n x_test_one, y_test_one, x_test_len_one, x_test_before_len_one = train_test_split_SSIM(\n x_samples, y_samples, x_len, x_before_len, test_sampling_params, SEED)\n\n # # test two\n\n # x_samples, y_samples, 
x_len, x_before_len = train_val_test_generate(\n # df_test_two, test_sampling_params)\n\n # x_test_two, y_test_two, x_test_len_two, x_test_before_len_two = train_test_split_SSIM(\n # x_samples, y_samples, x_len, x_before_len, test_sampling_params, SEED)\n\n # # concate all test data\n\n # x_test = np.concatenate((x_test_one, x_test_two), axis=0)\n # y_test = np.concatenate((y_test_one, y_test_two), axis=0)\n\n x_test = x_test_one\n y_test = y_test_one\n\n print('x_train:{}'.format(x_train.shape))\n print('y_train:{}'.format(y_train.shape))\n print('x_test:{}'.format(x_test.shape))\n print('y_test:{}'.format(y_test.shape))\n\n print('split train/test array')\n x_test_list = np.split(x_test, [10, 16], axis=1)\n x_train_list = np.split(x_train, [10, 16], axis=1)\n\n for i in x_test_list:\n print(i.shape)\n\n return (x_train, y_train), (x_test, y_test), (scaler_x, scaler_y)\n\n\nif __name__ == \"__main__\":\n _,_,_ = test_qld_single_station()\n" ]
[ [ "numpy.delete", "numpy.isnan", "numpy.zeros", "numpy.split", "sklearn.preprocessing.MinMaxScaler", "pandas.read_csv", "numpy.expand_dims" ] ]
bonejay/mdetr
[ "38c6d7c26d6d493f7bf6772ba65a72b493573d90" ]
[ "scripts/pre-training/vg_preprocessing.py" ]
[ "# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved\nimport argparse\nimport json\nimport math\nimport os\nimport pickle\nfrom collections import Counter, defaultdict\nfrom copy import deepcopy\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple\nimport sys\nPACKAGE_PARENT = \"..\"\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\nimport torch\nfrom tqdm import tqdm\nfrom utils.boxes import box_iou_helper, combine_boxes, get_boxes_equiv, obj_to_box, region_to_box, xyxy_to_xywh\nfrom utils.dump import Annotation, Datapoint\nfrom utils.spans import (\n PreprocessError,\n consolidate_spans,\n get_canonical_spans,\n span_intersect_spanlist,\n spanlist_intersect_spanlist,\n)\nfrom utils.text import get_root_and_nouns, normalize_sentence, normalize_whitespace, simplify_punctuation\nfrom utils.unionfind import UnionFind\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Visual Genome conversion script\")\n\n parser.add_argument(\n \"--dataset_path\",\n required=True,\n type=str,\n help=\"Path to the VG dataset. Should contain region graphs\",\n )\n parser.add_argument(\n \"--out_path\",\n default=None,\n type=str,\n help=\"Path where to export the resulting dataset.\",\n )\n parser.add_argument(\n \"--nb_process\",\n default=1,\n type=str,\n help=\"Number of concurrent processes to use to dump the data\",\n )\n\n return parser.parse_args()\n\n\ndef preprocess_region(region):\n filtered_region = {\n \"caption\": simplify_punctuation(normalize_whitespace(region[\"phrase\"])),\n \"original_image_id\": region[\"image_id\"],\n \"original_region_id\": region[\"region_id\"],\n \"boxes\": [],\n \"tokens_positive\": [],\n \"found_objects\": False,\n }\n if len(filtered_region[\"caption\"]) < 3:\n raise PreprocessError(\"caption too short, skipping\" + filtered_region[\"caption\"])\n _, _, root_spans, negative_spans = get_root_and_nouns(filtered_region[\"caption\"].lower(), False)\n\n # Filter objects that have multiple synsets, they are likely to be spurious\n obj_synsets = set([o[\"synsets\"][0] for o in region[\"objects\"] if len(o[\"synsets\"]) == 1])\n synsets_count = Counter([s[\"synset_name\"] for s in region[\"synsets\"]])\n # Filter synsets that occur multiple times, since we don't have mapping to objects\n all_synsets = set([s[\"synset_name\"] for s in region[\"synsets\"] if synsets_count[s[\"synset_name\"]] == 1])\n authorized_synsets = obj_synsets.intersection(all_synsets)\n syn2span: Dict[str, Tuple[int, int]] = {\n s[\"synset_name\"]: (s[\"entity_idx_start\"], s[\"entity_idx_end\"])\n for s in region[\"synsets\"]\n if s[\"synset_name\"] in authorized_synsets\n }\n\n synlist, spanlist = [], []\n for k, s in syn2span.items():\n synlist.append(k)\n spanlist.append([s])\n\n # the spans positions may have been altered by the whitespace removal, so we recompute here\n spanlist, new_caption = get_canonical_spans(spanlist, region[\"phrase\"], whitespace_only=True)\n if new_caption.lower().strip() != filtered_region[\"caption\"].lower().strip():\n raise PreprocessError(f\"Inconsistent whitespace removal: '{new_caption}' vs '{filtered_region['caption']}'\")\n\n assert len(synlist) == len(spanlist)\n syn2span = {k: v[0] for k, v in zip(synlist, spanlist)}\n\n root_objs = []\n other_objs: Dict[Tuple[int, int], List[List[int]]] = 
{}\n for obj in region[\"objects\"]:\n if len(obj[\"synsets\"]) == 1 and obj[\"synsets\"][0] in authorized_synsets:\n cur_span = syn2span[obj[\"synsets\"][0]]\n if span_intersect_spanlist(cur_span, root_spans):\n root_objs.append(obj_to_box(obj))\n filtered_region[\"found_objects\"] = True\n else:\n if cur_span not in other_objs:\n other_objs[cur_span] = []\n negative_spans.append(cur_span)\n other_objs[cur_span].append(obj_to_box(obj))\n filtered_region[\"found_objects\"] = True\n\n if len(root_objs) == 0:\n # If we don't have a box for the root of the sentence, we use the box of the region itself.\n root_objs.append(region_to_box(region))\n\n dedup_root_objs = combine_boxes(root_objs)\n filtered_region[\"boxes\"] += dedup_root_objs\n root_spans = consolidate_spans(root_spans, filtered_region[\"caption\"])\n filtered_region[\"tokens_positive\"] += [root_spans for _ in range(len(dedup_root_objs))]\n\n for span, objs in other_objs.items():\n dedup_objs = combine_boxes(objs)\n filtered_region[\"boxes\"] += dedup_objs\n cur_spans = consolidate_spans([span], filtered_region[\"caption\"])\n filtered_region[\"tokens_positive\"] += [cur_spans for _ in range(len(dedup_objs))]\n\n filtered_region[\"tokens_negative\"] = consolidate_spans(negative_spans, filtered_region[\"caption\"])\n return filtered_region\n\n\ndef deduplicate_regions(regions, iou_threshold=0.5):\n \"\"\"This functions accepts pre-processed region descriptions for a given image, and removes regions that are redundant.\n Two regions are deemed redundant if 1) the text is closely matching 2) the IOU between region boxes is > iou_threshold\n A cleaned description is returned.\n \"\"\"\n\n def helper_merge(regions):\n if len(regions) <= 1:\n return regions\n uf = UnionFind(len(regions))\n for r in regions:\n spans, txt2 = get_canonical_spans(r[\"tokens_positive\"], r[\"caption\"])\n if txt != txt2:\n raise PreprocessError(f\"inconsistent canonicalization fct. 
Mismatch: '{txt}' and '{txt2}'\")\n r[\"cano_tokens\"] = spans\n\n for r1 in range(len(regions)):\n for r2 in range(r1 + 1, len(regions)):\n compatible = True\n assert len(regions[r1][\"boxes\"]) == len(regions[r1][\"cano_tokens\"])\n assert len(regions[r2][\"boxes\"]) == len(regions[r2][\"cano_tokens\"])\n ious = box_iou_helper(regions[r1][\"boxes\"], regions[r2][\"boxes\"])\n for b1 in range(len(regions[r1][\"cano_tokens\"])):\n for b2 in range(len(regions[r2][\"cano_tokens\"])):\n if (len(regions[r1][\"cano_tokens\"][b1]) == 0 or len(regions[r2][\"cano_tokens\"][b2]) == 0) or (\n spanlist_intersect_spanlist(regions[r1][\"cano_tokens\"][b1], regions[r2][\"cano_tokens\"][b2])\n and ious[b1][b2] < iou_threshold\n ):\n compatible = False\n break\n if not compatible:\n break\n if compatible:\n uf.unite(r1, r2)\n compo2regions = defaultdict(list)\n for i, r in enumerate(regions):\n compo2regions[uf.find(i)].append(r)\n\n final_regions = []\n for reg_list in compo2regions.values():\n if len(reg_list) == 1:\n final_regions.append(reg_list[0])\n else:\n # We pick as representative of this cluster the region with the most boxes\n sorted_regions = sorted([(len(r[\"boxes\"]), i) for i, r in enumerate(reg_list)], reverse=True)\n reg_ids = [sr[1] for sr in sorted_regions]\n # We need to put the boxes and token spans in buckets\n cano_spans_buckets = []\n orig_spans_buckets = []\n boxes_buckets = []\n for idx in reg_ids:\n for b in range(len(reg_list[idx][\"boxes\"])):\n # find the bucket\n bucket = -1\n for j in range(len(cano_spans_buckets)):\n if spanlist_intersect_spanlist(reg_list[idx][\"cano_tokens\"][b], cano_spans_buckets[j]):\n bucket = j\n break\n if bucket == -1:\n # bucket not found, creating one.\n if idx != reg_ids[0]:\n # This shouldn't happen. But if it does, we give up on the merging\n return regions\n assert idx == reg_ids[0], (\n \"TODO: if this triggers, it means another regions has token spans than aren't covered by the main region.\"\n + \"We need to create a new token span, which involve finding the span in the original sentencen of the main region. 
Don't forget to update the negative tokens\"\n )\n\n bucket = len(orig_spans_buckets)\n orig_spans_buckets.append(reg_list[idx][\"tokens_positive\"][b])\n cano_spans_buckets.append(reg_list[idx][\"cano_tokens\"][b])\n boxes_buckets.append([reg_list[idx][\"boxes\"][b]])\n else:\n boxes_buckets[bucket].append(reg_list[idx][\"boxes\"][b])\n assert len(orig_spans_buckets) == len(boxes_buckets)\n merged_region = deepcopy(reg_list[reg_ids[0]])\n merged_region[\"tokens_positive\"] = []\n merged_region[\"boxes\"] = []\n for i in range(len(boxes_buckets)):\n dedup_objs = combine_boxes(boxes_buckets[i], iou_threshold=0.5)\n merged_region[\"boxes\"] += dedup_objs\n merged_region[\"tokens_positive\"] += [orig_spans_buckets[i] for _ in range(len(dedup_objs))]\n final_regions.append(merged_region)\n for r in final_regions:\n del r[\"cano_tokens\"]\n return final_regions\n\n txt2region = defaultdict(list)\n for r in regions:\n txt2region[normalize_sentence(r[\"caption\"])].append(r)\n\n stupid_sentence_set = set([\"wall\", \"side\", \"building\"])\n final_regions = []\n for txt, regions in txt2region.items():\n # Edge case, we remove the sentences like \"the wall on the side of the building\" which are uninformative and have spurious boxes\n if \"wall\" in txt and set(txt.strip().split(\" \")).issubset(stupid_sentence_set):\n continue\n if len(regions) == 1:\n final_regions.append(deepcopy(regions[0]))\n else:\n # print(txt)\n\n regions_with_boxes = [r for r in regions if r[\"found_objects\"]]\n all_boxes = sum([r[\"boxes\"] for r in regions_with_boxes], [])\n # print(\"regions with boxes\", len(regions_with_boxes))\n\n regions_without_boxes = []\n for r in regions:\n if not r[\"found_objects\"]:\n # we consider than one of the region with boxes will be better suited and drop this one\n # if there is a positive iou. Otherwise, we have to keep it\n if len(regions_with_boxes) == 0 or box_iou_helper(all_boxes, r[\"boxes\"]).max().item() < 0.1:\n regions_without_boxes.append(r)\n\n # print(\"regions without boxes\", len(regions_without_boxes))\n\n try:\n new_regions_with_boxes = helper_merge(regions_with_boxes)\n except PreprocessError as e:\n print(\"skipping\", e)\n # Ouch, hit a cornercase, we give up on the merge\n new_regions_with_boxes = regions_with_boxes\n try:\n new_regions_without_boxes = helper_merge(regions_without_boxes)\n except PreprocessError as e:\n print(\"skipping\", e)\n # Ouch, hit a cornercase, we give up on the merge\n new_regions_without_boxes = regions_without_boxes\n\n # now collapse into one big region. 
We do it only when the captions are exactly matching, otherwise it's a nightmare to recompute spans\n capt2region = defaultdict(list)\n for r in new_regions_with_boxes + new_regions_without_boxes:\n capt2region[r[\"caption\"]].append(r)\n for capt, reg_list in capt2region.items():\n all_boxes = sum([r[\"boxes\"] for r in reg_list], [])\n all_tokens = sum([r[\"tokens_positive\"] for r in reg_list], [])\n compo2boxes, compo2id = get_boxes_equiv(all_boxes, iou_threshold=0.75)\n final_boxes = []\n final_tokens = []\n if compo2boxes is not None:\n for compo in compo2boxes.keys():\n box_list = compo2boxes[compo]\n id_list = compo2id[compo]\n final_boxes.append(xyxy_to_xywh(torch.stack(box_list, 0).mean(0)).tolist())\n final_tokens.append(consolidate_spans(sum([all_tokens[i] for i in id_list], []), capt))\n else:\n final_boxes = all_boxes\n final_tokens = all_tokens\n\n merged_region = {\n \"caption\": capt,\n \"original_image_id\": reg_list[0][\"original_image_id\"],\n \"original_region_id\": reg_list[0][\"original_region_id\"],\n \"boxes\": final_boxes,\n \"tokens_positive\": final_tokens,\n \"tokens_negative\": consolidate_spans(sum([r[\"tokens_negative\"] for r in reg_list], []), capt),\n \"found_objects\": False,\n }\n final_regions.append(merged_region)\n\n return final_regions\n\n\ndef _get_all_datapoints(output_path: Path, img_list, proc_id: int):\n # image2ann_map = defaultdict(lambda: defaultdict(list))\n print(f\"process {proc_id} got job queue of\", len(img_list))\n all_datapoints: List[Datapoint] = []\n for i, data in enumerate(tqdm(img_list)):\n # print(f\"status {i}/{len(img_list)}\")\n all_regions = []\n for r in data[\"regions\"]:\n try:\n all_regions.append(preprocess_region(r))\n except PreprocessError as e:\n print(\"Dropping region, preprocess failed\", e)\n all_regions = deduplicate_regions(all_regions)\n\n # all_regions = deduplicate_regions([preprocess_region(r) for r in data[\"regions\"]])\n\n for region in all_regions:\n cur_datapoint = Datapoint(\n image_id=data[\"image_id\"],\n dataset_name=\"VG\",\n tokens_negative=region[\"tokens_negative\"],\n original_id=region[\"original_region_id\"],\n caption=region[\"caption\"],\n annotations=[],\n )\n assert len(region[\"boxes\"]) == len(region[\"tokens_positive\"])\n converted_bbox = torch.as_tensor(region[\"boxes\"], dtype=torch.float)\n areas = converted_bbox[:, -1] * converted_bbox[:, -2]\n # Convert to (x,y,x,y) format\n converted_bbox[:, 2:] += converted_bbox[:, :2]\n for i in range(len(region[\"boxes\"])):\n cur_ann = Annotation(\n area=float(areas[i]),\n iscrowd=0,\n category_id=1,\n bbox=region[\"boxes\"][i],\n giou_friendly_bbox=converted_bbox[i].tolist(),\n tokens_positive=region[\"tokens_positive\"][i],\n )\n cur_datapoint.annotations.append(cur_ann)\n all_datapoints.append(cur_datapoint)\n\n print(f\"Process {proc_id} dumping...\")\n pickle.dump(all_datapoints, open(output_path / f\"vg_train_dump_{proc_id}.pkl\", \"wb\"))\n print(f\"Process {proc_id} done.\")\n del all_datapoints\n return None\n # return image2ann_map\n\n\ndef chunk_list(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\ndef get_all_datapoints(dataset_path: Path, output_path: Path, nb_proc: int):\n\n print(\"loading region graphs....\")\n with open(dataset_path / \"region_graphs.json\", \"r\") as f:\n VG_region_graph = json.load(f)\n\n print(\"loading success!\")\n\n # return _get_image2ann_mapping(VG_region_graph)\n chunks = list(chunk_list(VG_region_graph, 
math.ceil(len(VG_region_graph) / (18 * nb_proc))))\n # sub_part = sum(chunks[:3], [])\n # chunks = list(chunk_list(sub_part, math.ceil(len(sub_part) / nb_proc)))\n proc_id = list(range(len(chunks)))\n # assert len(chunks) == nb_proc\n with Pool(nb_proc) as p:\n p.starmap(partial(_get_all_datapoints, output_path), zip(chunks, proc_id))\n\n return None\n\n\ndef main(args):\n vg_path = Path(args.dataset_path)\n\n output_path = Path(args.out_path) if args.out_path is not None else vg_path\n\n os.makedirs(str(output_path), exist_ok=True)\n\n get_all_datapoints(vg_path, output_path, int(args.nb_process))\n\n\nif __name__ == \"__main__\":\n main(parse_args())\n" ]
[ [ "torch.as_tensor", "torch.stack" ] ]
Vsevolod-pl/hivemind
[ "0300cfd91adeb14d91d9659a98221628f9b775b9" ]
[ "benchmarks/benchmark_tensor_compression.py" ]
[ "import argparse\nimport time\n\nimport torch\n\nfrom hivemind.proto.runtime_pb2 import CompressionType\nfrom hivemind.utils.compression import serialize_torch_tensor, deserialize_torch_tensor\n\n\ndef benchmark_compression(tensor: torch.Tensor, compression_type: CompressionType) -> float:\n t = time.time()\n deserialize_torch_tensor(serialize_torch_tensor(tensor, compression_type))\n return time.time() - t\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--size', type=int, default=10000000, required=False)\n parser.add_argument('--seed', type=int, default=7348, required=False)\n parser.add_argument('--num_iters', type=int, default=30, required=False)\n\n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n X = torch.randn(args.size)\n\n for name, compression_type in CompressionType.items():\n tm = 0\n for i in range(args.num_iters):\n tm += benchmark_compression(X, compression_type)\n tm /= args.num_iters\n print(f\"Compression type: {name}, time: {tm}\")\n" ]
[ [ "torch.manual_seed", "torch.randn" ] ]
montigno/mri_works
[ "8ec6ff1500aa34d3540e44e4b0148023cf821f61", "8ec6ff1500aa34d3540e44e4b0148023cf821f61" ]
[ "mri_works/NodeEditor/modules/Skimage/Morphology.py", "mri_works/NodeEditor/modules/C/Maths.py" ]
[ "from h5py.h5t import np\nclass remove_small_holes:\n    def __init__(self, image=[[0.0]], area_threshold=64, **options):\n        from skimage import morphology\n        import numpy as np\n        self.a = np.array(image, dtype=bool)\n        for sl1 in range(self.a.shape[2]):\n            self.a[:, :, sl1] = morphology.remove_small_holes(self.a[:, :, sl1], area_threshold, **options)\n    \n    def image_cln(self:'array_float'):\n        return np.array(self.a, dtype=float)\n    \n###########################################################\n\nclass remove_small_objects:\n    def __init__(self, image=[[0.0]], min_size=64, **options):\n        from skimage import morphology\n        import numpy as np\n        self.a = np.array(image, dtype=bool)\n        for sl1 in range(self.a.shape[2]):\n            self.a[:, :, sl1] = morphology.remove_small_objects(self.a[:, :, sl1], min_size, **options)\n    \n    def image_cln(self:'array_float'):\n        return np.array(self.a, dtype=float)\n    \n###########################################################\n\nclass skimage_ball:\n    def __init__(self, radius=1):\n        from skimage.morphology import ball\n        self.ball = ball(radius)\n    \n    def sk_ball(self:'array_float'):\n        return self.ball\n\n###########################################################\n\nclass skimage_erosion:\n    def __init__(self, image=[[0.0]], **options):\n        from skimage.morphology import erosion\n        import numpy as np\n        self.eroded = erosion(np.array(image), **options)\n    \n    def sk_erosion(self:'array_float'):\n        return self.eroded\n    \n###########################################################\n\nclass skimage_dilation:\n    def __init__(self, image=[[0.0]], **options):\n        from skimage.morphology import dilation\n        import numpy as np\n        self.dilated = dilation(np.array(image), **options)\n    \n    def sk_dilation(self:'array_float'):\n        return self.dilated\n    \n###########################################################\n\nclass skimage_white_tophat:\n    def __init__(self, image=[[0.0]], **options):\n        from skimage.morphology import white_tophat\n        import numpy as np\n        self.wt = white_tophat(np.array(image), **options)\n    \n    def sk_white_tophat(self:'array_float'):\n        return self.wt\n    \n###########################################################\n\nclass skimage_black_tophat:\n    def __init__(self, image=[[0.0]], **options):\n        from skimage.morphology import black_tophat\n        import numpy as np\n        self.bt = black_tophat(np.array(image), **options)\n    \n    def sk_black_tophat(self:'array_float'):\n        return self.bt\n\n###########################################################\n\nclass skimage_opening:\n    def __init__(self, image=[[0.0]], **options):\n        from skimage.morphology import opening\n        import numpy as np\n        self.op = opening(np.array(image), **options)\n    \n    def sk_opening(self:'array_float'):\n        return self.op\n\n###########################################################\n\nclass skimage_closing:\n    def __init__(self, image=[[0.0]], **options):\n        from skimage.morphology import closing\n        import numpy as np\n        self.cl = closing(np.array(image), **options)\n    \n    def sk_closing(self:'array_float'):\n        return self.cl\n\n###########################################################\n\nclass skimage_convex_hull_image:\n    def __init__(self, image=[[0.0]], **options):\n        from skimage.morphology import convex_hull_image\n        import numpy as np\n        self.ch = convex_hull_image(image, **options)\n    \n    def sk_convex(self:'array_bool'):\n        return self.ch\n    ",
"class fact:\n    def __init__(self, enter_int=0):\n\n        from numpy.ctypeslib import load_library\n        cal = load_library('lib_fact.so', './NodeEditor/modules/C/sources/')\n        self.out = cal.fact(enter_int)\n\n    def factorial(self: 'int'):\n        return self.out\n" ]
[ [ "numpy.array" ], [ "numpy.ctypeslib.load_library" ] ]