repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
ZitongLu1996/PyCTRSA | ["7b243930321c089e235c9fc1e771b6432d530819"] | ["test/t_normalrdmbased.py"] | [
"# -*- coding: utf-8\n\n\"\"\"\n@File : t_normalrdmbased.py\n@Author : Zitong Lu\n@Contact : zitonglu1996@gmail.com\n@License : MIT License\n\"\"\"\n\nimport numpy as np\nimport unittest\nfrom pyctrsa.ctsimilarity.normalrdmbased import ctsimilarities_cal\n\nclass test_normalrdmbased(unittest.TestCase):\n\n def test_ctsimilarities_cal(self):\n\n RDMs = np.random.rand(20, 6, 6)\n CTSimilarities = ctsimilarities_cal(RDMs)\n self.assertEqual(CTSimilarities.shape[0], 20)\n self.assertEqual(len(CTSimilarities.shape), 3)\n\n RDMs = np.random.rand(5, 20, 6, 6)\n CTSimilarities = ctsimilarities_cal(RDMs)\n self.assertEqual(CTSimilarities.shape[0], 5)\n self.assertEqual(len(CTSimilarities.shape), 4)\n\n RDMs = np.random.rand(5, 4, 20, 6, 6)\n CTSimilarities = ctsimilarities_cal(RDMs)\n self.assertEqual(CTSimilarities.shape[0], 5)\n self.assertEqual(len(CTSimilarities.shape), 5)"
] | [["numpy.random.rand"]] |
Pat-Laub/pyABC | ["f23f0ff8d430a8ce0a0c8253b45e19add9121992"] | ["test/test_bytesstorage.py"] | [
"import pytest\nfrom pyabc.storage.bytes_storage import to_bytes, from_bytes\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nfrom rpy2.robjects import r\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\n\n\n@pytest.fixture(params=[\"empty\", \"int\", \"float\", \"non_numeric_str\",\n \"numeric_str\", \"int-float-numeric_str\",\n \"int-float-non_numeric_str-str_ind\",\n \"int-float-numeric_str-str_ind\",\n \"py-int\",\n \"py-float\",\n \"py-str\",\n \"r-df-cars\",\n \"r-df-faithful\" # TODO re-add iris, see #45\n ])\ndef object_(request):\n par = request.param\n if par == \"empty\":\n return pd.DataFrame()\n if par == \"int\":\n return pd.DataFrame({\"a\": sp.random.randint(-20, 20, 100),\n \"b\": sp.random.randint(-20, 20, 100)})\n if par == \"float\":\n return pd.DataFrame({\"a\": sp.randn(100),\n \"b\": sp.randn(100)})\n if par == \"non_numeric_str\":\n return pd.DataFrame({\"a\": [\"foo\", \"bar\"],\n \"b\": [\"bar\", \"foo\"]})\n\n if par == \"numeric_str\":\n return pd.DataFrame({\"a\": list(map(str, sp.randn(100))),\n \"b\": list(map(str,\n sp.random.randint(-20, 20, 100)))})\n if par == \"int-float-numeric_str\":\n return pd.DataFrame({\"a\": sp.random.randint(-20, 20, 100),\n \"b\": sp.randn(100),\n \"c\": list(map(str,\n sp.random.randint(-20, 20, 100)))})\n if par == \"int-float-non_numeric_str-str_ind\":\n return pd.DataFrame({\"a\": [1, 2],\n \"b\": [1.1, 2.2],\n \"c\": [\"foo\", \"bar\"]},\n index=[\"first\", \"second\"])\n if par == \"int-float-numeric_str-str_ind\":\n return pd.DataFrame({\"a\": [1, 2],\n \"b\": [1.1, 2.2],\n \"c\": [\"1\", \"2\"]},\n index=[\"first\", \"second\"])\n if par == \"py-int\":\n return 42\n if par == \"py-float\":\n return 42.42\n if par == \"py-str\":\n return \"foo bar\"\n if par == \"np-int\":\n return sp.random.randint(-20, 20, 100)\n if par == \"np-float\":\n return sp.random.randn(100)\n if par == \"r-df-cars\":\n return r[\"mtcars\"]\n if par == \"r-df-iris\":\n return r[\"iris\"]\n if par == \"r-df-faithful\":\n return r[\"faithful\"]\n raise Exception(\"Invalid Test DataFrame Type\")\n\n\ndef test_storage(object_):\n serial = to_bytes(object_)\n assert isinstance(serial, bytes)\n rebuilt = from_bytes(serial)\n\n if not isinstance(object_, robjects.DataFrame):\n assert isinstance(object_, type(rebuilt))\n\n if isinstance(object_, int):\n assert object_ == rebuilt\n elif isinstance(object_, float):\n assert object_ == rebuilt\n elif isinstance(object_, str):\n assert object_ == rebuilt\n elif isinstance(object_, np.ndarray):\n assert (object_ == rebuilt).all()\n elif isinstance(object_, pd.DataFrame):\n assert (object_ == rebuilt).all().all()\n elif isinstance(object_, robjects.DataFrame):\n assert (pandas2ri.ri2py(object_) == rebuilt).all().all()\n else:\n raise Exception(\"Could not compare\")\n"
] | [["scipy.randn", "pandas.DataFrame", "scipy.random.randint", "scipy.random.randn"]] |
Pratere/stateoftheuniverse | ["2ad341cb9f0a45b8a624ba23a2dc3224e03de455"] | ["stateoftheuniverse/widgets/constellations.py"] | [
"\"\"\"\nGet a list of constellations that will be visible from a location on the \nearth as a given time.\n\"\"\"\n# -------------------------\n# Imports\n# ------------------------\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\nimport warnings\n\nfrom datetime import datetime as dt\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord, AltAz, get_constellation, EarthLocation\nfrom astropy.time import Time\nimport numpy as np\nfrom typing import Optional\nfrom stateoftheuniverse.widgets.prototypes import WidgetPrototype\nfrom stateoftheuniverse.widgets.utils import stringdecorator\n\nwarnings.filterwarnings('ignore', category=AstropyDeprecationWarning)\n\n\n# -------------------------\n# Function Definitions\n# ------------------------\nclass ConstellationsWidget(WidgetPrototype):\n \"\"\"\n A widget that collects and holds list of constellations which will\n be in the sky at the users location at midnight\n\n Args:\n longitude: the longitude of the user\n latitude: the latitude of the user\n datetime: a datetime.datetime object in UTC\n \"\"\"\n\n def __init__(self,\n longitude: Optional[float] = None,\n latitude: Optional[float] = None,\n datetime: Optional[dt] = None):\n super().__init__(longitude=longitude,\n latitude=latitude,\n datetime=datetime)\n\n self.height = 1500\n\n self.location = EarthLocation.from_geodetic(lon=self.longitude * u.degree,\n lat=self.latitude * u.degree,\n height=self.height * u.meter)\n\n if self.datetime is None:\n self.datetime = dt.now()\n self.datetime = str(self.datetime)[:10] + ' 23:00:00'\n self.time = Time(self.datetime)\n\n else:\n self.time = Time(str(self.datetime)[:10]+' 23:00:00')\n\n self.alt, self.az = np.meshgrid(np.arange(5, 85, 5), np.arange(5, 355, 5))\n self.alt = self.alt.ravel()\n self.az = self.az.ravel()\n\n self.dome = SkyCoord(az=self.az * u.degree,\n alt=self.alt * u.degree,\n frame=AltAz(obstime=self.time, location=self.location))\n self.constellations = None\n self.name = \"TONIGHT'S CONSTELLATIONS\"\n self.dict_name = \"consts\"\n\n def get_data(self):\n \"\"\"\n Update and store list of tonight's constellations, based on the users\n location. 
Uses a matrix of points on the sky to retrieve constellations\n that they are located in.\n \"\"\"\n self.constellations = list(set(get_constellation(self.dome)))\n self.constellations.sort()\n return self.constellations\n\n @stringdecorator\n def get_string(self):\n \"\"\"\n Return formatted output string of visible constellations.\n \"\"\"\n if self.constellations is None:\n self.get_data()\n\n string = '\\n\\t'.join(self.constellations)\n return string\n\n def check_const(self, const_check):\n \"\"\"\n Return bool or list of bools for if a given constellation will be in visible on data.\n \"\"\"\n\n if self.constellations is None:\n self.get_data()\n if type(const_check) == str:\n if const_check.lower() in [constellation.lower() for constellation in self.constellations]:\n return f\"{const_check} will be visible tonight.\"\n else:\n return f\"{const_check} will not be visible tonight.\"\n elif type(const_check) == list:\n avail_consts = []\n for const in const_check:\n if const.lower() in [constellation.lower() for constellation in self.constellations]:\n avail_consts.append(f\"{const} will be visible tonight.\")\n else:\n avail_consts.append(f\"{const} will not be visible tonight.\")\n return avail_consts\n else:\n print(\"Function takes string or list of stings\")\n return False\n\n\nif __name__ == \"__main__\":\n const = ConstellationsWidget(longitude=52.2053, latitude=0.1218)\n\n print(const.get_string())\n\n for constellation in const.constellations:\n if not const.check_const(str(constellation)):\n print(\"Failed to find \" + constellation)\n\n print(const.check_const(const.constellations))\n"
] | [["numpy.arange"]] |
marjanin/tendon_stiffness | ["b1dc379b09bbf9c044410a6bc51afbee0cba2e05"] | ["archive/transfer_learning_runresults2.py"] | [
"\n# next is to add accel and see the difference\n# add stiffness too\nimport numpy as np\nfrom scipy import signal, stats\nfrom matplotlib import pyplot as plt\nfrom all_functions import *\nimport pickle\nfrom warnings import simplefilter\nsimplefilter(action='ignore', category=FutureWarning)\n\nexperiment_ID = \"transfer_learning_6\"\nerrors_all_A_A = np.load(\"./results/{}/errors_all_A_A.npy\".format(experiment_ID))\nerrors_all_A_B = np.load(\"./results/{}/errors_all_A_B.npy\".format(experiment_ID))\nerrors_all_B_B = np.load(\"./results/{}/errors_all_B_B.npy\".format(experiment_ID))\n## printing the results\nprint(\"errors_mean: \",errors_all_A_A.mean(2))\nprint(\"errors_std: \",errors_all_A_A.std(2))\nprint(\"errors_mean: \",errors_all_A_B.mean(2))\nprint(\"errors_std: \",errors_all_A_B.std(2))\nprint(\"errors_mean: \",errors_all_B_B.mean(2))\nprint(\"errors_std: \",errors_all_B_B.std(2))\n[f_ow, p_val_avg] = stats.f_oneway(errors_all_A_A.mean(0)[0],errors_all_A_B.mean(0)[0])\nprint(\"p-value (babbling/average/A_A vs A_B): \", p_val_avg)\n[f_ow, p_val_avg] = stats.f_oneway(errors_all_A_A.mean(0)[1],errors_all_A_B.mean(0)[1])\nprint(\"p-value (refined/average/A_A vs A_B): \", p_val_avg)\n[f_ow, p_val_avg] = stats.f_oneway(errors_all_A_A.mean(0)[1],errors_all_B_B.mean(0)[1])\nprint(\"p-value (refined/average/A_A vs B_B): \", p_val_avg)\n# [f_ow, p_val_q0] = stats.f_oneway(errors_all_A_A[0,:],errors_all_A_B[0,:])\n# print(\"p-value (q0): \", p_val_q0)\n# [f_ow, p_val_q1] = stats.f_oneway(errors_all_A_A[1,:],errors_all_A_B[1,:])\n# print(\"p-value (q1): \", p_val_q1)\ny_lim=[0, 0.9]\nfig, axes = plt.subplots(nrows=2, ncols=3, figsize=(12, 5))\np0 = axes[0][0].boxplot(\n\t[errors_all_A_A.mean(0)[0], errors_all_A_B.mean(0)[0], errors_all_B_B.mean(0)[0]],\n\tnotch=True,\n\tpatch_artist=True)\naxes[0][0].set_title(r'$(q_0+q_1)/2$',fontsize=12)\naxes[0][0].set_ylim(y_lim)\n#axes[0].set_xlabel('stiffness')\naxes[0][0].set_xticklabels([\"A_A\", \"A_B\", \"B_B\"], rotation=45, fontsize=8)\naxes[0][0].set_ylabel('RMSE')\np1 = axes[0][1].boxplot(\n\t[errors_all_A_A[0,0,:], errors_all_A_B[0,0,:], errors_all_B_B[0,0,:]],\n\tnotch=True,\n\tpatch_artist=True)\naxes[0][1].set_title('$q_0$', fontsize=12)\naxes[0][1].set_ylim(y_lim)\naxes[0][1].set_yticklabels([])\n#axes[1].set_xlabel('stiffness')\naxes[0][1].set_xticklabels([\"A_A\", \"A_B\", \"B_B\"], rotation=45, fontsize=8)\np2 = axes[0][2].boxplot(\n\t[errors_all_A_A[1,0,:], errors_all_A_B[1,0,:], errors_all_B_B[1,0,:]],\n\tnotch=True,\n\tpatch_artist=True)\naxes[0][2].set_title('$q_1$', fontsize=12)\naxes[0][2].set_ylim(y_lim)\naxes[0][2].set_yticklabels([])\n#axes[2].set_xlabel('stiffness')\naxes[0][2].set_xticklabels([\"A_A\", \"A_B\", \"B_B\"], rotation=45, fontsize=8)\n\np3 = axes[1][0].boxplot(\n\t[errors_all_A_A.mean(0)[-1], errors_all_A_B.mean(0)[-1], errors_all_B_B.mean(0)[-1]],\n\tnotch=True,\n\tpatch_artist=True)\n#axes[1][0].set_title(r'$(q_0+q_1)/2$',fontsize=12)\naxes[1][0].set_ylim(y_lim)\n#axes[0].set_xlabel('stiffness')\naxes[1][0].set_xticklabels([\"A_A\", \"A_B\", \"B_B\"], rotation=45, fontsize=8)\naxes[1][0].set_ylabel('RMSE')\np4 = axes[1][1].boxplot(\n\t[errors_all_A_A[0,-1,:], errors_all_A_B[0,-1,:], errors_all_B_B[0,-1,:]],\n\tnotch=True,\n\tpatch_artist=True)\n#axes[1][1].set_title('$q_0$', fontsize=12)\naxes[1][1].set_ylim(y_lim)\naxes[1][1].set_yticklabels([])\n#axes[1].set_xlabel('stiffness')\naxes[1][1].set_xticklabels([\"A_A\",\"A_B\", \"B_B\"], rotation=45, fontsize=8)\np5 = 
axes[1][2].boxplot(\n\t[errors_all_A_A[1,-1,:], errors_all_A_B[1,-1,:], errors_all_B_B[1,-1,:]],\n\tnotch=True,\n\tpatch_artist=True)\n#axes[1][2].set_title('$q_1$', fontsize=12)\naxes[1][2].set_ylim(y_lim)\naxes[1][2].set_yticklabels([])\n#axes[2].set_xlabel('stiffness')\naxes[1][2].set_xticklabels([\"A_A\",\"A_B\",\"B_B\"], rotation=45, fontsize=8)\n\nfor i_row in range(2):\n\tfor j_col in range(3):\n\t\taxes[i_row][j_col].grid(True)\nplt.show()\n#import pdb; pdb.set_trace()\n\n\n\n"
] | [["matplotlib.pyplot.show", "matplotlib.pyplot.subplots"]] |
marcjour303/PytML | ["99d8b799dbfe1d9a82f0bcc3648aaeb147b7298f", "99d8b799dbfe1d9a82f0bcc3648aaeb147b7298f"] | ["Chapter09/extract_stats.py", "Chapter06/euclidean_score.py"] | [
"import pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom convert_to_timeseries import convert_data_to_timeseries\n\n# Input file containing data\ninput_file = 'data_timeseries.txt'\n\n# Load data\ndata1 = convert_data_to_timeseries(input_file, 2)\ndata2 = convert_data_to_timeseries(input_file, 3)\ndataframe = pd.DataFrame({'first': data1, 'second': data2})\n\n# Print max and min\nprint('Maximum:\\n', dataframe.max())\nprint('Minimum:\\n', dataframe.min())\n\n# Print mean\nprint('Mean:\\n', dataframe.mean())\nprint('Mean row-wise:\\n', dataframe.mean(1)[:10])\n\n# Plot rolling mean\nDFMean = dataframe.rolling(window=24).mean()\nplt.plot(DFMean)\n\n# Print correlation coefficients\nprint('Correlation coefficients:\\n', dataframe.corr())\n\n# Plot rolling correlation\nplt.figure()\n\nDFCorr= dataframe.rolling(window=60).corr(pairwise=False)\nplt.plot(DFCorr)\nplt.show()\n\n",
"import json\nimport numpy as np\n \n# Returns the Euclidean distance score between user1 and user2 \ndef euclidean_score(dataset, user1, user2):\n if user1 not in dataset:\n raise TypeError('User ' + user1 + ' not present in the dataset')\n\n if user2 not in dataset:\n raise TypeError('User ' + user2 + ' not present in the dataset')\n\n # Movies rated by both user1 and user2\n rated_by_both = {} \n\n for item in dataset[user1]:\n if item in dataset[user2]:\n rated_by_both[item] = 1\n\n # If there are no common movies, the score is 0 \n if len(rated_by_both) == 0:\n return 0\n\n squared_differences = [] \n\n for item in dataset[user1]:\n if item in dataset[user2]:\n squared_differences.append(np.square(dataset[user1][item] - dataset[user2][item]))\n \n return 1 / (1 + np.sqrt(np.sum(squared_differences))) \n\nif __name__=='__main__':\n data_file = 'movie_ratings.json'\n\n with open(data_file, 'r') as f:\n data = json.loads(f.read())\n\n user1 = 'John Carson'\n user2 = 'Michelle Peterson'\n\n print(\"Euclidean score:\")\n print(euclidean_score(data, user1, user2)) \n"
] | [["pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure"], ["numpy.square", "numpy.sum"]] |
J-Fit/JFit | ["85c67aaca0295a75714db2011e35222dabf50c38"] | ["JFit/physics/nu_oscillation/Prob_e2e.py"] | [
"# -*- coding: utf-8 -*-\n''' pure nue -> nue\nauthor: Jinnan Zhang\nzhangjinnan@ihep.ac.cn\ndate: 2021.03.08\n'''\nimport numpy as np\n\ndef get_parser():\n import argparse\n parser = argparse.ArgumentParser(description=\"Check or show the oscillation pattern.\")\n parser.add_argument(\n \"--cME\",\n action=\"store_true\",\n help=\"Check the matter effect model, the JUNO Yellow book model, the Yufeng model, and the Hiroshi model.\"\n )\n parser.set_defaults(cME=False)\n \n parser.add_argument(\"--pNMO\",\n action=\"store_true\",\n help=\"Show pattern of neutrino mass ordering.\")\n parser.set_defaults(pNMO=False)\n parser.add_argument(\n \"--NMO-op\",\n type=str,\n help=\"The NMO show option.\")\n\n return parser\n\nclass Prob_e2e:\n def __init__(self, NMO=1, ME=True, NameSpace='PDG2020'):\n self.NameSpace = NameSpace\n if NMO == 1:\n self.NMO = 'normal' # mass ordering, 1 for normal, others for invert\n else:\n self.NMO = 'invert'\n self.ME = ME # in Matter or not\n import os\n import yaml\n curPath = os.path.dirname(os.path.realpath(__file__))\n OsciPar_yamlPath = curPath + \"/data/OscillationParameters.yaml\"\n f_osci_par = open(OsciPar_yamlPath)\n self.OsciPar = yaml.load(f_osci_par.read(), Loader=yaml.Loader)\n self.Sin_sqTheta12 = self.OsciPar[self.NameSpace][self.NMO]['sinsq12']\n self.DeltaM21_sq = self.OsciPar[self.NameSpace][self.NMO]['dmsq21']\n self.DeltaM31_sq = self.OsciPar[self.NameSpace][self.NMO]['dmsq31']\n self.DeltaM32_sq = self.OsciPar[self.NameSpace][self.NMO]['dmsq32']\n self.Sin_sqTheta13 = self.OsciPar[self.NameSpace][self.NMO]['sinsq13']\n\n self.matter_density = self.OsciPar['MatterDensity']\n self.cal_matter_potential()\n\n def out(self):\n print(self.A_MatPoten_0)\n\n def cal_matter_potential(self):\n M_unified_atomic_kg = 1.6605390666e-27\n N_e = self.matter_density / M_unified_atomic_kg / 2.0\n hbar_C = 197.3269804 # MeV.fm\n G_F = 1.1663787e-5\n #- sign for antineutrinos\n self.A_MatPoten_0 = -2 * np.sqrt(\n 2) * G_F * N_e * hbar_C * hbar_C * hbar_C * 1e-39\n\n def get_prob_e2e_Amir(self, Enu, baseline, ME=True):\n '''\n Enu: MeV, baseline: cm\n\n Based on: arXiv:1910.12900, from Amir N. Khan, Hiroshi Nunokawa,...\n '''\n Sin_sqTheta12 = self.Sin_sqTheta12\n DeltaM21_sq = self.DeltaM21_sq\n DeltaM31_sq = self.DeltaM31_sq\n DeltaM32_sq = self.DeltaM32_sq\n Sin_sqTheta13 = self.Sin_sqTheta13\n\n E = Enu\n BaseLine = baseline * 1e-2 # cm to m\n prob = 0\n Sinsq2Theta12 = (4 * self.Sin_sqTheta12 * (1 - self.Sin_sqTheta12))\n Sinsq2Theta13 = (4 * self.Sin_sqTheta13 * (1 - self.Sin_sqTheta13))\n if ME:\n # reverse the relation\n A_MatPoten = E * self.A_MatPoten_0\n # eq. 6\n DeltaMsq_ee = DeltaM31_sq * (1 - Sin_sqTheta12) + DeltaM32_sq * Sin_sqTheta12 \n \n Cos_2Theta_12 = 1 - 2 * Sin_sqTheta12\n Cos_2Theta_13 = 1 - 2 * Sin_sqTheta13\n # eq. 8\n DeltaMsq_ee_M = DeltaMsq_ee*np.sqrt((Cos_2Theta_13-A_MatPoten/DeltaMsq_ee)**2+Sinsq2Theta13)\n # eq. 7 \n Cos_2Theta_13_M=(DeltaMsq_ee*Cos_2Theta_13-A_MatPoten)/DeltaMsq_ee_M\n # eq. 11\n A_MatPoten_prime=0.5*(A_MatPoten+DeltaMsq_ee-DeltaMsq_ee_M)\n # eq.12\n Cos_sq_theta13M_minus_theta13=(DeltaMsq_ee_M+DeltaMsq_ee-A_MatPoten*Cos_2Theta_13)*0.5/DeltaMsq_ee_M\n # eq. 10 \n DeltaM21_sq_M= DeltaM21_sq*np.sqrt((Cos_2Theta_12-A_MatPoten_prime/DeltaM21_sq)**2+Cos_sq_theta13M_minus_theta13*Sinsq2Theta12)\n # eq. 
9\n Cos_2Theta_12_M=(DeltaM21_sq*Cos_2Theta_12-A_MatPoten_prime)/DeltaM21_sq_M\n\n\n Sin_sqTheta13_M=(1-Cos_2Theta_13_M)/2\n Sinsq2Theta13_M = 1-Cos_2Theta_13_M*Cos_2Theta_13_M\n\n Sin_sqTheta12_M = (1-Cos_2Theta_12_M)/2\n Sinsq2Theta12_M=1-Cos_2Theta_12_M*Cos_2Theta_12_M\n\n DeltaM31_sq_M = DeltaMsq_ee_M+Sin_sqTheta12_M*DeltaM21_sq_M\n DeltaM32_sq_M = DeltaM31_sq_M - DeltaM21_sq_M\n Delta21 = 1.266932679815373 * DeltaM21_sq_M * BaseLine / E\n Delta31 = 1.266932679815373 * DeltaM31_sq_M * BaseLine / E\n Delta32 = 1.266932679815373 * DeltaM32_sq_M * BaseLine / E\n prob = 1. - Sinsq2Theta13_M * (\n (1 - Sin_sqTheta12_M) * np.sin(Delta31)**2. +\n Sin_sqTheta12_M * np.sin(Delta32)**2.) - (\n (1 - Sin_sqTheta13_M)**\n 2.) * Sinsq2Theta12_M * np.sin(Delta21)**2.\n \n else:\n Delta21 = 1.266932679815373 * self.DeltaM21_sq * BaseLine / E\n Delta31 = 1.266932679815373 * self.DeltaM31_sq * BaseLine / E\n Delta32 = 1.266932679815373 * self.DeltaM32_sq * BaseLine / E\n prob = 1. - Sinsq2Theta13 * (\n (1 - self.Sin_sqTheta12) * np.sin(Delta31)**2. +\n self.Sin_sqTheta12 * np.sin(Delta32)**2.) - (\n (1 - self.Sin_sqTheta13)**\n 2.) * Sinsq2Theta12 * np.sin(Delta21)**2. \n return prob\n\n def get_prob_e2e_Yufeng(self, Enu, baseline, ME=True):\n '''\n Enu: MeV, baseline: cm\n\n Based on: https://juno.ihep.ac.cn/cgi-bin/Dev_DocDB/ShowDocument?docid=6859\n '''\n Sin_sqTheta12 = self.Sin_sqTheta12\n DeltaM21_sq = self.DeltaM21_sq\n DeltaM31_sq = self.DeltaM31_sq\n DeltaM32_sq = self.DeltaM32_sq\n Sin_sqTheta13 = self.Sin_sqTheta13\n\n E = Enu\n BaseLine = baseline * 1e-2 # cm to m\n prob = 0\n Sinsq2Theta12 = (4 * self.Sin_sqTheta12 * (1 - self.Sin_sqTheta12))\n Sinsq2Theta13 = (4 * self.Sin_sqTheta13 * (1 - self.Sin_sqTheta13))\n if ME:\n # reverse the relation, for neutrino\n A_MatPoten = E * self.A_MatPoten_0\n Delta_c = DeltaM31_sq * (1 - Sin_sqTheta12) + DeltaM32_sq * Sin_sqTheta12 # eq. 8\n alpha_c = DeltaM21_sq / Delta_c # eq .8\n A_star = A_MatPoten * (1 - Sin_sqTheta13) / DeltaM21_sq # eq .9\n A_c = A_MatPoten / Delta_c # eq. 9\n Cos_2Theta_12 = 1 - 2 * Sin_sqTheta12\n Cos_2Theta_13 = 1 - 2 * Sin_sqTheta13\n # C_hat_12 = np.sqrt(1 - 2.0 * A_star * Cos_2Theta_12 +A_star * A_star)\n # C_hat_13 = np.sqrt(1 - 2.0 * A_c * Cos_2Theta_13 +A_c * A_c)\n C_hat_12_prime = np.sqrt(1 - 2.0 * A_star * Cos_2Theta_12 +A_star * A_star)\n C_hat_13_prime = np.sqrt(1 - 2.0 * A_c * Cos_2Theta_13 + A_c * A_c)\n\n Cos_sq_Theta12_tilde = 0.5*(1-(A_star-Cos_2Theta_12)/C_hat_12_prime)\n Cos_sq_Theta13_tilde = 0.5*(1-(A_c-Cos_2Theta_13)/C_hat_13_prime)\n Sin_sqTheta13_M=1-Cos_sq_Theta13_tilde\n Sinsq2Theta13_M = 4*Sin_sqTheta13_M*Cos_sq_Theta13_tilde\n Sin_sqTheta12_M = 1-Cos_sq_Theta12_tilde\n Sinsq2Theta12_M=4*Sin_sqTheta12_M*Cos_sq_Theta12_tilde\n\n DeltaM21_sq_M = Delta_c*(0.5*(1+A_c-C_hat_13_prime)+alpha_c*(C_hat_12_prime-A_star))\n DeltaM31_sq_M = Delta_c*(0.5*(1+A_c+C_hat_13_prime)+alpha_c*0.5*(C_hat_12_prime-A_star-Cos_2Theta_12))\n \n DeltaM32_sq_M = DeltaM31_sq_M - DeltaM21_sq_M\n Delta21 = 1.266932679815373 * DeltaM21_sq_M * BaseLine / E\n Delta31 = 1.266932679815373 * DeltaM31_sq_M * BaseLine / E\n Delta32 = 1.266932679815373 * DeltaM32_sq_M * BaseLine / E\n prob = 1. - Sinsq2Theta13_M * (\n (1 - Sin_sqTheta12_M) * np.sin(Delta31)**2. +\n Sin_sqTheta12_M * np.sin(Delta32)**2.) - (\n (1 - Sin_sqTheta13_M)**\n 2.) 
* Sinsq2Theta12_M * np.sin(Delta21)**2.\n # print()\n \n else:\n Delta21 = 1.266932679815373 * self.DeltaM21_sq * BaseLine / E\n Delta31 = 1.266932679815373 * self.DeltaM31_sq * BaseLine / E\n Delta32 = 1.266932679815373 * self.DeltaM32_sq * BaseLine / E\n prob = 1. - Sinsq2Theta13 * (\n (1 - self.Sin_sqTheta12) * np.sin(Delta31)**2. +\n self.Sin_sqTheta12 * np.sin(Delta32)**2.) - (\n (1 - self.Sin_sqTheta13)**\n 2.) * Sinsq2Theta12 * np.sin(Delta21)**2.\n # print(\"Yufeng: \",self.DeltaM31_sq)\n \n return prob\n\n def get_prob_e2e_YB(self, Enu, baseline, ME=True):\n '''\n Enu: MeV, baseline: cm\n '''\n E = Enu\n BaseLine = baseline * 1e-2 # cm to m\n prob = 0\n Sinsq2Theta12 = (4 * self.Sin_sqTheta12 * (1 - self.Sin_sqTheta12))\n Sinsq2Theta13 = (4 * self.Sin_sqTheta13 * (1 - self.Sin_sqTheta13))\n if ME:\n A_MatPoten = E * self.A_MatPoten_0\n eta_12 = (1 - 2 * self.Sin_sqTheta12 -\n A_MatPoten / self.DeltaM21_sq) * (\n 1 - 2 * self.Sin_sqTheta12 -\n A_MatPoten / self.DeltaM21_sq) + Sinsq2Theta12\n eta_13 = (1 - 2 * self.Sin_sqTheta13 -\n A_MatPoten / self.DeltaM31_sq) * (\n 1 - 2 * self.Sin_sqTheta13 -\n A_MatPoten / self.DeltaM31_sq) + Sinsq2Theta13\n Sinsq2Theta12_M = Sinsq2Theta12 / eta_12\n Sinsq2Theta13_M = Sinsq2Theta13 / eta_13\n Sin_sqTheta12_M = (1 - np.sqrt(1 - Sinsq2Theta12_M)) / 2.\n Sin_sqTheta13_M = (1 - np.sqrt(1 - Sinsq2Theta13_M)) / 2.\n DeltaM21_sq_M = self.DeltaM21_sq * np.sqrt(eta_12)\n DeltaM31_sq_M = self.DeltaM31_sq * np.sqrt(eta_13)\n DeltaM32_sq_M = DeltaM31_sq_M - DeltaM21_sq_M\n Delta21 = 1.266932679815373 * DeltaM21_sq_M * BaseLine / E\n Delta31 = 1.266932679815373 * DeltaM31_sq_M * BaseLine / E\n Delta32 = 1.266932679815373 * DeltaM32_sq_M * BaseLine / E\n prob = 1. - Sinsq2Theta13_M * (\n (1 - Sin_sqTheta12_M) * np.sin(Delta31)**2. +\n Sin_sqTheta12_M * np.sin(Delta32)**2.) - (\n (1 - Sin_sqTheta13_M)**\n 2.) * Sinsq2Theta12_M * np.sin(Delta21)**2.\n else:\n Delta21 = 1.266932679815373 * self.DeltaM21_sq * BaseLine / E\n Delta31 = 1.266932679815373 * self.DeltaM31_sq * BaseLine / E\n Delta32 = 1.266932679815373 * self.DeltaM32_sq * BaseLine / E\n prob = 1. - Sinsq2Theta13 * (\n (1 - self.Sin_sqTheta12) * np.sin(Delta31)**2. +\n self.Sin_sqTheta12 * np.sin(Delta32)**2.) - (\n (1 - self.Sin_sqTheta13)**\n 2.) 
* Sinsq2Theta12 * np.sin(Delta21)**2.\n # print(\"YB: \",self.DeltaM31_sq)\n return prob\n\n\ndef Check_YB_Hermitian(E_low=0.8, E_up=15., N=1000, BaseLine=52.5e5,ME=1):\n def GetAsy(a, b):\n # return 2 * (a - b) / (a + b)\n return (a - b) / ( b)\n\n Es = np.linspace(E_low, E_up, N)\n\n # JUNO Yellow formula\n P_e2e_YB = Prob_e2e(NMO=1)\n y_YB = P_e2e_YB.get_prob_e2e_YB(Es, baseline=BaseLine,ME=ME)\n y_Yufeng=P_e2e_YB.get_prob_e2e_Yufeng(Es, baseline=BaseLine,ME=ME)\n y_Amir=P_e2e_YB.get_prob_e2e_Amir(Es, baseline=BaseLine,ME=ME)\n # Hermitian approach\n import sys\n sys.path.append('../..')\n from physics.nu_oscillation import oscprob3nu, hamiltonians3nu\n from physics.nu_oscillation.globaldefs import CONV_CM_TO_INV_EV, VCC_EARTH_CRUST, S23_NO_BF, DCP_NO_BF\n S12_NO_BF = np.sqrt(P_e2e_YB.Sin_sqTheta12)\n S13_NO_BF = np.sqrt(P_e2e_YB.Sin_sqTheta13)\n D21_NO_BF = P_e2e_YB.DeltaM21_sq\n D31_NO_BF = P_e2e_YB.DeltaM31_sq\n h_vacuum_energy_indep = hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent(\n S12_NO_BF, S23_NO_BF, S13_NO_BF, -DCP_NO_BF, D21_NO_BF,\n D31_NO_BF) # sign - DCP_NO_BF for antineutrinos\n y_Het = np.zeros(N)\n for i, energy in enumerate(Es):\n # sign - for antineutrinos\n if ME:\n h_matter = hamiltonians3nu.hamiltonian_3nu_matter(h_vacuum_energy_indep, energy * 1e6,-VCC_EARTH_CRUST) \n else:\n h_matter = np.multiply(1/(energy*1e6),h_vacuum_energy_indep) \n Pee, Pem, Pet, Pme, Pmm, Pmt, Pte, Ptm, Ptt = oscprob3nu.probabilities_3nu(\n h_matter, BaseLine * CONV_CM_TO_INV_EV)\n y_Het[i] = Pee\n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_pdf import PdfPages\n plt.style.use('../../detector/DYB_like/lib/Paper.mplstyle')\n with PdfPages('results/ME_models.pdf') as pdf:\n fig, ax = plt.subplots()\n ax.set_ylabel(r'$\\frac{2\\cdot(A-B)}{(A+B)}$')\n ax.set_xlabel('Neutrino Energy [MeV]')\n # ax.plot(Es, y_Het, label='Hamiltonian approach')\n # ax.plot(Es, y_YB, label='Yellow Book Approach')\n # ax.plot(Es, y_Yufeng, label='Yufeng Approach')\n # ax.plot(Es, y_Amir, label='Amir Approach')\n # ax.plot(Es, GetAsy(y_YB, y_Het), label='YB/Hamiltonian')\n # ax.plot(Es, GetAsy(y_Amir,y_Yufeng), label='Amir/Yufeng')\n ax.plot(Es, GetAsy(y_Amir,y_Yufeng), label='Amir/Yufeng')\n ax.text(y=0.,x=6,s=\"Amir: arXiv:1910.12900\\n Yufeng: JUNO-doc-6859\")\n ax.legend()\n pdf.savefig()\n # ax.cla()\n # fig.savefig('./results/Yufeng_Amir.png') \n ax.plot(Es, GetAsy(y_YB, y_Het), label='YB/Hamiltonian')\n ax.plot(Es, GetAsy(y_YB, y_Yufeng), label='YB/Yufeng')\n ax.plot(Es, GetAsy(y_Yufeng,y_Het), label='Yufeng/Hamiltonian')\n # ax.plot(Es, GetAsy(y_Amir,y_Het), label='Amir/Hamiltonian')\n ax.set_ylabel(r'$\\frac{2\\cdot(A-B)}{(A+B)}$')\n ax.set_xlabel('Neutrino Energy [MeV]')\n ax.legend()\n pdf.savefig()\n # fig.savefig('./results/four_model.png')\n # plt.show()\n \ndef show_NMO_pattern(pattern='nue2nue'):\n print(pattern)\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n if args.cME:\n Check_YB_Hermitian()\n if args.pNMO:\n show_NMO_pattern(args.NMO_op)\n"
] | [["numpy.sin", "numpy.zeros", "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.subplots", "numpy.multiply", "matplotlib.pyplot.style.use", "numpy.sqrt", "numpy.linspace"]] |
esceptico/squeezer | ["98bc4c7923c6aa3b12ac81444d79392826fc34c6"] | ["squeezer/onnx/export.py"] | [
"from logging import getLogger\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\nfrom torch.onnx import export\n\nlogger = getLogger(__name__)\n\n\ndef export_to_onnx(\n model: nn.Module,\n dummy_input: [Union[Tuple], torch.Tensor],\n file,\n opset_version: int = 12,\n input_names: Optional[List[str]] = None,\n output_names: Optional[List[str]] = None,\n dynamic_axes: Dict[str, Dict[int, str]] = None\n) -> None:\n \"\"\"Exports PyTorch model to ONNX format.\n\n Args:\n model: PyTorch module.\n dummy_input: Dummy input.\n file: Path to save converted model or file-like object.\n opset_version: Version of ONNX operator set. Defaults to 12.\n input_names: Names of model inputs. Defaults to None.\n output_names: Names of model outputs. Defaults to None.\n dynamic_axes: Axes (input or/and outputs) with dynamic shapes.\n Defaults to None.\n\n Examples:\n >>> from transformers import AutoModel, AutoTokenizer\n >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\n >>> model = AutoModel.from_pretrained('bert-base-uncased')\n >>> encoded = tokenizer('aboba', return_tensors='np')\n >>>\n >>> export_to_onnx(\n >>> model,\n >>> dummy_input=tuple(encoded.values()),\n >>> path_to_save='model.onnx',\n >>> input_names=list(encoded.keys()),\n >>> output_names=['last_hidden_state', 'pooler_output'],\n >>> dynamic_axes={\n >>> 'input_ids' : {0 : 'batch_size', 1: 'seq'},\n >>> 'token_type_ids' : {0 : 'batch_size', 1: 'seq'},\n >>> 'attention_mask' : {0 : 'batch_size', 1: 'seq'},\n >>> 'last_hidden_state' : {0 : 'batch_size', 1: 'seq'},\n >>> 'pooler_output' : {0 : 'batch_size', 1: 'seq'}\n >>> }\n >>> )\n \"\"\"\n model.eval()\n export(\n model,\n dummy_input,\n file,\n opset_version=opset_version,\n do_constant_folding=True,\n input_names=input_names,\n output_names=output_names,\n dynamic_axes=dynamic_axes\n )\n logger.warning(f'Model was exported to ONNX.')\n"
] | [["torch.onnx.export"]] |
clayfish/maze | ["03bad22426d90225eca71d20f16e2da590a22aa2"] | ["Python/Version 5.1 (English Real-Time search)/maze51.py"] | [
"from tkinter import *\nfrom tkinter import font\nfrom tkinter import messagebox\nfrom functools import partial\nfrom operator import attrgetter\nimport webbrowser\nimport numpy\nimport random\nimport math\nimport os\n\n\"\"\"\n@author Nikos Kanargias\nE-mail: nkana@tee.gr\n@version 5.1\n\nThe software solves and visualizes the robot motion planning problem,\nby implementing variants of DFS, BFS and A* algorithms, as described\nby E. Keravnou in her book: \"Artificial Intelligence and Expert Systems\",\nHellenic Open University, Patra 2000 (in Greek)\nas well as the Greedy search algorithm, as a special case of A*.\n\nThe software also implements Dijkstra's algorithm,\nas just described in the relevant article in Wikipedia.\nhttp://en.wikipedia.org/wiki/Dijkstra%27s_algorithm\n\nThe superiority of A* and Dijkstra's algorithms against the other three becomes obvious.\n\nThe user can change the number of the grid cells, indicating\nthe desired number of rows and columns.\n\nThe user can add as many obstacles he/she wants, as he/she\nwould \"paint\" free curves with a drawing program.\n\nIndividual obstacles can be removed by clicking them.\n\nThe position of the robot and/or the target can be changed by dragging with the mouse.\n\nJump from search in \"Step-by-Step\" way to \"Animation\" way and vice versa is done\nby pressing the corresponding button, even when the search is in progress.\n\nThe speed of a search can be changed, even if the search is in progress.\nIt is sufficient to place the slider \"Speed\" in the new desired position\nand then press the \"Animation\" button.\n\nThe application considers that the robot itself has some volume.\nTherefore it can’t move diagonally to a free cell passing between two obstacles\nadjacent to one apex.\n\nWhen 'Step-by-Step' or 'Animation' search is underway it is not possible to change the position of obstacles,\nrobot and target, as well as the search algorithm.\n\nWhen 'Real-Time' search is underway the position of obstacles, robot and target can be changed.\n\nDrawing of arrows to predecessors, when requested, is performed only at the end of the search.\n\"\"\"\n\n\nclass Maze51:\n\n class CreateToolTip(object):\n \"\"\"\n Helper class that creates a tooltip for a given widget\n \"\"\"\n # from https://stackoverflow.com/questions/3221956/what-is-the-simplest-way-to-make-tooltips-in-tkinter\n def __init__(self, widget, text='widget info'):\n self.waittime = 500 # milliseconds\n self.wraplength = 180 # pixels\n self.widget = widget\n self.text = text\n self.widget.bind(\"<Enter>\", self.enter)\n self.widget.bind(\"<Leave>\", self.leave)\n self.widget.bind(\"<ButtonPress>\", self.leave)\n self._id = None\n self.tw = None\n\n def enter(self, event=None):\n self.schedule()\n\n def leave(self, event=None):\n self.unschedule()\n self.hidetip()\n\n def schedule(self):\n self.unschedule()\n self._id = self.widget.after(self.waittime, self.showtip)\n\n def unschedule(self):\n _id = self._id\n self._id = None\n if _id:\n self.widget.after_cancel(_id)\n\n def showtip(self, event=None):\n x, y, cx, cy = self.widget.bbox(\"insert\")\n x += self.widget.winfo_rootx() + 25\n y += self.widget.winfo_rooty() + 20\n # creates a toplevel window\n self.tw = Toplevel(self.widget)\n # Leaves only the label and removes the app window\n self.tw.wm_overrideredirect(True)\n self.tw.wm_geometry(\"+%d+%d\" % (x, y))\n label = Label(self.tw, text=self.text, justify='left', background=\"#ffffff\",\n relief='solid', borderwidth=1, wraplength=self.wraplength)\n 
label.pack(ipadx=1)\n\n def hidetip(self):\n tw = self.tw\n self.tw = None\n if tw:\n tw.destroy()\n\n class MyMaze(object):\n \"\"\"\n Helper class that creates a random, perfect (without cycles) maze\n \"\"\"\n # The code of the class is an adaptation, with the original commentary, of the answer given\n # by user DoubleMx2 on August 25, 2013 to a question posted by user nazar_art at stackoverflow.com:\n # http://stackoverflow.com/questions/18396364/maze-generation-arrayindexoutofboundsexception\n\n def __init__(self, x_dimension, y_dimension):\n self.dimensionX = x_dimension # dimension of maze\n self.dimensionY = y_dimension\n self.gridDimensionX = x_dimension * 2 + 1 # dimension of output grid\n self.gridDimensionY = y_dimension * 2 + 1\n # output grid\n self.mazeGrid = [[' ' for y in range(self.gridDimensionY)] for x in range(self.gridDimensionX)]\n # 2d array of Cells\n self.cells = [[self.Cell(x, y, False) for y in range(self.dimensionY)] for x in range(self.dimensionX)]\n self.generate_maze()\n self.update_grid()\n\n class Cell(object):\n \"\"\"\n inner class to represent a cell\n \"\"\"\n def __init__(self, x, y, is_wall=True):\n self.neighbors = [] # cells this cell is connected to\n self.open = True # if true, has yet to be used in generation\n self.x = x # coordinates\n self.y = y\n self.wall = is_wall # impassable cell\n\n def add_neighbor(self, other):\n \"\"\"\n add a neighbor to this cell, and this cell as a neighbor to the other\n \"\"\"\n if other not in self.neighbors: # avoid duplicates\n self.neighbors.append(other)\n if self not in other.neighbors: # avoid duplicates\n other.neighbors.append(self)\n\n def is_cell_below_neighbor(self):\n \"\"\"\n used in update_grid()\n \"\"\"\n return self.__class__(self.x, self.y + 1) in self.neighbors\n\n def is_cell_right_neighbor(self):\n \"\"\"\n used in update_grid()\n \"\"\"\n return self.__class__(self.x + 1, self.y) in self.neighbors\n\n def __eq__(self, other):\n \"\"\"\n useful Cell equivalence\n \"\"\"\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n else:\n return False\n\n def generate_maze(self):\n \"\"\"\n generate the maze from upper left (In computing the y increases down often)\n \"\"\"\n start_at = self.get_cell(0, 0)\n start_at.open = False # indicate cell closed for generation\n cells = [start_at]\n while cells:\n # this is to reduce but not completely eliminate the number\n # of long twisting halls with short easy to detect branches\n # which results in easy mazes\n if random.randint(0, 9) == 0:\n cell = cells.pop(random.randint(0, cells.__len__()) - 1)\n else:\n cell = cells.pop(cells.__len__() - 1)\n # for collection\n neighbors = []\n # cells that could potentially be neighbors\n potential_neighbors = [self.get_cell(cell.x + 1, cell.y), self.get_cell(cell.x, cell.y + 1),\n self.get_cell(cell.x - 1, cell.y), self.get_cell(cell.x, cell.y - 1)]\n for other in potential_neighbors:\n # skip if outside, is a wall or is not opened\n if other is None or other.wall or not other.open:\n continue\n neighbors.append(other)\n if not neighbors:\n continue\n # get random cell\n selected = neighbors[random.randint(0, neighbors.__len__()) - 1]\n # add as neighbor\n selected.open = False # indicate cell closed for generation\n cell.add_neighbor(selected)\n cells.append(cell)\n cells.append(selected)\n\n def get_cell(self, x, y):\n \"\"\"\n used to get a Cell at x, y; returns None out of bounds\n \"\"\"\n if x < 0 or y < 0:\n return None\n try:\n return self.cells[x][y]\n except 
IndexError: # catch out of bounds\n return None\n\n def update_grid(self):\n \"\"\"\n draw the maze\n \"\"\"\n back_char = ' '\n wall_char = 'X'\n cell_char = ' '\n # fill background\n for x in range(self.gridDimensionX):\n for y in range(self.gridDimensionY):\n self.mazeGrid[x][y] = back_char\n # build walls\n for x in range(self.gridDimensionX):\n for y in range(self.gridDimensionY):\n if x % 2 == 0 or y % 2 == 0:\n self.mazeGrid[x][y] = wall_char\n # make meaningful representation\n for x in range(self.dimensionX):\n for y in range(self.dimensionY):\n current = self.get_cell(x, y)\n grid_x = x * 2 + 1\n grid_y = y * 2 + 1\n self.mazeGrid[grid_x][grid_y] = cell_char\n if current.is_cell_below_neighbor():\n self.mazeGrid[grid_x][grid_y + 1] = cell_char\n if current.is_cell_right_neighbor():\n self.mazeGrid[grid_x + 1][grid_y] = cell_char\n\n class Cell(object):\n \"\"\"\n Helper class that represents the cell of the grid\n \"\"\"\n\n def __init__(self, row, col):\n self.row = row # the row number of the cell(row 0 is the top)\n self.col = col # the column number of the cell (column 0 is the left)\n self.g = 0 # the value of the function g of A* and Greedy algorithms\n self.h = 0 # the value of the function h of A* and Greedy algorithms\n self.f = 0 # the value of the function f of A* and Greedy algorithms\n # the distance of the cell from the initial position of the robot\n # Ie the label that updates the Dijkstra's algorithm\n self.dist = 0\n # Each state corresponds to a cell\n # and each state has a predecessor which\n # stored in this variable\n self.prev = self.__class__\n\n def __eq__(self, other):\n \"\"\"\n useful Cell equivalence\n \"\"\"\n if isinstance(other, self.__class__):\n return self.row == other.row and self.col == other.col\n else:\n return False\n\n #######################################\n # #\n # Constants of Maze42 class #\n # #\n #######################################\n INFINITY = sys.maxsize # The representation of the infinite\n EMPTY = 0 # empty cell\n OBST = 1 # cell with obstacle\n ROBOT = 2 # the position of the robot\n TARGET = 3 # the position of the target\n FRONTIER = 4 # cells that form the frontier (OPEN SET)\n CLOSED = 5 # cells that form the CLOSED SET\n ROUTE = 6 # cells that form the robot-to-target path\n\n MSG_DRAW_AND_SELECT = \"\\\"Paint\\\" obstacles, then click 'Real-Time' or 'Step-by-Step' or 'Animation'\"\n MSG_SELECT_STEP_BY_STEP_ETC = \"Click 'Step-by-Step' or 'Animation' or 'Clear'\"\n MSG_NO_SOLUTION = \"There is no path to the target !!!\"\n\n def __init__(self, maze):\n \"\"\"\n Constructor\n \"\"\"\n self.center(maze)\n\n self.rows = 41 # the number of rows of the grid\n self.columns = 41 # the number of columns of the grid\n self.square_size = int(500/self.rows) # the cell size in pixels\n self.arrow_size = int(self.square_size/2) # the size of the tips of the arrow pointing the predecessor cell\n\n self.openSet = [] # the OPEN SET\n self.closedSet = [] # the CLOSED SET\n self.graph = [] # the set of vertices of the graph to be explored by Dijkstra's algorithm\n\n self.robotStart = self.Cell(self.rows - 2, 1) # the initial position of the robot\n self.targetPos = self.Cell(1, self.columns - 2) # the position of the target\n\n self.grid = [[]] # the grid\n self.realTime = False # Solution is displayed instantly\n self.found = False # flag that the goal was found\n self.searching = False # flag that the search is in progress\n self.endOfSearch = False # flag that the search came to an end\n self.animation = False # flag that the 
animation is running\n self.delay = 500 # time delay of animation (in msec)\n self.expanded = 0 # the number of nodes that have been expanded\n self.selected_algo = \"DFS\" # DFS is initially selected\n\n self.array = numpy.array([0] * (83 * 83))\n self.cur_row = self.cur_col = self.cur_val = 0\n app_highlight_font = font.Font(app, family='Helvetica', size=10, weight='bold')\n\n ##########################################\n # #\n # the widgets of the user interface #\n # #\n ##########################################\n self.message = Label(app, text=self.MSG_DRAW_AND_SELECT, width=55, anchor='center',\n font=('Helvetica', 12), fg=\"BLUE\")\n self.message.place(x=5, y=510)\n\n rows_lbl = Label(app, text=\"# of rows (5-83):\", width=16, anchor='e', font=(\"Helvetica\", 9))\n rows_lbl.place(x=530, y=5)\n\n validate_rows_cmd = (app.register(self.validate_rows), '%P')\n invalid_rows_cmd = (app.register(self.invalid_rows))\n\n self.rows_var = StringVar()\n self.rows_var.set(41)\n self.rowsSpinner = Spinbox(app, width=3, from_=5, to=83, textvariable=self.rows_var, validate='focus',\n validatecommand=validate_rows_cmd, invalidcommand=invalid_rows_cmd)\n self.rowsSpinner.place(x=652, y=5)\n\n cols_lbl = Label(app, text=\"# of columns (5-83):\", width=16, anchor='e', font=(\"Helvetica\", 9))\n cols_lbl.place(x=530, y=35)\n\n validate_cols_cmd = (app.register(self.validate_cols), '%P')\n invalid_cols_cmd = (app.register(self.invalid_cols))\n\n self.cols_var = StringVar()\n self.cols_var.set(41)\n self.colsSpinner = Spinbox(app, width=3, from_=5, to=83, textvariable=self.cols_var, validate='focus',\n validatecommand=validate_cols_cmd, invalidcommand=invalid_cols_cmd)\n self.colsSpinner.place(x=652, y=35)\n\n self.buttons = list()\n buttons_tool_tips = (\"Clears and redraws the grid according to the given rows and columns\",\n \"Creates a random maze\",\n \"First click: clears search, Second click: clears obstacles\",\n \"Position of obstacles, robot and target can be changed when search is underway\",\n \"The search is performed step-by-step for every click\",\n \"The search is performed automatically\")\n for i, action in enumerate((\"New grid\", \"Maze\", \"Clear\", \"Real-Time\", \"Step-by-Step\", \"Animation\")):\n btn = Button(app, text=action, width=20, font=app_highlight_font, bg=\"light grey\",\n command=partial(self.select_action, action))\n btn.place(x=515, y=65+30*i)\n self.CreateToolTip(btn, buttons_tool_tips[i])\n self.buttons.append(btn)\n\n time_delay = Label(app, text=\"Delay (msec)\", width=27, anchor='center', font=(\"Helvetica\", 8))\n time_delay.place(x=515, y=243)\n slider_value = IntVar()\n slider_value.set(500)\n self.slider = Scale(app, orient=HORIZONTAL, length=165, width=10, from_=0, to=1000,\n showvalue=1, variable=slider_value,)\n self.slider.place(x=515, y=260)\n self.CreateToolTip(self.slider, \"Regulates the delay for each step (0 to 1000 msec)\")\n\n self.frame = LabelFrame(app, text=\"Algorithms\", width=170, height=100)\n self.frame.place(x=515, y=300)\n self.radio_buttons = list()\n radio_buttons_tool_tips = (\"Depth First Search algorithm\",\n \"Breadth First Search algorithm\",\n \"A* algorithm\",\n \"Greedy search algorithm\",\n \"Dijkstra's algorithm\")\n for i, algorithm in enumerate((\"DFS\", \"BFS\", \"A*\", \"Greedy\", \"Dijkstra\")):\n btn = Radiobutton(self.frame, text=algorithm, font=app_highlight_font, value=i + 1,\n command=partial(self.select_algo, algorithm))\n btn.place(x=10 if i % 2 == 0 else 90, y=int(i/2)*25)\n self.CreateToolTip(btn, 
radio_buttons_tool_tips[i])\n btn.deselect()\n self.radio_buttons.append(btn)\n self.radio_buttons[0].select()\n\n self.diagonal = IntVar()\n self.diagonalBtn = Checkbutton(app, text=\"Diagonal movements\", font=app_highlight_font,\n variable=self.diagonal)\n self.diagonalBtn.place(x=515, y=405)\n self.CreateToolTip(self.diagonalBtn, \"Diagonal movements are also allowed\")\n\n self.drawArrows = IntVar()\n self.drawArrowsBtn = Checkbutton(app, text=\"Arrows to predecessors\", font=app_highlight_font,\n variable=self.drawArrows)\n self.drawArrowsBtn.place(x=515, y=430)\n self.CreateToolTip(self.drawArrowsBtn, \"Draw arrows to predecessors\")\n\n memo_colors = (\"RED\", \"GREEN\", \"BLUE\", \"CYAN\")\n for i, memo in enumerate((\"Robot\", \"Target\", \"Frontier\", \"Closed set\")):\n label = Label(app, text=memo, width=8, anchor='center', fg=memo_colors[i], font=(\"Helvetica\", 11))\n label.place(x=515 if i % 2 == 0 else 605, y=460+int(i/2)*20)\n\n self.about_button = Button(app, text='About Maze', width=20, font=app_highlight_font, bg=\"light grey\",\n command=self.about_click)\n self.about_button.place(x=515, y=505)\n\n self.canvas = Canvas(app, bd=0, highlightthickness=0)\n self.canvas.bind(\"<Button-1>\", self.left_click)\n self.canvas.bind(\"<B1-Motion>\", self.drag)\n\n self.initialize_grid(False)\n\n def validate_rows(self, entry):\n \"\"\"\n Validates entry in rowsSpinner\n\n :param entry: the value entered by the user\n :return: True, if entry is valid\n \"\"\"\n try:\n value = int(entry)\n valid = value in range(5, 84)\n except ValueError:\n valid = False\n if not valid:\n app.bell()\n # The following line is due to user PRMoureu of stackoverflow. See:\n # https://stackoverflow.com/questions/46861236/widget-validation-in-tkinter/46863849?noredirect=1#comment80675412_46863849\n self.rowsSpinner.after_idle(lambda: self.rowsSpinner.config(validate='focusout'))\n return valid\n\n def invalid_rows(self):\n \"\"\"\n Sets default value to rowsSpinner in case of invalid entry\n \"\"\"\n self.rows_var.set(41)\n\n def validate_cols(self, entry):\n \"\"\"\n Validates entry in colsSpinner\n\n :param entry: the value entered by the user\n :return: True, if entry is valid\n \"\"\"\n try:\n value = int(entry)\n valid = value in range(5, 84)\n except ValueError:\n valid = False\n if not valid:\n app.bell()\n self.colsSpinner.after_idle(lambda: self.colsSpinner.config(validate='focusout'))\n return valid\n\n def invalid_cols(self):\n \"\"\"\n Sets default value to colsSpinner in case of invalid entry\n \"\"\"\n self.cols_var.set(41)\n\n def select_action(self, action):\n if action == \"New grid\":\n self.reset_click()\n elif action == \"Maze\":\n self.maze_click()\n elif action == \"Clear\":\n self.clear_click()\n elif action == \"Real-Time\":\n self.real_time_click()\n elif action == \"Step-by-Step\":\n self.step_by_step_click()\n elif action == \"Animation\":\n self.animation_click()\n\n def select_algo(self, algorithm):\n self.selected_algo = algorithm\n\n def left_click(self, event):\n \"\"\"\n Handles clicks of left mouse button as we add or remove obstacles\n \"\"\"\n row = int(event.y/self.square_size)\n col = int(event.x/self.square_size)\n if row in range(self.rows) and col in range(self.columns):\n if True if self.realTime else (not self.found and not self.searching):\n if self.realTime:\n self.fill_grid()\n self.cur_row = row\n self.cur_col = col\n self.cur_val = self.grid[row][col]\n if self.cur_val == self.EMPTY:\n self.grid[row][col] = self.OBST\n self.paint_cell(row, col, 
\"BLACK\")\n if self.cur_val == self.OBST:\n self.grid[row][col] = self.EMPTY\n self.paint_cell(row, col, \"WHITE\")\n if self.realTime and self.selected_algo == \"Dijkstra\":\n self.initialize_dijkstra()\n if self.realTime:\n self.real_Time_action()\n\n def drag(self, event):\n \"\"\"\n Handles mouse movements as we \"paint\" obstacles or move the robot and/or target.\n \"\"\"\n row = int(event.y/self.square_size)\n col = int(event.x/self.square_size)\n if row in range(self.rows) and col in range(self.columns):\n if True if self.realTime else (not self.found and not self.searching):\n if self.realTime:\n self.fill_grid()\n if self.Cell(row, col) != self.Cell(self.cur_row, self.cur_col) and\\\n self.cur_val in [self.ROBOT, self.TARGET]:\n new_val = self.grid[row][col]\n if new_val == self.EMPTY:\n self.grid[row][col] = self.cur_val\n if self.cur_val == self.ROBOT:\n self.grid[self.robotStart.row][self.robotStart.col] = self.EMPTY\n self.paint_cell(self.robotStart.row, self.robotStart.col, \"WHITE\")\n self.robotStart.row = row\n self.robotStart.col = col\n self.grid[self.robotStart.row][self.robotStart.col] = self.ROBOT\n self.paint_cell(self.robotStart.row, self.robotStart.col, \"RED\")\n else:\n self.grid[self.targetPos.row][self.targetPos.col] = self.EMPTY\n self.paint_cell(self.targetPos.row, self.targetPos.col, \"WHITE\")\n self.targetPos.row = row\n self.targetPos.col = col\n self.grid[self.targetPos.row][self.targetPos.col] = self.TARGET\n self.paint_cell(self.targetPos.row, self.targetPos.col, \"GREEN\")\n self.cur_row = row\n self.cur_col = col\n self.cur_val = self.grid[row][col]\n elif self.grid[row][col] != self.ROBOT and self.grid[row][col] != self.TARGET:\n self.grid[row][col] = self.OBST\n self.paint_cell(row, col, \"BLACK\")\n if self.realTime and self.selected_algo == \"Dijkstra\":\n self.initialize_dijkstra()\n if self.realTime:\n self.real_Time_action()\n\n def initialize_grid(self, make_maze):\n \"\"\"\n Creates a new clean grid or a new maze\n\n :param make_maze: flag that indicates the creation of a random maze\n \"\"\"\n self.rows = int(self.rowsSpinner.get())\n self.columns = int(self.colsSpinner.get())\n if make_maze and self.rows % 2 == 0:\n self.rows -= 1\n if make_maze and self.columns % 2 == 0:\n self.columns -= 1\n self.square_size = int(500/(self.rows if self.rows > self.columns else self.columns))\n self.arrow_size = int(self.square_size/2)\n self.grid = self.array[:self.rows*self.columns]\n self.grid = self.grid.reshape(self.rows, self.columns)\n self.canvas.configure(width=self.columns*self.square_size+1, height=self.rows*self.square_size+1)\n self.canvas.place(x=10, y=10)\n self.canvas.create_rectangle(0, 0, self.columns*self.square_size+1,\n self.rows*self.square_size+1, width=0, fill=\"DARK GREY\")\n for r in list(range(self.rows)):\n for c in list(range(self.columns)):\n self.grid[r][c] = self.EMPTY\n self.robotStart = self.Cell(self.rows-2, 1)\n self.targetPos = self.Cell(1, self.columns-2)\n self.fill_grid()\n if make_maze:\n maze = self.MyMaze(int(self.rows/2), int(self.columns/2))\n for x in range(maze.gridDimensionX):\n for y in range(maze.gridDimensionY):\n if maze.mazeGrid[x][y] == 'X': # maze.wall_char:\n self.grid[x][y] = self.OBST\n self.repaint()\n\n def fill_grid(self):\n \"\"\"\n Gives initial values for the cells in the grid.\n \"\"\"\n # With the first click on button 'Clear' clears the data\n # of any search was performed (Frontier, Closed Set, Route)\n # and leaves intact the obstacles and the robot and target positions\n # in order to 
be able to run another algorithm\n # with the same data.\n # With the second click removes any obstacles also.\n if self.searching or self.endOfSearch:\n for r in list(range(self.rows)):\n for c in list(range(self.columns)):\n if self.grid[r][c] in [self.FRONTIER, self.CLOSED, self.ROUTE]:\n self.grid[r][c] = self.EMPTY\n if self.grid[r][c] == self.ROBOT:\n self.robotStart = self.Cell(r, c)\n self.searching = False\n else:\n for r in list(range(self.rows)):\n for c in list(range(self.columns)):\n self.grid[r][c] = self.EMPTY\n self.robotStart = self.Cell(self.rows-2, 1)\n self.targetPos = self.Cell(1, self.columns-2)\n if self.selected_algo in [\"A*\", \"Greedy\"]:\n self.robotStart.g = 0\n self.robotStart.h = 0\n self.robotStart.f = 0\n self.expanded = 0\n self.found = False\n self.searching = False\n self.endOfSearch = False\n\n self.openSet.clear()\n self.closedSet.clear()\n self.openSet = [self.robotStart]\n self.closedSet = []\n\n self.grid[self.targetPos.row][self.targetPos.col] = self.TARGET\n self.grid[self.robotStart.row][self.robotStart.col] = self.ROBOT\n self.message.configure(text=self.MSG_DRAW_AND_SELECT)\n\n self.repaint()\n\n def repaint(self):\n \"\"\"\n Repaints the grid\n \"\"\"\n color = \"\"\n for r in list(range(self.rows)):\n for c in list(range(self.columns)):\n if self.grid[r][c] == self.EMPTY:\n color = \"WHITE\"\n elif self.grid[r][c] == self.ROBOT:\n color = \"RED\"\n elif self.grid[r][c] == self.TARGET:\n color = \"GREEN\"\n elif self.grid[r][c] == self.OBST:\n color = \"BLACK\"\n elif self.grid[r][c] == self.FRONTIER:\n color = \"BLUE\"\n elif self.grid[r][c] == self.CLOSED:\n color = \"CYAN\"\n elif self.grid[r][c] == self.ROUTE:\n color = \"YELLOW\"\n self.paint_cell(r, c, color)\n\n def paint_cell(self, row, col, color):\n \"\"\"\n Paints a particular cell\n\n :param row: # the row of the cell\n :param col: # the column of the cell\n :param color: # the color of the cell\n \"\"\"\n self.canvas.create_rectangle(1 + col * self.square_size, 1 + row * self.square_size,\n 1 + (col + 1) * self.square_size - 1, 1 + (row + 1) * self.square_size - 1,\n width=0, fill=color)\n\n def reset_click(self):\n \"\"\"\n Action performed when user clicks \"New grid\" button\n \"\"\"\n self.animation = False\n self.realTime = False\n for but in self.buttons:\n but.configure(state=\"normal\")\n self.buttons[3].configure(fg=\"BLACK\") # Real-Time button\n self.slider.configure(state=\"normal\")\n for but in self.radio_buttons:\n but.configure(state=\"normal\")\n self.diagonalBtn.configure(state=\"normal\")\n self.drawArrowsBtn.configure(state=\"normal\")\n self.initialize_grid(False)\n\n def maze_click(self):\n \"\"\"\n Action performed when user clicks \"Maze\" button\n \"\"\"\n self.animation = False\n self.realTime = False\n for but in self.buttons:\n but.configure(state=\"normal\")\n self.buttons[3].configure(fg=\"BLACK\") # Real-Time button\n self.slider.configure(state=\"normal\")\n for but in self.radio_buttons:\n but.configure(state=\"normal\")\n self.diagonalBtn.configure(state=\"normal\")\n self.drawArrowsBtn.configure(state=\"normal\")\n self.initialize_grid(True)\n\n def clear_click(self):\n \"\"\"\n Action performed when user clicks \"Clear\" button\n \"\"\"\n self.animation = False\n self.realTime = False\n for but in self.buttons:\n but.configure(state=\"normal\")\n self.buttons[3].configure(fg=\"BLACK\") # Real-Time button\n self.slider.configure(state=\"normal\")\n for but in self.radio_buttons:\n but.configure(state=\"normal\")\n 
self.diagonalBtn.configure(state=\"normal\")\n self.drawArrowsBtn.configure(state=\"normal\")\n self.fill_grid()\n\n def real_time_click(self):\n \"\"\"\n Action performed when user clicks \"Real-Time\" button\n \"\"\"\n if self.realTime:\n return\n self.realTime = True\n self.searching = True\n # The Dijkstra's initialization should be done just before the\n # start of search, because obstacles must be in place.\n if self.selected_algo == \"Dijkstra\":\n self.initialize_dijkstra()\n self.buttons[3].configure(fg=\"RED\") # Real-Time button\n self.slider.configure(state=\"disabled\")\n for but in self.radio_buttons:\n but.configure(state=\"disabled\")\n self.diagonalBtn.configure(state=\"disabled\")\n self.drawArrowsBtn.configure(state=\"disabled\")\n self.real_Time_action()\n\n def real_Time_action(self):\n \"\"\"\n Action performed during real-time search\n \"\"\"\n while not self.endOfSearch:\n self.check_termination()\n\n def step_by_step_click(self):\n \"\"\"\n Action performed when user clicks \"Step-by-Step\" button\n \"\"\"\n if self.found or self.endOfSearch:\n return\n if not self.searching and self.selected_algo == \"Dijkstra\":\n self.initialize_dijkstra()\n self.animation = False\n self.searching = True\n self.message.configure(text=self.MSG_SELECT_STEP_BY_STEP_ETC)\n self.buttons[3].configure(state=\"disabled\") # Real-Time button\n for but in self.radio_buttons:\n but.configure(state=\"disabled\")\n self.diagonalBtn.configure(state=\"disabled\")\n self.drawArrowsBtn.configure(state=\"disabled\")\n self.check_termination()\n\n def animation_click(self):\n \"\"\"\n Action performed when user clicks \"Animation\" button\n \"\"\"\n self.animation = True\n if not self.searching and self.selected_algo == \"Dijkstra\":\n self.initialize_dijkstra()\n self.searching = True\n self.message.configure(text=self.MSG_SELECT_STEP_BY_STEP_ETC)\n self.buttons[3].configure(state=\"disabled\") # Real-Time button\n for but in self.radio_buttons:\n but.configure(state=\"disabled\")\n self.diagonalBtn.configure(state=\"disabled\")\n self.drawArrowsBtn.configure(state=\"disabled\")\n self.delay = self.slider.get()\n self.animation_action()\n\n def animation_action(self):\n \"\"\"\n The action periodically performed during searching in animation mode\n \"\"\"\n if self.animation:\n self.check_termination()\n if self.endOfSearch:\n return\n self.canvas.after(self.delay, self.animation_action)\n\n def about_click(self):\n \"\"\"\n Action performed when user clicks \"About Maze\" button\n \"\"\"\n about_box = Toplevel(master=app)\n about_box.title(\"\")\n about_box.geometry(\"340x160\")\n about_box.resizable(False, False)\n self.center(about_box)\n about_box.transient(app) # only one window in the task bar\n about_box.grab_set() # modal\n\n title = Label(about_box, text=\"Maze\", width=20, anchor='center', fg='SANDY BROWN', font=(\"Helvetica\", 20))\n title.place(x=0, y=0)\n version = Label(about_box, text=\"Version: 5.1\", width=35, anchor='center', font=(\"Helvetica\", 11, 'bold'))\n version.place(x=0, y=35)\n programmer = Label(about_box, text=\"Designer: Nikos Kanargias\", width=35, anchor='center',\n font=(\"Helvetica\", 12))\n programmer.place(x=0, y=60)\n email = Label(about_box, text=\"E-mail: nkana@tee.gr\", width=40, anchor='center', font=(\"Helvetica\", 10))\n email.place(x=0, y=80)\n source_code = Label(about_box, text=\"Code and documentation\", fg=\"blue\", cursor=\"hand2\", width=35,\n anchor='center',\n font=(\"Helvetica\", 12))\n f = font.Font(source_code, source_code.cget(\"font\"))\n 
f.configure(underline=True)\n source_code.configure(font=f)\n source_code.place(x=0, y=100)\n source_code.bind(\"<Button-1>\", self.source_code_callback)\n self.CreateToolTip(source_code, \"Click this link to retrieve code and documentation from DropBox\")\n video = Label(about_box, text=\"Watch demo video on YouTube\", fg=\"blue\", cursor=\"hand2\", width=35,\n anchor='center')\n video.configure(font=f)\n video.place(x=0, y=125)\n video.bind(\"<Button-1>\", self.video_callback)\n self.CreateToolTip(video, \"Click this link to watch a demo video on YouTube\")\n\n def check_termination(self):\n \"\"\"\n Checks if search is completed\n \"\"\"\n # Here we decide whether we can continue the search or not.\n # In the case of DFS, BFS, A* and Greedy algorithms\n # here we have the second step:\n # 2. If OPEN SET = [], then terminate. There is no solution.\n if (self.selected_algo == \"Dijkstra\" and not self.graph) or\\\n self.selected_algo != \"Dijkstra\" and not self.openSet:\n self.endOfSearch = True\n self.grid[self.robotStart.row][self.robotStart.col] = self.ROBOT\n self.message.configure(text=self.MSG_NO_SOLUTION)\n self.buttons[4].configure(state=\"disabled\") # Step-by-Step button\n self.buttons[5].configure(state=\"disabled\") # Animation button\n self.slider.configure(state=\"disabled\")\n self.repaint()\n if self.drawArrows.get():\n self.draw_arrows()\n else:\n self.expand_node()\n if self.found:\n self.endOfSearch = True\n self.plot_route()\n self.buttons[4].configure(state=\"disabled\") # Step-by-Step button\n self.buttons[5].configure(state=\"disabled\") # Animation button\n self.slider.configure(state=\"disabled\")\n\n def expand_node(self):\n \"\"\"\n Expands a node and creates his successors\n \"\"\"\n # Dijkstra's algorithm to handle separately\n if self.selected_algo == \"Dijkstra\":\n # 11: while Q is not empty:\n if not self.graph:\n return\n # 12: u := vertex in Q (graph) with smallest distance in dist[] ;\n # 13: remove u from Q (graph);\n u = self.graph.pop(0)\n # Add vertex u in closed set\n self.closedSet.append(u)\n # If target has been found ...\n if u == self.targetPos:\n self.found = True\n return\n # Counts nodes that have expanded.\n self.expanded += 1\n # Update the color of the cell\n self.grid[u.row][u.col] = self.CLOSED\n # paint the cell\n self.paint_cell(u.row, u.col, \"CYAN\")\n # 14: if dist[u] = infinity:\n if u.dist == self.INFINITY:\n # ... then there is no solution.\n # 15: break;\n return\n # 16: end if\n # Create the neighbors of u\n neighbors = self.create_successors(u, False)\n # 18: for each neighbor v of u:\n for v in neighbors:\n # 20: alt := dist[u] + dist_between(u, v) ;\n alt = u.dist + self.dist_between(u, v)\n # 21: if alt < dist[v]:\n if alt < v.dist:\n # 22: dist[v] := alt ;\n v.dist = alt\n # 23: previous[v] := u ;\n v.prev = u\n # Update the color of the cell\n self.grid[v.row][v.col] = self.FRONTIER\n # paint the cell\n self.paint_cell(v.row, v.col, \"BLUE\")\n # 24: decrease-key v in Q;\n # (sort list of nodes with respect to dist)\n self.graph.sort(key=attrgetter(\"dist\"))\n # The handling of the other four algorithms\n else:\n if self.selected_algo in [\"DFS\", \"BFS\"]:\n # Here is the 3rd step of the algorithms DFS and BFS\n # 3. Remove the first state, Si, from OPEN SET ...\n current = self.openSet.pop(0)\n else:\n # Here is the 3rd step of the algorithms A* and Greedy\n # 3. 
Remove the first state, Si, from OPEN SET,\n # for which f(Si) ≤ f(Sj) for all other\n # open states Sj ...\n # (sort first OPEN SET list with respect to 'f')\n self.openSet.sort(key=attrgetter(\"f\"))\n current = self.openSet.pop(0)\n # ... and add it to CLOSED SET.\n self.closedSet.insert(0, current)\n # Update the color of the cell\n self.grid[current.row][current.col] = self.CLOSED\n # paint the cell\n self.paint_cell(current.row, current.col, \"CYAN\")\n # If the selected node is the target ...\n if current == self.targetPos:\n # ... then terminate etc\n last = self.targetPos\n last.prev = current.prev\n self.closedSet.append(last)\n self.found = True\n return\n # Count nodes that have been expanded.\n self.expanded += 1\n # Here is the 4rd step of the algorithms\n # 4. Create the successors of Si, based on actions\n # that can be implemented on Si.\n # Each successor has a pointer to the Si, as its predecessor.\n # In the case of DFS and BFS algorithms, successors should not\n # belong neither to the OPEN SET nor the CLOSED SET.\n successors = self.create_successors(current, False)\n # Here is the 5th step of the algorithms\n # 5. For each successor of Si, ...\n for cell in successors:\n # ... if we are running DFS ...\n if self.selected_algo == \"DFS\":\n # ... add the successor at the beginning of the list OPEN SET\n self.openSet.insert(0, cell)\n # Update the color of the cell\n self.grid[cell.row][cell.col] = self.FRONTIER\n # paint the cell\n self.paint_cell(cell.row, cell.col, \"BLUE\")\n # ... if we are runnig BFS ...\n elif self.selected_algo == \"BFS\":\n # ... add the successor at the end of the list OPEN SET\n self.openSet.append(cell)\n # Update the color of the cell\n self.grid[cell.row][cell.col] = self.FRONTIER\n # paint the cell\n self.paint_cell(cell.row, cell.col, \"BLUE\")\n # ... if we are running A* or Greedy algorithms (step 5 of A* algorithm) ...\n elif self.selected_algo in [\"A*\", \"Greedy\"]:\n # ... calculate the value f(Sj) ...\n dxg = current.col - cell.col\n dyg = current.row - cell.row\n dxh = self.targetPos.col - cell.col\n dyh = self.targetPos.row - cell.row\n if self.diagonal.get():\n # with diagonal movements, calculate the Euclidean distance\n if self.selected_algo == \"Greedy\":\n # especially for the Greedy ...\n cell.g = 0\n else:\n cell.g = current.g + math.sqrt(dxg*dxg + dyg*dyg)\n cell.h = math.sqrt(dxh*dxh + dyh*dyh)\n else:\n # without diagonal movements, calculate the Manhattan distance\n if self.selected_algo == \"Greedy\":\n # especially for the Greedy ...\n cell.g = 0\n else:\n cell.g = current.g + abs(dxg) + abs(dyg)\n cell.h = abs(dxh) + abs(dyh)\n cell.f = cell.g+cell.h\n # ... If Sj is neither in the OPEN SET nor in the CLOSED SET states ...\n if cell not in self.openSet and cell not in self.closedSet:\n # ... then add Sj in the OPEN SET ...\n # ... evaluated as f(Sj)\n self.openSet.append(cell)\n # Update the color of the cell\n self.grid[cell.row][cell.col] = self.FRONTIER\n # paint the cell\n self.paint_cell(cell.row, cell.col, \"BLUE\")\n # Else ...\n else:\n # ... if already belongs to the OPEN SET, then ...\n if cell in self.openSet:\n open_index = self.openSet.index(cell)\n # ... compare the new value assessment with the old one.\n # If old <= new ...\n if self.openSet[open_index].f <= cell.f:\n # ... then eject the new node with state Sj.\n # (ie do nothing for this node).\n pass\n # Else, ...\n else:\n # ... remove the element (Sj, old) from the list\n # to which it belongs ...\n self.openSet.pop(open_index)\n # ... 
and add the item (Sj, new) to the OPEN SET.\n self.openSet.append(cell)\n # Update the color of the cell\n self.grid[cell.row][cell.col] = self.FRONTIER\n # paint the cell\n self.paint_cell(cell.row, cell.col, \"BLUE\")\n # ... if already belongs to the CLOSED SET, then ...\n elif cell in self.closedSet:\n closed_index = self.closedSet.index(cell)\n # ... compare the new value assessment with the old one.\n # If old <= new ...\n if self.closedSet[closed_index].f <= cell.f:\n # ... then eject the new node with state Sj.\n # (ie do nothing for this node).\n pass\n # Else, ...\n else:\n # ... remove the element (Sj, old) from the list\n # to which it belongs ...\n self.closedSet.pop(closed_index)\n # ... and add the item (Sj, new) to the OPEN SET.\n self.openSet.append(cell)\n # Update the color of the cell\n self.grid[cell.row][cell.col] = self.FRONTIER\n # paint the cell\n self.paint_cell(cell.row, cell.col, \"BLUE\")\n\n def create_successors(self, current, make_connected):\n \"\"\"\n Creates the successors of a state/cell\n\n :param current: the cell for which we ask successors\n :param make_connected: flag that indicates that we are interested only on the coordinates\n of cells and not on the label 'dist' (concerns only Dijkstra's)\n :return: the successors of the cell as a list\n \"\"\"\n r = current.row\n c = current.col\n # We create an empty list for the successors of the current cell.\n temp = []\n # With diagonal movements priority is:\n # 1: Up 2: Up-right 3: Right 4: Down-right\n # 5: Down 6: Down-left 7: Left 8: Up-left\n\n # Without diagonal movements the priority is:\n # 1: Up 2: Right 3: Down 4: Left\n\n # If not at the topmost limit of the grid\n # and the up-side cell is not an obstacle\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor to the CLOSED SET ...\n if r > 0 and self.grid[r-1][c] != self.OBST and\\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r-1, c) in self.openSet and not self.Cell(r-1, c) in self.closedSet)):\n cell = self.Cell(r-1, c)\n # In the case of Dijkstra's algorithm we can not append to\n # the list of successors the \"naked\" cell we have just created.\n # The cell must be accompanied by the label 'dist',\n # so we need to track it down through the list 'graph'\n # and then copy it back to the list of successors.\n # The flag makeConnected is necessary to be able\n # the present method create_succesors() to collaborate\n # with the method find_connected_component(), which creates\n # the connected component when Dijkstra's initializes.\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the up-side cell so it points the current one ...\n cell.prev = current\n # ... 
and add the up-side cell to the successors of the current one.\n temp.append(cell)\n\n if self.diagonal.get():\n # If we are not even at the topmost nor at the rightmost border of the grid\n # and the up-right-side cell is not an obstacle\n # and one of the upper side or right side cells are not obstacles\n # (because it is not reasonable to allow the robot to pass through a \"slot\")\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor CLOSED SET ...\n if r > 0 and c < self.columns-1 and self.grid[r-1][c+1] != self.OBST and \\\n (self.grid[r-1][c] != self.OBST or self.grid[r][c+1] != self.OBST) and \\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r-1, c+1) in self.openSet and not self.Cell(r-1, c+1) in self.closedSet)):\n cell = self.Cell(r-1, c+1)\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the up-right-side cell so it points the current one ...\n cell.prev = current\n # ... and add the up-right-side cell to the successors of the current one.\n temp.append(cell)\n\n # If not at the rightmost limit of the grid\n # and the right-side cell is not an obstacle ...\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor to the CLOSED SET ...\n if c < self.columns-1 and self.grid[r][c+1] != self.OBST and\\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r, c+1) in self.openSet and not self.Cell(r, c+1) in self.closedSet)):\n cell = self.Cell(r, c+1)\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the right-side cell so it points the current one ...\n cell.prev = current\n # ... and add the right-side cell to the successors of the current one.\n temp.append(cell)\n\n if self.diagonal.get():\n # If we are not even at the lowermost nor at the rightmost border of the grid\n # and the down-right-side cell is not an obstacle\n # and one of the down-side or right-side cells are not obstacles\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor to the CLOSED SET ...\n if r < self.rows-1 and c < self.columns-1 and self.grid[r+1][c+1] != self.OBST and \\\n (self.grid[r+1][c] != self.OBST or self.grid[r][c+1] != self.OBST) and \\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r+1, c+1) in self.openSet and not self.Cell(r+1, c+1) in self.closedSet)):\n cell = self.Cell(r+1, c+1)\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the downr-right-side cell so it points the current one ...\n cell.prev = current\n # ... 
and add the down-right-side cell to the successors of the current one.\n temp.append(cell)\n\n # If not at the lowermost limit of the grid\n # and the down-side cell is not an obstacle\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor to the CLOSED SET ...\n if r < self.rows-1 and self.grid[r+1][c] != self.OBST and \\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r+1, c) in self.openSet and not self.Cell(r+1, c) in self.closedSet)):\n cell = self.Cell(r+1, c)\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the down-side cell so it points the current one ...\n cell.prev = current\n # ... and add the down-side cell to the successors of the current one.\n temp.append(cell)\n\n if self.diagonal.get():\n # If we are not even at the lowermost nor at the leftmost border of the grid\n # and the down-left-side cell is not an obstacle\n # and one of the down-side or left-side cells are not obstacles\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor to the CLOSED SET ...\n if r < self.rows-1 and c > 0 and self.grid[r+1][c-1] != self.OBST and \\\n (self.grid[r+1][c] != self.OBST or self.grid[r][c-1] != self.OBST) and \\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r+1, c-1) in self.openSet and not self.Cell(r+1, c-1) in self.closedSet)):\n cell = self.Cell(r+1, c-1)\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the down-left-side cell so it points the current one ...\n cell.prev = current\n # ... and add the down-left-side cell to the successors of the current one.\n temp.append(cell)\n\n # If not at the leftmost limit of the grid\n # and the left-side cell is not an obstacle\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor to the CLOSED SET ...\n if c > 0 and self.grid[r][c-1] != self.OBST and \\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r, c-1) in self.openSet and not self.Cell(r, c-1) in self.closedSet)):\n cell = self.Cell(r, c-1)\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the left-side cell so it points the current one ...\n cell.prev = current\n # ... 
and add the left-side cell to the successors of the current one.\n temp.append(cell)\n\n if self.diagonal.get():\n # If we are not even at the topmost nor at the leftmost border of the grid\n # and the up-left-side cell is not an obstacle\n # and one of the up-side or left-side cells are not obstacles\n # and (only in the case are not running the A* or Greedy)\n # not already belongs neither to the OPEN SET nor to the CLOSED SET ...\n if r > 0 and c > 0 and self.grid[r-1][c-1] != self.OBST and \\\n (self.grid[r-1][c] != self.OBST or self.grid[r][c-1] != self.OBST) and \\\n (self.selected_algo in [\"A*\", \"Greedy\", \"Dijkstra\"] or\n (self.selected_algo in [\"DFS\", \"BFS\"]\n and not self.Cell(r-1, c-1) in self.openSet and not self.Cell(r-1, c-1) in self.closedSet)):\n cell = self.Cell(r-1, c-1)\n if self.selected_algo == \"Dijkstra\":\n if make_connected:\n temp.append(cell)\n elif cell in self.graph:\n graph_index = self.graph.index(cell)\n temp.append(self.graph[graph_index])\n else:\n # ... update the pointer of the up-left-side cell so it points the current one ...\n cell.prev = current\n # ... and add the up-left-side cell to the successors of the current one.\n temp.append(cell)\n\n # When DFS algorithm is in use, cells are added one by one at the beginning of the\n # OPEN SET list. Because of this, we must reverse the order of successors formed,\n # so the successor corresponding to the highest priority, to be placed the first in the list.\n # For the Greedy, A* and Dijkstra's no issue, because the list is sorted\n # according to 'f' or 'dist' before extracting the first element of.\n if self.selected_algo == \"DFS\":\n return reversed(temp)\n else:\n return temp\n\n def dist_between(self, u, v):\n \"\"\"\n Returns the distance between two cells\n\n :param u: the first cell\n :param v: the other cell\n :return: the distance between the cells u and v\n \"\"\"\n dx = u.col - v.col\n dy = u.row - v.row\n if self.diagonal.get():\n # with diagonal movements calculate the Euclidean distance\n return math.sqrt(dx*dx + dy*dy)\n else:\n # without diagonal movements calculate the Manhattan distance\n return abs(dx) + abs(dy)\n\n def plot_route(self):\n \"\"\"\n Calculates the path from the target to the initial position of the robot,\n counts the corresponding steps and measures the distance traveled.\n \"\"\"\n self.repaint()\n self.searching = False\n steps = 0\n distance = 0.0\n index = self.closedSet.index(self.targetPos)\n cur = self.closedSet[index]\n self.grid[cur.row][cur.col] = self.TARGET\n self.paint_cell(cur.row, cur.col, \"GREEN\")\n while cur != self.robotStart:\n steps += 1\n if self.diagonal.get():\n dx = cur.col - cur.prev.col\n dy = cur.row - cur.prev.row\n distance += math.sqrt(dx*dx + dy*dy)\n else:\n distance += 1\n cur = cur.prev\n self.grid[cur.row][cur.col] = self.ROUTE\n self.paint_cell(cur.row, cur.col, \"YELLOW\")\n\n self.grid[self.robotStart.row][self.robotStart.col] = self.ROBOT\n self.paint_cell(self.robotStart.row, self.robotStart.col, \"RED\")\n\n if self.drawArrows.get():\n self.draw_arrows()\n\n msg = \"Nodes expanded: {0}, Steps: {1}, Distance: {2:.3f}\".format(self.expanded, steps, distance)\n self.message.configure(text=msg)\n\n def find_connected_component(self, v):\n \"\"\"\n Appends to the list containing the nodes of the graph only\n the cells belonging to the same connected component with node v.\n\n :param v: the starting node\n \"\"\"\n # This is a Breadth First Search of the graph starting from node v.\n stack = [v]\n self.graph.append(v)\n 
while stack:\n v = stack.pop()\n successors = self.create_successors(v, True)\n for c in successors:\n if c not in self.graph:\n stack.append(c)\n self.graph.append(c)\n\n def initialize_dijkstra(self):\n \"\"\"\n Initialization of Dijkstra's algorithm\n \"\"\"\n # When one thinks of Wikipedia pseudocode, observe that the\n # algorithm is still looking for his target while there are still\n # nodes in the queue Q.\n # Only when we run out of queue and the target has not been found,\n # can answer that there is no solution.\n # As is known, the algorithm models the problem as a connected graph\n # It is obvious that no solution exists only when the graph is not\n # connected and the target is in a different connected component\n # of this initial position of the robot\n # To be thus possible negative response from the algorithm,\n # should search be made ONLY in the coherent component to which the\n # initial position of the robot belongs.\n\n # First create the connected component\n # to which the initial position of the robot belongs.\n self.graph.clear()\n self.find_connected_component(self.robotStart)\n # Here is the initialization of Dijkstra's algorithm\n # 2: for each vertex v in Graph;\n for v in self.graph:\n # 3: dist[v] := infinity ;\n v.dist = self.INFINITY\n # 5: previous[v] := undefined ;\n v.prev = None\n # 8: dist[source] := 0;\n self.graph[self.graph.index(self.robotStart)].dist = 0\n # 9: Q := the set of all nodes in Graph;\n # Instead of the variable Q we will use the list\n # 'graph' itself, which has already been initialised.\n\n # Sorts the list of nodes with respect to 'dist'.\n self.graph.sort(key=attrgetter(\"dist\"))\n # Initializes the list of closed nodes\n self.closedSet.clear()\n\n def draw_arrows(self):\n \"\"\"\n Draws the arrows to predecessors\n \"\"\"\n # We draw black arrows from each open or closed state to its predecessor.\n for r in range(self.rows):\n for c in range(self.columns):\n tail = head = cell = self.Cell(r, c)\n # If the current cell is an open state, or is a closed state\n # but not the initial position of the robot\n if self.grid[r][c] in [self.FRONTIER, self.CLOSED] and not cell == self.robotStart:\n # The tail of the arrow is the current cell, while\n # the arrowhead is the predecessor cell.\n if self.grid[r][c] == self.FRONTIER:\n if self.selected_algo == \"Dijkstra\":\n tail = self.graph[self.graph.index(cell)]\n head = tail.prev\n else:\n tail = self.openSet[self.openSet.index(cell)]\n head = tail.prev\n elif self.grid[r][c] == self.CLOSED:\n tail = self.closedSet[self.closedSet.index(cell)]\n head = tail.prev\n\n self.draw_arrow(tail, head, self.arrow_size, \"BLACK\", 2 if self.square_size >= 25 else 1)\n\n if self.found:\n # We draw red arrows along the path from robotStart to targetPos.\n # index = self.closedSet.index(self.targetPos)\n cur = self.closedSet[self.closedSet.index(self.targetPos)]\n while cur != self.robotStart:\n head = cur\n cur = cur.prev\n tail = cur\n self.draw_arrow(tail, head, self.arrow_size, \"RED\", 2 if self.square_size >= 25 else 1)\n\n def draw_arrow(self, tail, head, a, color, width):\n \"\"\"\n Draws an arrow from center of tail cell to center of head cell\n\n :param tail: the tail of the arrow\n :param head: the head of the arrow\n :param a: size of arrow tips\n :param color: color of the arrow\n :param width: thickness of the lines\n \"\"\"\n # The coordinates of the center of the tail cell\n x1 = 1 + tail.col * self.square_size + self.square_size / 2\n y1 = 1 + tail.row * self.square_size + 
self.square_size / 2\n # The coordinates of the center of the head cell\n x2 = 1 + head.col * self.square_size + self.square_size / 2\n y2 = 1 + head.row * self.square_size + self.square_size / 2\n\n sin20 = math.sin(20*math.pi/180)\n cos20 = math.cos(20*math.pi/180)\n sin25 = math.sin(25*math.pi/180)\n cos25 = math.cos(25*math.pi/180)\n u3 = v3 = u4 = v4 = 0\n\n if x1 == x2 and y1 > y2: # up\n u3 = x2 - a*sin20\n v3 = y2 + a*cos20\n u4 = x2 + a*sin20\n v4 = v3\n elif x1 < x2 and y1 > y2: # up-right\n u3 = x2 - a*cos25\n v3 = y2 + a*sin25\n u4 = x2 - a*sin25\n v4 = y2 + a*cos25\n elif x1 < x2 and y1 == y2: # right\n u3 = x2 - a*cos20\n v3 = y2 - a*sin20\n u4 = u3\n v4 = y2 + a*sin20\n elif x1 < x2 and y1 < y2: # righr-down\n u3 = x2 - a*cos25\n v3 = y2 - a*sin25\n u4 = x2 - a*sin25\n v4 = y2 - a*cos25\n elif x1 == x2 and y1 < y2: # down\n u3 = x2 - a*sin20\n v3 = y2 - a*cos20\n u4 = x2 + a*sin20\n v4 = v3\n elif x1 > x2 and y1 < y2: # left-down\n u3 = x2 + a*sin25\n v3 = y2 - a*cos25\n u4 = x2 + a*cos25\n v4 = y2 - a*sin25\n elif x1 > x2 and y1 == y2: # left\n u3 = x2 + a*cos20\n v3 = y2 - a*sin20\n u4 = u3\n v4 = y2 + a*sin20\n elif x1 > x2 and y1 > y2: # left-up\n u3 = x2 + a*sin25\n v3 = y2 + a*cos25\n u4 = x2 + a*cos25\n v4 = y2 + a*sin25\n\n self.canvas.create_line(x1, y1, x2, y2, fill=color, width=width)\n self.canvas.create_line(x2, y2, u3, v3, fill=color, width=width)\n self.canvas.create_line(x2, y2, u4, v4, fill=color, width=width)\n\n @staticmethod\n def center(window):\n \"\"\"\n Places a window at the center of the screen\n \"\"\"\n window.update_idletasks()\n w = window.winfo_screenwidth()\n h = window.winfo_screenheight()\n size = tuple(int(_) for _ in window.geometry().split('+')[0].split('x'))\n x = w / 2 - size[0] / 2\n y = h / 2 - size[1] / 2\n window.geometry(\"%dx%d+%d+%d\" % (size + (x, y)))\n\n @staticmethod\n def source_code_callback(self):\n webbrowser.open_new(r\"https://goo.gl/tRaLfe\")\n\n @staticmethod\n def video_callback(self):\n webbrowser.open_new(r\"https://youtu.be/7GLqy61X2oU\")\n\n\ndef on_closing():\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n os._exit(0)\n\n\nif __name__ == '__main__':\n app = Tk()\n app.protocol(\"WM_DELETE_WINDOW\", on_closing)\n app.title(\"Maze 5.1\")\n app.geometry(\"693x545\")\n app.resizable(False, False)\n Maze51(app)\n app.mainloop()\n"
] |
[
[
"numpy.array"
]
] |
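Below is a minimal, self-contained sketch (not taken from the Maze file above) of the Dijkstra expansion that file implements by re-sorting its node list on 'dist' after every relaxation; here a heapq stands in for that decrease-key step. The 5x5 grid, start, and target are invented for illustration.

import heapq
import math

OBST = 1
grid = [[0, 0, 0, 0, 0],
        [0, 1, 1, 1, 0],
        [0, 0, 0, 1, 0],
        [1, 1, 0, 1, 0],
        [0, 0, 0, 0, 0]]
start, target = (0, 0), (4, 4)
rows, cols = len(grid), len(grid[0])

def neighbors(r, c):
    # Up, right, down, left -- the non-diagonal priority used by the Maze code.
    for dr, dc in ((-1, 0), (0, 1), (1, 0), (0, -1)):
        nr, nc = r + dr, c + dc
        if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] != OBST:
            yield nr, nc

dist = {start: 0.0}
prev = {}
heap = [(0.0, start)]
while heap:
    d, u = heapq.heappop(heap)
    if d > dist.get(u, math.inf):
        continue  # stale heap entry; popping the smallest replaces the list re-sort
    if u == target:
        break
    for v in neighbors(*u):
        alt = d + 1.0  # Manhattan step cost, as in dist_between() without diagonals
        if alt < dist.get(v, math.inf):
            dist[v], prev[v] = alt, u
            heapq.heappush(heap, (alt, v))

# Walk the predecessor pointers back from the target, as plot_route() does.
path, cur = [target], target
while cur != start:
    cur = prev[cur]
    path.append(cur)
print(list(reversed(path)), dist[target])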
MLforHealth/ClinicalDG
|
[
"2de4a8e155231f07d80036504a6f49b50004654e"
] |
[
"clinicaldg/eicu/data.py"
] |
[
"import pandas as pd\npd.options.mode.chained_assignment = None\nimport numpy as np\nfrom clinicaldg.eicu.data_extraction.data_extraction_mortality import data_extraction_mortality\nimport clinicaldg.eicu.Constants as Constants\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom torch.utils.data import ConcatDataset, Dataset\n\nhospitals = pd.read_csv((Constants.eicu_dir/'hospital.csv'))\nhospitals['region'] = hospitals['region'].fillna('Missing')\npatients = pd.read_csv((Constants.eicu_dir/'patient.csv'))[['patientunitstayid', 'hospitalid', 'gender']]\n\nclass LabelEncoderExt(object):\n '''\n Label encoder, but when encountering an unseen label on the test set, will set to \"Missing\"\n ''' \n def __init__(self):\n self.label_encoder = LabelEncoder()\n\n def fit(self, data_list):\n self.label_encoder = self.label_encoder.fit(list(map(str, list(data_list))) + ['Missing'])\n self.classes_ = self.label_encoder.classes_\n \n return self\n\n def transform(self, data_list):\n data_list = list(map(str, list(data_list)))\n for unique_item in np.unique(data_list):\n if unique_item not in self.label_encoder.classes_:\n data_list = ['Missing' if x==unique_item else x for x in data_list]\n\n return self.label_encoder.transform(data_list)\n\nclass AugmentedDataset():\n def __init__(self, augs = [], train_pct = 0.7, val_pct = 0.1): \n self.reg_mort, self.reg_pat, self.scalers, self.labelencoders = self._get_mortality_data(train_pct, val_pct) \n for a in augs:\n a.augment(self.reg_mort, self.reg_pat) \n \n def get_torch_dataset(self, envs, dset):\n '''\n envs: a list of region names\n dset: one of ['train', 'val', 'test']. For the test environment, use \"test\" for dset\n '''\n \n datasets = []\n for r in envs:\n datasets.append(eICUDataset(self.reg_mort[r][self.reg_mort[r]['fold'] == dset], self.reg_pat[r][self.reg_pat[r]['fold'] == dset]))\n \n return ConcatDataset(datasets) \n \n def get_num_levels(self): \n return ({i: len(self.labelencoders[i].classes_) for i in Constants.ts_cat_features}, \n {i: len(self.labelencoders[i].classes_) for i in Constants.static_cat_features}, \n ) \n \n def _get_mortality_data(self, train_pct, val_pct):\n mort_df = data_extraction_mortality(str(Constants.benchmark_dir))\n\n targets = mort_df.groupby('patientunitstayid').agg({'hospitaldischargestatus': 'first'}).reset_index()\n pat_df = pd.merge(patients, hospitals, on = 'hospitalid', how = 'left')\n pat_df = pd.merge(pat_df, targets, on = 'patientunitstayid', how = 'inner').rename(columns = {'hospitaldischargestatus': 'target'})\n \n pat_df = pat_df[pat_df.patientunitstayid.isin(mort_df.patientunitstayid)].sample(frac = 1) # shuffle \n pat_df['fold'] = ''\n pat_df['fold'].iloc[:int(len(pat_df)*train_pct)] = 'train'\n pat_df['fold'].iloc[int(len(pat_df)*train_pct):int(len(pat_df)*(train_pct + val_pct))] = 'val'\n pat_df['fold'].iloc[int(len(pat_df)*(train_pct + val_pct)):] = 'test'\n\n mort_df = mort_df.merge(pat_df[['patientunitstayid', 'fold']], on = 'patientunitstayid')\n \n # make sure everyone has exactly 48h hours of data\n ## make multiindex with 48h\n ## groupby and ffill\n ## fill any remaining missing features with normal_values\n iterables = [np.unique(mort_df['patientunitstayid']), list(range(1, mort_df.itemoffset.max()+1))]\n multiind = pd.MultiIndex.from_product(iterables, names = ['patientunitstayid', 'itemoffset'])\n ind_df = pd.DataFrame(index = multiind)\n mort_df = pd.merge(ind_df, mort_df, left_index = True, right_on = ['patientunitstayid', 'itemoffset'], how = 'left')\n \n mort_df 
= mort_df.set_index(['patientunitstayid', 'itemoffset']).sort_index().groupby('patientunitstayid').ffill()\n \n for back_col in ['hospitaldischargestatus', 'fold'] + Constants.static_cont_features + Constants.static_cat_features:\n mort_df[back_col] = mort_df[back_col].fillna(method = 'backfill') \n\n for feat, val in Constants.normal_values.items():\n mort_df[feat] = mort_df[feat].fillna(val) \n \n # scale continuous and static ts features\n scalers = {} \n for feat in Constants.ts_cont_features + Constants.static_cont_features:\n scalers[feat] = StandardScaler().fit(mort_df.loc[mort_df.fold == 'train', feat].values.reshape(-1, 1))\n mort_df[feat] = scalers[feat].transform(mort_df[feat].values.reshape(-1, 1))[:, 0]\n \n # encode continuous and static cat features \n labelencoders, num_encodings = {}, {}\n for feat in Constants.ts_cat_features + Constants.static_cat_features:\n mort_df[feat] = mort_df[feat].fillna('Missing')\n labelencoders[feat] = LabelEncoderExt().fit(mort_df.loc[mort_df.fold == 'train', feat])\n mort_df[feat] = labelencoders[feat].transform(mort_df[feat])\n num_encodings[feat] = len(labelencoders[feat].classes_)\n \n reg_mort, reg_pat = {}, {}\n for reg in pat_df.region.unique():\n sub_pat = pat_df[pat_df.region == reg]\n sub = mort_df[mort_df.index.get_level_values(0).isin(sub_pat.patientunitstayid)]\n\n reg_mort[reg] = sub\n reg_pat[reg] = sub_pat.set_index('patientunitstayid')\n\n return reg_mort, reg_pat, scalers, labelencoders\n \nclass eICUDataset(Dataset):\n def __init__(self, mort_df, pat_df):\n self.mort_df = mort_df\n self.pat_df = pat_df\n \n def __len__(self):\n return self.pat_df.shape[0]\n \n def __getitem__(self, idx):\n pat_id = self.pat_df.index[idx]\n mort_data = self.mort_df.loc[pat_id]\n ts_cont_feats = mort_data[Constants.ts_cont_features].values\n ts_cat_feats = mort_data[Constants.ts_cat_features].values\n \n static_not_in_mort = [i for i in Constants.static_cont_features if i not in self.mort_df]\n static_in_mort = [i for i in Constants.static_cont_features if i in self.mort_df]\n \n static_cont_feats = np.concatenate((mort_data[static_in_mort].iloc[0].values, self.pat_df.loc[pat_id, static_not_in_mort].values)).astype(float)\n static_cat_feats = mort_data[Constants.static_cat_features].iloc[0].values \n \n return ({'pat_id': pat_id,\n 'ts_cont_feats': ts_cont_feats,\n 'ts_cat_feats': ts_cat_feats,\n 'static_cont_feats': static_cont_feats,\n 'static_cat_feats': static_cat_feats,\n 'gender': int(self.pat_df.loc[pat_id, 'gender'].strip() == 'Male')}, \n self.pat_df.loc[pat_id, 'target'])"
] |
[
[
"torch.utils.data.ConcatDataset",
"numpy.concatenate",
"sklearn.preprocessing.LabelEncoder",
"pandas.merge",
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame",
"pandas.MultiIndex.from_product",
"pandas.read_csv",
"numpy.unique"
]
] |
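The LabelEncoderExt class in clinicaldg/eicu/data.py above exists because sklearn's LabelEncoder raises on labels it never saw during fit. A standalone sketch of that pattern, with toy region labels invented for the example:

from sklearn.preprocessing import LabelEncoder

train_labels = ['Midwest', 'South', 'West']
test_labels = ['South', 'Northeast', 'West']   # 'Northeast' never seen in training

# A plain LabelEncoder would raise ValueError on 'Northeast':
# LabelEncoder().fit(train_labels).transform(test_labels)

# The pattern used in the file: fit with an explicit 'Missing' class,
# then rewrite any unseen test value to 'Missing' before transforming.
le = LabelEncoder().fit(list(map(str, train_labels)) + ['Missing'])
cleaned = [x if x in le.classes_ else 'Missing' for x in map(str, test_labels)]
print(le.classes_)            # ['Midwest' 'Missing' 'South' 'West']
print(le.transform(cleaned))  # [2 1 3]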
dblenkus/performance
|
[
"bae6105812c2f2414d0c10ddd465bf589503f61a"
] |
[
"src/lactolyse/analyses/lactate_threshold.py"
] |
[
"\"\"\"Lactate threshold analysis.\"\"\"\nimport logging\n\nimport numpy as np\n\nfrom .base import BaseAnalysis\nfrom .utils import FittedPolynomial\n\nlogger = logging.getLogger(__name__)\n\n\nclass LactateThresholdAnalyses(BaseAnalysis):\n \"\"\"Lactate threshold analysis.\"\"\"\n\n name = 'lactate_threshold'\n template = 'lactate_treshold.tex'\n\n def _calculate_dmax_context(self, inputs, lac_poly, hr_poly):\n \"\"\"Calculate context for d-max method.\"\"\"\n # If polynomial has a local minimum on the interval (of\n # measurments), take it as a minimum value.\n if lac_poly.deriv_roots.size:\n # If there are two roots, we know (based on the shape of the\n # polynomial) that first one is maximum and second one is\n # minimum.\n min_x = max(lac_poly.deriv_roots)\n\n # If the minimum is not on the interval (or it doesn't exist),\n # we check for the inflection point and take it if it exists.\n elif lac_poly.deriv_roots.size:\n # Second derivation of third degree polynomial have exactly\n # one root (the question is only if it is on the interval).\n min_x = lac_poly.second_deriv_roots[0]\n\n # If both conditions are false, we can just take the start of\n # the interval, as we know that it is the \"most flat\" part of\n # the polynomial on the interval.\n else:\n min_x = lac_poly.min_x\n\n max_x = lac_poly.max_x\n\n # Find the point where polynomial starts to raise - threshold is\n # 0.3 - and take only real roots (hopefully there is only one).\n roots = np.roots(lac_poly.poly - (lac_poly.poly(min_x) + 0.3))\n roots = roots[np.logical_and(np.isreal(roots), roots > min_x, roots < max_x)]\n start_x = max(roots).real\n\n # Calculate the vector cross product.\n v_x = np.poly1d(max_x - start_x)\n v_y = np.poly1d(lac_poly.poly(max_x) - lac_poly.poly(start_x))\n u_x = np.poly1d([1, -start_x])\n u_y = lac_poly.poly - lac_poly.poly(start_x)\n cross_z = v_x * u_y - v_y * u_x\n\n ftp = np.roots(cross_z.deriv())\n ftp = ftp[np.logical_and(ftp > start_x, ftp < max_x)]\n ftp = ftp[0]\n\n return {\n 'power': ftp,\n 'start_point': [start_x, lac_poly.poly(start_x)],\n 'end_point': [max_x, lac_poly.poly(max_x)],\n 'start_hr': hr_poly.poly(start_x),\n 'heart_rate': hr_poly.poly(ftp),\n 'lactate': lac_poly.poly(ftp),\n }\n\n def _calculate_cross_context(self, inputs, lac_poly, hr_poly):\n \"\"\"Calculate context for cross method.\"\"\"\n if lac_poly.deriv_roots.size:\n start_point = min(lac_poly.deriv_roots)\n\n else:\n start_point = inputs['power'][0]\n\n max_x = lac_poly.max_x\n\n start_line = np.poly1d(\n np.polyfit(\n [start_point, start_point + 5],\n [lac_poly.poly(start_point), lac_poly.poly(start_point + 5)],\n 1,\n )\n )\n end_line = np.poly1d(\n np.polyfit(\n [max_x - 5, max_x], [lac_poly.poly(max_x - 5), lac_poly.poly(max_x)], 1\n )\n )\n\n cross = np.roots(start_line - end_line)\n power = cross[0]\n\n return {\n 'power': power,\n 'start_point': [start_point, lac_poly.poly(start_point)],\n 'end_point': [inputs['power'][-1], lac_poly.poly(inputs['power'][-1])],\n 'cross': [power, start_line(power)],\n 'heart_rate': hr_poly.poly(power),\n 'lactate': lac_poly.poly(power),\n }\n\n def _calculate_at_context(self, inputs, threshold, lac_poly, hr_poly):\n \"\"\"Calculate context for at method.\"\"\"\n roots = np.roots(lac_poly.poly - threshold)\n roots = roots[np.isreal(roots)]\n roots = filter(\n lambda val: inputs['power'][0] < val < inputs['power'][-1], roots\n )\n\n power = list(roots)[0].real\n\n return {\n 'power': power,\n 'heart_rate': hr_poly.poly(power),\n 'lactate': lac_poly.poly(power),\n }\n\n 
def render_context(self, inputs):\n \"\"\"Render the context.\"\"\"\n for attr in ['power', 'heart_rate', 'lactate']:\n if attr not in inputs:\n raise ValueError(\"Missing input '{}'.\".format(attr))\n\n lac_poly = FittedPolynomial(inputs['power'], inputs['lactate'])\n hr_poly = FittedPolynomial(inputs['power'], inputs['heart_rate'])\n\n return {\n 'inputs': inputs,\n 'lac_poly': lac_poly,\n 'dmax': self._calculate_dmax_context(inputs, lac_poly, hr_poly),\n 'cross': self._calculate_cross_context(inputs, lac_poly, hr_poly),\n 'at2': self._calculate_at_context(inputs, 2, lac_poly, hr_poly),\n 'at4': self._calculate_at_context(inputs, 4, lac_poly, hr_poly),\n }\n\n def get_results(self, context):\n \"\"\"Return the result of the analysis.\"\"\"\n return {\n 'dmax': context['dmax']['power'],\n 'cross': context['cross']['power'],\n 'at2': context['at2']['power'],\n 'at4': context['at4']['power'],\n }\n"
] |
[
[
"numpy.roots",
"numpy.isreal",
"numpy.logical_and",
"numpy.poly1d"
]
] |
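A sketch of the numpy polynomial machinery behind _calculate_dmax_context in lactate_threshold.py above. The power/lactate points are synthetic, and the chord is drawn from the first to the last measurement, a simplification of the file's rule that the start point is where lactate has risen 0.3 mmol/L above its minimum.

# Fit a cubic to lactate vs. power, then find the power at which the curve
# is farthest from the chord joining the endpoints (zero of the derivative
# of the cross product, mirroring the construction in the file).
import numpy as np

power   = np.array([100, 140, 180, 220, 260, 300], dtype=float)
lactate = np.array([1.1, 1.2, 1.5, 2.3, 4.0, 7.5])

poly = np.poly1d(np.polyfit(power, lactate, 3))
x0, x1 = power[0], power[-1]

# Vector along the chord and vector from the chord start to the curve.
v_x = np.poly1d(x1 - x0)                 # constant polynomial
v_y = np.poly1d(poly(x1) - poly(x0))     # constant polynomial
u_x = np.poly1d([1, -x0])
u_y = poly - poly(x0)
cross_z = v_x * u_y - v_y * u_x          # scaled perpendicular distance

roots = np.roots(cross_z.deriv())
roots = roots[np.isreal(roots)].real
candidates = roots[np.logical_and(roots > x0, roots < x1)]
dmax_power = candidates[0]
print(round(float(dmax_power), 1), round(float(poly(dmax_power)), 2))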
nabeeltariq2/res-repo
|
[
"faa4277b537e1075fa38d79c1a9fa31b0fd8c3af"
] |
[
"recommender6_slopeone.py"
] |
[
"# from __future__ import absolute_import, division, print_function, unicode_literals\n\n# from surprise import evaluate, print_perf, dump, Reader, Dataset\n#import algorithms from surprise\nfrom surprise import evaluate, print_perf, Reader, Dataset, accuracy\n\n# from surprise import KNNBasic, KNNWithMeans, KNNWithZScore, AlgoBase, SlopeOne, CoClustering, NormalPredictor,NMF, SVD, BaselineOnly\n\nimport time\nstart_time = time.time()\n\nfrom surprise import SlopeOne\n\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine\nnp.random.seed(101)\nfrom collections import defaultdict\nimport os, io, sys\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.orm import relationship, backref\nimport config\n# from surprise import dump\n# from model import add_pageview\n\n\n# algorithm = NMF(n_epochs=10)\n\n\n\n\ndef compute_recommendations(user_id, prediction_table, numeric_prediction_table):\n\n\n algo = 'SlopeOne'\n\n algorithm = SlopeOne()\n\n\n\n # add_pageview(user_id=user_id, item_id=None, page=\"Model Predictions\", activity_type=\"Initialize Predictions - \" + algo, rating=None) #pageview\n\n\n\n engine = create_engine(config.DB_URI, echo=True)\n session = scoped_session(sessionmaker(bind=engine,\n autocommit = False,\n autoflush = False))\n\n\n\n #reading in the database\n\n\n df_ratings = pd.read_sql('SELECT * FROM ratings;', con = engine)\n df_ratings=df_ratings[['user_id','item_id','rating']]\n df_ratings = df_ratings.dropna()\n df_ratings = df_ratings.drop_duplicates()\n\n\n df_ratings2 = pd.read_csv('data/ratings.csv', low_memory=False)\n df_ratings2 = df_ratings2.rename(columns = {'movie_id': 'item_id'})\n df_ratings2 = df_ratings2[['user_id','item_id','rating']]\n df_ratings2 = df_ratings2.dropna()\n df_ratings2 = df_ratings2.drop_duplicates()\n\n df_ratings = pd.concat([df_ratings, df_ratings2], axis=0)\n\n\n\n\n reader = Reader(line_format='user item rating', sep=',', rating_scale=(1, 10))\n data = Dataset.load_from_df(df_ratings, reader=reader)\n\n trainset = data.build_full_trainset()\n\n\n# algorithm = eval(algo + \"()\")# set the algorithm...............................................\n\n\n algorithm.train(trainset)\n\n items = pd.read_sql('SELECT distinct id FROM items;', con = engine)\n df_user_items = df_ratings.loc[df_ratings['user_id'] == user_id]\n total_items = items.id.unique()\n user_items = df_user_items.item_id.unique()\n # user_id = str(user_id)\n prediction_items = [x for x in total_items if x not in user_items]\n\n predictions = pd.DataFrame(columns=['user_id', 'item_id', 'prediction'])\n\n\n predicted_ratings = []\n\n for i in prediction_items:\n a = user_id\n b = i\n est = algorithm.predict(a, b)\n predicted_ratings.append(est[3])\n\n predictions['item_id'] = prediction_items\n predictions['user_id'] = pd.Series([user_id for x in range(len(predictions.index))], index=predictions.index)\n\n\n predictions['prediction'] = predicted_ratings\n\n\n predictions = predictions.sort_values('prediction', ascending=False)\n test_prediction = predictions\n predictions = predictions.head(n=10)\n\n\n cols =['pred_1', 'pred_2','pred_3','pred_4',\n 'pred_5','pred_6','pred_7','pred_8',\n 'pred_9','pred_10']\n\n\n\n\n df_pred = predictions[['item_id']].T\n\n df_pred.columns = cols\n\n df_pred['id'] = user_id\n\n\n\n df_pred = df_pred[['id','pred_1', 'pred_2','pred_3','pred_4',\n 'pred_5','pred_6','pred_7','pred_8',\n 'pred_9','pred_10']]\n\n df_pred['id'] = df_pred['id'].astype(int)\n\n\n\n 
df_pred.to_sql(prediction_table, engine,if_exists='append', index=False)#if_exists='append'\n session.commit()\n\n\n df_num_ratings = test_prediction\n\n df_num_ratings = df_num_ratings.head(n=20)\n\n df_num_ratings['algorithm'] = algo\n df_num_ratings.rename(columns={'prediction':'predicted_rating'}, inplace=True)\n\n\n df_num_ratings.to_sql('numeric_predictions',engine,if_exists='append', index=False)#if_exists='append'\n session.commit()\n\n\n predcols =['num_1', 'num_2','num_3','num_4',\n 'num_5','num_6','num_7','num_8',\n 'num_9','num_10']\n\n df_num_ratings_transpose = predictions[['prediction']].T\n df_num_ratings_transpose.columns = predcols\n\n\n\n\n df_num_ratings_transpose['id'] = user_id\n\n df_num_ratings_transpose = df_num_ratings_transpose[['id','num_1', 'num_2','num_3','num_4',\n 'num_5','num_6','num_7','num_8',\n 'num_9','num_10']]\n\n df_num_ratings_transpose['id'] = df_num_ratings_transpose['id'].astype(int)\n\n\n\n\n\n\n\n\n df_num_ratings_transpose.to_sql(numeric_prediction_table,engine,if_exists='append', index=False)#if_exists='append'\n session.commit()\n\n\n\n\n # add_pageview(user_id=user_id, item_id=None, page=\"Model Predictions\", activity_type=\"Finish Computing Predictions - \" + algo, rating=None) #pageview\n"
] |
[
[
"numpy.random.seed",
"pandas.DataFrame",
"pandas.concat",
"pandas.read_sql",
"pandas.read_csv"
]
] |
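A compact sketch of the predict-and-rank loop in recommender6_slopeone.py above, with a tiny in-memory ratings frame standing in for the SQL tables and CSV; user and item ids are invented. Note the original calls algorithm.train(trainset), which newer scikit-surprise releases spell fit().

import pandas as pd
from surprise import Dataset, Reader, SlopeOne

ratings = pd.DataFrame({
    'user_id': [1, 1, 1, 2, 2, 2, 3, 3, 3],
    'item_id': [10, 11, 12, 10, 13, 14, 11, 12, 13],
    'rating':  [8, 6, 9, 7, 5, 6, 9, 4, 6],
})

reader = Reader(rating_scale=(1, 10))
data = Dataset.load_from_df(ratings[['user_id', 'item_id', 'rating']], reader)
trainset = data.build_full_trainset()

algorithm = SlopeOne()
algorithm.fit(trainset)   # older surprise versions spell this .train()

user_id = 1
seen = set(ratings.loc[ratings.user_id == user_id, 'item_id'])
candidates = [i for i in ratings.item_id.unique() if i not in seen]

# predict() returns a Prediction namedtuple; .est is the estimated rating
# (the original file reads the same value positionally as est[3]).
preds = sorted(((i, algorithm.predict(user_id, i).est) for i in candidates),
               key=lambda t: t[1], reverse=True)
print(preds[:10])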
mehdikuchi/mne-python
|
[
"864426c4839bab05fd0d142ee20938c336c0b78e",
"b8f5e5ce0da8acfeb7298c8eb1d26a75d5526eac"
] |
[
"tutorials/misc/plot_ecog.py",
"mne/viz/misc.py"
] |
[
"\"\"\"\n.. _tut_working_with_ecog:\n\n======================\nWorking with ECoG data\n======================\n\nMNE supports working with more than just MEG and EEG data. Here we show some\nof the functions that can be used to facilitate working with\nelectrocorticography (ECoG) data.\n\"\"\"\n# Authors: Eric Larson <larson.eric.d@gmail.com>\n# Chris Holdgraf <choldgraf@gmail.com>\n# Adam Li <adam2392@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nimport mne\nfrom mne.viz import plot_alignment, snapshot_brain_montage\n\nprint(__doc__)\n\n# paths to mne datasets - sample ECoG and FreeSurfer subject\nmisc_path = mne.datasets.misc.data_path()\nsample_path = mne.datasets.sample.data_path()\nsubject = 'sample'\nsubjects_dir = sample_path + '/subjects'\n\n###############################################################################\n# Let's load some ECoG electrode locations and names, and turn them into\n# a :class:`mne.channels.DigMontage` class.\n# First, use pandas to read in the .tsv file.\n\n# In this tutorial, the electrode coordinates are assumed to be in meters\nelec_df = pd.read_csv(misc_path + '/ecog/sample_ecog_electrodes.tsv',\n sep='\\t', header=0, index_col=None)\nch_names = elec_df['name'].tolist()\nch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float)\nch_pos = dict(zip(ch_names, ch_coords))\n# Ideally the nasion/LPA/RPA will also be present from the digitization, here\n# we use fiducials estimated from the subject's FreeSurfer MNI transformation:\nlpa, nasion, rpa = mne.coreg.get_mni_fiducials(\n subject, subjects_dir=subjects_dir)\nlpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r']\n\n###############################################################################\n# Now we make a :class:`mne.channels.DigMontage` stating that the ECoG\n# contacts are in the FreeSurfer surface RAS (i.e., MRI) coordinate system.\n\nmontage = mne.channels.make_dig_montage(\n ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa)\nprint('Created %s channel positions' % len(ch_names))\n\n###############################################################################\n# Now we get the :term:`trans` that transforms from our MRI coordinate system\n# to the head coordinate frame. 
This transform will be applied to the\n# data when applying the montage so that standard plotting functions like\n# :func:`mne.viz.plot_evoked_topomap` will be aligned properly.\n\ntrans = mne.channels.compute_native_head_t(montage)\nprint(trans)\n\n###############################################################################\n# Now that we have our montage, we can load in our corresponding\n# time-series data and set the montage to the raw data.\n\n# first we'll load in the sample dataset\nraw = mne.io.read_raw_edf(misc_path + '/ecog/sample_ecog.edf')\n\n# drop bad channels\nraw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names])\nraw.load_data()\nraw.drop_channels(raw.info['bads'])\nraw.crop(0, 2) # just process 2 sec of data for speed\n\n# attach montage\nraw.set_montage(montage)\n\n# set channel types to ECoG (instead of EEG)\nraw.set_channel_types({ch_name: 'ecog' for ch_name in raw.ch_names})\n\n###############################################################################\n# We can then plot the locations of our electrodes on our subject's brain.\n# We'll use :func:`~mne.viz.snapshot_brain_montage` to save the plot as image\n# data (along with xy positions of each electrode in the image), so that later\n# we can plot frequency band power on top of it.\n#\n# .. note:: These are not real electrodes for this subject, so they\n# do not align to the cortical surface perfectly.\n\nfig = plot_alignment(raw.info, subject=subject, subjects_dir=subjects_dir,\n surfaces=['pial'], trans=trans, coord_frame='mri')\nmne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, -0.005, 0.03])\n\nxy, im = snapshot_brain_montage(fig, montage)\n\n###############################################################################\n# Next, we'll compute the signal power in the gamma (30-90 Hz) and alpha\n# (8-12 Hz) bands.\ngamma_power_t = raw.copy().filter(30, 90).apply_hilbert(\n envelope=True).get_data()\nalpha_power_t = raw.copy().filter(8, 12).apply_hilbert(\n envelope=True).get_data()\ngamma_power = gamma_power_t.mean(axis=-1)\nalpha_power = alpha_power_t.mean(axis=-1)\n\n###############################################################################\n# Now let's use matplotlib to overplot frequency band power onto the electrodes\n# which can be plotted on top of the brain from\n# :func:`~mne.viz.snapshot_brain_montage`.\n\n# Convert from a dictionary to array to plot\nxy_pts = np.vstack([xy[ch] for ch in raw.info['ch_names']])\n\n# colormap to view spectral power\ncmap = 'viridis'\n\n# Create a 1x2 figure showing the average power in gamma and alpha bands.\nfig, axs = plt.subplots(1, 2, figsize=(20, 10))\n# choose a colormap range wide enough for both frequency bands\n_gamma_alpha_power = np.concatenate((gamma_power, alpha_power)).flatten()\nvmin, vmax = np.percentile(_gamma_alpha_power, [10, 90])\nfor ax, band_power, band in zip(axs,\n [gamma_power, alpha_power],\n ['Gamma', 'Alpha']):\n ax.imshow(im)\n ax.set_axis_off()\n sc = ax.scatter(*xy_pts.T, c=band_power, s=200,\n cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_title(f'{band} band power', size='x-large')\nfig.colorbar(sc, ax=axs)\n\n###############################################################################\n# Say we want to visualize the evolution of the power in the gamma band,\n# instead of just plotting the average. 
We can use\n# `matplotlib.animation.FuncAnimation` to create an animation and apply this\n# to the brain figure.\n\n\n# create an initialization and animation function\n# to pass to FuncAnimation\ndef init():\n \"\"\"Create an empty frame.\"\"\"\n return paths,\n\n\ndef animate(i, activity):\n \"\"\"Animate the plot.\"\"\"\n paths.set_array(activity[:, i])\n return paths,\n\n\n# create the figure and apply the animation of the\n# gamma frequency band activity\nfig, ax = plt.subplots(figsize=(10, 10))\nax.imshow(im)\nax.set_axis_off()\npaths = ax.scatter(*xy_pts.T, c=np.zeros(len(xy_pts)), s=200,\n cmap=cmap, vmin=vmin, vmax=vmax)\nfig.colorbar(paths, ax=ax)\nax.set_title('Gamma frequency over time (Hilbert transform)',\n size='large')\n\n# avoid edge artifacts and decimate, showing just a short chunk\nshow_power = gamma_power_t[:, 100:150]\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n fargs=(show_power,),\n frames=show_power.shape[1],\n interval=200, blit=True)\n\n###############################################################################\n# Alternatively, we can project the sensor data to the nearest locations on\n# the pial surface and visualize that:\n\n# sphinx_gallery_thumbnail_number = 4\n\nevoked = mne.EvokedArray(gamma_power_t, raw.info)\nstc = mne.stc_near_sensors(evoked, trans, subject, subjects_dir=subjects_dir)\nclim = dict(kind='value', lims=[vmin * 0.9, vmin, vmax])\nbrain = stc.plot(surface='pial', hemi='both', initial_time=0.68,\n colormap='viridis', clim=clim, views='parietal',\n subjects_dir=subjects_dir, size=(600, 600))\n# You can save a movie like the one on our documentation website with:\n# brain.save_movie(time_dilation=20, tmin=0.62, tmax=0.72,\n# interpolation='linear', framerate=5,\n# time_viewer=True)\n",
"# -*- coding: utf-8 -*-\n\"\"\"Functions to make simple plots with M/EEG data.\"\"\"\n\n# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Denis Engemann <denis.engemann@gmail.com>\n# Martin Luessi <mluessi@nmr.mgh.harvard.edu>\n# Eric Larson <larson.eric.d@gmail.com>\n# Cathy Nangini <cnangini@gmail.com>\n# Mainak Jas <mainak@neuro.hut.fi>\n#\n# License: Simplified BSD\n\nimport base64\nimport copy\nfrom glob import glob\nfrom io import BytesIO\nfrom itertools import cycle\nimport os.path as op\nimport warnings\nfrom distutils.version import LooseVersion\nfrom collections import defaultdict\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..defaults import DEFAULTS\nfrom ..fixes import _get_img_fdata\nfrom ..rank import compute_rank\nfrom ..source_space import _mri_orientation\nfrom ..surface import read_surface\nfrom ..io.constants import FIFF\nfrom ..io.proj import make_projector\nfrom ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,\n pick_channels)\nfrom ..source_space import (read_source_spaces, SourceSpaces, _read_mri_info,\n _check_mri, _ensure_src)\nfrom ..transforms import invert_transform, apply_trans, _frame_to_str\nfrom ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,\n _mask_to_onsets_offsets, _pl, _on_missing)\nfrom ..io.pick import _picks_by_type\nfrom ..filter import estimate_ringing_samples\nfrom .utils import tight_layout, _get_color_list, _prepare_trellis, plt_show\n\n\ndef _index_info_cov(info, cov, exclude):\n if exclude == 'bads':\n exclude = info['bads']\n info = pick_info(info, pick_channels(info['ch_names'], cov['names'],\n exclude))\n del exclude\n picks_list = \\\n _picks_by_type(info, meg_combined=False, ref_meg=False,\n exclude=())\n picks_by_type = dict(picks_list)\n\n ch_names = [n for n in cov.ch_names if n in info['ch_names']]\n ch_idx = [cov.ch_names.index(n) for n in ch_names]\n\n info_ch_names = info['ch_names']\n idx_by_type = defaultdict(list)\n for ch_type, sel in picks_by_type.items():\n idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])\n for c in sel if info_ch_names[c] in ch_names]\n idx_names = [(idx_by_type[key],\n '%s covariance' % DEFAULTS['titles'][key],\n DEFAULTS['units'][key],\n DEFAULTS['scalings'][key],\n key)\n for key in _DATA_CH_TYPES_SPLIT\n if len(idx_by_type[key]) > 0]\n C = cov.data[ch_idx][:, ch_idx]\n return info, C, ch_names, idx_names\n\n\n@verbose\ndef plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,\n show=True, verbose=None):\n \"\"\"Plot Covariance data.\n\n Parameters\n ----------\n cov : instance of Covariance\n The covariance matrix.\n info : dict\n Measurement info.\n exclude : list of str | str\n List of channels to exclude. If empty do not exclude any channel.\n If 'bads', exclude info['bads'].\n colorbar : bool\n Show colorbar or not.\n proj : bool\n Apply projections or not.\n show_svd : bool\n Plot also singular values of the noise covariance for each sensor\n type. We show square roots ie. standard deviations.\n show : bool\n Show figure if True.\n %(verbose)s\n\n Returns\n -------\n fig_cov : instance of matplotlib.figure.Figure\n The covariance plot.\n fig_svd : instance of matplotlib.figure.Figure | None\n The SVD spectra plot of the covariance.\n\n See Also\n --------\n mne.compute_rank\n\n Notes\n -----\n For each channel type, the rank is estimated using\n :func:`mne.compute_rank`.\n\n .. 
versionchanged:: 0.19\n Approximate ranks for each channel type are shown with red dashed lines.\n \"\"\"\n from ..cov import Covariance\n import matplotlib.pyplot as plt\n from matplotlib.colors import Normalize\n\n info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)\n del cov, exclude\n\n projs = []\n if proj:\n projs = copy.deepcopy(info['projs'])\n\n # Activate the projection items\n for p in projs:\n p['active'] = True\n\n P, ncomp, _ = make_projector(projs, ch_names)\n if ncomp > 0:\n logger.info(' Created an SSP operator (subspace dimension'\n ' = %d)' % ncomp)\n C = np.dot(P, np.dot(C, P.T))\n else:\n logger.info(' The projection vectors do not apply to these '\n 'channels.')\n\n fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,\n figsize=(3.8 * len(idx_names), 3.7))\n for k, (idx, name, _, _, _) in enumerate(idx_names):\n vlim = np.max(np.abs(C[idx][:, idx]))\n im = axes[0, k].imshow(C[idx][:, idx], interpolation=\"nearest\",\n norm=Normalize(vmin=-vlim, vmax=vlim),\n cmap='RdBu_r')\n axes[0, k].set(title=name)\n\n if colorbar:\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(axes[0, k])\n cax = divider.append_axes(\"right\", size=\"5.5%\", pad=0.05)\n plt.colorbar(im, cax=cax, format='%.0e')\n\n fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)\n tight_layout(fig=fig_cov)\n\n fig_svd = None\n if show_svd:\n fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,\n figsize=(3.8 * len(idx_names), 3.7))\n for k, (idx, name, unit, scaling, key) in enumerate(idx_names):\n this_C = C[idx][:, idx]\n s = linalg.svd(this_C, compute_uv=False)\n this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],\n [], [], 0)\n this_info = pick_info(info, idx)\n this_info['projs'] = []\n this_rank = compute_rank(this_C, info=this_info)\n # Protect against true zero singular values\n s[s <= 0] = 1e-10 * s[s > 0].min()\n s = np.sqrt(s) * scaling\n axes[0, k].plot(s, color='k', zorder=3)\n this_rank = this_rank[key]\n axes[0, k].axvline(this_rank - 1, ls='--', color='r',\n alpha=0.5, zorder=4, clip_on=False)\n axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],\n 'rank ≈ %d' % (this_rank,), ha='right', va='top',\n color='r', alpha=0.5, zorder=4)\n axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',\n xlabel='Eigenvalue index', title=name,\n xlim=[0, len(s) - 1])\n tight_layout(fig=fig_svd)\n\n plt_show(show)\n\n return fig_cov, fig_svd\n\n\ndef plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,\n source_index=None, colorbar=False, show=True):\n \"\"\"Plot source power in time-freqency grid.\n\n Parameters\n ----------\n stcs : list of SourceEstimate\n Source power for consecutive time windows, one SourceEstimate object\n should be provided for each frequency bin.\n freq_bins : list of tuples of float\n Start and end points of frequency bins of interest.\n tmin : float\n Minimum time instant to show.\n tmax : float\n Maximum time instant to show.\n source_index : int | None\n Index of source for which the spectrogram will be plotted. 
If None,\n the source with the largest activation will be selected.\n colorbar : bool\n If true, a colorbar will be added to the plot.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of Figure\n The figure.\n \"\"\"\n import matplotlib.pyplot as plt\n\n # Input checks\n if len(stcs) == 0:\n raise ValueError('cannot plot spectrogram if len(stcs) == 0')\n\n stc = stcs[0]\n if tmin is not None and tmin < stc.times[0]:\n raise ValueError('tmin cannot be smaller than the first time point '\n 'provided in stcs')\n if tmax is not None and tmax > stc.times[-1] + stc.tstep:\n raise ValueError('tmax cannot be larger than the sum of the last time '\n 'point and the time step, which are provided in stcs')\n\n # Preparing time-frequency cell boundaries for plotting\n if tmin is None:\n tmin = stc.times[0]\n if tmax is None:\n tmax = stc.times[-1] + stc.tstep\n time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)\n freq_bounds = sorted(set(np.ravel(freq_bins)))\n freq_ticks = copy.deepcopy(freq_bounds)\n\n # Reject time points that will not be plotted and gather results\n source_power = []\n for stc in stcs:\n stc = stc.copy() # copy since crop modifies inplace\n stc.crop(tmin, tmax - stc.tstep)\n source_power.append(stc.data)\n source_power = np.array(source_power)\n\n # Finding the source with maximum source power\n if source_index is None:\n source_index = np.unravel_index(source_power.argmax(),\n source_power.shape)[1]\n\n # If there is a gap in the frequency bins record its locations so that it\n # can be covered with a gray horizontal bar\n gap_bounds = []\n for i in range(len(freq_bins) - 1):\n lower_bound = freq_bins[i][1]\n upper_bound = freq_bins[i + 1][0]\n if lower_bound != upper_bound:\n freq_bounds.remove(lower_bound)\n gap_bounds.append((lower_bound, upper_bound))\n\n # Preparing time-frequency grid for plotting\n time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)\n\n # Plotting the results\n fig = plt.figure(figsize=(9, 6))\n plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],\n cmap='Reds')\n ax = plt.gca()\n\n ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')\n\n time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]\n n_skip = 1 + len(time_bounds) // 10\n for i in range(len(time_bounds)):\n if i % n_skip != 0:\n time_tick_labels[i] = ''\n\n ax.set_xticks(time_bounds)\n ax.set_xticklabels(time_tick_labels)\n plt.xlim(time_bounds[0], time_bounds[-1])\n plt.yscale('log')\n ax.set_yticks(freq_ticks)\n ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])\n plt.ylim(freq_bounds[0], freq_bounds[-1])\n\n plt.grid(True, ls='-')\n if colorbar:\n plt.colorbar()\n tight_layout(fig=fig)\n\n # Covering frequency gaps with horizontal bars\n for lower_bound, upper_bound in gap_bounds:\n plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -\n lower_bound, time_bounds[0], color='#666666')\n\n plt_show(show)\n return fig\n\n\ndef _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',\n slices=None, show=True, show_indices=False,\n show_orientation=False, img_output=False):\n \"\"\"Plot BEM contours on anatomical slices.\"\"\"\n import matplotlib.pyplot as plt\n from matplotlib import patheffects\n # For ease of plotting, we will do everything in voxel coordinates.\n _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))\n\n # Load the T1 data\n _, vox_mri_t, _, _, _, nim = _read_mri_info(\n mri_fname, units='mm', return_img=True)\n mri_vox_t = 
invert_transform(vox_mri_t)['trans']\n del vox_mri_t\n\n # plot axes (x, y, z) as data axes\n (x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(\n nim, orientation)\n transpose = x < y\n\n data = _get_img_fdata(nim)\n shift_x = data.shape[x] if flip_x < 0 else 0\n shift_y = data.shape[y] if flip_y < 0 else 0\n n_slices = data.shape[z]\n if slices is None:\n slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]\n slices = np.atleast_1d(slices).copy()\n slices[slices < 0] += n_slices # allow negative indexing\n if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \\\n slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \\\n slices.dtype.kind not in 'iu':\n raise ValueError('slices must be a sorted 1D array of int with unique '\n 'elements, at least one element, and no elements '\n 'greater than %d, got %s' % (n_slices - 1, slices))\n if flip_z < 0:\n # Proceed in the opposite order to maintain left-to-right / orientation\n slices = slices[::-1]\n\n # create of list of surfaces\n surfs = list()\n for file_name, color in surfaces:\n surf = dict()\n surf['rr'], surf['tris'] = read_surface(file_name)\n # move surface to voxel coordinate system\n surf['rr'] = apply_trans(mri_vox_t, surf['rr'])\n surfs.append((surf, color))\n\n sources = list()\n if src is not None:\n _ensure_src(src, extra=' or None')\n # Eventually we can relax this by allowing ``trans`` if need be\n if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:\n raise ValueError(\n 'Source space must be in MRI coordinates, got '\n f'{_frame_to_str[src[0][\"coord_frame\"]]}')\n for src_ in src:\n points = src_['rr'][src_['inuse'].astype(bool)]\n sources.append(apply_trans(mri_vox_t, points * 1e3))\n sources = np.concatenate(sources, axis=0)\n\n if img_output:\n n_col = n_axes = 1\n fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))\n axs = [ax] * len(slices)\n\n w = fig.get_size_inches()[0]\n fig.set_size_inches([w, w / data.shape[x] * data.shape[y]])\n plt.close(fig)\n else:\n n_col = 4\n fig, axs, _, _ = _prepare_trellis(len(slices), n_col)\n n_axes = len(axs)\n fig.set_facecolor('k')\n bounds = np.concatenate(\n [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float\n slicer = [slice(None)] * 3\n ori_labels = dict(R='LR', A='PA', S='IS')\n xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]\n path_effects = [patheffects.withStroke(linewidth=4, foreground=\"k\",\n alpha=0.75)]\n out = list() if img_output else fig\n for ai, (ax, sl, lower, upper) in enumerate(zip(\n axs, slices, bounds[:-1], bounds[1:])):\n # adjust the orientations for good view\n slicer[z] = sl\n dat = data[tuple(slicer)]\n dat = dat.T if transpose else dat\n dat = dat[::flip_y, ::flip_x]\n\n # First plot the anatomical data\n if img_output:\n ax.clear()\n ax.imshow(dat, cmap=plt.cm.gray, origin='lower')\n ax.set_autoscale_on(False)\n ax.axis('off')\n ax.set_aspect('equal') # XXX eventually could deal with zooms\n\n # and then plot the contours on top\n for surf, color in surfs:\n with warnings.catch_warnings(record=True): # ignore contour warn\n warnings.simplefilter('ignore')\n ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,\n flip_y * surf['rr'][:, y] + shift_y,\n surf['tris'], surf['rr'][:, z],\n levels=[sl], colors=color, linewidths=1.0,\n zorder=1)\n\n if len(sources):\n in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)\n ax.scatter(flip_x * sources[in_slice, x] + shift_x,\n flip_y * sources[in_slice, y] + shift_y,\n marker='.', color='#FF00FF', s=1, zorder=2)\n if 
show_indices:\n ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),\n color='w', fontsize='x-small', va='bottom', ha='left')\n # label the axes\n kwargs = dict(\n color='#66CCEE', fontsize='medium', path_effects=path_effects,\n family='monospace', clip_on=False, zorder=5, weight='bold')\n if show_orientation:\n if ai % n_col == 0: # left\n ax.text(0, dat.shape[0] / 2., xlabels[0],\n va='center', ha='left', **kwargs)\n if ai % n_col == n_col - 1 or ai == n_axes - 1: # right\n ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],\n va='center', ha='right', **kwargs)\n if ai >= n_axes - n_col: # bottom\n ax.text(dat.shape[1] / 2., 0, ylabels[0],\n ha='center', va='bottom', **kwargs)\n if ai < n_col or n_col == 1: # top\n ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],\n ha='center', va='top', **kwargs)\n if img_output:\n output = BytesIO()\n fig.savefig(output, bbox_inches='tight',\n pad_inches=0, format='png')\n out.append(base64.b64encode(output.getvalue()).decode('ascii'))\n\n fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,\n hspace=0.)\n plt_show(show, fig=fig)\n return out\n\n\ndef plot_bem(subject=None, subjects_dir=None, orientation='coronal',\n slices=None, brain_surfaces=None, src=None, show=True,\n show_indices=True, mri='T1.mgz', show_orientation=True):\n \"\"\"Plot BEM contours on anatomical slices.\n\n Parameters\n ----------\n subject : str\n Subject name.\n subjects_dir : str | None\n Path to the SUBJECTS_DIR. If None, the path is obtained by using\n the environment variable SUBJECTS_DIR.\n orientation : str\n 'coronal' or 'axial' or 'sagittal'.\n slices : list of int\n Slice indices.\n brain_surfaces : None | str | list of str\n One or more brain surface to plot (optional). Entries should correspond\n to files in the subject's ``surf`` directory (e.g. ``\"white\"``).\n src : None | SourceSpaces | str\n SourceSpaces instance or path to a source space to plot individual\n sources as scatter-plot. Sources will be shown on exactly one slice\n (whichever slice is closest to each source in the given orientation\n plane). Path can be absolute or relative to the subject's ``bem``\n folder.\n\n .. versionchanged:: 0.20\n All sources are shown on the nearest slice rather than some\n being omitted.\n show : bool\n Show figure if True.\n show_indices : bool\n Show slice indices if True.\n\n .. versionadded:: 0.20\n mri : str\n The name of the MRI to use. Can be a standard FreeSurfer MRI such as\n ``'T1.mgz'``, or a full path to a custom MRI file.\n\n .. versionadded:: 0.21\n show_orientation : str\n Show the orientation (L/R, P/A, I/S) of the data slices.\n\n .. versionadded:: 0.21\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n mne.viz.plot_alignment\n\n Notes\n -----\n Images are plotted in MRI voxel coordinates.\n\n If ``src`` is not None, for a given slice index, all source points are\n shown that are halfway between the previous slice and the given slice,\n and halfway between the given slice and the next slice.\n For large slice decimations, this can\n make some source points appear outside the BEM contour, which is shown\n for the given slice index. 
For example, in the case where the single\n midpoint slice is used ``slices=[128]``, all source points will be shown\n on top of the midpoint MRI slice with the BEM boundary drawn for that\n slice.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n mri_fname = _check_mri(mri, subject, subjects_dir)\n\n # Get the BEM surface filenames\n bem_path = op.join(subjects_dir, subject, 'bem')\n\n if not op.isdir(bem_path):\n raise IOError('Subject bem directory \"%s\" does not exist' % bem_path)\n\n surfaces = _get_bem_plotting_surfaces(bem_path)\n if brain_surfaces is not None:\n if isinstance(brain_surfaces, str):\n brain_surfaces = (brain_surfaces,)\n for surf_name in brain_surfaces:\n for hemi in ('lh', 'rh'):\n surf_fname = op.join(subjects_dir, subject, 'surf',\n hemi + '.' + surf_name)\n if op.exists(surf_fname):\n surfaces.append((surf_fname, '#00DD00'))\n else:\n raise IOError(\"Surface %s does not exist.\" % surf_fname)\n\n if isinstance(src, str):\n if not op.exists(src):\n src_ = op.join(subjects_dir, subject, 'bem', src)\n if op.exists(src_):\n src = src_\n else:\n raise IOError(\"%s does not exist\" % src)\n src = read_source_spaces(src)\n elif src is not None and not isinstance(src, SourceSpaces):\n raise TypeError(\"src needs to be None, str or SourceSpaces instance, \"\n \"not %s\" % repr(src))\n\n if len(surfaces) == 0:\n raise IOError('No surface files found. Surface files must end with '\n 'inner_skull.surf, outer_skull.surf or outer_skin.surf')\n\n # Plot the contours\n return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,\n show, show_indices, show_orientation)\n\n\ndef _get_bem_plotting_surfaces(bem_path):\n surfaces = []\n for surf_name, color in (('*inner_skull', '#FF0000'),\n ('*outer_skull', '#FFFF00'),\n ('*outer_skin', '#FFAA80')):\n surf_fname = glob(op.join(bem_path, surf_name + '.surf'))\n if len(surf_fname) > 0:\n surf_fname = surf_fname[0]\n logger.info(\"Using surface: %s\" % surf_fname)\n surfaces.append((surf_fname, color))\n return surfaces\n\n\n@verbose\ndef plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,\n axes=None, equal_spacing=True, show=True, on_missing='raise',\n verbose=None):\n \"\"\"Plot events to get a visual display of the paradigm.\n\n Parameters\n ----------\n events : array, shape (n_events, 3)\n The events.\n sfreq : float | None\n The sample frequency. If None, data will be displayed in samples (not\n seconds).\n first_samp : int\n The index of the first sample. Recordings made on Neuromag systems\n number samples relative to the system start (not relative to the\n beginning of the recording). In such cases the ``raw.first_samp``\n attribute can be passed here. Default is 0.\n color : dict | None\n Dictionary of event_id integers as keys and colors as values. If None,\n colors are automatically drawn from a default list (cycled through if\n number of events longer than list of default colors). Color can be any\n valid :doc:`matplotlib color <tutorials/colors/colors>`.\n event_id : dict | None\n Dictionary of event labels (e.g. 'aud_l') as keys and their associated\n event_id values. Labels are used to plot a legend. If None, no legend\n is drawn.\n axes : instance of Axes\n The subplot handle.\n equal_spacing : bool\n Use equal spacing between events in y-axis.\n show : bool\n Show figure if True.\n %(on_missing_events)s\n %(verbose)s\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure object containing the plot.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n if sfreq is None:\n sfreq = 1.0\n xlabel = 'Samples'\n else:\n xlabel = 'Time (s)'\n\n events = np.asarray(events)\n if len(events) == 0:\n raise ValueError('No events in events array, cannot plot.')\n unique_events = np.unique(events[:, 2])\n\n if event_id is not None:\n # get labels and unique event ids from event_id dict,\n # sorted by value\n event_id_rev = {v: k for k, v in event_id.items()}\n conditions, unique_events_id = zip(*sorted(event_id.items(),\n key=lambda x: x[1]))\n\n keep = np.ones(len(unique_events_id), bool)\n for ii, this_event in enumerate(unique_events_id):\n if this_event not in unique_events:\n msg = f'{this_event} from event_id is not present in events.'\n _on_missing(on_missing, msg)\n keep[ii] = False\n conditions = [cond for cond, k in zip(conditions, keep) if k]\n unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]\n if len(unique_events_id) == 0:\n raise RuntimeError('No usable event IDs found')\n\n for this_event in unique_events:\n if this_event not in unique_events_id:\n warn('event %s missing from event_id will be ignored'\n % this_event)\n\n else:\n unique_events_id = unique_events\n\n color = _handle_event_colors(color, unique_events, event_id)\n import matplotlib.pyplot as plt\n\n fig = None\n if axes is None:\n fig = plt.figure()\n ax = axes if axes else plt.gca()\n\n unique_events_id = np.array(unique_events_id)\n min_event = np.min(unique_events_id)\n max_event = np.max(unique_events_id)\n max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -\n first_samp) / sfreq\n\n handles, labels = list(), list()\n for idx, ev in enumerate(unique_events_id):\n ev_mask = events[:, 2] == ev\n count = ev_mask.sum()\n if count == 0:\n continue\n y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])\n if event_id is not None:\n event_label = '%s (%s)' % (event_id_rev[ev], count)\n else:\n event_label = 'N=%d' % (count,)\n labels.append(event_label)\n kwargs = {}\n if ev in color:\n kwargs['color'] = color[ev]\n handles.append(\n ax.plot((events[ev_mask, 0] - first_samp) / sfreq,\n y, '.', clip_on=False, **kwargs)[0])\n\n if equal_spacing:\n ax.set_ylim(0, unique_events_id.size + 1)\n ax.set_yticks(1 + np.arange(unique_events_id.size))\n ax.set_yticklabels(unique_events_id)\n else:\n ax.set_ylim([min_event - 1, max_event + 1])\n\n ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x])\n\n ax.grid(True)\n\n fig = fig if fig is not None else plt.gcf()\n # reverse order so that the highest numbers are at the top\n # (match plot order)\n handles, labels = handles[::-1], labels[::-1]\n box = ax.get_position()\n factor = 0.8 if event_id is not None else 0.9\n ax.set_position([box.x0, box.y0, box.width * factor, box.height])\n ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),\n fontsize='small')\n fig.canvas.draw()\n plt_show(show)\n return fig\n\n\ndef _get_presser(fig):\n \"\"\"Get our press callback.\"\"\"\n import matplotlib\n callbacks = fig.canvas.callbacks.callbacks['button_press_event']\n func = None\n for key, val in callbacks.items():\n if LooseVersion(matplotlib.__version__) >= '3':\n func = val()\n else:\n func = val.func\n if func.__class__.__name__ == 'partial':\n break\n else:\n func = None\n assert func is not None\n return func\n\n\ndef plot_dipole_amplitudes(dipoles, colors=None, show=True):\n \"\"\"Plot the amplitude traces of a set of dipoles.\n\n Parameters\n ----------\n dipoles : list of instance of Dipole\n The dipoles whose amplitudes should 
be shown.\n colors : list of color | None\n Color to plot with each dipole. If None default colors are used.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure object containing the plot.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n import matplotlib.pyplot as plt\n if colors is None:\n colors = cycle(_get_color_list())\n fig, ax = plt.subplots(1, 1)\n xlim = [np.inf, -np.inf]\n for dip, color in zip(dipoles, colors):\n ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)\n xlim[0] = min(xlim[0], dip.times[0])\n xlim[1] = max(xlim[1], dip.times[-1])\n ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')\n if show:\n fig.show(warn=False)\n return fig\n\n\ndef adjust_axes(axes, remove_spines=('top', 'right'), grid=True):\n \"\"\"Adjust some properties of axes.\n\n Parameters\n ----------\n axes : list\n List of axes to process.\n remove_spines : list of str\n Which axis spines to remove.\n grid : bool\n Turn grid on (True) or off (False).\n \"\"\"\n axes = [axes] if not isinstance(axes, (list, tuple, np.ndarray)) else axes\n for ax in axes:\n if grid:\n ax.grid(zorder=0)\n for key in remove_spines:\n ax.spines[key].set_visible(False)\n\n\ndef _filter_ticks(lims, fscale):\n \"\"\"Create approximately spaced ticks between lims.\"\"\"\n if fscale == 'linear':\n return None, None # let matplotlib handle it\n lims = np.array(lims)\n ticks = list()\n if lims[1] > 20 * lims[0]:\n base = np.array([1, 2, 4])\n else:\n base = np.arange(1, 11)\n for exp in range(int(np.floor(np.log10(lims[0]))),\n int(np.floor(np.log10(lims[1]))) + 1):\n ticks += (base * (10 ** exp)).tolist()\n ticks = np.array(ticks)\n ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]\n ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]\n return ticks, ticklabels\n\n\ndef _get_flim(flim, fscale, freq, sfreq=None):\n \"\"\"Get reasonable frequency limits.\"\"\"\n if flim is None:\n if freq is None:\n flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]\n else:\n if fscale == 'linear':\n flim = [freq[0]]\n else:\n flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]\n flim += [freq[-1]]\n if fscale == 'log':\n if flim[0] <= 0:\n raise ValueError('flim[0] must be positive, got %s' % flim[0])\n elif flim[0] < 0:\n raise ValueError('flim[0] must be non-negative, got %s' % flim[0])\n return flim\n\n\ndef _check_fscale(fscale):\n \"\"\"Check for valid fscale.\"\"\"\n if not isinstance(fscale, str) or fscale not in ('log', 'linear'):\n raise ValueError('fscale must be \"log\" or \"linear\", got %s'\n % (fscale,))\n\n\n_DEFAULT_ALIM = (-80, 10)\n\n\ndef plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',\n flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,\n compensate=False, plot=('time', 'magnitude', 'delay'),\n axes=None):\n \"\"\"Plot properties of a filter.\n\n Parameters\n ----------\n h : dict or ndarray\n An IIR dict or 1D ndarray of coefficients (for FIR filter).\n sfreq : float\n Sample rate of the data (Hz).\n freq : array-like or None\n The ideal response frequencies to plot (must be in ascending order).\n If None (default), do not plot the ideal response.\n gain : array-like or None\n The ideal response gains to plot.\n If None (default), do not plot the ideal response.\n title : str | None\n The title to use. 
If None (default), determine the title based\n on the type of the system.\n color : color object\n The color to use (default '#1f77b4').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None, freq will be used. If None (default) and freq is None,\n ``(0.1, sfreq / 2.)`` will be used.\n fscale : str\n Frequency scaling to use, can be \"log\" (default) or \"linear\".\n alim : tuple\n The y-axis amplitude limits (dB) to use (default: (-60, 10)).\n show : bool\n Show figure if True (default).\n compensate : bool\n If True, compensate for the filter delay (phase will not be shown).\n\n - For linear-phase FIR filters, this visualizes the filter coefficients\n assuming that the output will be shifted by ``N // 2``.\n - For IIR filters, this changes the filter coefficient display\n by filtering backward and forward, and the frequency response\n by squaring it.\n\n .. versionadded:: 0.18\n plot : list | tuple | str\n A list of the requested plots from ``time``, ``magnitude`` and\n ``delay``. Default is to plot all three filter properties\n ('time', 'magnitude', 'delay').\n\n .. versionadded:: 0.21.0\n axes : instance of Axes | list | None\n The axes to plot to. If list, the list must be a list of Axes of\n the same length as the number of requested plot types. If instance of\n Axes, there must be only one filter property plotted.\n Defaults to ``None``.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure containing the plots.\n\n See Also\n --------\n mne.filter.create_filter\n plot_ideal_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n \"\"\"\n from scipy.signal import (\n freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)\n import matplotlib.pyplot as plt\n\n sfreq = float(sfreq)\n _check_option('fscale', fscale, ['log', 'linear'])\n if isinstance(plot, str):\n plot = [plot]\n for xi, x in enumerate(plot):\n _check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))\n\n flim = _get_flim(flim, fscale, freq, sfreq)\n if fscale == 'log':\n omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)\n else:\n omega = np.linspace(flim[0], flim[1], 1000)\n xticks, xticklabels = _filter_ticks(flim, fscale)\n omega /= sfreq / (2 * np.pi)\n if isinstance(h, dict): # IIR h.ndim == 2: # second-order sections\n if 'sos' in h:\n H = np.ones(len(omega), np.complex128)\n gd = np.zeros(len(omega))\n for section in h['sos']:\n this_H = freqz(section[:3], section[3:], omega)[1]\n H *= this_H\n if compensate:\n H *= this_H.conj() # time reversal is freq conj\n else:\n # Assume the forward-backward delay zeros out, which it\n # mostly should\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd += group_delay((section[:3], section[3:]), omega)[1]\n n = estimate_ringing_samples(h['sos'])\n delta = np.zeros(n)\n delta[0] = 1\n if compensate:\n delta = np.pad(delta, [(n - 1, 0)], 'constant')\n func = sosfiltfilt\n gd += (len(delta) - 1) // 2\n else:\n func = sosfilt\n h = func(h['sos'], delta)\n else:\n H = freqz(h['b'], h['a'], omega)[1]\n if compensate:\n H *= H.conj()\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd = group_delay((h['b'], h['a']), omega)[1]\n if compensate:\n gd += group_delay(h['b'].conj(), h['a'].conj(), omega)[1]\n n = estimate_ringing_samples((h['b'], h['a']))\n delta = np.zeros(n)\n delta[0] = 1\n if compensate:\n delta = np.pad(delta, [(n - 1, 0)], 'constant')\n func = filtfilt\n else:\n func = 
lfilter\n h = func(h['b'], h['a'], delta)\n if title is None:\n title = 'SOS (IIR) filter'\n if compensate:\n title += ' (forward-backward)'\n else:\n H = freqz(h, worN=omega)[1]\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd = group_delay((h, [1.]), omega)[1]\n title = 'FIR filter' if title is None else title\n if compensate:\n title += ' (delay-compensated)'\n\n fig = None\n if axes is None:\n fig, axes = plt.subplots(len(plot), 1)\n if isinstance(axes, plt.Axes):\n axes = [axes]\n elif isinstance(axes, np.ndarray):\n axes = list(axes)\n if fig is None:\n fig = axes[0].get_figure()\n if len(axes) != len(plot):\n raise ValueError('Length of axes (%d) must be the same as number of '\n 'requested filter properties (%d)'\n % (len(axes), len(plot)))\n\n t = np.arange(len(h))\n dlim = np.abs(t).max() / 2.\n dlim = [-dlim, dlim]\n if compensate:\n n_shift = (len(h) - 1) // 2\n t -= n_shift\n assert t[0] == -t[-1]\n gd -= n_shift\n t = t / sfreq\n gd = gd / sfreq\n f = omega * sfreq / (2 * np.pi)\n sl = slice(0 if fscale == 'linear' else 1, None, None)\n mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))\n\n if 'time' in plot:\n ax_time_idx = np.where([p == 'time' for p in plot])[0][0]\n axes[ax_time_idx].plot(t, h, color=color)\n axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',\n ylabel='Amplitude', title=title)\n # Magnitude\n if 'magnitude' in plot:\n ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]\n axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,\n linewidth=2, zorder=4)\n if freq is not None and gain is not None:\n plot_ideal_filter(freq, gain, axes[ax_mag_idx],\n fscale=fscale, show=False)\n axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)\n if xticks is not None:\n axes[ax_mag_idx].set(xticks=xticks)\n axes[ax_mag_idx].set(xticklabels=xticklabels)\n axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',\n ylabel='Amplitude (dB)')\n # Delay\n if 'delay' in plot:\n ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]\n axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,\n linewidth=2, zorder=4)\n # shade nulled regions\n for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):\n axes[ax_delay_idx].axvspan(f[start], f[stop - 1],\n facecolor='k', alpha=0.05,\n zorder=5)\n axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',\n xlabel='Frequency (Hz)',\n xscale=fscale)\n if xticks is not None:\n axes[ax_delay_idx].set(xticks=xticks)\n axes[ax_delay_idx].set(xticklabels=xticklabels)\n axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',\n ylabel='Delay (s)')\n\n adjust_axes(axes)\n tight_layout()\n plt_show(show)\n return fig\n\n\ndef plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',\n alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',\n show=True):\n \"\"\"Plot an ideal filter response.\n\n Parameters\n ----------\n freq : array-like\n The ideal response frequencies to plot (must be in ascending order).\n gain : array-like or None\n The ideal response gains to plot.\n axes : instance of Axes | None\n The subplot handle. 
With None (default), axes are created.\n title : str\n The title to use, (default: '').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None (default), freq used.\n fscale : str\n Frequency scaling to use, can be \"log\" (default) or \"linear\".\n alim : tuple\n If not None (default), the y-axis limits (dB) to use.\n color : color object\n The color to use (default: 'r').\n alpha : float\n The alpha to use (default: 0.5).\n linestyle : str\n The line style to use (default: '--').\n show : bool\n Show figure if True (default).\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n plot_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n\n Examples\n --------\n Plot a simple ideal band-pass filter::\n\n >>> from mne.viz import plot_ideal_filter\n >>> freq = [0, 1, 40, 50]\n >>> gain = [0, 1, 1, 0]\n >>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS\n <...Figure...>\n \"\"\"\n import matplotlib.pyplot as plt\n my_freq, my_gain = list(), list()\n if freq[0] != 0:\n raise ValueError('freq should start with DC (zero) and end with '\n 'Nyquist, but got %s for DC' % (freq[0],))\n freq = np.array(freq)\n # deal with semilogx problems @ x=0\n _check_option('fscale', fscale, ['log', 'linear'])\n if fscale == 'log':\n freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])\n flim = _get_flim(flim, fscale, freq)\n transitions = list()\n for ii in range(len(freq)):\n if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:\n transitions += [[freq[ii], freq[ii + 1]]]\n my_freq += np.linspace(freq[ii], freq[ii + 1], 20,\n endpoint=False).tolist()\n my_gain += np.linspace(gain[ii], gain[ii + 1], 20,\n endpoint=False).tolist()\n else:\n my_freq.append(freq[ii])\n my_gain.append(gain[ii])\n my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))\n if axes is None:\n axes = plt.subplots(1)[1]\n for transition in transitions:\n axes.axvspan(*transition, color=color, alpha=0.1)\n axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5,\n linewidth=4, zorder=3)\n xticks, xticklabels = _filter_ticks(flim, fscale)\n axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',\n xscale=fscale)\n if xticks is not None:\n axes.set(xticks=xticks)\n axes.set(xticklabels=xticklabels)\n axes.set(xlim=flim)\n if title:\n axes.set(title=title)\n adjust_axes(axes)\n tight_layout()\n plt_show(show)\n return axes.figure\n\n\ndef _handle_event_colors(color_dict, unique_events, event_id):\n \"\"\"Create event-integer-to-color mapping, assigning defaults as needed.\"\"\"\n default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))\n # warn if not enough colors\n if color_dict is None:\n if len(unique_events) > len(_get_color_list()):\n warn('More events than default colors available. You should pass '\n 'a list of unique colors.')\n else:\n custom_colors = dict()\n for key, color in color_dict.items():\n if key in unique_events: # key was a valid event integer\n custom_colors[key] = color\n elif key in event_id: # key was an event label\n custom_colors[event_id[key]] = color\n else: # key not a valid event, warn and ignore\n warn('Event ID %s is in the color dict but is not '\n 'present in events or event_id.' % str(key))\n # warn if color_dict is missing any entries\n unassigned = sorted(set(unique_events) - set(custom_colors))\n if len(unassigned):\n unassigned_str = ', '.join(str(e) for e in unassigned)\n warn('Color was not assigned for event%s %s. 
Default colors will '\n 'be used.' % (_pl(unassigned), unassigned_str))\n default_colors.update(custom_colors)\n return default_colors\n\n\ndef plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,\n n_cols=None, show=True):\n \"\"\"Plot CSD matrices.\n\n A sub-plot is created for each frequency. If an info object is passed to\n the function, different channel types are plotted in different figures.\n\n Parameters\n ----------\n csd : instance of CrossSpectralDensity\n The CSD matrix to plot.\n info : instance of Info | None\n To split the figure by channel-type, provide the measurement info.\n By default, the CSD matrix is plotted as a whole.\n mode : 'csd' | 'coh'\n Whether to plot the cross-spectral density ('csd', the default), or\n the coherence ('coh') between the channels.\n colorbar : bool\n Whether to show a colorbar. Defaults to ``True``.\n cmap : str | None\n The matplotlib colormap to use. Defaults to None, which means the\n colormap will default to matplotlib's default.\n n_cols : int | None\n CSD matrices are plotted in a grid. This parameter controls how\n many matrix to plot side by side before starting a new row. By\n default, a number will be chosen to make the grid as square as\n possible.\n show : bool\n Whether to show the figure. Defaults to ``True``.\n\n Returns\n -------\n fig : list of Figure\n The figures created by this function.\n \"\"\"\n import matplotlib.pyplot as plt\n\n if mode not in ['csd', 'coh']:\n raise ValueError('\"mode\" should be either \"csd\" or \"coh\".')\n\n if info is not None:\n info_ch_names = info['ch_names']\n sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,\n exclude=[])\n sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,\n exclude=[])\n sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,\n exclude=[])\n idx_eeg = [csd.ch_names.index(info_ch_names[c])\n for c in sel_eeg if info_ch_names[c] in csd.ch_names]\n idx_mag = [csd.ch_names.index(info_ch_names[c])\n for c in sel_mag if info_ch_names[c] in csd.ch_names]\n idx_grad = [csd.ch_names.index(info_ch_names[c])\n for c in sel_grad if info_ch_names[c] in csd.ch_names]\n indices = [idx_eeg, idx_mag, idx_grad]\n titles = ['EEG', 'Magnetometers', 'Gradiometers']\n\n if mode == 'csd':\n # The units in which to plot the CSD\n units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')\n scalings = dict(eeg=1e12, grad=1e26, mag=1e30)\n else:\n indices = [np.arange(len(csd.ch_names))]\n if mode == 'csd':\n titles = ['Cross-spectral density']\n # Units and scaling unknown\n units = dict()\n scalings = dict()\n elif mode == 'coh':\n titles = ['Coherence']\n\n n_freqs = len(csd.frequencies)\n\n if n_cols is None:\n n_cols = int(np.ceil(np.sqrt(n_freqs)))\n n_rows = int(np.ceil(n_freqs / float(n_cols)))\n\n figs = []\n for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):\n if len(ind) == 0:\n continue\n\n fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,\n figsize=(2 * n_cols + 1, 2.2 * n_rows))\n\n csd_mats = []\n for i in range(len(csd.frequencies)):\n cm = csd.get_data(index=i)[ind][:, ind]\n if mode == 'csd':\n cm = np.abs(cm) * scalings.get(ch_type, 1)\n elif mode == 'coh':\n # Compute coherence from the CSD matrix\n psd = np.diag(cm).real\n cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]\n csd_mats.append(cm)\n\n vmax = np.max(csd_mats)\n\n for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):\n ax = axes[i // n_cols][i % n_cols]\n im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,\n 
vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if csd._is_sum:\n ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),\n np.max(freq)))\n else:\n ax.set_title('%.1f Hz.' % freq)\n\n plt.suptitle(title)\n plt.subplots_adjust(top=0.8)\n\n if colorbar:\n cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])\n if mode == 'csd':\n label = u'CSD'\n if ch_type in units:\n label += u' (%s)' % units[ch_type]\n cb.set_label(label)\n elif mode == 'coh':\n cb.set_label('Coherence')\n\n figs.append(fig)\n\n plt_show(show)\n return figs\n"
] |
[
[
"numpy.concatenate",
"matplotlib.animation.FuncAnimation",
"numpy.percentile",
"matplotlib.pyplot.subplots",
"pandas.read_csv",
"numpy.vstack"
],
[
"numpy.dot",
"matplotlib.pyplot.xlim",
"scipy.linalg.svd",
"numpy.min",
"numpy.where",
"matplotlib.pyplot.gcf",
"numpy.sort",
"numpy.max",
"numpy.concatenate",
"matplotlib.pyplot.colorbar",
"numpy.full",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.sqrt",
"numpy.in1d",
"matplotlib.pyplot.gca",
"numpy.log10",
"matplotlib.pyplot.yscale",
"matplotlib.patheffects.withStroke",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"numpy.pad",
"numpy.zeros",
"numpy.round",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.diff",
"scipy.signal.group_delay",
"numpy.diag",
"scipy.signal.freqz",
"numpy.asarray",
"matplotlib.pyplot.pcolor",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.suptitle",
"numpy.ravel",
"matplotlib.pyplot.barh",
"numpy.abs",
"numpy.atleast_1d",
"matplotlib.colors.Normalize",
"numpy.linspace",
"numpy.meshgrid",
"numpy.unique",
"numpy.maximum"
]
] |
vishalbelsare/abcpy
|
[
"72d0d31ae3fa531b69ea3fef39c96af6628ee76f"
] |
[
"tests/statisticslearning_tests.py"
] |
[
"import unittest\n\nimport numpy as np\n\nfrom abcpy.backends import BackendDummy as Backend\nfrom abcpy.continuousmodels import Normal\nfrom abcpy.continuousmodels import Uniform\nfrom abcpy.statistics import Identity\nfrom abcpy.statisticslearning import Semiautomatic, SemiautomaticNN, TripletDistanceLearning, \\\n ContrastiveDistanceLearning, ExponentialFamilyScoreMatching\n\ntry:\n import torch\nexcept ImportError:\n has_torch = False\nelse:\n has_torch = True\n from abcpy.NN_utilities.networks import createDefaultNN\n\n\nclass SemiautomaticTests(unittest.TestCase):\n def setUp(self):\n # define prior and model\n sigma = Uniform([[10], [20]])\n mu = Normal([0, 1])\n Y = Normal([mu, sigma])\n\n # define backend\n self.backend = Backend()\n\n # define statistics\n self.statistics_cal = Identity(degree=3, cross=False)\n\n # Initialize statistics learning\n self.statisticslearning = Semiautomatic([Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1)\n\n def test_transformation(self):\n # Transform statistics extraction\n self.new_statistics_calculator = self.statisticslearning.get_statistics()\n # Simulate observed data\n Obs = Normal([2, 4])\n y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()\n\n extracted_statistics = self.new_statistics_calculator.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n\n # NOTE we cannot test this, since the linear regression used uses a random number generator (which we cannot access and is in C). Therefore, our results differ and testing might fail\n # self.assertLess(extracted_statistics[0,0] - 0.00215507052338, 10e-2)\n # self.assertLess(extracted_statistics[0,1] - (-0.0058023274456), 10e-2)\n\n\nclass SemiautomaticNNTests(unittest.TestCase):\n def setUp(self):\n # define prior and model\n sigma = Uniform([[10], [20]])\n mu = Normal([0, 1])\n self.Y = Normal([mu, sigma])\n\n # define backend\n self.backend = Backend()\n\n # define statistics\n self.statistics_cal = Identity(degree=3, cross=False)\n\n if has_torch:\n # Initialize statistics learning\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=100,\n n_samples_val=100, n_samples_per_param=1, seed=1, n_epochs=2,\n scale_samples=False, use_tqdm=False)\n self.statisticslearning2 = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=10,\n n_samples_val=10, n_samples_per_param=1, seed=1, n_epochs=5,\n scale_samples=False, use_tqdm=False)\n # with sample scaler:\n self.statisticslearning_with_scaler = SemiautomaticNN([self.Y], self.statistics_cal, self.backend,\n n_samples=100, n_samples_per_param=1, seed=1,\n n_epochs=2, scale_samples=True, use_tqdm=False)\n\n def test_initialization(self):\n if not has_torch:\n self.assertRaises(ImportError, SemiautomaticNN, [self.Y], self.statistics_cal, self.backend)\n\n def test_transformation(self):\n if has_torch:\n # Transform statistics extraction\n self.new_statistics_calculator = self.statisticslearning.get_statistics()\n self.new_statistics_calculator_with_scaler = self.statisticslearning_with_scaler.get_statistics()\n # Simulate observed data\n Obs = Normal([2, 4])\n y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()\n\n extracted_statistics = self.new_statistics_calculator.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n\n self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])\n\n extracted_statistics = 
self.new_statistics_calculator_with_scaler.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n\n self.assertRaises(RuntimeError, self.new_statistics_calculator_with_scaler.statistics, [np.array([1, 2])])\n\n def test_errors(self):\n if has_torch:\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1, parameters=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n embedding_net=createDefaultNN(1, 2))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1, simulations=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n simulations=np.ones((100, 1, 3)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n parameters=np.ones((100, 1, 2)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1, simulations=np.ones((100, 1)),\n parameters=np.zeros((99, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n parameters_val=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n simulations_val=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n simulations_val=np.ones((100, 1, 3)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n parameters_val=np.ones((100, 1, 2)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n simulations_val=np.ones((100, 1)),\n parameters_val=np.zeros((99, 1)))\n with self.assertRaises(TypeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n parameters=[i for i in range(10)],\n simulations=[i for i in range(10)])\n with self.assertRaises(TypeError):\n self.statisticslearning = SemiautomaticNN([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n n_samples_per_param=1, seed=1,\n parameters_val=[i for i in range(10)],\n simulations_val=[i for i in range(10)])\n with self.assertRaises(RuntimeError):\n self.statisticslearning2.test_losses = [4, 2, 1]\n self.statisticslearning2.plot_losses()\n with self.assertRaises(NotImplementedError):\n self.statisticslearning.plot_losses(which_losses=\"foo\")\n\n def test_plots(self):\n if has_torch:\n self.statisticslearning.plot_losses()\n 
self.statisticslearning.plot_losses(which_losses=\"train\")\n self.statisticslearning.plot_losses(which_losses=\"test\")\n\n\nclass ContrastiveDistanceLearningTests(unittest.TestCase):\n def setUp(self):\n # define prior and model\n sigma = Uniform([[10], [20]])\n mu = Normal([0, 1])\n self.Y = Normal([mu, sigma])\n\n # define backend\n self.backend = Backend()\n\n # define statistics\n self.statistics_cal = Identity(degree=3, cross=False)\n\n if has_torch:\n # Initialize statistics learning\n self.statisticslearning = ContrastiveDistanceLearning([self.Y], self.statistics_cal, self.backend,\n n_samples=100, n_samples_val=100,\n n_samples_per_param=1, seed=1, n_epochs=2,\n scale_samples=False, use_tqdm=False)\n # with sample scaler:\n self.statisticslearning_with_scaler = ContrastiveDistanceLearning([self.Y], self.statistics_cal,\n self.backend, n_samples=100,\n n_samples_per_param=1, seed=1,\n n_epochs=2, scale_samples=True,\n use_tqdm=False)\n\n def test_initialization(self):\n if not has_torch:\n self.assertRaises(ImportError, ContrastiveDistanceLearning, [self.Y], self.statistics_cal,\n self.backend)\n\n def test_transformation(self):\n if has_torch:\n # Transform statistics extraction\n self.new_statistics_calculator = self.statisticslearning.get_statistics()\n self.new_statistics_calculator_with_scaler = self.statisticslearning_with_scaler.get_statistics()\n # Simulate observed data\n Obs = Normal([2, 4])\n y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()\n\n extracted_statistics = self.new_statistics_calculator.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n\n self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])\n\n extracted_statistics = self.new_statistics_calculator_with_scaler.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n\n self.assertRaises(RuntimeError, self.new_statistics_calculator_with_scaler.statistics, [np.array([1, 2])])\n\n def test_plots(self):\n if has_torch:\n self.statisticslearning.plot_losses()\n self.statisticslearning.plot_losses(which_losses=\"train\")\n self.statisticslearning.plot_losses(which_losses=\"test\")\n\n\nclass TripletDistanceLearningTests(unittest.TestCase):\n def setUp(self):\n # define prior and model\n sigma = Uniform([[10], [20]])\n mu = Normal([0, 1])\n self.Y = Normal([mu, sigma])\n\n # define backend\n self.backend = Backend()\n\n # define statistics\n self.statistics_cal = Identity(degree=3, cross=False)\n\n if has_torch:\n # Initialize statistics learning\n self.statisticslearning = TripletDistanceLearning([self.Y], self.statistics_cal, self.backend,\n n_samples=100, n_samples_val=100, n_samples_per_param=1,\n seed=1, n_epochs=2, scale_samples=False, use_tqdm=False)\n # with sample scaler:\n self.statisticslearning_with_scaler = TripletDistanceLearning([self.Y], self.statistics_cal, self.backend,\n scale_samples=True, use_tqdm=False,\n n_samples=100, n_samples_per_param=1, seed=1,\n n_epochs=2)\n\n def test_initialization(self):\n if not has_torch:\n self.assertRaises(ImportError, TripletDistanceLearning, [self.Y], self.statistics_cal, self.backend)\n\n def test_transformation(self):\n if has_torch:\n # Transform statistics extraction\n self.new_statistics_calculator = self.statisticslearning.get_statistics()\n self.new_statistics_calculator_with_scaler = self.statisticslearning_with_scaler.get_statistics()\n # Simulate observed data\n Obs = Normal([2, 4])\n y_obs = Obs.forward_simulate(Obs.get_input_values(), 
1)[0].tolist()\n\n extracted_statistics = self.new_statistics_calculator.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n\n self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])\n\n extracted_statistics = self.new_statistics_calculator_with_scaler.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n\n self.assertRaises(RuntimeError, self.new_statistics_calculator_with_scaler.statistics, [np.array([1, 2])])\n\n def test_plots(self):\n if has_torch:\n self.statisticslearning.plot_losses()\n self.statisticslearning.plot_losses(which_losses=\"train\")\n self.statisticslearning.plot_losses(which_losses=\"test\")\n\n\nclass ExponentialFamilyScoreMatchingTests(unittest.TestCase):\n def setUp(self):\n # define prior and model\n sigma = Uniform([[1], [2]])\n mu = Normal([0, 1])\n self.Y = Normal([mu, sigma])\n\n # define backend\n self.backend = Backend()\n\n # define statistics\n self.statistics_cal = Identity(degree=3, cross=False)\n\n if has_torch:\n self.statisticslearning_all_defaults = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2, use_tqdm=False)\n self.statisticslearning_no_sliced = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2,\n sliced=False, use_tqdm=False)\n self.statisticslearning_sphere_noise = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2, use_tqdm=False,\n noise_type=\"sphere\")\n self.statisticslearning_gaussian_noise = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2, use_tqdm=False,\n noise_type=\"gaussian\")\n self.statisticslearning_variance_reduction = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2, use_tqdm=False,\n variance_reduction=True)\n self.statisticslearning_no_bn = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=4,\n n_epochs=2, batch_norm=False, use_tqdm=False)\n self.statisticslearning_provide_nets = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2,\n simulations_net=createDefaultNN(3, 3)(),\n parameters_net=createDefaultNN(2, 2)(),\n use_tqdm=False)\n self.statisticslearning_embedding_dim = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2,\n embedding_dimension=4, use_tqdm=False)\n self.statisticslearning_validation_early_stop = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal,\n self.backend,\n n_samples=4, n_epochs=20,\n n_samples_val=20, early_stopping=True,\n use_tqdm=False)\n self.statisticslearning_scale = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2, scale_samples=False,\n scale_parameters=True, use_tqdm=False)\n self.statisticslearning_bounds = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2,\n lower_bound_simulations=np.array([-1000, -1000, -1000]),\n upper_bound_simulations=np.array([1000, 1000, 1000]),\n use_tqdm=False, seed=1)\n self.statisticslearning_no_schedulers = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2,\n scheduler_parameters=False,\n scheduler_simulations=False, use_tqdm=False)\n self.statisticslearning_lam = ExponentialFamilyScoreMatching([self.Y], 
self.statistics_cal, self.backend,\n n_samples=4, n_epochs=2, use_tqdm=False, sliced=False,\n lam=0.1)\n\n def test_initialization(self):\n if not has_torch:\n self.assertRaises(ImportError, ExponentialFamilyScoreMatching, [self.Y], self.statistics_cal, self.backend)\n\n def test_transformation(self):\n if has_torch:\n self.new_statistics_calculator = self.statisticslearning_all_defaults.get_statistics()\n # with no scaler on data:\n self.new_statistics_calculator_no_scaler = self.statisticslearning_scale.get_statistics()\n # with no rescaling of the statistics:\n self.new_statistics_calculator_no_rescale = self.statisticslearning_all_defaults.get_statistics(\n rescale_statistics=False)\n\n # Simulate observed data\n Obs = Normal([2, 4])\n y_obs = Obs.forward_simulate(Obs.get_input_values(), 1)[0].tolist()\n\n extracted_statistics = self.new_statistics_calculator.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics), (1, 2))\n extracted_statistics_no_rescale = self.new_statistics_calculator_no_rescale.statistics(y_obs)\n self.assertEqual(np.shape(extracted_statistics_no_rescale), (1, 2))\n self.assertFalse(np.allclose(extracted_statistics_no_rescale, extracted_statistics))\n\n self.assertRaises(RuntimeError, self.new_statistics_calculator.statistics, [np.array([1, 2])])\n self.assertRaises(RuntimeError, self.new_statistics_calculator_no_scaler.statistics, [np.array([1, 2])])\n\n def test_errors(self):\n if has_torch:\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, parameters=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, simulations_net=createDefaultNN(1, 3))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, parameters_net=createDefaultNN(1, 3))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, noise_type=\"ciao\", use_tqdm=False)\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, noise_type=\"sphere\", variance_reduction=True,\n use_tqdm=False)\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, simulations=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n simulations=np.ones((100, 1, 3)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n parameters=np.ones((100, 1, 2)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, simulations=np.ones((100, 1)),\n parameters=np.zeros((99, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n 
parameters_val=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n simulations_val=np.ones((100, 1)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n simulations_val=np.ones((100, 1, 3)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n parameters_val=np.ones((100, 1, 2)))\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n simulations_val=np.ones((100, 1)),\n parameters_val=np.zeros((99, 1)))\n with self.assertRaises(TypeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n parameters=[i for i in range(10)],\n simulations=[i for i in range(10)])\n with self.assertRaises(TypeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1,\n parameters_val=[i for i in range(10)],\n simulations_val=[i for i in range(10)])\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, lower_bound_simulations=[1, 2, 3])\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n seed=1, upper_bound_simulations=[1, 2, 3])\n with self.assertRaises(RuntimeError):\n self.statisticslearning = ExponentialFamilyScoreMatching([self.Y], self.statistics_cal, self.backend, n_samples=1000,\n lower_bound_simulations=np.array([-1000, -1000]), seed=1,\n upper_bound_simulations=np.array([1000, 1000, 1000]))\n\n with self.assertRaises(RuntimeError):\n self.statisticslearning_all_defaults.test_losses = [4, 2, 1]\n self.statisticslearning_all_defaults.plot_losses()\n with self.assertRaises(NotImplementedError):\n self.statisticslearning_all_defaults.plot_losses(which_losses=\"foo\")\n\n def test_plots(self):\n if has_torch:\n self.statisticslearning_all_defaults.plot_losses()\n self.statisticslearning_all_defaults.plot_losses(which_losses=\"train\")\n self.statisticslearning_all_defaults.plot_losses(which_losses=\"test\")\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.ones",
"numpy.shape",
"numpy.allclose"
]
] |
konovalovdmitry/catsnap
|
[
"d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f"
] |
[
"kd_splicing/kd_splicing/helpers.py"
] |
[
"import os\nimport uuid\nfrom collections import defaultdict\nfrom itertools import chain\nfrom typing import Dict, List, Mapping, Optional, Tuple\n\n\nimport pandas as pd\n\nfrom kd_common import excel, logutil, pathutil\nfrom kd_splicing import as_type, blast, database, features, ml, performance, pipeline\nfrom kd_splicing.dataset.models import Dataset\nfrom kd_splicing.dump import dump\nfrom kd_splicing.models import FormattedResults, IsoformTuple, Match, SimpleMatch, Queries\nfrom kd_splicing.models import SearchStatus\nimport itertools\nfrom tqdm import tqdm\nimport json\n\n_logger = logutil.get_logger(__name__)\n\n\ndef find_in_queries(\n protein_ids_str: str,\n isoforms_to_duplicates: Mapping[uuid.UUID, List[uuid.UUID]],\n db: database.models.DB,\n matches_dict: Mapping[IsoformTuple, List[SimpleMatch]]\n) -> Optional[IsoformTuple]:\n tup = str_to_isoform_tuple(db, protein_ids_str)\n for a in isoforms_to_duplicates[tup.a]:\n for b in isoforms_to_duplicates[tup.b]:\n dup = IsoformTuple(a,b)\n if dup in matches_dict:\n return dup\n return None\n\n#############\n# Common\n#############\n\ndef str_to_isoform_tuple(db: database.models.DB, protein_ids_str: str) -> IsoformTuple:\n protein_ids = [protein_id.strip()\n for protein_id in protein_ids_str.split(\",\")]\n assert len(protein_ids) == 2\n return IsoformTuple(\n db.protein_id_to_isoform[protein_ids[0]], db.protein_id_to_isoform[protein_ids[1]])\n\n\ndef isoform_tuple_to_protein_ids(db: database.models.DB, iso_tuple: IsoformTuple) -> str:\n return f\"{db.isoforms[iso_tuple.a].protein_id},{db.isoforms[iso_tuple.b].protein_id}\"\n\n\ndef tuples_to_queries(tuples: List[IsoformTuple], num_groups: int = 20) -> Queries:\n isoforms = sorted(list(set(chain.from_iterable(\n (query_isoforms.a, query_isoforms.b)\n for query_isoforms in tuples\n ))))\n group_count = itertools.cycle(range(0, num_groups))\n isoform_to_group = {}\n isoform_to_idx = {}\n group_size: Dict[int, int] = defaultdict(int)\n for iso in isoforms:\n group = next(group_count)\n isoform_to_group[iso] = group\n isoform_to_idx[iso] = group_size[group]\n group_size[group] += 1\n return Queries(\n tuples=tuples,\n isoforms=isoforms,\n isoform_to_idx=isoform_to_idx,\n isoform_to_group=isoform_to_group,\n )\n\n\ndef best_match_per_organism(matches: List[Match]) -> List[Match]:\n query_organism_to_match: Dict[Tuple[IsoformTuple, str], Match] = {}\n for m in matches:\n key = (m.query_isoforms, m.hit_organism)\n best_match = query_organism_to_match.get(key)\n if not best_match or best_match.predicted_positive_probability < m.predicted_positive_probability:\n query_organism_to_match[key] = m\n return list(query_organism_to_match.values())\n\n#############\n# Stats\n#############\n\ndef count_isoforms_per_organism(db: database.models.DB) -> None:\n organism_to_isoform_count: Dict[str, int] = defaultdict(int)\n for isoform in db.isoforms.values():\n gene = db.genes[isoform.gene_uuid]\n record = db.records[gene.record_uuid]\n organism_to_isoform_count[record.organism] += 1\n for key, value in sorted(organism_to_isoform_count.items(), key=lambda p: p[1]):\n print(key, \",\", value)\n\n\ndef calc_performance(matches: List[Match]) -> None:\n predicted = [m.predicted_positive for m in matches]\n correct = [m.positive for m in matches]\n _logger.info(\n f\"Performance:\\n{pd.Series(performance.binary_y_performance(correct, predicted))}\")\n\n\ndef db_organisms_stats(db: database.models.DB, db_folder: str) -> None:\n df = database.utils.to_df(db)\n\n d1 = 
df[\"organism\"].value_counts().reset_index()\n excel.write(d1, os.path.join(pathutil.create_folder(\n db_folder, \"stats\"), os.path.basename(db_folder) + \"_db_organisms.xlsx\"))\n\n d2 = df[df[\"db_name\"] == \"refseq\"][\"organism\"].value_counts().reset_index()\n excel.write(d2, os.path.join(pathutil.create_folder(\n db_folder, \"stats\"), os.path.basename(db_folder) + \"_db_organisms_refseq.xlsx\"))\n\n d3 = df[df[\"db_name\"] == \"genbank\"][\"organism\"].value_counts().reset_index()\n excel.write(d3, os.path.join(pathutil.create_folder(\n db_folder, \"stats\"), os.path.basename(db_folder) + \"_db_organisms_genbank.xlsx\"))\n \n d4 = df[df[\"db_name_src\"] == \"refseq\"][\"organism\"].value_counts().reset_index()\n excel.write(d4, os.path.join(pathutil.create_folder(\n db_folder, \"stats\"), os.path.basename(db_folder) + \"_db_organisms_refseq_src.xlsx\"))\n\n d5 = df[df[\"db_name_src\"] == \"genbank\"][\"organism\"].value_counts().reset_index()\n excel.write(d5, os.path.join(pathutil.create_folder(\n db_folder, \"stats\"), os.path.basename(db_folder) + \"_db_organisms_genbank_src.xlsx\"))\n\n\ndef db_missed_files(p: pipeline.Pipeline) -> None:\n def prepare(s: str) -> str:\n s = os.path.basename(s)\n s = \".\".join(s.split(\".\")[:-1])\n return s\n\n files = pathutil.file_list(p.folder_archive, p.archive_extension)\n extracted = pathutil.file_list(p.folder_extracted)\n files.sort()\n extracted = [prepare(f) for f in extracted]\n files = [prepare(f) for f in files]\n print(sorted(list(set(files) - set(extracted))))\n\n#############\n# Dumps\n#############\n\n\ndef single_cross_validation_and_dump(db: database.models.DB, launch_folder: str, ds: List[Match], test_protein_ids: str) -> None:\n protein_ids = test_protein_ids.split(\",\")\n assert len(protein_ids) == 2\n protein_id_to_isoform = {\n i.protein_id: i.uuid for i in db.isoforms.values()}\n test_query_isoforms = IsoformTuple(\n protein_id_to_isoform[protein_ids[0]], protein_id_to_isoform[protein_ids[1]])\n train_ds = [m for m in ds if m.query_isoforms != test_query_isoforms]\n test_ds = [m for m in ds if m.query_isoforms == test_query_isoforms]\n d = ml.Detector()\n d.fit(train_ds)\n d.transform(test_ds)\n calc_performance(test_ds)\n folder = pathutil.create_folder(launch_folder, \"cross_validation\")\n dump(db, folder, test_ds)\n\n\ndef dump_single_query_simple_matches(\n db: database.models.DB,\n launch_folder: str,\n matches_dict: Mapping[IsoformTuple, List[SimpleMatch]],\n protein_ids_str: str,\n isoforms_to_duplicates: Mapping[uuid.UUID, List[uuid.UUID]],\n) -> None:\n query_isoforms = find_in_queries(protein_ids_str, isoforms_to_duplicates, db, matches_dict)\n if not query_isoforms:\n print(\"No such query in precalculated queries\")\n return\n\n simple_matches = matches_dict[query_isoforms]\n matches = features.convert_matches({query_isoforms: simple_matches})\n dump(db, pathutil.create_folder(launch_folder,\n \"matches_simple_single\", protein_ids_str), matches)\n\n\ndef calc_features_and_dump_single(\n db: database.models.DB,\n launch_folder: str,\n queries: Queries,\n detector: ml.Detector,\n protein_ids_str: str,\n isoforms_to_duplicates: Mapping[uuid.UUID, List[uuid.UUID]],\n matches_dict: Mapping[IsoformTuple, List[SimpleMatch]],\n) -> List[Match]:\n query_isoforms = find_in_queries(protein_ids_str, isoforms_to_duplicates, db, matches_dict)\n if not query_isoforms:\n print(\"No such query in precalculated queries\")\n return\n\n matches = features.calc(db, launch_folder, queries, [query_isoforms])\n 
detector.transform(matches)\n dump(db, pathutil.create_folder(launch_folder,\n \"matches_single\", protein_ids_str), matches)\n return matches\n\n\n#############\n# Search\n#############\n\ndef search(\n db: database.models.DB,\n p: pipeline.Pipeline,\n detector: ml.Detector,\n query_protein_ids_str: List[str],\n blast_db_path: str,\n status: SearchStatus = SearchStatus.construct(progress = 0, description = \"\"),\n isoforms_to_duplicates: Optional[Mapping[uuid.UUID, List[uuid.UUID]]] = None,\n) -> str:\n status.set(0, \"Preparing queries\")\n tuples = [str_to_isoform_tuple(db, query_proteins) for query_proteins in query_protein_ids_str]\n queries = tuples_to_queries(tuples, num_groups=1)\n name = \";\".join(query_protein_ids_str)\n return search_queries(db, p, detector, queries, name, blast_db_path, status, isoforms_to_duplicates)\n\ndef search_queries(\n db: database.models.DB,\n p: pipeline.Pipeline,\n detector: ml.Detector,\n queries: Queries,\n name: str,\n blast_db_path: str,\n status: SearchStatus,\n isoforms_to_duplicates: Optional[Mapping[uuid.UUID, List[uuid.UUID]]] = None,\n) -> str:\n status.set(10, \"BLAST running\")\n blast.create_queires(db, queries, p.launch_folder)\n blast.run(p.launch_folder, blast_db_path, parallel = False)\n status.set(20, \"Reading BLAST results\")\n queries.isoform_to_file = get_isoforms_to_file(p.launch_folder)\n\n status.set(30, \"Calculating features\")\n matches = features.calc(db, p.launch_folder, queries)\n status.set(40, \"Running ml model\")\n detector.transform(matches)\n\n result_folder = pathutil.create_folder(p.launch_folder, \"search_single\", name)\n status.set(50, \"Preparing results\")\n dump(db, result_folder, matches, isoforms_to_duplicates)\n return result_folder\n\ndef matches_to_df(\n db: database.models.DB,\n isoforms_to_duplicates: Mapping[uuid.UUID, List[uuid.UUID]],\n matches: List[Match],\n) -> pd.DataFrame:\n data = []\n for m in tqdm(matches):\n q_iso_a = db.isoforms[m.query_isoforms.a]\n q_iso_b = db.isoforms[m.query_isoforms.b]\n q_gene = db.genes[q_iso_a.gene_uuid]\n q_record = db.records[q_gene.record_uuid]\n q_file = db.files[q_record.file_uuid]\n\n h_iso_a = db.isoforms[m.hit_isoforms.a]\n h_iso_b = db.isoforms[m.hit_isoforms.b]\n h_gene = db.genes[h_iso_a.gene_uuid]\n h_record = db.records[h_gene.record_uuid]\n h_file = db.files[h_record.file_uuid]\n hit_as_types = as_type.get_isoforms_as_types(db, isoforms_to_duplicates, h_iso_a.uuid, h_iso_b.uuid)\n query_as_types = as_type.get_isoforms_as_types(db, isoforms_to_duplicates, q_iso_a.uuid, q_iso_b.uuid)\n intersection_as_types = hit_as_types & query_as_types\n row = {\n \"query_isoforms\": m.query_isoforms,\n \"hit_isoforms\": m.hit_isoforms,\n\n \"hit_organism\": m.hit_organism,\n \"hit_db_name\": m.hit_db_name,\n \"hit_gene_uuid\": h_iso_a.gene_uuid,\n \"hit_protein_ids\": f\"{h_iso_a.protein_id}, {h_iso_b.protein_id}\",\n \"hit_locus_tag\": h_gene.locus_tag,\n \"hit_gene_id\": h_gene.gene_id,\n \"hit_db_xref\": h_gene.db_xref,\n \"hit_as_types\": hit_as_types,\n \"hit_as_types_max\": max([len(as_type) for as_type in hit_as_types], default=0),\n\n \"positive\": m.positive,\n \"predicted_positive\": m.predicted_positive,\n \"predicted_positive_probability\": m.predicted_positive_probability,\n\n \"isoform_blast_score\": m.isoform_blast_score,\n \"splicing_difference\": m.splicing_difference,\n \"splicing_similarity\": m.splicing_similarity,\n \"splicing_dissimilarity\": m.splicing_dissimilarity,\n\n \"query_gene_uuid\": q_iso_a.gene_uuid,\n \"query_protein_ids\": 
f\"{q_iso_a.protein_id}, {q_iso_b.protein_id}\",\n \"query_locus_tag\": q_gene.locus_tag,\n \"query_gene_id\": q_gene.gene_id,\n \"query_db_xref\": q_gene.db_xref,\n \"query_as_types\": query_as_types,\n \"query_as_types_max\": max([len(as_type) for as_type in query_as_types], default=0),\n\n \"intersection_as_types\": intersection_as_types,\n \"intersection_as_types_len\": len(intersection_as_types),\n\n \"conservative\": int(m.predicted_positive),\n \"conservative_probability\": m.predicted_positive_probability,\n \"db_name\": q_file.db_name,\n }\n data.append(row)\n df = pd.DataFrame(data)\n return df\n\ndef get_isoforms_to_file(launch_folder: str) -> Mapping[uuid.UUID, str]:\n results_folder = pathutil.create_folder(launch_folder, \"blast_results\")\n isoforms_to_file: Dict[uuid.UUID, str] = {}\n for group_folder in tqdm(pathutil.get_sub_directories(results_folder)):\n for result_file in pathutil.file_list(group_folder, \".json\"):\n with open(result_file, \"r\") as f:\n try:\n data = json.load(f)\n except Exception as e:\n _logger.exception(\"exception in file result file\")\n continue\n query_iso_str = data[\"BlastOutput2\"][\"report\"][\"results\"][\"search\"][\"query_title\"]\n isoforms_to_file[uuid.UUID(query_iso_str)] = result_file\n return isoforms_to_file"
] |
[
[
"pandas.DataFrame"
]
] |
HyunjiEllenPak/automl
|
[
"fedf04adf12c5fd11045ea06e2f5c11a5a5490c4"
] |
[
"efficientdet/anchors.py"
] |
[
"# Lint as: python3\n# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Anchor definition.\n\nThis module is borrowed from TPU RetinaNet implementation:\nhttps://github.com/tensorflow/tpu/blob/master/models/official/retinanet/anchors.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport utils\nfrom object_detection import argmax_matcher\nfrom object_detection import box_list\nfrom object_detection import faster_rcnn_box_coder\nfrom object_detection import region_similarity_calculator\nfrom object_detection import target_assigner\n\n# The minimum score to consider a logit for identifying detections.\nMIN_CLASS_SCORE = -5.0\n\n# The score for a dummy detection\n_DUMMY_DETECTION_SCORE = -1e5\n\n# The maximum number of (anchor,class) pairs to keep for non-max suppression.\nMAX_DETECTION_POINTS = 5000\n\n# The maximum number of detections per image.\nMAX_DETECTIONS_PER_IMAGE = 100\n\n# The minimal score threshold.\nMIN_SCORE_THRESH = 0.4\n\n\ndef sigmoid(x):\n \"\"\"Sigmoid function for use with Numpy for CPU evaluation.\"\"\"\n return 1 / (1 + np.exp(-x))\n\n\ndef decode_box_outputs(rel_codes, anchors):\n \"\"\"Transforms relative regression coordinates to absolute positions.\n\n Network predictions are normalized and relative to a given anchor; this\n reverses the transformation and outputs absolute coordinates for the input\n image.\n\n Args:\n rel_codes: box regression targets.\n anchors: anchors on all feature levels.\n Returns:\n outputs: bounding boxes.\n\n \"\"\"\n ycenter_a = (anchors[0] + anchors[2]) / 2\n xcenter_a = (anchors[1] + anchors[3]) / 2\n ha = anchors[2] - anchors[0]\n wa = anchors[3] - anchors[1]\n ty, tx, th, tw = rel_codes\n\n w = np.exp(tw) * wa\n h = np.exp(th) * ha\n ycenter = ty * ha + ycenter_a\n xcenter = tx * wa + xcenter_a\n ymin = ycenter - h / 2.\n xmin = xcenter - w / 2.\n ymax = ycenter + h / 2.\n xmax = xcenter + w / 2.\n return np.column_stack([ymin, xmin, ymax, xmax])\n\n\ndef decode_box_outputs_tf(rel_codes, anchors):\n \"\"\"Transforms relative regression coordinates to absolute positions.\n\n Network predictions are normalized and relative to a given anchor; this\n reverses the transformation and outputs absolute coordinates for the input\n image.\n\n Args:\n rel_codes: box regression targets.\n anchors: anchors on all feature levels.\n Returns:\n outputs: bounding boxes.\n \"\"\"\n ycenter_a = (anchors[0] + anchors[2]) / 2\n xcenter_a = (anchors[1] + anchors[3]) / 2\n ha = anchors[2] - anchors[0]\n wa = anchors[3] - anchors[1]\n ty, tx, th, tw = tf.unstack(rel_codes, num=4)\n\n w = tf.math.exp(tw) * wa\n h = tf.math.exp(th) * ha\n ycenter = ty * ha + ycenter_a\n xcenter = tx * wa + xcenter_a\n ymin = ycenter - h / 2.\n xmin = xcenter - 
w / 2.\n ymax = ycenter + h / 2.\n xmax = xcenter + w / 2.\n return tf.stack([ymin, xmin, ymax, xmax], axis=1)\n\n\n@tf.autograph.to_graph\ndef nms_tf(dets, thresh):\n \"\"\"Non-maximum suppression with tf graph mode.\"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = tf.argsort(scores, direction='DESCENDING')\n\n keep = tf.TensorArray(tf.int32, size=0, dynamic_size=True)\n index = 0\n while tf.size(order) > 0:\n i = order[0]\n keep = keep.write(index, i)\n xx1 = tf.maximum(x1[i], tf.gather(x1, order[1:]))\n yy1 = tf.maximum(y1[i], tf.gather(y1, order[1:]))\n xx2 = tf.minimum(x2[i], tf.gather(x2, order[1:]))\n yy2 = tf.minimum(y2[i], tf.gather(y2, order[1:]))\n\n w = tf.maximum(0.0, xx2 - xx1 + 1)\n h = tf.maximum(0.0, yy2 - yy1 + 1)\n intersection = w * h\n overlap = intersection / (\n areas[i] + tf.gather(areas, order[1:]) - intersection)\n\n inds = tf.where_v2(overlap <= thresh)\n order = tf.concat(tf.gather(order, inds + 1), axis=1)\n order = tf.squeeze(order, axis=-1)\n index += 1\n return keep.stack()\n\n\ndef nms(dets, thresh):\n \"\"\"Non-maximum suppression.\"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n intersection = w * h\n overlap = intersection / (areas[i] + areas[order[1:]] - intersection)\n\n inds = np.where(overlap <= thresh)[0]\n order = order[inds + 1]\n return keep\n\n\ndef _generate_anchor_configs(feat_sizes, min_level, max_level, num_scales,\n aspect_ratios):\n \"\"\"Generates mapping from output level to a list of anchor configurations.\n\n A configuration is a tuple of (num_anchors, scale, aspect_ratio).\n\n Args:\n feat_sizes: list of dict of integer numbers of feature map sizes.\n min_level: integer number of minimum level of the output feature pyramid.\n max_level: integer number of maximum level of the output feature pyramid.\n num_scales: integer number representing intermediate scales added\n on each level. For instances, num_scales=2 adds two additional\n anchor scales [2^0, 2^0.5] on each level.\n aspect_ratios: list of tuples representing the aspect ratio anchors added\n on each level. 
For instances, aspect_ratios =\n [(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.\n\n Returns:\n anchor_configs: a dictionary with keys as the levels of anchors and\n values as a list of anchor configuration.\n \"\"\"\n anchor_configs = {}\n for level in range(min_level, max_level + 1):\n anchor_configs[level] = []\n for scale_octave in range(num_scales):\n for aspect in aspect_ratios:\n anchor_configs[level].append(\n ((feat_sizes[0]['height'] / float(feat_sizes[level]['height']),\n feat_sizes[0]['width'] / float(feat_sizes[level]['width'])),\n scale_octave / float(num_scales), aspect))\n return anchor_configs\n\n\ndef _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):\n \"\"\"Generates multiscale anchor boxes.\n\n Args:\n image_size: tuple of integer numbers of input image size.\n anchor_scale: float number representing the scale of size of the base\n anchor to the feature stride 2^level.\n anchor_configs: a dictionary with keys as the levels of anchors and\n values as a list of anchor configuration.\n\n Returns:\n anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all\n feature levels.\n Raises:\n ValueError: input size must be the multiple of largest feature stride.\n \"\"\"\n boxes_all = []\n for _, configs in anchor_configs.items():\n boxes_level = []\n for config in configs:\n stride, octave_scale, aspect = config\n base_anchor_size_x = anchor_scale * stride[1] * 2**octave_scale\n base_anchor_size_y = anchor_scale * stride[0] * 2**octave_scale\n anchor_size_x_2 = base_anchor_size_x * aspect[0] / 2.0\n anchor_size_y_2 = base_anchor_size_y * aspect[1] / 2.0\n\n x = np.arange(stride[1] / 2, image_size[1], stride[1])\n y = np.arange(stride[0] / 2, image_size[0], stride[0])\n xv, yv = np.meshgrid(x, y)\n xv = xv.reshape(-1)\n yv = yv.reshape(-1)\n\n boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,\n yv + anchor_size_y_2, xv + anchor_size_x_2))\n boxes = np.swapaxes(boxes, 0, 1)\n boxes_level.append(np.expand_dims(boxes, axis=1))\n # concat anchors on the same level to the reshape NxAx4\n boxes_level = np.concatenate(boxes_level, axis=1)\n boxes_all.append(boxes_level.reshape([-1, 4]))\n\n anchor_boxes = np.vstack(boxes_all)\n return anchor_boxes\n\n\ndef _generate_detections_tf(cls_outputs,\n box_outputs,\n anchor_boxes,\n indices,\n classes,\n image_id,\n image_scale,\n min_score_thresh=MIN_SCORE_THRESH,\n max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,\n soft_nms_sigma=0.0,\n iou_threshold=0.5,\n use_native_nms=True):\n \"\"\"Generates detections with model outputs and anchors.\n\n Args:\n cls_outputs: a numpy array with shape [N, 1], which has the highest class\n scores on all feature levels. The N is the number of selected\n top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)\n box_outputs: a numpy array with shape [N, 4], which stacks box regression\n outputs on all feature levels. The N is the number of selected top-k\n total anchors on all levels. (k being MAX_DETECTION_POINTS)\n anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all\n feature levels. 
The N is the number of selected top-k total anchors on\n all levels.\n indices: a numpy array with shape [N], which is the indices from top-k\n selection.\n classes: a numpy array with shape [N], which represents the class\n prediction on all selected anchors from top-k selection.\n image_id: an integer number to specify the image id.\n image_scale: a float tensor representing the scale between original image\n and input image for the detector. It is used to rescale detections for\n evaluating with the original groundtruth annotations.\n min_score_thresh: A float representing the threshold for deciding when to\n remove boxes based on score.\n max_boxes_to_draw: Max number of boxes to draw.\n soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;\n See Bodla et al, https://arxiv.org/abs/1704.04503). When\n `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)\n NMS.\n iou_threshold: A float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n use_native_nms: a bool that indicates whether to use native nms.\n\n Returns:\n detections: detection results in a tensor with each row representing\n [image_id, y, x, height, width, score, class]\n \"\"\"\n logging.info('Using tf version of post-processing.')\n anchor_boxes = tf.gather(anchor_boxes, indices)\n\n scores = tf.math.sigmoid(cls_outputs)\n # apply bounding box regression to anchors\n boxes = decode_box_outputs_tf(\n tf.transpose(box_outputs, [1, 0]), tf.transpose(anchor_boxes, [1, 0]))\n\n if use_native_nms:\n logging.info('Using native nms.')\n top_detection_idx, scores = tf.image.non_max_suppression_with_scores(\n boxes,\n scores,\n max_boxes_to_draw,\n iou_threshold=iou_threshold,\n score_threshold=min_score_thresh,\n soft_nms_sigma=soft_nms_sigma)\n boxes = tf.gather(boxes, top_detection_idx)\n else:\n logging.info('Using customized nms.')\n scores = tf.expand_dims(scores, axis=1)\n all_detections = tf.concat([boxes, scores], axis=1)\n top_detection_idx = nms_tf(all_detections, iou_threshold)\n detections = tf.gather(all_detections, top_detection_idx)\n scores = detections[:, 4]\n boxes = detections[:, :4]\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n\n detections = tf.stack([\n tf.cast(tf.repeat(image_id, tf.size(top_detection_idx)), tf.float32),\n boxes[:, 0] * image_scale,\n boxes[:, 1] * image_scale,\n height * image_scale,\n width * image_scale,\n scores,\n tf.cast(tf.gather(classes, top_detection_idx) + 1, tf.float32)\n ], axis=1)\n return detections\n\n\ndef _generate_detections(cls_outputs, box_outputs, anchor_boxes, indices,\n classes, image_id, image_scale, num_classes,\n max_boxes_to_draw):\n \"\"\"Generates detections with model outputs and anchors.\n\n Args:\n cls_outputs: a numpy array with shape [N, 1], which has the highest class\n scores on all feature levels. The N is the number of selected\n top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)\n box_outputs: a numpy array with shape [N, 4], which stacks box regression\n outputs on all feature levels. The N is the number of selected top-k\n total anchors on all levels. (k being MAX_DETECTION_POINTS)\n anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all\n feature levels. 
The N is the number of selected top-k total anchors on\n all levels.\n indices: a numpy array with shape [N], which is the indices from top-k\n selection.\n classes: a numpy array with shape [N], which represents the class\n prediction on all selected anchors from top-k selection.\n image_id: an integer number to specify the image id.\n image_scale: a float tensor representing the scale between original image\n and input image for the detector. It is used to rescale detections for\n evaluating with the original groundtruth annotations.\n num_classes: a integer that indicates the number of classes.\n max_boxes_to_draw: max number of boxes to draw per image.\n\n Returns:\n detections: detection results in a tensor with each row representing\n [image_id, x, y, width, height, score, class]\n \"\"\"\n logging.info('Using numpy version of post-processing.')\n anchor_boxes = anchor_boxes[indices, :]\n scores = sigmoid(cls_outputs)\n # apply bounding box regression to anchors\n boxes = decode_box_outputs(\n box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))\n boxes = boxes[:, [1, 0, 3, 2]]\n # run class-wise nms\n detections = []\n for c in range(num_classes):\n indices = np.where(classes == c)[0]\n if indices.shape[0] == 0:\n continue\n boxes_cls = boxes[indices, :]\n scores_cls = scores[indices]\n # Select top-scoring boxes in each class and apply non-maximum suppression\n # (nms) for boxes in the same class. The selected boxes from each class are\n # then concatenated for the final detection outputs.\n all_detections_cls = np.column_stack((boxes_cls, scores_cls))\n top_detection_idx = nms(all_detections_cls, 0.5)\n top_detections_cls = all_detections_cls[top_detection_idx]\n top_detections_cls[:, 2] -= top_detections_cls[:, 0]\n top_detections_cls[:, 3] -= top_detections_cls[:, 1]\n top_detections_cls = np.column_stack(\n (np.repeat(image_id, len(top_detection_idx)),\n top_detections_cls,\n np.repeat(c + 1, len(top_detection_idx)))\n )\n detections.append(top_detections_cls)\n\n def _generate_dummy_detections(number):\n detections_dummy = np.zeros((number, 7), dtype=np.float32)\n detections_dummy[:, 0] = image_id[0]\n detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE\n return detections_dummy\n\n if detections:\n detections = np.vstack(detections)\n # take final 100 detections\n indices = np.argsort(-detections[:, -2])\n detections = np.array(\n detections[indices[0:max_boxes_to_draw]], dtype=np.float32)\n # Add dummy detections to fill up to 100 detections\n n = max(max_boxes_to_draw - len(detections), 0)\n detections_dummy = _generate_dummy_detections(n)\n detections = np.vstack([detections, detections_dummy])\n else:\n detections = _generate_dummy_detections(max_boxes_to_draw)\n\n detections[:, 1:5] *= image_scale\n\n return detections\n\n\nclass Anchors(object):\n \"\"\"RetinaNet Anchors class.\"\"\"\n\n def __init__(self, min_level, max_level, num_scales, aspect_ratios,\n anchor_scale, image_size):\n \"\"\"Constructs multiscale RetinaNet anchors.\n\n Args:\n min_level: integer number of minimum level of the output feature pyramid.\n max_level: integer number of maximum level of the output feature pyramid.\n num_scales: integer number representing intermediate scales added\n on each level. For instances, num_scales=2 adds two additional\n anchor scales [2^0, 2^0.5] on each level.\n aspect_ratios: list of tuples representing the aspect ratio anchors added\n on each level. 
For instances, aspect_ratios =\n [(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.\n anchor_scale: float number representing the scale of size of the base\n anchor to the feature stride 2^level.\n image_size: integer number or tuple of integer number of input image size.\n \"\"\"\n self.min_level = min_level\n self.max_level = max_level\n self.num_scales = num_scales\n self.aspect_ratios = aspect_ratios\n self.anchor_scale = anchor_scale\n if isinstance(image_size, int):\n self.image_size = (image_size, image_size)\n else:\n self.image_size = image_size\n self.feat_sizes = utils.get_feat_sizes(image_size, max_level)\n self.config = self._generate_configs()\n self.boxes = self._generate_boxes()\n\n def _generate_configs(self):\n \"\"\"Generate configurations of anchor boxes.\"\"\"\n return _generate_anchor_configs(self.feat_sizes, self.min_level,\n self.max_level, self.num_scales,\n self.aspect_ratios)\n\n def _generate_boxes(self):\n \"\"\"Generates multiscale anchor boxes.\"\"\"\n boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,\n self.config)\n boxes = tf.convert_to_tensor(boxes, dtype=tf.float32)\n return boxes\n\n def get_anchors_per_location(self):\n return self.num_scales * len(self.aspect_ratios)\n\n\nclass AnchorLabeler(object):\n \"\"\"Labeler for multiscale anchor boxes.\"\"\"\n\n def __init__(self, anchors, num_classes, match_threshold=0.5):\n \"\"\"Constructs anchor labeler to assign labels to anchors.\n\n Args:\n anchors: an instance of class Anchors.\n num_classes: integer number representing number of classes in the dataset.\n match_threshold: float number between 0 and 1 representing the threshold\n to assign positive labels for anchors.\n \"\"\"\n similarity_calc = region_similarity_calculator.IouSimilarity()\n matcher = argmax_matcher.ArgMaxMatcher(\n match_threshold,\n unmatched_threshold=match_threshold,\n negatives_lower_than_unmatched=True,\n force_match_for_each_row=True)\n box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()\n\n self._target_assigner = target_assigner.TargetAssigner(\n similarity_calc, matcher, box_coder)\n self._anchors = anchors\n self._match_threshold = match_threshold\n self._num_classes = num_classes\n\n def _unpack_labels(self, labels):\n \"\"\"Unpacks an array of labels into multiscales labels.\"\"\"\n labels_unpacked = collections.OrderedDict()\n anchors = self._anchors\n count = 0\n for level in range(anchors.min_level, anchors.max_level + 1):\n feat_size = anchors.feat_sizes[level]\n steps = feat_size['height'] * feat_size[\n 'width'] * anchors.get_anchors_per_location()\n indices = tf.range(count, count + steps)\n count += steps\n labels_unpacked[level] = tf.reshape(\n tf.gather(labels, indices),\n [feat_size['height'], feat_size['width'], -1])\n return labels_unpacked\n\n def label_anchors(self, gt_boxes, gt_labels):\n \"\"\"Labels anchors with ground truth inputs.\n\n Args:\n gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.\n For each row, it stores [y0, x0, y1, x1] for four corners of a box.\n gt_labels: A integer tensor with shape [N, 1] representing groundtruth\n classes.\n Returns:\n cls_targets_dict: ordered dictionary with keys\n [min_level, min_level+1, ..., max_level]. The values are tensor with\n shape [height_l, width_l, num_anchors]. The height_l and width_l\n represent the dimension of class logits at l-th level.\n box_targets_dict: ordered dictionary with keys\n [min_level, min_level+1, ..., max_level]. 
The values are tensor with\n shape [height_l, width_l, num_anchors * 4]. The height_l and\n width_l represent the dimension of bounding box regression output at\n l-th level.\n num_positives: scalar tensor storing number of positives in an image.\n \"\"\"\n gt_box_list = box_list.BoxList(gt_boxes)\n anchor_box_list = box_list.BoxList(self._anchors.boxes)\n\n # cls_weights, box_weights are not used\n cls_targets, _, box_targets, _, matches = self._target_assigner.assign(\n anchor_box_list, gt_box_list, gt_labels)\n\n # class labels start from 1 and the background class = -1\n cls_targets -= 1\n cls_targets = tf.cast(cls_targets, tf.int32)\n\n # Unpack labels.\n cls_targets_dict = self._unpack_labels(cls_targets)\n box_targets_dict = self._unpack_labels(box_targets)\n num_positives = tf.reduce_sum(\n tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))\n\n return cls_targets_dict, box_targets_dict, num_positives\n\n def generate_detections(self,\n cls_outputs,\n box_outputs,\n indices,\n classes,\n image_id,\n image_scale,\n min_score_thresh=MIN_SCORE_THRESH,\n max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,\n disable_pyfun=None):\n \"\"\"Generate detections based on class and box predictions.\"\"\"\n if disable_pyfun:\n return _generate_detections_tf(\n cls_outputs,\n box_outputs,\n self._anchors.boxes,\n indices,\n classes,\n image_id,\n image_scale,\n min_score_thresh=min_score_thresh,\n max_boxes_to_draw=max_boxes_to_draw)\n else:\n return tf.py_func(_generate_detections, [\n cls_outputs, box_outputs, self._anchors.boxes, indices, classes,\n image_id, image_scale, self._num_classes, max_boxes_to_draw,\n ], tf.float32)\n"
] |
[
[
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.argsort",
"numpy.minimum",
"numpy.exp",
"tensorflow.compat.v1.convert_to_tensor",
"numpy.where",
"numpy.concatenate",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.where_v2",
"numpy.swapaxes",
"numpy.arange",
"numpy.column_stack",
"tensorflow.compat.v1.range",
"numpy.vstack",
"numpy.expand_dims",
"tensorflow.compat.v1.cast",
"numpy.array",
"numpy.zeros",
"tensorflow.compat.v1.py_func",
"tensorflow.compat.v1.math.exp",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.expand_dims",
"numpy.argsort",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.math.sigmoid",
"tensorflow.compat.v1.image.non_max_suppression_with_scores",
"tensorflow.compat.v1.TensorArray",
"tensorflow.compat.v1.size",
"tensorflow.compat.v1.concat",
"numpy.meshgrid",
"numpy.maximum"
]
] |
tynguyen/sound-spaces
|
[
"b196f3a36b4076752400cbf186e9cf2e160cc3c2",
"b196f3a36b4076752400cbf186e9cf2e160cc3c2"
] |
[
"ss_baselines/savi/ppo/slurm_utils.py",
"utils/observations_conversion.py"
] |
[
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport shlex\nimport signal\nimport subprocess\nimport threading\nfrom os import path as osp\nfrom typing import Any, Optional, Tuple\n\nimport ifcfg\nimport torch\n\nfrom habitat import logger\n\nEXIT = threading.Event()\nEXIT.clear()\nREQUEUE = threading.Event()\nREQUEUE.clear()\nMAIN_PID = os.getpid()\n\n\nSLURM_JOBID = os.environ.get(\"SLURM_JOB_ID\", None)\nINTERRUPTED_STATE_FILE = osp.join(\n os.environ[\"HOME\"], \".interrupted_states\", f\"{SLURM_JOBID}.pth\"\n)\n\n\ndef _clean_exit_handler(signum, frame):\n EXIT.set()\n print(\"Exiting cleanly\", flush=True)\n\n\ndef _requeue_handler(signal, frame):\n print(\"Got signal to requeue\", flush=True)\n EXIT.set()\n REQUEUE.set()\n\n\ndef add_signal_handlers():\n signal.signal(signal.SIGINT, _clean_exit_handler)\n signal.signal(signal.SIGTERM, _clean_exit_handler)\n\n # SIGUSR2 can be sent to all processes to have them cleanup\n # and exit nicely. This is nice to use with SLURM as scancel <job_id>\n # sets a 30 second timer for the job to exit, and it can take more than\n # 30 seconds for the job to cleanup and exit nicely. When using NCCL,\n # forcing the job to exit without cleaning up can be bad.\n # scancel --signal SIGUSR2 <job_id> will set no such timer and will give\n # the job ample time to cleanup and exit.\n signal.signal(signal.SIGUSR2, _clean_exit_handler)\n\n signal.signal(signal.SIGUSR1, _requeue_handler)\n\n\ndef save_interrupted_state(state: Any, filename: str = None, model_dir: str = None):\n r\"\"\"Saves the interrupted job state to the specified filename.\n This is useful when working with preemptable job partitions.\n This method will do nothing if SLURM is not currently being used and the filename is the default\n :param state: The state to save\n :param filename: The filename. Defaults to \"${HOME}/.interrupted_states/${SLURM_JOBID}.pth\"\n \"\"\"\n if SLURM_JOBID is None and filename is None:\n logger.warn(\"SLURM_JOBID is none, not saving interrupted state\")\n return\n\n if filename is None:\n if model_dir is not None:\n filename = os.path.join(model_dir, 'interrupted_state.pth')\n else:\n filename = INTERRUPTED_STATE_FILE\n\n torch.save(state, filename)\n\n\ndef load_interrupted_state(filename: str = None, model_dir: str = None) -> Optional[Any]:\n r\"\"\"Loads the saved interrupted state\n :param filename: The filename of the saved state.\n Defaults to \"${HOME}/.interrupted_states/${SLURM_JOBID}.pth\"\n :return: The saved state if the file exists, else none\n \"\"\"\n if SLURM_JOBID is None and filename is None:\n return None\n\n if filename is None:\n if model_dir is not None:\n filename = os.path.join(model_dir, 'interrupted_state.pth')\n else:\n filename = INTERRUPTED_STATE_FILE\n\n if not osp.exists(filename):\n return None\n\n return torch.load(filename, map_location=\"cpu\")\n\n\ndef requeue_job():\n r\"\"\"Requeue the job by calling ``scontrol requeue ${SLURM_JOBID}``\"\"\"\n if SLURM_JOBID is None:\n return\n\n if os.environ['SLURM_PROCID'] == '0' and os.getpid() == MAIN_PID:\n logger.info(f\"Requeueing job {SLURM_JOBID}\")\n subprocess.check_call(shlex.split(f\"scontrol requeue {SLURM_JOBID}\"))\n\n\ndef get_ifname():\n return ifcfg.default_interface()[\"device\"]",
"import numpy as np\nfrom typing import Dict, Any, List, Optional\n\n\ndef convert_observation_to_frame(\n observation: Dict, is_depth_normalized=False\n) -> np.ndarray:\n r\"\"\"Generate image of single frame from observation\n\n Args:\n observation: observation returned from an environment step().\n Returns:\n generated image of a single frame.\n \"\"\"\n egocentric_view_l: List[np.ndarray] = []\n if \"rgb\" in observation:\n rgb = observation[\"rgb\"]\n if not isinstance(rgb, np.ndarray):\n rgb = rgb.cpu().numpy()\n\n egocentric_view_l.append(rgb)\n\n # draw depth map if observation has depth info\n if \"depth\" in observation:\n depth_map = observation[\"depth\"].squeeze()\n if is_depth_normalized:\n depth_map *= 255.0\n if not isinstance(depth_map, np.ndarray):\n depth_map = depth_map.cpu().numpy()\n\n depth_map = depth_map.astype(np.uint8)\n depth_map = np.stack([depth_map for _ in range(3)], axis=2)\n egocentric_view_l.append(depth_map)\n\n # add image goal if observation has image_goal info\n if \"imagegoal\" in observation:\n rgb = observation[\"imagegoal\"]\n if not isinstance(rgb, np.ndarray):\n rgb = rgb.cpu().numpy()\n\n egocentric_view_l.append(rgb)\n\n assert len(egocentric_view_l) > 0, \"Expected at least one visual sensor enabled.\"\n egocentric_view = np.concatenate(egocentric_view_l, axis=1)\n\n frame = egocentric_view\n\n return frame\n"
] |
[
[
"torch.save",
"torch.load"
],
[
"numpy.concatenate"
]
] |
stungkit/pytorch
|
[
"e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25",
"e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25"
] |
[
"torch/fx/passes/shape_prop.py",
"torch/cpu/amp/autocast_mode.py"
] |
[
"import torch\nimport torch.fx\nimport traceback\n\nfrom torch.fx.node import Node, map_aggregate\nfrom typing import Any, Tuple, NamedTuple, Optional, Dict\nfrom torch.fx._compatibility import compatibility\n\n\n@compatibility(is_backward_compatible=True)\nclass TensorMetadata(NamedTuple):\n # TensorMetadata is a structure containing pertinent information\n # about a tensor within a PyTorch program.\n\n # General Tensor metadata\n shape : torch.Size\n dtype : torch.dtype\n requires_grad : bool\n stride : Tuple[int]\n memory_format : Optional[torch.memory_format]\n\n # Quantization metadata\n is_quantized : bool\n qparams: Dict[str, Any]\n\ndef _extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata:\n \"\"\"\n Extract a TensorMetadata NamedTuple describing `result`.\n \"\"\"\n shape = result.shape\n dtype = result.dtype\n requires_grad = result.requires_grad\n stride = result.stride()\n\n memory_formats = {\n torch.contiguous_format,\n torch.channels_last,\n torch.channels_last_3d,\n }\n\n memory_format = None\n\n for query_format in memory_formats:\n if result.is_contiguous(memory_format=query_format):\n memory_format = query_format\n break\n\n is_quantized = result.is_quantized\n qparams: Dict[str, Any] = {}\n if is_quantized:\n qscheme = result.qscheme()\n qparams[\"qscheme\"] = qscheme\n if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:\n qparams[\"scale\"] = result.q_scale() # type: ignore[assignment]\n qparams[\"zero_point\"] = result.q_zero_point() # type: ignore[assignment]\n elif qscheme in {torch.per_channel_affine, torch.per_channel_affine_float_qparams, torch.per_channel_symmetric}:\n # In this branch, scale and zero_point are expected to be tensors,\n # we store the values as immutable_list in TensorMetadata for\n # easier serialization downstream\n qparams[\"scale\"] = result.q_per_channel_scales().tolist() # type: ignore[assignment]\n qparams[\"zero_point\"] = result.q_per_channel_zero_points().tolist() # type: ignore[assignment]\n qparams[\"axis\"] = result.q_per_channel_axis() # type: ignore[assignment]\n\n return TensorMetadata(\n shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams)\n\n@compatibility(is_backward_compatible=True)\nclass ShapeProp(torch.fx.Interpreter):\n \"\"\"\n Execute an FX graph Node-by-Node and\n record the shape and type of the result\n into the corresponding node.\n\n Example:\n In this example, we record the shape\n and data type of a module given\n an example input ``torch.randn(50, D_in)``.\n We print the name, shape and dtype of each node.\n\n class TwoLayerNet(torch.nn.Module):\n def __init__(self, D_in, H, D_out):\n super(TwoLayerNet, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.linear2 = torch.nn.Linear(H, D_out)\n def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred\n N, D_in, H, D_out = 64, 1000, 100, 10\n x = torch.randn(N, D_in)\n y = torch.randn(N, D_out)\n model = TwoLayerNet(D_in, H, D_out)\n gm = torch.fx.symbolic_trace(model)\n sample_input = torch.randn(50, D_in)\n ShapeProp(gm).propagate(sample_input)\n\n for node in gm.graph.nodes:\n print(node.name, node.meta['tensor_meta'].dtype,\n node.meta['tensor_meta'].shape)\n\n The output of this code is:\n\n x torch.float32 torch.Size([50, 1000])\n linear1 torch.float32 torch.Size([50, 100])\n clamp_1 torch.float32 torch.Size([50, 100])\n linear2 torch.float32 torch.Size([50, 10])\n output torch.float32 torch.Size([50, 10])\n\n Args:\n module (GraphModule): The 
module to be executed\n\n \"\"\"\n def run_node(self, n : Node) -> Any:\n try:\n result = super().run_node(n)\n except Exception:\n traceback.print_exc()\n raise RuntimeError(\n f\"ShapeProp error for: node={n.format_node()} with \"\n f\"meta={n.meta}\"\n )\n\n found_tensor = False\n\n def extract_tensor_meta(obj):\n if isinstance(obj, torch.Tensor):\n nonlocal found_tensor\n found_tensor = True\n return _extract_tensor_metadata(obj)\n else:\n return obj\n\n meta = map_aggregate(result, extract_tensor_meta)\n if found_tensor:\n n.meta['tensor_meta'] = meta\n\n n.meta['type'] = type(result)\n return result\n\n def propagate(self, *args):\n \"\"\"\n Run `module` via interpretation and return the result and\n record the shape and type of each node.\n\n Args:\n *args (Tensor): the sample input.\n\n Returns:\n Any: The value returned from executing the Module\n \"\"\"\n return super().run(*args)\n",
"import torch\nfrom typing import Any\n\nclass autocast(torch.amp.autocast_mode.autocast):\n r\"\"\"\n See :class:`torch.autocast`.\n ``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast(\"cpu\", args...)``\n \"\"\"\n def __init__(self, enabled : bool = True, dtype : torch.dtype = torch.bfloat16, cache_enabled : bool = True):\n if torch._jit_internal.is_scripting():\n self._enabled = enabled\n self.device = \"cpu\"\n self.fast_dtype = dtype\n return\n super().__init__(\"cpu\", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)\n\n def __enter__(self):\n if torch._jit_internal.is_scripting():\n return self\n return super().__enter__()\n\n # TODO: discuss a unified TorchScript-friendly API for autocast\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override]\n if torch._jit_internal.is_scripting():\n return\n return super().__exit__(exc_type, exc_val, exc_tb)\n\n def __call__(self, func):\n if torch._jit_internal.is_scripting():\n return func\n return super().__call__(func)\n"
] |
[
[
"torch.fx._compatibility.compatibility",
"torch.fx.node.map_aggregate"
],
[
"torch._jit_internal.is_scripting"
]
] |
shere-khan/models
|
[
"ab1858370a204f793c9d609b05ff60e001d403e6"
] |
[
"official/utils/misc/keras_utils.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helper functions for the Keras implementations of models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\nimport time\n\nfrom absl import logging\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.eager import profiler\n\n\nclass BatchTimestamp(object):\n \"\"\"A structure to store batch time stamp.\"\"\"\n\n def __init__(self, batch_index, timestamp):\n self.batch_index = batch_index\n self.timestamp = timestamp\n\n def __repr__(self):\n return \"'BatchTimestamp<batch_index: {}, timestamp: {}>'\".format(\n self.batch_index, self.timestamp)\n\n\nclass TimeHistory(tf.keras.callbacks.Callback):\n \"\"\"Callback for Keras models.\"\"\"\n\n def __init__(self, batch_size, log_steps):\n \"\"\"Callback for logging performance.\n\n Args:\n batch_size: Total batch size.\n log_steps: Interval of steps between logging of batch level stats.\n \"\"\"\n self.batch_size = batch_size\n super(TimeHistory, self).__init__()\n self.log_steps = log_steps\n self.global_steps = 0\n\n # Logs start of step 1 then end of each step based on log_steps interval.\n self.timestamp_log = []\n\n # Records the time each epoch takes to run from start to finish of epoch.\n self.epoch_runtime_log = []\n\n def on_train_end(self, logs=None):\n self.train_finish_time = time.time()\n\n def on_epoch_begin(self, epoch, logs=None):\n self.epoch_start = time.time()\n\n def on_batch_begin(self, batch, logs=None):\n self.global_steps += 1\n if self.global_steps == 1:\n self.start_time = time.time()\n self.timestamp_log.append(BatchTimestamp(self.global_steps,\n self.start_time))\n\n def on_batch_end(self, batch, logs=None):\n \"\"\"Records elapse time of the batch and calculates examples per second.\"\"\"\n if self.global_steps % self.log_steps == 0:\n timestamp = time.time()\n elapsed_time = timestamp - self.start_time\n examples_per_second = (self.batch_size * self.log_steps) / elapsed_time\n self.timestamp_log.append(BatchTimestamp(self.global_steps, timestamp))\n logging.info(\n \"BenchmarkMetric: {'global step':%d, 'time_taken': %f,\"\n \"'examples_per_second': %f}\",\n self.global_steps, elapsed_time, examples_per_second)\n self.start_time = timestamp\n\n def on_epoch_end(self, epoch, logs=None):\n epoch_run_time = time.time() - self.epoch_start\n self.epoch_runtime_log.append(epoch_run_time)\n logging.info(\n \"BenchmarkMetric: {'epoch':%d, 'time_taken': %f}\",\n epoch, epoch_run_time)\n\n\ndef get_profiler_callback(model_dir, profile_steps, enable_tensorboard,\n steps_per_epoch):\n \"\"\"Validate profile_steps flag value and return profiler callback.\"\"\"\n profile_steps_error_message = (\n 'profile_steps must be a comma separated 
pair of positive integers, '\n 'specifying the first and last steps to be profiled.'\n )\n try:\n profile_steps = [int(i) for i in profile_steps.split(',')]\n except ValueError:\n raise ValueError(profile_steps_error_message)\n if len(profile_steps) != 2:\n raise ValueError(profile_steps_error_message)\n start_step, stop_step = profile_steps\n if start_step < 0 or start_step > stop_step:\n raise ValueError(profile_steps_error_message)\n if enable_tensorboard:\n logging.warning(\n 'Both TensorBoard and profiler callbacks are used. Note that the '\n 'TensorBoard callback profiles the 2nd step (unless otherwise '\n 'specified). Please make sure the steps profiled by the two callbacks '\n 'do not overlap.')\n return ProfilerCallback(model_dir, start_step, stop_step, steps_per_epoch)\n\n\nclass ProfilerCallback(tf.keras.callbacks.Callback):\n \"\"\"Save profiles in specified step range to log directory.\"\"\"\n\n def __init__(self, log_dir, start_step, stop_step, steps_per_epoch):\n super(ProfilerCallback, self).__init__()\n self.log_dir = log_dir\n self.start_step = start_step\n self.stop_step = stop_step\n self.start_epoch = start_step // steps_per_epoch\n self.stop_epoch = stop_step // steps_per_epoch\n self.start_step_in_epoch = start_step % steps_per_epoch\n self.stop_step_in_epoch = stop_step % steps_per_epoch\n self.should_start = False\n self.should_stop = False\n\n def on_epoch_begin(self, epoch, logs=None):\n if epoch == self.start_epoch:\n self.should_start = True\n if epoch == self.stop_epoch:\n self.should_stop = True\n\n def on_batch_begin(self, batch, logs=None):\n if batch == self.start_step_in_epoch and self.should_start:\n self.should_start = False\n profiler.start()\n logging.info('Profiler started at Step %s', self.start_step)\n\n def on_batch_end(self, batch, logs=None):\n if batch == self.stop_step_in_epoch and self.should_stop:\n self.should_stop = False\n results = profiler.stop()\n profiler.save(self.log_dir, results)\n logging.info(\n 'Profiler saved profiles for steps between %s and %s to %s',\n self.start_step, self.stop_step, self.log_dir)\n\n\ndef set_session_config(enable_eager=False,\n enable_xla=False):\n \"\"\"Sets the session config.\"\"\"\n if is_v2_0():\n set_config_v2(enable_xla=enable_xla)\n else:\n config = get_config_proto_v1(enable_xla=enable_xla)\n if enable_eager:\n tf.compat.v1.enable_eager_execution(config=config)\n else:\n sess = tf.Session(config=config)\n tf.keras.backend.set_session(sess)\n\n\ndef get_config_proto_v1(enable_xla=False):\n \"\"\"Return config proto according to flag settings, or None to use default.\"\"\"\n config = None\n if enable_xla:\n config = tf.compat.v1.ConfigProto()\n config.graph_options.optimizer_options.global_jit_level = (\n tf.OptimizerOptions.ON_2)\n # Disable PinToHostOptimizer in grappler when enabling XLA because it causes\n # OOM and performance regression.\n config.graph_options.rewrite_options.pin_to_host_optimization = (\n rewriter_config_pb2.RewriterConfig.OFF)\n return config\n\n\ndef set_config_v2(enable_xla=False):\n \"\"\"Config eager context according to flag values using TF 2.0 API.\"\"\"\n if enable_xla:\n tf.config.optimizer.set_jit(True)\n # Disable PinToHostOptimizer in grappler when enabling XLA because it\n # causes OOM and performance regression.\n tf.config.optimizer.set_experimental_options(\n {'pin_to_host_optimization': False}\n )\n\n\ndef is_v2_0():\n \"\"\"Returns true if using tf 2.0.\"\"\"\n return tf2.enabled()\n\n\ndef set_gpu_thread_mode_and_count(gpu_thread_mode,\n 
datasets_num_private_threads,\n num_gpus, per_gpu_thread_count):\n \"\"\"Set GPU thread mode and count, and adjust dataset threads count.\"\"\"\n cpu_count = multiprocessing.cpu_count()\n logging.info('Logical CPU cores: %s', cpu_count)\n\n # Allocate private thread pool for each GPU to schedule and launch kernels\n per_gpu_thread_count = per_gpu_thread_count or 2\n os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode\n os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)\n logging.info('TF_GPU_THREAD_COUNT: %s',\n os.environ['TF_GPU_THREAD_COUNT'])\n logging.info('TF_GPU_THREAD_MODE: %s',\n os.environ['TF_GPU_THREAD_MODE'])\n\n # Limit data preprocessing threadpool to CPU cores minus number of total GPU\n # private threads and memory copy threads.\n total_gpu_thread_count = per_gpu_thread_count * num_gpus\n num_runtime_threads = num_gpus\n if not datasets_num_private_threads:\n datasets_num_private_threads = min(\n cpu_count - total_gpu_thread_count - num_runtime_threads,\n num_gpus * 8)\n logging.info('Set datasets_num_private_threads to %s',\n datasets_num_private_threads)\n"
] |
[
[
"tensorflow.compat.v1.ConfigProto",
"tensorflow.config.optimizer.set_jit",
"tensorflow.python.eager.profiler.stop",
"tensorflow.python.eager.profiler.save",
"tensorflow.Session",
"tensorflow.keras.backend.set_session",
"tensorflow.python.tf2.enabled",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.python.eager.profiler.start",
"tensorflow.config.optimizer.set_experimental_options"
]
] |
MilesCranmer/numpy
|
[
"7a1ee13ee28083c484a42a657067570773bcddbe"
] |
[
"numpy/lib/histograms.py"
] |
[
"\"\"\"\nHistogram-related functions\n\"\"\"\nimport contextlib\nimport functools\nimport operator\nimport warnings\n\nimport numpy as np\nfrom numpy.core import overrides\n\n__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n# range is a keyword argument to many functions, so save the builtin so they can\n# use it.\n_range = range\n\n\ndef _ptp(x):\n \"\"\"Peak-to-peak value of x.\n\n This implementation avoids the problem of signed integer arrays having a\n peak-to-peak value that cannot be represented with the array's data type.\n This function returns an unsigned value for signed integer arrays.\n \"\"\"\n return _unsigned_subtract(x.max(), x.min())\n\n\ndef _hist_bin_sqrt(x, range):\n \"\"\"\n Square root histogram bin estimator.\n\n Bin width is inversely proportional to the data size. Used by many\n programs for its simplicity.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return _ptp(x) / np.sqrt(x.size)\n\n\ndef _hist_bin_sturges(x, range):\n \"\"\"\n Sturges histogram bin estimator.\n\n A very simplistic estimator based on the assumption of normality of\n the data. This estimator has poor performance for non-normal data,\n which becomes especially obvious for large data sets. The estimate\n depends only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return _ptp(x) / (np.log2(x.size) + 1.0)\n\n\ndef _hist_bin_rice(x, range):\n \"\"\"\n Rice histogram bin estimator.\n\n Another simple estimator with no normality assumption. It has better\n performance for large data than Sturges, but tends to overestimate\n the number of bins. The number of bins is proportional to the cube\n root of data size (asymptotically optimal). The estimate depends\n only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return _ptp(x) / (2.0 * x.size ** (1.0 / 3))\n\n\ndef _hist_bin_scott(x, range):\n \"\"\"\n Scott histogram bin estimator.\n\n The binwidth is proportional to the standard deviation of the data\n and inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. 
May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)\n\n\ndef _hist_bin_stone(x, range):\n \"\"\"\n Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).\n\n The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.\n The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.\n https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule\n\n This paper by Stone appears to be the origination of this rule.\n http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n range : (float, float)\n The lower and upper range of the bins.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n\n n = x.size\n ptp_x = _ptp(x)\n if n <= 1 or ptp_x == 0:\n return 0\n\n def jhat(nbins):\n hh = ptp_x / nbins\n p_k = np.histogram(x, bins=nbins, range=range)[0] / n\n return (2 - (n + 1) * p_k.dot(p_k)) / hh\n\n nbins_upper_bound = max(100, int(np.sqrt(n)))\n nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)\n if nbins == nbins_upper_bound:\n warnings.warn(\"The number of bins estimated may be suboptimal.\",\n RuntimeWarning, stacklevel=3)\n return ptp_x / nbins\n\n\ndef _hist_bin_doane(x, range):\n \"\"\"\n Doane's histogram bin estimator.\n\n Improved version of Sturges' formula which works better for\n non-normal data. See\n stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n if x.size > 2:\n sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))\n sigma = np.std(x)\n if sigma > 0.0:\n # These three operations add up to\n # g1 = np.mean(((x - np.mean(x)) / sigma)**3)\n # but use only one temp array instead of three\n temp = x - np.mean(x)\n np.true_divide(temp, sigma, temp)\n np.power(temp, 3, temp)\n g1 = np.mean(temp)\n return _ptp(x) / (1.0 + np.log2(x.size) +\n np.log2(1.0 + np.absolute(g1) / sg1))\n return 0.0\n\n\ndef _hist_bin_fd(x, range):\n \"\"\"\n The Freedman-Diaconis histogram bin estimator.\n\n The Freedman-Diaconis rule uses interquartile range (IQR) to\n estimate binwidth. It is considered a variation of the Scott rule\n with more robustness as the IQR is less affected by outliers than\n the standard deviation. However, the IQR depends on fewer points\n than the standard deviation, so it is less accurate, especially for\n long tailed distributions.\n\n If the IQR is 0, this function returns 0 for the bin width.\n Binwidth is inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. 
May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n iqr = np.subtract(*np.percentile(x, [75, 25]))\n return 2.0 * iqr * x.size ** (-1.0 / 3.0)\n\n\ndef _hist_bin_auto(x, range):\n \"\"\"\n Histogram bin estimator that uses the minimum width of the\n Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.\n If the bin width from the FD estimator is 0, the Sturges estimator is used.\n\n The FD estimator is usually the most robust method, but its width\n estimate tends to be too large for small `x` and bad for data with limited\n variance. The Sturges estimator is quite good for small (<1000) datasets\n and is the default in the R language. This method gives good off-the-shelf\n behaviour.\n\n .. versionchanged:: 1.15.0\n If there is limited variance the IQR can be 0, which results in the\n FD bin width being 0 too. This is not a valid bin width, so\n ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.\n If the IQR is 0, it's unlikely any variance-based estimators will be of\n use, so we revert to the Sturges estimator, which only uses the size of the\n dataset in its calculation.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n\n See Also\n --------\n _hist_bin_fd, _hist_bin_sturges\n \"\"\"\n fd_bw = _hist_bin_fd(x, range)\n sturges_bw = _hist_bin_sturges(x, range)\n del range # unused\n if fd_bw:\n return min(fd_bw, sturges_bw)\n else:\n # limited variance, so we return a len dependent bw estimator\n return sturges_bw\n\n# Private dict initialized at module load time\n_hist_bin_selectors = {'stone': _hist_bin_stone,\n 'auto': _hist_bin_auto,\n 'doane': _hist_bin_doane,\n 'fd': _hist_bin_fd,\n 'rice': _hist_bin_rice,\n 'scott': _hist_bin_scott,\n 'sqrt': _hist_bin_sqrt,\n 'sturges': _hist_bin_sturges}\n\n\ndef _ravel_and_check_weights(a, weights):\n \"\"\" Check a and weights have matching shapes, and ravel both \"\"\"\n a = np.asarray(a)\n\n # Ensure that the array is a \"subtractable\" dtype\n if a.dtype == np.bool_:\n warnings.warn(\"Converting input from {} to {} for compatibility.\"\n .format(a.dtype, np.uint8),\n RuntimeWarning, stacklevel=3)\n a = a.astype(np.uint8)\n\n if weights is not None:\n weights = np.asarray(weights)\n if weights.shape != a.shape:\n raise ValueError(\n 'weights should have the same shape as a.')\n weights = weights.ravel()\n a = a.ravel()\n return a, weights\n\n\ndef _get_outer_edges(a, range):\n \"\"\"\n Determine the outer bin edges to use, from either the data or the range\n argument\n \"\"\"\n if range is not None:\n first_edge, last_edge = range\n if first_edge > last_edge:\n raise ValueError(\n 'max must be larger than min in range parameter.')\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n \"supplied range of [{}, {}] is not finite\".format(first_edge, last_edge))\n elif a.size == 0:\n # handle empty arrays. 
Can't determine range, so use 0-1.\n first_edge, last_edge = 0, 1\n else:\n first_edge, last_edge = a.min(), a.max()\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n \"autodetected range of [{}, {}] is not finite\".format(first_edge, last_edge))\n\n # expand empty range to avoid divide by zero\n if first_edge == last_edge:\n first_edge = first_edge - 0.5\n last_edge = last_edge + 0.5\n\n return first_edge, last_edge\n\n\ndef _unsigned_subtract(a, b):\n \"\"\"\n Subtract two values where a >= b, and produce an unsigned result\n\n This is needed when finding the difference between the upper and lower\n bound of an int16 histogram\n \"\"\"\n # coerce to a single type\n signed_to_unsigned = {\n np.byte: np.ubyte,\n np.short: np.ushort,\n np.intc: np.uintc,\n np.int_: np.uint,\n np.longlong: np.ulonglong\n }\n dt = np.result_type(a, b)\n try:\n dt = signed_to_unsigned[dt.type]\n except KeyError:\n return np.subtract(a, b, dtype=dt)\n else:\n # we know the inputs are integers, and we are deliberately casting\n # signed to unsigned\n return np.subtract(a, b, casting='unsafe', dtype=dt)\n\n\ndef _get_bin_edges(a, bins, range, weights):\n \"\"\"\n Computes the bins used internally by `histogram`.\n\n Parameters\n ==========\n a : ndarray\n Ravelled data array\n bins, range\n Forwarded arguments from `histogram`.\n weights : ndarray, optional\n Ravelled weights array, or None\n\n Returns\n =======\n bin_edges : ndarray\n Array of bin edges\n uniform_bins : (Number, Number, int):\n The upper bound, lowerbound, and number of bins, used in the optimized\n implementation of `histogram` that works on uniform bins.\n \"\"\"\n # parse the overloaded bins argument\n n_equal_bins = None\n bin_edges = None\n\n if isinstance(bins, str):\n bin_name = bins\n # if `bins` is a string for an automatic method,\n # this will replace it with the number of bins calculated\n if bin_name not in _hist_bin_selectors:\n raise ValueError(\n \"{!r} is not a valid estimator for `bins`\".format(bin_name))\n if weights is not None:\n raise TypeError(\"Automated estimation of the number of \"\n \"bins is not supported for weighted data\")\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n # truncate the range if needed\n if range is not None:\n keep = (a >= first_edge)\n keep &= (a <= last_edge)\n if not np.logical_and.reduce(keep):\n a = a[keep]\n\n if a.size == 0:\n n_equal_bins = 1\n else:\n # Do not call selectors on empty arrays\n width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))\n if width:\n n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))\n else:\n # Width can be zero for some estimators, e.g. FD when\n # the IQR of the data is zero.\n n_equal_bins = 1\n\n elif np.ndim(bins) == 0:\n try:\n n_equal_bins = operator.index(bins)\n except TypeError as e:\n raise TypeError(\n '`bins` must be an integer, a string, or an array') from e\n if n_equal_bins < 1:\n raise ValueError('`bins` must be positive, when an integer')\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n elif np.ndim(bins) == 1:\n bin_edges = np.asarray(bins)\n if np.any(bin_edges[:-1] > bin_edges[1:]):\n raise ValueError(\n '`bins` must increase monotonically, when an array')\n\n else:\n raise ValueError('`bins` must be 1d, when an array')\n\n if n_equal_bins is not None:\n # gh-10322 means that type resolution rules are dependent on array\n # shapes. 
To avoid this causing problems, we pick a type now and stick\n # with it throughout.\n bin_type = np.result_type(first_edge, last_edge, a)\n if np.issubdtype(bin_type, np.integer):\n bin_type = np.result_type(bin_type, float)\n\n # bin edges must be computed\n bin_edges = np.linspace(\n first_edge, last_edge, n_equal_bins + 1,\n endpoint=True, dtype=bin_type)\n return bin_edges, (first_edge, last_edge, n_equal_bins)\n else:\n return bin_edges, None\n\n\ndef _search_sorted_inclusive(a, v):\n \"\"\"\n Like `searchsorted`, but where the last item in `v` is placed on the right.\n\n In the context of a histogram, this makes the last bin edge inclusive\n \"\"\"\n return np.concatenate((\n a.searchsorted(v[:-1], 'left'),\n a.searchsorted(v[-1:], 'right')\n ))\n\n\ndef _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):\n return (a, bins, weights)\n\n\n@array_function_dispatch(_histogram_bin_edges_dispatcher)\ndef histogram_bin_edges(a, bins=10, range=None, weights=None):\n r\"\"\"\n Function to calculate only the edges of the bins used by the `histogram`\n function.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines the bin edges, including the rightmost\n edge, allowing for non-uniform bin widths.\n\n If `bins` is a string from the list below, `histogram_bin_edges` will use\n the method chosen to calculate the optimal bin width and\n consequently the number of bins (see `Notes` for more detail on\n the estimators) from the data that falls within the requested\n range. While the bin width will be optimal for the actual data\n in the range, the number of bins will be computed to fill the\n entire range, including the empty portions. For visualisation,\n using the 'auto' option is suggested. Weighted data is not\n supported for automated bin size selection.\n\n 'auto'\n Maximum of the 'sturges' and 'fd' estimators. Provides good\n all around performance.\n\n 'fd' (Freedman Diaconis Estimator)\n Robust (resilient to outliers) estimator that takes into\n account data variability and data size.\n\n 'doane'\n An improved version of Sturges' estimator that works better\n with non-normal datasets.\n\n 'scott'\n Less robust estimator that takes into account data variability\n and data size.\n\n 'stone'\n Estimator based on leave-one-out cross-validation estimate of\n the integrated squared error. Can be regarded as a generalization\n of Scott's rule.\n\n 'rice'\n Estimator does not take variability into account, only data\n size. Commonly overestimates number of bins required.\n\n 'sturges'\n R's default method, only accounts for data size. Only\n optimal for gaussian data and underestimates number of bins\n for large non-gaussian datasets.\n\n 'sqrt'\n Square root (of data size) estimator, used by Excel and\n other programs for its speed and simplicity.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. 
While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). This is currently not used by any of the bin estimators,\n but may be in the future.\n\n Returns\n -------\n bin_edges : array of dtype float\n The edges to pass into `histogram`\n\n See Also\n --------\n histogram\n\n Notes\n -----\n The methods to estimate the optimal number of bins are well founded\n in literature, and are inspired by the choices R provides for\n histogram visualisation. Note that having the number of bins\n proportional to :math:`n^{1/3}` is asymptotically optimal, which is\n why it appears in most estimators. These are simply plug-in methods\n that give good starting points for number of bins. In the equations\n below, :math:`h` is the binwidth and :math:`n_h` is the number of\n bins. All estimators that compute bin counts are recast to bin width\n using the `ptp` of the data. The final bin count is obtained from\n ``np.round(np.ceil(range / h))``. The final bin width is often less\n than what is returned by the estimators below.\n\n 'auto' (maximum of the 'sturges' and 'fd' estimators)\n A compromise to get a good value. For small datasets the Sturges\n value will usually be chosen, while larger datasets will usually\n default to FD. Avoids the overly conservative behaviour of FD\n and Sturges for small and large datasets respectively.\n Switchover point is usually :math:`a.size \\approx 1000`.\n\n 'fd' (Freedman Diaconis Estimator)\n .. math:: h = 2 \\frac{IQR}{n^{1/3}}\n\n The binwidth is proportional to the interquartile range (IQR)\n and inversely proportional to cube root of a.size. Can be too\n conservative for small datasets, but is quite good for large\n datasets. The IQR is very robust to outliers.\n\n 'scott'\n .. math:: h = \\sigma \\sqrt[3]{\\frac{24 \\sqrt{\\pi}}{n}}\n\n The binwidth is proportional to the standard deviation of the\n data and inversely proportional to cube root of ``x.size``. Can\n be too conservative for small datasets, but is quite good for\n large datasets. The standard deviation is not very robust to\n outliers. Values are very similar to the Freedman-Diaconis\n estimator in the absence of outliers.\n\n 'rice'\n .. math:: n_h = 2n^{1/3}\n\n The number of bins is only proportional to cube root of\n ``a.size``. It tends to overestimate the number of bins and it\n does not take into account data variability.\n\n 'sturges'\n .. math:: n_h = \\log _{2}(n) + 1\n\n The number of bins is the base 2 log of ``a.size``. This\n estimator assumes normality of data and is too conservative for\n larger, non-normal datasets. This is the default method in R's\n ``hist`` method.\n\n 'doane'\n .. math:: n_h = 1 + \\log_{2}(n) +\n \\log_{2}\\left(1 + \\frac{|g_1|}{\\sigma_{g_1}}\\right)\n\n g_1 = mean\\left[\\left(\\frac{x - \\mu}{\\sigma}\\right)^3\\right]\n\n \\sigma_{g_1} = \\sqrt{\\frac{6(n - 2)}{(n + 1)(n + 3)}}\n\n An improved version of Sturges' formula that produces better\n estimates for non-normal datasets. This estimator attempts to\n account for the skew of the data.\n\n 'sqrt'\n .. math:: n_h = \\sqrt n\n\n The simplest and fastest estimator. 
Only takes into account the\n data size.\n\n Examples\n --------\n >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])\n >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n >>> np.histogram_bin_edges(arr, bins=2)\n array([0. , 2.5, 5. ])\n\n For consistency with histogram, an array of pre-computed bins is\n passed through unmodified:\n\n >>> np.histogram_bin_edges(arr, [1, 2])\n array([1, 2])\n\n This function allows one set of bins to be computed, and reused across\n multiple histograms:\n\n >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')\n >>> shared_bins\n array([0., 1., 2., 3., 4., 5.])\n\n >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])\n >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)\n >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)\n\n >>> hist_0; hist_1\n array([1, 1, 0, 1, 0])\n array([2, 0, 1, 1, 2])\n\n Which gives more easily comparable results than using separate bins for\n each histogram:\n\n >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')\n >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')\n >>> hist_0; hist_1\n array([1, 1, 1])\n array([2, 1, 1, 2])\n >>> bins_0; bins_1\n array([0., 1., 2., 3.])\n array([0. , 1.25, 2.5 , 3.75, 5. ])\n\n \"\"\"\n a, weights = _ravel_and_check_weights(a, weights)\n bin_edges, _ = _get_bin_edges(a, bins, range, weights)\n return bin_edges\n\n\ndef _histogram_dispatcher(\n a, bins=None, range=None, density=None, weights=None):\n return (a, bins, weights)\n\n\n@array_function_dispatch(_histogram_dispatcher)\ndef histogram(a, bins=10, range=None, density=None, weights=None):\n r\"\"\"\n Compute the histogram of a dataset.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n\n .. versionadded:: 1.11.0\n\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). If `density` is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n density : bool, optional\n If ``False``, the result will contain the number of samples in\n each bin. If ``True``, the result is the value of the\n probability *density* function at the bin, normalized such that\n the *integral* over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability *mass* function.\n\n Returns\n -------\n hist : array\n The values of the histogram. 
See `density` and `weights` for a\n description of the possible semantics.\n bin_edges : array of dtype float\n Return the bin edges ``(length(hist)+1)``.\n\n\n See Also\n --------\n histogramdd, bincount, searchsorted, digitize, histogram_bin_edges\n\n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words,\n if `bins` is::\n\n [1, 2, 3, 4]\n\n then the first bin is ``[1, 2)`` (including 1, but excluding 2) and\n the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which\n *includes* 4.\n\n\n Examples\n --------\n >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])\n (array([0, 2, 1]), array([0, 1, 2, 3]))\n >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)\n (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))\n >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])\n (array([1, 4, 1]), array([0, 1, 2, 3]))\n\n >>> a = np.arange(5)\n >>> hist, bin_edges = np.histogram(a, density=True)\n >>> hist\n array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])\n >>> hist.sum()\n 2.4999999999999996\n >>> np.sum(hist * np.diff(bin_edges))\n 1.0\n\n .. versionadded:: 1.11.0\n\n Automated Bin Selection Methods example, using 2 peak random data\n with 2000 points:\n\n >>> import matplotlib.pyplot as plt\n >>> rng = np.random.RandomState(10) # deterministic random data\n >>> a = np.hstack((rng.normal(size=1000),\n ... rng.normal(loc=5, scale=2, size=1000)))\n >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram\n >>> plt.title(\"Histogram with 'auto' bins\")\n Text(0.5, 1.0, \"Histogram with 'auto' bins\")\n >>> plt.show()\n\n \"\"\"\n a, weights = _ravel_and_check_weights(a, weights)\n\n bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)\n\n # Histogram is an integer or a float array depending on the weights.\n if weights is None:\n ntype = np.dtype(np.intp)\n else:\n ntype = weights.dtype\n\n # We set a block size, as this allows us to iterate over chunks when\n # computing histograms, to minimize memory usage.\n BLOCK = 65536\n\n # The fast path uses bincount, but that only works for certain types\n # of weight\n simple_weights = (\n weights is None or\n np.can_cast(weights.dtype, np.double) or\n np.can_cast(weights.dtype, complex)\n )\n\n if uniform_bins is not None and simple_weights:\n # Fast algorithm for equal bins\n # We now convert values of a to bin indices, under the assumption of\n # equal bin widths (which is valid here).\n first_edge, last_edge, n_equal_bins = uniform_bins\n\n # Initialize empty histogram\n n = np.zeros(n_equal_bins, ntype)\n\n # Pre-compute histogram scaling factor\n norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)\n\n # We iterate over blocks here for two reasons: the first is that for\n # large arrays, it is actually faster (for example for a 10^8 array it\n # is 2x as fast) and it results in a memory footprint 3x lower in the\n # limit of large arrays.\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i+BLOCK]\n if weights is None:\n tmp_w = None\n else:\n tmp_w = weights[i:i + BLOCK]\n\n # Only include values in the right range\n keep = (tmp_a >= first_edge)\n keep &= (tmp_a <= last_edge)\n if not np.logical_and.reduce(keep):\n tmp_a = tmp_a[keep]\n if tmp_w is not None:\n tmp_w = tmp_w[keep]\n\n # This cast ensures no type promotions occur below, which gh-10322\n # make unpredictable. 
Getting it wrong leads to precision errors\n # like gh-8123.\n tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)\n\n # Compute the bin indices, and for values that lie exactly on\n # last_edge we need to subtract one\n f_indices = _unsigned_subtract(tmp_a, first_edge) * norm\n indices = f_indices.astype(np.intp)\n indices[indices == n_equal_bins] -= 1\n\n # The index computation is not guaranteed to give exactly\n # consistent results within ~1 ULP of the bin edges.\n decrement = tmp_a < bin_edges[indices]\n indices[decrement] -= 1\n # The last bin includes the right edge. The other bins do not.\n increment = ((tmp_a >= bin_edges[indices + 1])\n & (indices != n_equal_bins - 1))\n indices[increment] += 1\n\n # We now compute the histogram using bincount\n if ntype.kind == 'c':\n n.real += np.bincount(indices, weights=tmp_w.real,\n minlength=n_equal_bins)\n n.imag += np.bincount(indices, weights=tmp_w.imag,\n minlength=n_equal_bins)\n else:\n n += np.bincount(indices, weights=tmp_w,\n minlength=n_equal_bins).astype(ntype)\n else:\n # Compute via cumulative histogram\n cum_n = np.zeros(bin_edges.shape, ntype)\n if weights is None:\n for i in _range(0, len(a), BLOCK):\n sa = np.sort(a[i:i+BLOCK])\n cum_n += _search_sorted_inclusive(sa, bin_edges)\n else:\n zero = np.zeros(1, dtype=ntype)\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i+BLOCK]\n tmp_w = weights[i:i+BLOCK]\n sorting_index = np.argsort(tmp_a)\n sa = tmp_a[sorting_index]\n sw = tmp_w[sorting_index]\n cw = np.concatenate((zero, sw.cumsum()))\n bin_index = _search_sorted_inclusive(sa, bin_edges)\n cum_n += cw[bin_index]\n\n n = np.diff(cum_n)\n\n if density:\n db = np.array(np.diff(bin_edges), float)\n return n/db/n.sum(), bin_edges\n\n return n, bin_edges\n\n\ndef _histogramdd_dispatcher(sample, bins=None, range=None, density=None,\n weights=None):\n if hasattr(sample, 'shape'): # same condition as used in histogramdd\n yield sample\n else:\n yield from sample\n with contextlib.suppress(TypeError):\n yield from bins\n yield weights\n\n\n@array_function_dispatch(_histogramdd_dispatcher)\ndef histogramdd(sample, bins=10, range=None, density=None, weights=None):\n \"\"\"\n Compute the multidimensional histogram of some data.\n\n Parameters\n ----------\n sample : (N, D) array, or (D, N) array_like\n The data to be histogrammed.\n\n Note the unusual interpretation of sample when an array_like:\n\n * When an array, each row is a coordinate in a D-dimensional space -\n such as ``histogramdd(np.array([p1, p2, p3]))``.\n * When an array_like, each element is the list of values for single\n coordinate - such as ``histogramdd((X, Y, Z))``.\n\n The first form should be preferred.\n\n bins : sequence or int, optional\n The bin specification:\n\n * A sequence of arrays describing the monotonically increasing bin\n edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... 
=bins)\n * The number of bins for all dimensions (nx=ny=...=bins).\n\n range : sequence, optional\n A sequence of length D, each an optional (lower, upper) tuple giving\n the outer bin edges to be used if the edges are not given explicitly in\n `bins`.\n An entry of None in the sequence results in the minimum and maximum\n values being used for the corresponding dimension.\n The default, None, is equivalent to passing a tuple of D None values.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_volume``.\n weights : (N,) array_like, optional\n An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.\n Weights are normalized to 1 if density is True. If density is False,\n the values of the returned histogram are equal to the sum of the\n weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray\n The multidimensional histogram of sample x. See density and weights\n for the different possible semantics.\n edges : list\n A list of D arrays describing the bin edges for each dimension.\n\n See Also\n --------\n histogram: 1-D histogram\n histogram2d: 2-D histogram\n\n Examples\n --------\n >>> r = np.random.randn(100,3)\n >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))\n >>> H.shape, edges[0].size, edges[1].size, edges[2].size\n ((5, 8, 4), 6, 9, 5)\n\n \"\"\"\n\n try:\n # Sample is an ND-array.\n N, D = sample.shape\n except (AttributeError, ValueError):\n # Sample is a sequence of 1D arrays.\n sample = np.atleast_2d(sample).T\n N, D = sample.shape\n\n nbin = np.empty(D, int)\n edges = D*[None]\n dedges = D*[None]\n if weights is not None:\n weights = np.asarray(weights)\n\n try:\n M = len(bins)\n if M != D:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n ' sample x.')\n except TypeError:\n # bins is an integer\n bins = D*[bins]\n\n # normalize the range argument\n if range is None:\n range = (None,) * D\n elif len(range) != D:\n raise ValueError('range argument must have one entry per dimension')\n\n # Create edge arrays\n for i in _range(D):\n if np.ndim(bins[i]) == 0:\n if bins[i] < 1:\n raise ValueError(\n '`bins[{}]` must be positive, when an integer'.format(i))\n smin, smax = _get_outer_edges(sample[:,i], range[i])\n try:\n n = operator.index(bins[i])\n\n except TypeError as e:\n raise TypeError(\n \t\"`bins[{}]` must be an integer, when a scalar\".format(i)\n ) from e\n\n edges[i] = np.linspace(smin, smax, n + 1)\n elif np.ndim(bins[i]) == 1:\n edges[i] = np.asarray(bins[i])\n if np.any(edges[i][:-1] > edges[i][1:]):\n raise ValueError(\n '`bins[{}]` must be monotonically increasing, when an array'\n .format(i))\n else:\n raise ValueError(\n '`bins[{}]` must be a scalar or 1d array'.format(i))\n\n nbin[i] = len(edges[i]) + 1 # includes an outlier on each end\n dedges[i] = np.diff(edges[i])\n\n # Compute the bin number each sample falls into.\n Ncount = tuple(\n # avoid np.digitize to work around gh-11022\n np.searchsorted(edges[i], sample[:, i], side='right')\n for i in _range(D)\n )\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right edge to be\n # counted in the last bin, and not as an outlier.\n for i in _range(D):\n # Find which points are on the rightmost edge.\n on_edge = (sample[:, i] == edges[i][-1])\n # Shift these points one bin to the left.\n Ncount[i][on_edge] -= 
1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = np.ravel_multi_index(Ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = np.bincount(xy, weights, minlength=nbin.prod())\n\n # Shape into a proper matrix\n hist = hist.reshape(nbin)\n\n # This preserves the (bad) behavior observed in gh-7845, for now.\n hist = hist.astype(float, casting='safe')\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = D*(slice(1, -1),)\n hist = hist[core]\n\n if density:\n # calculate the probability density function\n s = hist.sum()\n for i in _range(D):\n shape = np.ones(D, int)\n shape[i] = nbin[i] - 2\n hist = hist / dedges[i].reshape(shape)\n hist /= s\n\n if (hist.shape != nbin - 2).any():\n raise RuntimeError(\n \"Internal Shape Error\")\n return hist, edges\n"
] |
[
[
"numpy.true_divide",
"numpy.mean",
"numpy.sort",
"numpy.issubdtype",
"numpy.dtype",
"numpy.bincount",
"numpy.histogram",
"numpy.empty",
"numpy.can_cast",
"numpy.sqrt",
"numpy.ndim",
"numpy.isfinite",
"numpy.atleast_2d",
"numpy.zeros",
"numpy.percentile",
"numpy.diff",
"numpy.std",
"numpy.subtract",
"numpy.power",
"numpy.argsort",
"numpy.absolute",
"numpy.searchsorted",
"numpy.ravel_multi_index",
"numpy.logical_and.reduce",
"numpy.log2",
"numpy.result_type",
"numpy.asarray",
"numpy.ones",
"numpy.any",
"numpy.linspace"
]
] |
sspickle/genlatex
|
[
"d5fd86d4415c85c2f2933981e0e65f9df9f3cda5"
] |
[
"example/tlo-EX-Data.py"
] |
[
"\nimport random\nimport vpython as vp\nimport numpy as np\n\nfrom genlatex import latex_float, latex_vec\n\ndata = []\n\n\"\"\"\nnaming convention. \n\ntlo: this is the prefix to all the templates, and to the .tex outputs\n\ne.g., if you're working on TLO-3 and this is the third quiz you've\ngiven, you might say \"tlo = 'TLO-3v3' and then the template would\nbe 'TLO-3v1-template.txt' and the .tex files would be:\n\n'TLO-3v1-XX.tex' and\n'TLO-3v1-soln-XX.tex' \n\nwhere 'XX' would be a quiz version number based on the random numbers\nused to create the problem/solution pairs.\n\n\"\"\"\n\ntlo='TLO-EX'\ntemplateFile = tlo + '-template.txt'\nquizFilenameTemplate = tlo + '-{:}.tex'\nquizSolnFilenameTemplate = tlo + '-soln-{:}.tex'\n\nqe = 1.6e-19\nG = 1.67e-11\n\n#\n# --- this is where the random values are set up --\n#\n\nrint = random.randint\nuni = random.uniform\n\ndef getTemplateValues(numSamples, seeds):\n \"\"\"\n input:\n numSamples: Integer, number of random Versions to generate.\n seeds: a list of integers to act as random number seeds.\n This allows you to re-create the same values reproducibly.\n If you send in the same integer, you get the same values again.\n \n output:\n A dictionary of data, answers, and the template filenames\n (see below for details)\n \n This example creates a random planet, star mass and position.\n \"\"\"\n \n data = []\n answers = []\n\n for i in range(numSamples):\n \n random.seed(seeds[i]) # make these reproducible, but randomish\n\n Mstar = uni(0.6,1.0)*2e30\n Mplanet = random.randint(2,9)*1e26\n rStar = vp.vec(rint(-8,8),rint(-8,8),rint(-8,8))*1e11\n rPlanet = vp.vec(rint(-8,8),rint(-8,8),rint(-8,8))*1e11\n \n MstarStr = latex_float(Mstar) + r'\\,{\\rm kg}'\n MplanetStr = latex_float(Mplanet) + r'\\,{\\rm kg}'\n rStarStr = latex_vec(rStar) + r'\\,{\\rm m}'\n rPlanetStr = latex_vec(rPlanet) + r'\\,{\\rm m}'\n \n dt = uni(0.8,1.2)*30*3600*24 # around one month\n dtStr = latex_float(dt) + r'\\,{\\rm s}'\n \n r = rPlanet - rStar\n rStr = latex_vec(r) + r'\\,{\\rm m}'\n \n rhat = r.norm()\n rHatStr = latex_vec(rhat)\n \n # let's work out a circular orbit speed\n \n v = np.sqrt(G*Mstar/r.mag)\n vPlanet = r.cross(vp.vec(0,0,1)).norm()*v # pick a good perp. direction\n vPlanetStr = latex_vec(vPlanet) + r'\\,{\\rm m/s}'\n\n newData = {}\n newData.update(vnum= str(seeds[i]).zfill(2))\n newData.update(tlo=tlo, dt=dtStr)\n newData.update(Mstar=MstarStr)\n newData.update(Mplanet=MplanetStr)\n newData.update(rStar=rStarStr)\n newData.update(rPlanet=rPlanetStr)\n newData.update(vPlanet=vPlanetStr)\n\n data.append(newData)\n \n F = -G*Mplanet*Mstar*r.norm()/r.mag**2\n p = Mplanet*vPlanet\n dp = F*dt\n pnew = p + dp\n vnew = pnew/Mplanet\n \n Fstr = latex_vec(F) + r'\\,{\\rm N}'\n pStr = latex_vec(p) + r'\\,{\\rm N s}'\n dPstr = latex_vec(dp) + r'\\,{\\rm N s}'\n vNewStr = latex_vec(vnew) + r'\\,{\\rm m/s}'\n rNew = rPlanet + vnew*dt\n rNewStr = latex_vec(rNew) + r'\\,{\\rm m}'\n \n newAnswer = {}\n newAnswer.update(F=Fstr, p=pStr, dp=dPstr)\n newAnswer.update(r=rStr, rhat=rHatStr)\n newAnswer.update(vNew = vNewStr)\n newAnswer.update(rNew = rNewStr)\n\n answers.append(newAnswer)\n\n def returnDict(**kwargs):\n return kwargs\n \n return returnDict(data = data,\n answers = answers,\n templateFile = templateFile,\n quizFilenameTemplate = quizFilenameTemplate,\n quizSolnFilenameTemplate = quizSolnFilenameTemplate)\n\n"
] |
[
[
"numpy.sqrt"
]
] |
FNTwin/BayGPGO
|
[
"bad7e335d2fd19a93aeee0d591ea7da51c2c6d59",
"2f89699648601d4499dcab285a1d7376f0e1ef4b"
] |
[
"GPGO/dpd_opt_script.py",
"Test/optimization_test.py"
] |
[
"from GPGO.GaussianProcess.GP import GP, generate_grid\nfrom GPGO.GaussianProcess.Kernel.RBF import RBF\nfrom GPGO.BayesOpt import BayesianOptimization\nimport numpy as np\nimport os\nimport argparse\n\n\ndef get_right_coeffs(array):\n # truth_index=[2,3,6,7,9,15,16,19,20,21,24,25,30]\n truth_index = [2, 6, 7, 9, 15, 19, 20, 30]\n\n return np.atleast_2d(np.squeeze(array)[truth_index])\n\n\ndef fill_spots(array):\n # truth_index = [2, 3, 6, 7, 9, 15, 16, 19, 20, 21, 24, 25, 30]\n truth_index = [2, 6, 7, 9, 15, 19, 20, 30]\n\n copy_to = [4, 10, 11, 13, 14, 17, 22, 26, 28, 29]\n copy_from = [2, 3, 2, 6, 7, 15, 16, 15, 19, 20]\n coeffs = np.zeros(36)\n coeffs[truth_index] = np.squeeze(array)\n N = [3, 16, 21, 24, 25]\n coeffs[N] = np.array([127.19, 2.51, -4.3, 124.4, 4.5])\n coeffs[copy_to] = coeffs[copy_from]\n\n return np.atleast_2d(coeffs)\n\n\ndef read_interaction(path):\n \"\"\"Read the dpd interaction file and return the array of the interactions parameters\"\"\"\n path = os.path.join(path, \"full_G11_326N16.solv.inputinteg.txt\")\n with open(path, \"r\") as f:\n coeffs = []\n for row in f:\n a = row.split()\n if \"pair_coeffs\" in a:\n coeffs.append(float(a[3]))\n return np.atleast_2d(coeffs)\n\n\ndef write_interaction(path, array):\n # Array will be less than 36 bounds\n bounds = [\"! AuE\\t\\tAuE\", \"! AuE\\t\\tAuI\", \"! AuE\\t\\tC\", \"! AuE\\t\\tN\", \"! AuE\\t\\tL\", \"! AuE\\t\\tS\", \"! AuE\\t\\tW\",\n \"! AuE\\t\\tCl\", \"! AuI\\t\\tAuI\", \"! AuI\\t\\tC\", \"! AuI\\t\\tN\", \"! AuI\\t\\tL\", \"! AuI\\t\\tS\", \"! AuI\\t\\tW\",\n \"! AuI\\t\\tCl\", \"! C\\t\\tC\", \"! C\\t\\tN\", \"! C\\t\\tL\", \"! C\\t\\tS\", \"! C\\t\\tW\", \"! C\\t\\tCl\",\n \"! N\\t\\tN\", \"! N\\t\\tL\", \"! N\\t\\tS\", \"! N\\t\\tW\", \"! N\\t\\tCl\", \"! L\\t\\tL\", \"! L\\t\\tS\",\n \"! L\\t\\tW\", \"! L\\t\\tCl\", \"! S\\t\\tS\", \"! S\\t\\tW\", \"! S\\t\\tCl\", \"! W\\t\\tW\", \"! W\\t\\tCl\",\n \"! 
Cl\\t\\tCl\"]\n\n bounds_index = [\"2\\t2\", \"1\\t2\", \"2\\t3\", \"2\\t5\", \"2\\t4\", \"2\\t6\", \"2\\t7\", \"2\\t8\", \"1\\t1\", \"1\\t3\",\n \"1\\t5\",\n \"1\\t4\", \"1\\t6\", \"1\\t7\", \"1\\t8\", \"3\\t3\", \"3\\t5\", \"3\\t4\", \"3\\t6\", \"3\\t7\", \"3\\t8\",\n \"5\\t5\", \"4\\t5\",\n\n \"5\\t6\", \"5\\t7\", \"5\\t8\", \"4\\t4\", \"4\\t6\", \"4\\t7\", \"4\\t8\", \"6\\t6\", \"6\\t7\", \"6\\t8\",\n \"7\\t7\", \"7\\t8\",\n \"8\\t8\"]\n\n # bound_mask=[0,1,5,8,12,18,23,27,31,32,33,34,35]\n # mask_value=[51.6, 51.6, -10., 51.6 , 40., 72.,68.9,72., 80.,80.,51.6,51.6, 51.6]\n # N beads fixed\n # bound_mask=[0, 1, 3, 5, 8, 12, 16, 18, 21, 23,24,25 ,27,31,32,33,34,35]\n # mask_value=[51.6, 51.6, 127.19, -10., 51.6 , 40.,2.5, 72.,-4.3,68.9,124.4,4.53,72., 80.,80.,51.6,51.6, 51.6]\n\n bound_mask = [0, 1, 3, 5, 8, 12, 16, 18, 21, 23, 24, 25, 27, 31, 32, 33, 34, 35]\n mask_value = [51.6, 51.6, 127.19, -10., 51.6, 40., 2.5, 72., -4.3, 68.9, 124.4, 4.53, 72., 80., 80., 51.6, 51.6,\n 51.6]\n n_bounds = 36\n # n_real_bounds=13\n n_real_bounds = 8\n array = np.squeeze(array)\n\n \"write an interaction file in path\"\n path = os.path.join(path, \"full_G11_326N16.solv.inputinteg.txt\")\n with open(path, \"w\") as f:\n f.write(\"\\n# Atom Types used: AuE: 2, AuI: 1, C: 3, Cl: 8, L: 4, N: 5, S: 6, W: 7, \\n\\n\")\n f.write(\"# pair_coeff, to be imported in the lammps input file...\\n\")\n for i in range(len(bounds)):\n if i in bound_mask:\n f.write(\n f'pair_coeff\\t{bounds_index[i]}\\tdpd\\t{mask_value[bound_mask.index(i)]:.4f}\\t\\t{4.5:.4f}\\t#{bounds[i]}\\n')\n else:\n f.write(f'pair_coeff\\t{bounds_index[i]}\\tdpd\\t{np.squeeze(array[i]):.4f}\\t\\t{4.5:.4f}\\t#{bounds[i]}\\n')\n\n\ndef write_db(path, array):\n with open(path, \"ab\") as f:\n np.savetxt(f, array, fmt=\"%2.3f\", header=\"#----------------------\")\n\n\ndef read_db(path):\n return np.atleast_2d(np.loadtxt(path))\n\n\ndef parse_cmd():\n \"\"\"\n Function that parse the input from command line.\n Read three flags: -f , -o, -c\n -f: path to the input file [Required]\n -o: Path to the output file [Optional]\n Output: args object containing the input path, the outputh path and the dictionary of the charges\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"Prepare the lammps pair coeffs\")\n\n parser.add_argument('-f', '--file', dest='file',\n action='store', type=str, help=\"Path to input fie\")\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parse_cmd()\n path_interaction = args.file\n # Write X and Y\n # X_old=get_right_coeffs(read_interaction(path_interaction))\n path_x = \"/home/merk/Desktop/optimization_run/data_X.txt\"\n path_y = \"/home/merk/Desktop/optimization_run/data_Y.txt\"\n\n X, Y = read_db(path_x), read_db(path_y).reshape(-1, 1)\n print(X.shape)\n tmp = []\n for i in X:\n tmp.append(get_right_coeffs(i))\n X = np.asarray(np.squeeze(tmp))\n dim = X[0].shape[0]\n print(X.shape)\n # bo run\n # mean, var=np.mean(X), np.std(X)\n # X= (X - mean)/var\n # low, up =(-10-mean)/var , (140 - mean)/var\n boundaries = [[-10, 140] for i in range(dim)]\n boundaries=[[0,1] for i in range(dim)]\n min=-10\n max=140\n X=(X-min)/(max-min)\n\n gp = GP(X, Y, RBF(), normalize_y=True)\n gp.set_boundary([[1e-4,1]])\n settings = {\"type\": \"BFGS\",\n \"ac_type\": \"EI\",\n \"n_search\": 100,\n \"boundaries\": boundaries,\n \"epsilon\": 0.1,\n \"iteration\": 1,\n \"minimization\": True,\n \"optimization\": True,\n \"n_restart\": 30,\n \"sampling\": \"LHS\"}\n\n BayOpt = BayesianOptimization(X, Y, settings, gp, 
func=None)\n proposal = BayOpt.suggest_location()\n\n # Write new file\n # proposal= proposal *var + mean\n proposal=proposal*(max-min)+min\n\n print(proposal)\n #write_interaction(path_interaction, fill_spots(proposal))\n\n\nif __name__ == \"__main__\":\n main()\n",
"import time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom .GaussianProcess.GP import GP, generate_grid\nfrom .GaussianProcess.Kernel.RBF import RBF\nfrom .Opt import BayesianOptimization\n\n\ndef min_2D():\n dim_test = 2\n dim_out = 1\n n_train_p = 10\n X = np.random.uniform(0,10,(1,2))\n boundaries = [[-5, 10], [0, 15]]\n\n def f(x):\n x1, x2 = x[:, 0], x[:, 1]\n return (1 * (x2 - (5.1 / (4 * np.pi ** 2)) * x1 ** 2 + 5 / np.pi *\n x1 - 6) ** 2 + 10 * (1 - (1 / (8 * np.pi))) * np.cos(x1) + 10)\n\n Z = f(X)[:, None]\n \"\"\"gp = GP(X, Z, noise=0.01)\n gp.fit()\n BayOpt = GPGO(X, Z, gp, f, err=1e-2)\n # best=BayOpt.bayesian_run(100, [[-1,4] for i in range(dim_test)] , iteration=30, optimization=False)\n err = BayOpt.bayesian_run_min(100,\n boundaries,\n #minima=0.397887,\n iteration=50,\n optimization=False,\n epsilon=0.01,\n func=np.random.uniform)\"\"\"\n\n BayesianOptimization.test_long(x=X,y=Z,f=f, n_search_points=300, boundaries=boundaries, iter=50,minima= 0.397887)\n\n\n\ndef min_6D():\n dim = 6\n points = 10\n x = np.random.uniform(0, 1, (10, 6))\n\n def f(x):\n alpha = np.array([[1.], [1.2], [3.], [3.2]])\n\n A = np.array([[10, 3, 17, 3.50, 1.7, 8],\n [0.05, 10, 17, 0.1, 8, 14],\n [3, 3.5, 1.7, 10, 17, 8],\n [17, 8, 0.05, 10, 0.1, 14]])\n\n P = 10 ** -4 * np.array([[1312, 1696, 5569, 124, 8283, 5886],\n [2329, 4135, 8307, 3736, 1004, 9991],\n [2348, 1451, 3522, 2883, 3047, 6650],\n [4047, 8828, 8732, 5743, 1091, 381]])\n\n def comp(i):\n tot = 0\n for j in range(6):\n tot += A[i][j] * (x.T[j] - P[i][j]) ** 2\n return np.exp(-tot)\n\n f = 0\n for i in range(4):\n f += -(alpha[i] * comp(i))\n\n return f[:, None]\n\n y = f(x)\n\n gp = GP(x, y, noise=0.0002, kernel=RBF(sigma_l=0.7,l=0.52))\n gp.fit()\n BayOpt = BayesianOptimization(x,y, gp, f, err=1e-4)\n best=BayOpt.bayesian_run_BFGL(n_search=10,\n iteration=80,\n boundaries=[[0,1] for i in range(6)],\n minimization=True)\n\n\n print(best)\n\n\n # best=BayOpt.bayesian_run(100, [[-1,4] for i in range(dim_test)] , iteration=30, optimization=False)\n \"\"\"err = BayOpt.direct_test(10,\n [[0, 1] for i in range(6)],\n minima=-3.32237,\n iteration=100,\n optimization=False,\n epsilon=0.01,\n func=np.random.uniform)\"\"\"\n #print(\"BEST\", err)\n #GPGO.test_long(x=x, y=y, f=f, n_search_points=6, boundaries=[[0, 1] for i in range(6)]\n # , iter=100, minima=-3.32237, opt=False)\n\ndef one_run_test():\n\n x=np.array([[-2],[2],[-3],[1]])\n gp = GP(x, np.array([[2],[3],[2.3],[0.5]]), noise=0.005)\n gp.fit()\n BayOpt = BayesianOptimization(x, np.array([[-2],[3],[-2.3],[0.5]]), gp, err=1e-3)\n gp.plot(np.linspace(-3,3,1000)[:,None])\n\n\n print(BayOpt.bayesian_run_min(200,\n [[-3,3]],\n optimization=False,\n minimization=True,\n epsilon=0.1,\n opt_constrain=[[2, 30], [2, 30]],\n n_opt_points=100,\n sampling=np.random.uniform))\n\ndef test_minimization_1D():\n dim_test = 1\n dim_out = 1\n n_train_p = 3\n\n np.random.seed(1)\n\n X = np.random.uniform(0,1,3)[:,None]\n\n def f(X):\n return (6* X - 2)**2 * np.sin (12 * X - 4)\n\n Z = f(X)\n\n BayesianOptimization.test_long(x=X, y=Z, f=f, n_search_points=6, boundaries=[[0, 1]]\n , iter=10, minima=-3.32237, opt=False)\n \"\"\"gp = GP(X, Z, noise=0.0005)\n gp.fit()\n BayOpt = GPGO(X, Z, gp, f)\n #gp.optimize(constrains=[[2,100],[2,100]],n_points=150)\n\n # best=BayOpt.bayesian_run(100, [[-1,4] for i in range(dim_test)] , iteration=30, optimization=False)\n best = BayOpt.bayesian_run_min(1000,\n [[0,1]],\n iteration=10,\n optimization=True,\n opt_constrain=[[0.1, 20], [0.1, 20]],\n 
epsilon=0.1,\n func=np.linspace,\n plot=True)\n\n #print(\"bay:\", best)\n \"\"\"\ndef test_GP_2D(optimize=True, function=np.linspace):\n dim_test = 2\n dim_out = 1\n n_train_p = 7\n #X = np.random.uniform(-2, 2, (40, 2))\n #Z = ((X[:, 1] ** 2 * X[:, 0] ** 2) * np.sin((X[:, 1] ** 2 + X[:, 0] ** 2)))[:, None]\n data=np.loadtxt(\"/Users/gab/Desktop/data_test_reg.txt\")\n\n gp = GP((data[:,0:2]-np.mean(data[:,0:2]))/np.std(data[:,0:2]), data[:,2][:,None], kernel=RBF(2,2), noise=0.0002)\n gp.fit()\n plot = generate_grid(dim_test, 100, [[-3, 3] for i in range(dim_test)])\n\n gp.plot(plot)\n # gp.static_compute_marg()\n print(\"Old marg likelihood :\", gp.get_marg(),\n \"\\n Hyperparameters: \", gp.get_kernel().gethyper())\n\n if optimize:\n gp.optimize_grid(constrains=[[0.1, 20], [0.5, 30]], n_points=200, function=function)\n #pred = gp.predict(plot)\n gp.plot(plot)\n print(\"New marg likelihood :\", gp.get_marg(),\n \"\\n Hyperparameters: \", gp.get_kernel().gethyper())\n\ndef test_GP_1D(optimize=False):\n x = np.array([ -1,0.5, 1, 3])[:, None]\n\n def f(X):\n return np.sin(X)\n\n def noise(x, alpha=1):\n return f(x) + np.random.uniform(-1, 1, size=x.shape) * alpha\n\n y = noise(x, alpha=0)\n\n gp = GP(x, y, noise=0.0002, kernel=RBF(sigma_l=0.5135, l=1.26))\n gp.fit()\n\n plot = np.linspace(-1.5, 3.5, 1000)\n\n pred_old, var_old = gp.predict(plot[:, None])\n #plt.plot(plot[:,None],f(plot), color=\"saddlebrown\", linestyle=\"-.\", label=\"True Function\")\n gp.plot(plot[:, None])\n\n\n gp.log_marginal_likelihood()\n print(\"Old marg likelihood :\", gp.get_marg(), \"\\n Hyperparameters: \",\n gp.get_kernel().gethyper())\n if optimize:\n new = gp.grid_search_optimization(constrains=[[0.2, 3], [0.2, 3]],\n n_points=500,\n function=np.random.uniform)\n\n optimized = GP(x, y, noise=0.000005, kernel=RBF(sigma_l=new[\"hyper\"][0], l=new[\"hyper\"][1]))\n optimized.fit()\n pred, var = optimized.predict(plot[:, None])\n\n optimized.plot(plot[:, None])\n optimized.log_marginal_likelihood()\n print(\"New marg likelihood :\", optimized.get_marg(),\n \"\\n Hyperparameters: \", optimized.get_kernel().gethyper())\n\n\nmin_6D()"
] |
[
[
"numpy.array",
"numpy.savetxt",
"numpy.zeros",
"numpy.loadtxt",
"numpy.squeeze",
"numpy.atleast_2d"
],
[
"numpy.array",
"numpy.sin",
"numpy.random.seed",
"numpy.exp",
"numpy.mean",
"numpy.std",
"numpy.random.uniform",
"numpy.loadtxt",
"numpy.cos",
"numpy.linspace"
]
] |
Leanneliansong1/inf1340-programmingfordatascience-fa21
|
[
"8944d019f64f9f25e7b10dafbca18250bf12e9ee"
] |
[
"Tutorial code/Week 3_Pandas tutorial.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[3]:\n\n\ndf = pd.read_csv('merc.csv')\n\n\n# In[4]:\n\n\nprint(df.head(10))\n\n\n# In[47]:\n\n\nprint(df.head())\n\n\n# In[48]:\n\n\nprint(df.tail()) \n\n\n# In[49]:\n\n\nprint(df.info()) \n\n\n# In[50]:\n\n\n#refer to the row index:\nprint(df.loc[0])\n\n\n# In[54]:\n\n\nprint(df.loc[[3, 9]])\n\n\n# In[44]:\n\n\nprint(df.loc[2])\n\n\n# In[28]:\n\n\n#use a list of indexes:\nprint(df.loc[[0, 1]])\n\n\n# In[29]:\n\n\n#identifying duplicates\nprint(df.duplicated())\n\n\n# In[31]:\n\n\n#removing duplicates\ndf.drop_duplicates(inplace = True)\nprint(df.head())\n\n\n# In[32]:\n\n\n#Dealing with null values\n\n#(1) Remove all null values\nnona_df = df.dropna()\n\nprint(nona_df.to_string())\n\n\n# In[34]:\n\n\n#(2) Replace NULL values with the number 200\nchecknull = df.fillna(200, inplace = True)\nprint(checknull)\n\n#df.fillna(130, inplace = True)\n#print(df)\n\n\n# In[36]:\n\n\n# (3) eplace null values Using Mean, Median, or Mode\nx = df[\"price\"].mean()\n\ndf[\"price\"].fillna(x, inplace = True)\n\nprint(df.head())\n\n\n# In[19]:\n\n\ndata = {\n \"mileage\": [240000, 130000, 20000],\n \"years\": [2003, 2017, 2021]\n}\n\nnewdf = pd.DataFrame(data, index = [\"car1\", \"car2\", \"car3\"])\n\nprint(newdf) \n\n\n# In[20]:\n\n\n#refer to the named index:\nprint(newdf.loc[\"car2\"])\n\n\n# In[4]:\n\n\nname_dict = {\n 'Name': ['a','b','c','d'],\n 'Score': [90,80,95,20]\n }\n\ndf = pd.DataFrame(name_dict)\n\ndf.to_csv('file_name.csv')\n\n\n# In[64]:\n\n\ntest1 = [1, 2, 3, 4, 5]\nprint(type(test1))\n\n\n# In[65]:\n\n\nprint(test1[-2])\n\n\n# In[66]:\n\n\ntest2 = (1, 2, 3, 4, 5)\nprint(type(test2))\n\n\n# In[67]:\n\n\ntest1[-2] = 6\n\n\n# In[68]:\n\n\nprint(test1[-2])\n\n\n# In[69]:\n\n\ntest2[-2] = 6\n\n\n# In[70]:\n\n\ntest3 = {1, 2, 3, 4, 5}\nprint(type(test3))\n\n\n# In[72]:\n\n\ntest4 = {\n \"Fruit\": [\"apple\", \"orange\", \"watermelon\"],\n \"Weight\": [5, 10, 15]\n}\n\n\n# In[73]:\n\n\nprint(test4[\"Fruit\"])\n\n\n# In[74]:\n\n\nx = test4.keys()\nprint(x)\n\n\n# In[75]:\n\n\nx = test4.items()\nprint(x)\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
Zakobian/WD_gas_disk_imaging
|
[
"b8bda209e541b442f44fdb6109de8f2f72ec38cf"
] |
[
"interactive.py"
] |
[
"import cv2\nimport numpy as np\nfrom plot_one import plot_me_one\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy.integrate import simps\nM_sun=1.989*10**30;\nR_sun=696340*10**3;\n\nM=0.62*M_sun\nr_star=0.0151*R_sun\nG=6.67408*10**(-11);\n\n\n\n####\n####\n#### This code will create the sandbox and allow user to play around with densities. To begin one needs a density to start with.\n#### You can generate one by running one of the other programs.\n#### The controls are:\n####\n#### c - switches between drawing circles and drawing by hand. Circles are drawn between inner and outer radius\n#### B - sets color/density to 0\n#### b - decreases current color/density by 1\n#### w - increases current color/density by 1\n#### backspace - Plot the emission lines from current density\n#### Esc - close\n####\n\nimg=np.load(\"density_todraw.npy\")\n# variables\nix = -1\niy = -1\ndrawing = False\nsize=img.shape[0]\ncolor=1\ncircle=True\n\n\nconsts={'e':0.0,\n'b':0.0,\n'view_angle':np.pi/2,\n'inclination_angle':np.pi/5,\n'r_max':550*r_star,\n'r_min':r_star\n}\n\n\ndef on_change(val):\n consts['b']=4*(val-100)/100\n print(4*(val-100)/100)\n\ndef draw_rectangle_with_drag(event, x, y, flags, param):\n\n global ix, iy,ir, drawing, img\n\n if event == cv2.EVENT_LBUTTONDOWN and circle:\n if not drawing:\n ix = x\n iy = y\n ir = np.sqrt((ix-size//2)**2+(iy-size//2)**2)\n if drawing:\n r = np.sqrt((x-size//2)**2+(y-size//2)**2)\n print(r,ir)\n cv2.circle(img, (size//2, size//2), ((r+ir)/2).astype(int), color=color, thickness=np.abs((r-ir)/2).astype(int))\n print('drawn 1')\n print(x,y)\n drawing = not drawing\n if event == cv2.EVENT_LBUTTONDOWN and not circle:\n drawing = True\n ix=x\n iy=y\n\n\n elif event == cv2.EVENT_MOUSEMOVE and not circle:\n if drawing == True:\n cv2.line(img,(ix,iy),(x,y),color,50)\n ix=x\n iy=y\n\n elif event == cv2.EVENT_LBUTTONUP and not circle:\n if(drawing):\n cv2.line(img,(ix,iy),(x,y),color,50)\n drawing = False\n\n\n\n\ncv2.namedWindow(winname = \"Density of gas\")\ncv2.createTrackbar('Emissivity(b)', \"Density of gas\", 100, 200, on_change)\ncv2.setMouseCallback(\"Density of gas\",\n draw_rectangle_with_drag)\n\n\nfig_hist = plt.figure(1)\nax_hist = fig_hist.add_subplot(1, 1, 1)\nplt.ion()\nplt.xlabel(\"Velocity/Wavelength\")\nplt.ylabel(\"Flux\")\ninst_names=['Xshooter','MIKE2']\nfor j,inst_name in enumerate(inst_names):\n x,y=np.loadtxt('data/SiII'+'_'+inst_name+'.csv', delimiter=',', unpack=True)\n\n area = simps((y-1),x)\n y=(y-1)/area\n ax_hist.plot(x,y, linewidth=1,label=inst_name)\n\n\nwhile True:\n # imgC = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n if img.max()!=0: cv2.imshow(\"Density of gas\", img/img.max())\n else: cv2.imshow(\"Density of gas\", img)\n k = cv2.waitKey(33)\n if k == 27:\n break\n elif k== ord(' '):\n print('Plotting')\n plot_me_one(img,ax_hist,consts)\n plt.show()\n plt.pause(0.001)\n elif k== ord('B'):\n color=0\n print('Density now: '+str(color))\n elif k== ord('b'):\n color-=1\n print('Density now: '+str(color))\n elif k== ord('w'):\n color+=1\n print('Density now: '+str(color))\n elif k== ord('c'):\n circle = not circle\n drawing=False\n if(circle):\n print('Now in circle mode')\n else:\n print('Now in drawing mode')\n\n\ncv2.destroyAllWindows()\n"
] |
[
[
"matplotlib.pyplot.ion",
"scipy.integrate.simps",
"matplotlib.pyplot.xlabel",
"numpy.load",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"numpy.abs",
"matplotlib.pyplot.show"
]
] |
Lorraine333/joint-order-cbow
|
[
"ed22c7b037a8d016a8982c8341bc02edc5054fc3"
] |
[
"src/plus_train.py"
] |
[
"\"\"\"Licensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\"\"\"\n\n\"\"\"Trains and Evaluates the network.\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n# from tensorflow.python.client import timeline\nimport time\nimport random\nimport tensorflow as tf\nimport plus_input_data as input_data\nimport numpy as np \nfrom plus_model import tf_model\nimport plus_eval_model \nimport map_eval \nimport plus_eval_model as eval_model\nfrom collections import defaultdict\nimport pickle\n\n# Basic model parameters as external flags.\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_float('learning_rate', 1e-2, 'Initial learning rate.')\nflags.DEFINE_float('cbow_learning_rate',1e-3, 'cbow learning rate')\nflags.DEFINE_float('regularization', 0.0, 'l2 regularization parameters')\nflags.DEFINE_boolean('save', False, 'Save the model')\nflags.DEFINE_boolean('update_embed', True, 'Update the embeddings')\nflags.DEFINE_integer('max_steps', 10000, 'Number of steps to run trainer.')\nflags.DEFINE_integer('batch_size', 800, 'Batch size. 
Must divide evenly into the dataset sizes.')\nflags.DEFINE_string('train_dir', '../TransE_Order/data', 'Directory to put the data.')\nflags.DEFINE_integer('cbow_step', 1000, 'Number of steps to run cbow trainer.')\nflags.DEFINE_integer('embed_dim', 50, 'word embedding dimension')\nflags.DEFINE_float('eps', 0., 'hierarchical error threshold')\nflags.DEFINE_float('margin', 5, 'hinge loss margin')\nflags.DEFINE_boolean('overfit', False, 'Over fit the dev data to check model')\nflags.DEFINE_float('lower_scale', 0, 'lower initialize value for embeddings')\nflags.DEFINE_float('higher_scale', 0.1, 'higher initialize value for embeddings')\nflags.DEFINE_boolean('kb_only', True, 'whether to train kb only')\n# flags.DEFINE_boolean('special_neg_sample', False, 'Whether to find negative examples from the not relation')\nflags.DEFINE_integer('print_every',100,'Every 20 step, print out the evaluation results')\nflags.DEFINE_float('alpha',0.01, 'regularization on error Function')\nflags.DEFINE_boolean('rel_acc', True, 'check the different relation accurancy for test dataset')\nflags.DEFINE_boolean('error_analysis', True, 'do error analysis for evaluation data')\nflags.DEFINE_string('params_file', './params/','file to save parameters')\nflags.DEFINE_string('error_file','./error_analysis/','dictionary to save error analysis result')\nflags.DEFINE_string('ouput_file', './result/', 'print the result to this file')\nflags.DEFINE_string('train', 'new_isa', 'training on both noisa and isa relatiosn')\nflags.DEFINE_string('test','new_isa', 'test on isa relations')\nflags.DEFINE_string('eval', 'acc', 'evaluate on MAP')\nflags.DEFINE_integer('rel_size', 35, 'relation_size')\n# lstm parameters\nflags.DEFINE_integer('hidden_dim',100, 'lstm hidden layer dimension')\nflags.DEFINE_boolean('peephole',True, 'whether to use peephole in lstm layer')\nflags.DEFINE_string('tuple_model', 'ave', 'how to compose term vector, can choose from ave or lstm')\n\n# word2vec parameters\nflags.DEFINE_float('epsilon', 1e-6, 'epsilon for optimizor')\nflags.DEFINE_float('beta1',1.0, 'weight on order_embedding loss')\nflags.DEFINE_float('beta2',1.0, 'Weight on word2vec loss')\nflags.DEFINE_string(\"word2vec_train_data\", 'text8', \"Training text file.\")\nflags.DEFINE_integer('word2vec_batch_size', 256, 'Batch size. Must divide evenly into the dataset sizes.') #256 #512\nflags.DEFINE_integer('data_shard_rows', 256*600, 'num text \"lines\" for training in one shard') #256*600\nflags.DEFINE_integer('data_shard_cols', 100, 'num tokens per text line') #100\nflags.DEFINE_integer('vocab_size', 80000, 'vocab_size')\nflags.DEFINE_float('num_neg_samples', 30, 'num_neg_samples')\nflags.DEFINE_integer(\"window_size\", 5, \"The number of words to predict to the left and right \")\n\n# nearest neighbor parameters\nflags.DEFINE_integer('knn', 10, 'how many neighbors want to check')\n# big wiki\n# flags.DEFINE_string(\"word2vec_train_data\", '../acl_cbow/data/binary-wackypedia-1-4-ukwac-', \"Training text file.\")\n# flags.DEFINE_integer('word2vec_batch_size', 512, 'Batch size. 
Must divide evenly into the dataset sizes.') #256 #512\n# flags.DEFINE_integer('data_shard_rows', 512*600, 'num text \"lines\" for training in one shard') #256*600\n# flags.DEFINE_integer('data_shard_cols', 200, 'num tokens per text line') #100\n\ndef placeholder_inputs(batch_size):\n placeholder = {}\n #positive example term1\n placeholder['t1_idx_placeholder'] = tf.placeholder(tf.int32, shape=(None, None))\n placeholder['t1_msk_placeholder'] = tf.placeholder(tf.int32, shape=(None, None, 1))\n placeholder['t1_length_placeholder'] = tf.placeholder(tf.int32, shape=(None, 1))\n # positive example term2\n placeholder['t2_idx_placeholder'] = tf.placeholder(tf.int32, shape=(None, None))\n placeholder['t2_msk_placeholder'] = tf.placeholder(tf.int32, shape=(None, None, 1))\n placeholder['t2_length_placeholder'] = tf.placeholder(tf.int32, shape=(None, 1))\n #negative example term1\n # placeholder['nt1_idx_placeholder'] = tf.placeholder(tf.int32, shape=(None, None))\n # placeholder['nt1_msk_placeholder'] = tf.placeholder(tf.int32, shape=(None, None, 1))\n # placeholder['nt1_length_placeholder'] = tf.placeholder(tf.int32, shape=(None, 1))\n #negative exmaple term2\n # placeholder['nt2_idx_placeholder'] = tf.placeholder(tf.int32, shape=(None, None))\n # placeholder['nt2_msk_placeholder'] = tf.placeholder(tf.int32, shape=(None, None, 1))\n # placeholder['nt2_length_placeholder'] = tf.placeholder(tf.int32, shape=(None, 1))\n #positive relation\n placeholder['rel_placeholder'] = tf.placeholder(tf.int32, shape=[None])\n placeholder['rel_msk_placeholder'] = tf.placeholder(tf.float32, shape=[None, 1])\n #negative relation\n # placeholder['nrel_placeholder'] = tf.placeholder(tf.int32, shape=[None])\n # placeholder['nrel_msk_placeholder'] = tf.placeholder(tf.float32, shape=[None, 1])\n #positive label\n placeholder['label_placeholder'] = tf.placeholder(tf.float32, shape=[None])\n #negative label\n # placeholder['nlabel_placeholder'] = tf.placeholder(tf.float32, shape=[None])\n #word2vec input\n placeholder['row_indices'] = tf.placeholder(tf.int32, shape = [FLAGS.word2vec_batch_size])\n placeholder['real_batch_size'] = tf.placeholder(tf.int32, shape = [])\n placeholder['data_shard'] = tf.placeholder(tf.int32, shape=[FLAGS.data_shard_rows, FLAGS.data_shard_cols])\n\n return placeholder\n\ndef fill_feed_dict(data_set, placeholder, row_indices, rel):\n\n r_idx, t1_idx, t2_idx, labels = data_set.next_batch(FLAGS.batch_size)\n t1x, t1mask, t1length= input_data.prepare_data(t1_idx)\n t2x, t2mask, t2length = input_data.prepare_data(t2_idx)\n # print('r_idx', r_idx.shape)\n relmsk = input_data.rel_msk(r_idx, rel)\n \n #random find negative examples from the same batch\n # nr_idx, nt1_idx, nt2_idx, nlabels = input_data.find_neg(r_idx, t1_idx, t2_idx, labels)\n # nt1x, nt1mask, nt1length= input_data.prepare_data(nt1_idx)\n # nt2x, nt2mask, nt2length = input_data.prepare_data(nt2_idx)\n # nrelmsk = input_data.rel_msk(nr_idx, rel)\n\n feed_dict = {\n placeholder['t1_idx_placeholder']: t1x,\n placeholder['t1_msk_placeholder']: t1mask, \n placeholder['t1_length_placeholder']: t1length,\n placeholder['t2_idx_placeholder']: t2x,\n placeholder['t2_msk_placeholder']: t2mask,\n placeholder['t2_length_placeholder']: t2length,\n # placeholder['nt1_idx_placeholder']: nt1x,\n # placeholder['nt1_msk_placeholder']: nt1mask,\n # placeholder['nt1_length_placeholder']: nt1length,\n # placeholder['nt2_idx_placeholder']: nt2x,\n # placeholder['nt2_msk_placeholder']: nt2mask,\n # placeholder['nt2_length_placeholder']: nt2length,\n 
placeholder['rel_placeholder']: r_idx,\n # placeholder['nrel_placeholder']: nr_idx,\n placeholder['label_placeholder']: labels,\n # placeholder['nlabel_placeholder']: nlabels,\n placeholder['rel_msk_placeholder']: relmsk,\n # placeholder['nrel_msk_placeholder']: nrelmsk,\n placeholder['row_indices']: row_indices,\n placeholder['real_batch_size']: len(row_indices),\n }\n\n return feed_dict\n\ndef partial_word2vec_fill_feed_dict(placeholder, data_shared):\n feed_dict = {\n placeholder['data_shard']: data_shared,\n }\n return feed_dict\n\n\n\ndef run_training():\n accu_list = []\n train_accu_list = []\n test_accu_list = []\n curr_best = 0\n outfile = open(FLAGS.ouput_file+'newtask_abs_l1_learning_rate_'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_eval'+str(FLAGS.eval)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)+'.txt', 'wt')\n\n error_file_name = FLAGS.error_file+'newtask_abs_l1_learning_rate'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_eval'+str(FLAGS.eval)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)\n\n fname = FLAGS.params_file+'newtask_abs_l1_learning_rate'+str(FLAGS.learning_rate)+'_word2vec_batch'+str(FLAGS.word2vec_batch_size)+'_word2vec'+'_kb_only'+str(FLAGS.kb_only)+'_cbow_step'+str(FLAGS.cbow_step)+'_batchsize'+str(FLAGS.batch_size)+'_eps'+str(FLAGS.eps)+'_tuplemodel'+str(FLAGS.tuple_model)+'_peephole'+str(FLAGS.peephole)+'_hiddendim'+str(FLAGS.hidden_dim)+'_eval'+str(FLAGS.eval)+'_train'+str(FLAGS.train)+'_test'+str(FLAGS.test)+'_margin'+str(FLAGS.margin)+'_reg'+str(FLAGS.regularization)+'_dim'+str(FLAGS.embed_dim)+'_steps'+str(FLAGS.max_steps)+'.pkl'\n data_sets = input_data.read_data_sets(FLAGS, outfile)\n # special_neg_sample = FLAGS.special_neg_sample\n if FLAGS.overfit:\n train_data = data_sets.dev\n else:\n train_data = data_sets.train\n\n \n\n with tf.Graph().as_default():\n placeholder = placeholder_inputs(FLAGS.batch_size)\n print('Build Model...', file = outfile)\n model = tf_model(data_sets.words, data_sets.We, data_sets.rel, data_sets.Rel, placeholder, FLAGS)\n print('Build Loss Function...', file = outfile)\n # loss = model.loss()\n kb_loss = model.kbc_loss()\n cbow_loss = model.cbow_loss()\n print('Build Encode Function...', file = outfile)\n if FLAGS.tuple_model == 'ave':\n embed_t1 = model.tuple_embedding(placeholder['t1_idx_placeholder'], placeholder['t1_msk_placeholder'], placeholder['t1_length_placeholder'], model.getWe())\n embed_t2 = model.tuple_embedding(placeholder['t2_idx_placeholder'], placeholder['t2_msk_placeholder'], placeholder['t2_length_placeholder'], model.getWe())\n # embed_nt1 = model.tuple_embedding(placeholder['nt1_idx_placeholder'], placeholder['nt1_msk_placeholder'], placeholder['nt1_length_placeholder'], model.getWe())\n # embed_nt2 = 
model.tuple_embedding(placeholder['nt2_idx_placeholder'], placeholder['nt2_msk_placeholder'], placeholder['nt2_length_placeholder'], model.getWe())\n\n elif FLAGS.tuple_model == 'lstm':\n with tf.variable_scope('term_embed', reuse = True):\n embed_t1 = model.tuple_lstm_embedding(placeholder['t1_idx_placeholder'], placeholder['t1_msk_placeholder'], placeholder['t1_length_placeholder'], model.getWe())\n with tf.variable_scope('term_embed', reuse = True): \n embed_t2 = model.tuple_lstm_embedding(placeholder['t2_idx_placeholder'], placeholder['t2_msk_placeholder'], placeholder['t2_length_placeholder'], model.getWe())\n # with tf.variable_scope('term_embed', reuse = True): \n # embed_nt1 = model.tuple_lstm_embedding(placeholder['nt1_idx_placeholder'], placeholder['nt1_msk_placeholder'], placeholder['nt1_length_placeholder'], model.getWe())\n # with tf.variable_scope('term_embed', reuse = True): \n # embed_nt2 = model.tuple_lstm_embedding(placeholder['nt2_idx_placeholder'], placeholder['nt2_msk_placeholder'], placeholder['nt2_length_placeholder'], model.getWe())\n else:\n print('Sorry, currently only support lstm terms and average terms')\n\n embed_r = model.rel_embedding(model.getRel(), placeholder['rel_placeholder'], placeholder['rel_msk_placeholder'])\n # embed_nr = model.rel_embedding(model.getRel(), placeholder['nrel_placeholder'], placeholder['nrel_msk_placeholder'])\n \n print('Build Hierarchical Error Function...', file = outfile)\n h_error = model.hierarchical_error(embed_t1, embed_r, embed_t2, FLAGS.eps, FLAGS.batch_size, FLAGS.embed_dim)\n nh_error = model.neg_hier_error()\n # nh_error = model.neg_hier_error(embed_nt1, embed_nr, embed_nt2, FLAGS.eps, FLAGS.margin, FLAGS.batch_size, FLAGS.embed_dim)\n # test_h_error = model.test_hierarchical_error(embed_t1, embed_r, embed_t2, FLAGS.eps, FLAGS.batch_size, FLAGS.embed_dim)\n print('Build Training Function...', file = outfile)\n # train_op = model.training(loss, FLAGS.learning_rate)\n kb_train_op = model.training(kb_loss, FLAGS.epsilon, FLAGS.learning_rate)\n cbow_train_op = model.training(cbow_loss, FLAGS.epsilon, FLAGS.cbow_learning_rate)\n \n data_shared = np.asarray(data_sets.word2vec_data[:FLAGS.data_shard_rows*FLAGS.data_shard_cols]).reshape((FLAGS.data_shard_rows, FLAGS.data_shard_cols))\n \n\n model_we = model.getWe()\n model_rel = model.getRel() \n summary_op = tf.summary.merge_all()\n saver = tf.train.Saver()\n sess = tf.Session()\n init = tf.global_variables_initializer()\n sess.run(init)\n\n partial_feed_dict = partial_word2vec_fill_feed_dict(placeholder, data_shared)\n sess.run(model.assign_data_shard_var, feed_dict = partial_feed_dict)\n\n summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=sess.graph)\n\n # profile \n # run_opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) \n # run_metadata = tf.RunMetadata()\n # profile\n perm = np.arange(FLAGS.data_shard_rows)\n word2vec_idx = 0\n for step in range(FLAGS.max_steps):\n start_time = time.time()\n if (word2vec_idx + FLAGS.word2vec_batch_size) > FLAGS.data_shard_rows:\n random.shuffle(perm)\n word2vec_idx = 0\n # row_indices = perm[:FLAGS.word2vec_batch_size]\n row_indices = perm[word2vec_idx:word2vec_idx+FLAGS.word2vec_batch_size]\n word2vec_idx += FLAGS.word2vec_batch_size\n feed_dict = fill_feed_dict(train_data, placeholder, row_indices, data_sets.rel)\n # print('feed_dict', time.time()-start_time)\n if(FLAGS.kb_only):\n t1 = time.time()\n # _, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict, 
options=run_opts,run_metadata=run_metadata)\n _, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict)\n # print('kb_train_op, ',time.time()-t1)\n elif(step<FLAGS.cbow_step):\n t1 = time.time()\n # _, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict, options=run_opts,run_metadata=run_metadata)\n _, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict)\n # _, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict)\n # print('cbow_train_op, ',time.time()-t1)\n else:\n t1 = time.time()\n # _,loss_value = sess.run([train_op, loss], feed_dict=feed_dict)\n # _, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict, options=run_opts,run_metadata=run_metadata)\n # _, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict, options=run_opts,run_metadata=run_metadata)\n _, kb_loss_value = sess.run([kb_train_op, kb_loss], feed_dict=feed_dict)\n # print('kb_train_op', time.time()-t1)\n t2 = time.time()\n _, cbow_loss_value = sess.run([cbow_train_op,cbow_loss], feed_dict=feed_dict)\n # print('cbow_train_op, ', time.time()-t2)\n\n he_error = sess.run(h_error, feed_dict=feed_dict)\n\n duration = time.time() - start_time\n # if (train_data.index_in_epoch + FLAGS.batch_size) > train_data.num_examples:\n # if (FLAGS.save):\n # saver.save(sess, FLAGS.train_dir, global_step=step)\n if (step%(FLAGS.print_every) == 0):\n embed = sess.run(model_we,feed_dict=feed_dict)\n print(step, file = outfile)\n print('*'*80, file = outfile)\n if(FLAGS.kb_only):\n print('Epoch %d: kb_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, kb_loss_value, duration), file = outfile)\n elif(step<FLAGS.cbow_step): \n print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration), file = outfile)\n print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration))\n # print('Epoch %d: kb_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, kb_loss_value, duration), file = outfile)\n else:\n print('Epoch %d: kb_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, kb_loss_value, duration), file = outfile)\n print('Epoch %d: cbow_loss = %.2f (%.3f sec)' % (train_data.epochs_completed, cbow_loss_value, duration), file = outfile)\n \n if FLAGS.eval == 'map':\n print('MAP Evaluation......', file = outfile)\n train_map = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.train_test, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)\n train_accu_list.append(train_map)\n print('Training MAP:%.5f' %train_map, file = outfile)\n dev_map = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.dev, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)\n print('Dev MAP:%.5f' %dev_map, file = outfile)\n accuracy = map_eval.do_train_eval(sess, h_error, placeholder, data_sets.devtest, train_data.epochs_completed, data_sets.train_neg, FLAGS, outfile)\n accu_list.append(accuracy)\n print('Devtest MAP:%.5f' %accuracy, file = outfile)\n print('', file = outfile)\n \n\n if FLAGS.eval == 'acc':\n train_acc = eval_model.do_train_eval(sess, h_error, nh_error, placeholder, train_data, train_data.epochs_completed, data_sets.train_neg, curr_best, FLAGS, error_file_name, outfile, data_sets.rel, data_sets.words)\n train_accu_list.append(train_acc)\n\n dev2_acc, test_acc, wrong_indices, wrong_preds = eval_model.do_eval(sess, h_error, placeholder, data_sets.dev, data_sets.devtest, data_sets.test, 
train_data.epochs_completed,curr_best, FLAGS, error_file_name, outfile, data_sets.rel, data_sets.words)\n accu_list.append(dev2_acc)\n test_accu_list.append(test_acc)\n\n eval_model.knn(data_sets.nn_data, data_sets.words, embed, FLAGS.knn, outfile)\n # print(\"Accuracy for Devtest: %.5f\" % dev2_acc)\n # print(\"Accuracy for Test: %.5f\" %test_acc)\n # print ('')\n print(\"Accuracy for Devtest: %.5f\" % dev2_acc, file = outfile)\n print(\"Accuracy for Test: %.5f\" %test_acc, file = outfile)\n print ('', file = outfile)\n if FLAGS.save and dev2_acc > curr_best:\n print('saving model')\n f = open(fname,'wb')\n save_model = {}\n save_model['embeddings'] = sess.run(model_we, feed_dict=feed_dict)\n save_model['rel'] = sess.run(model_rel, feed_dict = feed_dict)\n pickle.dump(save_model, f, protocol=pickle.HIGHEST_PROTOCOL)\n f.close()\n # tl = timeline.Timeline(run_metadata.step_stats)\n # ctf = tl.generate_chrome_trace_format(show_memory=True)\n # # ctf = tl.generate_chrome_trace_format()\n # with open('timeline_cache.json', 'w') as f:\n # f.write(ctf)\n\n print('Average of Top 10 Training Score', np.mean(sorted(train_accu_list, reverse = True)[:10]), file = outfile)\n opt_idx = np.argmax(np.asarray(accu_list))\n print('Epoch', opt_idx, file = outfile)\n print('Best Dev2 Score: %.5f' %accu_list[opt_idx], file = outfile)\n print('Best Test Score: %.5f' %test_accu_list[opt_idx], file = outfile)\ndef main(_):\n run_training()\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] |
[
[
"numpy.asarray",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"numpy.arange",
"tensorflow.summary.merge_all",
"tensorflow.summary.FileWriter",
"tensorflow.app.run",
"tensorflow.global_variables_initializer"
]
] |
rbv83/DHNx
|
[
"edb5c9be17f74d7f200c1eb6a17000a26633bdc3"
] |
[
"dhnx/plotting.py"
] |
[
"# -*- coding: utf-8\n\n\"\"\"\nThis module is designed to hold functions for visualization.\n\nThis file is part of project dhnx (). It's copyrighted\nby the contributors recorded in the version control history of the file,\navailable from its original location:\n\nSPDX-License-Identifier: MIT\n\"\"\"\n\nimport logging\nfrom collections import namedtuple\n\nimport folium as fol\nimport matplotlib.collections as collections\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom folium.features import DivIcon\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ncartopy_installed = True\n\ntry:\n from cartopy import crs as ccrs\n from cartopy.io.img_tiles import Stamen\n\nexcept ImportError:\n logging.info(\"Cartopy is not installed. Background maps will not be drawn.\")\n cartopy_installed = False\n\n\nclass InteractiveMap():\n r\"\"\"\n An interactive map of a network.ThermalNetwork.\n \"\"\"\n def __init__(self, thermal_network):\n self.node_data = self.collect_node_data(thermal_network)\n self.edge_data = thermal_network.components.pipes\n self.edge_data['value'] = 1\n self.node_id = self.node_data.index\n self.lat = self.node_data['lat']\n self.lon = self.node_data['lon']\n self.component_type = self.node_data['component_type']\n self._add_colors()\n\n @staticmethod\n def collect_node_data(thermal_network):\n node_data = {\n list_name: thermal_network.components[list_name].copy() for list_name in [\n 'consumers',\n 'producers',\n 'forks'\n ]\n }\n\n for k, v in node_data.items():\n v.index = [k + '-' + str(id) for id in v.index]\n\n return pd.concat(node_data.values())\n\n def _add_colors(self):\n color = {'producer': '#ff0000',\n 'consumer': '#00ff00',\n 'split': '#000000'}\n\n self.node_data = (\n self.node_data\n .assign(node_color=self.node_data['component_type'])\n .replace({'node_color': color}))\n\n return self.node_data['node_color']\n\n @staticmethod\n def _get_bearing(p1, p2):\n '''\n Returns compass bearing from p1 to p2\n\n Parameters\n p1 : namedtuple with lat lon\n p2 : namedtuple with lat lon\n\n Return\n compass bearing of type float\n '''\n y = p2[0] - p1[0]\n x = p2[1] - p1[1]\n\n bearing = np.arctan2(x, y) / np.pi * 180\n\n # adjusting for compass bearing\n if bearing < 0:\n return bearing + 360\n\n return bearing\n\n def _get_arrows(self, locations, color='black', size=8, n_arrows=3):\n '''\n Get a list of correctly placed and rotated\n arrows/markers to be plotted\n\n Parameters\n locations : list of lists of lat lons that represent the\n start and end of the line.\n eg [[41.1132, -96.1993],[41.3810, -95.8021]]\n color : default is 'black'\n size : default is 8\n n_arrows : number of arrows to create. 
default is 3\n\n Return\n list of arrows/markers\n '''\n\n Point = namedtuple('Point', field_names=['lat', 'lon'])\n\n # creating point from our Point named tuple\n p1 = Point(locations[0][0], locations[0][1])\n p2 = Point(locations[1][0], locations[1][1])\n\n # getting the rotation needed for our marker.\n # Subtracting 90 to account for the marker's orientation\n # of due East(get_bearing returns North)\n rotation = self._get_bearing(p1, p2) - 90\n\n # get an evenly space list of lats and lons for our arrows\n # note that I'm discarding the first and last for aesthetics\n # as I'm using markers to denote the start and end\n arrow_lats = np.linspace(p1.lat, p2.lat, n_arrows + 2)[1:n_arrows + 1]\n arrow_lons = np.linspace(p1.lon, p2.lon, n_arrows + 2)[1:n_arrows + 1]\n\n arrows = []\n\n # creating each \"arrow\" and appending them to our arrows list\n for points in zip(arrow_lats, arrow_lons):\n arrows.append(\n fol.RegularPolygonMarker(\n location=points,\n color=color, number_of_sides=3,\n radius=size, rotation=rotation, fill=True))\n\n return arrows\n\n def draw(self):\n # create map\n m = fol.Map(location=[self.lat.mean(), self.lon.mean()],\n zoom_start=14)\n\n for i in range(0, len(self.node_data)):\n # draw nodes\n fol.CircleMarker([self.lat[i], self.lon[i]],\n # popup=data['node_id'][i],\n color=self.node_data['node_color'][i],\n fill_color=self.node_data['node_color'][i],\n radius=20).add_to(m)\n\n # draw node ids\n fol.Marker(\n [self.lat[i], self.lon[i]],\n icon=DivIcon(\n icon_size=(-35, 75),\n icon_anchor=(0, 0),\n html='<div style=\"font-size: 16pt\">%s</div>'\n % self.node_data.index[i]\n )\n ).add_to(m)\n\n for i in range(0, len(self.edge_data)):\n # linewidth settings\n lw_avg = self.edge_data['value'].mean()\n lw = self.edge_data['value'][i] / lw_avg\n\n fol.PolyLine(locations=[[self.lat[self.edge_data['from_node'][i]],\n self.lon[self.edge_data['from_node'][i]]],\n [self.lat[self.edge_data['to_node'][i]],\n self.lon[self.edge_data['to_node'][i]]]],\n color='orange',\n weight=lw * 3).add_to(m)\n\n arrows = self._get_arrows(\n locations=[[self.lat[self.edge_data['from_node'][i]],\n self.lon[self.edge_data['from_node'][i]]],\n [self.lat[self.edge_data['to_node'][i]],\n self.lon[self.edge_data['to_node'][i]]]],\n color='orange', n_arrows=3)\n\n for arrow in arrows:\n arrow.add_to(m)\n\n return m\n\n\nclass StaticMap():\n r\"\"\"\n A static map of a network.ThermalNetwork.\n \"\"\"\n def __init__(self, thermal_network, figsize=(5, 5), node_size=3,\n edge_width=3, node_color='r', edge_color='g'):\n self.graph = thermal_network.to_nx_graph()\n self.figsize = figsize\n self.node_size = node_size\n self.edge_width = edge_width\n self.node_color = node_color\n self.edge_color = edge_color\n self.positions = {node_id: np.array([data['lon'], data['lat']])\n for node_id, data in self.graph.nodes(data=True)}\n self.extent = self._get_extent()\n\n def _get_extent(self):\n lon = [pos[0] for pos in self.positions.values()]\n lat = [pos[1] for pos in self.positions.values()]\n extent = np.array([np.min(lon), np.max(lon), np.min(lat), np.max(lat)])\n delta = [extent[1] - extent[0], extent[3] - extent[2]]\n extent = extent.astype(float)\n extent += 0.1 * np.array([-delta[0], delta[0], -delta[1], delta[1]])\n return extent\n\n def draw(self, bgcolor='w', no_axis=False, background_map=False,\n use_geom=False, edge_color='b', edge_linewidth=2,\n edge_alpha=1, node_size=40, node_color='r', node_alpha=1,\n edgecolor='r', node_zorder=1):\n \"\"\"\n This function has been adapted from osmnx 
plots.plot_graph() function.\n \"\"\"\n if background_map:\n if not cartopy_installed:\n logging.warning('To draw background map, cartopy must be installed.')\n background_map = False\n\n if background_map:\n imagery = Stamen(style='toner-lite')\n zoom_level = 15\n fig, ax = plt.subplots(\n figsize=self.figsize,\n subplot_kw={'projection': imagery.crs}\n )\n ax.set_extent(self.extent, crs=ccrs.Geodetic())\n ax.add_image(imagery, zoom_level, alpha=1, interpolation='bilinear')\n\n else:\n fig, ax = plt.subplots(figsize=self.figsize, facecolor=bgcolor)\n\n lines = []\n for u, v, data in self.graph.edges(data=True):\n if 'geometry' in data and use_geom:\n # if it has a geometry attribute (a list of line segments), add them\n # to the list of lines to plot\n xs, ys = data['geometry'].xy\n lines.append(list(zip(xs, ys)))\n else:\n # if it doesn't have a geometry attribute, the edge is a straight\n # line from node to node\n x1 = self.graph.nodes[u]['lon']\n y1 = self.graph.nodes[u]['lat']\n x2 = self.graph.nodes[v]['lon']\n y2 = self.graph.nodes[v]['lat']\n line = [(x1, y1), (x2, y2)]\n lines.append(line)\n\n # add the lines to the axis as a linecollection\n lc = collections.LineCollection(lines,\n colors=edge_color,\n linewidths=edge_linewidth,\n alpha=edge_alpha,\n zorder=2)\n ax.add_collection(lc)\n\n node_Xs = [float(x) for _, x in self.graph.nodes(data='lon')]\n node_Ys = [float(y) for _, y in self.graph.nodes(data='lat')]\n\n ax.scatter(node_Xs,\n node_Ys,\n s=node_size,\n c=node_color,\n alpha=node_alpha,\n edgecolor=edgecolor,\n zorder=node_zorder)\n\n if no_axis:\n ax = plt.gca()\n ax.set_axis_off()\n\n return fig, ax\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.min",
"matplotlib.pyplot.subplots",
"numpy.linspace",
"matplotlib.collections.LineCollection",
"numpy.arctan2",
"matplotlib.pyplot.gca"
]
] |
shiyuchengTJU/CISA
|
[
"81c43d2ee2ba1224e307baf636f1c87bfddd2ffa"
] |
[
"boundary/sampling/normal.py"
] |
[
"\"\"\"\r\nNoise generation from Normal distributions.\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\r\ndef sample_hypersphere(n_samples, sample_shape, radius, l_norm=2, mode='sphere', sample_gen=None, seed=None):\r\n \"\"\"\r\n Uniformly sample the surface of a hypersphere.\r\n Uniform picking: create a n-dimensional normal distribution and then normalize it to the desired radius.\r\n See http://mathworld.wolfram.com/HyperspherePointPicking.html\r\n WARNING: this is probably not correct for other norms!! We should check it out carefully if we don't use L2.\r\n :param n_samples: number of image samples to generate.\r\n :param sample_shape: shape of a single image sample.\r\n :param radius: radius(=eps) of the hypersphere.\r\n :param l_norm: L-norm.\r\n :param mode: if 'sphere', then samples the surface of the eps-sphere. If 'ball', then samples the volume of the eps-ball.\r\n Note: 'ball' is currently unused, and certainly not uniformly distributed.\r\n :param sample_gen: If provided, retrieves random numbers from this generator.\r\n :param seed: seed for the random generator. Cannot be used with the sample generator.\r\n :return: Batch of image samples, shape: (n_samples,) + sample_shape\r\n \"\"\"\r\n\r\n if sample_gen is not None:\r\n assert seed is None, \"Can't provide individual seeds if using the multi-threaded generator.\"\r\n assert sample_shape == sample_gen.shape\r\n\r\n # Get precalculated samples from the generator\r\n gauss = np.empty(shape=(n_samples, np.prod(sample_shape)), dtype=np.float64)\r\n for i in range(n_samples):\r\n gauss[i] = sample_gen.get_normal().reshape(-1)\r\n else:\r\n if seed is not None:\r\n np.random.seed(seed)\r\n gauss = np.random.normal(size=(n_samples, np.prod(sample_shape)))\r\n\r\n # Norm to\r\n norm = np.linalg.norm(gauss, ord=l_norm, axis=1)\r\n perturbation = (gauss / norm[:, np.newaxis])\r\n\r\n # Sphere: sample only the surface of the hypersphere.\r\n # Ball: sample inside the sphere. Note: this is probably not uniform.\r\n if mode == 'sphere':\r\n perturbation *= radius\r\n elif mode == 'ball':\r\n perturbation *= np.random.uniform(low=0.0, high=radius, size=(n_samples, 1))\r\n else:\r\n raise ValueError(\"Unknown sampling mode.\")\r\n\r\n perturbation = np.reshape(perturbation, (n_samples,) + sample_shape)\r\n\r\n return perturbation"
] |
[
[
"numpy.linalg.norm",
"numpy.reshape",
"numpy.random.seed",
"numpy.prod",
"numpy.random.uniform"
]
] |
joshim5/fairscale
|
[
"1c2a6f6b46646866f3e86d628b8a4ca437f68215"
] |
[
"tests/optim/test_oss.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pylint: disable=missing-module-docstring\n# pylint: disable=missing-class-docstring\n# pylint: disable=missing-function-docstring\n\nimport os\n\nimport pytest\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nimport fairscale.optim as optim\n\nskip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason=\"cuda required\")\n\nBACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore\nDEVICE = \"cuda\" if torch.cuda.is_available() else torch.device(\"cpu\")\n\n\ndef setup_module(module):\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"29500\"\n dist.init_process_group(backend=BACKEND, rank=0, world_size=1)\n\n\ndef dist_init(rank, world_size):\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"29501\"\n dist.init_process_group(backend=BACKEND, rank=rank, world_size=world_size)\n\n\ndef test_create():\n params = [torch.rand(1)]\n o = optim.OSS(params, lr=0.01)\n\n\ndef test_state_dict():\n x = torch.tensor([1.0], device=DEVICE, requires_grad=True)\n o = optim.OSS([x], lr=0.1, momentum=0.9)\n x.backward()\n o.step()\n assert x == torch.tensor([0.9], device=DEVICE)\n assert o.optim.state[x][\"momentum_buffer\"] == torch.tensor([1.0], device=DEVICE)\n o.zero_grad()\n o.consolidate_state_dict() # Sync state dict in between replicas - even if there are none\n state_dict = o.state_dict()\n\n # Check that the state dict is pytorch-compliant key wise\n assert \"param_groups\" in state_dict.keys()\n assert \"state\" in state_dict.keys()\n\n # Check that the pulled state is what we expect, and that we have all the expected keys\n assert state_dict[\"param_groups\"][0][\"lr\"] == 0.1\n assert state_dict[\"param_groups\"][0][\"momentum\"] == 0.9\n assert not state_dict[\"param_groups\"][0][\"nesterov\"]\n assert state_dict[\"param_groups\"][0][\"weight_decay\"] == 0.0\n assert state_dict[\"param_groups\"][0][\"dampening\"] == 0.0\n\n # Check that the pulled state and the .param_groups attribute are in sync\n for k in state_dict[\"param_groups\"][0].keys():\n if k != \"params\":\n assert state_dict[\"param_groups\"][0][k] == o.param_groups[0][k]\n\n # Check that it's correctly loaded\n o = optim.OSS([x], lr=0.01)\n o.load_state_dict(state_dict)\n # Check that state is correct and on proper device\n assert o.optim.state[x][\"momentum_buffer\"] == torch.tensor([1.0], device=DEVICE)\n\n # We should now be using a lr of 0.1, both within the optimizer\n # and as exposed by the .param_groups attribute\n assert o.param_groups[0][\"lr\"] == 0.1\n x.backward()\n o.step()\n assert x == torch.tensor([0.71], device=DEVICE)\n assert o.optim.state[x][\"momentum_buffer\"] == torch.tensor([1.9], device=DEVICE)\n\n # Check that the exposed param_groups are on the proper device\n assert o.param_groups[0][\"params\"][0].device == x.device\n\n\ndef test_lr_scheduler():\n x = torch.tensor([1.0], device=DEVICE, requires_grad=True)\n x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)\n o = optim.OSS([x], lr=0.01)\n o2 = torch.optim.SGD([x2], lr=0.01)\n s = torch.optim.lr_scheduler.StepLR(o, 1)\n s2 = torch.optim.lr_scheduler.StepLR(o2, 1)\n for _ in range(5):\n x.backward()\n o.zero_grad()\n o.step()\n s.step()\n x2.backward()\n o2.zero_grad()\n o2.step()\n s2.step()\n 
assert x == x2\n\n\ndef test_step_with_kwargs():\n class SGDWithStepKWArg(torch.optim.SGD):\n def step(self, closure=None, kwarg=[]):\n super().step()\n kwarg.append(5)\n\n kwarg = []\n x = torch.tensor([1.0], device=DEVICE, requires_grad=True)\n o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)\n x.backward()\n o.step(0, kwarg=kwarg)\n assert kwarg == [5]\n assert x == torch.tensor([0.9], device=DEVICE)\n\n\ndef test_step_without_closure():\n class SGDWithoutClosure(torch.optim.SGD):\n def step(self):\n return super().step()\n\n x = torch.tensor([1.0], device=DEVICE, requires_grad=True)\n o = optim.OSS([x], SGDWithoutClosure, lr=0.1)\n x.backward()\n o.step()\n assert x == torch.tensor([0.9], device=DEVICE)\n\n\ndef test_local_state_dict():\n x = torch.tensor([1.0], device=DEVICE, requires_grad=True)\n o = optim.OSS([x], lr=0.1)\n local_state_dict = o.local_state_dict()\n o = optim.OSS([x], lr=0.01)\n o.load_local_state_dict(local_state_dict)\n # We should now be using a lr of 0.1.\n assert o.optim.param_groups[0][\"lr\"] == 0.1\n assert o.param_groups[0][\"lr\"] == 0.1\n x.backward()\n o.step()\n assert x == torch.tensor([0.9], device=DEVICE)\n\n\ndef test_implicit_local_state_dict():\n x = torch.tensor([1.0], device=DEVICE, requires_grad=True)\n o = optim.OSS([x], lr=0.1)\n local_state_dict = o.state_dict()\n o = optim.OSS([x], lr=0.01)\n o.load_state_dict(local_state_dict)\n # We should now be using a lr of 0.1.\n assert o.optim.param_groups[0][\"lr\"] == 0.1\n assert o.param_groups[0][\"lr\"] == 0.1\n x.backward()\n o.step()\n assert x == torch.tensor([0.9], device=DEVICE)\n\n\ndef run_test_add_param_group(rank, world_size):\n dist_init(rank, world_size)\n params = []\n for size in [4, 5, 2, 6, 4]:\n params.append(torch.rand(size, 1))\n o = optim.OSS(params, lr=0.1)\n assert len(o.param_groups) == 1\n o.add_param_group({\"params\": [torch.rand(3, 1)]})\n assert len(o.param_groups) == 2\n # Verify that added group is added to the correct partition making all have 8 elements.\n assert sum([x.numel() for g in o.optim.param_groups for x in g[\"params\"]]) == 8\n assert len(o.optim.param_groups) == 2\n\n\ndef test_add_param_group():\n world_size = 3\n mp.spawn(run_test_add_param_group, args=(world_size,), nprocs=world_size, join=True)\n\n\ndef run_test_zero_grad(rank, world_size):\n dist_init(rank, world_size)\n x = torch.rand(1)\n m = torch.nn.Linear(1, 1)\n o = optim.OSS(m.parameters(), lr=0.1)\n y = m(x)\n y.backward(x)\n assert m.weight.grad\n assert m.bias.grad\n o.zero_grad()\n assert not m.weight.grad\n assert not m.bias.grad\n\n\ndef test_zero_grad():\n world_size = 2\n mp.spawn(run_test_zero_grad, args=(world_size,), nprocs=world_size, join=True)\n\n\ndef run_test_step(rank, world_size):\n dist_init(rank, world_size)\n x = torch.tensor([float(rank + 1)], device=rank)\n m = torch.nn.Linear(1, 1)\n m.weight.data = torch.tensor([[1.0]])\n m.bias.data = torch.tensor([2.0])\n m.to(rank)\n o = optim.OSS(m.parameters(), lr=0.1)\n y = m(x)\n y.backward(x)\n for p in m.parameters():\n dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)\n p.grad.data /= world_size\n o.step()\n assert m.weight == torch.tensor([[0.75]], device=rank)\n assert m.bias == torch.tensor([1.85], device=rank)\n\n\n@skip_if_no_cuda\ndef test_step():\n world_size = min(2, torch.cuda.device_count())\n mp.spawn(run_test_step, args=(world_size,), nprocs=world_size, join=True)\n\n\ndef run_test_step_with_closure(rank, world_size, optimizer=None):\n dist_init(rank, world_size)\n\n x_val = rank + 1\n weight = 1.0\n bias = 2.0\n 
error = 1.0\n target = torch.tensor([x_val * weight + bias + error], device=rank)\n loss_fn = torch.nn.L1Loss()\n\n x = torch.tensor([float(x_val)], device=rank)\n m = torch.nn.Linear(1, 1)\n m.weight.data = torch.tensor([[weight]])\n m.bias.data = torch.tensor([bias])\n m.to(rank)\n\n o = optim.OSS(m.parameters(), lr=0.1)\n\n y = m(x)\n y.backward(x)\n for p in m.parameters():\n dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)\n p.grad.data /= world_size\n\n def closure():\n o.zero_grad()\n output = m(x)\n loss = loss_fn(output, target)\n loss.backward()\n return loss\n\n loss = o.step(closure=closure)\n\n assert loss == torch.tensor(error, device=rank)\n assert m.weight == torch.tensor([[1.1]], device=rank)\n assert m.bias == torch.tensor([2.1], device=rank)\n\n\n@skip_if_no_cuda\ndef test_step_with_closure():\n world_size = min(2, torch.cuda.device_count())\n mp.spawn(run_test_step_with_closure, args=(world_size,), nprocs=world_size, join=True)\n\n\ndef run_test_sharding(rank, world_size):\n dist_init(rank, world_size)\n params = []\n for size in [5, 4, 2, 6, 4, 3]:\n params.append(torch.rand(size, 1))\n o = optim.OSS(params, lr=0.1)\n assert sum([x.numel() for x in o.optim.param_groups[0][\"params\"]]) == 8\n\n\ndef test_sharding():\n world_size = 3\n mp.spawn(run_test_sharding, args=(world_size,), nprocs=world_size, join=True)\n\n\ndef run_test_collect_shards(rank, world_size, reference_rank):\n dist_init(rank, world_size)\n device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE\n\n # Run a dummy step so that the optimizer state dict exists\n batch, input_width, hidden, target_width = 3, 20, 10, 5\n target = torch.rand((batch, target_width), device=device)\n inputs = torch.rand((batch, input_width), device=device)\n\n model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))\n model.to(device)\n\n loss_fn = torch.nn.L1Loss()\n loss_fn.to(device)\n\n # With SGD, Momentum is required to get a state to shard\n optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)\n\n def closure():\n optimizer.zero_grad()\n output = model(inputs)\n loss = loss_fn(output, target)\n loss.backward()\n return loss\n\n _ = optimizer.step(closure=closure)\n\n # Update the optimizer state on the reference rank\n optimizer.consolidate_state_dict(recipient_rank=reference_rank)\n\n # Fetch the state on the reference rank\n # - check that it has the correct size\n # - load it again\n if rank == reference_rank:\n optimizer_state_dict = optimizer.state_dict()\n assert len(optimizer_state_dict[\"state\"]) == world_size\n else:\n optimizer_state_dict = {}\n\n optimizer_state_dict = optim.utils.broadcast_object(\n optimizer_state_dict, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device\n )\n\n # Load the optimizer state dict\n optimizer.load_state_dict(optimizer_state_dict)\n\n\ndef test_collect_shards():\n world_size = 3\n if torch.cuda.is_available():\n world_size = min(world_size, torch.cuda.device_count())\n reference_rank = 0\n\n mp.spawn(\n run_test_collect_shards, args=(world_size, reference_rank), nprocs=world_size, join=True,\n )\n"
] |
[
[
"torch.nn.Linear",
"torch.device",
"torch.rand",
"torch.optim.lr_scheduler.StepLR",
"torch.distributed.init_process_group",
"torch.optim.SGD",
"torch.multiprocessing.spawn",
"torch.nn.L1Loss",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.tensor",
"torch.distributed.all_reduce"
]
] |
EmanueleGhelfi/ashpy
|
[
"6156b97c636c5b568c5a57c23b77d9ae28421bba"
] |
[
"ashpy/losses/gan.py"
] |
[
"# Copyright 2019 Zuru Tech HK Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"GAN losses.\"\"\"\nfrom abc import ABC\nfrom enum import Enum\nfrom typing import List, Union, Type\n\nimport tensorflow as tf\nfrom ashpy.contexts import GANContext\n\nfrom ashpy.losses.executor import Executor, SumExecutor\n\n\nclass AdversarialLossType(Enum):\n \"\"\"\n Enumeration for Adversarial Losses. Implemented: GAN and LSGAN.\n \"\"\"\n\n GAN = 0 # classical gan loss (minmax)\n LSGAN = 1 # Least Square GAN\n\n\nclass GANExecutor(Executor, ABC):\n \"\"\"\n Executor for GANs. Implements the basic functions needed by the GAN losses\n \"\"\"\n\n @staticmethod\n def get_discriminator_inputs(\n context: GANContext,\n fake_or_real: tf.Tensor,\n condition: tf.Tensor,\n training: bool,\n ) -> Union[tf.Tensor, List[tf.Tensor]]:\n \"\"\"\n Returns the discriminator inputs. If needed it uses the encoder.\n The current implementation uses the number of inputs to determine\n whether the discriminator is conditioned or not.\n\n Args:\n context (:py:class:`ashpy.contexts.gan.GANContext`): context for GAN models\n fake_or_real (:py:class:`tf.Tensor`): discriminator input tensor, it can be fake (generated) or real\n condition (:py:class:`tf.Tensor`): discriminator condition (it can also be generator noise)\n training (:py:class:`bool`): whether is training phase or not\n\n Returns:\n The discriminator inputs.\n\n \"\"\"\n num_inputs = len(context.discriminator_model.inputs)\n\n # Handle encoder\n if hasattr(context, \"encoder_model\"):\n if num_inputs == 2:\n d_inputs = [\n fake_or_real,\n context.encoder_model(fake_or_real, training=training),\n ]\n elif num_inputs == 3:\n d_inputs = [\n fake_or_real,\n context.encoder_model(fake_or_real, training=training),\n condition,\n ]\n else:\n raise ValueError(\n f\"Context has encoder_model, but generator has only {num_inputs} inputs\"\n )\n else:\n if num_inputs == 2:\n d_inputs = [fake_or_real, condition]\n else:\n d_inputs = fake_or_real\n\n return d_inputs\n\n\nclass AdversarialLossG(GANExecutor):\n r\"\"\"\n Base class for the adversarial loss of the generator\n \"\"\"\n\n def __init__(self, loss_fn=None):\n \"\"\"\n Args:\n loss_fn: loss_fn to call passing (tf.ones_like(d_fake_i), d_fake_i)\n \"\"\"\n super().__init__(loss_fn)\n\n @Executor.reduce_loss\n def call(self, context, *, fake, condition, training, **kwargs):\n r\"\"\"\n Call: setup the discriminator inputs and calls `loss_fn`\n Args:\n context: GAN Context\n fake: fake images\n condition: generator condition\n training: if training or evaluation\n Returns:\n The loss for each example\n \"\"\"\n\n fake_inputs = self.get_discriminator_inputs(\n context=context, fake_or_real=fake, condition=condition, training=training\n )\n\n d_fake = context.discriminator_model(fake_inputs, training=training)\n\n # support for Multiscale discriminator\n # TODO: Improve\n if isinstance(d_fake, list):\n value = tf.add_n(\n [\n tf.reduce_mean(\n self._fn(tf.ones_like(d_fake_i), d_fake_i), 
axis=[1, 2]\n )\n for d_fake_i in d_fake\n ]\n )\n return value\n else:\n value = self._fn(tf.ones_like(d_fake), d_fake)\n value = tf.cond(\n tf.equal(tf.rank(d_fake), tf.constant(4)),\n lambda: value,\n lambda: tf.expand_dims(tf.expand_dims(value, axis=-1), axis=-1),\n )\n return tf.reduce_mean(value, axis=[1, 2])\n\n\nclass GeneratorBCE(AdversarialLossG):\n r\"\"\"\n The Binary CrossEntropy computed among the generator and the 1 label.\n\n .. math::\n L_{G} = E [\\log (D( G(z))]\n\n \"\"\"\n\n def __init__(self, from_logits=True):\n self.name = \"GeneratorBCE\"\n super().__init__(tf.losses.BinaryCrossentropy(from_logits=from_logits))\n\n\nclass GeneratorLSGAN(AdversarialLossG):\n r\"\"\"\n Least Square GAN Loss for generator\n Reference: https://arxiv.org/abs/1611.04076\n Basically the Mean Squared Error between\n the discriminator output when evaluated in fake and 1\n\n .. math::\n L_{G} = \\frac{1}{2} E [(1 - D(G(z))^2]\n\n \"\"\"\n\n def __init__(self):\n super().__init__(tf.keras.losses.MeanSquaredError())\n self.name = \"GeneratorLSGAN\"\n\n\nclass GeneratorL1(GANExecutor):\n r\"\"\"\n L1 loss between the generator output and the target.\n\n .. math::\n L_G = E ||x - G(z)||_1\n\n where x is the target and G(z) is generated image.\n\n \"\"\"\n\n class L1Loss(tf.losses.Loss):\n def __init__(self):\n super().__init__()\n self._reduction = tf.losses.Reduction.SUM_OVER_BATCH_SIZE\n\n @property\n def reduction(self):\n return self._reduction\n\n @reduction.setter\n def reduction(self, value):\n self._reduction = value\n\n def call(self, x, y):\n \"\"\"\n For each element the mean of the l1 between x and y\n \"\"\"\n if self._reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:\n axis = None\n elif self._reduction == tf.losses.Reduction.NONE:\n axis = (1, 2, 3)\n else:\n raise ValueError(\"L1Loss: unhandled reduction type\")\n\n return tf.reduce_mean(tf.abs(x - y), axis=axis)\n\n def __init__(self):\n super().__init__(GeneratorL1.L1Loss())\n\n @Executor.reduce_loss\n def call(self, context, *, fake, real, **kwargs):\n mae = self._fn(fake, real)\n return mae\n\n\nclass FeatureMatchingLoss(GeneratorL1):\n r\"\"\"\n Conditional GAN Feature matching loss.\n The loss is computed for each example and it's the L1 (MAE) of the feature difference.\n Implementation of pix2pix HD: https://github.com/NVIDIA/pix2pixHD\n\n .. math::\n \\text{FM} = \\sum_{i=0}^N \\frac{1}{M_i} ||D_i(x, c) - D_i(G(c), c) ||_1\n\n Where:\n\n - D_i is the i-th layer of the discriminator\n - N is the total number of layer of the discriminator\n - M_i is the number of components for the i-th layer\n - x is the target image\n - c is the condition\n - G(c) is the generated image from the condition c\n - || ||_1 stands for norm 1.\n\n This is for a single example: basically for each layer of the discriminator we compute the absolute error between\n the layer evaluated in real examples and in fake examples.\n Then we average along the batch. 
In the case where D_i is a multidimensional tensor we simply calculate the mean\n over the axis 1,2,3.\n \"\"\"\n\n @Executor.reduce_loss\n def call(self, context, *, fake, real, condition, training, **kwargs):\n fake_inputs = self.get_discriminator_inputs(\n context, fake_or_real=fake, condition=condition, training=training\n )\n\n real_inputs = self.get_discriminator_inputs(\n context, fake_or_real=real, condition=condition, training=training\n )\n\n _, features_fake = context.discriminator_model(\n fake_inputs, training=training, return_features=True\n )\n _, features_real = context.discriminator_model(\n real_inputs, training=training, return_features=True\n )\n\n # for each feature the L1 between the real and the fake\n # every call to fn should return [batch_size, 1] that is the mean L1\n feature_loss = [\n self._fn(feat_real_i, feat_fake_i)\n for feat_real_i, feat_fake_i in zip(features_real, features_fake)\n ]\n mae = tf.add_n(feature_loss)\n return mae\n\n\nclass CategoricalCrossEntropy(Executor):\n r\"\"\"\n Categorical Cross Entropy between generator output and target.\n Useful when the output of the generator is a distribution over classes\n The target must be represented in one hot notation\n \"\"\"\n\n def __init__(self):\n self.name = \"CrossEntropy\"\n super().__init__(tf.keras.losses.CategoricalCrossentropy())\n\n @Executor.reduce_loss\n def call(self, context, *, fake, real, **kwargs):\n \"\"\"\n Compute the categorical cross entropy loss\n Args:\n context: unused\n fake: fake images G(condition)\n real: Real images x(c)\n **kwargs:\n\n Returns:\n The categorical cross entropy loss for each example\n\n \"\"\"\n loss_value = tf.reduce_mean(self._fn(real, fake), axis=[1, 2])\n return loss_value\n\n\nclass Pix2PixLoss(SumExecutor):\n r\"\"\"\n Weighted sum of :py:class:`ashpy.losses.gan.GeneratorL1`, :py:class:`ashpy.losses.gan.AdversarialLossG` and\n :py:class:`ashpy.losses.gan.FeatureMatchingLoss`.\n Used by Pix2Pix [1] and Pix2PixHD [2]\n\n .. [1] Image-to-Image Translation with Conditional Adversarial Networks\n https://arxiv.org/abs/1611.07004\n .. 
[2] High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs\n https://arxiv.org/abs/1711.11585\n\n \"\"\"\n\n def __init__(\n self,\n l1_loss_weight=100.0,\n adversarial_loss_weight=1.0,\n feature_matching_weight=10.0,\n adversarial_loss_type: Union[\n AdversarialLossType, int\n ] = AdversarialLossType.GAN,\n use_feature_matching_loss: bool = False,\n ):\n r\"\"\"\n Weighted sum of :py:class:`ashpy.losses.gan.GeneratorL1`, :py:class:`ashpy.losses.gan.AdversarialLossG` and\n :py:class:`ashpy.losses.gan.FeatureMatchingLoss`.\n\n Args:\n l1_loss_weight: weight of L1 loss (scalar, :py:class:`tf.Tensor`, callable)\n adversarial_loss_weight: weight of adversarial loss (scalar, :py:class:`tf.Tensor`, callable)\n feature_matching_weight: weight of the feature matching loss (scalar, :py:class:`tf.Tensor`, callable)\n adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`): Adversarial loss type\n (:py:class:`ashpy.losses.gan.AdversarialLossType.GAN`\n or :py:class:`ashpy.losses.gan.AdversarialLossType.LSGAN`)\n use_feature_matching_loss (bool): if True use also :py:class:`ashpy.losses.gan.FeatureMatchingLoss`\n\n \"\"\"\n executors = [\n GeneratorL1() * l1_loss_weight,\n get_adversarial_loss_generator(adversarial_loss_type)()\n * adversarial_loss_weight,\n ]\n\n if use_feature_matching_loss:\n executors.append(FeatureMatchingLoss() * feature_matching_weight)\n\n super().__init__(executors)\n\n\nclass Pix2PixLossSemantic(SumExecutor):\n \"\"\"\n Weighted sum of :py:class:`ashpy.losses.gan.CategoricalCrossEntropy`, :py:class:`ashpy.losses.gan.AdversarialLossG` and\n :py:class:`ashpy.losses.gan.FeatureMatchingLoss`\n \"\"\"\n\n def __init__(\n self,\n cross_entropy_weight=100.0,\n adversarial_loss_weight=1.0,\n feature_matching_weight=10.0,\n adversarial_loss_type: AdversarialLossType = AdversarialLossType.GAN,\n use_feature_matching_loss: bool = False,\n ):\n r\"\"\"\n Weighted sum of :py:class:`ashpy.losses.gan.CategoricalCrossEntropy`, :py:class:`ashpy.losses.gan.AdversarialLossG` and\n :py:class:`ashpy.losses.gan.FeatureMatchingLoss`\n Args:\n cross_entropy_weight: weight of the categorical cross entropy loss (scalar, :py:class:`tf.Tensor`, callable)\n adversarial_loss_weight: weight of the adversarial loss (scalar, :py:class:`tf.Tensor`, callable)\n feature_matching_weight: weight of the feature matching loss (scalar, :py:class:`tf.Tensor`, callable)\n adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`): type of adversarial loss,\n see :py:class:`ashpy.losses.gan.AdversarialLossType`\n use_feature_matching_loss (bool): whether to use feature matching loss or not\n \"\"\"\n executors = [\n CategoricalCrossEntropy() * cross_entropy_weight,\n get_adversarial_loss_generator(adversarial_loss_type)()\n * adversarial_loss_weight,\n ]\n\n if use_feature_matching_loss:\n executors.append(FeatureMatchingLoss() * feature_matching_weight)\n super().__init__(executors)\n\n\nclass EncoderBCE(Executor):\n \"\"\"The Binary Cross Entropy computed among the encoder and the 0 label.\n TODO: Check if this supports condition\n \"\"\"\n\n def __init__(self, from_logits=True):\n super().__init__(tf.losses.BinaryCrossentropy(from_logits=from_logits))\n\n @Executor.reduce_loss\n def call(self, context, *, real, training, **kwargs):\n encode = context.encoder_model(real, training=training)\n d_real = context.discriminator_model([real, encode], training=training)\n return self._fn(tf.zeros_like(d_real), d_real)\n\n\nclass AdversarialLossD(GANExecutor):\n 
r\"\"\"\n Base class for the adversarial loss of the discriminator\n \"\"\"\n\n def __init__(self, loss_fn=None):\n r\"\"\"\n Args:\n loss_fn to call passing (d_real, d_fake)\n \"\"\"\n super().__init__(loss_fn)\n\n @Executor.reduce_loss\n def call(self, context, *, fake, real, condition, training, **kwargs):\n r\"\"\"\n Call: setup the discriminator inputs and calls `loss_fn`\n\n Args:\n context: GAN Context\n fake: fake images corresponding to the condition G(c)\n real: real images corresponding to the condition x(c)\n condition: condition for the generator and discriminator\n training: if training or evaluation\n\n Returns:\n The loss for each example\n \"\"\"\n\n fake_inputs = self.get_discriminator_inputs(\n context, fake_or_real=fake, condition=condition, training=training\n )\n\n real_inputs = self.get_discriminator_inputs(\n context, fake_or_real=real, condition=condition, training=training\n )\n\n d_fake = context.discriminator_model(fake_inputs, training=training)\n d_real = context.discriminator_model(real_inputs, training=training)\n\n if isinstance(d_fake, list):\n value = tf.add_n(\n [\n tf.reduce_mean(self._fn(d_real_i, d_fake_i), axis=[1, 2])\n for d_real_i, d_fake_i in zip(d_real, d_fake)\n ]\n )\n return value\n else:\n value = self._fn(d_real, d_fake)\n value = tf.cond(\n tf.equal(tf.rank(d_fake), tf.constant(4)),\n lambda: value,\n lambda: tf.expand_dims(tf.expand_dims(value, axis=-1), axis=-1),\n )\n return tf.reduce_mean(value, axis=[1, 2])\n\n\nclass DiscriminatorMinMax(AdversarialLossD):\n r\"\"\"\n The min-max game played by the discriminator.\n\n .. math::\n L_{D} = - \\frac{1}{2} E [\\log(D(x)) + \\log (1 - D(G(z))]\n\n \"\"\"\n\n class GANLoss(tf.losses.Loss):\n def __init__(self, from_logits=True, label_smoothing=0.0):\n self._positive_bce = tf.losses.BinaryCrossentropy(\n from_logits=from_logits,\n label_smoothing=label_smoothing,\n reduction=tf.losses.Reduction.NONE,\n )\n\n self._negative_bce = tf.losses.BinaryCrossentropy(\n from_logits=from_logits,\n label_smoothing=0.0,\n reduction=tf.losses.Reduction.NONE,\n )\n super().__init__()\n\n @property\n def reduction(self):\n return self._positive_bce.reduction\n\n @reduction.setter\n def reduction(self, value):\n self._positive_bce.reduction = value\n self._negative_bce.reduction = value\n\n def call(self, d_real, d_fake):\n \"\"\"Play the DiscriminatorMinMax game between the discriminator computed in real\n and the discriminator compute with fake inputs.\"\"\"\n\n return 0.5 * (\n self._positive_bce(tf.ones_like(d_real), d_real)\n + self._negative_bce(tf.zeros_like(d_fake), d_fake)\n )\n\n def __init__(self, from_logits=True, label_smoothing=0.0):\n super().__init__(\n DiscriminatorMinMax.GANLoss(\n from_logits=from_logits, label_smoothing=label_smoothing\n )\n )\n\n\nclass DiscriminatorLSGAN(AdversarialLossD):\n r\"\"\"\n Least square Loss for discriminator.\n\n Reference: Least Squares Generative Adversarial Networks [1]_ .\n\n Basically the Mean Squared Error between\n the discriminator output when evaluated in fake samples and 0\n and the discriminator output when evaluated in real samples and 1:\n For the unconditioned case this is:\n\n .. math::\n L_{D} = \\frac{1}{2} E[(D(x) - 1)^2 + (0 - D(G(z))^2]\n\n where x are real samples and z is the latent vector.\n\n For the conditioned case this is:\n\n .. math::\n L_{D} = \\frac{1}{2} E[(D(x, c) - 1)^2 + (0 - D(G(c), c)^2]\n\n where c is the condition and x are real samples.\n\n .. 
[1] https://arxiv.org/abs/1611.04076\n\n \"\"\"\n\n class LeastSquareLoss(tf.losses.Loss):\n def __init__(self):\n self._positive_mse = tf.keras.losses.MeanSquaredError(\n reduction=tf.losses.Reduction.NONE\n )\n self._negative_mse = tf.keras.losses.MeanSquaredError(\n reduction=tf.losses.Reduction.NONE\n )\n super().__init__()\n\n @property\n def reduction(self):\n return self._positive_mse.reduction\n\n @reduction.setter\n def reduction(self, value):\n self._positive_mse.reduction = value\n self._negative_mse.reduction = value\n\n def call(self, d_real, d_fake):\n return 0.5 * (\n self._positive_mse(tf.ones_like(d_real), d_real)\n + self._negative_mse(tf.zeros_like(d_fake), d_fake)\n )\n\n def __init__(self):\n super().__init__(DiscriminatorLSGAN.LeastSquareLoss())\n self.name = \"DiscriminatorLSGAN\"\n\n\n###\n# Utility functions in order to get the correct loss\n###\n\n\ndef get_adversarial_loss_discriminator(\n adversarial_loss_type: Union[AdversarialLossType, int] = AdversarialLossType.GAN\n) -> Type[Executor]:\n r\"\"\"\n Returns the correct loss fot the discriminator\n\n Args:\n adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`): Type of loss (:py:class:`ashpy.losses.gan.AdversarialLossType.GAN` or :py:class:`ashpy.losses.gan.AdversarialLossType.LSGAN`)\n\n Returns:\n The correct (:py:class:`ashpy.losses.executor.Executor`) (to be instantiated)\n \"\"\"\n if (\n adversarial_loss_type == AdversarialLossType.GAN\n or adversarial_loss_type == AdversarialLossType.GAN.value\n ):\n return DiscriminatorMinMax\n elif (\n adversarial_loss_type == AdversarialLossType.LSGAN\n or adversarial_loss_type == AdversarialLossType.LSGAN.value\n ):\n return DiscriminatorLSGAN\n else:\n raise ValueError(\n \"Loss type not supported, the implemented losses are GAN or LSGAN\"\n )\n\n\ndef get_adversarial_loss_generator(\n adversarial_loss_type: Union[AdversarialLossType, int] = AdversarialLossType.GAN\n) -> Type[Executor]:\n r\"\"\"\n Returns the correct loss for the generator\n\n Args:\n adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`): Type of loss (:py:class:`ashpy.losses.gan.AdversarialLossType.GAN` or :py:class:`ashpy.losses.gan.AdversarialLossType.LSGAN`)\n\n Returns:\n The correct (:py:class:`ashpy.losses.executor.Executor`) (to be instantiated)\n \"\"\"\n if (\n adversarial_loss_type == AdversarialLossType.GAN\n or adversarial_loss_type == AdversarialLossType.GAN.value\n ):\n return GeneratorBCE\n elif (\n adversarial_loss_type == AdversarialLossType.LSGAN\n or adversarial_loss_type == AdversarialLossType.LSGAN.value\n ):\n return GeneratorLSGAN\n else:\n raise ValueError(\n \"Loss type not supported, the implemented losses are GAN or LSGAN\"\n )\n"
] |
[
[
"tensorflow.rank",
"tensorflow.abs",
"tensorflow.losses.BinaryCrossentropy",
"tensorflow.expand_dims",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.ones_like",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.zeros_like",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.reduce_mean"
]
] |
nryotaro/han
|
[
"ed78f6772f4bf6923d9a3f52dbcc8a55e757631b"
] |
[
"han/vocabulary.py"
] |
[
"\"\"\"Word embedding.\"\"\"\nimport typing as t\nimport torchtext.vocab as v\nimport torch\n\n\ndef build_vocabulary(\n sentences: t.Iterator[t.Iterator[str]],\n pad_symbol: str = \"<pad>\",\n unknown_symbol: str = \"<unk>\",\n) -> v.Vocab:\n \"\"\"Build vocabulary.\n\n Each element of `sentences` is a list of words. The vocabulary\n encode unknown word to the indice of `unknown_symbol`.\n\n \"\"\"\n vocab: v.Vocab = v.build_vocab_from_iterator(\n (sentence for sentence in sentences),\n special_first=True,\n specials=[pad_symbol, unknown_symbol],\n )\n vocab.set_default_index(1)\n return vocab\n\n\nclass EmbeddingProtocol(t.Protocol):\n \"\"\"Provide the format to provide trained embedding.\n\n The methods of this protocol follows `torchtext.vocab.Vectors` to\n use it.\n\n \"\"\"\n\n @property\n def itos(self) -> list[str]:\n \"\"\"Correspond to `stoi`.\"\"\"\n\n @property\n def vectors(self) -> torch.Tensor:\n \"\"\"Return embeddings.\n\n The shape of the tensor is (`len(itos)`, embedding_dim).\n\n \"\"\"\n\n\nclass VocabularyProtocol(t.Protocol):\n \"\"\"Map strings to index.\"\"\"\n\n def forward(self, words: list[str]) -> list[int]:\n \"\"\"Take words and return their index.\"\"\"\n\n def __getitem__(self, s: str) -> int:\n \"\"\"Take a string and return its indice.\"\"\"\n\n def __call__(self, words: list[str]) -> list[int]:\n \"\"\"See `forward`.\"\"\"\n\n def __len__(self) -> int:\n \"\"\"Return the size of the vocabulary.\"\"\"\n\n\nclass _VocabularyImpl:\n def __init__(self, dictionary: dict[str, int], default_idx: int = 1):\n self._dictionary = dictionary\n self._default_idx = default_idx\n\n def forward(self, words: list[str]) -> list[int]:\n return [self.__getitem__(word) for word in words]\n\n def __getitem__(self, s: str) -> int:\n return self._dictionary.get(s, self._default_idx)\n\n def __call__(self, words: list[str]) -> list[int]:\n return self.forward(words)\n\n def __len__(self) -> int:\n return len(self._dictionary)\n\n\ndef create_vocab(\n embedding: EmbeddingProtocol,\n pad_symbol: str = \"<pad>\",\n unknown_symbol: str = \"<unk>\",\n) -> t.Tuple[VocabularyProtocol, torch.Tensor]:\n \"\"\"Create a tensor that contains pad and unkown symbols.\n\n Bind `pad_symbol` to 0 and `unknown_symbol` to 1.\n\n \"\"\"\n d = dict()\n d[pad_symbol] = 0\n d[unknown_symbol] = 1\n c = 2\n dim = embedding.vectors.shape[1]\n weights = [torch.Tensor([0] * dim), torch.Tensor([0] * dim)]\n\n for index, word in enumerate(embedding.itos):\n if word not in set([pad_symbol, unknown_symbol]):\n d[word] = c\n c += 1\n weights.append(embedding.vectors[index, :])\n\n return _VocabularyImpl(d, 1), torch.vstack(weights)\n"
] |
[
[
"torch.Tensor",
"torch.vstack"
]
] |
mathieucaroff/metravision
|
[
"f0bbd4ed1d4b7c8d7a2de4c7a77c5dbe3714bf90"
] |
[
"src/devint/counter.py"
] |
[
"\"\"\"\nCode ajoutant les compteurs à la collection d'images.\n\"\"\"\n\n\nimport cv2\nimport numpy as np\n\n\ndef genFilledRegion(height=520, width=720, channelCount=None, dtype=np.uint8, fill_value=0):\n shape = [height, width]\n if channelCount is not None:\n shape.append(channelCount)\n return np.full(shape=shape, dtype=dtype, fill_value=fill_value)\n\n\ndef addCounters(im, segmenter):\n counting = genFilledRegion(height=200, width=300, fill_value=255)\n segmentIndex = segmenter.segmentIndex\n pairs = [(\"Segment\", segmentIndex)]\n\n cs = segmenter.currentSegment\n if cs is not None:\n pairs.extend(sorted(cs.items()))\n for i, (name, count) in enumerate(pairs):\n text = f\"{name}: {count}\"\n cv2.putText(\n img=counting,\n text=text,\n org=(12, 45 + 40 * i),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1,\n color=0,\n thickness=2\n )\n im[\"counting\"] = counting"
] |
[
[
"numpy.full"
]
] |
rgschmitz1/BioDepot-workflow-builder
|
[
"4ee93abe2d79465755e82a145af3b6a6e1e79fd4",
"4ee93abe2d79465755e82a145af3b6a6e1e79fd4",
"4ee93abe2d79465755e82a145af3b6a6e1e79fd4"
] |
[
"orange3/Orange/preprocess/setup.py",
"orange3/Orange/preprocess/transformation.py",
"orange3/Orange/widgets/data/owcreateclass.py"
] |
[
"# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# License: BSD Style.\nimport os\n\nimport numpy\n\n\ndef configuration(parent_package=\"\", top_path=None):\n from numpy.distutils.misc_util import Configuration\n\n libraries = []\n if os.name == \"posix\":\n libraries.append(\"m\")\n\n config = Configuration(\"preprocess\", parent_package, top_path)\n for source in (\"_discretize.c\", \"_relieff.cpp\"):\n config.add_extension(\n source.rsplit(\".\", 1)[0],\n sources=[source],\n include_dirs=[numpy.get_include()],\n libraries=libraries,\n )\n return config\n\n\nif __name__ == \"__main__\":\n from numpy.distutils.core import setup\n\n setup(**configuration(top_path=\"\").todict())\n",
"import numpy as np\nimport scipy.sparse as sp\n\nfrom Orange.data import Instance, Table, Domain\nfrom Orange.util import Reprable\n\n\nclass Transformation(Reprable):\n \"\"\"\n Base class for simple transformations of individual variables. Derived\n classes are used in continuization, imputation, discretization...\n \"\"\"\n\n def __init__(self, variable):\n \"\"\"\n :param variable: The variable whose transformed value is returned.\n :type variable: int or str or :obj:`~Orange.data.Variable`\n \"\"\"\n self.variable = variable\n\n def __call__(self, data):\n \"\"\"\n Return transformed column from the data by extracting the column view\n from the data and passing it to the `transform` method.\n \"\"\"\n inst = isinstance(data, Instance)\n if inst:\n data = Table(data.domain, [data])\n if self.variable.is_primitive():\n domain = Domain([self.variable])\n data = Table.from_table(domain, data)\n col = data.X\n else:\n domain = Domain([], metas=[self.variable])\n data = Table.from_table(domain, data)\n col = data.metas\n if not sp.issparse(col):\n col = col.squeeze(axis=1)\n transformed = self.transform(col)\n if inst:\n transformed = transformed[0]\n return transformed\n\n def transform(self, c):\n \"\"\"\n Return the transformed value of the argument `c`, which can be a number\n of a vector view.\n \"\"\"\n raise NotImplementedError(\n \"ColumnTransformations must implement method 'transform'.\"\n )\n\n\nclass Identity(Transformation):\n \"\"\"Return an untransformed value of `c`.\n \"\"\"\n\n def transform(self, c):\n return c\n\n\nclass Indicator(Transformation):\n \"\"\"\n Return an indicator value that equals 1 if the variable has the specified\n value and 0 otherwise.\n \"\"\"\n\n def __init__(self, variable, value):\n \"\"\"\n :param variable: The variable whose transformed value is returned.\n :type variable: int or str or :obj:`~Orange.data.Variable`\n\n :param value: The value to which the indicator refers\n :type value: int or float\n \"\"\"\n super().__init__(variable)\n self.value = value\n\n def transform(self, c):\n return c == self.value\n\n\nclass Indicator1(Transformation):\n \"\"\"\n Return an indicator value that equals 1 if the variable has the specified\n value and -1 otherwise.\n \"\"\"\n\n def __init__(self, variable, value):\n \"\"\"\n :param variable: The variable whose transformed value is returned.\n :type variable: int or str or :obj:`~Orange.data.Variable`\n\n :param value: The value to which the indicator refers\n :type value: int or float\n \"\"\"\n super().__init__(variable)\n self.value = value\n\n def transform(self, c):\n return (c == self.value) * 2 - 1\n\n\nclass Normalizer(Transformation):\n \"\"\"\n Return a normalized variable; for the given `value`, the transformed value\n if `(value - self.offset) * self.factor`.\n \"\"\"\n\n def __init__(self, variable, offset, factor):\n \"\"\"\n :param variable: The variable whose transformed value is returned.\n :type variable: int or str or :obj:`~Orange.data.Variable`\n :param offset:\n :type offset: float\n :param factor:\n :type factor: float\n \"\"\"\n super().__init__(variable)\n self.offset = offset\n self.factor = factor\n\n def transform(self, c):\n if sp.issparse(c):\n if self.offset != 0:\n raise ValueError(\"Non-zero offset in normalization \" \"of sparse data\")\n return c * self.factor\n else:\n return (c - self.offset) * self.factor\n\n\nclass Lookup(Transformation):\n \"\"\"\n Transform a discrete variable according to lookup table (`self.lookup`).\n \"\"\"\n\n def __init__(self, variable, 
lookup_table, unknown=np.nan):\n \"\"\"\n :param variable: The variable whose transformed value is returned.\n :type variable: int or str or :obj:`~Orange.data.DiscreteVariable`\n :param lookup_table: transformations for each value of `self.variable`\n :type lookup_table: np.array or list or tuple\n :param unknown: The value to be used as unknown value.\n :type unknown: float or int \n \"\"\"\n super().__init__(variable)\n self.lookup_table = lookup_table\n self.unknown = unknown\n\n def transform(self, column):\n # Densify DiscreteVariable values coming from sparse datasets.\n if sp.issparse(column):\n column = column.toarray().ravel()\n mask = np.isnan(column)\n column = column.astype(int)\n column[mask] = 0\n values = self.lookup_table[column]\n return np.where(mask, self.unknown, values)\n",
"\"\"\"Widget for creating classes from non-numeric attribute by substrings\"\"\"\nimport re\nfrom itertools import count\n\nimport numpy as np\n\nfrom AnyQt.QtWidgets import QGridLayout, QLabel, QLineEdit, QSizePolicy\nfrom AnyQt.QtCore import QSize, Qt\n\nfrom Orange.data import StringVariable, DiscreteVariable, Domain\nfrom Orange.data.table import Table\nfrom Orange.statistics.util import bincount\nfrom Orange.preprocess.transformation import Transformation, Lookup\nfrom Orange.widgets import gui, widget\nfrom Orange.widgets.settings import DomainContextHandler, ContextSetting\nfrom Orange.widgets.utils.itemmodels import DomainModel\nfrom Orange.widgets.widget import Msg, Input, Output\n\n\ndef map_by_substring(a, patterns, case_sensitive, match_beginning):\n \"\"\"\n Map values in a using a list of patterns. The patterns are considered in\n order of appearance.\n\n Args:\n a (np.array): input array of `dtype` `str`\n patterns (list of str): list of stirngs\n case_sensitive (bool): case sensitive match\n match_beginning (bool): match only at the beginning of the string\n\n Returns:\n np.array of floats representing indices of matched patterns\n \"\"\"\n res = np.full(len(a), np.nan)\n if not case_sensitive:\n a = np.char.lower(a)\n patterns = (pattern.lower() for pattern in patterns)\n for val_idx, pattern in reversed(list(enumerate(patterns))):\n indices = np.char.find(a, pattern)\n matches = indices == 0 if match_beginning else indices != -1\n res[matches] = val_idx\n return res\n\n\nclass ValueFromStringSubstring(Transformation):\n \"\"\"\n Transformation that computes a discrete variable from a string variable by\n pattern matching.\n\n Given patterns `[\"abc\", \"a\", \"bc\", \"\"]`, string data\n `[\"abcd\", \"aa\", \"bcd\", \"rabc\", \"x\"]` is transformed to values of the new\n attribute with indices`[0, 1, 2, 0, 3]`.\n\n Args:\n variable (:obj:`~Orange.data.StringVariable`): the original variable\n patterns (list of str): list of string patterns\n case_sensitive (bool, optional): if set to `True`, the match is case\n sensitive\n match_beginning (bool, optional): if set to `True`, the pattern must\n appear at the beginning of the string\n \"\"\"\n\n def __init__(self, variable, patterns, case_sensitive=False, match_beginning=False):\n super().__init__(variable)\n self.patterns = patterns\n self.case_sensitive = case_sensitive\n self.match_beginning = match_beginning\n\n def transform(self, c):\n \"\"\"\n Transform the given data.\n\n Args:\n c (np.array): an array of type that can be cast to dtype `str`\n\n Returns:\n np.array of floats representing indices of matched patterns\n \"\"\"\n nans = np.equal(c, None)\n c = c.astype(str)\n c[nans] = \"\"\n res = map_by_substring(\n c, self.patterns, self.case_sensitive, self.match_beginning\n )\n res[nans] = np.nan\n return res\n\n\nclass ValueFromDiscreteSubstring(Lookup):\n \"\"\"\n Transformation that computes a discrete variable from discrete variable by\n pattern matching.\n\n Say that the original attribute has values\n `[\"abcd\", \"aa\", \"bcd\", \"rabc\", \"x\"]`. 
Given patterns\n `[\"abc\", \"a\", \"bc\", \"\"]`, the values are mapped to the values of the new\n attribute with indices`[0, 1, 2, 0, 3]`.\n\n Args:\n variable (:obj:`~Orange.data.DiscreteVariable`): the original variable\n patterns (list of str): list of string patterns\n case_sensitive (bool, optional): if set to `True`, the match is case\n sensitive\n match_beginning (bool, optional): if set to `True`, the pattern must\n appear at the beginning of the string\n \"\"\"\n\n def __init__(self, variable, patterns, case_sensitive=False, match_beginning=False):\n super().__init__(variable, [])\n self.case_sensitive = case_sensitive\n self.match_beginning = match_beginning\n self.patterns = patterns # Finally triggers computation of the lookup\n\n def __setattr__(self, key, value):\n \"\"\"__setattr__ is overloaded to recompute the lookup table when the\n patterns, the original attribute or the flags change.\"\"\"\n super().__setattr__(key, value)\n if hasattr(self, \"patterns\") and key in (\n \"case_sensitive\",\n \"match_beginning\",\n \"patterns\",\n \"variable\",\n ):\n self.lookup_table = map_by_substring(\n self.variable.values,\n self.patterns,\n self.case_sensitive,\n self.match_beginning,\n )\n\n\nclass OWCreateClass(widget.OWWidget):\n name = \"Create Class\"\n description = \"Create class attribute from a string attribute\"\n icon = \"icons/CreateClass.svg\"\n category = \"Data\"\n keywords = [\"data\"]\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Outputs:\n data = Output(\"Data\", Table)\n\n want_main_area = False\n\n settingsHandler = DomainContextHandler()\n attribute = ContextSetting(None)\n class_name = ContextSetting(\"class\")\n rules = ContextSetting({})\n match_beginning = ContextSetting(False)\n case_sensitive = ContextSetting(False)\n\n TRANSFORMERS = {\n StringVariable: ValueFromStringSubstring,\n DiscreteVariable: ValueFromDiscreteSubstring,\n }\n\n class Warning(widget.OWWidget.Warning):\n no_nonnumeric_vars = Msg(\"Data contains only numeric variables.\")\n\n class Error(widget.OWWidget.Error):\n class_name_duplicated = Msg(\"Class name duplicated.\")\n class_name_empty = Msg(\"Class name should not be empty.\")\n\n def __init__(self):\n super().__init__()\n self.data = None\n\n # The following lists are of the same length as self.active_rules\n\n #: list of pairs with counts of matches for each patter when the\n # patterns are applied in order and when applied on the entire set,\n # disregarding the preceding patterns\n self.match_counts = []\n\n #: list of list of QLineEdit: line edit pairs for each pattern\n self.line_edits = []\n #: list of QPushButton: list of remove buttons\n self.remove_buttons = []\n #: list of list of QLabel: pairs of labels with counts\n self.counts = []\n\n combo = gui.comboBox(\n self.controlArea,\n self,\n \"attribute\",\n label=\"From column: \",\n box=True,\n orientation=Qt.Horizontal,\n callback=self.update_rules,\n model=DomainModel(valid_types=(StringVariable, DiscreteVariable)),\n )\n # Don't use setSizePolicy keyword argument here: it applies to box,\n # not the combo\n combo.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)\n\n patternbox = gui.vBox(self.controlArea, box=True)\n #: QWidget: the box that contains the remove buttons, line edits and\n # count labels. 
The lines are added and removed dynamically.\n self.rules_box = rules_box = QGridLayout()\n patternbox.layout().addLayout(self.rules_box)\n box = gui.hBox(patternbox)\n gui.button(\n box,\n self,\n \"+\",\n callback=self.add_row,\n autoDefault=False,\n flat=True,\n minimumSize=(QSize(20, 20)),\n )\n gui.rubber(box)\n self.rules_box.setColumnMinimumWidth(1, 70)\n self.rules_box.setColumnMinimumWidth(0, 10)\n self.rules_box.setColumnStretch(0, 1)\n self.rules_box.setColumnStretch(1, 1)\n self.rules_box.setColumnStretch(2, 100)\n rules_box.addWidget(QLabel(\"Name\"), 0, 1)\n rules_box.addWidget(QLabel(\"Substring\"), 0, 2)\n rules_box.addWidget(QLabel(\"#Instances\"), 0, 3, 1, 2)\n self.update_rules()\n\n gui.lineEdit(\n self.controlArea,\n self,\n \"class_name\",\n label=\"Name for the new class:\",\n box=True,\n orientation=Qt.Horizontal,\n )\n\n optionsbox = gui.vBox(self.controlArea, box=True)\n gui.checkBox(\n optionsbox,\n self,\n \"match_beginning\",\n \"Match only at the beginning\",\n callback=self.options_changed,\n )\n gui.checkBox(\n optionsbox,\n self,\n \"case_sensitive\",\n \"Case sensitive\",\n callback=self.options_changed,\n )\n\n box = gui.hBox(self.controlArea)\n gui.rubber(box)\n gui.button(\n box, self, \"Apply\", autoDefault=False, width=180, callback=self.apply\n )\n\n # TODO: Resizing upon changing the number of rules does not work\n self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)\n\n @property\n def active_rules(self):\n \"\"\"\n Returns the class names and patterns corresponding to the currently\n selected attribute. If the attribute is not yet in the dictionary,\n set the default.\n \"\"\"\n return self.rules.setdefault(\n self.attribute and self.attribute.name, [[\"\", \"\"], [\"\", \"\"]]\n )\n\n def rules_to_edits(self):\n \"\"\"Fill the line edites with the rules from the current settings.\"\"\"\n for editr, textr in zip(self.line_edits, self.active_rules):\n for edit, text in zip(editr, textr):\n edit.setText(text)\n\n @Inputs.data\n def set_data(self, data):\n \"\"\"Input data signal handler.\"\"\"\n self.closeContext()\n self.rules = {}\n self.data = data\n model = self.controls.attribute.model()\n model.set_domain(data and data.domain)\n self.Warning.no_nonnumeric_vars(shown=data is not None and not model)\n if not model:\n self.attribute = None\n self.Outputs.data.send(None)\n return\n self.attribute = model[0]\n self.openContext(data)\n self.update_rules()\n self.apply()\n\n def update_rules(self):\n \"\"\"Called when the rules are changed: adjust the number of lines in\n the form and fill them, update the counts. 
The widget does not have\n auto-apply.\"\"\"\n self.adjust_n_rule_rows()\n self.rules_to_edits()\n self.update_counts()\n # TODO: Indicator that changes need to be applied\n\n def options_changed(self):\n self.update_counts()\n\n def adjust_n_rule_rows(self):\n \"\"\"Add or remove lines if needed and fix the tab order.\"\"\"\n\n def _add_line():\n self.line_edits.append([])\n n_lines = len(self.line_edits)\n for coli in range(1, 3):\n edit = QLineEdit()\n self.line_edits[-1].append(edit)\n self.rules_box.addWidget(edit, n_lines, coli)\n edit.textChanged.connect(self.sync_edit)\n button = gui.button(\n None,\n self,\n label=\"×\",\n flat=True,\n height=20,\n styleSheet=\"* {font-size: 16pt; color: silver}\"\n \"*:hover {color: black}\",\n autoDefault=False,\n callback=self.remove_row,\n )\n button.setMinimumSize(QSize(12, 20))\n self.remove_buttons.append(button)\n self.rules_box.addWidget(button, n_lines, 0)\n self.counts.append([])\n for coli, kwargs in enumerate(\n (\n dict(alignment=Qt.AlignRight),\n dict(alignment=Qt.AlignLeft, styleSheet=\"color: gray\"),\n )\n ):\n label = QLabel(**kwargs)\n self.counts[-1].append(label)\n self.rules_box.addWidget(label, n_lines, 3 + coli)\n\n def _remove_line():\n for edit in self.line_edits.pop():\n edit.deleteLater()\n self.remove_buttons.pop().deleteLater()\n for label in self.counts.pop():\n label.deleteLater()\n\n def _fix_tab_order():\n prev = None\n for row, rule in zip(self.line_edits, self.active_rules):\n for col_idx, edit in enumerate(row):\n edit.row, edit.col_idx = rule, col_idx\n if prev is not None:\n self.setTabOrder(prev, edit)\n prev = edit\n\n n = len(self.active_rules)\n while n > len(self.line_edits):\n _add_line()\n while len(self.line_edits) > n:\n _remove_line()\n _fix_tab_order()\n\n def add_row(self):\n \"\"\"Append a new row at the end.\"\"\"\n self.active_rules.append([\"\", \"\"])\n self.adjust_n_rule_rows()\n self.update_counts()\n\n def remove_row(self):\n \"\"\"Remove a row.\"\"\"\n remove_idx = self.remove_buttons.index(self.sender())\n del self.active_rules[remove_idx]\n self.update_rules()\n self.update_counts()\n\n def sync_edit(self, text):\n \"\"\"Handle changes in line edits: update the active rules and counts\"\"\"\n edit = self.sender()\n edit.row[edit.col_idx] = text\n self.update_counts()\n\n def class_labels(self):\n \"\"\"Construct a list of class labels. Empty labels are replaced with\n C1, C2, C3. If C<n> already appears in the list of values given by\n the user, the labels start at C<n+1> instead.\n \"\"\"\n largest_c = max(\n (\n int(label[1:])\n for label, _ in self.active_rules\n if re.match(\"^C\\\\d+\", label)\n ),\n default=0,\n )\n class_count = count(largest_c + 1)\n return [\n label_edit.text() or \"C{}\".format(next(class_count))\n for label_edit, _ in self.line_edits\n ]\n\n def update_counts(self):\n \"\"\"Recompute and update the counts of matches.\"\"\"\n\n def _matcher(strings, pattern):\n \"\"\"Return indices of strings into patterns; consider case\n sensitivity and matching at the beginning. The given strings are\n assumed to be in lower case if match is case insensitive. 
Patterns\n are fixed on the fly.\"\"\"\n if not self.case_sensitive:\n pattern = pattern.lower()\n indices = np.char.find(strings, pattern.strip())\n return indices == 0 if self.match_beginning else indices != -1\n\n def _lower_if_needed(strings):\n return strings if self.case_sensitive else np.char.lower(strings)\n\n def _string_counts():\n \"\"\"\n Generate pairs of arrays for each rule until running out of data\n instances. np.sum over the two arrays in each pair gives the\n number of matches of the remaining instances (considering the\n order of patterns) and of the original data.\n\n For _string_counts, the arrays contain bool masks referring to the\n original data\n \"\"\"\n nonlocal data\n data = data.astype(str)\n data = data[~np.char.equal(data, \"\")]\n data = _lower_if_needed(data)\n remaining = np.array(data)\n for _, pattern in self.active_rules:\n matching = _matcher(remaining, pattern)\n total_matching = _matcher(data, pattern)\n yield matching, total_matching\n remaining = remaining[~matching]\n if len(remaining) == 0:\n break\n\n def _discrete_counts():\n \"\"\"\n Generate pairs similar to _string_counts, except that the arrays\n contain bin counts for the attribute's values matching the pattern.\n \"\"\"\n attr_vals = np.array(attr.values)\n attr_vals = _lower_if_needed(attr_vals)\n bins = bincount(data, max_val=len(attr.values) - 1)[0]\n remaining = np.array(bins)\n for _, pattern in self.active_rules:\n matching = _matcher(attr_vals, pattern)\n yield remaining[matching], bins[matching]\n remaining[matching] = 0\n if not np.any(remaining):\n break\n\n def _clear_labels():\n \"\"\"Clear all labels\"\"\"\n for lab_matched, lab_total in self.counts:\n lab_matched.setText(\"\")\n lab_total.setText(\"\")\n\n def _set_labels():\n \"\"\"Set the labels to show the counts\"\"\"\n for (n_matched, n_total), (lab_matched, lab_total), (lab, patt) in zip(\n self.match_counts, self.counts, self.active_rules\n ):\n n_before = n_total - n_matched\n lab_matched.setText(\"{}\".format(n_matched))\n if n_before and (lab or patt):\n lab_total.setText(\"+ {}\".format(n_before))\n if n_matched:\n tip = (\n \"{} of the {} matching instances are already \"\n \"covered above\".format(n_before, n_total)\n )\n else:\n tip = \"All matching instances are already covered above\"\n lab_total.setToolTip(tip)\n lab_matched.setToolTip(tip)\n\n def _set_placeholders():\n \"\"\"Set placeholders for empty edit lines\"\"\"\n matches = [n for n, _ in self.match_counts] + [0] * len(self.line_edits)\n for n_matched, (_, patt) in zip(matches, self.line_edits):\n if not patt.text():\n patt.setPlaceholderText(\n \"(remaining instances)\" if n_matched else \"(unused)\"\n )\n\n labels = self.class_labels()\n for label, (lab_edit, _) in zip(labels, self.line_edits):\n if not lab_edit.text():\n lab_edit.setPlaceholderText(label)\n\n _clear_labels()\n attr = self.attribute\n if attr is None:\n return\n counters = {StringVariable: _string_counts, DiscreteVariable: _discrete_counts}\n data = self.data.get_column_view(attr)[0]\n self.match_counts = [\n [int(np.sum(x)) for x in matches] for matches in counters[type(attr)]()\n ]\n _set_labels()\n _set_placeholders()\n\n def apply(self):\n \"\"\"Output the transformed data.\"\"\"\n self.Error.clear()\n self.class_name = self.class_name.strip()\n if not self.attribute:\n self.Outputs.data.send(None)\n return\n domain = self.data.domain\n if not len(self.class_name):\n self.Error.class_name_empty()\n if self.class_name in domain:\n self.Error.class_name_duplicated()\n if not 
len(self.class_name) or self.class_name in domain:\n self.Outputs.data.send(None)\n return\n rules = self.active_rules\n # Transposition + stripping\n valid_rules = [\n label or pattern or n_matches\n for (label, pattern), n_matches in zip(rules, self.match_counts)\n ]\n patterns = [pattern for (_, pattern), valid in zip(rules, valid_rules) if valid]\n names = [name for name, valid in zip(self.class_labels(), valid_rules) if valid]\n transformer = self.TRANSFORMERS[type(self.attribute)]\n compute_value = transformer(\n self.attribute, patterns, self.case_sensitive, self.match_beginning\n )\n new_class = DiscreteVariable(\n self.class_name, names, compute_value=compute_value\n )\n new_domain = Domain(\n domain.attributes, new_class, domain.metas + domain.class_vars\n )\n new_data = self.data.transform(new_domain)\n self.Outputs.data.send(new_data)\n\n def send_report(self):\n def _cond_part():\n rule = \"<b>{}</b> \".format(class_name)\n if patt:\n rule += \"if <b>{}</b> contains <b>{}</b>\".format(\n self.attribute.name, patt\n )\n else:\n rule += \"otherwise\"\n return rule\n\n def _count_part():\n if not n_matched:\n return \"all {} matching instances are already covered \" \"above\".format(\n n_total\n )\n elif n_matched < n_total and patt:\n return (\n \"{} matching instances (+ {} that are already \"\n \"covered above\".format(n_matched, n_total - n_matched)\n )\n else:\n return \"{} matching instances\".format(n_matched)\n\n if not self.attribute:\n return\n self.report_items(\"Input\", [(\"Source attribute\", self.attribute.name)])\n output = \"\"\n names = self.class_labels()\n for (n_matched, n_total), class_name, (lab, patt) in zip(\n self.match_counts, names, self.active_rules\n ):\n if lab or patt or n_total:\n output += \"<li>{}; {}</li>\".format(_cond_part(), _count_part())\n if output:\n self.report_items(\"Output\", [(\"Class name\", self.class_name)])\n self.report_raw(\"<ol>{}</ol>\".format(output))\n\n\ndef main(): # pragma: no cover\n \"\"\"Simple test for manual inspection of the widget\"\"\"\n import sys\n from AnyQt.QtWidgets import QApplication\n\n a = QApplication(sys.argv)\n table = Table(\"zoo\")\n ow = OWCreateClass()\n ow.show()\n ow.set_data(table)\n a.exec()\n ow.saveSettings()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main()\n"
] |
[
[
"numpy.get_include",
"numpy.distutils.misc_util.Configuration"
],
[
"scipy.sparse.issparse",
"numpy.isnan",
"numpy.where"
],
[
"numpy.equal",
"numpy.array",
"numpy.sum",
"numpy.char.find",
"numpy.char.lower",
"numpy.any",
"numpy.char.equal"
]
] |
salarim/scene_vis
|
[
"8e146195599aaa7598137dd223e9ce2b9e0b25a3",
"8e146195599aaa7598137dd223e9ce2b9e0b25a3"
] |
[
"src/core/depth_map_utils.py",
"demos/kitti_odometry/overlay_odom_point_clouds.py"
] |
[
"import cv2\nimport numpy as np\nimport png\n\nfrom datasets.kitti.obj import calib_utils\n\n\ndef read_depth_map(depth_map_path):\n\n depth_image = cv2.imread(depth_map_path, cv2.IMREAD_ANYDEPTH)\n depth_map = depth_image / 256.0\n\n # Discard depths less than 10cm from the camera\n depth_map[depth_map < 0.1] = 0.0\n\n return depth_map.astype(np.float32)\n\n\ndef save_depth_map(save_path, depth_map,\n version='cv2', png_compression=3):\n \"\"\"Saves depth map to disk as uint16 png\n\n Args:\n save_path: path to save depth map\n depth_map: depth map numpy array [h w]\n version: 'cv2' or 'pypng'\n png_compression: Only when version is 'cv2', sets png compression level.\n A lower value is faster with larger output,\n a higher value is slower with smaller output.\n \"\"\"\n\n # Convert depth map to a uint16 png\n depth_image = (depth_map * 256.0).astype(np.uint16)\n\n if version == 'cv2':\n ret = cv2.imwrite(save_path, depth_image, [cv2.IMWRITE_PNG_COMPRESSION, png_compression])\n\n if not ret:\n raise RuntimeError('Could not save depth map')\n\n elif version == 'pypng':\n with open(save_path, 'wb') as f:\n depth_image = (depth_map * 256.0).astype(np.uint16)\n writer = png.Writer(width=depth_image.shape[1],\n height=depth_image.shape[0],\n bitdepth=16,\n greyscale=True)\n writer.write(f, depth_image)\n\n else:\n raise ValueError('Invalid version', version)\n\n\ndef get_depth_point_cloud(depth_map, cam_p, min_v=0, flatten=True, in_cam0_frame=True):\n \"\"\"Calculates the point cloud from a depth map given the camera parameters\n\n Args:\n depth_map: depth map\n cam_p: camera p matrix\n min_v: amount to crop off the top\n flatten: flatten point cloud to (3, N), otherwise return the point cloud\n in xyz_map (3, H, W) format. (H, W, 3) points can be retrieved using\n xyz_map.transpose(1, 2, 0)\n in_cam0_frame: (optional) If True, shifts the point cloud into cam_0 frame.\n If False, returns the point cloud in the provided camera frame\n\n Returns:\n point_cloud: (3, N) point cloud\n \"\"\"\n\n depth_map_shape = depth_map.shape[0:2]\n\n if min_v > 0:\n # Crop top part\n depth_map[0:min_v] = 0.0\n\n xx, yy = np.meshgrid(\n np.linspace(0, depth_map_shape[1] - 1, depth_map_shape[1]),\n np.linspace(0, depth_map_shape[0] - 1, depth_map_shape[0]))\n\n # Calibration centre x, centre y, focal length\n centre_u = cam_p[0, 2]\n centre_v = cam_p[1, 2]\n focal_length = cam_p[0, 0]\n\n i = xx - centre_u\n j = yy - centre_v\n\n # Similar triangles ratio (x/i = d/f)\n ratio = depth_map / focal_length\n x = i * ratio\n y = j * ratio\n z = depth_map\n\n if in_cam0_frame:\n # Return the points in cam_0 frame\n # Get x offset (b_cam) from calibration: cam_p[0, 3] = (-f_x * b_cam)\n x_offset = -cam_p[0, 3] / focal_length\n\n valid_pixel_mask = depth_map > 0\n x[valid_pixel_mask] += x_offset\n\n # Return the points in the provided camera frame\n point_cloud_map = np.asarray([x, y, z])\n\n if flatten:\n point_cloud = np.reshape(point_cloud_map, (3, -1))\n return point_cloud.astype(np.float32)\n else:\n return point_cloud_map.astype(np.float32)\n\n\ndef project_depths(point_cloud, cam_p, image_shape, max_depth=100.0):\n \"\"\"Projects a point cloud into image space and saves depths per pixel.\n\n Args:\n point_cloud: (3, N) Point cloud in cam0\n cam_p: camera projection matrix\n image_shape: image shape [h, w]\n max_depth: optional, max depth for inversion\n\n Returns:\n projected_depths: projected depth map\n \"\"\"\n\n # Only keep points in front of the camera\n all_points = point_cloud.T\n\n # Save the depth 
corresponding to each point\n points_in_img = calib_utils.project_pc_to_image(all_points.T, cam_p)\n points_in_img_int = np.int32(np.round(points_in_img))\n\n # Remove points outside image\n valid_indices = \\\n (points_in_img_int[0] >= 0) & (points_in_img_int[0] < image_shape[1]) & \\\n (points_in_img_int[1] >= 0) & (points_in_img_int[1] < image_shape[0])\n\n all_points = all_points[valid_indices]\n points_in_img_int = points_in_img_int[:, valid_indices]\n\n # Invert depths\n all_points[:, 2] = max_depth - all_points[:, 2]\n\n # Only save valid pixels, keep closer points when overlapping\n projected_depths = np.zeros(image_shape)\n valid_indices = [points_in_img_int[1], points_in_img_int[0]]\n projected_depths[valid_indices] = [\n max(projected_depths[\n points_in_img_int[1, idx], points_in_img_int[0, idx]],\n all_points[idx, 2])\n for idx in range(points_in_img_int.shape[1])]\n\n projected_depths[valid_indices] = \\\n max_depth - projected_depths[valid_indices]\n\n return projected_depths.astype(np.float32)\n",
"import os\nimport time\n\nimport numpy as np\nimport pykitti\nimport vtk\nfrom scene_vis.vtk_wrapper import vtk_utils\n\nfrom core import demo_utils\nfrom datasets.kitti.obj import obj_utils, calib_utils\nfrom scene_vis.vtk_wrapper.vtk_point_cloud import VtkPointCloud\n\n\ndef get_velo_points(odom_dataset, frame_idx):\n velo_points = odom_dataset.get_velo(frame_idx)\n\n # Filter points to certain area\n points = velo_points[:, 0:3]\n area_extents = np.array([[0, 100], [-50, 50], [-5, 1]], dtype=np.float32)\n area_filter = \\\n (points[:, 0] > area_extents[0, 0]) & \\\n (points[:, 0] < area_extents[0, 1]) & \\\n (points[:, 1] > area_extents[1, 0]) & \\\n (points[:, 1] < area_extents[1, 1]) & \\\n (points[:, 2] > area_extents[2, 0]) & \\\n (points[:, 2] < area_extents[2, 1])\n points = points[area_filter]\n\n return points\n\n\ndef main():\n\n ####################\n # Options\n ####################\n\n odom_dir = os.path.expanduser('~/Kitti/odometry/dataset')\n\n sequence = '03'\n\n # max_fps = 10.0\n\n # vtk_window_size = (1280, 720)\n vtk_window_size = (960, 540)\n\n save_images = False\n\n point_cloud_source = 'lidar'\n # point_cloud_source = 'fast'\n # point_cloud_source = 'multiscale'\n\n # first_frame_idx = None\n # first_frame_pose = None\n\n # Setup odometry dataset handler\n odom_dataset = pykitti.odometry(odom_dir, sequence)\n\n # # Check that velo length matches timestamps?\n # if len(odom_dataset.velo_files) != len(odom_dataset.timestamps):\n # raise ValueError('velo files and timestamps have different length!')\n\n frame_range = (0, len(odom_dataset.timestamps))\n # frame_range = (0, 100)\n # frame_range = (1, 10)\n # frame_range = (20, 30)\n # frame_range = (440, 442)\n # frame_range = (441, 442)\n # frame_range = (440, 452)\n # frame_range = (440, 512)\n # frame_range = (500, 502)\n # frame_range = (500, 512)\n\n # for frame_idx in range(len(raw_data.timestamps)):\n # for frame_idx in range(457, 459):\n\n camera_viewpoint = 'front'\n camera_viewpoint = 'elevated'\n\n # camera_zoom = 2.2\n # camera_viewpoint = (0.0, -5.0, -30.0)\n # camera_fp = (0.0, 1.0, 30.0)\n\n # viewpoint = 'front'\n # camera_zoom = 0.6\n # camera_pos = (0.0, 0.0, 0.0)\n # camera_fp = (0.0, 0.0, 2000.0)\n # vtk_window_size = (1000, 500)\n\n # viewpoint = 'bev'\n # camera_zoom = 1.0\n # # camera_pos = (0.0, -15.0, -25.0)\n # # camera_pos = (0.0, 0.0, 0.0)\n # camera_fp = (0.0, 1.0, 30.0)\n\n ####################\n # End of Options\n ####################\n\n # Setup output folder\n\n # drive_name = category + '_' + date + '_' + drive\n # images_out_dir = 'outputs/point_clouds/' + drive_name + '/' + point_cloud_source + '_' + viewpoint\n # os.makedirs(images_out_dir, exist_ok=True)\n\n # max_loop_time = 1.0 / max_fps\n\n vtk_renderer = demo_utils.setup_vtk_renderer()\n\n vtk_render_window = demo_utils.setup_vtk_render_window(\n 'Overlaid Point Cloud', vtk_window_size, vtk_renderer)\n\n vtk_interactor = vtk.vtkRenderWindowInteractor()\n vtk_interactor.SetRenderWindow(vtk_render_window)\n vtk_interactor.SetInteractorStyle(vtk_utils.ToggleActorsInteractorStyle(None, vtk_renderer))\n vtk_interactor.Initialize()\n\n cam_p2 = odom_dataset.calib.P_rect_20\n\n # Load poses\n cam0_poses = odom_dataset.poses\n\n # Setup camera\n if camera_viewpoint == 'front':\n cam0_curr_vtk_cam_pos = [0.0, 0.0, 0.0, 1.0]\n cam0_curr_vtk_focal_point = [0.0, 0.0, 20.0, 1.0]\n elif camera_viewpoint == 'elevated':\n cam0_curr_vtk_cam_pos = [0.0, -5.0, -15.0, 1.0]\n cam0_curr_vtk_focal_point = [0.0, 0.0, 20.0, 1.0]\n else:\n raise 
ValueError('Invalid camera_pos', camera_viewpoint)\n\n # Create VtkAxes\n vtk_axes = vtk.vtkAxesActor()\n vtk_axes.SetTotalLength(2, 2, 2)\n vtk_renderer.AddActor(vtk_axes)\n\n for frame_idx in range(*frame_range):\n\n # Point cloud actor wrapper\n vtk_pc = VtkPointCloud()\n vtk_pc.vtk_actor.GetProperty().SetPointSize(2)\n\n # all_vtk_actors.append(vtk_point_cloud.vtk_actor)\n vtk_renderer.AddActor(vtk_pc.vtk_actor)\n\n print('{} / {}'.format(frame_idx, len(odom_dataset.timestamps) - 1))\n\n # Load next frame data\n load_start_time = time.time()\n rgb_image = np.asarray(odom_dataset.get_cam2(frame_idx))\n bgr_image = rgb_image[..., ::-1]\n\n if point_cloud_source == 'lidar':\n velo_points = get_velo_points(odom_dataset, frame_idx)\n\n # Transform point cloud to cam_0_curr frame\n velo_curr_points_padded = np.pad(\n velo_points, [[0, 0], [0, 1]],\n constant_values=1.0, mode='constant')\n cam0_curr_pc_all_padded = odom_dataset.calib.T_cam0_velo @ velo_curr_points_padded.T\n\n elif point_cloud_source == 'multiscale':\n\n # depth_map_path = depth_map_dir + '/{:010d}.png'.format(frame_idx)\n # depth_map = depth_map_utils.read_depth_map(depth_map_path)\n # points_cam2 = depth_map_utils.get_depth_point_cloud(depth_map, cam_p).T\n raise NotImplementedError()\n\n else:\n raise ValueError('Invalid point cloud source')\n print('load\\t\\t', time.time() - load_start_time)\n\n # Project velodyne points\n projection_start_time = time.time()\n\n if point_cloud_source == 'lidar':\n\n # Project into image2\n points_in_img2 = calib_utils.project_pc_to_image(cam0_curr_pc_all_padded[0:3], cam_p2)\n points_in_img2_int = np.round(points_in_img2).astype(np.int32)\n\n image_filter = obj_utils.points_in_img_filter(points_in_img2_int, bgr_image.shape)\n\n cam0_curr_pc_padded = cam0_curr_pc_all_padded[:, image_filter]\n\n points_in_img_int_valid = points_in_img2_int[:, image_filter]\n point_colours = bgr_image[points_in_img_int_valid[1], points_in_img_int_valid[0]]\n else:\n raise ValueError('Invalid point_cloud_source', point_cloud_source)\n\n print('projection\\t', time.time() - projection_start_time)\n\n # Get pose\n cam0_ref_pose = cam0_poses[frame_idx]\n tf_cam0_ref_cam0_curr = cam0_ref_pose\n\n # print('cam0_ref_pose\\n', np.round(cam0_ref_pose, 3))\n\n cam0_ref_pc_padded = tf_cam0_ref_cam0_curr @ cam0_curr_pc_padded\n\n # VtkPointCloud\n vtk_pc_start_time = time.time()\n vtk_pc.set_points(cam0_ref_pc_padded[0:3].T, point_colours)\n print('vtk_pc\\t\\t', time.time() - vtk_pc_start_time)\n\n # Display pose\n vtk_pc_pose = VtkPointCloud()\n vtk_pc_pose.vtk_actor.GetProperty().SetPointSize(5)\n vtk_pc_pose.set_points(np.reshape(cam0_ref_pose[0:3, 3], [-1, 3]))\n vtk_renderer.AddActor(vtk_pc_pose.vtk_actor)\n\n cam0_ref_vtk_cam_pos = tf_cam0_ref_cam0_curr.dot(cam0_curr_vtk_cam_pos)\n cam0_ref_vtk_focal_point = tf_cam0_ref_cam0_curr.dot(cam0_curr_vtk_focal_point)\n\n current_cam = vtk_renderer.GetActiveCamera()\n vtk_renderer.ResetCamera()\n current_cam.SetViewUp(0, -1, 0)\n current_cam.SetPosition(cam0_ref_vtk_cam_pos[0:3])\n current_cam.SetFocalPoint(*cam0_ref_vtk_focal_point[0:3])\n current_cam.Zoom(0.5)\n\n vtk_renderer.ResetCameraClippingRange()\n\n # Render\n render_start_time = time.time()\n vtk_render_window.Render()\n print('render\\t\\t', time.time() - render_start_time)\n\n print('---')\n\n print('Done')\n\n # Keep window open\n vtk_interactor.Start()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.reshape",
"numpy.asarray",
"numpy.zeros",
"numpy.round",
"numpy.linspace"
],
[
"numpy.round",
"numpy.pad",
"numpy.array",
"numpy.reshape"
]
] |
carlosep93/LANGSPEC
|
[
"8c8f55d999d79628a56f48d4e1a8918f8c426f72"
] |
[
"fairseq/criterions/interlingua_loss.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport math\nimport torch\nfrom fairseq import utils\n\nfrom . import FairseqCriterion, register_criterion\n\n\n@register_criterion('interlingua_label_smoothed_cross_entropy')\nclass InterlinguaLabelSmoothedCrossEntropyCriterion(FairseqCriterion):\n\n def __init__(self, args, task):\n super().__init__(args, task)\n self.eps = args.label_smoothing\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add criterion-specific arguments to the parser.\"\"\"\n parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',\n help='epsilon for label smoothing, 0 means no label smoothing')\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample['net_input'])\n loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)\n sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']\n logging_output = {\n 'loss': utils.item(loss.data) if reduce else loss.data,\n 'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,\n 'ntokens': sample['ntokens'],\n 'nsentences': sample['target'].size(0),\n 'sample_size': sample_size,\n }\n return loss, sample_size, logging_output\n\n def compute_loss(self, model, net_output, sample, reduce=True):\n lprobs = model.get_normalized_probs(net_output, log_probs=True)\n lprobs = lprobs.view(-1, lprobs.size(-1))\n target = model.get_targets(sample, net_output).view(-1, 1)\n non_pad_mask = target.ne(self.padding_idx)\n nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]\n smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]\n if reduce:\n nll_loss = nll_loss.sum()\n smooth_loss = smooth_loss.sum()\n eps_i = self.eps / lprobs.size(-1)\n loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss\n\n nd = torch.cuda.device_count()\n d0 = torch.device(\"cuda:\" + str(nd-1)) if nd > 1 else torch.device(\"cpu:0\")\n return loss.to(d0), nll_loss.to(d0)\n\n @staticmethod\n def aggregate_logging_outputs(logging_outputs):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n losses = sum(log.get('loss', 0) for log in logging_outputs)\n nll_losses = sum(log.get('nll_loss', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n d = {\n 'loss': losses / sample_size / math.log(2),\n 'nll_loss': nll_losses / ntokens / math.log(2),\n 'ntokens': ntokens,\n 'nsentences': nsentences,\n 'sample_size': sample_size,\n }\n return d\n"
] |
[
[
"torch.device",
"torch.cuda.device_count"
]
] |
aaronwtr/shap
|
[
"5a7b3740a6eccd772bcc3450dee3188487c18104"
] |
[
"shap/explainers/_partition.py"
] |
[
"import types\nimport copy\nimport inspect\nfrom ..utils import MaskedModel\nimport numpy as np\nimport warnings\nimport time\nfrom tqdm.auto import tqdm\nimport queue\nfrom ..utils import assert_import, record_import_error, safe_isinstance, make_masks, OpChain\nfrom .. import Explanation\nfrom .. import maskers\nfrom ._explainer import Explainer\nfrom .. import links\nimport cloudpickle\nimport pickle\nfrom ..maskers import Masker\nfrom ..models import Model\nfrom numba import jit\n\n# .shape[0] messes up pylint a lot here\n# pylint: disable=unsubscriptable-object\n\n\nclass Partition(Explainer):\n\n def __init__(self, model, masker, *, output_names=None, link=links.identity, linearize_link=True,\n feature_names=None, **call_args):\n \"\"\" Uses the Partition SHAP method to explain the output of any function.\n\n Partition SHAP computes Shapley values recursively through a hierarchy of features, this\n hierarchy defines feature coalitions and results in the Owen values from game theory. The\n PartitionExplainer has two particularly nice properties: 1) PartitionExplainer is\n model-agnostic but when using a balanced partition tree only has quadradic exact runtime\n (in term of the number of input features). This is in contrast to the exponential exact\n runtime of KernelExplainer or SamplingExplainer. 2) PartitionExplainer always assigns to groups of\n correlated features the credit that set of features would have had if treated as a group. This\n means if the hierarchical clustering given to PartitionExplainer groups correlated features\n together, then feature correlations are \"accounted for\" ... in the sense that the total credit assigned\n to a group of tightly dependent features does net depend on how they behave if their correlation\n structure was broken during the explanation's perterbation process. Note that for linear models\n the Owen values that PartitionExplainer returns are the same as the standard non-hierarchical\n Shapley values.\n\n\n Parameters\n ----------\n model : function\n User supplied function that takes a matrix of samples (# samples x # features) and\n computes the output of the model for those samples.\n\n masker : function or numpy.array or pandas.DataFrame or tokenizer\n The function used to \"mask\" out hidden features of the form `masker(mask, x)`. It takes a\n single input sample and a binary mask and returns a matrix of masked samples. These\n masked samples will then be evaluated using the model function and the outputs averaged.\n As a shortcut for the standard masking using by SHAP you can pass a background data matrix\n instead of a function and that matrix will be used for masking. Domain specific masking\n functions are available in shap such as shap.maksers.Image for images and shap.maskers.Text\n for text.\n\n partition_tree : None or function or numpy.array\n A hierarchical clustering of the input features represented by a matrix that follows the format\n used by scipy.cluster.hierarchy (see the notebooks_html/partition_explainer directory an example).\n If this is a function then the function produces a clustering matrix when given a single input\n example. 
If you are using a standard SHAP masker object then you can pass masker.clustering\n to use that masker's built-in clustering of the features, or if partition_tree is None then\n masker.clustering will be used by default.\n\n Examples\n --------\n See `Partition explainer examples <https://shap.readthedocs.io/en/latest/api_examples/explainers/Partition.html>`_\n \"\"\"\n\n super().__init__(model, masker, link=link, linearize_link=linearize_link, algorithm=\"partition\", \\\n output_names = output_names, feature_names=feature_names)\n\n # convert dataframes\n # if safe_isinstance(masker, \"pandas.core.frame.DataFrame\"):\n # masker = TabularMasker(masker)\n # elif safe_isinstance(masker, \"numpy.ndarray\") and len(masker.shape) == 2:\n # masker = TabularMasker(masker)\n # elif safe_isinstance(masker, \"transformers.PreTrainedTokenizer\"):\n # masker = TextMasker(masker)\n # self.masker = masker\n\n # TODO: maybe? if we have a tabular masker then we build a PermutationExplainer that we\n # will use for sampling\n self.input_shape = masker.shape[1:] if hasattr(masker, \"shape\") and not callable(masker.shape) else None\n # self.output_names = output_names\n if not safe_isinstance(self.model, \"shap.models.Model\"):\n self.model = Model(self.model)#lambda *args: np.array(model(*args))\n self.expected_value = None\n self._curr_base_value = None\n if getattr(self.masker, \"clustering\", None) is None:\n raise ValueError(\"The passed masker must have a .clustering attribute defined! Try shap.maskers.Partition(data) for example.\")\n # if partition_tree is None:\n # if not hasattr(masker, \"partition_tree\"):\n # raise ValueError(\"The passed masker does not have masker.clustering, so the partition_tree must be passed!\")\n # self.partition_tree = masker.clustering\n # else:\n # self.partition_tree = partition_tree\n\n # handle higher dimensional tensor inputs\n if self.input_shape is not None and len(self.input_shape) > 1:\n self._reshaped_model = lambda x: self.model(x.reshape(x.shape[0], *self.input_shape))\n else:\n self._reshaped_model = self.model\n\n # if we don't have a dynamic clustering algorithm then can precowe mpute\n # a lot of information\n if not callable(self.masker.clustering):\n self._clustering = self.masker.clustering\n self._mask_matrix = make_masks(self._clustering)\n\n # if we have gotten default arguments for the call function we need to wrap ourselves in a new class that\n # has a call function with those new default arguments\n if len(call_args) > 0:\n class Partition(self.__class__):\n # this signature should match the __call__ signature of the class defined below\n def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size=\"auto\",\n outputs=None, silent=False):\n return super().__call__(\n *args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds,\n batch_size=batch_size, outputs=outputs, silent=silent\n )\n Partition.__call__.__doc__ = self.__class__.__call__.__doc__\n self.__class__ = Partition\n for k, v in call_args.items():\n self.__call__.__kwdefaults__[k] = v\n\n # note that changes to this function signature should be copied to the default call argument wrapper above\n def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size=\"auto\",\n outputs=None, silent=False):\n \"\"\" Explain the output of the model on the given arguments.\n \"\"\"\n return super().__call__(\n *args, max_evals=max_evals, 
fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds, batch_size=batch_size,\n outputs=outputs, silent=silent\n )\n\n def explain_row(self, *row_args, max_evals, main_effects, error_bounds, batch_size, outputs, silent, fixed_context = \"auto\"):\n \"\"\" Explains a single row and returns the tuple (row_values, row_expected_values, row_mask_shapes).\n \"\"\"\n\n if fixed_context == \"auto\":\n # if isinstance(self.masker, maskers.Text):\n # fixed_context = 1 # we err on the side of speed for text models\n # else:\n fixed_context = None\n elif fixed_context not in [0, 1, None]:\n raise Exception(\"Unknown fixed_context value passed (must be 0, 1 or None): %s\" %fixed_context)\n\n # build a masked version of the model for the current input sample\n fm = MaskedModel(self.model, self.masker, self.link, self.linearize_link, *row_args)\n\n # make sure we have the base value and current value outputs\n M = len(fm)\n m00 = np.zeros(M, dtype=np.bool)\n # if not fixed background or no base value assigned then compute base value for a row\n if self._curr_base_value is None or not getattr(self.masker, \"fixed_background\", False):\n self._curr_base_value = fm(m00.reshape(1, -1), zero_index=0)[0] # the zero index param tells the masked model what the baseline is\n f11 = fm(~m00.reshape(1, -1))[0]\n\n if callable(self.masker.clustering):\n self._clustering = self.masker.clustering(*row_args)\n self._mask_matrix = make_masks(self._clustering)\n\n if hasattr(self._curr_base_value, 'shape') and len(self._curr_base_value.shape) > 0:\n if outputs is None:\n outputs = np.arange(len(self._curr_base_value))\n elif isinstance(outputs, OpChain):\n outputs = outputs.apply(Explanation(f11)).values\n\n out_shape = (2*self._clustering.shape[0]+1, len(outputs))\n else:\n out_shape = (2*self._clustering.shape[0]+1,)\n\n if max_evals == \"auto\":\n max_evals = 500\n\n self.values = np.zeros(out_shape)\n self.dvalues = np.zeros(out_shape)\n\n self.owen(fm, self._curr_base_value, f11, max_evals - 2, outputs, fixed_context, batch_size, silent)\n\n # if False:\n # if self.multi_output:\n # return [self.dvalues[:,i] for i in range(self.dvalues.shape[1])], oinds\n # else:\n # return self.dvalues.copy(), oinds\n # else:\n # drop the interaction terms down onto self.values\n self.values[:] = self.dvalues\n\n lower_credit(len(self.dvalues) - 1, 0, M, self.values, self._clustering)\n\n return {\n \"values\": self.values[:M].copy(),\n \"expected_values\": self._curr_base_value if outputs is None else self._curr_base_value[outputs],\n \"mask_shapes\": [s + out_shape[1:] for s in fm.mask_shapes],\n \"main_effects\": None,\n \"hierarchical_values\": self.dvalues.copy(),\n \"clustering\": self._clustering,\n \"output_indices\": outputs,\n \"output_names\": getattr(self.model, \"output_names\", None)\n }\n\n def __str__(self):\n return \"shap.explainers.Partition()\"\n\n def owen(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):\n \"\"\" Compute a nested set of recursive Owen values based on an ordering recursion.\n \"\"\"\n\n #f = self._reshaped_model\n #r = self.masker\n #masks = np.zeros(2*len(inds)+1, dtype=np.int)\n M = len(fm)\n m00 = np.zeros(M, dtype=np.bool)\n #f00 = fm(m00.reshape(1,-1))[0]\n base_value = f00\n #f11 = fm(~m00.reshape(1,-1))[0]\n #f11 = self._reshaped_model(r(~m00, x)).mean(0)\n ind = len(self.dvalues)-1\n\n # make sure output_indexes is a list of indexes\n if output_indexes is not None:\n # assert self.multi_output, \"output_indexes is only valid 
for multi-output models!\"\n # inds = output_indexes.apply(f11, 0)\n # out_len = output_indexes_len(output_indexes)\n # if output_indexes.startswith(\"max(\"):\n # output_indexes = np.argsort(-f11)[:out_len]\n # elif output_indexes.startswith(\"min(\"):\n # output_indexes = np.argsort(f11)[:out_len]\n # elif output_indexes.startswith(\"max(abs(\"):\n # output_indexes = np.argsort(np.abs(f11))[:out_len]\n\n f00 = f00[output_indexes]\n f11 = f11[output_indexes]\n\n q = queue.PriorityQueue()\n q.put((0, 0, (m00, f00, f11, ind, 1.0)))\n eval_count = 0\n total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...\n pbar = None\n start_time = time.time()\n while not q.empty():\n\n # if we passed our execution limit then leave everything else on the internal nodes\n if eval_count >= max_evals:\n while not q.empty():\n m00, f00, f11, ind, weight = q.get()[2]\n self.dvalues[ind] += (f11 - f00) * weight\n break\n\n # create a batch of work to do\n batch_args = []\n batch_masks = []\n while not q.empty() and len(batch_masks) < batch_size and eval_count + len(batch_masks) < max_evals:\n\n # get our next set of arguments\n m00, f00, f11, ind, weight = q.get()[2]\n\n # get the left and right children of this cluster\n lind = int(self._clustering[ind-M, 0]) if ind >= M else -1\n rind = int(self._clustering[ind-M, 1]) if ind >= M else -1\n\n # get the distance of this cluster's children\n if ind < M:\n distance = -1\n else:\n if self._clustering.shape[1] >= 3:\n distance = self._clustering[ind-M, 2]\n else:\n distance = 1\n\n # check if we are a leaf node (or other negative distance cluster) and so should terminate our decent\n if distance < 0:\n self.dvalues[ind] += (f11 - f00) * weight\n continue\n\n # build the masks\n m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix\n m10[:] += self._mask_matrix[lind, :]\n m01 = m00.copy()\n m01[:] += self._mask_matrix[rind, :]\n\n batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight))\n batch_masks.append(m10)\n batch_masks.append(m01)\n\n batch_masks = np.array(batch_masks)\n\n # run the batch\n if len(batch_args) > 0:\n fout = fm(batch_masks)\n if output_indexes is not None:\n fout = fout[:,output_indexes]\n\n eval_count += len(batch_masks)\n\n if pbar is None and time.time() - start_time > 5:\n pbar = tqdm(total=total_evals, disable=silent, leave=False)\n pbar.update(eval_count)\n if pbar is not None:\n pbar.update(len(batch_masks))\n\n # use the results of the batch to add new nodes\n for i in range(len(batch_args)):\n\n m00, m10, m01, f00, f11, ind, lind, rind, weight = batch_args[i]\n\n # get the evaluated model output on the two new masked inputs\n f10 = fout[2*i]\n f01 = fout[2*i+1]\n\n new_weight = weight\n if fixed_context is None:\n new_weight /= 2\n elif fixed_context == 0:\n self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n elif fixed_context == 1:\n self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n\n if fixed_context is None or fixed_context == 0:\n # recurse on the left node with zero context\n args = (m00, f00, f10, lind, new_weight)\n q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))\n\n # recurse on the right node with zero context\n args = (m00, f00, f01, rind, new_weight)\n q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))\n\n if fixed_context is None or 
fixed_context == 1:\n # recurse on the left node with one context\n args = (m01, f01, f11, lind, new_weight)\n q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))\n\n # recurse on the right node with one context\n args = (m10, f10, f11, rind, new_weight)\n q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))\n\n if pbar is not None:\n pbar.close()\n\n self.last_eval_count = eval_count\n\n return output_indexes, base_value\n\n def owen3(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):\n \"\"\" Compute a nested set of recursive Owen values based on an ordering recursion.\n \"\"\"\n\n #f = self._reshaped_model\n #r = self.masker\n #masks = np.zeros(2*len(inds)+1, dtype=np.int)\n M = len(fm)\n m00 = np.zeros(M, dtype=np.bool)\n #f00 = fm(m00.reshape(1,-1))[0]\n base_value = f00\n #f11 = fm(~m00.reshape(1,-1))[0]\n #f11 = self._reshaped_model(r(~m00, x)).mean(0)\n ind = len(self.dvalues)-1\n\n # make sure output_indexes is a list of indexes\n if output_indexes is not None:\n # assert self.multi_output, \"output_indexes is only valid for multi-output models!\"\n # inds = output_indexes.apply(f11, 0)\n # out_len = output_indexes_len(output_indexes)\n # if output_indexes.startswith(\"max(\"):\n # output_indexes = np.argsort(-f11)[:out_len]\n # elif output_indexes.startswith(\"min(\"):\n # output_indexes = np.argsort(f11)[:out_len]\n # elif output_indexes.startswith(\"max(abs(\"):\n # output_indexes = np.argsort(np.abs(f11))[:out_len]\n\n f00 = f00[output_indexes]\n f11 = f11[output_indexes]\n\n # our starting plan is to evaluate all the nodes with a fixed_context\n evals_planned = M\n\n q = queue.PriorityQueue()\n q.put((0, 0, (m00, f00, f11, ind, 1.0, fixed_context))) # (m00, f00, f11, tree_index, weight)\n eval_count = 0\n total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...\n pbar = None\n start_time = time.time()\n while not q.empty():\n\n # if we passed our execution limit then leave everything else on the internal nodes\n if eval_count >= max_evals:\n while not q.empty():\n m00, f00, f11, ind, weight, _ = q.get()[2]\n self.dvalues[ind] += (f11 - f00) * weight\n break\n\n # create a batch of work to do\n batch_args = []\n batch_masks = []\n while not q.empty() and len(batch_masks) < batch_size and eval_count < max_evals:\n\n # get our next set of arguments\n m00, f00, f11, ind, weight, context = q.get()[2]\n\n # get the left and right children of this cluster\n lind = int(self._clustering[ind-M, 0]) if ind >= M else -1\n rind = int(self._clustering[ind-M, 1]) if ind >= M else -1\n\n # get the distance of this cluster's children\n if ind < M:\n distance = -1\n else:\n distance = self._clustering[ind-M, 2]\n\n # check if we are a leaf node (or other negative distance cluster) and so should terminate our decent\n if distance < 0:\n self.dvalues[ind] += (f11 - f00) * weight\n continue\n\n # build the masks\n m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix\n m10[:] += self._mask_matrix[lind, :]\n m01 = m00.copy()\n m01[:] += self._mask_matrix[rind, :]\n\n batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight, context))\n batch_masks.append(m10)\n batch_masks.append(m01)\n\n batch_masks = np.array(batch_masks)\n\n # run the batch\n if len(batch_args) > 0:\n fout = fm(batch_masks)\n if output_indexes is not None:\n fout = fout[:,output_indexes]\n\n eval_count += len(batch_masks)\n\n if 
pbar is None and time.time() - start_time > 5:\n pbar = tqdm(total=total_evals, disable=silent, leave=False)\n pbar.update(eval_count)\n if pbar is not None:\n pbar.update(len(batch_masks))\n\n # use the results of the batch to add new nodes\n for i in range(len(batch_args)):\n\n m00, m10, m01, f00, f11, ind, lind, rind, weight, context = batch_args[i]\n\n # get the the number of leaves in this cluster\n if ind < M:\n num_leaves = 0\n else:\n num_leaves = self._clustering[ind-M, 3]\n\n # get the evaluated model output on the two new masked inputs\n f10 = fout[2*i]\n f01 = fout[2*i+1]\n\n # see if we have enough evaluations left to get both sides of a fixed context\n if max_evals - evals_planned > num_leaves:\n evals_planned += num_leaves\n ignore_context = True\n else:\n ignore_context = False\n\n new_weight = weight\n if context is None or ignore_context:\n new_weight /= 2\n\n if context is None or context == 0 or ignore_context:\n self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n\n # recurse on the left node with zero context, flip the context for all decendents if we are ignoring it\n args = (m00, f00, f10, lind, new_weight, 0 if context == 1 else context)\n q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))\n\n # recurse on the right node with zero context, flip the context for all decendents if we are ignoring it\n args = (m00, f00, f01, rind, new_weight, 0 if context == 1 else context)\n q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))\n\n if context is None or context == 1 or ignore_context:\n self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n\n # recurse on the left node with one context, flip the context for all decendents if we are ignoring it\n args = (m01, f01, f11, lind, new_weight, 1 if context == 0 else context)\n q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))\n\n # recurse on the right node with one context, flip the context for all decendents if we are ignoring it\n args = (m10, f10, f11, rind, new_weight, 1 if context == 0 else context)\n q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))\n\n if pbar is not None:\n pbar.close()\n\n self.last_eval_count = eval_count\n\n return output_indexes, base_value\n\n\n\n # def owen2(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):\n # \"\"\" Compute a nested set of recursive Owen values based on an ordering recursion.\n # \"\"\"\n\n # #f = self._reshaped_model\n # #r = self.masker\n # #masks = np.zeros(2*len(inds)+1, dtype=np.int)\n # M = len(fm)\n # m00 = np.zeros(M, dtype=np.bool)\n # #f00 = fm(m00.reshape(1,-1))[0]\n # base_value = f00\n # #f11 = fm(~m00.reshape(1,-1))[0]\n # #f11 = self._reshaped_model(r(~m00, x)).mean(0)\n # ind = len(self.dvalues)-1\n\n # # make sure output_indexes is a list of indexes\n # if output_indexes is not None:\n # # assert self.multi_output, \"output_indexes is only valid for multi-output models!\"\n # # inds = output_indexes.apply(f11, 0)\n # # out_len = output_indexes_len(output_indexes)\n # # if output_indexes.startswith(\"max(\"):\n # # output_indexes = np.argsort(-f11)[:out_len]\n # # elif output_indexes.startswith(\"min(\"):\n # # output_indexes = np.argsort(f11)[:out_len]\n # # elif output_indexes.startswith(\"max(abs(\"):\n # # output_indexes = np.argsort(np.abs(f11))[:out_len]\n\n # f00 = f00[output_indexes]\n # f11 = f11[output_indexes]\n\n # 
fc_owen(m00, m11, 1)\n # fc_owen(m00, m11, 0)\n\n # def fc_owen(m00, m11, context):\n\n # # recurse on the left node with zero context\n # args = (m00, f00, f10, lind, new_weight)\n # q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))\n\n # # recurse on the right node with zero context\n # args = (m00, f00, f01, rind, new_weight)\n # q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))\n # fc_owen(m00, m11, 1)\n # m00 m11\n # owen(fc=1)\n # owen(fc=0)\n\n # q = queue.PriorityQueue()\n # q.put((0, 0, (m00, f00, f11, ind, 1.0, 1)))\n # eval_count = 0\n # total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...\n # pbar = None\n # start_time = time.time()\n # while not q.empty():\n\n # # if we passed our execution limit then leave everything else on the internal nodes\n # if eval_count >= max_evals:\n # while not q.empty():\n # m00, f00, f11, ind, weight, _ = q.get()[2]\n # self.dvalues[ind] += (f11 - f00) * weight\n # break\n\n # # create a batch of work to do\n # batch_args = []\n # batch_masks = []\n # while not q.empty() and len(batch_masks) < batch_size and eval_count < max_evals:\n\n # # get our next set of arguments\n # m00, f00, f11, ind, weight, context = q.get()[2]\n\n # # get the left and right children of this cluster\n # lind = int(self._clustering[ind-M, 0]) if ind >= M else -1\n # rind = int(self._clustering[ind-M, 1]) if ind >= M else -1\n\n # # get the distance of this cluster's children\n # if ind < M:\n # distance = -1\n # else:\n # if self._clustering.shape[1] >= 3:\n # distance = self._clustering[ind-M, 2]\n # else:\n # distance = 1\n\n # # check if we are a leaf node (or other negative distance cluster) and so should terminate our decent\n # if distance < 0:\n # self.dvalues[ind] += (f11 - f00) * weight\n # continue\n\n # # build the masks\n # m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix\n # m10[:] += self._mask_matrix[lind, :]\n # m01 = m00.copy()\n # m01[:] += self._mask_matrix[rind, :]\n\n # batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight, context))\n # batch_masks.append(m10)\n # batch_masks.append(m01)\n\n # batch_masks = np.array(batch_masks)\n\n # # run the batch\n # if len(batch_args) > 0:\n # fout = fm(batch_masks)\n # if output_indexes is not None:\n # fout = fout[:,output_indexes]\n\n # eval_count += len(batch_masks)\n\n # if pbar is None and time.time() - start_time > 5:\n # pbar = tqdm(total=total_evals, disable=silent, leave=False)\n # pbar.update(eval_count)\n # if pbar is not None:\n # pbar.update(len(batch_masks))\n\n # # use the results of the batch to add new nodes\n # for i in range(len(batch_args)):\n\n # m00, m10, m01, f00, f11, ind, lind, rind, weight, context = batch_args[i]\n\n # # get the evaluated model output on the two new masked inputs\n # f10 = fout[2*i]\n # f01 = fout[2*i+1]\n\n # new_weight = weight\n # if fixed_context is None:\n # new_weight /= 2\n # elif fixed_context == 0:\n # self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n # elif fixed_context == 1:\n # self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n\n # if fixed_context is None or fixed_context == 0:\n # self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n\n\n # # recurse on the left node with zero context\n # args = (m00, 
f00, f10, lind, new_weight)\n # q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))\n\n # # recurse on the right node with zero context\n # args = (m00, f00, f01, rind, new_weight)\n # q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))\n\n # if fixed_context is None or fixed_context == 1:\n # self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node\n\n \n # # recurse on the left node with one context\n # args = (m01, f01, f11, lind, new_weight)\n # q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))\n\n # # recurse on the right node with one context\n # args = (m10, f10, f11, rind, new_weight)\n # q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))\n\n # if pbar is not None:\n # pbar.close()\n\n # return output_indexes, base_value\n\n\ndef output_indexes_len(output_indexes):\n if output_indexes.startswith(\"max(\"):\n return int(output_indexes[4:-1])\n elif output_indexes.startswith(\"min(\"):\n return int(output_indexes[4:-1])\n elif output_indexes.startswith(\"max(abs(\"):\n return int(output_indexes[8:-2])\n elif not isinstance(output_indexes, str):\n return len(output_indexes)\n\n@jit\ndef lower_credit(i, value, M, values, clustering):\n if i < M:\n values[i] += value\n return\n li = int(clustering[i-M,0])\n ri = int(clustering[i-M,1])\n group_size = int(clustering[i-M,3])\n lsize = int(clustering[li-M,3]) if li >= M else 1\n rsize = int(clustering[ri-M,3]) if ri >= M else 1\n assert lsize+rsize == group_size\n values[i] += value\n lower_credit(li, values[i] * lsize / group_size, M, values, clustering)\n lower_credit(ri, values[i] * rsize / group_size, M, values, clustering)"
] |
[
[
"numpy.array",
"numpy.abs",
"numpy.random.randn",
"numpy.zeros"
]
] |
BracketJohn/GPflow
|
[
"33178689c34d773a05532d50e3d4d97e7d5d6d60"
] |
[
"doc/source/notebooks/intro_to_gpflow2.pct.py"
] |
[
"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,.pct.py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# GPflow with TensorFlow 2\n# ===\n#\n# ##### Small steps big changes\n#\n# <br>\n#\n#\n\n# %%\nfrom typing import Tuple, Optional\nfrom pathlib import Path\n\nimport datetime\nimport io\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport tensorflow as tf\nimport gpflow\n\nfrom gpflow.config import default_float\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n# %% [markdown]\n# Make `tensorboard` work inside notebook:\n\n# %%\noutput_logdir = \"/tmp/tensorboard\"\n\n# !rm -rf \"{output_logdir}\"\n# !mkdir \"{output_logdir}\"\n\n# %load_ext tensorboard\n# %matplotlib inline\n\n\ndef enumerated_logdir(_logdir_id: int = [0]):\n logdir = Path(output_logdir, str(_logdir_id[0]))\n _logdir_id[0] += 1\n return str(logdir)\n\n\n# %% [markdown]\n# Set up random seeds and default float for `gpflow` tensors:\n\n# %%\ngpflow.config.set_default_float(np.float64)\nnp.random.seed(0)\ntf.random.set_seed(0)\n\n\n# %% [markdown]\n# ## Loading data using TensorFlow Datasets\n#\n# For this example, we create a synthetic dataset (noisy sine function):\n\n# %%\ndef noisy_sin(x):\n return tf.math.sin(x) + 0.1 * tf.random.normal(x.shape, dtype=default_float())\n\nnum_train_data, num_test_data = 100, 500\n\nX = tf.random.uniform((num_train_data, 1), dtype=default_float()) * 10\nXtest = tf.random.uniform((num_test_data, 1), dtype=default_float()) * 10\n\nY = noisy_sin(X)\nYtest = noisy_sin(Xtest)\n\ndata = (X, Y)\n\nplt.plot(X, Y, 'xk')\nplt.show()\n\n# %% [markdown]\n# Working with TensorFlow Datasets is an efficient way to rapidly shuffle, iterate, and batch from data.\n\n# %%\ntrain_dataset = tf.data.Dataset.from_tensor_slices((X, Y))\ntest_dataset = tf.data.Dataset.from_tensor_slices((Xtest, Ytest))\n\nbatch_size = 32\nnum_features = 10\nprefetch_size = num_train_data // 2\nshuffle_buffer_size = num_train_data // 2\nnum_batches_per_epoch = num_train_data // batch_size\n\noriginal_train_dataset = train_dataset\ntrain_dataset = train_dataset.repeat()\\\n .prefetch(prefetch_size)\\\n .shuffle(buffer_size=shuffle_buffer_size)\\\n .batch(batch_size)\n\nprint(f\"prefetch_size={prefetch_size}\")\nprint(f\"shuffle_buffer_size={shuffle_buffer_size}\")\nprint(f\"num_batches_per_epoch={num_batches_per_epoch}\")\n\n# %% [markdown]\n# ## Define a GP model\n#\n# In GPflow 2.0, we use `tf.Module` (or the very thin `gpflow.base.Module` wrapper) to build all our models, as well as their components (kernels, likelihoods, parameters, and so on).\n\n# %%\nkernel = gpflow.kernels.SquaredExponential(variance=2.)\nlikelihood = gpflow.likelihoods.Gaussian()\ninducing_variable = np.linspace(0, 10, num_features).reshape(-1, 1)\n\nmodel = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)\n\n# %% [markdown]\n# You can set a module (or a particular parameter) to be non-trainable using the auxiliary method ```set_trainable(module, False)```:\n\n# %%\nfrom gpflow.utilities import set_trainable\n\nset_trainable(likelihood, False)\nset_trainable(kernel.variance, False)\n\nset_trainable(likelihood, True)\nset_trainable(kernel.variance, True)\n\n# %% [markdown]\n# We can use ```param.assign(value)``` to assign a value to a parameter:\n\n# %%\nkernel.lengthscale.assign(0.5)\n\n# %% [markdown]\n# All 
these changes are reflected when we use ```print_summary(model)``` to print a detailed summary of the model. By default the output is displayed in a minimalistic and simple table.\n\n# %%\nfrom gpflow.utilities import print_summary\n\nprint_summary(model) # same as print_summary(model, fmt=\"simple\")\n\n# %% [markdown]\n# We can change default printing so that it will look nicer in our notebook:\n\n# %%\ngpflow.config.set_default_summary_fmt(\"notebook\")\n\nprint_summary(model) # same as print_summary(model, fmt=\"notebook\")\n\n# %% [markdown]\n# Jupyter notebooks also format GPflow classes (that are subclasses of `gpflow.base.Module`) in the same nice way when at the end of a cell (this is independent of the `default_summary_fmt`):\n\n# %%\nmodel\n\n# %% [markdown]\n# ## Training using Gradient Tapes\n#\n# In TensorFlow 2, we can optimize (trainable) model parameters with TensorFlow optimizers using `tf.GradientTape`. In this simple example, we perform one gradient update of the Adam optimizer to minimize the negative marginal log likelihood (or ELBO) of our model.\n\n# %%\noptimizer = tf.optimizers.Adam()\n\nwith tf.GradientTape() as tape:\n tape.watch(model.trainable_variables)\n obj = - model.elbo(data)\n grads = tape.gradient(obj, model.trainable_variables)\n\noptimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n\n# %% [markdown]\n# For a more elaborate example of a gradient update we can define an ```optimization_step``` that uses the decorator ```tf.function``` on a closure. A closure is a callable that returns the model objective evaluated at a given dataset when called.\n\n# %%\ndef optimization_step(model: gpflow.models.SVGP, batch: Tuple[tf.Tensor, tf.Tensor]):\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(model.trainable_variables)\n obj = - model.elbo(batch)\n grads = tape.gradient(obj, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n\n# %% [markdown]\n# We can use the functionality of TensorFlow Datasets to define a simple training loop that iterates over batches of the training dataset:\n\n# %%\ndef simple_training_loop(model: gpflow.models.SVGP, epochs: int = 1, logging_epoch_freq: int = 10):\n batches = iter(train_dataset)\n tf_optimization_step = tf.function(optimization_step, autograph=False)\n for epoch in range(epochs):\n for _ in range(num_batches_per_epoch):\n tf_optimization_step(model, next(batches))\n\n epoch_id = epoch + 1\n if epoch_id % logging_epoch_freq == 0:\n tf.print(f\"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}\")\n\n\n# %%\nsimple_training_loop(model, epochs=10, logging_epoch_freq=2)\n\n# %% [markdown]\n# ## Monitoring\n#\n# We can monitor the training procedure using `tf.summary`. 
First we create a summary writer object through which we can write scalars and images.\n\n# %%\nfrom intro_to_gpflow2_plotting import plotting_regression, summary_matplotlib_image\n\nsamples_input = tf.cast(np.linspace(0, 10, 100).reshape(100, 1), default_float())\n\ndef monitored_training_loop(model: gpflow.models.SVGP, logdir: str,\n epochs: int = 1, logging_epoch_freq: int = 10,\n num_samples: int = 10):\n summary_writer = tf.summary.create_file_writer(logdir)\n tf_optimization_step = tf.function(optimization_step)\n batches = iter(train_dataset)\n\n with summary_writer.as_default():\n for epoch in range(epochs):\n for _ in range(num_batches_per_epoch):\n tf_optimization_step(model, next(batches))\n\n epoch_id = epoch + 1\n if epoch_id % logging_epoch_freq == 0:\n tf.print(f\"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}\")\n\n mean, var = model.predict_f(samples_input)\n samples = model.predict_f_samples(samples_input, num_samples)\n fig = plotting_regression(X, Y, samples_input, mean, var, samples)\n\n summary_matplotlib_image(dict(model_samples=fig), step=epoch)\n tf.summary.scalar('elbo', data=model.elbo(data), step=epoch)\n tf.summary.scalar('likelihood/variance', data=model.likelihood.variance, step=epoch)\n tf.summary.scalar('kernel/lengthscale', data=model.kernel.lengthscale, step=epoch)\n tf.summary.scalar('kernel/variance', data=model.kernel.variance, step=epoch)\n\n\n# %%\nmodel = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)\n\noutput_logdir = enumerated_logdir()\nmonitored_training_loop(model, output_logdir, epochs=1000, logging_epoch_freq=100)\n\n# %% [markdown]\n# Then, we can use TensorBoard to examine the training procedure in more detail\n\n# %%\n# # %tensorboard --logdir \"{output_logdir}\"\n\n# %% [markdown]\n# ## Checkpointing: saving and loading models\n#\n# With the help of `tf.train.CheckpointManager` and `tf.train.Checkpoint`, we can checkpoint the model throughout the training procedure. 
Let's start with a simple example using checkpointing to save and load a `tf.Variable`:\n\n# %%\ninitial_value = 1.2\na = tf.Variable(initial_value)\n\n# %% [markdown]\n# Create `Checkpoint` object:\n\n# %%\nckpt = tf.train.Checkpoint(a=a)\nmanager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=3)\n\n# %% [markdown]\n# Save the variable `a` and change its value right after:\n\n# %%\nmanager.save()\n_ = a.assign(0.33)\n\n# %% [markdown]\n# Now we can restore the old variable value:\n\n# %%\nprint(f\"Current value of variable a: {a.numpy():0.3f}\")\n\nckpt.restore(manager.latest_checkpoint)\n\nprint(f\"Value of variable a after restore: {a.numpy():0.3f}\")\n\n# %% [markdown]\n# In the example below, we modify a simple training loop to save the model every 100 epochs using the `CheckpointManager`.\n\n# %%\nmodel = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)\n\ndef checkpointing_training_loop(model: gpflow.models.SVGP,\n batch_size: int,\n epochs: int,\n manager: tf.train.CheckpointManager,\n logging_epoch_freq: int = 100,\n epoch_var: Optional[tf.Variable] = None,\n step_var: Optional[tf.Variable] = None):\n tf_optimization_step = tf.function(optimization_step)\n batches = iter(train_dataset)\n\n for epoch in range(epochs):\n for step in range(num_batches_per_epoch):\n tf_optimization_step(model, next(batches))\n if step_var is not None:\n step_var.assign(epoch * num_batches_per_epoch + step + 1)\n if epoch_var is not None:\n epoch_var.assign(epoch + 1)\n\n epoch_id = epoch + 1\n if epoch_id % logging_epoch_freq == 0:\n ckpt_path = manager.save()\n tf.print(f\"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}, saved at {ckpt_path}\")\n\n\n# %%\nstep_var = tf.Variable(1, dtype=tf.int32, trainable=False)\nepoch_var = tf.Variable(1, dtype=tf.int32, trainable=False)\nckpt = tf.train.Checkpoint(model=model, step=step_var, epoch=epoch_var)\nmanager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=5)\n\nprint(f\"Checkpoint folder path at: {output_logdir}\")\n\ncheckpointing_training_loop(model, batch_size=batch_size, epochs=1000, manager=manager, epoch_var=epoch_var, step_var=step_var)\n\n# %% [markdown]\n# After the models have been saved, we can restore them using ```tf.train.Checkpoint.restore``` and assert that their performance corresponds to that logged during training.\n\n# %%\nfor i, recorded_checkpoint in enumerate(manager.checkpoints):\n ckpt.restore(recorded_checkpoint)\n print(f\"{i} restored model from epoch {int(epoch_var)} [step:{int(step_var)}] : ELBO training set {model.elbo(data)}\")\n\n# %% [markdown]\n# ## Copying (hyper)parameter values between models\n#\n# It is easy to interact with the set of all parameters of a model or a subcomponent programmatically.\n#\n# The following returns a dictionary of all parameters within\n\n# %%\nmodel = gpflow.models.SGPR(data, kernel=kernel, inducing_variable=inducing_variable)\n\n# %%\ngpflow.utilities.parameter_dict(model)\n\n# %% [markdown]\n# Such a dictionary can be assigned back to this model (or another model with the same tree of parameters) as follows:\n\n# %%\nparams = gpflow.utilities.parameter_dict(model)\ngpflow.utilities.multiple_assign(model, params)\n"
] |
[
[
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.train.CheckpointManager",
"tensorflow.GradientTape",
"numpy.random.seed",
"tensorflow.random.set_seed",
"tensorflow.optimizers.Adam",
"matplotlib.pyplot.plot",
"tensorflow.Variable",
"tensorflow.summary.create_file_writer",
"tensorflow.function",
"tensorflow.summary.scalar",
"tensorflow.math.sin",
"matplotlib.pyplot.show",
"numpy.linspace",
"tensorflow.train.Checkpoint"
]
] |
Divyanshu23/model-zoo
|
[
"2eea6df691d302e182bb1ff8ec5af3542de562ba"
] |
[
"classification/Inception-V3_PyTorch/dataloader.py"
] |
[
"import shutil\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data import Dataset, DataLoader, random_split\r\nfrom torchvision import transforms, datasets\r\n\r\n\r\ndef load_cifar():\r\n\r\n transform = transforms.Compose([transforms.Resize((32, 32)),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.5], std=[0.5])])\r\n\r\n train_dataset = datasets.CIFAR10(\r\n './data', train=True, download=True, transform=transform)\r\n test_dataset = datasets.CIFAR10(\r\n './data', train=False, download=True, transform=transform)\r\n\r\n # Split dataset into training set and validation set.\r\n train_dataset, val_dataset = random_split(train_dataset, (45000, 5000))\r\n\r\n print(\"Image Shape: {}\".format(\r\n train_dataset[0][0].numpy().shape), end='\\n\\n')\r\n print(\"Training Set: {} samples\".format(len(train_dataset)))\r\n print(\"Validation Set: {} samples\".format(len(val_dataset)))\r\n print(\"Test Set: {} samples\".format(len(test_dataset)))\r\n\r\n BATCH_SIZE = 32\r\n\r\n # Create iterator.\r\n train_loader = DataLoader(\r\n train_dataset, batch_size=BATCH_SIZE, shuffle=True)\r\n val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=True)\r\n test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)\r\n\r\n # Delete the data/ folder.\r\n shutil.rmtree('./data')\r\n\r\n return train_loader, val_loader, test_loader\r\n"
] |
[
[
"torch.utils.data.random_split",
"torch.utils.data.DataLoader"
]
] |
trusthlt/dp-across-nlp-tasks
|
[
"ec3e03511420044cdb0bb1a3574925d354ff03f4",
"ec3e03511420044cdb0bb1a3574925d354ff03f4"
] |
[
"NLPCode/named_entity_recognition/utils.py",
"NLPCode/sentiment_analysis/Transformers/train_eval_models.py"
] |
[
"import time\nimport torch\nfrom queue import Queue\nimport numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\n\ndef get_acc_pre_rec_f1(y_true, y_pred):\n assert (len(y_true) == len(y_pred))\n\n # accuracy\n acc = 0\n for t, p in zip(y_true, y_pred):\n if t == p:\n acc += 1\n\n # precision, recall, f1\n pr_epoch, rec_epoch, f1_epoch, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')\n return acc / len(y_true), pr_epoch, rec_epoch, f1_epoch\n\ndef take_no_pad(seqlens, y_pred, y):\n # for measurment save only the non padded tags\n y_true_noPad = []\n y_pred_noPad = []\n\n for i, seqlen in enumerate(seqlens):\n y_pred_noPad.append(y_pred[i][:seqlen].cpu().detach().numpy())\n y_true_noPad.append(y[i][:seqlen].cpu().detach().numpy())\n\n if not (len(y_true_noPad[i]) == seqlens[i] and len(y_pred_noPad[i]) == seqlens[i]):\n print(y_pred)\n print(len(y_pred))\n print(y)\n print(len(y))\n print(f'{len(y_true_noPad[i])} == {seqlens[i]} and {len(y_pred_noPad[i])} == {seqlens[i]}')\n print(f'{y_true_noPad[i]} with length: {seqlens[i]}')\n print(f'{y_pred_noPad[i]} with length: {seqlens[i]}')\n\n # sanity check if seq len is actual length of seqence\n assert(len(y_true_noPad[i]) == seqlens[i] and len(y_pred_noPad[i]) == seqlens[i])\n \n return y_true_noPad, y_pred_noPad\n\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\nclass EarlyStopping:\n def __init__(self, patience):\n self.patience = patience\n self.q = Queue(maxsize = self.patience)\n self.max_acc = -1\n self.counter = 0\n \n def should_stop(self, accuracy):\n\n # check if accuracy is greater than max than empy out queue and set new max\n if accuracy > self.max_acc:\n self.q.queue.clear()\n self.max_acc = accuracy\n self.counter = 0\n else:\n # else add element to queue and check if queue is full (if we should do early stopping)\n self.q.put(accuracy)\n self.counter += 1\n if self.q.full():\n # do early stopping\n return True",
"import torch\nimport torch.nn as nn\nfrom utils import binary_accuracy\nfrom sklearn.metrics import precision_recall_fscore_support\nimport numpy as np\nonly_one_iteration = False\n\n\ndef train(model, iterator, optimizer, criterion, device):\n epoch_loss = 0\n epoch_acc = 0\n precission = 0\n recall = 0\n f1 = 0\n\n model.train()\n\n for i, batch in enumerate(iterator):\n\n optimizer.zero_grad()\n predictions = model(batch.text, device).squeeze(1)\n\n loss = criterion(predictions, batch.label)\n acc = binary_accuracy(predictions, batch.label)\n\n loss.backward()\n \n optimizer.step()\n\n epoch_loss += float(loss.item())\n epoch_acc += float(acc.item())\n \n pr_epoch, rec_epoch, f1_epoch, _ = precision_recall_fscore_support(batch.label.cpu().detach().numpy(), torch.round(torch.sigmoid(predictions)).cpu().detach().numpy(),average='macro')\n precission += pr_epoch\n recall += rec_epoch\n f1 += f1_epoch\n \n if only_one_iteration:\n break\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator), precission/ len(iterator), recall/ len(iterator), f1/ len(iterator)\n\n\ndef evaluate(model, iterator, criterion, device):\n epoch_loss = 0\n epoch_acc = 0\n precission = 0\n recall = 0\n f1 = 0\n model.eval()\n\n with torch.no_grad():\n for batch in iterator:\n predictions = model(batch.text, device).squeeze(1)\n\n loss = criterion(predictions, batch.label)\n acc = binary_accuracy(predictions, batch.label)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n pr_epoch, rec_epoch, f1_epoch, _ = precision_recall_fscore_support(batch.label.cpu().detach().numpy(), torch.round(torch.sigmoid(predictions)).cpu().detach().numpy(),average='micro')\n precission += pr_epoch\n recall += rec_epoch\n f1 += f1_epoch\n\n if only_one_iteration:\n break\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator), precission/ len(iterator), recall/ len(iterator), f1/ len(iterator)\n"
] |
[
[
"sklearn.metrics.precision_recall_fscore_support"
],
[
"torch.sigmoid",
"torch.no_grad"
]
] |
parallelworks/welding
|
[
"eb1fe04e9f1be1d374782f7476767dcf2197fe36"
] |
[
"mexdex/pvutils.py"
] |
[
"from paraview.simple import *\nimport sys\nimport data_IO\nimport os\nimport subprocess\nimport shutil\n\n# For saving plots as pngs\nimport matplotlib\n\nimport numpy as np\nimport warnings\n\ndef getParaviewVersion():\n \"\"\" Return paraview version as a double number: e.g. 5.4\"\"\"\n PVversionMajor = paraview.servermanager.vtkSMProxyManager.GetVersionMajor() \n PVversionMinor = paraview.servermanager.vtkSMProxyManager.GetVersionMinor()\n PVversion = PVversionMajor + PVversionMinor/100.0\n return PVversion\n\n\ndef planeNormalFromName(planeName):\n if planeName.lower() == \"x\":\n normal = [1.0, 0.0, 0.0]\n if planeName.lower() == \"y\":\n normal = [0.0, 1.0, 0.0]\n if planeName.lower() == \"z\":\n normal = [0.0, 0.0, 1.0]\n return normal\n\n\ndef setviewposition(position_key, camera):\n center = position_key.split()\n nPoints = len(center)/3\n positionXYZ = []\n for iPoint in range(nPoints):\n positionXYZ.extend(list(camera.GetFocalPoint()))\n for i in range(iPoint*3, 3+iPoint*3):\n if center[i] != \"center\":\n positionXYZ[i] = float(center[i])\n return positionXYZ\n\n\ndef read_csv(f):\n kpihash = {}\n cols = [l.replace(\"\\n\", \"\") for l in f.readline().split(\",\")]\n for i, line in enumerate(f):\n data = [l.replace(\"\\n\", \"\") for l in line.split(\",\")]\n kpihash[data[0]] = {}\n for ii, v in enumerate(data):\n if ii != 0:\n kpihash[data[0]][cols[ii]] = v\n return kpihash\n\n\ndef getfieldsfromkpihash(kpihash):\n cellsarrays = []\n for kpi in kpihash:\n if 'field' in kpihash[kpi]:\n cellsarrays.append(kpihash[kpi]['field'])\n\n ca = set(cellsarrays)\n cellsarrays = list(ca)\n return cellsarrays\n\n\ndef isfldScalar(arrayInfo):\n numComps = arrayInfo.GetNumberOfComponents()\n if numComps == 1:\n return True\n else:\n return False\n\n\ndef getfldComponentMap(arrayInfo):\n compName2num = {}\n numComps = arrayInfo.GetNumberOfComponents()\n if numComps>1:\n for iComp in range(-1,numComps):\n compName2num[arrayInfo.GetComponentName(iComp)] = iComp\n return compName2num\n\n\ndef getfldCompNumber(arrayInfo, kpiComp):\n compNumberMap = getfldComponentMap(arrayInfo)\n if not kpiComp:\n compNum = 0\n else:\n compNum = compNumberMap[kpiComp]\n return compNum\n\n\ndef getdatarange(datasource, kpifld, kpifldcomp):\n arrayInfo = datasource.PointData[kpifld]\n compNumber = getfldCompNumber(arrayInfo, kpifldcomp)\n datarange = arrayInfo.GetRange(compNumber)\n return datarange\n\n\ndef extractStatsOld(d, kpi, kpifield, kpiComp, kpitype, fp_csv_metrics, ave=[]):\n datarange = getdatarange(d, kpifield, kpiComp)\n if kpitype == \"Probe\":\n average=(datarange[0]+datarange[1])/2\n elif kpitype == \"Line\":\n average=ave\n elif kpitype == \"Slice\":\n # get kpi field value and area - average = value/area\n integrateVariables = IntegrateVariables(Input=d)\n average = getdatarange(integrateVariables, kpifield, kpiComp)[0]\\\n / integrateVariables.CellData['Area'].GetRange()[0]\n elif kpitype == \"Volume\" or kpitype == \"Clip\":\n integrateVariables = IntegrateVariables(Input=d)\n average = getdatarange(integrateVariables, kpifield, kpiComp)[0]\\\n / integrateVariables.CellData['Volume'].GetRange()[0]\n\n fp_csv_metrics.write(\",\".join([kpi, str(average), str(datarange[0]),str(datarange[1])]) + \"\\n\")\n\n\ndef extractStats(dataSource, kpi, kpifield, kpiComp, kpitype, fp_csv_metrics):\n # If kpifield is a vector, add a calculater on top and extract the component of the vector\n # as a scalar\n \n arrayInfo = dataSource.PointData[kpifield]\n if isfldScalar(arrayInfo):\n statVarName = kpifield\n 
else:\n # create a new 'Calculator'\n statVarName = kpifield + '_' + kpiComp\n calc1 = Calculator(Input=dataSource)\n calc1.ResultArrayName = statVarName\n if kpiComp == 'Magnitude':\n calc1.Function = 'mag('+kpifield+')'\n else:\n calc1.Function = calc1.ResultArrayName\n UpdatePipeline()\n dataSource = calc1\n\n # create a new 'Descriptive Statistics'\n dStats = DescriptiveStatistics(Input=dataSource, ModelInput=None)\n \n dStats.VariablesofInterest = [statVarName]\n UpdatePipeline()\n\n dStatsDataInfo = dStats.GetDataInformation()\n dStatsStatsInfo = dStatsDataInfo.GetRowDataInformation()\n numStats = dStatsDataInfo.GetRowDataInformation().GetNumberOfArrays()\n\n for iStat in range(numStats):\n statName = dStatsStatsInfo.GetArrayInformation(iStat).GetName()\n statValue = dStatsStatsInfo.GetArrayInformation(iStat).GetComponentRange(0)[0]\n if statName == 'Maximum':\n maxaximum = statValue\n elif statName == 'Minimum' :\n minimum = statValue\n elif statName == 'Mean':\n average = statValue\n elif statName == 'Standard Deviation':\n stanDev = statValue\n\n fp_csv_metrics.write(\",\".join([kpi, str(average), str(minimum), str(maxaximum), str(stanDev)]) + \"\\n\")\n\n\ndef correctfieldcomponent(datasource, metrichash):\n \"\"\"\n Set \"fieldComponent\" to \"Magnitude\" if the component of vector/tensor fields is not given. For scalar fields set \n \"fieldComponent\" to an empty string.\n \"\"\"\n kpifld = metrichash['field']\n arrayInfo = datasource.PointData[kpifld]\n if isfldScalar(arrayInfo):\n metrichash['fieldComponent'] = ''\n else:\n if not 'fieldComponent' in metrichash:\n metrichash['fieldComponent'] = 'Magnitude'\n return metrichash\n\n\ndef getReaderTypeFromfileAddress(dataFileAddress):\n if dataFileAddress.endswith('system/controlDict'):\n readerType = 'openFOAM'\n else:\n try:\n filename, file_extension = os.path.splitext(dataFileAddress)\n readerType = file_extension.replace('.', '')\n except:\n print('Error: Reader type cannot be set. 
Please check data file address')\n sys.exit(1)\n\n return readerType\n\n\ndef readDataFile(dataFileAddress, dataarray):\n\n readerType = getReaderTypeFromfileAddress(dataFileAddress)\n if readerType == 'exo':\n # Read the results file : create a new 'ExodusIIReader'\n dataReader = ExodusIIReader(FileName=dataFileAddress)\n\n dataReader.ElementBlocks = ['PNT', 'C3D20 C3D20R', 'COMPOSITE LAYER C3D20', 'Beam B32 B32R',\n 'CPS8 CPE8 CAX8 S8 S8R', 'C3D8 C3D8R', 'TRUSS2', 'TRUSS2',\n 'CPS4R CPE4R S4 S4R', 'CPS4I CPE4I', 'C3D10', 'C3D4', 'C3D15',\n 'CPS6 CPE6 S6', 'C3D6', 'CPS3 CPE3 S3',\n '2-node 1d network entry elem', '2-node 1d network exit elem',\n '2-node 1d genuine network elem']\n\n # only load the data that is needed\n dataReader.PointVariables = dataarray\n elif readerType == 'openFOAM':\n # create a new 'OpenFOAMReader'\n dataReader = OpenFOAMReader(FileName=dataFileAddress)\n\n dataReader.MeshRegions = ['internalMesh']\n\n dataReader.CellArrays = dataarray\n\n elif readerType == 'vtk':\n dataReader = LegacyVTKReader(FileNames=[dataFileAddress])\n \n elif readerType == 'stl':\n dataReader = STLReader(FileNames=[dataFileAddress])\n\n return dataReader\n\n\ndef getTimeSteps():\n # get animation scene\n animationScene1 = GetAnimationScene()\n\n # update animation scene based on data timesteps\n animationScene1.UpdateAnimationUsingDataTimeSteps()\n\n timeSteps = []\n if type(animationScene1.TimeKeeper.TimestepValues)== int or type(animationScene1.TimeKeeper.TimestepValues)== float:\n timeSteps.append(animationScene1.TimeKeeper.TimestepValues) \n else:\n timeSteps = list(animationScene1.TimeKeeper.TimestepValues)\n\n return timeSteps\n\n\ndef setFrame2latestTime(renderView1):\n\n TimeSteps = getTimeSteps()\n\n latesttime = TimeSteps[-1]\n print(\"Setting view to latest Time: \" + str(latesttime))\n\n renderView1.ViewTime = latesttime\n return renderView1\n\n\ndef initRenderView (dataReader, viewSize, backgroundColor):\n # get active view\n renderView1 = GetActiveViewOrCreate('RenderView')\n\n try:\n renderView1 = setFrame2latestTime(renderView1)\n except:\n pass\n\n # set the view size\n renderView1.ViewSize = viewSize\n renderView1.Background = backgroundColor\n\n # show data in view\n readerDisplay = Show(dataReader, renderView1)\n\n # reset view to fit data\n renderView1.ResetCamera()\n\n return renderView1, readerDisplay\n\n\ndef colorMetric(d, metrichash):\n display = GetDisplayProperties(d)\n kpifld = metrichash['field']\n kpifldcomp = metrichash['fieldComponent']\n ColorBy(display, ('POINTS', kpifld, kpifldcomp))\n\n Render()\n UpdateScalarBars()\n ctf = GetColorTransferFunction(kpifld)\n try:\n ctf.ApplyPreset(metrichash[\"colorscale\"], True)\n except:\n pass\n try:\n if data_IO.str2bool(metrichash[\"invertcolor\"]):\n ctf.InvertTransferFunction()\n except:\n pass\n \n try:\n datarange = getdatarange(d, kpifld, kpifldcomp)\n min = datarange[0]\n max = datarange[1]\n if metrichash[\"min\"] != \"auto\":\n min = float(metrichash[\"min\"])\n if metrichash[\"max\"] != \"auto\":\n max = float(metrichash[\"max\"])\n ctf.RescaleTransferFunction(min, max)\n if int(metrichash[\"discretecolors\"]) > 0:\n ctf.Discretize = 1\n ctf.NumberOfTableValues = int(metrichash[\"discretecolors\"])\n else:\n ctf.Discretize = 0\n except:\n pass\n\n renderView1 = GetActiveViewOrCreate('RenderView')\n ctfColorBar = GetScalarBar(ctf, renderView1)\n\n ctfColorBar.Orientation = \"Horizontal\"\n\n # Properties modified on uLUTColorBar\n if 'barTitle' in metrichash:\n ctfColorBar.Title = 
metrichash[\"barTitle\"]\n if 'ComponentTitle' in metrichash:\n ctfColorBar.ComponentTitle = metrichash[\"ComponentTitle\"]\n if 'FontColor' in metrichash:\n ctfColorBar.TitleColor = data_IO.read_floats_from_string(metrichash[\"FontColor\"])\n ctfColorBar.LabelColor = data_IO.read_floats_from_string(metrichash[\"FontColor\"])\n else:\n ctfColorBar.TitleColor = [0, 0, 0]\n ctfColorBar.LabelColor = [0, 0, 0]\n if 'FontSize' in metrichash:\n ctfColorBar.TitleFontSize = int(metrichash[\"FontSize\"])\n ctfColorBar.LabelFontSize = int(metrichash[\"FontSize\"])\n if 'LabelFormat' in metrichash:\n ctfColorBar.LabelFormat = metrichash[\"LabelFormat\"]\n ctfColorBar.RangeLabelFormat = metrichash[\"LabelFormat\"]\n\n imgtype=metrichash['image'].split(\"_\")[0]\n PVversion = getParaviewVersion()\n if (imgtype!=\"iso\"):\n # center\n if PVversion < 5.04:\n ctfColorBar.Position = [0.25,0.05]\n ctfColorBar.Position2 = [0.5,0] # no such property in PV 5.04\n else:\n ctfColorBar.WindowLocation = 'LowerCenter'\n else:\n # left\n if PVversion < 5.04:\n ctfColorBar.Position = [0.05,0.025]\n ctfColorBar.Position2 = [0.4,0] # no such property in PV 5.04\n else:\n ctfColorBar.WindowLocation = 'LowerLeftCorner'\n\n #if individualImages == False:\n # display.SetScalarBarVisibility(renderView1, False)\n\n\ndef createSlice(metrichash, dataReader, dataDisplay):\n camera = GetActiveCamera()\n renderView1 = GetActiveViewOrCreate('RenderView')\n\n opacity=float(metrichash['opacity'])\n bodyopacity=float(metrichash['bodyopacity'])\n dataDisplay.Opacity = bodyopacity\n dataDisplay.ColorArrayName = ['POINTS', '']\n slicetype = \"Plane\"\n plane = metrichash['plane']\n\n s = Slice(Input=dataReader)\n s.SliceType = slicetype\n s.SliceType.Origin = setviewposition(metrichash['position'], camera)\n s.SliceType.Normal = planeNormalFromName(plane)\n sDisplay = Show(s, renderView1)\n sDisplay.ColorArrayName = [None, '']\n sDisplay.SetRepresentationType('Surface')\n sDisplay.DiffuseColor = [0.0, 1.0, 0.0]\n sDisplay.Specular = 0\n sDisplay.Opacity = opacity\n colorMetric(s, metrichash)\n return s\n\n\ndef createStreamTracer(metrichash, data_reader, data_display):\n camera = GetActiveCamera()\n renderView1 = GetActiveViewOrCreate('RenderView')\n\n opacity = float(metrichash['opacity'])\n bodyopacity = float(metrichash['bodyopacity'])\n data_display.Opacity = bodyopacity\n data_display.ColorArrayName = ['POINTS', '']\n\n seedPosition = setviewposition(metrichash['position'], camera)\n if metrichash['seedType'].lower() == 'line':\n streamTracer = StreamTracer(Input=data_reader,\n SeedType='High Resolution Line Source')\n streamTracer.SeedType.Point1 = seedPosition[0:3]\n streamTracer.SeedType.Point2 = seedPosition[3:6]\n streamTracer.SeedType.Resolution = int(metrichash['resolution'])\n\n elif metrichash['seedType'].lower() == 'plane':\n # create a new 'Point Plane Interpolator' for seeding the stream lines\n pointPlaneInterpolator = PointPlaneInterpolator(Input=data_reader, Source='Bounded Plane')\n pointPlaneInterpolator.Source.Center = setviewposition(metrichash['center'], camera)\n pointPlaneInterpolator.Source.BoundingBox = seedPosition\n pointPlaneInterpolator.Source.Normal = planeNormalFromName(metrichash['plane'])\n pointPlaneInterpolator.Source.Resolution = int(metrichash['resolution'])\n UpdatePipeline()\n streamTracer = StreamTracerWithCustomSource(Input=data_reader,\n SeedSource=pointPlaneInterpolator)\n\n\n kpifld = metrichash['field'] #!!!!!!!\n streamTracer.Vectors = ['POINTS', kpifld]\n \n 
streamTracer.IntegrationDirection = metrichash['integralDirection'] # 'BACKWARD', 'FORWARD' or 'BOTH'\n streamTracer.IntegratorType = 'Runge-Kutta 4'\n # To do : Add a default value based on domain size ?\n streamTracer.MaximumStreamlineLength = float(metrichash['maxStreamLength'])\n\n\n ##\n # create a new 'Tube'\n tube = Tube(Input=streamTracer)\n tube.Radius = float(metrichash['tubeRadius'])\n # show data in view\n tubeDisplay = Show(tube, renderView1)\n # trace defaults for the display properties.\n tubeDisplay.Representation = 'Surface'\n tubeDisplay.ColorArrayName = [None, '']\n tubeDisplay.EdgeColor = [0.0, 0.0, 0.0]\n tubeDisplay.DiffuseColor = [0.0, 1.0, 0.0]\n tubeDisplay.Specular = 0\n tubeDisplay.Opacity = opacity\n\n metrichash['field'] = metrichash['colorByField']\n if 'colorByFieldComponent' in metrichash:\n metrichash['fieldComponent'] = metrichash['colorByFieldComponent']\n metrichash = correctfieldcomponent(streamTracer, metrichash)\n colorMetric(tube, metrichash)\n try:\n if metrichash['image'].split(\"_\")[1] == \"solo\":\n Hide(data_reader, renderView1)\n except:\n pass\n return tube\n\n\ndef createClip(metrichash, data_reader, data_display):\n camera = GetActiveCamera()\n renderView1 = GetActiveViewOrCreate('RenderView')\n\n opacity = float(metrichash['opacity'])\n bodyopacity = float(metrichash['bodyopacity'])\n data_display.Opacity = bodyopacity\n data_display.ColorArrayName = ['POINTS', '']\n cliptype = \"Plane\"\n plane = metrichash['plane']\n if 'invert' in metrichash.keys():\n invert = data_IO.str2bool(metrichash['invert'])\n else:\n invert = 0\n\n s = Clip(Input=data_reader)\n s.ClipType = cliptype\n s.ClipType.Origin = camera.GetFocalPoint()\n s.InsideOut = invert\n s.ClipType.Origin = setviewposition(metrichash['position'],camera)\n s.ClipType.Normal = planeNormalFromName(plane)\n sDisplay = Show(s, renderView1)\n sDisplay.ColorArrayName = [None, '']\n sDisplay.SetRepresentationType('Surface')\n sDisplay.DiffuseColor = [0.0, 1.0, 0.0]\n sDisplay.Specular = 0\n sDisplay.Opacity = opacity\n colorMetric(s, metrichash)\n try:\n if metrichash['image'].split(\"_\")[1] == \"solo\":\n Hide(data_reader, renderView1)\n except:\n pass\n return s\n\n\ndef createProbe(metrichash, data_reader):\n camera = GetActiveCamera()\n renderView1 = GetActiveViewOrCreate('RenderView')\n\n p = ProbeLocation(Input=data_reader, ProbeType='Fixed Radius Point Source')\n p.PassFieldArrays = 1\n #p.ProbeType.Center = [1.2176899909973145, 1.2191989705897868, 1.5207239668816328]\n p.ProbeType.Center = setviewposition(metrichash['position'], camera)\n p.ProbeType.NumberOfPoints = 1\n p.ProbeType.Radius = 0.0\n ps = Sphere(Radius=0.025, ThetaResolution=32)\n ps.Center = setviewposition(metrichash['position'], camera)\n psDisplay = Show(ps, renderView1)\n psDisplay.DiffuseColor = [1.0, 0.0, 0.0]\n psDisplay.Opacity = 0.8\n return p\n\n\ndef createVolume(metrichash, data_reader):\n bounds = [float(x) for x in metrichash['position'].split(\" \")]\n renderView1 = GetActiveViewOrCreate('RenderView')\n c = Clip(Input=data_reader)\n c.ClipType = 'Box'\n # (xmin,xmax,ymin,ymax,zmin,zmax)\n #c.ClipType.Bounds = [0.1, 3, 0.1, 2.3, 0.15, 2.3]\n c.ClipType.Bounds = bounds\n c.InsideOut = 1\n cDisplay = Show(c, renderView1)\n cDisplay.ColorArrayName = ['Points', metrichash['field']]\n cDisplay.SetRepresentationType('Surface')\n cDisplay.DiffuseColor = [1.0, 1.0, 0.0]\n cDisplay.Specular = 0\n cDisplay.Opacity = 0.1\n return c\n\ndef createBasic(metrichash, dataReader, dataDisplay):\n camera = 
GetActiveCamera()\n renderView1 = GetActiveViewOrCreate('RenderView')\n bodyopacity=float(metrichash['bodyopacity'])\n dataDisplay.Opacity = bodyopacity\n\n if not (metrichash['field'] == 'None'):\n colorMetric(dataReader, metrichash)\n else:\n ColorBy(dataDisplay, ('POINTS', ''))\n return dataReader\n\ndef plotLine(infile, imageName) :\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n warnings.filterwarnings('ignore')\n\n header = np.genfromtxt(infile, delimiter=',', names=True).dtype.names\n data = np.genfromtxt(infile, delimiter=',', skip_header=1)\n\n x = data[:, 0]\n y = data[:, 1]\n\n plt.figure(figsize=(10, 6))\n plt.plot(x, y)\n\n locs, labels = plt.yticks()\n plt.yticks(locs, map(lambda x: \"%g\" % x, locs))\n\n plt.xlabel('Point')\n plt.ylabel(header[1])\n # plt.title(infile.replace(\".csv\", \"\").replace(\"plot_\", \"\") + ' Plot')\n plt.grid(True)\n plt.savefig(imageName)\n\n\ndef createLine(metrichash, data_reader, outputDir=\".\", caseNumber=\"\"):\n resolution = int(metrichash['resolution'])\n try:\n image = metrichash['image']\n except:\n image = None\n\n point = [x for x in metrichash['position'].split()]\n\n camera = GetActiveCamera()\n renderView1 = GetActiveViewOrCreate('RenderView')\n\n if point[0] == \"center\":\n point[0] = camera.GetFocalPoint()[0]\n if point[3] == \"center\":\n point[3] = camera.GetFocalPoint()[0]\n if point[1] == \"center\":\n point[1] = camera.GetFocalPoint()[1]\n if point[4] == \"center\":\n point[4] = camera.GetFocalPoint()[1]\n if point[2] == \"center\":\n point[2] = camera.GetFocalPoint()[2]\n if point[5] == \"center\":\n point[5] = camera.GetFocalPoint()[2]\n \n point1=[float(point[0]),float(point[1]),float(point[2])]\n point2=[float(point[3]),float(point[4]),float(point[5])]\n l = PlotOverLine(Input=data_reader, Source='High Resolution Line Source')\n l.PassPartialArrays = 1\n l.Source.Point1 = point1\n l.Source.Point2 = point2\n l.Source.Resolution = resolution\n lDisplay = Show(l, renderView1)\n lDisplay.DiffuseColor = [1.0, 0.0, 0.0]\n lDisplay.Specular = 0\n lDisplay.Opacity = 1\n\n # Get the line data\n pl = servermanager.Fetch(l)\n\n kpifld = metrichash['field']\n kpiComp = metrichash['fieldComponent']\n if (image == \"plot\"):\n if not (os.path.exists(outputDir)):\n os.makedirs(outputDir)\n if caseNumber:\n metrichash['imageName'] = metrichash['imageName'].format(int(caseNumber))\n imageFullPath = outputDir + '/' + metrichash['imageName']\n imageName, imageExtension = os.path.splitext(imageFullPath)\n csvFileName = imageName + \".csv\"\n f=open(csvFileName,\"w\")\n f.write(\"point,\"+kpifld)\n if kpiComp:\n f.write(\"_\" + kpiComp)\n f.write(\"\\n\")\n\n METRIC_INDEX=0\n for a in range(0,pl.GetPointData().GetNumberOfArrays()):\n if kpifld == pl.GetPointData().GetArrayName(a):\n METRIC_INDEX = a\n sum=0\n num=pl.GetPointData().GetArray(METRIC_INDEX).GetNumberOfTuples()\n # Get the component numbers from the input of line filter (data_reader) (?)\n compNumber = getfldCompNumber(data_reader.PointData[kpifld], kpiComp)\n for t in range(0,num):\n dataPoint = pl.GetPointData().GetArray(METRIC_INDEX).GetTuple(t)[compNumber]\n if str(float(dataPoint)).lower() != \"nan\":\n sum += dataPoint\n if image == \"plot\":\n f.write(\",\".join([str(t), str(dataPoint)])+\"\\n\")\n if image == \"plot\":\n f.close()\n plotLine(csvFileName, imageFullPath)\n ave = sum/pl.GetPointData().GetArray(METRIC_INDEX).GetNumberOfTuples()\n return l\n\n\ndef adjustCamera(view, renderView1, metrichash):\n camera=GetActiveCamera()\n if 
view.startswith(\"iso\"):\n camera.SetFocalPoint(0, 0, 0)\n if (view == \"iso-flipped\"):\n camera.SetPosition(0, 1, 0)\n else:\n camera.SetPosition(0, -1, 0)\n renderView1.ResetCamera()\n # adjust for scale margin\n camera.SetFocalPoint(camera.GetFocalPoint()[0],camera.GetFocalPoint()[1],camera.GetFocalPoint()[2]-0.25)\n camera.SetPosition(camera.GetPosition()[0],camera.GetPosition()[1],camera.GetPosition()[2]-1)\n camera.Elevation(45)\n camera.Azimuth(45)\n elif view == \"+X\" or view == \"+x\" or view == \"back\": \n camera.SetFocalPoint(0,0,0)\n camera.SetPosition(1,0,0)\n renderView1.ResetCamera()\n elif view == \"-X\" or view == \"-x\" or view == \"front\": \n camera.SetFocalPoint(0,0,0)\n camera.SetPosition(-1,0,0)\n renderView1.ResetCamera()\n elif view == \"+Y\" or view == \"+y\" or view == \"right\": \n camera.SetFocalPoint(0,0,0)\n camera.SetPosition(0,1,0)\n renderView1.ResetCamera()\n elif view == \"-Y\" or view == \"-y\" or view == \"left\": \n camera.SetFocalPoint(0,0,0)\n camera.SetPosition(0,-1,0)\n renderView1.ResetCamera()\n elif view == \"+Z\" or view == \"+z\" or view == \"top\": \n camera.SetFocalPoint(0,0,0)\n camera.SetPosition(0,0,1)\n renderView1.ResetCamera()\n # camera.Roll(90)\n elif view == \"-Z\" or view == \"-z\" or view == \"bottom\": \n camera.SetFocalPoint(0,0,0)\n camera.SetPosition(0,0,-1)\n renderView1.ResetCamera()\n # camera.Roll(-90)\n elif view == \"customize\":\n renderView1.InteractionMode = '3D'\n renderView1.CameraPosition = data_IO.read_floats_from_string(metrichash[\"CameraPosition\"])\n renderView1.CameraFocalPoint = data_IO.read_floats_from_string(metrichash[\"CameraFocalPoint\"])\n renderView1.CameraViewUp = data_IO.read_floats_from_string(metrichash[\"CameraViewUp\"])\n renderView1.CameraParallelScale = float(metrichash[\"CameraParallelScale\"])\n renderView1.CameraParallelProjection = int(metrichash[\"CameraParallelProjection\"])\n\n\ndef makeAnimation(outputDir, kpi, magnification, animationName, deleteFrames=True):\n animationFramesDir = outputDir + '/animFrames'\n if not (os.path.exists(animationFramesDir)):\n os.makedirs(animationFramesDir)\n\n WriteAnimation(animationFramesDir + \"/out_\" + kpi + \".png\", Magnification=magnification, FrameRate=15.0,\n Compression=False)\n\n subprocess.call([\"convert\", \"-delay\", \"15\", \"-loop\", \"0\",\n animationFramesDir + \"/out_\" + kpi + \".*.png\",\n outputDir + \"/\" + animationName])\n\n if deleteFrames:\n shutil.rmtree(animationFramesDir)\n\n\ndef exportx3d(outputDir,kpi, metricObj, dataReader, renderBody, blenderContext):\n\n blenderFramesDir = outputDir + kpi + '_blender'\n\n if not (os.path.exists(blenderFramesDir)):\n os.makedirs(blenderFramesDir)\n\n try:\n TimeSteps = getTimeSteps()\n firstTimeStep = TimeSteps[0]\n renderView1 = GetActiveViewOrCreate('RenderView')\n renderView1.ViewTime = firstTimeStep\n for num, time in enumerate(TimeSteps):\n name_solo = blenderFramesDir + '/' + str(num) + '_solo.x3d'\n Show(metricObj, renderView1)\n Hide(dataReader, renderView1)\n ExportView(name_solo, view=renderView1)\n if renderBody == \"true\":\n name_body = blenderFramesDir + '/' + str(num) + '_body.x3d'\n Show(dataReader, renderView1)\n Hide(metricObj, renderView1)\n ExportView(name_body, view=renderView1)\n animationScene1 = GetAnimationScene()\n animationScene1.GoToNext()\n except:\n renderView1 = GetActiveViewOrCreate('RenderView')\n name_body = blenderFramesDir + '/' + 'body.x3d'\n Show(dataReader, renderView1)\n ExportView(name_body, view=renderView1)\n \n if blenderContext != None 
and len(blenderContext) > 0:\n for i in blenderContext:\n dataReaderTmp = readDataFile(i, None)\n renderViewTmp = CreateView('RenderView')\n readerDisplayTmp = Show(dataReaderTmp, renderViewTmp)\n name_body = blenderFramesDir + '/' + os.path.splitext(os.path.basename(i))[0] + '.x3d'\n ExportView(name_body, view=renderViewTmp)\n\n # tar the directory\n data_IO.tarDirectory(blenderFramesDir + \".tar\", blenderFramesDir)\n shutil.rmtree(blenderFramesDir)\n\ndef saveSTLfile(renderView,filename,magnification,quality):\n adjustCamera(\"iso\", renderView, None, \"false\")\n SaveScreenshot(filename, magnification=magnification, quality=quality)\n \n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"numpy.genfromtxt",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel"
]
] |
pshiko/io
|
[
"a1793e6b41ed7a8db572249aba15a8e513a348a5"
] |
[
"tensorflow_io/core/python/experimental/serialization_ops.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Serialization Ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_io.core.python.ops import core_ops\n\n# _NamedTensorSpec allows adding a `named` key while traversing,\n# so that it is possible to build up the `/R/Foo` JSON Pointers.\nclass _NamedTensorSpec(tf.TensorSpec):\n \"\"\"_NamedTensorSpec\"\"\"\n def named(self, named=None):\n if named is not None:\n self._named = named\n return self._named\n\n# named_spec updates named field for JSON Pointers while traversing.\ndef named_spec(specs, name=''):\n \"\"\"named_spec\"\"\"\n if isinstance(specs, _NamedTensorSpec):\n specs.named(name)\n return\n\n if isinstance(specs, dict):\n for k in specs.keys():\n named_spec(specs[k], \"{}/{}\".format(name, k))\n return\n\n for k, _ in enumerate(specs):\n named_spec(specs[k], \"{}/{}\".format(name, k))\n return\n\n\ndef decode_json(json, specs, name=None):\n \"\"\"\n Decode JSON string into Tensors.\n\n TODO: support batch (1-D) input\n\n Args:\n json: A String Tensor. The JSON strings to decode.\n specs: A structured TensorSpecs describing the signature\n of the JSON elements.\n name: A name for the operation (optional).\n\n Returns:\n A structured Tensors.\n \"\"\"\n # Make a copy of specs to keep the original specs\n named = tf.nest.map_structure(lambda e: _NamedTensorSpec(e.shape, e.dtype), specs)\n named_spec(named)\n named = tf.nest.flatten(named)\n names = [e.named() for e in named]\n shapes = [e.shape for e in named]\n dtypes = [e.dtype for e in named]\n\n values = core_ops.io_decode_json(json, names, shapes, dtypes, name=name)\n return tf.nest.pack_sequence_as(specs, values)\n"
] |
[
[
"tensorflow.nest.pack_sequence_as",
"tensorflow.nest.flatten"
]
] |
vinceHardy/learning
|
[
"2c207029e7c93807fe57b0a4ae098c8afe38a661"
] |
[
"project/reports/global_warming/myutils.py"
] |
[
"import pandas as pd\nimport os.path\nimport matplotlib.pyplot as plt\n\ndef makeTimeSeries(df):\n ts = pd.to_datetime(df.dt)\n df.index = ts\n return df.drop('dt', axis=1)\n\ndef differenciate(X):\n diff = list()\n for i in range(1, len(X)):\n value = X[i] - X[i - 1]\n diff.append(value)\n X_diff=pd.DataFrame(diff)\n X_diff.index=X.index[1:]\n X_diff=X_diff[0]\n return X_diff\n\nfrom statsmodels.tsa.stattools import adfuller\ndef test_stationarity(timeseries):\n \n #Determing rolling statistics\n rolmean = pd.rolling_mean(timeseries, window=10)\n rolstd = pd.rolling_std(timeseries, window=10)\n\n #Plot rolling statistics:\n plt.figure(figsize=(16,8))\n orig = plt.plot(timeseries, color='blue',label='Original')\n mean = plt.plot(rolmean, color='red', label='Rolling Mean')\n std = plt.plot(rolstd, color='black', label = 'Rolling Std')\n plt.xlabel('years',fontsize=16)\n plt.ylabel('Temperature, °C',fontsize=16)\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation',fontsize=24)\n plt.show(block=False)\n \n #Perform Dickey-Fuller test:\n print('Results of Dickey-Fuller Test:')\n dftest = adfuller(timeseries, autolag='AIC')\n dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])\n for key,value in dftest[4].items():\n dfoutput['Critical Value (%s)'%key] = value\n print(dfoutput)"
] |
[
[
"pandas.to_datetime",
"pandas.rolling_mean",
"pandas.rolling_std",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"pandas.Series",
"matplotlib.pyplot.show"
]
] |
pystudent1913/proyecto-reconocimiento-facial
|
[
"881fb2f724b43b93b224dd591e250e0f2f078764"
] |
[
"recognize_video.py"
] |
[
"# USAGE\n# python recognize_video.py --detector face_detection_model \\\n#\t--embedding-model openface_nn4.small2.v1.t7 \\\n#\t--recognizer output/recognizer.pickle \\\n#\t--le output/le.pickle\n\n# import the necessary packages\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport time\nimport cv2\nimport os\nimport requests\nimport json \n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--detector\", required=True,\n\thelp=\"path to OpenCV's deep learning face detector\")\nap.add_argument(\"-m\", \"--embedding-model\", required=True,\n\thelp=\"path to OpenCV's deep learning face embedding model\")\nap.add_argument(\"-r\", \"--recognizer\", required=True,\n\thelp=\"path to model trained to recognize faces\")\nap.add_argument(\"-l\", \"--le\", required=True,\n\thelp=\"path to label encoder\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\n# load our serialized face detector from disk\nprint(\"[INFO] loading face detector...\")\nprotoPath = os.path.sep.join([args[\"detector\"], \"deploy.prototxt\"])\nmodelPath = os.path.sep.join([args[\"detector\"],\n\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\ndetector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n\n# load our serialized face embedding model from disk\nprint(\"[INFO] loading face recognizer...\")\nembedder = cv2.dnn.readNetFromTorch(args[\"embedding_model\"])\n\n# load the actual face recognition model along with the label encoder\nrecognizer = pickle.loads(open(args[\"recognizer\"], \"rb\").read())\nle = pickle.loads(open(args[\"le\"], \"rb\").read())\n\n# initialize the video stream, then allow the camera sensor to warm up\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\ntime.sleep(2.0)\n\n# start the FPS throughput estimator\nfps = FPS().start()\n\ncontador = 0\nfinded = False\n\n\n# variable to handle the login\nisLogged = False\nprobability = 0.0\nuser=\"\"\n\n\ndef handleLoggin(username):\n\tprint(\"\"\"\n\tFUISTE LOGEADO CON EXITO\n\t\n\tHOLA CRISTIAN FABRIZIO SOTOMAYOR GONZALES\n\n\tCODIGO 20162019\n\n\n\t\"\"\")\n\tprint('username', username)\n\tres = requests.get('http://127.0.0.1:5000/')\n\tresponse = json.loads(res.text)\n\tprint('response', response)\n\n\niterar = True\n# loop over frames from the video file stream\nwhile True:\n\t# grab the frame from the threaded video stream\n\tframe = vs.read()\n\n\t# resize the frame to have a width of 600 pixels (while\n\t# maintaining the aspect ratio), and then grab the image\n\t# dimensions\n\tframe = imutils.resize(frame, width=600)\n\t(h, w) = frame.shape[:2]\n\n\t# construct a blob from the image\n\timageBlob = cv2.dnn.blobFromImage(\n\t\tcv2.resize(frame, (300, 300)), 1.0, (300, 300),\n\t\t(104.0, 177.0, 123.0), swapRB=False, crop=False)\n\n\t# apply OpenCV's deep learning-based face detector to localize\n\t# faces in the input image\n\tdetector.setInput(imageBlob)\n\tdetections = detector.forward()\n\n\t# loop over the detections\n\tfor i in range(0, detections.shape[2]):\n\t\t# extract the confidence (i.e., probability) associated with\n\t\t# the prediction\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t# filter out weak detections\n\t\tif confidence > args[\"confidence\"]:\n\t\t\t# compute the (x, y)-coordinates of the bounding box for\n\t\t\t# the face\n\t\t\tbox = 
detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# extract the face ROI\n\t\t\tface = frame[startY:endY, startX:endX]\n\t\t\t(fH, fW) = face.shape[:2]\n\n\t\t\t# ensure the face width and height are sufficiently large\n\t\t\tif fW < 20 or fH < 20:\n\t\t\t\tcontinue\n\n\t\t\t# construct a blob for the face ROI, then pass the blob\n\t\t\t# through our face embedding model to obtain the 128-d\n\t\t\t# quantification of the face\n\t\t\tfaceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,\n\t\t\t\t(96, 96), (0, 0, 0), swapRB=True, crop=False)\n\t\t\tembedder.setInput(faceBlob)\n\t\t\tvec = embedder.forward()\n\n\t\t\t# perform classification to recognize the face\n\t\t\tpreds = recognizer.predict_proba(vec)[0]\n\t\t\tj = np.argmax(preds)\n\t\t\tproba = preds[j]\n\t\t\tname = le.classes_[j]\n\n\n\t\t\t# draw the bounding box of the face along with the\n\t\t\t# associated probability\n\t\t\tif isLogged == False:\n\t\t\t\ttext = \"{}: {:.2f}%\".format(name, proba * 100)\n\t\t\telse:\n\t\t\t\ttext = \"{}: {:.2f}% -- LOGGED\".format(user, probability)\n\n\t\t\ty = startY - 10 if startY - 10 > 10 else startY + 10\n\n\t\t\tif isLogged == False:\n\t\t\t\tif(name == 'cristian' and proba > 0.5):\n\t\t\t\t\tprint('hola', contador)\n\t\t\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY),\n\t\t\t\t\t\t(224, 0, 0), 2)\n\t\t\t\t\tcv2.putText(frame, text, (startX, y),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\t\t\t\t\t# finded = True\n\t\t\t\t\tprint('apagate')\n\t\t\t\t\t# break\n\t\t\t\t\t\n\t\t\t\t\tif(isLogged is not True):\n\t\t\t\t\t\tisLogged = True\n\t\t\t\t\t\tprobability = proba * 100\n\t\t\t\t\t\tuser = name\n\t\t\t\t\t\thandleLoggin(name)\n\n\t\t\t\telse:\n\t\t\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY),\n\t\t\t\t\t\t(0, 0, 255), 2)\n\t\t\t\t\tcv2.putText(frame, text, (startX, y),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY),\n\t\t\t\t\t\t(0, 255, 0), 2)\n\t\t\t\tcv2.putText(frame, text, (startX, y),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)\n\n\tif finded:\n\t\tbreak\n\tcontador = contador + 1\n\t# update the FPS counter\n\tfps.update()\n\n\t# show the output frame\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n\n# stop the timer and display FPS information\nfps.stop()\nprint(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()"
] |
[
[
"numpy.array",
"numpy.argmax"
]
] |
ana-kuznetsova/espnet
|
[
"263a9ba04b626fa46442d6679531ce98c7afa9df"
] |
[
"espnet2/samplers/num_elements_batch_sampler.py"
] |
[
"from typing import Iterator\nfrom typing import List\nfrom typing import Tuple\nfrom typing import Union\n\nimport numpy as np\nfrom typeguard import check_argument_types\n\nfrom espnet2.fileio.read_text import load_num_sequence_text\nfrom espnet2.samplers.abs_sampler import AbsSampler\n\n\nclass NumElementsBatchSampler(AbsSampler):\n def __init__(\n self,\n batch_bins: int,\n shape_files: Union[Tuple[str, ...], List[str]],\n min_batch_size: int = 1,\n sort_in_batch: str = \"descending\",\n sort_batch: str = \"ascending\",\n drop_last: bool = False,\n padding: bool = True,\n ):\n assert check_argument_types()\n assert batch_bins > 0\n if sort_batch != \"ascending\" and sort_batch != \"descending\":\n raise ValueError(\n f\"sort_batch must be ascending or descending: {sort_batch}\"\n )\n if sort_in_batch != \"descending\" and sort_in_batch != \"ascending\":\n raise ValueError(\n f\"sort_in_batch must be ascending or descending: {sort_in_batch}\"\n )\n\n self.batch_bins = batch_bins\n self.shape_files = shape_files\n self.sort_in_batch = sort_in_batch\n self.sort_batch = sort_batch\n self.drop_last = drop_last\n\n # utt2shape: (Length, ...)\n # uttA 100,...\n # uttB 201,...\n utt2shapes = [\n load_num_sequence_text(s, loader_type=\"csv_int\") for s in shape_files\n ]\n\n first_utt2shape = utt2shapes[0]\n for s, d in zip(shape_files, utt2shapes):\n if set(d) != set(first_utt2shape):\n raise RuntimeError(\n f\"keys are mismatched between {s} != {shape_files[0]}\"\n )\n\n #JD - fix nan grad issue by filtering utterances where the length of the text in tokens\n # is less than the length of the audio, downsampled by a factor of 4\n tmp_utt2shapes_0 = dict()\n tmp_utt2shapes_1 = dict()\n \n for k in first_utt2shape:\n # assuming that the first shape file is speech shape, second is text shape\n # this order is hard-coded into asr.sh in the TEMPLATE experiment\n if utt2shapes[1][k][0]+1 < utt2shapes[0][k][0]//5:\n tmp_utt2shapes_0[k] = utt2shapes[0][k]\n tmp_utt2shapes_1[k] = utt2shapes[1][k]\n \n num_filtered = len(first_utt2shape) - len(tmp_utt2shapes_0)\n print(\"filtered \" + str(num_filtered) + \" utterances out of \" + str(len(first_utt2shape)), flush=True)\n utt2shapes = [tmp_utt2shapes_0, tmp_utt2shapes_1]\n first_utt2shape = tmp_utt2shapes_0\n\n # Sort samples in ascending order\n # (shape order should be like (Length, Dim))\n keys = sorted(first_utt2shape, key=lambda k: first_utt2shape[k][0])\n if len(keys) == 0:\n raise RuntimeError(f\"0 lines found: {shape_files[0]}\")\n if padding:\n # If padding case, the feat-dim must be same over whole corpus,\n # therefore the first sample is referred\n feat_dims = [np.prod(d[keys[0]][1:]) for d in utt2shapes]\n else:\n feat_dims = None\n\n # Decide batch-sizes\n batch_sizes = []\n current_batch_keys = []\n for key in keys:\n current_batch_keys.append(key)\n # shape: (Length, dim1, dim2, ...)\n if padding:\n for d, s in zip(utt2shapes, shape_files):\n if tuple(d[key][1:]) != tuple(d[keys[0]][1:]):\n raise RuntimeError(\n \"If padding=True, the \"\n f\"feature dimension must be unified: {s}\",\n )\n bins = sum(\n len(current_batch_keys) * sh[key][0] * d\n for sh, d in zip(utt2shapes, feat_dims)\n )\n else:\n bins = sum(\n np.prod(d[k]) for k in current_batch_keys for d in utt2shapes\n )\n\n if bins > batch_bins and len(current_batch_keys) >= min_batch_size:\n batch_sizes.append(len(current_batch_keys))\n current_batch_keys = []\n else:\n if len(current_batch_keys) != 0 and (\n not self.drop_last or len(batch_sizes) == 0\n ):\n 
batch_sizes.append(len(current_batch_keys))\n\n if len(batch_sizes) == 0:\n # Maybe we can't reach here\n raise RuntimeError(\"0 batches\")\n\n # If the last batch-size is smaller than minimum batch_size,\n # the samples are redistributed to the other mini-batches\n if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size:\n for i in range(batch_sizes.pop(-1)):\n batch_sizes[-(i % len(batch_sizes)) - 1] += 1\n\n if not self.drop_last:\n # Bug check\n assert sum(batch_sizes) == len(keys), f\"{sum(batch_sizes)} != {len(keys)}\"\n\n # Set mini-batch\n self.batch_list = []\n iter_bs = iter(batch_sizes)\n bs = next(iter_bs)\n minibatch_keys = []\n for key in keys:\n minibatch_keys.append(key)\n if len(minibatch_keys) == bs:\n if sort_in_batch == \"descending\":\n minibatch_keys.reverse()\n elif sort_in_batch == \"ascending\":\n # Key are already sorted in ascending\n pass\n else:\n raise ValueError(\n \"sort_in_batch must be ascending\"\n f\" or descending: {sort_in_batch}\"\n )\n\n self.batch_list.append(tuple(minibatch_keys))\n minibatch_keys = []\n try:\n bs = next(iter_bs)\n except StopIteration:\n break\n\n if sort_batch == \"ascending\":\n pass\n elif sort_batch == \"descending\":\n self.batch_list.reverse()\n else:\n raise ValueError(\n f\"sort_batch must be ascending or descending: {sort_batch}\"\n )\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(\"\n f\"N-batch={len(self)}, \"\n f\"batch_bins={self.batch_bins}, \"\n f\"sort_in_batch={self.sort_in_batch}, \"\n f\"sort_batch={self.sort_batch})\"\n )\n\n def __len__(self):\n return len(self.batch_list)\n\n def __iter__(self) -> Iterator[Tuple[str, ...]]:\n return iter(self.batch_list)\n"
] |
[
[
"numpy.prod"
]
] |
Datacket/Invado
|
[
"20ca439d9a3151fd97e85c87e6dc264152410aea"
] |
[
"core.py"
] |
[
"import json \nimport pandas as pd\nimport numpy as np\nimport pymysql\nimport pymysql.cursors as pycurse\nfrom datetime import datetime\nfrom model_animal_tracking import *\nimport tensorflow as tf\nfrom io import StringIO\nfrom datetime import timedelta\nfrom flask import Flask,jsonify,request\nfrom sklearn.preprocessing import OneHotEncoder\napp=Flask(__name__)\n@app.route(\"/save\",methods=[\"POST\"])\ndef reply():\n #lat,long,city,date of sighting, time of sighting, species, month of sighting\n #float,float,string,date,str(M,A,E,N),\n lat=request.args.get('lat',None)\n lon=request.args.get('lon',None)\n tos=request.args.get('tos',None)\n dos=request.args.get('dos')\n print(dos)\n dt1=datetime.strptime(dos,'%Y-%m-%d %H:%M:%S')\n dos=str(dos).split(' ')[0]\n mos=int(dos.split('-')[1])\n spec=request.args.get('spec',None)\n dt2=datetime.now()\n try:\n conn=pymysql.connect(host=\"127.0.0.1\",user=\"root\",db='details',password=\"891998\",cursorclass=pycurse.DictCursor)\n with conn.cursor() as cur:\n sql=\"INSERT INTO DETAILS (date,lat,lon,tos,spec,mos) VALUES(\\'{}\\',{},{},\\'{}\\',\\'{}\\',{})\".format(*list(map(str,[dos,lat,lon,tos,spec,mos])))\n cur.execute(sql)\n conn.commit()\n return jsonify({\"Status\":200})\n except Exception as e:\n return jsonify({\"Status\":str(e)})\n var=model.fit(list(map(str,[lat,lon,tos,spec,mos])))\ndef lat_long(tup, list_long_lang, radius):\n fres = []\n for l in list_long_lang:\n dis_for_l = edis(tup, l)\n if is_short_enough(dis_for_l, radius):\n fres.append(l)\n if len(fres) == 15:\n break\n return fres\n #return sorted(fres)[:15]\n\ndef edis(X, Y):\n x1, y1, x2, y2 = X[0], X[1], Y[0], Y[1]\n return np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))\n\ndef is_short_enough(deg_dist, radius):\n dist_in_km = np.cos(deg_dist) * 110\n return True if dist_in_km < radius else False\nfrom tqdm import tqdm\n@app.route(\"/\",methods=[\"GET\"])\ndef get():\n centre=list(map(float,[request.args.get('lat',None),request.args.get('lon',None)]))\n date=request.args.get('dos',None)\n mos=int(date.split('-')[1])\n print(\"Hello world!\")\n if True:\n conn=pymysql.connect(host=\"127.0.0.1\",user=\"root\",db='details',password=\"891998\",cursorclass=pycurse.DictCursor)\n with conn.cursor() as curr:\n sql=\"SELECT * FROM DETAILS\"\n curr.execute(sql)\n result=curr.fetchall()\n latitude=[]\n longitude=[]\n print(\"Hello world!\")\n for i in tqdm(result):\n latitude.append(i['lat'])\n longitude.append(i['lon'])\n l=list(zip(latitude,longitude))\n lt_ln=lat_long(centre,l,5)\n df=pd.DataFrame(result)\n df[\"spec\"] = df[\"spec\"].apply(lambda x : x.lower())\n df[\"spec\"] = df[\"spec\"].apply(lambda x : \"snake\" if x == \"cobra\" else x)\n spec_copy = df[\"spec\"].copy()\n df[\"spec\"]=df[\"spec\"].apply(str.lower).astype(\"category\").cat.codes\n df[\"tos\"]=df[\"tos\"].astype(\"category\").cat.codes\n\n oh1=OneHotEncoder().fit(np.array(df[\"spec\"]).reshape(-1,1))\n l=oh1.transform(np.array(df[\"spec\"]).reshape(-1,1)).toarray()\n #l=l[:,1:]\n oh2=OneHotEncoder().fit(np.array(df[\"tos\"]).reshape(-1,1))\n l2=oh2.transform(np.array(df[\"tos\"]).reshape(-1,1)).toarray()\n #l2=l2[:,1:]\n s2=np.concatenate([np.array(df[\"lat\"]).reshape(-1,1),np.array(df[\"lon\"]).reshape(-1,1),np.array(df[\"mos\"]).reshape(-1,1),l2],axis=1)\n wlc=WildlifeCraziness(s2.shape[1],l.shape[1])\n wlc.load_dataset(s2,l)\n print(\"Hello World!!\")\n wlc.fit()\n print(\"World\")\n dat=[np.array(centre[0]).reshape(-1,1),np.array(centre[1]).reshape(-1,1),np.array(mos).reshape(-1,1)]\n test={}\n for i in 
\"MEAN\":\n #dat.append(np.array(l2.transform(i)).reshape(-1,1))\n if i == 'A':\n arr = [1, 0, 0, 0]\n elif i == 'E':\n arr = [0, 1, 0, 0]\n elif i == 'M':\n arr = [0, 0, 1, 0]\n else:\n arr = [0, 0, 0, 1]\n l=sorted(set(spec_copy))\n #print (l)\n #print(np.concatenate([np.array(dat).reshape(-1,1),np.array(arr).reshape(-1,1)]).shape)\n \n prediction=wlc.predict(np.concatenate([np.array(dat).reshape(-1,1),np.array(arr).reshape(-1,1)]).T, l)\n test[i]=prediction\n test[\"lat_ln\"]=lt_ln\n return jsonify(test)\n # l2 as JSON \n #except Exception as e:\n # return jsonify({\"Status\":str(e)})\n \napp.run(host=\"0.0.0.0\",port=10400,debug=True)\n"
] |
[
[
"numpy.square",
"numpy.array",
"pandas.DataFrame",
"numpy.cos",
"sklearn.preprocessing.OneHotEncoder"
]
] |
ka5par/MIR
|
[
"ca8d9ee84435299f680b158d9c92c2b6e47682b3"
] |
[
"pop_music_highlighter/lib.py"
] |
[
"import os.path\n\nimport numpy as np\nimport librosa\nfrom pydub import AudioSegment\n\n\ndef chunk(incoming, n_chunk):\n input_length = incoming.shape[1]\n chunk_length = input_length // n_chunk\n outputs = []\n for i in range(incoming.shape[0]):\n for j in range(n_chunk):\n outputs.append(incoming[i, j*chunk_length:(j+1)*chunk_length, :])\n outputs = np.array(outputs)\n return outputs\n\n\ndef audio_read(f):\n\n y, sr = librosa.core.load(\"data\" + os.path.sep + f.name, sr=22050)\n d = librosa.core.get_duration(y=y, sr=sr)\n S = librosa.feature.melspectrogram(y, sr=sr, n_fft=2048, hop_length=512, n_mels=128)\n S = np.transpose(np.log(1+10000*S))\n S = np.expand_dims(S, axis=0)\n return y, S, int(d)\n\n\ndef positional_encoding(batch_size, n_pos, d_pos):\n # keep dim 0 for padding token position encoding zero vector\n position_enc = np.array([\n [pos / np.power(10000, 2 * (j // 2) / d_pos) for j in range(d_pos)]\n if pos != 0 else np.zeros(d_pos) for pos in range(n_pos)])\n\n position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i\n position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1\n position_enc = np.tile(position_enc, [batch_size, 1, 1])\n return position_enc\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.log",
"numpy.zeros",
"numpy.tile",
"numpy.power",
"numpy.cos",
"numpy.expand_dims"
]
] |
benjaminysmith/covidcast-indicators
|
[
"b1474cd68a1497166fefe4beffd4d5ff867b9a61"
] |
[
"quidel_covidtest/delphi_quidel_covidtest/pull.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Simply downloads email attachments.\n\nUses this handy package: https://pypi.org/project/imap-tools/\n\"\"\"\nimport io\nfrom os.path import join\nimport os\nfrom datetime import datetime, timedelta\n\nimport pandas as pd\nimport numpy as np\n\nfrom imap_tools import MailBox, A, AND\n\ndef get_from_email(start_date, end_date, mail_server,\n account, sender, password):\n \"\"\"\n Get raw data from email account.\n\n Args:\n start_date: datetime.datetime\n pull data from email received from the start date\n end_date: datetime.datetime\n pull data from email received on/before the end date\n mail_server: str\n account: str\n email account to receive new data\n sender: str\n email account of the sender\n password: str\n password of the datadrop email\n output:\n df: pd.DataFrame\n \"\"\"\n time_flag = None\n df = pd.DataFrame(columns=['SofiaSerNum', 'TestDate', 'Facility', 'City',\n 'State', 'Zip', 'PatientAge', 'Result1', 'Result2',\n 'OverallResult', 'County', 'FacilityType', 'Assay',\n 'SCO1', 'SCO2', 'CLN', 'CSN', 'InstrType',\n 'StorageDate', 'ResultId', 'SarsTestNumber'])\n with MailBox(mail_server).login(account, password, 'INBOX') as mailbox:\n for search_date in [start_date + timedelta(days=x)\n for x in range((end_date - start_date).days + 1)]:\n for message in mailbox.fetch(A(AND(date=search_date.date(), from_=sender))):\n for att in message.attachments:\n name = att.filename\n # Only consider covid tests\n if \"Sars\" not in name:\n continue\n print(\"Pulling data received on %s\"%search_date.date())\n toread = io.BytesIO()\n toread.write(att.payload)\n toread.seek(0) # reset the pointer\n newdf = pd.read_excel(toread) # now read to dataframe\n df = df.append(newdf)\n time_flag = search_date\n return df, time_flag\n\ndef fix_zipcode(df):\n \"\"\"Fix zipcode that is 9 digit instead of 5 digit.\"\"\"\n zipcode5 = []\n fixnum = 0\n for zipcode in df['Zip'].values:\n if isinstance(zipcode, str) and '-' in zipcode:\n zipcode5.append(int(zipcode.split('-')[0]))\n fixnum += 1\n else:\n zipcode = int(float(zipcode))\n zipcode5.append(zipcode)\n df['zip'] = zipcode5\n # print('Fixing %.2f %% of the data' % (fixnum * 100 / len(zipcode5)))\n return df\n\ndef fix_date(df):\n \"\"\"\n Remove invalid dates and select correct test date to use.\n\n Quidel Covid Test are labeled with Test Date and Storage Date. In principle,\n the TestDate should reflect when the test was performed and the StorageDate\n when the test was logged in the MyVirena cloud storage device. We expect\n that the test date should precede the storage date by several days. 
However,\n in the actual data the test date can be far earlier than the storage date\n and the test date can also occur after the storage date.\n\n - For most of the cases, use test date as the timestamp\n - Remove tests with a storage date which is earlier than the test date\n - If the storage date is 90 days later than the test date, the storage\n will be adopted instead\n \"\"\"\n df.insert(2, \"timestamp\", df[\"TestDate\"])\n\n mask = df[\"TestDate\"] <= df[\"StorageDate\"]\n print(\"Removing %.2f%% of unusual data\" % ((len(df) - np.sum(mask)) * 100 / len(df)))\n df = df[mask]\n\n mask = df[\"StorageDate\"] - df[\"TestDate\"] > pd.Timedelta(days=90)\n print(\"Fixing %.2f%% of outdated data\" % (np.sum(mask) * 100 / len(df)))\n df[\"timestamp\"].values[mask] = df[\"StorageDate\"].values[mask]\n return df\n\ndef preprocess_new_data(start_date, end_date, mail_server, account,\n sender, password, test_mode):\n \"\"\"\n Pull and pre-process Quidel Covid Test data from datadrop email.\n\n Drop unnecessary columns. Temporarily consider the positive rate\n sensor only which is related to number of total tests and number\n of positive tests.\n\n Args:\n start_date: datetime.datetime\n pull data from email received from the start date\n end_date: datetime.datetime\n pull data from email received on/before the end date\n mail_server: str\n account: str\n email account to receive new data\n sender: str\n email account of the sender\n password: str\n password of the datadrop email\n test_mode: bool\n pull raw data from email or not\n output:\n df: pd.DataFrame\n time_flag: datetime.date:\n the actual pull end date on which we successfully pull the data\n \"\"\"\n if test_mode:\n test_data_dir = \"./test_data/test_data.xlsx\"\n df, time_flag = pd.read_excel(test_data_dir), datetime(2020, 8, 17)\n else:\n # Get new data from email\n df, time_flag = get_from_email(start_date, end_date, mail_server,\n account, sender, password)\n\n # No new data can be pulled\n if time_flag is None:\n return df, time_flag\n\n # Fix some of the fipcodes that are 9 digit instead of 5 digit\n df = fix_zipcode(df)\n\n # Create a column CanonicalDate according to StarageDate and TestDate\n df = fix_date(df)\n\n # Compute overallPositive\n overall_pos = df[df[\"OverallResult\"] == \"positive\"].groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)['OverallResult'].count()\n overall_pos[\"positiveTest\"] = overall_pos[\"OverallResult\"]\n overall_pos.drop(labels=\"OverallResult\", axis=\"columns\", inplace=True)\n\n # Compute overallTotal\n overall_total = df.groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)['OverallResult'].count()\n overall_total[\"totalTest\"] = overall_total[\"OverallResult\"]\n overall_total.drop(labels=\"OverallResult\", axis=\"columns\", inplace=True)\n\n # Compute numUniqueDevices\n numUniqueDevices = df.groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)[\"SofiaSerNum\"].agg({\"SofiaSerNum\": \"nunique\"}).rename(\n columns={\"SofiaSerNum\": \"numUniqueDevices\"}\n )\n\n df_merged = overall_total.merge(\n numUniqueDevices, on=[\"timestamp\", \"zip\"], how=\"left\"\n ).merge(\n overall_pos, on=[\"timestamp\", \"zip\"], how=\"left\"\n ).fillna(0).drop_duplicates()\n\n\n return df_merged, time_flag\n\ndef check_intermediate_file(cache_dir, pull_start_date):\n \"\"\"Check whether there is a cache file containing historical data already.\"\"\"\n for filename in os.listdir(cache_dir):\n if \".csv\" in filename:\n pull_start_date = 
datetime.strptime(filename.split(\"_\")[2].split(\".\")[0],\n '%Y%m%d') + timedelta(days=1)\n previous_df = pd.read_csv(os.path.join(cache_dir, filename),\n sep=\",\", parse_dates=[\"timestamp\"])\n return previous_df, pull_start_date\n return None, pull_start_date\n\ndef pull_quidel_covidtest(params):\n \"\"\"\n Pull the quidel covid test data and ecide whether to combine the new data with stored historical records in ./cache.\n\n Parameters:\n params: dict\n including all the information read from params.json\n end_from_today_minus: int\n report data until - X days\n export_day_range: int\n number of dates to report\n\n Returns:\n DataFrame:\n A data frame containinig the pre-process data with columns:\n timestamp, numUniqueDevices, positiveTest, totalTest\n datetime.datetime\n the first date of the report\n datetime.datetime\n the last date of the report\n \"\"\"\n cache_dir = params[\"cache_dir\"]\n\n mail_server = params[\"mail_server\"]\n account = params[\"account\"]\n password = params[\"password\"]\n sender = params[\"sender\"]\n\n test_mode = (params[\"mode\"] == \"test\")\n\n # pull new data only that has not been ingested\n previous_df, pull_start_date = check_intermediate_file(\n cache_dir,\n datetime.strptime(params[\"pull_start_date\"], '%Y-%m-%d'))\n\n if params[\"pull_end_date\"] == \"\":\n pull_end_date = datetime.today()\n else:\n pull_end_date = datetime.strptime(params[\"pull_end_date\"], '%Y-%m-%d')\n\n # Pull data from the email at 5 digit zipcode level\n # Use _end_date to check the most recent date that we received data\n df, _end_date = preprocess_new_data(\n pull_start_date, pull_end_date, mail_server,\n account, sender, password, test_mode)\n\n # Utilize previously stored data\n if previous_df is not None:\n df = previous_df.append(df).groupby([\"timestamp\", \"zip\"]).sum().reset_index()\n return df, _end_date\n\ndef check_export_end_date(input_export_end_date, _end_date,\n end_from_today_minus):\n \"\"\"\n Update the export_end_date according to the data received.\n\n By default, set the export end date to be the last pulling date - 5 days\n (end_from_today_minus = 5).\n Otherwise, use the required date if it is earlier than the default one.\n\n Parameter:\n input_export_end_date: str\n read from params\n _end_date: datetime.datetime\n updated according the data received\n end_from_today_minus: int\n report data until - X days\n\n Returns:\n datetime.datetime\n export data from which date\n \"\"\"\n export_end_date = _end_date - timedelta(days=end_from_today_minus)\n if input_export_end_date != \"\":\n input_export_end_date = datetime.strptime(input_export_end_date, '%Y-%m-%d')\n if input_export_end_date < export_end_date:\n return input_export_end_date\n return export_end_date\n\ndef check_export_start_date(export_start_date, export_end_date,\n export_day_range):\n \"\"\"\n Update export_start_date according to the export_end_date so that it could be export_end_date - export_day_range.\n\n Parameters:\n export_start_date: str\n Read from params\n export_end_date: datetime.datetime\n Calculated according to the data received\n export_day_range: int\n Number of days to report\n\n Returns:\n datetime.datetime\n export data until which date\n \"\"\"\n if export_start_date == \"\":\n export_start_date = datetime(2020, 5, 26)\n else:\n export_start_date = datetime.strptime(export_start_date, '%Y-%m-%d')\n # Only export data from -45 days to -5 days\n if (export_end_date - export_start_date).days > export_day_range:\n export_start_date = export_end_date - 
timedelta(days=export_day_range)\n\n if export_start_date < datetime(2020, 5, 26):\n return datetime(2020, 5, 26)\n return export_start_date\n\ndef update_cache_file(df, _end_date, cache_dir):\n \"\"\"\n Update cache file. Remove the old one, export the new one.\n\n Parameter:\n df: pd.DataFrame\n Pre-process file at ZipCode level\n _end_date:\n The most recent date when the raw data is received\n cache_dir:\n ./cache where the cache file is stored\n \"\"\"\n for fn in os.listdir(cache_dir):\n if \".csv\" in fn:\n os.remove(join(cache_dir, fn))\n df.to_csv(join(cache_dir, \"pulled_until_%s.csv\") % _end_date.strftime(\"%Y%m%d\"), index=False)\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_excel",
"numpy.sum",
"pandas.Timedelta"
]
] |
rohitsanj/doe
|
[
"d1fe3629dfe3fb789dfe42b072c2682581a9ae90"
] |
[
"doex/rcbd.py"
] |
[
"import numpy as np\n\nfrom .utils import p_value, create_anova_table, multiple_comparisons\n\n\nclass RandomizedCompleteBlockDesign:\n def __init__(self, data):\n self.data = np.array(data)\n\n n_treatments, n_blocks = self.data.shape\n\n if hasattr(self, \"num_missing\"):\n num_missing = self.num_missing\n else:\n num_missing = 0\n\n N = 0\n for entry in self.data:\n N += len(entry)\n\n self.correction_factor = np.square(np.sum(self.data)) / N\n\n # Calculate Sum of Squares\n self.row_totals = np.sum(self.data, axis=1)\n self.ss_treatments = np.sum(np.square(self.row_totals)) / n_blocks\n self.ss_treatments = self.ss_treatments - self.correction_factor\n\n self.column_totals = np.sum(self.data, axis=0)\n self.ss_blocks = np.sum(np.square(self.column_totals)) / n_treatments\n self.ss_blocks = self.ss_blocks - self.correction_factor\n\n self.ss_total = np.sum(np.square(self.data)) - self.correction_factor\n\n self.ss_error = self.ss_total - (self.ss_treatments + self.ss_blocks)\n\n # Calculate Degrees of Freedom\n self.dof_treatments = n_treatments - 1\n self.dof_blocks = n_blocks - 1\n self.dof_total = N - 1\n self.dof_error = self.dof_total - (self.dof_treatments + self.dof_blocks + num_missing)\n\n # Calculate Mean Sum of Squares\n self.mss_treatments = self.ss_treatments / self.dof_treatments\n self.mss_blocks = self.ss_blocks / self.dof_blocks\n self.mss_error = self.ss_error / self.dof_error\n\n self.f_treatments = self.mss_treatments / self.mss_error\n self.f_blocks = self.mss_blocks / self.mss_error\n\n self.p_treatments = p_value(self.f_treatments, self.dof_treatments, self.dof_error)\n self.p_blocks = p_value(self.f_blocks, self.dof_blocks, self.dof_error)\n\n # Display results\n self.table = self._create_table()\n print(self.table)\n\n def multiple_comparisons(self):\n # Multiple comparisons\n n_treatments, _ = self.data.shape\n\n print(\n multiple_comparisons(\n list(range(1, n_treatments + 1)),\n self.data,\n self.dof_error,\n np.sqrt(self.mss_error),\n )\n )\n\n def _create_table(self):\n table = create_anova_table()\n\n rows = [\n [\n \"Treatments\",\n self.dof_treatments,\n self.ss_treatments,\n self.mss_treatments,\n self.f_treatments,\n self.p_treatments,\n ],\n [\n \"Blocks\",\n self.dof_blocks,\n self.ss_blocks,\n self.mss_blocks,\n self.f_blocks,\n self.p_blocks,\n ],\n [\"Error\", self.dof_error, self.ss_error, self.mss_error, \"\", \"\"],\n [\"Total\", self.dof_total, self.ss_total, \"\", \"\", \"\"],\n ]\n\n for row in rows:\n table.add_row(row)\n\n return table\n\n\nTwoWayANOVA = RandomizedCompleteBlockDesign\n\n\nclass RandomizedCompleteBlockDesign_MissingValues(RandomizedCompleteBlockDesign):\n def __init__(self, data):\n self.data = np.array(data)\n\n n_treatments, n_blocks = self.data.shape\n\n self.num_missing = np.count_nonzero(np.isnan(self.data))\n missing_locations = np.argwhere(np.isnan(self.data))\n self.handle_missing(self.data, missing_locations)\n\n print(\"Data after adjusting for {} missing value(s)\".format(self.num_missing))\n print(self.data)\n\n # Continue with RCBD analysis\n super().__init__(self.data)\n\n def handle_missing(self, data, locations):\n if len(locations) == 1:\n return self._missing_1_value(data, locations[0])\n elif len(locations) == 2:\n return self._missing_2_values(data, locations)\n else:\n raise Exception(\"Data must have either 1 or 2 missing values\")\n\n def _missing_1_value(self, data, location):\n k, r = data.shape # k treatments, r replications\n i, j = location\n\n G = np.nansum(data)\n treatments_sum = 
np.nansum(data[i, :])\n blocks_sum = np.nansum(data[:, j])\n\n self.data[i, j] = (r * blocks_sum + k * treatments_sum - G) / ((r - 1) * (k - 1))\n\n def _missing_2_values(self, data, locations):\n k, r = data.shape # k treatments, r replications\n\n y1_loc, y2_loc = locations\n i, j = y1_loc\n m, j_1 = y2_loc\n\n G = np.nansum(data)\n Ti = np.nansum(data[i, :])\n Tm = np.nansum(data[m, :])\n Bj = np.nansum(data[:, j])\n Bj_1 = np.nansum(data[:, j_1])\n\n y1_estimate = ((k - 1) * (r - 1) * (k * Ti + r * Bj - G) - (k * Tm + r * Bj_1 - G)) / (\n np.square(r - 1) * np.square(k - 1) - 1\n )\n\n y2_estimate = ((k - 1) * (r - 1) * (k * Tm + r * Bj_1 - G) - (k * Ti + r * Bj - G)) / (\n np.square(r - 1) * np.square(k - 1) - 1\n )\n\n self.data[y1_loc[0], y1_loc[1]] = y1_estimate\n self.data[y2_loc[0], y2_loc[1]] = y2_estimate\n"
] |
[
[
"numpy.square",
"numpy.array",
"numpy.isnan",
"numpy.sum",
"numpy.nansum",
"numpy.sqrt"
]
] |
anttisaukko/tensorflow-onnx
|
[
"1341bdf476df6023b75bc6b3c6e4cda00cc58a29"
] |
[
"tf2onnx/rewriter/custom_rnn_rewriter.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n\"\"\"\ntf2onnx.rewriter.custom_rnn_rewriter - custom rnn support\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nimport logging\nimport sys\nfrom onnx import helper, onnx_pb\nimport numpy as np\nfrom tf2onnx.graph import Graph, Node\nfrom tf2onnx.graph_matcher import OpTypePattern, GraphMatcher\nfrom tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context\nfrom tf2onnx.rewriter.rnn_utils import is_tensor_array_gather_op, is_tensor_array_write_op, \\\n is_placeholder_op, make_onnx_node\nfrom tf2onnx.rewriter.rnn_utils import BodyGraphDict, REWRITER_RESULT, SubGraphMetadata\nfrom tf2onnx.tfonnx import utils\n\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(\"tf2onnx.rewriter.custom_rnn_rewriter\")\nINVLAID_INPUT_ID = \"invalid:0\"\n\n# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access\n\n\nclass CustomRnnContext(Context):\n def __init__(self):\n super(CustomRnnContext, self).__init__()\n self.other_loop_vars = {}\n self.rnn_scope = None\n\n self.output_tas = []\n self.input_tas = []\n self.time_var = None\n self.iteration_var = None\n\n\nclass TensorArrayProp(object):\n def __init__(self):\n self.index_input_id = None\n self.data_input_id = None\n self.output_id = None\n\n\nclass ScanProperties(object):\n def __init__(self, initial_state_and_scan_inputs, loop_state_inputs,\n loop_state_outputs, loop_scan_inputs, loop_scan_outputs):\n self.initial_state_and_scan_inputs = initial_state_and_scan_inputs\n self.loop_state_inputs = loop_state_inputs\n self.loop_state_outputs = loop_state_outputs\n self.loop_scan_inputs = loop_scan_inputs\n self.loop_scan_outputs = loop_scan_outputs\n\n\nclass CustomRnnRewriter(LoopRewriterBase):\n def __init__(self, g):\n super(CustomRnnRewriter, self).__init__(g)\n self.rnn_input_pattern = \\\n OpTypePattern('TensorArrayReadV3', name='ta_read', inputs=[\n OpTypePattern(\"Enter\", name=\"ta_enter\", inputs=[\n OpTypePattern(\"TensorArrayV3\")\n ]),\n OpTypePattern('*'),\n OpTypePattern(\"Enter\", name=\"ta_scatter_enter\", inputs=[\n OpTypePattern(\"TensorArrayScatterV3\", name=\"ta_input_scatter\")\n ]),\n ])\n\n def create_context(self):\n return CustomRnnContext()\n\n def run(self):\n log.debug(\"enter custom rnn rewriter\")\n return self.run_internal()\n\n def _get_rnn_scope_name(self, while_scope_name):\n parts = while_scope_name.split('/')\n rnn_scope = '/'.join(parts[0:-2]) + \"/\"\n log.debug(\"found rnn scope %s\", rnn_scope)\n return rnn_scope\n\n def _parse_rnn_loop(self, context):\n # check a while loop is generated by dynamic_rnn or bidirectional_rnn by\n #\n # 1. some patterns in _time_step in dynamic_rnn: tensor array read, tensor array write\n # 2. some patterns in control_flow_ops.while_loop in dynamic_rnn:\n # cond: time < loop_bound\n # loop_vars: (time, output_ta, state)\n # time has name called \"time\"\n # iteration_cnt is added by control flow.\n\n # be noted:\n # 1. iteration counter does not exist in tf1.4 or earlier versions\n # 2. 
if dynamic_rnn's first input is not consumed, output ta does not exist.\n time_name = context.rnn_scope + \"time\"\n ta_array_name_prefix = context.rnn_scope + \"dynamic_rnn/output_\"\n iteration_counter_name = context.while_context_scope + \"iteration_counter\"\n\n found_time = False\n is_rnn_out_ta = True\n for enter_name, val in context.loop_variables.items():\n enter_input_node = self.g.get_node_by_name(val.enter_input_id)\n if val.is_tensor_array:\n ta_name = enter_input_node.get_attr(\"tensor_array_name\").s.decode(\"utf-8\")\n if not ta_name.startswith(ta_array_name_prefix):\n is_rnn_out_ta = False\n elif enter_input_node.name == time_name:\n found_time = True\n context.time_var = val\n elif enter_input_node.name == iteration_counter_name:\n context.iteration_var = val\n else:\n context.other_loop_vars[enter_name] = val\n\n if not (found_time and is_rnn_out_ta):\n log.debug(\"this should not be a dynamic_rnn loop, found_time: %s, is_rnn_out_ta: %s\",\n found_time, is_rnn_out_ta)\n return False\n\n return True\n\n def need_rewrite(self, context):\n context.rnn_scope = self._get_rnn_scope_name(context.while_context_scope)\n\n if not self._parse_rnn_loop(context):\n log.debug(\"skip the loop due to parse_rnn_loop failed\")\n return False\n\n self._parse_time_var(context)\n self._parse_output_ta(context)\n self._parse_input_ta(context)\n\n if not (context.input_tas or context.output_tas):\n log.debug(\"this should not be a dynamic_rnn loop, no ta input or output are found\")\n return False\n return True\n\n def rewrite(self, context):\n log.debug(\"enter rewrite function\")\n scan_node = None\n try:\n to_remove = self._cut_off_connection_for_cell(context)\n all_nodes = self.g.get_nodes()\n for n in set(to_remove):\n if n in all_nodes:\n all_nodes.remove(n)\n self.g.set_nodes(all_nodes)\n\n scan_props, nodes_to_append = self._compose_cell_inputs_and_outputs(context)\n scan_node = self._create_scan_node(context, scan_props)\n if not scan_node:\n log.error(\"failed to create scan node during rewrite\")\n return REWRITER_RESULT.FAIL\n nodes_to_append.append(scan_node)\n\n _ = self._extract_and_register_cell_graph_info(context, scan_props, scan_node)\n\n to_append = self._connect_scan_with_output(context, scan_node)\n nodes_to_append.extend(to_append)\n all_nodes = self.g.get_nodes()\n all_nodes.extend(nodes_to_append)\n self.g.set_nodes(all_nodes)\n\n return REWRITER_RESULT.OK\n except Exception as ex:\n if scan_node and BodyGraphDict.has_body_graph_info(scan_node.name):\n BodyGraphDict.pop_body_graph_info(scan_node.name)\n log.error(\"remove scan node body graph from dict\")\n log.error(\"rewrite failed, due to exception: %s\", ex)\n return REWRITER_RESULT.FAIL\n\n def _parse_time_var(self, context):\n time_var = context.time_var\n log.debug(\"time var %s - enter input id (%s) shape: %s, output (%s) shape: %s\", time_var.enter_name,\n time_var.enter_input_id, self.g.get_shape(time_var.enter_input_id),\n time_var.switch_true_identity_output_id, self.g.get_shape(time_var.switch_true_identity_output_id))\n\n def _parse_output_ta(self, context):\n for enter_name, loop_var in context.loop_variables.items():\n if not loop_var.is_tensor_array:\n continue\n\n output_ta = TensorArrayProp()\n output_ta.data_input_id = loop_var.next_iteration_input_id\n\n output_ta.index_input_id = loop_var.ta_index_id\n if loop_var.exit_output_id:\n exit_consumers = self.g.find_output_consumers(loop_var.exit_output_id)\n ta_gather_node = [n for n in exit_consumers if is_tensor_array_gather_op(n)][0]\n 
output_ta.output_id = ta_gather_node.output[0]\n\n context.output_tas.append(output_ta)\n log.debug(\"output ta %s - data input (%s) shape: %s, output (%s) shape: %s\", enter_name,\n output_ta.data_input_id, self.g.get_shape(output_ta.data_input_id),\n output_ta.output_id, self.g.get_shape(output_ta.output_id))\n\n def _parse_input_ta(self, context):\n matcher = GraphMatcher(self.rnn_input_pattern, allow_reorder=True)\n match_results = list(matcher.match_ops(self.g.get_nodes()))\n match_results = [r for r in match_results if r.get_op(\"ta_input_scatter\").name.startswith(context.rnn_scope)]\n for match in match_results:\n ta_input_scatter = match.get_op(\"ta_input_scatter\")\n # the 3rd input of scatter is the value\n input_ta = TensorArrayProp()\n\n # dynamic_rnn specific approach.\n input_ta.data_input_id = ta_input_scatter.input[2]\n\n ta_read_node = match.get_op(\"ta_read\")\n input_ta.index_input_id = ta_read_node.input[1]\n input_ta.output_id = match.get_op(\"ta_read\").output[0]\n\n context.input_tas.append(input_ta)\n\n log.debug(\"input ta %s - data input (%s) shape: %s, output (%s) shape: %s\", ta_read_node.name,\n input_ta.data_input_id, self.g.get_shape(input_ta.data_input_id),\n input_ta.output_id, self.g.get_shape(input_ta.output_id))\n\n def _cut_off_connection_for_cell(self, context):\n nodes_to_remove = []\n all_vars = [context.time_var]\n all_vars += [val for _, val in context.other_loop_vars.items()]\n for val in all_vars:\n # remove the node to cut off a starting node of the cell (e.g. loop body).\n nodes_to_remove.append(self.g.get_node_by_name(val.switch_true_identity_output_id))\n\n # connect NextIteration to an invalid node, to cut off a ending node of the cell.\n next_iter_nodes = [n for n in self.g.get_nodes() if n.type == \"NextIteration\"]\n self.g.replace_all_inputs(next_iter_nodes, val.next_iteration_input_id, INVLAID_INPUT_ID)\n\n for input_ta in context.input_tas:\n # remove the node to cut off connection between scan_input and the cell.\n nodes_to_remove.append(self.g.get_node_by_name(input_ta.output_id))\n\n for output_ta in context.output_tas:\n # remove the node to cut off connection between scan_output and the cell.\n ta_write_nodes = [n for n in self.g.get_nodes() if is_tensor_array_write_op(n)]\n self.g.replace_all_inputs(ta_write_nodes, output_ta.data_input_id, INVLAID_INPUT_ID)\n\n return nodes_to_remove\n\n def _compose_cell_inputs_and_outputs(self, context):\n log.debug(\"_compose_cell_inputs_and_outputs\")\n\n nodes_to_append = []\n loop_state_inputs = []\n loop_state_outputs = []\n initial_state_and_scan_inputs = []\n\n # change time shape to {1} since current Scan does not support\n time_var, to_append = self._adapt_time_var_as_workaround(context.time_var)\n nodes_to_append.extend(to_append)\n\n log.debug(\"prepare cell state inputs\")\n vars_to_iterate = [time_var] + [val for _, val in context.other_loop_vars.items()]\n for var in vars_to_iterate:\n nodes = self._adapt_scan_sequence_input_or_output(\"input\", var.enter_input_id, False)\n var.enter_input_id = nodes[-1].output[0]\n nodes_to_append.extend(nodes)\n\n loop_state_inputs.append(var.switch_true_identity_output_id)\n loop_state_outputs.append(var.next_iteration_input_id)\n initial_state_and_scan_inputs.append(var.enter_input_id)\n\n log.debug(\"prepare cell scan inputs\")\n loop_scan_inputs = []\n for input_ta in context.input_tas:\n nodes = self._adapt_scan_sequence_input_or_output(\"input_ta\", input_ta.data_input_id, False)\n input_ta.data_input_id = nodes[-1].output[0]\n 
nodes_to_append.extend(nodes)\n\n loop_scan_inputs.append(input_ta.output_id)\n initial_state_and_scan_inputs.append(input_ta.data_input_id)\n\n log.debug(\"prepare cell scan outputs\")\n loop_scan_outputs = []\n for output_ta in context.output_tas:\n loop_scan_outputs.append(output_ta.data_input_id)\n\n scan_props = ScanProperties(initial_state_and_scan_inputs, loop_state_inputs, loop_state_outputs,\n loop_scan_inputs, loop_scan_outputs)\n\n return scan_props, nodes_to_append\n\n def _create_scan_node(self, context, scan_props):\n log.debug(\"create scan node\")\n # here we did not give the sequence_length, because\n # current batch size is 1, not original batch size\n # original seq_length will be used by the loop body of Scan op.\n scan_node = make_onnx_node(self.g, \"Scan\", [\"\"] + scan_props.initial_state_and_scan_inputs,\n attr={\"num_scan_inputs\": len(scan_props.loop_scan_inputs)},\n output_count=len(scan_props.loop_state_outputs + scan_props.loop_scan_outputs),\n skip_conversion=True)\n\n # the first state var is time-iterator.\n index = 0\n time_input_shape = self.g.get_shape(scan_node.input[1])\n time_input_dtype = self.g.get_dtype(scan_node.input[1])\n\n log.debug(\"_create_scan_node - set scan state_output shape for %s[%s]:%s\",\n scan_node.name, index, time_input_shape)\n self.g.set_shape(scan_node.output[index], time_input_shape)\n self.g.set_dtype(scan_node.output[index], time_input_dtype)\n index += 1\n\n # for other state vars\n state_input_shape = self.g.get_shape(scan_node.input[2])\n state_input_dtype = self.g.get_dtype(scan_node.input[2])\n for i in range(len(scan_props.loop_state_outputs) - 1):\n log.debug(\"_create_scan_node - set scan state_output shape for %s[%s]:%s\",\n scan_node.name, index, state_input_shape)\n self.g.set_shape(scan_node.output[index], state_input_shape)\n self.g.set_dtype(scan_node.output[index], state_input_dtype)\n index += 1\n\n last_scan_input_shape = self.g.get_shape(scan_node.input[-1])\n batch = last_scan_input_shape[0] # should be 1\n time = last_scan_input_shape[1]\n for i in range(len(scan_props.loop_scan_outputs)):\n scan_out_dtype = self.g.get_dtype(scan_props.loop_scan_outputs[i])\n scan_output_shape = [batch, time] + self.g.get_shape(scan_props.loop_scan_outputs[i])\n log.debug(\"scan output [%s] has shape %s, batch:%s, time: %s\",\n scan_props.loop_scan_outputs[i], scan_output_shape, batch, time)\n log.debug(\"_create_scan_node - set scan scan_output shape for %s[%s]:%s\",\n scan_node.name, index, scan_output_shape)\n self.g.set_shape(scan_node.output[index], scan_output_shape)\n self.g.set_dtype(scan_node.output[index], scan_out_dtype)\n index += 1\n\n return scan_node\n\n def _extract_and_register_cell_graph_info(self, context, scan_props, scan_node):\n log.debug(\"_extract_cell_graph_nodes\")\n\n sub_graph_inputs = scan_props.loop_state_inputs + scan_props.loop_scan_inputs\n sub_graph_outputs = scan_props.loop_state_outputs + scan_props.loop_scan_outputs\n body_graph_meta = SubGraphMetadata(self.g, sub_graph_inputs, sub_graph_outputs,\n scan_props.initial_state_and_scan_inputs)\n\n # according to input and output, find the body graph\n nodes, enter_nodes = self.find_subgraph(body_graph_meta, self.g)\n other_enter_input_ids = []\n for enter_node in enter_nodes:\n # connect Enter's output to Enter's input\n self.g.replace_all_inputs(self.g.get_nodes(), enter_node.output[0], enter_node.input[0])\n\n nodes = self.g._extract_sub_graph_nodes(self.g.get_node_by_name(enter_node.input[0]))\n\n # if the enter target subgraph contains 
planeholder, then we keep record that as cell boundary.\n has_placeholder = None\n for n in nodes:\n if is_placeholder_op(n):\n has_placeholder = True\n break\n\n # if there is placeholder in the Enter's input graph, then we think we should consider the Enter's input\n # nodes as cell's input; otherwise, we think the input graph should be part of cell graph.\n if has_placeholder is True:\n log.debug(\"Enter input id [%s] is a subgraph containing placeholder, so make it cell boundary\",\n enter_node.input[0])\n other_enter_input_ids.append(enter_node.input[0])\n\n body_graph_meta.other_enter_input_ids = other_enter_input_ids\n\n log.debug(\"add body graph meta data into store\")\n BodyGraphDict.add_body_graph_info(scan_node.name, body_graph_meta)\n return nodes\n\n def _connect_scan_with_output(self, context, scan_node):\n log.debug(\"connect scan output with the graph\")\n\n index = 1 # ignore the 1st input (time-iterator)\n nodes_to_append = []\n for _, val in context.other_loop_vars.items():\n var_output_id = val.exit_output_id\n if var_output_id:\n nodes = self._adapt_scan_sequence_input_or_output(\"state_output_reshape\",\n scan_node.output[index], True)\n nodes_to_append.extend(nodes)\n self.g.replace_all_inputs(self.g.get_nodes(), var_output_id, nodes[-1].output[0])\n\n index += 1\n\n for output_ta in context.output_tas:\n ta_final_output_id = output_ta.output_id\n if ta_final_output_id:\n nodes = self._adapt_scan_sequence_input_or_output(\"scan_output_reshape\",\n scan_node.output[index], True)\n nodes_to_append.extend(nodes)\n self.g.replace_all_inputs(self.g.get_nodes(), ta_final_output_id, nodes[-1].output[0])\n index += 1\n\n return nodes_to_append\n\n def _adapt_scan_sequence_input_or_output(self, target_name, input_id, handle_output=False):\n nodes_to_add = []\n shape_node = make_onnx_node(self.g, \"Shape\", [input_id])\n nodes_to_add.append(shape_node)\n inferred_shape = self.g.get_shape(input_id)\n if handle_output is True:\n # handle output:\n # if required dim values don't contain more than one -1,\n # just use a const for Reshape's shape input.\n if inferred_shape is not None and inferred_shape[1:].count(-1) <= 1:\n new_shape_node = self.g.make_const(utils.make_name(target_name + \"_target_shape\"),\n np.array(inferred_shape[1:], dtype=np.int64))\n else:\n # otherwise, get the dim dynamically, e.g. 
remove the fake batch size (e.g.1)\n # from [1, time, real-batch, ...]\n origin_shape_node = make_onnx_node(self.g, \"Cast\", [shape_node.output[0]],\n {\"to\": onnx_pb.TensorProto.FLOAT})\n nodes_to_add.append(origin_shape_node)\n\n sliced_shape_node = make_onnx_node(self.g, \"Slice\", [origin_shape_node.output[0]],\n {\"axes\": [0], \"starts\": [1], \"ends\": [sys.maxsize]})\n nodes_to_add.append(sliced_shape_node)\n\n new_shape_node = make_onnx_node(self.g, \"Cast\", [sliced_shape_node.output[0]],\n {\"to\": onnx_pb.TensorProto.INT64})\n nodes_to_add.append(new_shape_node)\n\n new_shape = inferred_shape[1:]\n else:\n # handle input:\n if inferred_shape is not None and inferred_shape.count(-1) <= 1:\n new_shape_node = self.g.make_const(utils.make_name(target_name + \"_target_shape\"),\n np.array([1] + inferred_shape, dtype=np.int64))\n else:\n # add a fake batch size : 1\n fake_batch_size_node = self.g.make_const(utils.make_name(target_name + \"_target_shape\"),\n np.array([1,], dtype=np.int64))\n new_shape_node = make_onnx_node(self.g, \"Concat\",\n [fake_batch_size_node.output[0], shape_node.output[0]],\n {\"axis\": 0})\n nodes_to_add.append(new_shape_node)\n new_shape = [1] + inferred_shape\n\n reshape_node = make_onnx_node(self.g, \"Reshape\", [input_id, new_shape_node.output[0]],\n skip_conversion=True, op_name_scope=target_name)\n nodes_to_add.append(reshape_node)\n self.g.set_shape(reshape_node.output[0], new_shape)\n self.g.set_dtype(reshape_node.output[0], self.g.get_dtype(input_id))\n log.debug(\"create Reshape for scan output %s, with output shape %s\",\n reshape_node.output[0], new_shape)\n return nodes_to_add\n\n # in theory, time var can be a scalar, but in current implementation of runtime, it could not be handled\n # correctly, so we unsqueeze it to a list containing a single element.\n def _adapt_time_var_as_workaround(self, var):\n log.debug(\"_adapt_time_var_as_workaround\")\n nodes_to_append = []\n # change time shape to {1} since current Scan does not support\n time_init_node = self._create_unsqueeze_node(\"time_var_init\", var.enter_input_id)\n nodes_to_append.append(time_init_node)\n var.enter_input_id = time_init_node.output[0]\n\n time_output_node = self._create_unsqueeze_node(\"time_var_output\", var.next_iteration_input_id)\n nodes_to_append.append(time_output_node)\n var.next_iteration_input_id = time_output_node.output[0]\n\n time_input_node = self._create_squeeze_node(\"time_var_input\", var.switch_true_identity_output_id)\n nodes_to_append.append(time_input_node)\n self.g.replace_all_inputs(self.g.get_nodes(), var.switch_true_identity_output_id, time_input_node.output[0])\n self.g.set_shape(var.switch_true_identity_output_id, [1] + self.g.get_shape(var.switch_true_identity_output_id))\n\n return var, nodes_to_append\n\n def _create_unsqueeze_node(self, target_name, input_id):\n unsqueeze_node = make_onnx_node(self.g, \"Unsqueeze\", [input_id], attr={\"axes\": [0]},\n skip_conversion=True, op_name_scope=target_name)\n input_shape = self.g.get_shape(input_id)\n if input_shape is None:\n raise ValueError(input_id + \" is none\")\n input_shape = [1] + input_shape\n self.g.set_shape(unsqueeze_node.output[0], input_shape)\n self.g.set_dtype(unsqueeze_node.output[0], self.g.get_dtype(input_id))\n\n return unsqueeze_node\n\n def _create_squeeze_node(self, target_name, input_id):\n squeeze_node = make_onnx_node(self.g, \"Squeeze\", [input_id], attr={\"axes\": [0]},\n skip_conversion=True, op_name_scope=target_name)\n input_shape = self.g.get_shape(input_id)\n if 
input_shape is None:\n raise ValueError(input_id + \" is none\")\n input_shape = list(input_shape)[1:]\n self.g.set_shape(squeeze_node.output[0], input_shape)\n self.g.set_dtype(squeeze_node.output[0], self.g.get_dtype(input_id))\n\n return squeeze_node\n # end of time var workaround\n\n\nclass CustomRnnLateRewriter(object):\n def __init__(self, g):\n self.g = g\n\n def rewrite(self):\n log.debug(\"enter custom rnn late rewriter\")\n nodes = self.g.get_nodes()\n nodes_to_remove = []\n for scan_node in nodes:\n if scan_node.type != \"Scan\":\n continue\n log.debug(\"late write for scan node %s\", scan_node.name)\n num_scan_inputs = scan_node.get_attr(\"num_scan_inputs\").i\n if not BodyGraphDict.has_body_graph_info(scan_node.name):\n continue\n\n body_graph_meta = BodyGraphDict.pop_body_graph_info(scan_node.name)\n onnx_nodes, _ = LoopRewriterBase.find_subgraph(body_graph_meta, self.g)\n nodes_to_remove.extend(onnx_nodes)\n\n log.debug(\"start creating body graph for scan node %s \", scan_node.name)\n body_graph_initializers = {}\n const_nodes = [n for n in onnx_nodes if n.type in (\"Const\", \"ConstV2\")]\n for n in const_nodes:\n # when set nodes, Const should be removed, they need be replaced as initializers.\n body_graph_initializers[n.output[0]] = self.g.initializers[n.output[0]]\n onnx_nodes.remove(n)\n\n onnx_nodes = set(onnx_nodes)\n\n ops = []\n for op in onnx_nodes:\n onnx_op = op.op\n ops.append(onnx_op)\n\n body_g = Graph(ops, output_shapes=self.g._output_shapes, dtypes=self.g._dtypes)\n body_g._initializers = body_graph_initializers\n\n log.debug(\"start preparing body graph inputs nodes\")\n temp_nodes = body_g.get_nodes()\n i = 0\n input_count = len(body_graph_meta.input_ids)\n for input_name, init_input_id in zip(body_graph_meta.input_ids, body_graph_meta.initial_input_ids):\n shape = body_g.get_shape(input_name)\n dtype = body_g.get_dtype(input_name)\n if shape is None:\n shape = self.g.get_shape(init_input_id)\n if i >= input_count - num_scan_inputs:\n loop_input_shape = list(shape)[2:] # delete [1, time,]\n else:\n loop_input_shape = list(shape)\n else:\n loop_input_shape = list(shape)\n\n onnx_input_shape = utils.make_onnx_shape(loop_input_shape)\n val = helper.make_tensor_value_info(input_name, dtype, onnx_input_shape)\n body_g.add_model_input(input_name, val)\n i += 1\n\n log.debug(\"start preparing body graph outputs nodes\")\n new_output_names = []\n for o in body_graph_meta.output_ids:\n # insert identity node, since sometimes we need output same output_id as state_output\n # and scan_out, but ONNX don't allow the same output_id appeared more than once as\n # output node.\n identity_name = utils.make_name(\"Identity\")\n identity_output = utils.port_name(identity_name)\n node = Node(helper.make_node(\"Identity\", [o], [identity_output], name=identity_name), body_g)\n body_g.set_dtype(identity_output, body_g.get_dtype(o))\n body_g.copy_shape(o, identity_output)\n new_output_names.append(identity_output)\n temp_nodes.append(node)\n\n body_g.set_nodes(temp_nodes)\n body_g.topological_sort(body_g.get_nodes())\n\n log.debug(\"start make graph based on body graph nodes\")\n body_g.output_names = new_output_names\n graph = body_g.make_graph(\"scan body graph\")\n scan_node.set_attr(\"body\", graph)\n\n # remove nodes in body graph from g\n for n in set(nodes_to_remove):\n if n in nodes:\n nodes.remove(n)\n elif self.g.is_initializer(n.output[0]):\n del self.g.initializers[n.output[0]]\n else:\n raise ValueError(\"error when removing nodes\")\n\n return nodes\n"
] |
[
[
"numpy.array"
]
] |
Suhasnama/datasets
|
[
"1259b2329825dfee02ab1925f41d00756d9e7bdc"
] |
[
"tensorflow_datasets/image/cats_vs_dogs.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cats vs Dogs dataset.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nfrom absl import logging\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\\\n@Inproceedings (Conference){asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization,\nauthor = {Elson, Jeremy and Douceur, John (JD) and Howell, Jon and Saul, Jared},\ntitle = {Asirra: A CAPTCHA that Exploits Interest-Aligned Manual Image Categorization},\nbooktitle = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},\nyear = {2007},\nmonth = {October},\npublisher = {Association for Computing Machinery, Inc.},\nurl = {https://www.microsoft.com/en-us/research/publication/asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization/},\nedition = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},\n}\n\"\"\"\n\n_URL = (\"https://download.microsoft.com/download/3/E/1/3E1C3F21-\"\n \"ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip\")\n_NUM_CORRUPT_IMAGES = 1738\n_DESCRIPTION = ((\"A large set of images of cats and dogs.\"\n \"There are %d corrupted images that are dropped.\")\n % _NUM_CORRUPT_IMAGES)\n\n_NAME_RE = re.compile(r\"^PetImages[\\\\/](Cat|Dog)[\\\\/]\\d+\\.jpg$\")\n\n\nclass CatsVsDogs(tfds.core.GeneratorBasedBuilder):\n \"\"\"Cats vs Dogs.\"\"\"\n\n VERSION = tfds.core.Version(\n \"4.0.0\", \"New split API (https://tensorflow.org/datasets/splits)\")\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(),\n \"image/filename\": tfds.features.Text(), # eg 'PetImages/Dog/0.jpg'\n \"label\": tfds.features.ClassLabel(names=[\"cat\", \"dog\"]),\n }),\n supervised_keys=(\"image\", \"label\"),\n homepage=\n \"https://www.microsoft.com/en-us/download/details.aspx?id=54765\",\n citation=_CITATION\n )\n\n def _split_generators(self, dl_manager):\n path = dl_manager.download(_URL)\n\n # There is no predefined train/val/test split for this dataset.\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n \"archive\": dl_manager.iter_archive(path),\n }),\n ]\n\n def _generate_examples(self, archive):\n \"\"\"Generate Cats vs Dogs images and labels given a directory path.\"\"\"\n num_skipped = 0\n for fname, fobj in archive:\n res = _NAME_RE.match(fname)\n if not res: # README file, ...\n continue\n label = res.group(1).lower()\n if tf.compat.as_bytes(\"JFIF\") not in fobj.peek(10):\n num_skipped += 1\n continue\n record = {\n \"image\": fobj,\n \"image/filename\": fname,\n \"label\": label,\n }\n yield fname, record\n\n if num_skipped != _NUM_CORRUPT_IMAGES:\n raise ValueError(\"Expected %d corrupt images, but found %d\" % (\n 
_NUM_CORRUPT_IMAGES, num_skipped))\n logging.warning(\"%d images were corrupted and were skipped\", num_skipped)\n"
] |
[
[
"tensorflow.compat.v2.compat.as_bytes"
]
] |
ConnerZhao/FRES
|
[
"a6d82065eedf90ffaad91b242488e4aef844033d"
] |
[
"srs/main.py"
] |
[
"import tkinter \r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nimport cv2\r\nimport numpy as np\r\nfrom keras.preprocessing import image\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nfrom keras.preprocessing.image import load_img, img_to_array \r\nfrom keras.models import load_model\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\n\r\n#window\r\ntkWindow = Tk() \r\ntkWindow.geometry('400x150') \r\ntkWindow.title('Tkinter Login Form - pythonexamples.org')\r\n\r\n# load model\r\nmodel = load_model(\"best_model.h5\")\r\nface_haar_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\r\ncap = cv2.VideoCapture(0)\r\n\r\ndef scanButton():\r\n messagebox.showinfo( title = \"Look at the Camera\", message= \"Look at the Camera\\nOnce the facial expression is labeled, press Q to stop scanning!\")\r\n while True:\r\n f = open(\"emotions.txt\", \"w\")\r\n ret, test_img = cap.read() # captures frame and returns boolean value and captured image\r\n if not ret:\r\n continue\r\n gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)\r\n\r\n faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)\r\n\r\n for (x, y, w, h) in faces_detected:\r\n cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=6)\r\n roi_gray = gray_img[y:y + w, x:x + h] # cropping region of interest i.e. face area from image\r\n roi_gray = cv2.resize(roi_gray, (224, 224))\r\n img_pixels = image.img_to_array(roi_gray)\r\n img_pixels = np.expand_dims(img_pixels, axis=0)\r\n img_pixels /= 255\r\n\r\n predictions = model.predict(img_pixels)\r\n\r\n # find max indexed arra y\r\n max_index = np.argmax(predictions[0])\r\n\r\n emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')\r\n predicted_emotion = emotions[max_index]\r\n f.write(emotions[max_index] + \"\\n\")\r\n cv2.putText(test_img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n\r\n resized_img = cv2.resize(test_img, (1000, 700))\r\n cv2.imshow('Facial emotion analysis ', resized_img)\r\n if cv2.waitKey(10) == ord('q'): # wait until 'q' key is pressed\r\n f.close()\r\n break\r\n messagebox.showinfo( title = '', message= \"Scanning Completed\")\r\n cap.release()\r\n cv2.destroyAllWindows\r\n\r\ndef read():\r\n # Random number for random quotes\r\n x = random.randint(1,4)\r\n # Opens file containing emotion from scanning\r\n y = open(\"emotions.txt\", \"rt\")\r\n # Reads the first 11 characters\r\n z = y.read(10)\r\n # Strips the first 11 characters, so its only text\r\n emotion = z.strip()\r\n print(z)\r\n if emotion == \"angry\":\r\n quote = open(\"angry.txt\", \"rt\")\r\n messagebox.showinfo( title = '', message= quote.readlines(x))\r\n quote.close()\r\n elif emotion == \"disgust\":\r\n quote = open(\"disgust.txt\", \"rt\")\r\n messagebox.showinfo( title = '', message= quote.readlines(x))\r\n quote.close()\r\n elif emotion == \"fear\":\r\n quote = open(\"fear.txt\", \"rt\")\r\n messagebox.showinfo( title = '', message= quote.readlines(x))\r\n quote.close()\r\n elif emotion == \"happy\":\r\n messagebox.showinfo( title = '', message= \"We're glad you are having a great day!\\nKeep it up!\")\r\n quote = open(\"happy.txt\", \"rt\")\r\n messagebox.showinfo( title = '', message= quote.readlines(x))\r\n quote.close()\r\n elif emotion == \"surprise\":\r\n quote = open(\"surprise.txt\", \"rt\")\r\n messagebox.showinfo( title = '', message= quote.readlines(x))\r\n quote.close()\r\n elif emotion == 
\"sad\":\r\n quote = open(\"sad.txt\", \"rt\")\r\n messagebox.showinfo( title = '', message= quote.readlines(x))\r\n quote.close()\r\n else:\r\n messagebox.showinfo( title = '', message= 'You have not scanned your facial expression yet!')\r\n# Exit Button\r\nquitButton = tkinter.Button(tkWindow, \r\n text=\"Quit\", \r\n fg=\"red\",\r\n command=quit)\r\n\r\n# init Buttons\r\nscan = Button(tkWindow, text=\"Scan\", fg=\"Green\", command = scanButton)\r\nmsgButton = Button(tkWindow, text=\"Mesage\", command = read)\r\nscan.pack()\r\nmsgButton.pack()\r\nquitButton.pack()\r\ntkWindow.mainloop()"
] |
[
[
"numpy.argmax",
"numpy.expand_dims"
]
] |
adarshchbs/adda_sketch
|
[
"25f7adf3563d8e1edb8c431fb93876bbed4d4e76"
] |
[
"pretrain.py"
] |
[
"from torch import nn\nfrom torch import optim\nimport torch\n\nimport params\nfrom utils import make_variable, save_model\nfrom preprocess import preprocess_image\n\ndef train_src( source_encoder, source_classifier, data_loader, gpu_flag = False, gpu_name = 'cuda:0' ):\n\n # source_classifier.train()\n # source_encoder.train()\n \n optimizer = optim.Adam( list(source_classifier.parameters())\n + list(source_encoder.parameters()) ,\n lr = params.c_learning_rate,\n betas = ( params.beta1, params.beta2 )\n )\n \n \n \n criterion = nn.CrossEntropyLoss()\n\n for epoch in range( params.num_epochs_classifier ):\n \n for step, ( images, labels ) in enumerate( data_loader.image_gen('train') ):\n\n images = preprocess_image( array = images,\n split_type = 'train',\n use_gpu = gpu_flag, gpu_name=gpu_name )\n\n labels = torch.tensor(labels,dtype=torch.long)\n\n if(gpu_flag == True):\n labels = labels.cuda(gpu_name)\n\n\n optimizer.zero_grad()\n \n preds = source_classifier( source_encoder( images ))\n loss = criterion( preds, labels )\n\n loss.backward()\n optimizer.step()\n\n # print step info\n if ((step + 1) % params.log_step_pre == 0):\n print(\"Epoch [{}/{}] Step [{}/{}]: loss={}\"\n .format(epoch + 1,\n params.num_epochs_classifier,\n step + 1,\n int(data_loader.size['train']/data_loader.batch_size),\n loss.data.item()))\n # print(list(source_classifier.parameters()))\n # eval model on test set\n if ((epoch + 1) % params.eval_step_pre == 0):\n eval_src(source_encoder, source_classifier, data_loader, gpu_flag=True)\n\n # save model parameters\n if ((epoch + 1) % params.save_step_pre == 0):\n save_model(source_encoder, \"ADDA-source-encoder-{}.pt\".format(epoch + 1))\n save_model(\n source_classifier, \"ADDA-source-classifier-{}.pt\".format(epoch + 1))\n\n\n\n # # save final model\n save_model(source_encoder, \"ADDA-source-encoder-final.pt\")\n save_model(source_classifier, \"ADDA-source-classifier-final.pt\")\n\n return source_encoder, source_classifier\n\n\n\ndef eval_src( source_encoder, source_classifier, data_loader, gpu_flag = False, gpu_name = 'cuda:0' ):\n\n loss = 0\n accuracy = 0 \n\n source_encoder.eval()\n source_classifier.eval()\n\n criterion = nn.CrossEntropyLoss()\n correct = 0\n total = 0\n\n for (images, labels) in data_loader.image_gen(split_type='val'):\n images = preprocess_image( array = images,\n split_type = 'val',\n use_gpu = gpu_flag, gpu_name= gpu_name )\n\n labels = torch.tensor(labels,dtype=torch.long)\n\n if(gpu_flag == True):\n labels = labels.cuda(gpu_name)\n preds = source_classifier( source_encoder( images ))\n loss += criterion( preds, labels ).item()\n\n _, predicted = torch.max(preds.data,1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n # pred_cls = preds.data.max(1)[1]\n # print(pred_cls.eq(labels.data).cpu().sum())\n # accuracy += pred_cls.eq(labels.data).cpu().sum() / len(labels)\n\n \n loss /= data_loader.size['val']\n # accuracy /= len( data_loader )\n accuracy = correct/total\n\n print(\"Avg Loss = {}, Avg Accuracy = {:2%}\".format(loss, accuracy))\n\n"
] |
[
[
"torch.tensor",
"torch.nn.CrossEntropyLoss",
"torch.max"
]
] |
refactoring-ai/Machine-Learning
|
[
"908d35322a06a7b1709d83f731033a939a864c6b"
] |
[
"ml/models/random_forest.py"
] |
[
"from sklearn.ensemble import RandomForestClassifier\n\nfrom configs import CORE_COUNT, SEED\nfrom ml.models.base import SupervisedMLRefactoringModel\n\n\nclass RandomForestRefactoringModel(SupervisedMLRefactoringModel):\n def feature_reduction(self) -> bool:\n return False\n\n def params_to_tune(self):\n return {\n \"max_depth\": [3, 6, 12, 24, None],\n \"max_features\": [\"auto\", \"log2\", None],\n \"min_samples_split\": [2, 3, 4, 5, 10],\n \"bootstrap\": [True, False],\n \"criterion\": [\"gini\", \"entropy\"],\n \"n_estimators\": [10, 50, 100, 150, 200]\n }\n\n def model(self, best_params=None):\n if best_params is not None:\n return RandomForestClassifier(\n random_state=SEED,\n n_jobs=CORE_COUNT,\n max_depth=best_params[\"max_depth\"],\n max_features=best_params[\"max_features\"],\n min_samples_split=best_params[\"min_samples_split\"],\n bootstrap=best_params[\"bootstrap\"],\n criterion=best_params[\"criterion\"],\n n_estimators=best_params[\"n_estimators\"],\n )\n\n return RandomForestClassifier(random_state=SEED)\n"
] |
[
[
"sklearn.ensemble.RandomForestClassifier"
]
] |
Moado/Robotics-ROS
|
[
"c5aca2dffa6c5c9376e1cda8624ed611ffb11ca0"
] |
[
"Homework4/ars_motion_controller_pid/source/ars_motion_controller.py"
] |
[
"#!/usr/bin/env python\n\nimport numpy as np\nfrom numpy import *\n\nimport os\n\n\n# ROS\n\nimport rospy\n\nimport tf_conversions as tf\n\n\n#\nimport ars_lib_helpers\n\n#\nimport ars_pid\n\n\n\n\nclass ArsMotionController:\n\n #######\n\n # References\n #\n flag_set_robot_pose_ref = False\n robot_posi_ref = None\n robot_atti_quat_simp_ref = None\n #\n flag_set_robot_velo_world_ref = False\n robot_velo_lin_world_ref = None\n robot_velo_ang_world_ref = None\n #\n flag_set_robot_velo_cmd_ref = False\n # m/s\n robot_velo_lin_cmd_ref = None\n # rad/s\n robot_velo_ang_cmd_ref = None\n\n\n # Feedback\n #\n flag_set_robot_pose = False\n robot_posi = None\n robot_atti_quat_simp = None\n #\n flag_set_robot_vel_world = False\n robot_velo_lin_world = None\n robot_velo_ang_world = None\n\n\n # Commands\n robot_velo_cmd_time_stamp = rospy.Time(0.0, 0.0)\n robot_velo_lin_cmd = None\n robot_velo_ang_cmd = None\n \n\n # Loops Internal\n\n # Vel loop\n # Not needed!\n #\n #vel_loop_time_stamp_ros = rospy.Time(0.0, 0.0)\n #vel_loop_out_lin_cmd = None\n #vel_loop_out_ang_cmd = None\n \n # Pos loop\n #\n pos_loop_time_stamp_ros = rospy.Time(0.0, 0.0)\n flag_set_pos_loop_out = False\n pos_loop_out_lin_cmd = None\n pos_loop_out_ang_cmd = None\n\n\n # PIDs\n # Pose\n #\n flag_ctr_pos_hor = True\n pos_hor_pid = ars_pid.PID()\n #\n flag_ctr_pos_z = True\n pos_z_pid = ars_pid.PID()\n #\n flag_ctr_att_yaw = True\n att_yaw_pid = ars_pid.PID()\n\n # Velocity\n #\n flag_ctr_vel_lin_hor = True\n vel_lin_hor_pid = ars_pid.PID()\n #\n flag_ctr_vel_lin_z = True\n vel_lin_z_pid = ars_pid.PID()\n #\n flag_ctr_vel_ang_z = True\n vel_ang_z_pid = ars_pid.PID()\n\n\n\n\n #########\n\n def __init__(self):\n\n # Commands\n self.robot_velo_cmd_time_stamp = rospy.Time(0.0, 0.0)\n self.robot_velo_lin_cmd = np.zeros((3,), dtype=float)\n self.robot_velo_ang_cmd = np.zeros((1,), dtype=float)\n\n # Feedback\n #\n self.flag_set_robot_pose = False\n self.robot_posi = np.zeros((3,), dtype=float)\n self.robot_atti_quat_simp = ars_lib_helpers.Quaternion.zerosQuatSimp()\n #\n self.flag_set_robot_vel_world = False\n self.robot_velo_lin_world = np.zeros((3,), dtype=float)\n self.robot_velo_ang_world = np.zeros((1,), dtype=float)\n\n # References\n #\n self.flag_set_robot_pose_ref = False\n self.robot_posi_ref = np.zeros((3,), dtype=float)\n self.robot_atti_quat_simp_ref = ars_lib_helpers.Quaternion.zerosQuatSimp()\n #\n self.flag_set_robot_velo_world_ref = False\n self.robot_velo_lin_world_ref = np.zeros((3,), dtype=float)\n self.robot_velo_ang_world_ref = np.zeros((1,), dtype=float)\n #\n self.flag_set_robot_velo_cmd_ref = False\n self.robot_velo_lin_cmd_ref = np.zeros((3,), dtype=float)\n self.robot_velo_ang_cmd_ref = np.zeros((1,), dtype=float)\n\n # Internal\n # Vel loop\n # Not needed!\n #self.vel_loop_time_stamp_ros = rospy.Time(0.0, 0.0)\n #self.vel_loop_out_lin_cmd = np.zeros((3,1), dtype=float)\n #self.vel_loop_out_ang_cmd = np.zeros((1,1), dtype=float)\n # Pos loop\n self.pos_loop_time_stamp_ros = rospy.Time(0.0, 0.0)\n self.flag_set_pos_loop_out = False\n self.pos_loop_out_lin_cmd = np.zeros((3,), dtype=float)\n self.pos_loop_out_ang_cmd = np.zeros((1,), dtype=float)\n\n # PIDs\n # Pos\n #\n self.flag_ctr_pos_hor = True\n self.pos_hor_pid = ars_pid.PID()\n self.pos_hor_pid.setGainsPID(gain_P=1.0)\n self.pos_hor_pid.setAntiWindUp(-0.1, 0.1)\n self.pos_hor_pid.setCtrCmdSaturation(-5.0, 5.0)\n #\n self.flag_ctr_pos_z = True\n self.pos_z_pid = ars_pid.PID()\n self.pos_z_pid.setGainsPID(gain_P=1.0)\n self.pos_z_pid.setAntiWindUp(-0.1, 
0.1)\n self.pos_z_pid.setCtrCmdSaturation(-5.0, 5.0)\n #\n self.flag_ctr_att_yaw = True\n self.att_yaw_pid = ars_pid.PID()\n self.att_yaw_pid.setGainsPID(gain_P=1.0)\n self.att_yaw_pid.setAntiWindUp(-0.1, 0.1)\n self.att_yaw_pid.setCtrCmdSaturation(-5.0, 5.0)\n\n # Vel\n #\n self.flag_ctr_vel_lin_hor = True\n self.vel_lin_hor_pid = ars_pid.PID()\n self.vel_lin_hor_pid.setGainsPID(gain_P=1.0)\n self.vel_lin_hor_pid.setAntiWindUp(-0.1, 0.1)\n self.vel_lin_hor_pid.setCtrCmdSaturation(-1.0, 1.0)\n #\n self.flag_ctr_vel_lin_z = True\n self.vel_lin_z_pid = ars_pid.PID()\n self.vel_lin_z_pid.setGainsPID(gain_P=1.0)\n self.vel_lin_z_pid.setAntiWindUp(-0.1, 0.1)\n self.vel_lin_z_pid.setCtrCmdSaturation(-1.0, 1.0)\n #\n self.flag_ctr_vel_ang_z = True\n self.vel_ang_z_pid = ars_pid.PID()\n self.vel_ang_z_pid.setGainsPID(gain_P=1.0)\n self.vel_ang_z_pid.setAntiWindUp(-0.1, 0.1)\n self.vel_ang_z_pid.setCtrCmdSaturation(-1.0, 1.0)\n\n # End\n return\n\n def setRobotPosRef(self, robot_posi_ref, robot_atti_quat_simp_ref):\n\n self.flag_set_robot_pose_ref = True\n\n self.robot_posi_ref = robot_posi_ref\n self.robot_atti_quat_simp_ref = robot_atti_quat_simp_ref\n\n return\n\n def setRobotVelWorldRef(self, lin_vel_world_ref, ang_vel_world_ref):\n\n self.flag_set_robot_velo_world_ref = True\n\n self.robot_velo_lin_world_ref = lin_vel_world_ref\n self.robot_velo_ang_world_ref = ang_vel_world_ref\n\n return\n\n def setRobotVelCmdRef(self, lin_vel_cmd_ref, ang_vel_cmd_ref):\n\n self.flag_set_robot_velo_cmd_ref = True\n\n self.robot_velo_lin_cmd_ref = lin_vel_cmd_ref\n self.robot_velo_ang_cmd_ref = ang_vel_cmd_ref\n\n return\n\n def setRobotPose(self, robot_posi, robot_atti_quat_simp):\n \n self.flag_set_robot_pose = True\n\n self.robot_posi = robot_posi\n self.robot_atti_quat_simp = robot_atti_quat_simp\n\n return\n\n def setRobotVelWorld(self, lin_vel_world, ang_vel_world):\n\n self.flag_set_robot_vel_world = True\n\n self.robot_velo_lin_world = lin_vel_world\n self.robot_velo_ang_world = ang_vel_world\n\n return\n\n def getRobotVeloCmdTimeStamp(self):\n return self.robot_velo_cmd_time_stamp\n\n def getRobotVeloLinCmd(self):\n return self.robot_velo_lin_cmd\n\n def getRobotVeloAngCmd(self):\n return self.robot_velo_ang_cmd\n\n\n def velLoopMotionController(self, time_stamp_ros):\n\n # Time stamp\n self.robot_velo_cmd_time_stamp = time_stamp_ros\n\n # Conversion (from world to robot)\n # Reference\n pos_loop_out_lin_cmd_robot = ars_lib_helpers.Conversions.convertVelLinFromWorldToRobot(self.pos_loop_out_lin_cmd, self.robot_atti_quat_simp)\n pos_loop_out_ang_cmd_robot = ars_lib_helpers.Conversions.convertVelAngFromWorldToRobot(self.pos_loop_out_ang_cmd, self.robot_atti_quat_simp)\n # Feedback\n robot_velo_lin_robot = ars_lib_helpers.Conversions.convertVelLinFromWorldToRobot(self.robot_velo_lin_world, self.robot_atti_quat_simp)\n robot_velo_ang_robot = ars_lib_helpers.Conversions.convertVelAngFromWorldToRobot(self.robot_velo_ang_world, self.robot_atti_quat_simp)\n\n # Initialization\n robot_velo_lin_cmd_ff = np.zeros((3,), dtype=float)\n robot_velo_lin_cmd_fb = np.zeros((3,), dtype=float)\n robot_velo_ang_cmd_ff = np.zeros((1,), dtype=float)\n robot_velo_ang_cmd_fb = np.zeros((1,), dtype=float)\n\n # Velocity Linear horizontal (x & y)\n # Feedforward\n # TODO by student\n # Use: self.robot_velo_lin_cmd_ref[0:2]\n robot_velo_lin_cmd_ff[0:2] = self.robot_velo_lin_cmd_ref[0:2]\n # Feedback\n if(self.flag_ctr_vel_lin_hor and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):\n \n # TODO by student\n # Use: 
pos_loop_out_lin_cmd_robot[0:2], robot_velo_lin_robot[0:2], time_stamp_ros, self.vel_lin_hor_pid\n \n #error = reference - feedback\n error_velo_lin_horizontal = pos_loop_out_lin_cmd_robot[0:2] - robot_velo_lin_robot[0:2]\n\n\n mod_error_velo_lin_horizontal = math.sqrt(error_velo_lin_horizontal[0]**2 + error_velo_lin_horizontal[1]**2) \n \n if mod_error_velo_lin_horizontal != 0:\n\n #normalized_error \n normalized_error_velo_lin_horizontal = error_velo_lin_horizontal / mod_error_velo_lin_horizontal\n \n else: \n\n normalized_error_robot_posi = 0\n\n \n #output = normalized_error * ctr(mod_error)\n ctr_vel_lin = self.vel_lin_hor_pid.call(time_stamp_ros, mod_error_velo_lin_horizontal)\n robot_velo_lin_cmd_fb[0:2] = normalized_error_velo_lin_horizontal * ctr_vel_lin\n \n\n # Total\n # TODO by student\n # Use: robot_velo_lin_cmd_ff[0:2], robot_velo_lin_cmd_fb[0:2]\n self.robot_velo_lin_cmd[0:2] = robot_velo_lin_cmd_ff[0:2] + robot_velo_lin_cmd_fb[0:2]\n\n \n\n # Velocity Linear vertical (z)\n # Feedforward\n # TODO by student\n # Use self.robot_velo_lin_cmd_ref[2]\n robot_velo_lin_cmd_ff[2] = self.robot_velo_lin_cmd_ref[2]\n # Feedback\n if(self.flag_ctr_vel_lin_z and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):\n # TODO by student\n # Use: pos_loop_out_lin_cmd_robot[2], robot_velo_lin_robot[2], time_stamp_ros, self.vel_lin_z_pid\n\n\n #error = reference - feedback\n error_velo_lin_vertical = pos_loop_out_lin_cmd_robot[2] - robot_velo_lin_robot[2]\n\n \n #output = error * ctr(mod_error)\n ctr_vel_lin = self.vel_lin_z_pid.call(time_stamp_ros, error_velo_lin_vertical)\n robot_velo_lin_cmd_fb[2] = ctr_vel_lin\n \n\n # Total\n # TODO by student\n # Use: robot_velo_lin_cmd_ff[2], robot_velo_lin_cmd_fb[2]\n self.robot_velo_lin_cmd[2] = robot_velo_lin_cmd_ff[2] + robot_velo_lin_cmd_fb[2]\n\n\n\n # Velocity Angular (z)\n # Feedforward\n robot_velo_ang_cmd_ff[0] = self.robot_velo_ang_cmd_ref[0]\n # Feedback\n if(self.flag_ctr_vel_ang_z and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):\n error_vel_ang_z = pos_loop_out_ang_cmd_robot - robot_velo_ang_robot\n robot_velo_ang_cmd_fb[0] = self.vel_ang_z_pid.call(time_stamp_ros, error_vel_ang_z)\n # Total\n self.robot_velo_ang_cmd[0] = robot_velo_ang_cmd_ff[0] + robot_velo_ang_cmd_fb[0]\n\n # End\n return\n\n\n def posLoopMotionController(self, time_stamp_ros):\n\n # Time stamp\n self.pos_loop_time_stamp_ros = time_stamp_ros\n\n # Initialization\n pos_loop_out_lin_cmd_ff = np.zeros((3,), dtype=float)\n pos_loop_out_lin_cmd_fb = np.zeros((3,), dtype=float)\n pos_loop_out_ang_cmd_ff = np.zeros((1,), dtype=float)\n pos_loop_out_ang_cmd_fb = np.zeros((1,), dtype=float)\n\n # Linear horizontal (x & y)\n # Feedforward\n # TODO by student\n # Use: self.robot_velo_lin_world_ref[0:2]\n pos_loop_out_lin_cmd_ff[0:2] = self.robot_velo_lin_world_ref[0:2]\n # Feedback\n if(self.flag_ctr_pos_hor and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):\n # TODO by student\n # Use: self.robot_posi_ref[0:2], self.robot_posi[0:2], time_stamp_ros, self.pos_hor_pid\n \n #error = reference - feedback\n error_robot_posi = self.robot_posi_ref[0:2] - self.robot_posi[0:2]\n\n \n mod_error_robot_posi = math.sqrt(error_robot_posi[0]**2 + error_robot_posi[1]**2) \n \n if mod_error_robot_posi !=0:\n #normalized_error \n normalized_error_robot_posi = error_robot_posi / mod_error_robot_posi \n\n else:\n\n normalized_error_robot_posi = 0\n\n \n #output = normalized_error * ctr(mod_error)\n ctr_robot_posi = self.pos_hor_pid.call(time_stamp_ros, 
mod_error_robot_posi)\n pos_loop_out_lin_cmd_fb[0:2] = normalized_error_robot_posi * ctr_robot_posi\n \n\n # Total\n # TODO by student\n # Use: pos_loop_out_lin_cmd_ff[0:2], pos_loop_out_lin_cmd_fb[0:2]\n self.pos_loop_out_lin_cmd[0:2] = pos_loop_out_lin_cmd_ff[0:2] + pos_loop_out_lin_cmd_fb[0:2]\n\n # Linear vertical (z)\n # Feedforward\n # TODO by student\n # Use: self.robot_velo_lin_world_ref[2]\n pos_loop_out_lin_cmd_ff[2] = self.robot_velo_lin_world_ref[2]\n # Feedback\n if(self.flag_ctr_pos_z and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):\n # TODO by student\n # Use: self.robot_posi_ref[2], self.robot_posi[2], time_stamp_ros, self.pos_z_pid\n \n \n #error = reference - feedback\n error_pos_loop = self.robot_posi_ref[2] - self.robot_posi[2]\n\n \n #output = error * ctr(mod_error)\n ctr_pos_loop = self.pos_z_pid.call(time_stamp_ros, error_pos_loop)\n pos_loop_out_lin_cmd_fb[2] = ctr_pos_loop\n\n\n # Total\n # TODO by student\n # Use: pos_loop_out_lin_cmd_ff[2], pos_loop_out_lin_cmd_fb[2]\n self.pos_loop_out_lin_cmd[2] = pos_loop_out_lin_cmd_ff[2] + pos_loop_out_lin_cmd_fb[2]\n\n\n\n\n # Angular (z)\n # Feedforward\n pos_loop_out_ang_cmd_ff[0] = self.robot_velo_ang_world_ref[0]\n # Feedback\n if(self.flag_ctr_att_yaw and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):\n error_att_z = ars_lib_helpers.Quaternion.errorDiffFromQuatSimp(self.robot_atti_quat_simp_ref,self.robot_atti_quat_simp)\n pos_loop_out_ang_cmd_fb[0] = self.att_yaw_pid.call(time_stamp_ros, error_att_z)\n # Total\n self.pos_loop_out_ang_cmd[0] = pos_loop_out_ang_cmd_ff[0] + pos_loop_out_ang_cmd_fb[0]\n\n # Flag\n self.flag_set_pos_loop_out = True\n\n # End\n return"
] |
[
[
"numpy.zeros"
]
] |
Gorilla-Lab-SCUT/SS-Conv
|
[
"47d21fdb8f8e02f677201d86295f6ef1c4d1f059"
] |
[
"SS_Conv_lib/ss_conv/sp_ops/tensor.py"
] |
[
"# Modified from https://github.com/traveller59/spconv/tree/v1.1\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\ndef scatter_nd(indices, updates, shape):\r\n \"\"\"pytorch edition of tensorflow scatter_nd.\r\n this function don't contain except handle code. so use this carefully\r\n when indice repeats, don't support repeat add which is supported\r\n in tensorflow.\r\n \"\"\"\r\n ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)\r\n ndim = indices.shape[-1]\r\n output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:]\r\n flatted_indices = indices.view(-1, ndim)\r\n slices = [flatted_indices[:, i] for i in range(ndim)]\r\n slices += [Ellipsis]\r\n ret[slices] = updates.view(*output_shape)\r\n return ret\r\n\r\n\r\nclass SparseTensor(object):\r\n def __init__(self, features, indices, spatial_shape, batch_size, grid=None):\r\n \"\"\"\r\n Args:\r\n grid: pre-allocated grid tensor. should be used when the volume of spatial shape\r\n is very large.\r\n \"\"\"\r\n self.features = features\r\n self.indices = indices \r\n if self.indices.dtype != torch.int32:\r\n self.indices.int()\r\n self.spatial_shape = spatial_shape\r\n self.batch_size = batch_size\r\n self.indice_dict = {}\r\n self.grid = grid\r\n\r\n @property\r\n def spatial_size(self):\r\n return np.prod(self.spatial_shape)\r\n\r\n def find_indice_pair(self, key):\r\n if key is None:\r\n return None \r\n if key in self.indice_dict:\r\n return self.indice_dict[key]\r\n return None\r\n\r\n def dense(self, channels_first=True):\r\n output_shape = [self.batch_size] + list(self.spatial_shape) + [self.features.shape[1]]\r\n res = scatter_nd(self.indices.long(), self.features, output_shape)\r\n if not channels_first:\r\n return res\r\n ndim = len(self.spatial_shape)\r\n trans_params = list(range(0, ndim + 1))\r\n trans_params.insert(1, ndim + 1)\r\n return res.permute(*trans_params).contiguous()\r\n\r\n def get_offsets(self):\r\n offsets = [0]\r\n for i in range(self.batch_size):\r\n is_i = (self.indices[:,0]==i).sum()\r\n offsets.append(is_i.item()+offsets[-1])\r\n offsets = torch.tensor(offsets).int().to(self.features.device).detach()\r\n return offsets\r\n\r\n @property\r\n def sparity(self):\r\n return self.indices.shape[0] / np.prod(self.spatial_shape) / self.batch_size\r\n"
] |
[
[
"torch.zeros",
"numpy.prod",
"torch.tensor"
]
] |
PVSemk/ABAW2020TNT
|
[
"3cf667e0958f411b510c734755da5e30a091df11"
] |
[
"face_alignment.py"
] |
[
"\"\"\"\nCode from\n\"Two-Stream Aural-Visual Affect Analysis in the Wild\"\nFelix Kuhnke and Lars Rumberg and Joern Ostermann\nPlease see https://github.com/kuhnkeF/ABAW2020TNT\n\"\"\"\nimport cv2 as cv\nimport numpy as np\nimport os\n\ndef align_rescale_face(image, M):\n aligned = cv.warpAffine(image, M, (112, 112), flags=cv.INTER_CUBIC, borderValue=0.0)\n return aligned\n\ndef render_img_and_mask(img, mask, frame_nr, render_path, mask_path):\n frame_nr_str = str(frame_nr).zfill(5)\n frame = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n output_filepath = os.path.join(render_path, frame_nr_str + '.jpg')\n cv.imwrite(output_filepath, frame, [int(cv.IMWRITE_JPEG_QUALITY), 95])\n frame_mask = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)\n output_filepath = os.path.join(mask_path, frame_nr_str + '.jpg')\n cv.imwrite(output_filepath, frame_mask, [int(cv.IMWRITE_JPEG_QUALITY), 100])\n\ndef draw_mask(points, image):\n line_type = cv.LINE_8\n left_eyebrow = points[17:22, :]\n right_eyebrow = points[22:27, :]\n nose_bridge = points[28:31, :]\n chin = points[6:11, :]\n mouth_outer = points[48:60, :]\n left_eye = points[36:42, :]\n right_eye = points[42:48, :]\n pts = [np.rint(mouth_outer).reshape(-1, 1, 2).astype(np.int32)]\n cv.polylines(image, pts, True, color=(255, 255, 255), thickness=1, lineType=line_type)\n pts = [np.rint(left_eyebrow).reshape(-1, 1, 2).astype(np.int32)]\n cv.polylines(image, pts, False, color=(223, 223, 223), thickness=1, lineType=line_type)\n pts = [np.rint(right_eyebrow).reshape(-1, 1, 2).astype(np.int32)]\n cv.polylines(image, pts, False, color=(191, 191, 191), thickness=1, lineType=line_type)\n pts = [np.rint(left_eye).reshape(-1, 1, 2).astype(np.int32)]\n cv.polylines(image, pts, True, color=(159, 159, 159), thickness=1, lineType=line_type)\n pts = [np.rint(right_eye).reshape(-1, 1, 2).astype(np.int32)]\n cv.polylines(image, pts, True, color=(127, 127, 127), thickness=1, lineType=line_type)\n pts = [np.rint(nose_bridge).reshape(-1, 1, 2).astype(np.int32)]\n cv.polylines(image, pts, False, color=(63, 63, 63), thickness=1, lineType=line_type)\n pts = [np.rint(chin).reshape(-1, 1, 2).astype(np.int32)]\n cv.polylines(image, pts, False, color=(31, 31, 31), thickness=1, lineType=line_type)\n"
] |
[
[
"numpy.rint"
]
] |
Supreeth-Shetty/Projectathon---Simplified-AI
|
[
"3fc26a58a9370d119811ac4e864af977c21f6c40",
"3fc26a58a9370d119811ac4e864af977c21f6c40",
"3fc26a58a9370d119811ac4e864af977c21f6c40"
] |
[
"src/routes/routes_training.py",
"src/utils/common/prediction_helper.py",
"src/routes/routes_eda.py"
] |
[
"from flask import Blueprint, redirect, url_for, render_template, request, session\nfrom src.constants.model_params import Ridge_Params, Lasso_Params, ElasticNet_Params, RandomForestRegressor_Params, \\\n SVR_params, AdabootRegressor_Params, \\\n GradientBoostRegressor_Params\nfrom src.constants.model_params import KmeansClustering_Params, DbscanClustering_Params, AgglomerativeClustering_Params\nfrom src.constants.model_params import LogisticRegression_Params, SVC_Params, KNeighborsClassifier_Params, \\\n DecisionTreeClassifier_Params, RandomForestClassifier_Params, GradientBoostingClassifier_Params, \\\n AdaBoostClassifier_Params\nfrom src.constants.constants import ACTIVATION_FUNCTIONS, CLASSIFICATION_MODELS, CLUSTERING_MODELS, OPTIMIZERS, \\\n REGRESSION_LOSS, POOLING\nfrom flask.json import jsonify\nfrom src.constants.model_params import DecisionTreeRegressor_Params, LinearRegression_Params\nfrom src.model.custom.classification_models import ClassificationModels\nfrom src.model.custom.regression_models import RegressionModels\nfrom src.model.custom.clustering_models import ClusteringModels\nfrom src.preprocessing.preprocessing_helper import Preprocessing\nfrom src.constants.constants import REGRESSION_MODELS\nfrom src.utils.common.prediction_helper import make_prediction\nfrom src.utils.databases.mysql_helper import MySqlHelper\nfrom werkzeug.utils import secure_filename\nimport os\nfrom src.utils.common.common_helper import get_param_value, load_prediction_result, load_project_model, \\\n read_config, save_prediction_result, save_project_model\nimport pandas as pd\nfrom src.utils.common.data_helper import load_data\nfrom src.model.auto.Auto_classification import ModelTrain_Classification\nfrom src.model.auto.Auto_regression import ModelTrain_Regression\nfrom src.feature_engineering.feature_engineering_helper import FeatureEngineering\nfrom loguru import logger\nfrom from_root import from_root\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, accuracy_score, precision_score, \\\n f1_score, recall_score\nfrom src.utils.common.project_report_helper import ProjectReports\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, Dataset\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom prettytable import PrettyTable\nfrom src.utils.common.plotly_helper import PlotlyHelper\n\napp_training = Blueprint('training', __name__)\n\nconfig_args = read_config(\"./config.yaml\")\n\nmysql = MySqlHelper.get_connection_obj()\n\nlog_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])\nlogger.add(sink=log_path, format=\"[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}\", level=\"INFO\")\n\nUPLOAD_FOLDER = config_args['dir_structure']['upload_folder']\nALLOWED_EXTENSIONS = set(['zip'])\n\n\n@app_training.route('/model_training/<action>', methods=['GET'])\ndef model_training(action):\n try:\n if 'pid' in session:\n df = load_data()\n if df is not None:\n target_column = \"\"\n if session['target_column'] is not None:\n target_column = session['target_column']\n\n target_column = session['target_column']\n cols_ = [col for col in df.columns if col != target_column]\n # Check data contain any categorical independent features\n Categorical_columns = Preprocessing.col_seperator(df.loc[:, cols_], \"Categorical_columns\")\n if len(Categorical_columns.columns) > 0:\n return render_template('model_training/auto_training.html', 
project_type=session['project_type'],\n target_column=session['target_column'], status=\"error\",\n msg=\"Data contain some categorical indepedent features, please perform encoding first\")\n\n \"\"\"Check If Project type is Regression or Classificaion and target Columns is not Selected\"\"\"\n if session['project_type'] != 3 and session['target_column'] is None:\n return redirect('/target-column')\n\n if action == 'help':\n return render_template('model_training/help.html')\n elif action == 'auto_training':\n logger.info('Redirect To Auto Training Page')\n ProjectReports.insert_record_ml('Redirect To Auto Training Page')\n\n if session['project_type'] == 3:\n return render_template('model_training/auto_training.html',\n project_type=session['project_type'],\n target_column=session['target_column'], status=\"error\",\n msg=\"Auto Training is not available for Clustering!!!\")\n\n return render_template('model_training/auto_training.html', project_type=session['project_type'],\n target_column=session['target_column'])\n\n elif action == 'custom_training' or action == 'final_train_model':\n query = f\"\"\" select a.pid ProjectId , a.TargetColumn TargetName, \n a.Model_Name ModelName, \n b.Schedule_date, \n b.schedule_time ,\n a.Model_Trained, \n b.train_status ,\n b.email, \n b.deleted\n from tblProjects as a\n join tblProject_scheduler as b on a.Pid = b.ProjectId where b.ProjectId = '{session.get('project_name')}' \n and b.deleted=0\n \"\"\"\n result = mysql.fetch_one(query)\n\n if result is not None:\n return render_template('scheduler/training_blocker.html')\n\n logger.info('Redirect To Custom Training Page')\n ProjectReports.insert_record_ml('Redirect To Custom Training Page')\n\n try:\n if session['project_type'] == 2:\n return render_template('model_training/classification.html', action=action,\n models=CLASSIFICATION_MODELS)\n elif session['project_type'] == 1:\n return render_template('model_training/regression.html', action=action,\n models=REGRESSION_MODELS)\n elif session['project_type'] == 3:\n return render_template('model_training/clustering.html', action=action,\n models=CLUSTERING_MODELS)\n else:\n return render_template('model_training/custom_training.html')\n except Exception as e:\n logger.error(e)\n return render_template('model_training/custom_training.html')\n else:\n return 'Non-Implemented Action'\n else:\n return redirect('/')\n else:\n return redirect(url_for('/'))\n except Exception as e:\n logger.error('Error in Model Training')\n ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))\n return render_template('500.html', exception=e)\n\n\n@app_training.route('/model_training/<action>', methods=['POST'])\ndef model_training_post(action):\n try:\n if 'pid' in session:\n df = load_data()\n model = None\n range = None\n random_state = None\n if df is not None:\n if action == 'help':\n return render_template('model_training/help.html')\n elif action == 'custom_training':\n try:\n model = request.form['model']\n range = int(request.form['range'])\n\n if model != \"KNeighborsClassifier\" and model != \"SVR\":\n random_state = int(request.form['random_state'])\n\n logger.info('Submitted Custom Training Page')\n ProjectReports.insert_record_ml('Submitted Custom Training Page',\n f\"Model:{model}; Range:{range}; Random_State: {random_state}\")\n\n target = session['target_column']\n if session['project_type'] != 3:\n X = df.drop(target, axis=1)\n y = df[target]\n train_model_fun = None\n X_train, X_test, y_train, y_test = 
FeatureEngineering.train_test_Split(cleanedData=X,\n label=y,\n train_size=range / 100,\n random_state=random_state)\n\n model_params = {}\n if model == \"LinearRegression\":\n Model_Params = LinearRegression_Params\n train_model_fun = RegressionModels.linear_regression_regressor\n elif model == \"Ridge\":\n Model_Params = Ridge_Params\n train_model_fun = RegressionModels.ridge_regressor\n elif model == \"Lasso\":\n Model_Params = Lasso_Params\n train_model_fun = RegressionModels.lasso_regressor\n elif model == \"ElasticNet\":\n Model_Params = ElasticNet_Params\n train_model_fun = RegressionModels.elastic_net_regressor\n elif model == \"DecisionTreeRegressor\":\n Model_Params = DecisionTreeRegressor_Params\n train_model_fun = RegressionModels.decision_tree_regressor\n elif model == \"RandomForestRegressor\":\n Model_Params = RandomForestRegressor_Params\n train_model_fun = RegressionModels.random_forest_regressor\n elif model == \"SVR\":\n Model_Params = SVR_params\n train_model_fun = RegressionModels.support_vector_regressor\n elif model == \"AdaBoostRegressor\":\n Model_Params = AdabootRegressor_Params\n train_model_fun = RegressionModels.ada_boost_regressor\n elif model == \"GradientBoostingRegressor\":\n Model_Params = GradientBoostRegressor_Params\n train_model_fun = RegressionModels.gradient_boosting_regressor\n elif model == \"LogisticRegression\":\n Model_Params = LogisticRegression_Params\n train_model_fun = ClassificationModels.logistic_regression_classifier\n elif model == \"SVC\":\n Model_Params = SVC_Params\n train_model_fun = ClassificationModels.support_vector_classifier\n elif model == \"KNeighborsClassifier\":\n print('here')\n Model_Params = KNeighborsClassifier_Params\n train_model_fun = ClassificationModels.k_neighbors_classifier\n elif model == \"DecisionTreeClassifier\":\n Model_Params = DecisionTreeClassifier_Params\n train_model_fun = ClassificationModels.decision_tree_classifier\n elif model == \"RandomForestClassifier\":\n Model_Params = RandomForestClassifier_Params\n train_model_fun = ClassificationModels.random_forest_classifier\n elif model == \"AdaBoostClassifier\":\n Model_Params = AdaBoostClassifier_Params\n train_model_fun = ClassificationModels.ada_boost_classifier\n elif model == \"GradientBoostClassifier\":\n Model_Params = GradientBoostingClassifier_Params\n train_model_fun = ClassificationModels.gradient_boosting_classifier\n else:\n return 'Non-Implemented Action'\n\n for param in Model_Params:\n model_params[param['name']] = get_param_value(param, request.form[param['name']])\n trained_model = train_model_fun(X_train, y_train, True, **model_params)\n\n \"\"\"Save Trained Model\"\"\"\n save_project_model(trained_model)\n\n reports = [{\"key\": \"Model Name\", \"value\": model},\n {\"key\": \"Data Size\", \"value\": len(df)},\n {\"key\": \"Trained Data Size\", \"value\": len(X_train)},\n {\"key\": \"Test Data Size\", \"value\": len(X_test)}]\n\n scores = []\n # Regression\n if trained_model is not None and session['project_type'] == 1:\n y_pred = trained_model.predict(X_test)\n scores.append({\"key\": \"r2_score\", \"value\": r2_score(y_test, y_pred)})\n scores.append(\n {\"key\": \"mean_absolute_error\", \"value\": mean_absolute_error(y_test, y_pred)})\n scores.append(\n {\"key\": \"mean_squared_error\", \"value\": mean_squared_error(y_test, y_pred)})\n # Model Name Set in table while training\n query = f'''Update tblProjects Set Model_Name=\"{model}\", Model_Trained=0 Where Id=\"{session.get('pid')}\"'''\n mysql.update_record(query)\n\n return 
render_template('model_training/model_result.html', action=action,\n status=\"success\",\n reports=reports, scores=scores, model_params=model_params)\n\n # Classification\n if trained_model is not None and session['project_type'] == 2:\n y_pred = trained_model.predict(X_test)\n scores.append({\"key\": \"Accuracy\", \"value\": accuracy_score(y_test, y_pred)})\n scores.append({\"key\": \"Classes\", \"value\": df[target].unique()})\n scores.append(\n {\"key\": \"Precision\", \"value\": precision_score(y_test, y_pred, average=None)})\n scores.append({\"key\": \"Recall\", \"value\": recall_score(y_test, y_pred, average=None)})\n scores.append({\"key\": \"F1_score\", \"value\": f1_score(y_test, y_pred, average=None)})\n\n # Model Name Set in table while training\n query = f'''Update tblProjects Set Model_Name=\"{model}\", Model_Trained=0 Where Id=\"{session.get('pid')}\"'''\n result = mysql.update_record(query)\n return render_template('model_training/model_result.html', action=action,\n status=\"success\",\n reports=reports, scores=scores, model_params=model_params)\n elif session['project_type'] == 3:\n X = df\n train_model_fun = None\n model_params = {}\n if model == \"KMeans\":\n Model_Params = KmeansClustering_Params\n train_model_fun = ClusteringModels.kmeans_clustering\n elif model == \"DBSCAN\":\n Model_Params = DbscanClustering_Params\n train_model_fun = ClusteringModels.dbscan_clustering\n elif model == \"AgglomerativeClustering\":\n Model_Params = AgglomerativeClustering_Params\n train_model_fun = ClusteringModels.agglomerative_clustering\n else:\n return 'Non-Implemented Action'\n\n for param in Model_Params:\n model_params[param['name']] = get_param_value(param, request.form[param['name']])\n\n trained_model, y_pred = train_model_fun(X, True, **model_params)\n \"\"\"Save Trained Model\"\"\"\n save_project_model(trained_model)\n\n reports = [{\"key\": \"Model Name\", \"value\": model},\n {\"key\": \"Data Size\", \"value\": len(df)},\n {\"key\": \"Train Data Size\", \"value\": len(X)},\n {\"key\": \"Test Data Size\", \"value\": 0}]\n\n scores = []\n\n # Clustering\n if trained_model is not None and session['project_type'] == 3:\n scores.append({\"key\": \"Predicted Classes\",\n \"value\": pd.DataFrame(data=y_pred, columns=['y_pred'])[\n 'y_pred'].unique()})\n\n # Model Name Set in table while training\n query = f'''Update tblProjects Set Model_Name=\"{model}\", Model_Trained=0 Where Id=\"{session.get('pid')}\"'''\n result = mysql.update_record(query)\n return render_template('model_training/model_result.html', action=action,\n status=\"success\",\n reports=reports, scores=scores, model_params=model_params)\n else:\n raise Exception(\"Model Couldn't train, please check parametes\")\n\n except Exception as e:\n logger.error('Error Submitted Custom Training Page')\n ProjectReports.insert_record_ml('Error Submitted Custom Training Page',\n f\"Model:{model}; Range:{range}; Random_State: {random_state}\",\n '', 0, str(e))\n if session['project_type'] == 2:\n return render_template('model_training/classification.html', action=action,\n models=CLASSIFICATION_MODELS, status=\"error\", msg=str(e))\n elif session['project_type'] == 1:\n return render_template('model_training/regression.html', action=action,\n models=REGRESSION_MODELS, status=\"error\", msg=str(e))\n else:\n return render_template('model_training/clustering.html', action=action,\n models=CLUSTERING_MODELS, status=\"error\", msg=str(e))\n\n elif action == \"auto_training\":\n try:\n target = session['target_column']\n if target is 
None:\n return redirect(url_for('/target-column'))\n\n # data_len = len(df)\n # data_len = 10000 if data_len > 10000 else int(len(df) * 0.9)\n\n # df = df.sample(frac=1).loc[:data_len, :]\n trainer = None\n X = df.drop(target, axis=1)\n y = df[target]\n X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,\n label=y,\n train_size=0.75,\n random_state=101)\n if session['project_type'] == 1:\n trainer = ModelTrain_Regression(X_train, X_test, y_train, y_test, True)\n result = trainer.results()\n result = result.to_html()\n return render_template('model_training/auto_training.html', status=\"success\",\n project_type=session['project_type'],\n target_column=session['target_column'], train_done=True,\n result=result)\n\n elif session['project_type'] == 2:\n trainer = ModelTrain_Classification(X_train, X_test, y_train, y_test, True)\n result = trainer.results()\n\n result = result.to_html()\n return render_template('model_training/auto_training.html', status=\"success\",\n project_type=session['project_type'],\n target_column=session['target_column'], train_done=True,\n result=result)\n except Exception as ex:\n return render_template('model_training/auto_training.html', status=\"error\",\n project_type=session['project_type'],\n target_column=session['target_column'], msg=str(ex))\n\n elif action == 'final_train_model':\n try:\n logger.info('Final Train Model')\n ProjectReports.insert_record_ml('Final Train Model')\n query = f'''select Model_Name from tblProjects Where Id=\"{session.get('pid')}\"'''\n model_name = mysql.fetch_one(query)[0]\n\n if session['project_type'] != 3:\n target = session['target_column']\n X = df.drop(target, axis=1)\n y = df[target]\n model = load_project_model()\n if model is None:\n return render_template('model_training/model_result.html', action=action,\n status=\"error\",\n msg=\"Model is not found, please train model again\")\n else:\n model_params = {}\n for key, value in model.get_params().items():\n model_params[key] = value\n if model_name == \"LinearRegression\":\n train_model_fun = RegressionModels.linear_regression_regressor\n elif model_name == \"Ridge\":\n train_model_fun = RegressionModels.ridge_regressor\n elif model_name == \"Lasso\":\n train_model_fun = RegressionModels.lasso_regressor\n elif model_name == \"ElasticNet\":\n train_model_fun = RegressionModels.elastic_net_regressor\n elif model_name == \"DecisionTreeRegressor\":\n train_model_fun = RegressionModels.decision_tree_regressor\n elif model_name == \"RandomForestRegressor\":\n train_model_fun = RegressionModels.random_forest_regressor\n elif model_name == \"SVR\":\n train_model_fun = RegressionModels.support_vector_regressor\n elif model_name == \"AdaBoostRegressor\":\n train_model_fun = RegressionModels.ada_boost_regressor\n elif model_name == \"GradientBoostingRegressor\":\n train_model_fun = RegressionModels.gradient_boosting_regressor\n elif model_name == \"LogisticRegression\":\n train_model_fun = ClassificationModels.logistic_regression_classifier\n elif model_name == \"SVC\":\n train_model_fun = ClassificationModels.support_vector_classifier\n elif model_name == \"KNeighborsClassifier\":\n train_model_fun = ClassificationModels.k_neighbors_classifier\n elif model_name == \"DecisionTreeClassifier\":\n train_model_fun = ClassificationModels.decision_tree_classifier\n elif model_name == \"RandomForestClassifier\":\n train_model_fun = ClassificationModels.random_forest_classifier\n elif model_name == \"AdaBoostClassifier\":\n train_model_fun = 
ClassificationModels.ada_boost_classifier\n elif model_name == \"GradientBoostClassifier\":\n train_model_fun = ClassificationModels.gradient_boosting_classifier\n else:\n return 'Non-Implemented Action'\n\n trained_model = train_model_fun(X, y, True, **model_params)\n\n \"\"\"Save Final Model\"\"\"\n save_project_model(trained_model, 'model.pkl')\n query = f'''Update tblProjects Set Model_Trained=1 Where Id=\"{session.get('pid')}\"'''\n mysql.update_record(query)\n logger.info('Final Training Done')\n ProjectReports.insert_record_ml('Final Training Done')\n\n return render_template('model_training/congrats.html')\n\n elif session['project_type'] == 3:\n X = df\n model = load_project_model()\n if model is None:\n return render_template('model_training/model_result.html', action=action,\n status=\"error\",\n msg=\"Model is not found, please train model again\")\n else:\n model_params = {}\n for key, value in model.get_params().items():\n model_params[key] = value\n if model_name == \"KMeans\":\n train_model_fun = ClusteringModels.kmeans_clustering\n elif model_name == \"DBSCAN\":\n train_model_fun = ClusteringModels.dbscan_clustering\n elif model_name == \"AgglomerativeClustering\":\n train_model_fun = ClusteringModels.agglomerative_clustering\n else:\n return 'Non Implemented mtd'\n\n trained_model, y_pred = train_model_fun(X, True, **model_params)\n\n \"\"\"Save Trained Model\"\"\"\n save_project_model(trained_model, 'model.pkl')\n query = f'''Update tblProjects Set Model_Trained=1 Where Id=\"{session.get('pid')}\"'''\n mysql.update_record(query)\n logger.info('Final Training Done')\n ProjectReports.insert_record_ml('Final Training Done')\n\n return render_template('model_training/congrats.html')\n\n except Exception as e:\n logger.error('Error in Model Training Submit')\n ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))\n render_template('model_training/model_result.html', action=action, status=\"error\",\n msg=\"Model is not found, please train model again\")\n\n if action == \"Scheduled_model\":\n path = os.path.join(from_root(), 'artifacts', 'model_temp.pkl')\n pass\n\n else:\n return \"Non Implemented Method\"\n else:\n logger.critical('DataFrame has no data')\n return redirect('/')\n except Exception as e:\n logger.error('Error in Model Training Submit')\n ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))\n return render_template('500.html', exception=e)\n\n\n@app_training.route('/congrats', methods=['GET', 'POST'])\ndef congrats():\n try:\n if 'pid' in session:\n df = load_data()\n if df is not None:\n target = session['target_column']\n X = df.drop(target, axis=1)\n y = df[target]\n model = load_project_model()\n if model is None:\n return render_template('model_training/model_result.html', status=\"error\",\n msg=\"Model is not found, please train model again\")\n else:\n for key, value in model.get_params():\n exec(key + \"=value\")\n\n logger.info('Loaded Congrats Page')\n ProjectReports.insert_record_ml('Loaded Congrats Page')\n if request.method == \"GET\":\n return render_template('model_training/congrats.html')\n else:\n return render_template('model_training/congrats.html')\n except Exception as e:\n logger.error('Error in Model Training Submit')\n ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))\n return render_template('500.html', exception=e)\n\n\n@app_training.route('/prediction', methods=['GET', 'POST'])\ndef prediction():\n try:\n if 'pid' in session:\n file_path = \"\"\n 
logger.info('Loaded Prediction Page')\n ProjectReports.insert_record_ml('Loaded Prediction Page')\n if request.method == \"GET\":\n is_trained = mysql.fetch_all(\n f\"SELECT * FROM tblProjects WHERE Id ={session.get('pid')} AND Model_Trained=1\")\n if is_trained is None:\n return render_template('model_training/prediction_page.html', status=\"error\",\n msg=\"your model is not trained, please train model first\")\n else:\n return render_template('model_training/prediction_page.html', status=\"success\")\n else:\n try:\n\n f = request.files['file']\n ALLOWED_EXTENSIONS = ['csv', 'tsv', 'json']\n msg = \"\"\n if len(request.files) == 0:\n msg = 'Please select a file to upload'\n elif f.filename.strip() == '':\n msg = 'Please select a file to upload'\n elif f.filename.rsplit('.', 1)[1].lower() not in ALLOWED_EXTENSIONS:\n msg = 'This file format is not allowed, please select mentioned one'\n\n if msg:\n logger.error(msg)\n return render_template('model_training/prediction_page.html', status=\"error\", msg=msg)\n\n filename = secure_filename(f.filename)\n file_path = os.path.join(config_args['dir_structure']['upload_folder'], filename)\n f.save(file_path)\n\n if file_path.endswith('.csv'):\n df = pd.read_csv(file_path)\n elif file_path.endswith('.tsv'):\n df = pd.read_csv(file_path, sep='\\t')\n elif file_path.endswith('.json'):\n df = pd.read_json(file_path)\n else:\n msg = 'This file format is currently not supported'\n logger.info(msg)\n return render_template('model_training/prediction_page.html', status=\"error\", msg=msg)\n\n prediction = make_prediction(df)\n data = prediction.to_html()\n\n if len(data) > 0:\n save_prediction_result(prediction)\n return render_template('model_training/prediction_result.html', status=\"success\", data=data)\n else:\n return render_template('model_training/prediction_result.html', status=\"error\",\n msg=\"There is some issue, coudn't perform prediction. 
Please check your data\")\n except Exception as e:\n logger.error('Error in Model Training Submit')\n ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))\n return render_template('model_training/prediction_page.html', status=\"error\", msg=str(e))\n finally:\n if file_path:\n os.remove(file_path)\n else:\n logger.error('Project id not found, redirect to home page')\n ProjectReports.insert_record_ml('Project id not found, redirect to home page', '', '', 0, 'Error')\n return redirect('/')\n except Exception as e:\n logger.error(e)\n return redirect('/')\n\n\n@app_training.route('/download_prediction', methods=['POST'])\ndef download_prediction():\n try:\n return load_prediction_result()\n\n except Exception as e:\n logger.error(e)\n return jsonify({'success': False})\n\n\n@app_training.route('/model_training/ann', methods=['GET'])\ndef ann_training():\n try:\n return render_template('model_training/ann.html', optimizers=OPTIMIZERS,\n activation_functions=ACTIVATION_FUNCTIONS, loss=REGRESSION_LOSS)\n\n except Exception as e:\n logger.error(e)\n return jsonify({'success': False})\n\n\ndef save_neural_network(checkpoint, name='model_temp.pth.tar'):\n path = os.path.join(from_root(), 'artifacts', session.get('project_name'))\n if not os.path.exists(path):\n os.mkdir(path)\n\n file_name = os.path.join(path, name)\n torch.save(checkpoint, file_name)\n\n\ndef load_neural_network(checkpoint, name='model_temp.pth.tar'):\n path = os.path.join(from_root(), 'artifacts', session.get('project_name'))\n if not os.path.exists(path):\n os.mkdir(path)\n\n file_name = os.path.join(path, name)\n torch.save(checkpoint, file_name)\n\n\ndef create_layers(data=None, df=None, feature_map={}, typ=None):\n layers = []\n\n activation = {'ReLU': nn.ReLU(),\n 'ELU': nn.ELU(),\n 'LeakyReLU': nn.LeakyReLU(),\n 'Softmax': nn.Softmax(),\n 'PReLU': nn.PReLU(),\n 'SELU': nn.SELU(),\n 'Tanh': nn.Tanh(),\n 'Softplus': nn.Softplus(),\n 'Softmin': nn.Softmin(),\n 'Sigmoid': nn.Sigmoid(),\n 'RReLU': nn.RReLU(),\n }\n\n infer_in = data[0]['units']\n\n for i in data:\n if i['type'] == 'input':\n in_feature = df.shape[1]\n out_feature = i['units']\n layers.append(nn.Linear(in_features=in_feature, out_features=out_feature))\n layers.append(activation[i['activation']])\n\n if i['type'] == 'linear':\n in_feature = infer_in\n out_feature = i['units']\n layers.append(nn.Linear(in_feature, out_feature))\n layers.append(activation[i['activation']])\n infer_in = out_feature\n\n if i['type'] == 'batch_normalization':\n layers.append(nn.BatchNorm1d(num_features=infer_in))\n\n if i['type'] == 'dropout':\n layers.append(nn.Dropout(p=i['percentage']))\n\n if i['type'] == 'output':\n if typ == 'Regression':\n in_feature = infer_in\n out_feature = 1\n layers.append(nn.Linear(in_features=in_feature, out_features=out_feature))\n\n if typ == 'Classification':\n in_feature = infer_in\n out_feature = len(feature_map.keys())\n layers.append(nn.Linear(in_features=in_feature, out_features=out_feature))\n\n if typ == 'cluestring':\n return 'CLuestring cant be performed using Ann'\n\n return layers\n\n\nclass CustomTrainData(Dataset):\n def __init__(self, train_df, target):\n self.train_df = train_df\n self.target = target\n self.x = torch.from_numpy(self.train_df.to_numpy())\n self.y = torch.from_numpy(self.target.to_numpy())\n self.n_sample = self.train_df.shape[0]\n\n def __getitem__(self, index):\n return self.x[index], self.y[index]\n\n def __len__(self):\n return self.n_sample\n\n\nclass CustomTestData(Dataset):\n def 
__init__(self, test_df, target):\n self.test_df = test_df\n self.target = target\n self.x = torch.from_numpy(self.test_df.to_numpy())\n self.y = torch.from_numpy(self.target.to_numpy())\n self.n_sample = self.test_df.shape[0]\n\n def __getitem__(self, index):\n return self.x[index], self.y[index]\n\n def __len__(self):\n return self.n_sample\n\n\ndef count_parameters(model):\n table = PrettyTable([\"Modules\", \"Parameters\"])\n total_params = 0\n for name, parameter in model.named_parameters():\n if not parameter.requires_grad: continue\n param = parameter.numel()\n table.add_row([name, param])\n total_params += param\n return table, total_params\n\n\ndef trainTestSplit(df, target, size=0.25):\n X = df.drop(target, axis=1)\n y = df[target]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1 - size, random_state=101)\n\n return X_train, X_test, y_train, y_test\n\n\ndef main(Data=None, df=None, target=None, size=None, num_epoch=None, typ=None):\n model_info = {}\n model_metrice = {}\n model_metrice_plot = {}\n feature_map = {}\n if typ == 'Classification':\n for i in enumerate(df[target].unique()):\n feature_map[i[1]] = i[0]\n df[target] = df[target].replace(feature_map)\n model_info['feature_map'] = feature_map\n\n model_info['split_size'] = size\n model_info['batch_size'] = 32\n\n X_train, X_test, y_train, y_test = trainTestSplit(df, target, size=size)\n\n # Data class creation\n trainData = CustomTrainData(X_train, y_train)\n testData = CustomTestData(X_test, y_test)\n\n # Data loader creation\n train_data_loader = DataLoader(trainData, batch_size=32, shuffle=True)\n test_data_loader = DataLoader(testData, batch_size=32)\n\n # Model Creation\n model = nn.Sequential(*create_layers(Data['layerUnits'], X_train, feature_map, typ))\n print(model)\n # Optimizer and Loss ---- > front end\n table, total_params = count_parameters(model)\n\n model_info['table'] = table.get_html_string()\n model_info['total_params'] = total_params\n model_info['optimizer'] = Data['optimizers']\n model_info['loss'] = Data['loss']\n model_info['model'] = list(model)\n\n optimizer_selection = {'Adam': torch.optim.Adam(model.parameters(), lr=float(Data['learningRate'])),\n 'AdaGrad': torch.optim.Adagrad(model.parameters(), lr=float(Data['learningRate'])),\n 'AdaMax': torch.optim.Adamax(model.parameters(), lr=float(Data['learningRate'])),\n 'RMSProps': torch.optim.RMSprop(model.parameters(), lr=float(Data['learningRate']))}\n\n optimizer = optimizer_selection[Data['optimizers']]\n\n if typ == \"Classification\":\n loss_selection_classification = {'BCEWithLogitsLoss': nn.BCEWithLogitsLoss(), 'CrossEntropyLoss': nn.CrossEntropyLoss()}\n loss_func = loss_selection_classification[Data['loss']]\n\n if typ == \"Regression\":\n loss_selection_regression = {'MAE': nn.L1Loss(), 'MSE': nn.MSELoss(), 'Huber Loss': nn.HuberLoss(),\n 'Smoth L1': nn.SmoothL1Loss()}\n loss_func = loss_selection_regression[Data['loss']]\n print(loss_func)\n # Regression\n # Train\n\n if typ == \"Regression\":\n loss_perEpoch = []\n model.train()\n num_epochs = num_epoch\n for epooch in range(num_epochs):\n for batch_idx, data in enumerate(train_data_loader):\n features = data[0].float()\n labels = data[1].float().reshape(features.shape[0],1)\n # print(features.shape,labels.shape)\n optimizer.zero_grad()\n\n output = model(features)\n loss = loss_func(output, labels)\n\n loss.backward()\n optimizer.step()\n\n if batch_idx % 2 == 0:\n loss_perEpoch.append(loss.item())\n print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')\n\n 
model_metrice['train_loss'] = loss_perEpoch[-1]\n model_metrice_plot['train_loss'] = loss_perEpoch\n model_metrice_plot['train_accuracy'] = [x for x in range(len(loss_perEpoch))]\n\n # Test\n model.eval()\n test_loss = []\n\n with torch.no_grad():\n for idx, data in enumerate(test_data_loader):\n features = data[0].float()\n labels = data[1].float().reshape(features.shape[0],1)\n\n output = model(features)\n test_loss.append(loss_func(output, labels).item())\n\n model_metrice['test_loss'] = np.mean(test_loss)\n model_metrice['test_accuracy'] = None\n model_metrice_plot['test_loss'] = test_loss\n model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]\n print(\"Test Loss :\", np.mean(test_loss))\n\n # Classification\n if typ == 'Classification':\n # Train\n loss_perEpoch = []\n train_acc = []\n model.train()\n num_epochs = num_epoch\n for epooch in range(num_epochs):\n for batch_idx, data in enumerate(train_data_loader):\n features = data[0].float()\n labels = data[1]\n # print(features,labels)\n optimizer.zero_grad()\n\n output = model(features)\n loss = loss_func(output, labels)\n\n loss.backward()\n optimizer.step()\n\n if batch_idx % 8 == 0:\n train_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())\n loss_perEpoch.append(loss.item())\n print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')\n\n model_metrice['train_loss'] = loss_perEpoch[-1]\n model_metrice_plot['train_loss'] = loss_perEpoch\n model_metrice_plot['train_accuracy'] = train_acc\n\n # Test\n model.eval()\n test_loss = []\n test_acc = []\n with torch.no_grad():\n for idx, data in enumerate(test_data_loader):\n features = data[0].float()\n labels = data[1]\n\n output = model(features)\n\n test_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())\n test_loss.append(loss_func(output, labels).item())\n\n print(\"Test Loss :\", np.mean(test_loss), \" \", \"Test Accuracy :\", np.mean(test_acc))\n\n model_metrice['test_accuracy'] = np.mean(test_acc)\n model_metrice['test_loss'] = np.mean(test_loss)\n model_metrice_plot['test_loss'] = test_loss\n model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]\n\n return model_info, model_metrice, model_metrice_plot\n\n\n@app_training.route('/model_training/ann', methods=['POST'])\ndef ann_model_training():\n try:\n data = request.get_json(force=True)\n print(data)\n df = load_data()\n target = session['target_column']\n typ = 'Regression' if session['project_type'] == 1 else 'Classification'\n\n model_info, model_metrice, model_metrice_plot = main(data, df, target=target, size=float(data['trainSplitPercent']), num_epoch=int(data['epoch']), typ=typ)\n\n graphJSON = {}\n\n graphJSON['train'] = PlotlyHelper.line(df, x=model_metrice_plot['train_accuracy'], y=model_metrice_plot['train_loss'])\n graphJSON['test'] = PlotlyHelper.line(df, x=model_metrice_plot['test_accuracy'], y=model_metrice_plot['test_loss'])\n \n return render_template('model_training/ann_summary.html', model_info=model_info, model_metrice=model_metrice, status=\"success\", graphJSON=graphJSON)\n\n except Exception as e:\n logger.error(e)\n return jsonify({'success': False})\n\n\n@app_training.route('/model_training/cnn', methods=['GET'])\ndef cnn_training():\n try:\n return render_template('model_training/cnn.html', optimizers=OPTIMIZERS, poolings = POOLING, \n activation_functions=ACTIVATION_FUNCTIONS, loss=REGRESSION_LOSS)\n\n except Exception as e:\n logger.error(e)\n return jsonify({'success': False})\n\n\ndef 
allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app_training.route('/model_training/upload_zip', methods=['POST'])\ndef cnn_model_training():\n try:\n if 'zip_file' not in request.files:\n print('No file part')\n\n file = request.files['zip_file']\n \n if file.filename == '':\n print('No selected file')\n\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(UPLOAD_FOLDER, filename))\n\n return jsonify({'success': True})\n\n except Exception as e:\n logger.error(e)\n return jsonify({'success': False})\n\n\n",
"import os\nfrom flask import session\nfrom src.utils.common.common_helper import load_project_encdoing, load_project_model, load_project_pca, \\\n load_project_scaler, read_config\nfrom loguru import logger\nfrom from_root import from_root\nfrom src.utils.databases.mysql_helper import MySqlHelper\nfrom src.preprocessing.preprocessing_helper import Preprocessing\nfrom src.feature_engineering.feature_engineering_helper import FeatureEngineering\nimport pandas as pd\nimport numpy as np\n\nconfig_args = read_config(\"./config.yaml\")\nlog_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])\nlogger.add(sink=log_path, format=\"[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}\", level=\"INFO\")\n\nmysql = MySqlHelper.get_connection_obj()\n\n\"\"\"[Function to make prediction]\n\"\"\"\n\n\ndef make_prediction(df):\n try:\n\n logger.info(f\"Started Prediction!!1\")\n if df is None:\n logger.info(f\"DataFrame is null\")\n raise Exception(\"Data Frame is None\")\n else:\n query_ = f\"\"\"Select Name, Input,Output,ActionDate from tblProject_Actions_Reports\n Join tblProjectActions on tblProject_Actions_Reports.ProjectActionId=tblProjectActions.Id\n where ProjectId={session['pid']}\"\"\"\n action_performed = mysql.fetch_all(query_)\n print(action_performed)\n\n feature_columns = [col for col in df.columns if col != session['target_column']]\n df = df.loc[:, feature_columns]\n df_org = df\n\n if len(action_performed) > 0:\n for action in action_performed:\n if action[0] == 'Delete Column':\n df = Preprocessing.delete_col(df, action[1].split(\",\"))\n elif action[0] == 'Change Data Type':\n df = FeatureEngineering.change_data_type(df, action[1], action[2])\n elif action[0] == 'Column Name Change':\n df = FeatureEngineering.change_column_name(df, action[1], action[2])\n elif action[0] == 'Encdoing':\n cat_data = Preprocessing.col_seperator(df, 'Categorical_columns')\n num_data = Preprocessing.col_seperator(df, 'Numerical_columns')\n\n encoder = load_project_encdoing()\n # columns=action[1].split(\",\")\n # df_=df.loc[:,columns]\n df_ = encoder.transform(cat_data)\n df = pd.concat([df_, num_data], axis=1)\n elif action[0] == 'Scalling':\n scalar = load_project_scaler()\n columns = df.columns\n df = scalar.transform(df)\n df = pd.DataFrame(df, columns=columns)\n elif action[0] == 'PCA':\n pca = load_project_pca()\n columns = df.columns\n df_ = pca.transform(df)\n df_ = df_[:, :int(action[1])]\n df = pd.DataFrame(df_, columns=[f\"Col_{col + 1}\" for col in np.arange(0, df_.shape[1])])\n elif action[0] == 'Custom Script':\n if action[1] is not None:\n exec(action[1])\n\n model = load_project_model()\n result = model.predict(df)\n df_org.insert(loc=0, column=session['target_column'], value=result)\n return df_org\n\n else:\n pass\n\n return df\n\n except Exception as e:\n logger.info('Error in Prediction ' + str(e))\n raise Exception(e)\n",
"from flask import Blueprint, request, render_template, session, redirect, url_for\nfrom flask.wrappers import Response\nfrom loguru import logger\nfrom src.utils.common.data_helper import load_data\nfrom src.utils.common.plotly_helper import PlotlyHelper\nfrom src.utils.common.project_report_helper import ProjectReports\nimport numpy as np\nfrom src.eda.eda_helper import EDA\nfrom pandas_profiling import ProfileReport\nfrom src.constants.constants import TWO_D_GRAPH_TYPES, TWO_D_GRAPH_TYPES_2\nimport plotly.figure_factory as ff\nimport json\nimport plotly\nfrom src.utils.common.common_helper import immutable_multi_dict_to_str, get_numeric_categorical_columns\nimport os\nfrom from_root import from_root\nimport pandas as pd\n\napp_eda = Blueprint('eda', __name__)\n\n\n@app_eda.route('/eda/<action>')\ndef eda(action):\n try:\n if 'pid' in session:\n df = load_data()\n if df is not None:\n if action == \"data-summary\":\n ProjectReports.insert_record_eda('Redirect To Data Summary')\n summary = EDA.five_point_summary(df)\n data = summary.to_html()\n dtypes = EDA.data_dtype_info(df)\n return render_template('eda/5point.html', data=data, dtypes=dtypes.to_html(), count=len(df),\n column_count=df.shape[1])\n # elif action == \"profiler\":\n # ProjectReports.insert_record_eda('Redirect To Profile Report')\n # return render_template('eda/profiler.html', action=action)\n\n elif action == \"show\":\n ProjectReports.insert_record_eda('Redirect To Show Dataset')\n data = EDA.get_no_records(df, 100)\n data = data.to_html()\n topselected = True\n bottomSelected = False\n selectedCount = 100\n return render_template('eda/showdataset.html', data=data, length=len(df),\n bottomSelected=bottomSelected, topselected=topselected, action=action,\n selectedCount=selectedCount, columns=df.columns)\n elif action == \"missing\":\n ProjectReports.insert_record_eda('Redirect To Missing Value')\n df = EDA.missing_cells_table(df)\n\n if df is not None:\n\n graphJSON = PlotlyHelper.barplot(df, x='Column', y='Missing values')\n pie_graphJSON = PlotlyHelper.pieplot(df, names='Column', values='Missing values',\n title='Missing Values')\n\n data = df.drop('Column', axis=1)\n data = data.to_html()\n return render_template('eda/missing_values.html', action=action, data=data, barplot=graphJSON,\n pieplot=pie_graphJSON, contain_missing=True)\n else:\n return render_template('eda/missing_values.html', action=action, contain_missing=False)\n\n elif action == \"outlier\":\n ProjectReports.insert_record_eda('Redirect To Outlier')\n df = EDA.z_score_outlier_detection(df)\n graphJSON = PlotlyHelper.barplot(df, x='Features', y='Total outliers')\n pie_graphJSON = PlotlyHelper.pieplot(\n df.sort_values(by='Total outliers', ascending=False).loc[: 10 if len(df) > 10 else len(df)-1, :],\n names='Features', values='Total outliers', title='Top 10 Outliers')\n data = df.to_html()\n return render_template('eda/outliers.html', data=data, method='zscore', action=action,\n barplot=graphJSON, pieplot=pie_graphJSON)\n\n elif action == \"correlation\":\n ProjectReports.insert_record_eda('Redirect To Correlation')\n pearson_corr = EDA.correlation_report(df, 'pearson')\n persion_data = list(np.around(np.array(pearson_corr.values), 2))\n fig = ff.create_annotated_heatmap(persion_data, x=list(pearson_corr.columns),\n y=list(pearson_corr.columns), colorscale='Viridis')\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n return render_template('eda/correlation.html', data=graphJSON, columns=list(pearson_corr.columns),\n action=action, 
method='pearson')\n\n elif action == \"plots\":\n ProjectReports.insert_record_eda('Plots')\n num_cols, cat_cols = get_numeric_categorical_columns(df)\n if len(cat_cols) == 0:\n graph_type_list = TWO_D_GRAPH_TYPES_2\n else:\n graph_type_list = TWO_D_GRAPH_TYPES\n\n return render_template('eda/plots.html', columns=list(df.columns), x_list=list(df.columns),\n y_list=num_cols,\n graphs_2d=graph_type_list, action=action, x_column=\"\", y_column=\"\")\n else:\n return render_template('eda/help.html')\n else:\n return redirect('/')\n\n else:\n return redirect(url_for('/'))\n except Exception as e:\n ProjectReports.insert_record_eda(e)\n logger.error(e)\n return render_template('500.html', exception=e)\n\n\n@app_eda.route('/eda/<action>', methods=['POST'])\ndef eda_post(action):\n try:\n if 'pid' in session:\n df = load_data()\n if df is not None:\n graphJSON = None\n if action == \"show\":\n range = request.form['range']\n optradio = request.form['optradio']\n columns_for_list = df.columns\n columns = request.form.getlist('columns')\n input_str = immutable_multi_dict_to_str(request.form)\n ProjectReports.insert_record_eda('Show', input=input_str)\n\n if len(columns) > 0:\n df = df.loc[:, columns]\n\n data = EDA.get_no_records(df, int(range), optradio)\n data = data.to_html()\n topselected = True if optradio == 'top' else False\n bottomSelected = True if optradio == 'bottom' else False\n return render_template('eda/showdataset.html', data=data, length=len(df),\n bottomSelected=bottomSelected, topselected=topselected, action=action,\n selectedCount=range, columns=columns_for_list)\n # elif action == \"profiler\":\n # ProjectReports.insert_record_eda('Download Profile Report')\n #\n # pr = ProfileReport(df, explorative=True, minimal=True,\n # correlations={\"cramers\": {\"calculate\": False}})\n #\n # report_path = os.path.join(from_root(), \"artifacts\", f\"{session.get('id')}_report.html\")\n # pr.to_file(report_path)\n # with open(report_path) as fp:\n # content = fp.read()\n #\n # return Response(\n # content,\n # mimetype=\"text/csv\",\n # headers={\"Content-disposition\": \"attachment; filename=report.html\"})\n\n elif action == \"correlation\":\n method = request.form['method']\n columns = request.form.getlist('columns')\n\n input_str = immutable_multi_dict_to_str(request.form, True)\n ProjectReports.insert_record_eda('Redirect To Correlation', input=input_str)\n\n if method is not None:\n # df=df.loc[:,columns]\n _corr = EDA.correlation_report(df, method)\n if len(columns) == 0:\n columns = _corr.columns\n\n _corr = _corr.loc[:, columns]\n _data = list(np.around(np.array(_corr.values), 2))\n fig = ff.create_annotated_heatmap(_data, x=list(_corr.columns),\n y=list(_corr.index), colorscale='Viridis')\n # fig = ff.create_annotated_heatmap(_data, colorscale='Viridis')\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n return render_template('eda/correlation.html', data=graphJSON,\n columns=list(df.select_dtypes(exclude='object').columns), action=action,\n method=method)\n else:\n return render_template('eda/help.html')\n\n elif action == \"outlier\":\n method = request.form['method']\n print(method)\n lower = 25\n upper = 75\n if method == \"iqr\":\n lower = request.form['lower']\n upper = request.form['upper']\n df = EDA.outlier_detection_iqr(df, int(lower), int(upper))\n print(df)\n else:\n df = EDA.z_score_outlier_detection(df)\n print('missed')\n\n input_str = immutable_multi_dict_to_str(request.form, True)\n ProjectReports.insert_record_eda('Redirect To Outlier', 
input=input_str)\n\n graphJSON = PlotlyHelper.barplot(df, x='Features', y='Total outliers')\n\n pie_graphJSON = PlotlyHelper.pieplot(\n df.sort_values(by='Total outliers', ascending=False).loc[: 10 if len(df) > 10 else len(df)-1,:],\n names='Features', values='Total outliers', title='Top 10 Outliers')\n\n data = df.to_html()\n return render_template('eda/outliers.html', data=data, method=method, action=action, lower=lower,\n upper=upper, barplot=graphJSON, pieplot=pie_graphJSON)\n\n elif action == \"plots\":\n \"\"\"All Polots for all kind of features????\"\"\"\n selected_graph_type = request.form['graph']\n\n input_str = immutable_multi_dict_to_str(request.form)\n ProjectReports.insert_record_eda('Plot', input=input_str)\n num_cols, cat_cols = get_numeric_categorical_columns(df)\n if len(cat_cols) == 0:\n graph_type_list = TWO_D_GRAPH_TYPES_2\n else:\n graph_type_list = TWO_D_GRAPH_TYPES\n\n if selected_graph_type == \"Scatter Plot\":\n x_column = request.form['xcolumn']\n y_column = request.form['ycolumn']\n graphJSON = PlotlyHelper.scatterplot(df, x=x_column, y=y_column, title='Scatter Plot')\n\n elif selected_graph_type == \"Pie Chart\":\n\n x_column = request.form['xcolumn']\n new_df = df.groupby(x_column).count()\n temp_df = pd.DataFrame()\n\n temp_df[x_column] = list(new_df.index)\n temp_df['Count'] = list(new_df.iloc[:, 0])\n\n graphJSON = PlotlyHelper.pieplot(temp_df, names=x_column, values='Count', title='Pie Chart')\n\n elif selected_graph_type == \"Bar Graph\":\n x_column = request.form['xcolumn']\n new_df = df.groupby(x_column).count()\n temp_df = pd.DataFrame()\n\n temp_df[x_column] = list(new_df.index)\n temp_df['Count'] = list(new_df.iloc[:, 0])\n\n graphJSON = PlotlyHelper.barplot(temp_df, x=x_column, y='Count')\n\n elif selected_graph_type == \"Histogram\":\n x_column = request.form['xcolumn']\n graphJSON = PlotlyHelper.histogram(df, x=x_column)\n\n elif selected_graph_type == \"Line Chart\":\n x_column = request.form['xcolumn']\n y_column = request.form['ycolumn']\n graphJSON = PlotlyHelper.line(df, x=x_column, y=y_column)\n\n elif selected_graph_type == \"Box Plot\":\n x_column = request.form['xcolumn']\n y_column = request.form['ycolumn']\n graphJSON = PlotlyHelper.boxplot(df, x=x_column, y=y_column)\n\n elif selected_graph_type == \"Dist Plot\":\n x_column = request.form['xcolumn']\n y_column = request.form['ycolumn']\n hist_data = []\n category_list = list(df[y_column].unique())\n for category in category_list:\n hist_data.append(list(df[df[y_column] == category][x_column]))\n\n graphJSON = PlotlyHelper.create_distplot(hist_data, category_list)\n\n elif selected_graph_type == \"Heat Map\":\n graphJSON = PlotlyHelper.heatmap(df)\n\n return render_template('eda/plots.html', selected_graph_type=selected_graph_type,\n columns=list(df.columns), graphs_2d=graph_type_list,\n action=action, graphJSON=graphJSON)\n else:\n return render_template('eda/help.html')\n else:\n \"\"\"Manage This\"\"\"\n pass\n\n else:\n return redirect(url_for('/'))\n except Exception as e:\n ProjectReports.insert_record_eda(e)\n return render_template('500.html', exception=e)\n\n\n@app_eda.route('/x_y_columns', methods=['GET', 'POST'])\ndef x_y_columns():\n try:\n if 'pid' in session:\n graph_selected = request.args.get('graph_selected')\n df = load_data()\n if df is not None:\n num_cols, cat_cols = get_numeric_categorical_columns(df)\n if graph_selected == \"Bar Graph\":\n return render_template('eda/x_y_columns.html', x_list=list(cat_cols),\n graph_selected=graph_selected)\n elif graph_selected 
== \"Histogram\":\n return render_template('eda/x_y_columns.html', x_list=list(df.columns), y_list=[],\n graph_selected=graph_selected)\n elif graph_selected == \"Scatter Plot\":\n return render_template('eda/x_y_columns.html', x_list=list(num_cols), y_list=list(num_cols),\n graph_selected=graph_selected)\n elif graph_selected == \"Pie Chart\":\n return render_template('eda/x_y_columns.html', x_list=list(cat_cols),\n graph_selected=graph_selected)\n elif graph_selected == \"Line Chart\":\n return render_template('eda/x_y_columns.html', x_list=list(num_cols), y_list=list(num_cols),\n graph_selected=graph_selected)\n elif graph_selected == \"Box Plot\":\n return render_template('eda/x_y_columns.html', x_list=list(cat_cols), y_list=list(num_cols),\n graph_selected=graph_selected)\n elif graph_selected == \"Dist Plot\":\n return render_template('eda/x_y_columns.html', x_list=list(num_cols), y_list=list(cat_cols),\n graph_selected=graph_selected)\n elif graph_selected == \"Heat Map\":\n return render_template('eda/x_y_columns.html', graph_selected=graph_selected)\n else:\n return redirect(url_for('/eda/help'))\n else:\n \"\"\"Manage This\"\"\"\n pass\n else:\n return redirect(url_for('/'))\n except Exception as e:\n ProjectReports.insert_record_eda(e)\n return render_template('500.html', exception=e)\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.SELU",
"torch.nn.LeakyReLU",
"numpy.mean",
"torch.nn.SmoothL1Loss",
"torch.nn.BCEWithLogitsLoss",
"sklearn.metrics.r2_score",
"pandas.read_csv",
"sklearn.metrics.f1_score",
"torch.nn.CrossEntropyLoss",
"torch.nn.Softmax",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score",
"torch.utils.data.DataLoader",
"torch.nn.HuberLoss",
"torch.nn.Tanh",
"torch.save",
"torch.nn.RReLU",
"torch.nn.ReLU",
"pandas.read_json",
"torch.nn.Softplus",
"torch.nn.Softmin",
"sklearn.model_selection.train_test_split",
"torch.argmax",
"sklearn.metrics.recall_score",
"torch.nn.Dropout",
"sklearn.metrics.mean_squared_error",
"torch.nn.MSELoss",
"torch.nn.Sigmoid",
"torch.no_grad",
"torch.nn.L1Loss",
"sklearn.metrics.mean_absolute_error",
"torch.nn.PReLU",
"torch.nn.BatchNorm1d",
"sklearn.metrics.precision_score",
"torch.nn.ELU"
],
[
"pandas.DataFrame",
"numpy.arange",
"pandas.concat"
],
[
"pandas.DataFrame",
"numpy.array"
]
] |
emotive-computing/mosaic_stress_2021
|
[
"be4e0f2e0f0455d97cf6c9b5fd6dac60872d94c7"
] |
[
"tools/common-models/src/metrics/results.py"
] |
[
"# -*- coding: utf-8 -*-\nimport abc\nfrom collections import defaultdict\nfrom math import sqrt\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import pearsonr\nfrom scipy.stats import spearmanr\nfrom sklearn.metrics import accuracy_score, r2_score, mean_squared_error, f1_score, recall_score, precision_score, \\\n average_precision_score, cohen_kappa_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import label_binarize\n\nfrom src.common import utils\nfrom src.configuration.settings_template import Settings\nfrom src.io.read_data_input import Dataset\nfrom src.metrics.metrics import Metrics\nfrom src.metrics.output_columns import RunInfoOutputColumnNames, MetricsOutputColumnNames, \\\n NumExamplesOutputColumnNames, CorrelationOutputColumnNames, TrueVsPredictedNumExamplesOutputColumnNames, \\\n AdditionalMetricsOutputColumnNames, PROBABILITY_COLUMN_NAME_SUFFIX, RegressionMetricsOutputColumnNames, FoldGroupByOutputColumnNames\nfrom src.run.model_run_instance import ModelInfoOnlyInstance\n\n\nclass ResultMetrics(Metrics):\n\n @classmethod\n def get_child_type(cls):\n if Settings.PREDICTION.is_regression():\n return RegressionResultMetrics\n elif Settings.PREDICTION.is_multiclass():\n return MulticlassResultMetrics\n else:\n return ClassificationResultMetrics\n\n def get_child_type_from_instance(self):\n return type(self).get_child_type()\n\n @classmethod\n def get(cls, model_run_instance):\n child_type = cls.get_child_type()\n return child_type(model_run_instance)\n\n @classmethod\n def get_output_column_names(cls, df, include_groupby=False):\n if Settings.SHOW_GROUP_BY_COLUMN_VALUE:\n return cls.get_child_type().get_output_column_names(df, include_groupby)\n return cls.get_child_type().get_output_column_names(df)\n\n # Gets base set of metrics\n @abc.abstractmethod\n def get_metrics(self, y_true, probabilities):\n pass\n\n # Gets set of metrics across all folds\n @abc.abstractmethod\n def get_metrics_per_run_instance(self, y_true, probabilities):\n pass\n\n # Gets metrics per single fold\n def get_metrics_per_fold(self, y_true, probabilities, num_train=None, num_test=None):\n metrics = self.get_metrics(y_true, probabilities)\n if num_train is not None:\n metrics[NumExamplesOutputColumnNames.Num_train_examples.name] = num_train\n if num_test is not None:\n metrics[NumExamplesOutputColumnNames.Num_test_examples.name] = num_test\n if num_train is not None and num_test is not None:\n metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = num_train + num_test\n\n return metrics\n\n @classmethod\n def get_metrics_from_all_predictions(cls, all_predictions):\n return cls.get_child_type().get_metrics_from_all_predictions(all_predictions)\n\n\n @classmethod\n def get_positive_probabilities(cls, probabilities):\n return cls.get_child_type().get_positive_probabilities(probabilities)\n\n\n########################################################################################################################\n\nclass RegressionResultMetrics(ResultMetrics):\n\n def get_metrics_per_run_instance(self, y_true, probabilities):\n metrics = self.get_metrics(y_true, probabilities)\n print(\"Finished running: \", self.model_run_instance)\n return metrics\n\n\n def get_metrics(self, y_true, probabilities):\n metrics = defaultdict()\n\n metrics[RunInfoOutputColumnNames.Model.name] = self.model_run_instance.model_name\n metrics[RunInfoOutputColumnNames.Label.name] = self.model_run_instance.label\n metrics[RunInfoOutputColumnNames.Feature_source.name] = 
self.model_run_instance.feature_source_name\n\n metrics[RegressionMetricsOutputColumnNames.R2_score.name] = r2_score(y_true, probabilities)\n metrics[RegressionMetricsOutputColumnNames.RMSE_score.name] = sqrt(mean_squared_error(y_true, probabilities))\n\n metrics[CorrelationOutputColumnNames.Pearson_correlation.name], metrics[\n CorrelationOutputColumnNames.Pearson_corr_p_value.name] = pearsonr(y_true, probabilities)\n\n metrics[CorrelationOutputColumnNames.Spearman_correlation.name], metrics[\n CorrelationOutputColumnNames.Spearman_corr_p_value.name] = spearmanr(y_true, probabilities)\n\n metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = len(y_true)\n\n print(\"Pearson Correlation: {}\".format(metrics[CorrelationOutputColumnNames.Pearson_correlation.name]))\n print(\"Spearman Correlation: {}\".format(metrics[CorrelationOutputColumnNames.Spearman_correlation.name]))\n return metrics\n\n @classmethod\n def get_metrics_from_all_predictions(cls, all_predictions):\n all_results = pd.DataFrame()\n groups = all_predictions.groupby(\n [RunInfoOutputColumnNames.Model.name, RunInfoOutputColumnNames.Feature_source.name,\n RunInfoOutputColumnNames.Label.name])\n for name, group in groups:\n model_run_instance = ModelInfoOnlyInstance(model_name=name[0], feature_source_name=name[1], label=name[2])\n all_results = all_results.append(\n RegressionResultMetrics(model_run_instance).get_metrics(group.True_value.values,\n group.Predicted_value.values),\n ignore_index=True)\n return all_results\n\n @classmethod\n def get_output_column_names(self, df):\n return RunInfoOutputColumnNames.list_member_names() + \\\n RegressionMetricsOutputColumnNames.list_member_names() + \\\n CorrelationOutputColumnNames.list_member_names() + \\\n NumExamplesOutputColumnNames.get_columns_to_show_in_output(df)\n\n @classmethod\n def get_positive_probabilities(cls, probabilities):\n return probabilities\n\n\n########################################################################################################################\n\n# Find metrics for model / label such as AUC, Accuracy, etc...\nclass ClassificationResultMetrics(ResultMetrics):\n\n def get_predictions_from_probabilities(self, probabilities):\n if not isinstance(self.model_run_instance.label, list):\n if probabilities.ndim == 1:\n return np.argmax(probabilities)\n else:\n return np.argmax(probabilities, axis=1)\n else:\n return np.argmax(probabilities, axis=2)\n\n # Get metrics (like AUROC, etc... 
) based on predictions and probability scores of predictions\n def get_metrics_per_run_instance(self, y_true, probabilities):\n\n # In the case of multi-class prediction (the y label is an array of labels)\n if isinstance(self.model_run_instance.label, list):\n metrics = []\n for idx, lbl in enumerate(self.model_run_instance.label):\n individual_metrics_for_label_class = type(self)(self.model_run_instance.get_new_instance_with_label(lbl))\n individual_metrics_for_label = individual_metrics_for_label_class.get_metrics(y_true[:, idx], probabilities[:, idx], le)\n metrics.append(individual_metrics_for_label)\n else:\n metrics = self.get_metrics(y_true, probabilities)\n\n print(\"Finished running: \", self.model_run_instance)\n return metrics\n\n def get_metrics(self, y_true, probabilities):\n\n predictions = self.get_predictions_from_probabilities(probabilities)\n\n metrics = defaultdict()\n\n metrics[RunInfoOutputColumnNames.Model.name] = self.model_run_instance.model_name\n metrics[RunInfoOutputColumnNames.Label.name] = self.model_run_instance.label\n metrics[RunInfoOutputColumnNames.Feature_source.name] = self.model_run_instance.feature_source_name\n metrics[MetricsOutputColumnNames.Accuracy.name] = accuracy_score(y_true, predictions)\n\n metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = len(y_true)\n\n le = Dataset().get_saved_label_encoder(self.model_run_instance.label)\n\n if le.is_binary_prediction:\n # negative_class_probabilities, positive_class_probabilities = probabilities[:, 0]\n # positive_class_probabilities = probabilities[:, 1]\n negative_class_probabilities, positive_class_probabilities = list(zip(*probabilities))\n metrics[MetricsOutputColumnNames.AUC.name] = roc_auc_score(y_true, list(positive_class_probabilities))\n\n metrics[TrueVsPredictedNumExamplesOutputColumnNames.True_num_pos_examples.name] = len(\n [i for i in y_true if i == 1])\n metrics[TrueVsPredictedNumExamplesOutputColumnNames.True_base_rate.name] = \\\n metrics[TrueVsPredictedNumExamplesOutputColumnNames.True_num_pos_examples.name] / metrics[\n NumExamplesOutputColumnNames.Total_num_examples.name]\n\n metrics[TrueVsPredictedNumExamplesOutputColumnNames.Predicted_num_pos_examples.name] = len(\n [i for i in predictions if i == 1])\n metrics[TrueVsPredictedNumExamplesOutputColumnNames.Predicted_base_rate.name] = \\\n metrics[TrueVsPredictedNumExamplesOutputColumnNames.Predicted_num_pos_examples.name] / metrics[\n NumExamplesOutputColumnNames.Total_num_examples.name]\n\n metrics[AdditionalMetricsOutputColumnNames.F1_score_pos.name] = f1_score(y_true, predictions)\n metrics[AdditionalMetricsOutputColumnNames.Precision_pos.name] = precision_score(y_true, predictions)\n metrics[AdditionalMetricsOutputColumnNames.Recall_pos.name] = recall_score(y_true, predictions)\n\n y_true_neg = utils.get_inverse_binary_values(y_true)\n predictions_neg = utils.get_inverse_binary_values(predictions)\n metrics[AdditionalMetricsOutputColumnNames.F1_score_neg.name] = f1_score(y_true_neg, predictions_neg)\n metrics[AdditionalMetricsOutputColumnNames.Precision_neg.name] = precision_score(y_true_neg,\n predictions_neg)\n metrics[AdditionalMetricsOutputColumnNames.Recall_neg.name] = recall_score(y_true_neg, predictions_neg)\n\n metrics[AdditionalMetricsOutputColumnNames.AUPRC_pos.name] = average_precision_score(y_true,\n positive_class_probabilities)\n\n\n print(\"AUC: {}\".format(metrics[MetricsOutputColumnNames.AUC.name]))\n print(\"AUPRC: {}\".format(metrics[AdditionalMetricsOutputColumnNames.AUPRC_pos.name]))\n\n else:\n 
y_binarized = label_binarize(le.inverse_transform(y_true), le.classes_)\n # metrics[data_config.CORREL_COLUMN_NAME], metrics[data_config.CORREL_P_VALUE_COLUMN_NAME] = pearsonr(y_binarized, probabilities)\n\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n\n # for c in le.classes_:\n # i = le.transform([c])[0]\n # fpr[i], tpr[i], _ = roc_curve(y_binarized[:, i], probabilities[:, i])\n # roc_auc[i] = auc(fpr[i], tpr[i])\n # metrics[MetricsOutputColumnNames.AUC.name + \"_\" + c] = roc_auc[i]\n\n # metrics[MetricsOutputColumnNames.AUC.name] = roc_auc_score(y_binarized, probabilities, average=\"weighted\")\n # metrics[CorrelationOutputColumnNames.Pearson_correlation.name], metrics[\n # CorrelationOutputColumnNames.Pearson_corr_p_value.name] = \"NA\", \"NA\" # pearsonr(y_binarized, probabilities) # TODO\n\n return metrics\n\n @classmethod\n def get_metrics_from_all_predictions(cls, all_predictions):\n all_results = pd.DataFrame()\n groups = all_predictions.groupby(\n [RunInfoOutputColumnNames.Label.name, RunInfoOutputColumnNames.Feature_source.name,\n RunInfoOutputColumnNames.Model.name])\n for name, group in groups:\n label = name[0]\n print(label, \" \" , name[1])\n _, _, le = Dataset().get(label) # TODO fix\n probabilities = np.asarray([group[c + PROBABILITY_COLUMN_NAME_SUFFIX] for c in le.classes_]).T\n model_run_instance = ModelInfoOnlyInstance(model_name=name[2], feature_source_name=name[1], label=name[0])\n metrics = ClassificationResultMetrics(model_run_instance).get_metrics(\n le.transform(group.True_value.values.astype('str')),\n probabilities)\n all_results = all_results.append(metrics, ignore_index=True)\n print()\n return all_results\n\n @classmethod\n def get_output_column_names(cls, df, include_groupby=False):\n\n # lists out all AUC columns in case of more than binary prediction\n auc_columns = [col for col in df.columns if MetricsOutputColumnNames.AUC.name == col]\n\n columns_to_print = RunInfoOutputColumnNames.list_member_names() + \\\n auc_columns + \\\n [MetricsOutputColumnNames.Accuracy.name,\n NumExamplesOutputColumnNames.Total_num_examples.name,\n NumExamplesOutputColumnNames.Num_train_examples.name,\n NumExamplesOutputColumnNames.Num_test_examples.name]\n\n # if binary prediction\n if len(auc_columns) == 1:\n columns_to_print += TrueVsPredictedNumExamplesOutputColumnNames.list_member_names()\n columns_to_print += AdditionalMetricsOutputColumnNames.list_member_names()\n\n if include_groupby:\n group_by_columns = [FoldGroupByOutputColumnNames.Train_Group_By_Value.name, FoldGroupByOutputColumnNames.Test_Group_By_Value.name]\n columns_to_print += group_by_columns\n\n return columns_to_print\n\n @classmethod\n def get_positive_probabilities(cls, probabilities):\n return np.array(probabilities)[:, 1]\n\n\n########################################################################################################################\n\n\n# Find metrics for model / label such as AUC, Accuracy, etc...\nclass MulticlassResultMetrics(ResultMetrics):\n\n def get_predictions_from_probabilities(self, probabilities):\n return np.argmax(probabilities, axis=1) if not isinstance(self.model_run_instance.label, list) else np.argmax(\n probabilities, axis=2)\n\n # Get metrics (like AUROC, etc... 
) based on predictions and probability scores of predictions\n def get_metrics_per_run_instance(self, y_true, probabilities):\n\n # In the case of multi-class prediction (the y label is an array of labels)\n if isinstance(self.model_run_instance.label, list):\n metrics = []\n for idx, lbl in enumerate(self.model_run_instance.label):\n individual_metrics_for_label_class = type(self)(self.model_run_instance.get_new_instance_with_label(lbl))\n individual_metrics_for_label = individual_metrics_for_label_class.get_metrics(y_true[:, idx], probabilities[:, idx], le)\n metrics.append(individual_metrics_for_label)\n else:\n metrics = self.get_metrics(y_true, probabilities)\n\n print(\"Finished running: \", self.model_run_instance)\n return metrics\n\n def get_metrics(self, y_true, probabilities):\n predictions = probabilities\n\n metrics = defaultdict()\n\n metrics[RunInfoOutputColumnNames.Model.name] = self.model_run_instance.model_name\n metrics[RunInfoOutputColumnNames.Label.name] = self.model_run_instance.label\n metrics[RunInfoOutputColumnNames.Feature_source.name] = self.model_run_instance.feature_source_name\n metrics[MetricsOutputColumnNames.Accuracy.name] = accuracy_score(y_true, predictions)\n\n metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = len(y_true)\n\n le = Dataset().get_saved_label_encoder(self.model_run_instance.label)\n\n y_true_binarized = label_binarize(le.inverse_transform(y_true), le.classes_)\n y_pred_binarized = label_binarize(le.inverse_transform(predictions), le.classes_)\n\n metrics[AdditionalMetricsOutputColumnNames.F1_score.name] = f1_score(y_true, predictions, average='weighted')\n metrics[AdditionalMetricsOutputColumnNames.Kappa.name] = cohen_kappa_score(y_true, predictions, weights='linear')\n metrics[AdditionalMetricsOutputColumnNames.AUROC.name] = roc_auc_score(y_true_binarized, y_pred_binarized, average='weighted')\n\n return metrics\n\n @classmethod\n def get_metrics_from_all_predictions(cls, all_predictions):\n all_results = pd.DataFrame()\n groups = all_predictions.groupby(\n [RunInfoOutputColumnNames.Label.name, RunInfoOutputColumnNames.Feature_source.name,\n RunInfoOutputColumnNames.Model.name])\n for name, group in groups:\n label = name[0]\n print(label, \" \" , name[1])\n _, _, le = Dataset().get(label) # TODO fix\n probabilities = np.asarray([group[c + PROBABILITY_COLUMN_NAME_SUFFIX] for c in le.classes_]).T\n model_run_instance = ModelInfoOnlyInstance(model_name=name[2], feature_source_name=name[1], label=name[0])\n metrics = ClassificationResultMetrics(model_run_instance).get_metrics(\n le.transform(group.True_value.values.astype('str')),\n probabilities)\n all_results = all_results.append(metrics, ignore_index=True)\n print()\n return all_results\n\n @classmethod\n def get_output_column_names(cls, df):\n\n # lists out all AUC columns in case of more than binary prediction\n auc_columns = [col for col in df.columns if MetricsOutputColumnNames.AUC.name == col]\n\n columns_to_print = RunInfoOutputColumnNames.list_member_names() + \\\n auc_columns + \\\n [MetricsOutputColumnNames.Accuracy.name,\n AdditionalMetricsOutputColumnNames.F1_score.name,\n AdditionalMetricsOutputColumnNames.Kappa.name,\n AdditionalMetricsOutputColumnNames.AUROC.name,\n NumExamplesOutputColumnNames.Total_num_examples.name]\n\n # # if binary prediction\n # if len(auc_columns) == 1:\n # columns_to_print += TrueVsPredictedNumExamplesOutputColumnNames.list_member_names()\n # columns_to_print += AdditionalMetricsOutputColumnNames.list_member_names()\n\n return 
columns_to_print\n\n @classmethod\n def get_positive_probabilities(cls, probabilities):\n return probabilities\n\n"
] |
[
[
"numpy.array",
"sklearn.metrics.mean_squared_error",
"numpy.asarray",
"pandas.DataFrame",
"scipy.stats.spearmanr",
"scipy.stats.pearsonr",
"sklearn.metrics.accuracy_score",
"numpy.argmax",
"sklearn.metrics.average_precision_score",
"sklearn.metrics.r2_score",
"sklearn.metrics.precision_score",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.cohen_kappa_score",
"sklearn.metrics.recall_score"
]
] |
awslabs/w-lda
|
[
"15eb320faac0570e858c689df6f2c61bcad3010e"
] |
[
"examples/domains/wikitext103_wae.py"
] |
[
"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport os\nimport shutil\n\nimport numpy as np\n\nfrom core import Data\nfrom utils import reverse_dict\nimport scipy.sparse as sparse\nimport nltk\n\n\nclass Wikitext103(Data):\n def __init__(self, batch_size, data_path='', ctx=None, saveto='', **kwargs):\n self.saveto = saveto\n super(Wikitext103, self).__init__(batch_size, data_path, ctx)\n\n def load(self, path='./data/wikitext-103', features='BoW', match_avitm=True):\n if path[:2] == '~/':\n path = os.path.join(os.path.expanduser(path[:2]), path[2:])\n\n ### Specify the file locations\n train_path = path + '/wikitext-103_tra.csr.npz'\n test_path = path + '/wikitext-103_test.csr.npz'\n vocab_path = path + '/vocab.txt'\n\n ### Load train\n train_csr = sparse.load_npz(train_path)\n train = np.array(train_csr.todense()).astype('float32')\n\n ### Load test\n test_csr = sparse.load_npz(test_path)\n test = np.array(test_csr.todense()).astype('float32')\n\n ### load vocab\n ENCODING = \"ISO-8859-1\"\n # ENCODING = \"utf-8\"\n with open(vocab_path, encoding=ENCODING) as f:\n vocab_list = [line.strip('\\n') for line in f]\n\n # construct maps\n vocab2dim = dict(zip(vocab_list, range(len(vocab_list))))\n dim2vocab = reverse_dict(vocab2dim)\n\n return [train, None, test, None, None, None], [None, None, None], [vocab2dim, dim2vocab, None, None]\n\n\nif __name__ == '__main__':\n\n def check_create_dir(dir):\n if os.path.exists(dir): # cleanup existing data folder\n shutil.rmtree(dir)\n os.mkdir(dir)\n\n # create directory for data\n dataset = 'wikitext-103'\n current_dir = os.getcwd()\n\n data_dir = os.path.join(current_dir, \"data\")\n if not os.path.exists(data_dir):\n print('Creating directory:', data_dir)\n os.mkdir(data_dir)\n data_dir = os.path.join(current_dir, \"data\", dataset)\n check_create_dir(data_dir)\n os.chdir(data_dir)\n print('Current directory: ', os.getcwd())\n\n # download data\n os.system(\"curl -O https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip\")\n os.system(\"unzip wikitext-103-v1.zip\")\n\n # parse into documents\n def is_document_start(line):\n if len(line) < 4:\n return False\n if line[0] is '=' and line[-1] is '=':\n if line[2] is not '=':\n return True\n else:\n return False\n else:\n return False\n\n\n def token_list_per_doc(input_dir, token_file):\n lines_list = []\n line_prev = ''\n prev_line_start_doc = False\n with open(os.path.join(input_dir, token_file), 'r', encoding='utf-8') as f:\n for l in f:\n line = l.strip()\n if prev_line_start_doc and line:\n # the previous line should not have been start of a document!\n lines_list.pop()\n lines_list[-1] = lines_list[-1] + ' ' + line_prev\n\n if line:\n if is_document_start(line) and not line_prev:\n lines_list.append(line)\n prev_line_start_doc = True\n else:\n lines_list[-1] = lines_list[-1] + ' ' + line\n prev_line_start_doc = False\n else:\n prev_line_start_doc = False\n line_prev = line\n\n print(\"{} documents parsed!\".format(len(lines_list)))\n return 
lines_list\n\n\n input_dir = os.path.join(data_dir, dataset)\n train_file = 'wiki.train.tokens'\n val_file = 'wiki.valid.tokens'\n test_file = 'wiki.test.tokens'\n train_doc_list = token_list_per_doc(input_dir, train_file)\n val_doc_list = token_list_per_doc(input_dir, val_file)\n test_doc_list = token_list_per_doc(input_dir, test_file)\n\n nltk.download('wordnet')\n from nltk.stem import WordNetLemmatizer\n import re\n\n token_pattern = re.compile(r\"(?u)\\b\\w\\w+\\b\")\n class LemmaTokenizer(object):\n def __init__(self):\n self.wnl = WordNetLemmatizer()\n\n def __call__(self, doc):\n return [self.wnl.lemmatize(t) for t in doc.split() if len(t) >= 2 and re.match(\"[a-z].*\", t)\n and re.match(token_pattern, t)]\n\n\n import time\n import numpy as np\n from sklearn.feature_extraction.text import CountVectorizer\n\n print('Lemmatizing and counting, this may take a few minutes...')\n start_time = time.time()\n vectorizer = CountVectorizer(input='content', analyzer='word', stop_words='english',\n tokenizer=LemmaTokenizer(), max_df=0.8, min_df=3, max_features=20000)\n\n train_vectors = vectorizer.fit_transform(train_doc_list)\n val_vectors = vectorizer.transform(val_doc_list)\n test_vectors = vectorizer.transform(test_doc_list)\n\n vocab_list = vectorizer.get_feature_names()\n vocab_size = len(vocab_list)\n print('vocab size:', vocab_size)\n print('Done. Time elapsed: {:.2f}s'.format(time.time() - start_time))\n\n import scipy.sparse as sparse\n def shuffle_and_dtype(vectors):\n idx = np.arange(vectors.shape[0])\n np.random.shuffle(idx)\n vectors = vectors[idx]\n vectors = sparse.csr_matrix(vectors, dtype=np.float32)\n print(type(vectors), vectors.dtype)\n return vectors\n\n train_vectors = shuffle_and_dtype(train_vectors)\n val_vectors = shuffle_and_dtype(val_vectors)\n test_vectors = shuffle_and_dtype(test_vectors)\n\n with open('vocab.txt', 'w', encoding='utf-8') as f:\n for item in vocab_list:\n f.write(item+'\\n')\n\n sparse.save_npz('wikitext-103_tra.csr.npz', train_vectors)\n sparse.save_npz('wikitext-103_val.csr.npz', val_vectors)\n sparse.save_npz('wikitext-103_test.csr.npz', test_vectors)\n"
] |
[
[
"scipy.sparse.load_npz",
"numpy.random.shuffle",
"numpy.arange",
"scipy.sparse.csr_matrix",
"scipy.sparse.save_npz"
]
] |
kukaiN/modeling_in_python
|
[
"99641bc2be5887c5571fd494f5025efe8a0441bb"
] |
[
"corona_model/start_here.py"
] |
[
"import model_framework\nimport platform\nimport statfile\nimport copy\nimport fileRelated\nimport pandas as pd\nimport experiment as experiment\nimport main_config\nfrom pathlib import Path\n\ndef main():\n \"\"\"intialize and run the model, for indepth detail about the config or how to run the code, go to the github page for this code\"\"\"\n\n\n # you can control for multiple interventions by adding a case:\n # [(modified attr1, newVal), (modified attr2, newVal), ...]\n\n # simulation name --> simulation controlled variable(s)\n # dont use . or - in the simulation name because the names are used to save images, or any symbols below\n modelConfig = main_config.modelConfig\n\n\n R0_controls = {\n \"World\" : [\n (\"DynamicCapacity\", False),\n ],\n \"Infection\" : [\n (\"baseP\" , 1.25),\n (\"SeedNumber\", 100),\n ],\n \"HybridClass\":[\n (\"ChangedSeedNumber\", 10),\n ],\n }\n # this overrides the previous experiments, since base_p is being chnaged\n R0_controls = {\n \"World\" : [\n (\"DynamicCapacity\", False),\n ],\n \"HybridClass\":[\n (\"ChangedSeedNumber\", 10),\n ],\n }\n\n\n\n def cross_scenarios(scenario1, scenario2):\n experiments = {}\n for keyname, experiment1 in scenario1.items():\n for screenname, screen in scenario2.items():\n experiment_name = screenname +\"_\" + keyname\n experiments[experiment_name] = screen.copy()\n for key, value in experiment1.items():\n #print(key, value)\n experiments[experiment_name][key] = value.copy()\n return copy.deepcopy(experiments)\n\n def print_nicely(experiment_scenarios):\n for ex_name, ex_config in experiment_scenarios.items():\n print(\"\\n\",\"*\"*20,\"\\n\", ex_name)\n for ex_config_name, ex_config_list in ex_config.items():\n print(ex_config_name, \":\" ,ex_config_list)\n\n #experiment2 = cross_scenarios(experiment.vaccine3, experiment.low_med)\n #experiment3 =cross_scenarios(experiment.vaccine4, experiment.facemask3)\n experiment1 = experiment.marginals\n experiment2 = experiment.original_3x3\n experiment3 = cross_scenarios(experiment.different_base_p_jump_025, experiment.medium_student_vary_policy)\n experiment4 = cross_scenarios(experiment.medium_student_vary_policy, experiment.off_campus_multiplier)\n experiment5 = experiment.diff_seed_number\n experiment6 = experiment.facemask_param\n #print(len(experiment3))\n #print_nicely(experiment3)\n\n\n\n\n basemodel = {\"basemodel\": {}}\n\n multi_experiments = {\n \"request_1_marginal\": experiment1,#\n \"request_2_3x3\": experiment2,\n \"request_3_diff_base_p\": experiment3,\n \"request_4_fixed_p_diff_offcampusP\": experiment4,\n \"request_5_diff_seed_number\": experiment5,\n \"request_6_facemask_param\": experiment6,\n }\n\n print(\"here are the loaded experiments:\")\n for r_name, exp in multi_experiments.items():\n r_name+=(\" \"*max(0, (40-len(r_name))))\n print(f\"{r_name} with {len(exp)} experiments\")\n\n #multi_experiments = {\"new_request4\": experiment.new_check}\n user_input = input(\"which request # do you want to run? 
0 to run all in one thread\")\n user_input = int(user_input)\n sp_num = [123, 456, 12, 34, 56]\n if (user_input < 0 or user_input > len(multi_experiments)) and user_input not in sp_num:\n print(\"input number does not match experiment number, exiting program\")\n return\n\n\n for sp_index, (request_name, modelConfigs) in enumerate(multi_experiments.items()):\n if ((sp_index == user_input-1) or (user_input == 0) or (user_input==123 and sp_index < 3) or\n (user_input==456 and sp_index >= 3) or (user_input==12 and sp_index < 2) or (user_input==34 and 4>sp_index>1)\n or (user_input==56 and sp_index >= 4)):\n print(sp_index)\n R0Dict = dict()\n InfectedCountDict = dict()\n output_dir = fileRelated.fullPath(request_name, \"outputs\")\n Path(output_dir).mkdir(parents=False, exist_ok=True)\n output_folder = \"outputs/\"+ request_name\n print(request_name)\n for index, (modelName, modelControl) in enumerate(modelConfigs.items()):\n\n print(\"finished\", index)\n configCopy = copy.deepcopy(modelConfig)\n #print(\"*\"*20)\n #print(configCopy[\"Agents\"].keys())\n #print(\"*\"*20)\n #print(f\"started working on initializing the simualtion for {modelName}\")\n for categoryKey, listOfControls in modelControl.items():\n #print(listOfControls)\n for (specificKey, specificValue) in listOfControls:\n if specificKey not in configCopy[categoryKey].keys():\n print(\"error\", specificKey, specificValue, \" was not assigned correctly\")\n\n #return\n else:\n configCopy[categoryKey][specificKey] = specificValue\n\n R0Count, multiCounts = 100, 100\n if index in [0, 1] and False:\n R0Count = 200\n #print(configCopy1\n if index > -1:\n #model_framework.simpleCheck(configCopy, days=10, visuals=True, debug=True, modelName=modelName)\n InfectedCountDict[modelName] = model_framework.multiSimulation(multiCounts, configCopy, days=100, debug=False, modelName=modelName, outputDir=output_folder)\n R0Dict[modelName] = model_framework.R0_simulation(configCopy, R0_controls,R0Count, debug=False, timeSeriesVisual=False, R0Visuals=True, modelName=modelName, outputDir=output_folder)\n\n # the value of the dictionary is ([multiple R0 values], (descriptors, (tuple of useful data like mean and stdev))\n print(InfectedCountDict.items())\n print(R0Dict.items())\n\n if True:\n #for k in R0Dict.keys():\n # R0Dict[k] = [list(R0Dict[k][0]) + [1 for _ in range(98)], R0Dict[k][1]]\n # print(R0Dict)\n\n simulationGeneration = \"0\"\n saveName = \"comparingModels_\"+simulationGeneration\n # reads R0 data\n #fileRelated.mergeR0(R0Dict, fileRelated.fullPath(\"request_5/R0_data.csv\", \"outputs\"))\n\n print(R0Dict)\n if R0Count > 0:\n statfile.comparingBoxPlots(R0Dict, plottedData=\"R0\", saveName=saveName, outputDir=output_folder)\n if multiCounts >0:\n statfile.comparingBoxPlots(InfectedCountDict ,plottedData=\"inf\", saveName=saveName, outputDir=output_folder)\n\n #for key, value in R0Dict.items():\n # if isinstance(R0Dict[key][1], str):\n # R0Dict[key] = value[0]\n # # else do nothing\n # #print(key, value)\n #print(R0Dict)\n # check if dict is not empty\n merged = False\n if merged:\n for k, v in R0Dict.items():\n print(k, len(v))\n if isinstance(value[-1], str) or isinstance(value[-1], tuple):\n R0Dict[k] = v[0]\n sameshape = True\n sizes = []\n for k,v in R0Dict.items():\n sizes.append(len(v[0]))\n\n print(\"size is\",sizes)\n if len(set(sizes)) == 1:\n R0_df = pd.DataFrame(R0Dict)\n fileRelated.save_df_to_csv(fileRelated.fullPath(\"R0_data.csv\", output_folder), R0_df)\n else:\n for specialsize in list(set(sizes)):\n new_dict = dict()\n 
newR0_df = None\n for k, v in R0Dict.items():\n if len(v[0]) == specialsize:\n new_dict[k] = copy.deepcopy(v[0])\n newR0_df = pd.DataFrame(new_dict)\n print(newR0_df)\n print(new_dict)\n fileRelated.save_df_to_csv(fileRelated.fullPath(\"R0_data_len\"+str(specialsize)+\".csv\", output_folder), newR0_df)\n\n else: # never ran after jan 30\n #statfile.generateVisualByLoading(ControlledExperiment, plottedData=\"inf\", saveName=saveName)\n model_framework.createFilledPlot(modelConfig, modelName=\"baseModel\",\n simulationN=3, outputDir=output_folder)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n"
] |
[
[
"pandas.DataFrame"
]
] |
tadeoos/gpt-2
|
[
"32e29f1acca9ade9913b5d0d2b71384c31357eed"
] |
[
"src/model.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.training import HParams\n\ndef default_hparams():\n return HParams(\n n_vocab=0,\n n_ctx=1024,\n n_embd=768,\n n_head=12,\n n_layer=12,\n )\n\ndef shape_list(x):\n \"\"\"Deal with dynamic shape in tensorflow cleanly.\"\"\"\n static = x.shape.as_list()\n dynamic = tf.shape(x)\n return [dynamic[i] if s is None else s for i, s in enumerate(static)]\n\ndef softmax(x, axis=-1):\n x = x - tf.reduce_max(x, axis=axis, keepdims=True)\n ex = tf.exp(x)\n return ex / tf.reduce_sum(ex, axis=axis, keepdims=True)\n\ndef gelu(x):\n return 0.5*x*(1+tf.tanh(np.sqrt(2/np.pi)*(x+0.044715*tf.pow(x, 3))))\n\ndef norm(x, scope, *, axis=-1, epsilon=1e-5):\n \"\"\"Normalize to mean = 0, std = 1, then do a diagonal affine transform.\"\"\"\n with tf.variable_scope(scope):\n n_state = x.shape[-1].value\n g = tf.get_variable('g', [n_state], initializer=tf.constant_initializer(1))\n b = tf.get_variable('b', [n_state], initializer=tf.constant_initializer(0))\n u = tf.reduce_mean(x, axis=axis, keepdims=True)\n s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)\n x = (x - u) * tf.rsqrt(s + epsilon)\n x = x*g + b\n return x\n\ndef split_states(x, n):\n \"\"\"Reshape the last dimension of x into [n, x.shape[-1]/n].\"\"\"\n *start, m = shape_list(x)\n return tf.reshape(x, start + [n, m//n])\n\ndef merge_states(x):\n \"\"\"Smash the last two dimensions of x into a single dimension.\"\"\"\n *start, a, b = shape_list(x)\n return tf.reshape(x, start + [a*b])\n\ndef conv1d(x, scope, nf, *, w_init_stdev=0.02):\n with tf.variable_scope(scope):\n *start, nx = shape_list(x)\n w = tf.get_variable('w', [1, nx, nf], initializer=tf.random_normal_initializer(stddev=w_init_stdev))\n b = tf.get_variable('b', [nf], initializer=tf.constant_initializer(0))\n c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, start+[nf])\n return c\n\ndef attention_mask(nd, ns, *, dtype):\n \"\"\"1's in the lower triangle, counting from the lower right corner.\n\n Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.\n \"\"\"\n i = tf.range(nd)[:,None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n return tf.cast(m, dtype)\n\n\ndef attn(x, scope, n_state, *, past, hparams):\n assert x.shape.ndims == 3 # Should be [batch, sequence, features]\n assert n_state % hparams.n_head == 0\n if past is not None:\n assert past.shape.ndims == 5 # Should be [batch, 2, heads, sequence, features], where 2 is [k, v]\n\n def split_heads(x):\n # From [batch, sequence, features] to [batch, heads, sequence, features]\n return tf.transpose(split_states(x, hparams.n_head), [0, 2, 1, 3])\n\n def merge_heads(x):\n # Reverse of split_heads\n return merge_states(tf.transpose(x, [0, 2, 1, 3]))\n\n def mask_attn_weights(w):\n # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.\n _, _, nd, ns = shape_list(w)\n b = attention_mask(nd, ns, dtype=w.dtype)\n b = tf.reshape(b, [1, 1, nd, ns])\n w = w*b - tf.cast(1e10, w.dtype)*(1-b)\n return w\n\n def multihead_attn(q, k, v):\n # q, k, v have shape [batch, heads, sequence, features]\n w = tf.matmul(q, k, transpose_b=True)\n w = w * tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))\n\n w = mask_attn_weights(w)\n w = softmax(w)\n a = tf.matmul(w, v)\n return a\n\n with tf.variable_scope(scope):\n c = conv1d(x, 'c_attn', n_state*3)\n q, k, v = map(split_heads, tf.split(c, 3, axis=2))\n present = tf.stack([k, v], axis=1)\n if past is not None:\n pk, pv = 
tf.unstack(past, axis=1)\n k = tf.concat([pk, k], axis=-2)\n v = tf.concat([pv, v], axis=-2)\n a = multihead_attn(q, k, v)\n a = merge_heads(a)\n a = conv1d(a, 'c_proj', n_state)\n return a, present\n\n\ndef mlp(x, scope, n_state, *, hparams):\n with tf.variable_scope(scope):\n nx = x.shape[-1].value\n h = gelu(conv1d(x, 'c_fc', n_state))\n h2 = conv1d(h, 'c_proj', nx)\n return h2\n\n\ndef block(x, scope, *, past, hparams):\n with tf.variable_scope(scope):\n nx = x.shape[-1].value\n a, present = attn(norm(x, 'ln_1'), 'attn', nx, past=past, hparams=hparams)\n x = x + a\n m = mlp(norm(x, 'ln_2'), 'mlp', nx*4, hparams=hparams)\n x = x + m\n return x, present\n\ndef past_shape(*, hparams, batch_size=None, sequence=None):\n return [batch_size, hparams.n_layer, 2, hparams.n_head, sequence, hparams.n_embd // hparams.n_head]\n\ndef expand_tile(value, size):\n \"\"\"Add a new axis of given size.\"\"\"\n value = tf.convert_to_tensor(value, name='value')\n ndims = value.shape.ndims\n return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)\n\ndef positions_for(tokens, past_length):\n batch_size = tf.shape(tokens)[0]\n nsteps = tf.shape(tokens)[1]\n return expand_tile(past_length + tf.range(nsteps), batch_size)\n\n\ndef model(hparams, X, past=None, scope='model', reuse=False):\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n results = {}\n batch, sequence = shape_list(X)\n\n wpe = tf.get_variable('wpe', [hparams.n_ctx, hparams.n_embd],\n initializer=tf.random_normal_initializer(stddev=0.01))\n wte = tf.get_variable('wte', [hparams.n_vocab, hparams.n_embd],\n initializer=tf.random_normal_initializer(stddev=0.02))\n past_length = 0 if past is None else tf.shape(past)[-2]\n h = tf.gather(wte, X) + tf.gather(wpe, positions_for(X, past_length))\n\n # Transformer\n presents = []\n pasts = tf.unstack(past, axis=1) if past is not None else [None] * hparams.n_layer\n assert len(pasts) == hparams.n_layer\n for layer, past in enumerate(pasts):\n h, present = block(h, 'h%d' % layer, past=past, hparams=hparams)\n presents.append(present)\n results['present'] = tf.stack(presents, axis=1)\n h = norm(h, 'ln_f')\n\n # Language model loss. Do tokens <n predict token n?\n h_flat = tf.reshape(h, [batch*sequence, hparams.n_embd])\n logits = tf.matmul(h_flat, wte, transpose_b=True)\n logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])\n results['logits'] = logits\n return results\n"
] |
[
[
"tensorflow.exp",
"tensorflow.constant_initializer",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.random_normal_initializer",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.variable_scope",
"numpy.sqrt",
"tensorflow.split",
"tensorflow.contrib.training.HParams",
"tensorflow.range",
"tensorflow.expand_dims",
"tensorflow.compat.v1.variable_scope",
"tensorflow.rsqrt",
"tensorflow.reduce_sum",
"tensorflow.unstack",
"tensorflow.convert_to_tensor",
"tensorflow.reduce_max",
"tensorflow.gather",
"tensorflow.pow",
"tensorflow.reduce_mean",
"tensorflow.square"
]
] |
fregu856/ebms_proposals
|
[
"e3e1cc35d5419ca61e25decb243a0b8bebd0d700"
] |
[
"mdn_cell/mdn_train_K4_fullnet.py"
] |
[
"# camera-ready\n\nfrom datasets import DatasetTrain # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)\nfrom mdn_model_K4 import ToyNet\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport torch.distributions\n\nimport math\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport cv2\n\n# NOTE! change this to not overwrite all log data when you train the model:\nmodel_id = \"mdn_train_K4_fullnet\"\n\nnum_epochs = 75\nbatch_size = 32\nlearning_rate = 0.001\n\ntrain_dataset = DatasetTrain()\n\nnum_train_batches = int(len(train_dataset)/batch_size)\nprint (\"num_train_batches:\", num_train_batches)\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n\nnum_models = 20\nfor i in range(num_models):\n network = ToyNet(model_id + \"_%d\" % i, project_dir=\"/root/ebms_proposals/mdn_cell\").cuda()\n\n K = network.noise_net.K\n print (K)\n\n optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate)\n\n epoch_losses_train = []\n for epoch in range(num_epochs):\n print (\"###########################\")\n print (\"######## NEW EPOCH ########\")\n print (\"###########################\")\n print (\"model: %d/%d | epoch: %d/%d\" % (i+1, num_models, epoch+1, num_epochs))\n\n network.train() # (set in training mode, this affects BatchNorm and dropout)\n batch_losses = []\n for step, (xs, ys) in enumerate(train_loader):\n xs = xs.cuda() # (shape: (batch_size, 3, img_size, img_size))\n ys = ys.cuda().unsqueeze(1) # (shape: (batch_size, 1))\n\n x_features = network.feature_net(xs) # (shape: (batch_size, hidden_dim))\n if epoch < 20:\n ####################################################################\n # make sure we do NOT train the resnet feature extractor:\n ####################################################################\n x_features = x_features.detach()\n ####################################################################\n means, log_sigma2s, weights = network.noise_net(x_features) # (all have shape: (batch_size, K))\n sigmas = torch.exp(log_sigma2s/2.0) # (shape: (batch_size, K))\n\n q_distr = torch.distributions.normal.Normal(loc=means, scale=sigmas)\n q_ys_K = torch.exp(q_distr.log_prob(torch.transpose(ys, 1, 0).unsqueeze(2))) # (shape: (1, batch_size, K))\n q_ys = torch.sum(weights.unsqueeze(0)*q_ys_K, dim=2) # (shape: (1, batch_size))\n q_ys = q_ys.squeeze(0) # (shape: (batch_size))\n\n ########################################################################\n # compute loss:\n ########################################################################\n loss = torch.mean(-torch.log(q_ys))\n\n loss_value = loss.data.cpu().numpy()\n batch_losses.append(loss_value)\n\n ########################################################################\n # optimization step:\n ########################################################################\n optimizer.zero_grad() # (reset gradients)\n loss.backward() # (compute gradients)\n optimizer.step() # (perform optimization step)\n\n # print (\"model: %d/%d | epoch: %d/%d | step: %d/%d | loss: %g\" % (i, num_models-1, epoch+1, num_epochs, step+1, num_train_batches, loss_value))\n\n epoch_loss = np.mean(batch_losses)\n epoch_losses_train.append(epoch_loss)\n with open(\"%s/epoch_losses_train.pkl\" % network.model_dir, \"wb\") as file:\n pickle.dump(epoch_losses_train, file)\n print (\"train loss: 
%g\" % epoch_loss)\n plt.figure(1)\n plt.plot(epoch_losses_train, \"k^\")\n plt.plot(epoch_losses_train, \"k\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.title(\"train loss per epoch\")\n plt.savefig(\"%s/epoch_losses_train.png\" % network.model_dir)\n plt.close(1)\n\n # save the model weights to disk:\n checkpoint_path = network.checkpoints_dir + \"/model_\" + model_id +\"_epoch_\" + str(epoch+1) + \".pth\"\n torch.save(network.state_dict(), checkpoint_path)\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"numpy.mean",
"matplotlib.pyplot.figure",
"torch.distributions.normal.Normal",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.ylabel",
"torch.transpose",
"torch.log",
"torch.exp"
]
] |
kimukook/CLF-CBF-python
|
[
"28a46a4f9abf095e1f1b92e6cc056956caab5374"
] |
[
"cbf_clf_qp.py"
] |
[
"'''\n=====================================\nAuthor : Muhan Zhao\nDate : Feb. 16, 2020\nLocation: UC San Diego, La Jolla, CA\n=====================================\n'''\n\nimport numpy as np\nimport cvxpy as cp\n\n\nclass OptionsClass:\n \"\"\"\n Options Class\n \"\"\"\n\n def __init__(self):\n self.options = None\n self.solverName = 'None'\n\n def set_option(self, key, value):\n try:\n if type(value) is self.options[key][2]:\n self.options[key][0] = value\n else:\n print(f\"The type of value for the keyword '{key}' should be '{self.options[key][2]}'.\")\n except:\n raise ValueError('Incorrect option keyword or type: ' + key)\n\n def get_option(self, key):\n try:\n value = self.options[key][0]\n return value\n except:\n raise ValueError('Incorrect option keyword: ' + key)\n\n def reset_options(self, key):\n try:\n self.options[key] = self.options[key][1]\n except:\n raise ValueError('Incorrect option keyword: ' + key)\n\n\nclass CbfClfQpOptions(OptionsClass):\n def __init__(self):\n OptionsClass.__init__(self)\n self.setup()\n self.solver_name = 'CBF-CLF'\n\n def setup(self):\n self.options = {\n # [Current value, default value, type]\n 'u_max': [None, None, np.ndarray],\n 'u_min': [None, None, np.ndarray],\n 'clf_lambda': [None, 5, float],\n 'cbf_gamma': [None, 5, float],\n 'weight_input': [None, None, np.ndarray],\n 'weight_slack': [None, 2e-2, float],\n }\n\n # def define_slack(self):\n # TODO\n\n\nclass CbfClfQp:\n \"\"\"\n This is the implementation of the vanilla CBF-CLF-QP method. The optimization problem is:\n\n min (u-u_ref).T * H * (u-u_ref) + p * delta**2\n s.t. L_f V(x) + L_g V(x) * u + lambda * V(x) <= delta ---> CLF constraint\n L_f B(x) + L_g B(x) * u + gamma * B(x) >= 0 ---> CBF constraint\n\n Input:\n :param system : The dynamic system of interest, containing CBF, CLF, and their Lie derivatives\n :param x : The current state x\n :param u_ref : The reference control input\n :param slack : The slack activated or not, 1 -> activate while 0 -> not activate\n :param verbose : Show the optimization log or not\n \"\"\"\n def __init__(self, system, option_class):\n if hasattr(system, 'udim'):\n self.udim = system.udim\n else:\n raise KeyError('udim is not given in the system dynamic!')\n\n self.cbf = system.cbf\n\n # todo check lf.lg/cbf clfs symbolic expression and their size!\n self.lf_cbf = system.lf_cbf\n self.lg_cbf = system.lg_cbf\n\n self.clf = system.clf\n self.lf_clf = system.lf_clf\n self.lg_clf = system.lg_clf\n\n # todo take input from the option class\n self.weight_input = np.atleast_2d(option_class.get_option('weight_input'))\n self.weight_slack = np.atleast_2d(option_class.get_option('weight_slack'))\n self.H = None\n self.slack_H = None\n\n # todo\n self.A = None\n self.b = None\n\n # Hyperparameters: CLF <- Lambda & CBF <- Gamma\n self.clf_lambda = option_class.get_option('clf_lambda')\n self.cbf_gamma = option_class.get_option('cbf_gamma')\n\n self.u_max = option_class.get_option('u_max')\n if self.u_max.shape != (self.udim,):\n raise ValueError('The size of u_max should be udim-by-, a one dimensional vector in python.')\n self.u_min = option_class.get_option('u_min')\n if self.u_min.shape != (self.udim,):\n raise ValueError('The size of u_min should be udim-by-, a one dimensional vector in python.')\n\n self.with_slack = None\n\n def cbf_clf_qp(self, x, u_ref=None, with_slack=1, verbose=0):\n \"\"\"\n\n :param x : The current state\n :param u_ref : A real number of 1D vector with shape (udim,)\n :param with_slack: Indicator if there is slack variable\n :param 
verbose : Indicator if QP info is displayed\n :return:\n \"\"\"\n inf = np.inf\n self.with_slack = with_slack\n\n slack = None\n if u_ref is None:\n u_ref = np.zeros(self.udim)\n else:\n if u_ref.shape != (self.udim,):\n raise ValueError(f'u_ref should have the shape size (u_dim,), now it is {u_ref.shape}')\n\n # Read the weight input and build up the matrix H in the cost function\n if self.weight_input.shape == (1, 1):\n # Weight input is a scalar\n self.H = self.weight_input * np.eye(self.udim)\n\n elif self.weight_input.shape == (self.udim, 1):\n # Weight_input is a vector, use it to form the diagonal of the H matrix\n self.H = np.diag(self.weight_input)\n\n elif self.weight_input.shape == (self.udim, self.udim):\n # Weight_input is a udim * udim matrix\n self.H = np.copy(self.weight_input)\n else:\n self.H = np.eye(self.udim)\n\n V = self.clf(x)\n lf_V = self.lf_clf(x)\n lg_V = self.lg_clf(x)\n\n B = self.cbf(x)\n lf_B = self.lf_cbf(x)\n lg_B = self.lg_cbf(x)\n\n if self.with_slack:\n # slack variable is activated\n # Constraints: A [u; slack] <= b\n # LfV + LgV * u + lambda * V <= slack\n # LfB + LgB * u + gamma * B >= 0\n lg_V = np.hstack((lg_V, -np.ones((1, 1))))\n lg_B = np.hstack((-lg_B, np.zeros((1, 1))))\n\n self.A = np.vstack((lg_V, lg_B))\n self.b = np.hstack((-lf_V - self.clf_lambda * V, lf_B + self.cbf_gamma * B))\n\n # make sure that b is just a 1D vector with the shape (udim+1,)\n self.b = np.atleast_2d(self.b)[0]\n\n # Slack -> unconstrained\n u_min = np.hstack((self.u_min, -inf * np.ones(1)))\n u_max = np.hstack((self.u_max, inf * np.ones(1)))\n\n u = cp.Variable(self.udim + 1)\n\n # H_new = [H, 0; 0, p]\n self.slack_H = np.hstack((self.H, np.zeros((1, 1))))\n self.slack_H = np.vstack((self.slack_H, np.hstack((np.zeros((1, 1)), self.weight_slack * np.ones((1, 1))))))\n\n # Cost -> (u-u_ref)' * H_new * (u-u_ref) + p * delta**2\n # -> (1/2) * [u slack]' * H_new * [u slack] - [u slack]' * H_new * [u_ref 0]\n u_ref = np.hstack((u_ref, np.zeros(1)))\n objective = cp.Minimize((1/2) * cp.quad_form(u, self.slack_H) - (self.slack_H @ u_ref).T @ u)\n\n # Constraints: A * u <= b and u_min, u_max\n constraints = [u_min <= u, u <= u_max, self.A @ u <= self.b]\n # constraints = [self.u_min <= u, u <= self.u_max, np.eye(2) @ u <= np.zeros(2)]\n\n problem = cp.Problem(objective, constraints)\n\n problem.solve()\n\n # what if infeasible?\n if problem.status != 'infeasible':\n slack = u.value[-1]\n u = u.value[:self.udim]\n feas = 1\n else:\n u = None\n slack = None\n feas = -1\n\n else:\n # Slack variable is not activated:\n # Constraints: A u <= b\n # LfV + LgV * u + lambda * V <= 0\n # LfB + LgB * u + gamma * B >= 0\n self.A = np.vstack((lg_V, -lg_B))\n # b -> one dimensional vector\n self.b = np.hstack((-lf_V - self.clf_lambda * V, lf_B + self.cbf_gamma * B))\n self.b = np.atleast_2d(self.b)[0]\n\n u = cp.Variable(self.udim)\n\n # Cost -> (u-u_ref)' * H * (u-u_ref) -> (1/2) * u'*H*u - u'*H*u_ref\n objective = cp.Minimize((1/2)*cp.quad_form(u, self.H) - (self.H @ u_ref).T @ u)\n\n # cons: A * u <= b and u_min, u_max\n constraints = [self.u_min <= u, u <= self.u_max, self.A @ x <= self.b]\n\n problem = cp.Problem(objective, constraints)\n\n problem.solve()\n\n if problem.status != 'infeasible':\n u = u.value\n feas = 1\n else:\n u = None\n feas = -1\n\n return u, slack, B, V, feas\n\n\n\n"
] |
[
[
"numpy.zeros",
"numpy.copy",
"numpy.ones",
"numpy.eye",
"numpy.hstack",
"numpy.diag",
"numpy.vstack",
"numpy.atleast_2d"
]
] |
JiazhengChai/synergy_DRL
|
[
"c08e78e5fe39d9d46213e1bf07b8dafc2195b05a"
] |
[
"softlearning/algorithms/sac.py"
] |
[
"from collections import OrderedDict\nfrom numbers import Number\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.training import training_util\n\nfrom .rl_algorithm import RLAlgorithm\n\n\ndef td_target(reward, discount, next_value):\n return reward + discount * next_value\n\n\nclass SAC(RLAlgorithm):\n \"\"\"Soft Actor-Critic (SAC)\n\n References\n ----------\n [1] Tuomas Haarnoja*, Aurick Zhou*, Kristian Hartikainen*, George Tucker,\n Sehoon Ha, Jie Tan, Vikash Kumar, Henry Zhu, Abhishek Gupta, Pieter\n Abbeel, and Sergey Levine. Soft Actor-Critic Algorithms and\n Applications. arXiv preprint arXiv:1812.05905. 2018.\n \"\"\"\n\n def __init__(\n self,\n env,\n policy,\n Qs,\n pool,\n plotter=None,\n tf_summaries=False,\n\n lr=3e-4,\n reward_scale=1.0,\n target_entropy='auto',\n discount=0.99,\n tau=5e-3,\n target_update_interval=1,\n action_prior='uniform',\n reparameterize=False,\n store_extra_policy_info=False,\n\n save_full_state=False,\n **kwargs,\n ):\n \"\"\"\n Args:\n env (`SoftlearningEnv`): Environment used for training.\n policy: A policy function approximator.\n initial_exploration_policy: ('Policy'): A policy that we use\n for initial exploration which is not trained by the algorithm.\n Qs: Q-function approximators. The min of these\n approximators will be used. Usage of at least two Q-functions\n improves performance by reducing overestimation bias.\n pool (`PoolBase`): Replay pool to add gathered samples to.\n plotter (`QFPolicyPlotter`): Plotter instance to be used for\n visualizing Q-function during training.\n lr (`float`): Learning rate used for the function approximators.\n discount (`float`): Discount factor for Q-function updates.\n tau (`float`): Soft value function target update weight.\n target_update_interval ('int'): Frequency at which target network\n updates occur in iterations.\n reparameterize ('bool'): If True, we use a gradient estimator for\n the policy derived using the reparameterization trick. 
We use\n a likelihood ratio based estimator otherwise.\n \"\"\"\n\n super(SAC, self).__init__(**kwargs)\n\n self._env = env\n self._policy = policy\n\n self._Qs = Qs\n self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)\n\n self._pool = pool\n self._plotter = plotter\n self._tf_summaries = tf_summaries\n\n self._policy_lr = lr\n self._Q_lr = lr\n\n self._reward_scale = reward_scale\n self._target_entropy = (\n -np.prod(self._env.action_space.shape)\n if target_entropy == 'auto'\n else target_entropy)\n\n self._discount = discount\n self._tau = tau\n self._target_update_interval = target_update_interval\n self._action_prior = action_prior\n\n self._reparameterize = reparameterize\n self._store_extra_policy_info = store_extra_policy_info\n\n self._save_full_state = save_full_state\n\n observation_shape = self._env.active_observation_shape\n action_shape = self._env.action_space.shape\n\n assert len(observation_shape) == 1, observation_shape\n self._observation_shape = observation_shape\n assert len(action_shape) == 1, action_shape\n self._action_shape = action_shape\n\n self._build()\n\n def _build(self):\n self._training_ops = {}\n\n self._init_global_step()\n self._init_placeholders()\n self._init_actor_update()\n self._init_critic_update()\n\n def train(self, *args, **kwargs):\n \"\"\"Initiate training of the SAC instance.\"\"\"\n\n return self._train(\n self._env,\n self._policy,\n self._pool,\n initial_exploration_policy=self._initial_exploration_policy,\n *args,\n **kwargs)\n\n def _init_global_step(self):\n self.global_step = training_util.get_or_create_global_step()\n self._training_ops.update({\n 'increment_global_step': training_util._increment_global_step(1)\n })\n\n def _init_placeholders(self):\n \"\"\"Create input placeholders for the SAC algorithm.\n\n Creates `tf.placeholder`s for:\n - observation\n - next observation\n - action\n - reward\n - terminals\n \"\"\"\n self._iteration_ph = tf.placeholder(\n tf.int64, shape=None, name='iteration')\n\n self._observations_ph = tf.placeholder(\n tf.float32,\n shape=(None, *self._observation_shape),\n name='observation',\n )\n\n self._next_observations_ph = tf.placeholder(\n tf.float32,\n shape=(None, *self._observation_shape),\n name='next_observation',\n )\n\n self._actions_ph = tf.placeholder(\n tf.float32,\n shape=(None, *self._action_shape),\n name='actions',\n )\n\n self._rewards_ph = tf.placeholder(\n tf.float32,\n shape=(None, 1),\n name='rewards',\n )\n\n self._terminals_ph = tf.placeholder(\n tf.float32,\n shape=(None, 1),\n name='terminals',\n )\n\n if self._store_extra_policy_info:\n self._log_pis_ph = tf.placeholder(\n tf.float32,\n shape=(None, 1),\n name='log_pis',\n )\n self._raw_actions_ph = tf.placeholder(\n tf.float32,\n shape=(None, *self._action_shape),\n name='raw_actions',\n )\n\n def _get_Q_target(self):\n next_actions = self._policy.actions([self._next_observations_ph])\n next_log_pis = self._policy.log_pis(\n [self._next_observations_ph], next_actions)\n\n next_Qs_values = tuple(\n Q([self._next_observations_ph, next_actions])\n for Q in self._Q_targets)\n\n min_next_Q = tf.reduce_min(next_Qs_values, axis=0)\n next_value = min_next_Q - self._alpha * next_log_pis\n\n Q_target = td_target(\n reward=self._reward_scale * self._rewards_ph,\n discount=self._discount,\n next_value=(1 - self._terminals_ph) * next_value)\n\n return Q_target\n\n def _init_critic_update(self):\n \"\"\"Create minimization operation for critic Q-function.\n\n Creates a `tf.optimizer.minimize` operation for updating\n critic 
Q-function with gradient descent, and appends it to\n `self._training_ops` attribute.\n\n See Equations (5, 6) in [1], for further information of the\n Q-function update rule.\n \"\"\"\n Q_target = tf.stop_gradient(self._get_Q_target())\n\n assert Q_target.shape.as_list() == [None, 1]\n\n Q_values = self._Q_values = tuple(\n Q([self._observations_ph, self._actions_ph])\n for Q in self._Qs)\n\n Q_losses = self._Q_losses = tuple(\n tf.losses.mean_squared_error(\n labels=Q_target, predictions=Q_value, weights=0.5)\n for Q_value in Q_values)\n\n self._Q_optimizers = tuple(\n tf.train.AdamOptimizer(\n learning_rate=self._Q_lr,\n name='{}_{}_optimizer'.format(Q._name, i)\n ) for i, Q in enumerate(self._Qs))\n Q_training_ops = tuple(\n tf.contrib.layers.optimize_loss(\n Q_loss,\n self.global_step,\n learning_rate=self._Q_lr,\n optimizer=Q_optimizer,\n variables=Q.trainable_variables,\n increment_global_step=False,\n summaries=((\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\n ) if self._tf_summaries else ()))\n for i, (Q, Q_loss, Q_optimizer)\n in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))\n\n self._training_ops.update({'Q': tf.group(Q_training_ops)})\n\n def _init_actor_update(self):\n \"\"\"Create minimization operations for policy and entropy.\n\n Creates a `tf.optimizer.minimize` operations for updating\n policy and entropy with gradient descent, and adds them to\n `self._training_ops` attribute.\n\n See Section 4.2 in [1], for further information of the policy update,\n and Section 5 in [1] for further information of the entropy update.\n \"\"\"\n\n actions = self._policy.actions([self._observations_ph])\n log_pis = self._policy.log_pis([self._observations_ph], actions)\n\n assert log_pis.shape.as_list() == [None, 1]\n\n log_alpha = self._log_alpha = tf.get_variable(\n 'log_alpha',\n dtype=tf.float32,\n initializer=0.0)\n alpha = tf.exp(log_alpha)\n\n if isinstance(self._target_entropy, Number):\n alpha_loss = -tf.reduce_mean(\n log_alpha * tf.stop_gradient(log_pis + self._target_entropy))\n\n self._alpha_optimizer = tf.train.AdamOptimizer(\n self._policy_lr, name='alpha_optimizer')\n self._alpha_train_op = self._alpha_optimizer.minimize(\n loss=alpha_loss, var_list=[log_alpha])\n\n self._training_ops.update({\n 'temperature_alpha': self._alpha_train_op\n })\n\n self._alpha = alpha\n\n if self._action_prior == 'normal':\n policy_prior = tf.contrib.distributions.MultivariateNormalDiag(\n loc=tf.zeros(self._action_shape),\n scale_diag=tf.ones(self._action_shape))\n policy_prior_log_probs = policy_prior.log_prob(actions)\n elif self._action_prior == 'uniform':\n policy_prior_log_probs = 0.0\n\n Q_log_targets = tuple(\n Q([self._observations_ph, actions])\n for Q in self._Qs)\n min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)\n\n if self._reparameterize:\n policy_kl_losses = (\n alpha * log_pis\n - min_Q_log_target\n - policy_prior_log_probs)\n else:\n raise NotImplementedError\n\n assert policy_kl_losses.shape.as_list() == [None, 1]\n\n policy_loss = tf.reduce_mean(policy_kl_losses)\n\n self._policy_optimizer = tf.train.AdamOptimizer(\n learning_rate=self._policy_lr,\n name=\"policy_optimizer\")\n policy_train_op = tf.contrib.layers.optimize_loss(\n policy_loss,\n self.global_step,\n learning_rate=self._policy_lr,\n optimizer=self._policy_optimizer,\n variables=self._policy.trainable_variables,\n increment_global_step=False,\n summaries=(\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\n ) if self._tf_summaries else ())\n\n 
self._training_ops.update({'policy_train_op': policy_train_op})\n\n def _init_training(self):\n self._update_target(tau=1.0)\n\n def _update_target(self, tau=None):\n tau = tau or self._tau\n\n for Q, Q_target in zip(self._Qs, self._Q_targets):\n source_params = Q.get_weights()\n target_params = Q_target.get_weights()\n Q_target.set_weights([\n tau * source + (1.0 - tau) * target\n for source, target in zip(source_params, target_params)\n ])\n\n def _do_training(self, iteration, batch):\n \"\"\"Runs the operations for updating training and target ops.\"\"\"\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()\n\n def _get_feed_dict(self, iteration, batch):\n \"\"\"Construct TensorFlow feed_dict from sample batch.\"\"\"\n\n feed_dict = {\n self._observations_ph: batch['observations'],\n self._actions_ph: batch['actions'],\n self._next_observations_ph: batch['next_observations'],\n self._rewards_ph: batch['rewards'],\n self._terminals_ph: batch['terminals'],\n }\n\n if self._store_extra_policy_info:\n feed_dict[self._log_pis_ph] = batch['log_pis']\n feed_dict[self._raw_actions_ph] = batch['raw_actions']\n\n if iteration is not None:\n feed_dict[self._iteration_ph] = iteration\n\n return feed_dict\n\n def get_diagnostics(self,\n iteration,\n batch,\n training_paths,\n evaluation_paths):\n \"\"\"Return diagnostic information as ordered dictionary.\n\n Records mean and standard deviation of Q-function and state\n value function, and TD-loss (mean squared Bellman error)\n for the sample batch.\n\n Also calls the `draw` method of the plotter, if plotter defined.\n \"\"\"\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n (Q_values, Q_losses, alpha, global_step) = self._session.run(\n (self._Q_values,\n self._Q_losses,\n self._alpha,\n self.global_step),\n feed_dict)\n\n diagnostics = OrderedDict({\n 'Q-avg': np.mean(Q_values),\n 'Q-std': np.std(Q_values),\n 'Q_loss': np.mean(Q_losses),\n 'alpha': alpha,\n })\n\n policy_diagnostics = self._policy.get_diagnostics(\n batch['observations'])\n diagnostics.update({\n f'policy/{key}': value\n for key, value in policy_diagnostics.items()\n })\n\n if self._plotter:\n self._plotter.draw()\n\n return diagnostics\n\n @property\n def tf_saveables(self):\n saveables = {\n '_policy_optimizer': self._policy_optimizer,\n **{\n f'Q_optimizer_{i}': optimizer\n for i, optimizer in enumerate(self._Q_optimizers)\n },\n '_log_alpha': self._log_alpha,\n }\n\n if hasattr(self, '_alpha_optimizer'):\n saveables['_alpha_optimizer'] = self._alpha_optimizer\n\n return saveables\n"
] |
[
[
"tensorflow.exp",
"tensorflow.reduce_min",
"tensorflow.zeros",
"tensorflow.train.AdamOptimizer",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.group",
"tensorflow.ones",
"numpy.mean",
"tensorflow.python.training.training_util._increment_global_step",
"numpy.std",
"tensorflow.contrib.layers.optimize_loss",
"tensorflow.placeholder",
"tensorflow.get_variable",
"numpy.prod",
"tensorflow.losses.mean_squared_error",
"tensorflow.keras.models.clone_model",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient"
]
] |
sakshamarora1/Road-Fighter-AI
|
[
"9009240aee7af60605a1f878c0e969c35558c51c"
] |
[
"Deep RL/agent.py"
] |
[
"from dqn import DeepQNetwork, ReplayMemory, Transition\nimport torch\nimport numpy as np\n\n\nclass DQNAgent:\n def __init__(self, inputs, n_actions):\n self.brain = DeepQNetwork(inputs, 16, 16, outputNum=n_actions)\n self.target_brain = DeepQNetwork(inputs, 16, 16, outputNum=n_actions)\n self.target_brain.load_state_dict(self.brain.state_dict())\n self.target_brain.eval()\n\n self.set_params()\n self.optimizer = torch.optim.Adam(self.brain.parameters())\n self.memory = ReplayMemory(50000)\n self.action_space = [0, 1]\n\n def set_params(self):\n self.batch_size = 64\n\n self.max_exploration_rate = 1\n self.min_exploration_rate = 0.05\n self.exploration_decay_rate = 0.0005\n\n self.steps_done = 0\n\n def select_action(self, state):\n sample = np.random.random()\n exploration_rate = self.min_exploration_rate + (\n self.max_exploration_rate - self.min_exploration_rate\n ) * np.exp(-self.steps_done * self.exploration_decay_rate)\n\n self.steps_done += 1\n if sample > exploration_rate:\n with torch.no_grad():\n actions = self.brain(state)\n return torch.argmax(actions).item()\n else:\n return np.random.choice(self.action_space)\n\n def learn(self):\n if len(self.memory) < self.batch_size:\n return\n\n self.optimizer.zero_grad()\n\n max_capacity = (\n len(self.memory)\n if len(self.memory) < self.memory.capacity\n else self.memory.capacity\n )\n\n batch = np.random.choice(max_capacity, self.batch_size)\n\n transitions = self.memory.sample(self.batch_size)\n batch = Transition(*zip(*transitions))\n\n non_final_mask = torch.tensor(\n tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.bool,\n )\n non_final_next_states = torch.tensor(\n [s for s in batch.next_state if s is not None]\n )\n\n state_batch = torch.tensor(batch.state)\n action_batch = torch.tensor(batch.action)\n reward_batch = torch.tensor(batch.reward, dtype=torch.float)\n\n state_action_values = self.brain(state_batch).gather(\n 1, action_batch.unsqueeze(-1)\n )\n\n next_state_values = torch.zeros(self.batch_size)\n next_state_values[non_final_mask] = self.target_brain(\n non_final_next_states\n ).max(1)[0]\n\n gamma = 0.99\n expected_state_action_values = (\n gamma * next_state_values + reward_batch / reward_batch.max()\n )\n\n self.loss = torch.nn.MSELoss()(\n expected_state_action_values.unsqueeze(-1), state_action_values\n )\n\n self.optimizer.zero_grad()\n self.loss.backward()\n self.optimizer.step()\n"
] |
[
[
"torch.zeros",
"numpy.random.choice",
"torch.nn.MSELoss",
"torch.no_grad",
"numpy.exp",
"torch.tensor",
"numpy.random.random",
"torch.argmax"
]
] |
FedericoMontana/instrumentum
|
[
"0d07f6503c3c0fc980d349aeb6f47c960a4afe9c"
] |
[
"src/instrumentum/model_tuning/old_wrapper_optuna.py"
] |
[
"import logging\n\nimport optuna\nimport optuna.integration.lightgbm as lgb\nimport pandas as pd\nfrom catboost import CatBoostClassifier\nfrom lightgbm import LGBMClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import (RepeatedStratifiedKFold, StratifiedKFold,\n cross_val_score)\nfrom sklearn.tree import DecisionTreeClassifier\nfrom xgboost import XGBClassifier, XGBRegressor, plot_importance\n\nfrom instrumentum.model_tuning._optuna_dispatchers import optuna_param_disp\n\nlogger = logging.getLogger(__name__)\n\n\ndef _opt_generic_objective(X, y, trial, estimator, cv, metric):\n\n param = optuna_param_disp[estimator.__name__](trial)\n estimator = estimator(**param)\n\n score = cross_val_score(estimator, X=X, y=y, cv=cv, scoring=metric).mean()\n\n trial_n = len(trial.study.trials)\n best_score = (\n score\n if trial_n == 1 or score > trial.study.best_value\n else trial.study.best_value\n )\n\n logger.info(\"Trials: %s, Best Score: %s, Score %s\", trial_n, best_score, score)\n return score\n\n\ndef wrapper_opt(\n X,\n y,\n estimator=None,\n metric=\"roc_auc\",\n n_trials=5,\n verbose=logging.INFO,\n return_fit=True,\n direction=\"maximize\",\n cv_splits=5,\n cv_repeats=1,\n):\n # Our Logger\n logger.setLevel(verbose)\n # Let's turn off the verbosity of optuna\n optuna.logging.set_verbosity(optuna.logging.ERROR)\n\n cv = RepeatedStratifiedKFold(n_splits=cv_splits, n_repeats=cv_repeats)\n estimator = estimator or DecisionTreeClassifier\n\n logger.info(\"Estimator received: %s, trials: %s\\n\", estimator.__name__, n_trials)\n\n study = optuna.create_study(direction=direction)\n study.optimize(\n lambda trial: _opt_generic_objective(\n trial=trial,\n X=X,\n y=y,\n estimator=estimator,\n cv=cv,\n metric=metric,\n ),\n n_trials=n_trials,\n )\n\n estimator = estimator(**study.best_params)\n return_fit and estimator.fit(X, y)\n\n return study.best_trial.value, estimator\n\n\ndef wrapper_opt_lgbm(\n X, y, metric=\"auc\", time_budget=120, verbose=logging.INFO, return_fit=False\n):\n\n # Our Logger\n logger.setLevel(verbose)\n\n # Let's turn off the verbosity of optuna and lighgbm\n optuna.logging.set_verbosity(optuna.logging.ERROR)\n\n no_logger = logging.getLogger(\"sd\")\n no_logger.addHandler(logging.NullHandler())\n lgb.register_logger(no_logger)\n\n def log_trials(std, frz_trial):\n logger.info(\n \"\\nTrials: %s, Iteration Score: %s\", len(std.trials), std.best_value\n )\n\n params = {\n \"objective\": \"binary\",\n \"metric\": metric,\n \"boosting_type\": \"gbdt\",\n \"seed\": 42,\n }\n\n dtrain = lgb.Dataset(X, label=y)\n rkf = RepeatedStratifiedKFold(\n n_splits=10,\n n_repeats=2,\n random_state=42,\n )\n study_tuner = optuna.create_study(direction=\"maximize\")\n\n tuner = lgb.LightGBMTunerCV(\n params,\n dtrain,\n study=study_tuner,\n time_budget=time_budget,\n seed=42,\n optuna_callbacks=[log_trials],\n show_progress_bar=False,\n folds=rkf,\n )\n\n tuner.run()\n\n lgbm = LGBMClassifier(**tuner.best_params)\n return_fit and lgbm.fit(X, y)\n\n return tuner.best_score, lgbm\n"
] |
[
[
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.RepeatedStratifiedKFold"
]
] |
pradeep90/reddit-post-classifier
|
[
"081fd1cda50a0938d1b7f32c3919defbf27bea68"
] |
[
"src/plotting.py"
] |
[
"import datetime\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nPLOT_NBC = False\nPLOT_LR = False\nPLOT_CNN = True\n\n# Learing curve for NBC\nif PLOT_NBC:\n\ttraining_fracs = [0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 1.0]\n\ttraining_precision_at_5 = [1, 1, 1, 1, 1, 1]\n\ttesting_precision_at_5 = [0.77, 0.82, 0.84, 0.85, 0.86, 0.85]\n\n\tplot_name = 'learning_curve_nbc_{}.png'.format(str(datetime.datetime.now()))\n\tfig, ax = plt.subplots()\n\tline1, = ax.plot(training_fracs, training_precision_at_5, label='train')\n\tline2, = ax.plot(training_fracs, testing_precision_at_5, label='test')\n\tax.legend()\n\ttitle='Training and Test Accuracies v.s. Training Fraction'\n\tplt.xlabel('training_fracs')\n\tplt.ylabel('Precision@5')\n\tplt.title(title)\n\tplt.savefig(plot_name)\n\nif PLOT_LR:\n\ttraining_fracs = [0.025, 0.05, 0.075, 0.1, 0.15, 0.2]\n\ttraining_precision_at_5 = [1, 1, 1, 1, 1, 1]\n\ttesting_precision_at_5 = [0.77, 0.82, 0.84, 0.85, 0.86, 0.87]\n\n\tplot_name = 'learning_curve_lr_{}.png'.format(str(datetime.datetime.now()))\n\tfig, ax = plt.subplots()\n\tline1, = ax.plot(training_fracs, training_precision_at_5, label='train')\n\tline2, = ax.plot(training_fracs, testing_precision_at_5, label='test')\n\tax.legend()\n\ttitle='Training and Test Accuracies v.s. Training Fraction'\n\tplt.xlabel('training_fracs')\n\tplt.ylabel('Precision@5')\n\tplt.title(title)\n\tplt.savefig(plot_name)\n\n\nif PLOT_CNN:\n\n\tdata = \"\"\"\n\t80000/80000 [==============================] - 221s 3ms/step - loss: 6.6233 - acc: 0.0040 - top_k_categorical_accuracy: 0.0174 - val_loss: 6.0892 - val_acc: 0.0098 - val_top_k_categorical_accuracy: 0.0420\n\n\t80000/80000 [==============================] - 221s 3ms/step - loss: 5.5705 - acc: 0.0277 - top_k_categorical_accuracy: 0.0998 - val_loss: 5.2726 - val_acc: 0.0420 - val_top_k_categorical_accuracy: 0.1362\n\t\n\n\t80000/80000 [==============================] - 223s 3ms/step - loss: 4.9008 - acc: 0.0701 - top_k_categorical_accuracy: 0.2075 - val_loss: 4.7563 - val_acc: 0.0891 - val_top_k_categorical_accuracy: 0.2454\n\t\n\n\t80000/80000 [==============================] - 225s 3ms/step - loss: 4.4173 - acc: 0.1218 - top_k_categorical_accuracy: 0.3048 - val_loss: 4.5053 - val_acc: 0.1213 - val_top_k_categorical_accuracy: 0.3004\n\t\n\n\t80000/80000 [==============================] - 224s 3ms/step - loss: 4.0409 - acc: 0.1718 - top_k_categorical_accuracy: 0.3845 - val_loss: 4.3248 - val_acc: 0.1529 - val_top_k_categorical_accuracy: 0.3457\n\t\n\n\t80000/80000 [==============================] - 222s 3ms/step - loss: 3.7427 - acc: 0.2126 - top_k_categorical_accuracy: 0.4449 - val_loss: 4.2171 - val_acc: 0.1756 - val_top_k_categorical_accuracy: 0.3790\n\t\n\n\t80000/80000 [==============================] - 221s 3ms/step - loss: 3.4914 - acc: 0.2529 - top_k_categorical_accuracy: 0.4950 - val_loss: 4.3441 - val_acc: 0.1744 - val_top_k_categorical_accuracy: 0.3719\n\t\n\n\t80000/80000 [==============================] - 221s 3ms/step - loss: 3.2783 - acc: 0.2870 - top_k_categorical_accuracy: 0.5333 - val_loss: 4.1594 - val_acc: 0.1989 - val_top_k_categorical_accuracy: 0.4041\n\t\n\n\t80000/80000 [==============================] - 220s 3ms/step - loss: 3.0868 - acc: 0.3154 - top_k_categorical_accuracy: 0.5703 - val_loss: 4.2744 - val_acc: 0.1933 - val_top_k_categorical_accuracy: 0.3917\n\t\n\n\t80000/80000 [==============================] - 220s 3ms/step - loss: 2.9134 - acc: 0.3444 - top_k_categorical_accuracy: 0.6034 - val_loss: 4.2295 - val_acc: 
0.2097 - val_top_k_categorical_accuracy: 0.4143\n\t\n\n\t80000/80000 [==============================] - 222s 3ms/step - loss: 2.7519 - acc: 0.3720 - top_k_categorical_accuracy: 0.6330 - val_loss: 4.3870 - val_acc: 0.2099 - val_top_k_categorical_accuracy: 0.4190\n\t\n\n\t80000/80000 [==============================] - 222s 3ms/step - loss: 2.6156 - acc: 0.3946 - top_k_categorical_accuracy: 0.6581 - val_loss: 4.4451 - val_acc: 0.2042 - val_top_k_categorical_accuracy: 0.4023\n\t\n\n\t80000/80000 [==============================] - 233s 3ms/step - loss: 2.4792 - acc: 0.4192 - top_k_categorical_accuracy: 0.6820 - val_loss: 4.5719 - val_acc: 0.2061 - val_top_k_categorical_accuracy: 0.4101\n\t\n\n\t80000/80000 [==============================] - 227s 3ms/step - loss: 2.3596 - acc: 0.4394 - top_k_categorical_accuracy: 0.7028 - val_loss: 4.7459 - val_acc: 0.1976 - val_top_k_categorical_accuracy: 0.4000\n\t\n\n\t80000/80000 [==============================] - 243s 3ms/step - loss: 2.2500 - acc: 0.4592 - top_k_categorical_accuracy: 0.7230 - val_loss: 4.8310 - val_acc: 0.2021 - val_top_k_categorical_accuracy: 0.4004\n\t\n\n\t80000/80000 [==============================] - 242s 3ms/step - loss: 2.1556 - acc: 0.4772 - top_k_categorical_accuracy: 0.7412 - val_loss: 4.9553 - val_acc: 0.2046 - val_top_k_categorical_accuracy: 0.4051\n\t\n\n\t80000/80000 [==============================] - 243s 3ms/step - loss: 2.0611 - acc: 0.4944 - top_k_categorical_accuracy: 0.7579 - val_loss: 5.1070 - val_acc: 0.1941 - val_top_k_categorical_accuracy: 0.3936\n\t\n\n\t80000/80000 [==============================] - 242s 3ms/step - loss: 1.9717 - acc: 0.5104 - top_k_categorical_accuracy: 0.7723 - val_loss: 5.3036 - val_acc: 0.2042 - val_top_k_categorical_accuracy: 0.4021\n\t\n\n\t80000/80000 [==============================] - 240s 3ms/step - loss: 1.8947 - acc: 0.5250 - top_k_categorical_accuracy: 0.7874 - val_loss: 5.4930 - val_acc: 0.1991 - val_top_k_categorical_accuracy: 0.3966\n\t\n\n\t80000/80000 [==============================] - 238s 3ms/step - loss: 1.8224 - acc: 0.5395 - top_k_categorical_accuracy: 0.7985 - val_loss: 5.7421 - val_acc: 0.1953 - val_top_k_categorical_accuracy: 0.3928\n\t\n\n\t80000/80000 [==============================] - 237s 3ms/step - loss: 1.7553 - acc: 0.5534 - top_k_categorical_accuracy: 0.8112 - val_loss: 5.7278 - val_acc: 0.1931 - val_top_k_categorical_accuracy: 0.3948\n\t\n\n\t80000/80000 [==============================] - 236s 3ms/step - loss: 1.6928 - acc: 0.5660 - top_k_categorical_accuracy: 0.8206 - val_loss: 5.8661 - val_acc: 0.1908 - val_top_k_categorical_accuracy: 0.3825\n\t\n\n\t80000/80000 [==============================] - 240s 3ms/step - loss: 1.6367 - acc: 0.5775 - top_k_categorical_accuracy: 0.8308 - val_loss: 6.0282 - val_acc: 0.1882 - val_top_k_categorical_accuracy: 0.3840\n\t\n\n\t80000/80000 [==============================] - 237s 3ms/step - loss: 1.5811 - acc: 0.5891 - top_k_categorical_accuracy: 0.8403 - val_loss: 6.3243 - val_acc: 0.1915 - val_top_k_categorical_accuracy: 0.3886\n\t\n\n\t80000/80000 [==============================] - 235s 3ms/step - loss: 1.5333 - acc: 0.5996 - top_k_categorical_accuracy: 0.8481 - val_loss: 6.1779 - val_acc: 0.1834 - val_top_k_categorical_accuracy: 0.3750\n\t\n\n\t80000/80000 [==============================] - 255s 3ms/step - loss: 1.4889 - acc: 0.6095 - top_k_categorical_accuracy: 0.8551 - val_loss: 6.5050 - val_acc: 0.1883 - val_top_k_categorical_accuracy: 0.3822\n\t\n\n\t80000/80000 [==============================] - 266s 3ms/step - loss: 
1.4466 - acc: 0.6185 - top_k_categorical_accuracy: 0.8629 - val_loss: 6.6663 - val_acc: 0.1800 - val_top_k_categorical_accuracy: 0.3674\n\t\n\n\t80000/80000 [==============================] - 259s 3ms/step - loss: 1.4101 - acc: 0.6263 - top_k_categorical_accuracy: 0.8694 - val_loss: 6.7527 - val_acc: 0.1837 - val_top_k_categorical_accuracy: 0.3735\n\t\n\n\t80000/80000 [==============================] - 272s 3ms/step - loss: 1.3773 - acc: 0.6326 - top_k_categorical_accuracy: 0.8748 - val_loss: 6.9058 - val_acc: 0.1862 - val_top_k_categorical_accuracy: 0.3782\n\t\n\n\t80000/80000 [==============================] - 253s 3ms/step - loss: 1.3401 - acc: 0.6404 - top_k_categorical_accuracy: 0.8797 - val_loss: 7.1382 - val_acc: 0.1855 - val_top_k_categorical_accuracy: 0.3767\n\t\n\n\t80000/80000 [==============================] - 247s 3ms/step - loss: 1.3198 - acc: 0.6465 - top_k_categorical_accuracy: 0.8853 - val_loss: 7.1970 - val_acc: 0.1823 - val_top_k_categorical_accuracy: 0.3719\n\t\n\n\t80000/80000 [==============================] - 241s 3ms/step - loss: 1.2913 - acc: 0.6535 - top_k_categorical_accuracy: 0.8886 - val_loss: 7.1783 - val_acc: 0.1801 - val_top_k_categorical_accuracy: 0.3652\n\t\n\n\t80000/80000 [==============================] - 244s 3ms/step - loss: 1.2608 - acc: 0.6585 - top_k_categorical_accuracy: 0.8938 - val_loss: 7.4171 - val_acc: 0.1772 - val_top_k_categorical_accuracy: 0.3654\n\t\n\n\t80000/80000 [==============================] - 245s 3ms/step - loss: 1.2429 - acc: 0.6660 - top_k_categorical_accuracy: 0.8966 - val_loss: 7.3468 - val_acc: 0.1793 - val_top_k_categorical_accuracy: 0.3670\n\t\n\n\t80000/80000 [==============================] - 245s 3ms/step - loss: 1.2206 - acc: 0.6705 - top_k_categorical_accuracy: 0.9003 - val_loss: 7.7206 - val_acc: 0.1839 - val_top_k_categorical_accuracy: 0.3712\n\t\n\n\t80000/80000 [==============================] - 238s 3ms/step - loss: 1.1945 - acc: 0.6777 - top_k_categorical_accuracy: 0.9044 - val_loss: 7.7350 - val_acc: 0.1785 - val_top_k_categorical_accuracy: 0.3648\n\t\n\n\t80000/80000 [==============================] - 241s 3ms/step - loss: 1.1816 - acc: 0.6795 - top_k_categorical_accuracy: 0.9073 - val_loss: 7.8643 - val_acc: 0.1794 - val_top_k_categorical_accuracy: 0.3625\n\t\n\n\t80000/80000 [==============================] - 231s 3ms/step - loss: 1.1680 - acc: 0.6851 - top_k_categorical_accuracy: 0.9097 - val_loss: 8.0242 - val_acc: 0.1809 - val_top_k_categorical_accuracy: 0.3685\n\t\n\n\t80000/80000 [==============================] - 243s 3ms/step - loss: 1.1455 - acc: 0.6898 - top_k_categorical_accuracy: 0.9136 - val_loss: 8.0614 - val_acc: 0.1764 - val_top_k_categorical_accuracy: 0.3644\n\t\n\n\t80000/80000 [==============================] - 258s 3ms/step - loss: 1.1306 - acc: 0.6925 - top_k_categorical_accuracy: 0.9152 - val_loss: 8.1652 - val_acc: 0.1784 - val_top_k_categorical_accuracy: 0.3674\n\t\n\n\t80000/80000 [==============================] - 265s 3ms/step - loss: 1.1163 - acc: 0.6969 - top_k_categorical_accuracy: 0.9178 - val_loss: 8.1587 - val_acc: 0.1764 - val_top_k_categorical_accuracy: 0.3659\n\t\n\n\t80000/80000 [==============================] - 270s 3ms/step - loss: 1.1088 - acc: 0.7013 - top_k_categorical_accuracy: 0.9188 - val_loss: 8.3066 - val_acc: 0.1741 - val_top_k_categorical_accuracy: 0.3579\n\t\n\n\t80000/80000 [==============================] - 260s 3ms/step - loss: 1.0948 - acc: 0.7034 - top_k_categorical_accuracy: 0.9227 - val_loss: 8.3521 - val_acc: 0.1719 - val_top_k_categorical_accuracy: 
0.3529\n\t\n\n\t80000/80000 [==============================] - 256s 3ms/step - loss: 1.0910 - acc: 0.7053 - top_k_categorical_accuracy: 0.9235 - val_loss: 8.4043 - val_acc: 0.1704 - val_top_k_categorical_accuracy: 0.3555\n\t\n\n\t80000/80000 [==============================] - 248s 3ms/step - loss: 1.0773 - acc: 0.7081 - top_k_categorical_accuracy: 0.9262 - val_loss: 8.5451 - val_acc: 0.1749 - val_top_k_categorical_accuracy: 0.3617\n\t\n\n\t80000/80000 [==============================] - 248s 3ms/step - loss: 1.0668 - acc: 0.7108 - top_k_categorical_accuracy: 0.9266 - val_loss: 8.5922 - val_acc: 0.1718 - val_top_k_categorical_accuracy: 0.3588\n\t\n\n\t80000/80000 [==============================] - 246s 3ms/step - loss: 1.0596 - acc: 0.7133 - top_k_categorical_accuracy: 0.9293 - val_loss: 8.5660 - val_acc: 0.1652 - val_top_k_categorical_accuracy: 0.3448\n\t\n\n\t80000/80000 [==============================] - 246s 3ms/step - loss: 1.0497 - acc: 0.7182 - top_k_categorical_accuracy: 0.9305 - val_loss: 8.6918 - val_acc: 0.1758 - val_top_k_categorical_accuracy: 0.3589\n\t\n\n\t80000/80000 [==============================] - 248s 3ms/step - loss: 1.0363 - acc: 0.7221 - top_k_categorical_accuracy: 0.9315 - val_loss: 8.8265 - val_acc: 0.1732 - val_top_k_categorical_accuracy: 0.3564\n\t\n\n\t80000/80000 [==============================] - 248s 3ms/step - loss: 1.0340 - acc: 0.7208 - top_k_categorical_accuracy: 0.9330 - val_loss: 8.8335 - val_acc: 0.1741 - val_top_k_categorical_accuracy: 0.3563\"\"\"\n\n\t#print(data)\n\n\t# parse the data\n\txs = []\n\tlosses = [] \n\tvalidation_losses = [] \n\tlines = data.split('\\n')\n\tx=1\n\tLOSS_IDX, VALIDATION_LOSS_IDX = 7,16\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tline_tokens = line.split(' ')\n\t\tif len(line_tokens) > 1:\n\t\t\tlosses.append(float(line_tokens[LOSS_IDX]))\n\t\t\tvalidation_losses.append(float(line_tokens[VALIDATION_LOSS_IDX]))\n\t\t\txs.append(x)\n\t\t\tx += 1\n\n\t#print(losses)\n\t#print(validation_losses)\n\n\t\"\"\"\n\txs = xs[:15]\n\tlosses = losses[:15]\n\tvalidation_losses = validation_losses[:15]\n\n\tplot_name = 'learning_curve_cnn_{}.png'.format(str(datetime.datetime.now()))\n\tfig, ax = plt.subplots()\n\tline1, = ax.plot(xs, losses, label='Training Loss')\n\tline2, = ax.plot(xs, validation_losses, label='Validation Loss')\n\ttitle='CNN Loss Curve'\n\tax.legend()\n\tplt.tick_params(\n\t\t\taxis='x', # changes apply to the x-axis\n\t\t\twhich='both', # both major and minor ticks are affected\n\t\t\tbottom=False, # ticks along the bottom edge are off\n\t\t\ttop=False, # ticks along the top edge are off\n\t\t\tlabelbottom=False) # labels along the bottom edge are off\n\tplt.ylabel('Loss')\n\tplt.title(title)\n\tplt.savefig(plot_name)\n\t\"\"\"\n\n\tvalidation_losses = [3.78,2.26,1.77,1.52,1.37,1.29,1.22,1.18,1.14,1.12]\n\taccuracies = [0.58,0.76,0.82,0.85,0.86,0.87,0.88,0.89,0.89,0.89]\n\tassert(len(validation_losses) == len(accuracies))\n\tepochs = list(range(1,len(validation_losses)+1))\n\n\tplot_name = 'learning_curve_cnn_{}.png'.format(str(datetime.datetime.now()))\n\n\tfig, ax1 = plt.subplots()\n\t\n\tcolor = 'tab:red'\n\tax1.set_xlabel('epoch')\n\tax1.set_ylabel('loss', color=color)\n\tax1.plot(epochs, validation_losses, color=color)\n\tax1.tick_params(axis='y', labelcolor=color)\n\n\tax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n\tcolor = 'tab:blue'\n\tax2.set_ylabel('accuracy', color=color) # we already handled the x-label with ax1\n\tax2.plot(epochs, accuracies, 
color=color)\n\tax2.tick_params(axis='y', labelcolor=color)\n\n\t\"\"\"\n\tplt.tick_params(\n\t\t\taxis='x', # changes apply to the x-axis\n\t\t\twhich='both', # both major and minor ticks are affected\n\t\t\tbottom=False, # ticks along the bottom edge are off\n\t\t\ttop=False, # ticks along the top edge are off\n\t\t\tlabelbottom=False) # labels along the bottom edge are off\n\t\"\"\"\n\t\n\tfig.tight_layout() # otherwise the right y-label is slightly clipped\n\tplt.savefig(plot_name)\n\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ylabel"
]
] |
Crossmdl/Crossmdl
|
[
"49f245349cc32f750bc33ef891b2ee90f60317a6",
"49f245349cc32f750bc33ef891b2ee90f60317a6"
] |
[
"DropDtw-Code/train.py",
"DropDtw-Code/models/nets.py"
] |
[
"import os\nimport torch\nimport argparse\nimport random\nimport torch\nimport numpy as np\nimport pytorch_lightning as pl\nimport torchmetrics\nfrom copy import deepcopy, copy\nimport pickle as pkl\n\n\nfrom paths import PROJECT_PATH, WEIGHTS_PATH\nfrom models.nets import EmbeddingsMapping\nfrom models.losses import compute_clust_loss, compute_alignment_loss\nfrom models.visualization import visualize_drop_dtw_matching, visualize_step_strength\nfrom data.data_module import DataModule\nfrom data.data_utils import sample_to_device\nfrom data.batching import unflatten_batch\nfrom evaluate import compute_all_metrics\nfrom utils import Namespace, load_yaml\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# Enabling reproducibility\nrandom.seed(10)\nnp.random.seed(10)\ntorch.manual_seed(10)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--name', type=str, help=\"name of the experiment\", default=\"myexp\")\nparser.add_argument('--dataset', type=str, default='COIN', choices=['COIN', 'CrossTask', 'YouCook2'], help=\"name of the dataset we are encoding\")\n\n# training hyper-parameters\nparser.add_argument('--batch_size', type=int, default=24, help=\"batch size\")\nparser.add_argument('--epochs', type=int, default=10, help=\"batch size\")\nparser.add_argument('--lr', type=float, default=3e-4, help=\"learning rate\")\nparser.add_argument('--wd', type=float, default=1e-4, help=\"weight decay\")\nparser.add_argument('--n_cls', type=int, default=3, help=\"Number of video of one class in a batch. Must divide batch_size\")\n\n# model hyper-parameters\nparser.add_argument('--video_layers', type=int, default=2, help=\"Number of layers in nonlinear mapping for video embeddings\")\nparser.add_argument('--text_layers', type=int, default=0, help=\"Number of layers in nonlinear mapping for text embeddings\")\nparser.add_argument('--batchnorm', type=int, default=0, help=\"Wheather to use batchnorm in models\")\nparser.add_argument('--pretrained_drop', action='store_true', default=False, help='Start with pre-trained drop costs')\n\n# loss hyper-parameters\nparser.add_argument('--dp_algo', type=str, default='DropDTW', choices=['DropDTW', 'OTAM', 'NW', 'DTW'], help=\"DP algo used for matching\")\nparser.add_argument('--drop_cost', type=str, default='logit', choices=['logit', 'learn'], help=\"The way to define drop cost\")\nparser.add_argument('--dtw_softning', type=str, default='prob', choices=['prob', 'gamma', 'none'], help=\"DP algo used for matching\")\nparser.add_argument('--keep_percentile', type=float, default=0.3, help=\"If drop cost is defined as logit, computes the percentile of drops\")\nparser.add_argument('--contiguous_drop', type=bool, default=True, help=\"Wheather to do contiguous drop in Drop-DTW\")\nparser.add_argument('--clust_loss_mult', type=float, default=4, help=\"Multiplier for the step loss\")\nparser.add_argument('--dtw_loss_mult', type=float, default=2.5, help=\"Multiplier for the dtw loss\")\nparser.add_argument('--dtw_xz_gamma', type=float, default=10, help=\"Softmax temperature for xz product, in dtw\")\nparser.add_argument('--dtw_min_gamma', type=float, default=1, help=\"Softmax temperature for softmin, in dtw\")\nparser.add_argument('--step_xz_gamma', type=float, default=30, help=\"Softmax temperature for xz product, in step loss\")\nparser.add_argument('--bg_scope', type=str, default='global', choices=['global', 'class', 'video'], help=\"The scope where the background prototype is conisdered the same\")\nargs = parser.parse_args()\n\n\nclass 
VisualizationCallback(pl.callbacks.Callback):\n def on_train_batch_end(self, trainer, pl_module, outputs, flat_batch, batch_idx, dataloader_idx):\n step = trainer.global_step\n if step % 10 == 0:\n original_sample = sample_to_device(random.choice(unflatten_batch(flat_batch)), device)\n # sample = deepcopy(original_sample)\n sample = copy(original_sample)\n sample['frame_features'] = pl_module.model.map_video(sample['frame_features'].to(device)).detach()\n sample['step_features'] = pl_module.model.map_text(sample['step_features'].to(device)).detach()\n if args.drop_cost == 'learn':\n distractor = pl_module.model.compute_distractors(sample['step_features'].mean(0)).detach().cpu()\n else:\n distractor = None\n\n sample_gammas = (args.dtw_xz_gamma, 1)\n sample_dict = {'Ours': sample_to_device(sample, 'cpu'),\n 'HowTo100M': sample_to_device(original_sample, 'cpu')}\n\n dtw_image = visualize_drop_dtw_matching(\n sample_dict, distractor, gamma_f=sample_gammas,\n drop_cost=args.drop_cost, keep_percentile=args.keep_percentile, shape=(10, 2))\n steps_image = visualize_step_strength(\n sample_dict, distractor, gamma_f=sample_gammas,\n drop_cost=args.drop_cost, keep_percentile=args.keep_percentile, shape=(10, 2))\n matching_picture = np.concatenate([steps_image, dtw_image], 1)\n trainer.logger.experiment.add_image(\n 'matching_picture', matching_picture.transpose((2, 0, 1)), global_step=step)\n\n\nclass TrainModule(pl.LightningModule):\n def __init__(self, model, data, name=None):\n super(TrainModule, self).__init__()\n self.name = name\n self.model = model\n self.data = data\n self.avg_loss_metric = torchmetrics.MeanMetric()\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.wd)\n return optimizer\n\n def training_step(self, flat_batch, batch_id):\n flat_batch['frame_features'] = self.model.map_video(flat_batch['frame_features'])\n flat_batch['step_features'] = self.model.map_text(flat_batch['step_features'])\n samples = unflatten_batch(flat_batch)\n\n if args.drop_cost == 'learn':\n mean_steps = torch.stack([s['step_features'].mean(0) for s in samples], 0)\n distractors = self.model.compute_distractors(mean_steps)\n else:\n distractors = None\n\n # Computing total loss\n total_loss = 0\n if args.clust_loss_mult > 0:\n clust_loss = compute_clust_loss(samples, distractors, xz_hard_ratio=1,\n xz_gamma=args.step_xz_gamma, frame_gamma=10,\n all_classes_distinct=(args.dataset == 'YouCook2'),\n bg_scope=args.bg_scope)\n self.log('train/clust_loss', clust_loss)\n total_loss += args.clust_loss_mult * clust_loss\n\n if args.dtw_loss_mult > 0:\n dtw_loss = args.dtw_loss_mult * compute_alignment_loss(\n samples, distractors, contiguous=args.contiguous_drop,\n gamma_xz=args.dtw_xz_gamma, gamma_min=args.dtw_min_gamma,\n drop_cost_type=args.drop_cost, dp_algo=args.dp_algo,\n keep_percentile=args.keep_percentile, softning=args.dtw_softning)\n self.log('train/dtw_loss', dtw_loss)\n total_loss += dtw_loss\n\n self.log('train/total_loss', self.avg_loss_metric(total_loss))\n return total_loss\n\n def training_epoch_end(self, training_step_outputs):\n self.model.eval()\n avg_total_loss = self.avg_loss_metric.compute()\n print('Train Total loss: {:.2f}'.format(avg_total_loss))\n self.avg_loss_metric.reset()\n\n eval_config = Namespace(dp_algo='DropDTW', drop_cost=args.drop_cost, keep_percentile=0.3,\n use_unlabeled=True, distance='inner', dataset=args.dataset)\n _, _, accuracy_dtw, iou_dtw, recall = compute_all_metrics(\n self.data.val_dataset, 
self.model, gamma=30, config=eval_config)\n \n print(\"Recall is \",recall)\n print(\"DTW Accuracy is \",accuracy_dtw)\n print(\"DTW IoU is \",iou_dtw)\n self.log(\"Metrics/Recall\", recall)\n self.log(\"Metrics/Accuracy\", accuracy_dtw)\n self.log(\"Metrics/IoU\", iou_dtw)\n\n\ndef main():\n with open('dataset.pickle', 'rb') as f:\n data = pkl.load(f)\n print(len(data))\n model = EmbeddingsMapping(\n d=512, learnable_drop=(args.drop_cost == 'learn'), video_layers=args.video_layers,\n text_layers=args.text_layers, normalization_dataset=None,\n batchnorm=args.batchnorm)\n\n # load drop costs from a pre-trained model\n if args.pretrained_drop:\n # assumes that the model with the same name has been already trained\n # this retraines the model, but uses drop_mapping intialization from the previous training\n from glob import glob\n weights_path = glob(os.path.join(WEIGHTS_PATH, args.name, \"weights-epoch=*.ckpt\"))[0]\n state_dict = {k[6:]: v for k, v in torch.load(weights_path, map_location=device)['state_dict'].items()\n if k.startswith('model.drop_mapping')}\n model.load_state_dict(state_dict, strict=False)\n \n train_module = TrainModule(model, data)\n\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n monitor='Metrics/Recall',\n dirpath=os.path.join(PROJECT_PATH, 'weights', args.name),\n filename='weights-{epoch:02d}',\n save_top_k=1,\n mode='max',\n )\n vis_callback = VisualizationCallback()\n logger = pl.loggers.TensorBoardLogger('tb_logs', args.name)\n\n trainer = pl.Trainer(gpus=1, callbacks=[checkpoint_callback, vis_callback],\n max_epochs=args.epochs, logger=logger)\n\n trainer.fit(train_module, data)\n\n\nif __name__ == '__main__':\n print(device)\n main()\n",
"import sys\nimport os\nimport torch\nfrom torch import nn\nfrom os import path as osp\n\nsys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__)))) # add parent dir\nfrom models.model_utils import compute_normalization_parameters\n\nclass NonlinBlock(nn.Module):\n def __init__(self, d_in, d_out, batchnorm):\n super(NonlinBlock, self).__init__()\n self.fc = nn.Linear(d_in, d_out)\n self.relu = nn.ReLU()\n self.do_batchnorm = batchnorm\n if batchnorm:\n self.norm_fn = nn.BatchNorm1d(d_out)\n # self.layer_norm = nn.LayerNorm(d_out)\n\n def forward(self, x):\n x = self.fc(x)\n if self.do_batchnorm:\n x = self.norm_fn(x)\n x = self.relu(x)\n return x\n\n\nclass NonlinMapping(nn.Module):\n def __init__(self, d, layers=2, normalization_params=None, batchnorm=False):\n super(NonlinMapping, self).__init__()\n self.nonlin_mapping = nn.Sequential(*[NonlinBlock(d, d, batchnorm) for i in range(layers - 1)])\n if layers > 0:\n self.lin_mapping = nn.Linear(d, d)\n else:\n self.lin_mapping = lambda x: torch.zeros_like(x)\n\n self.register_buffer('norm_mean', torch.zeros(d))\n self.register_buffer('norm_sigma', torch.ones(d))\n\n def initialize_normalization(self, normalization_params):\n if normalization_params is not None:\n if len(normalization_params) > 0:\n self.norm_mean.data.copy_(normalization_params[0])\n if len(normalization_params) > 1:\n self.norm_sigma.data.copy_(normalization_params[1])\n\n def forward(self, x):\n x = (x - self.norm_mean) / self.norm_sigma\n res = self.nonlin_mapping(x)\n res = self.lin_mapping(res)\n return x + res \n\n\nclass EmbeddingsMapping(nn.Module):\n def __init__(self, d, video_layers=2, text_layers=2, drop_layers=1, learnable_drop=False, normalization_dataset=None, batchnorm=False):\n super(EmbeddingsMapping, self).__init__()\n self.video_mapping = NonlinMapping(d, video_layers, batchnorm=batchnorm)\n self.text_mapping = NonlinMapping(d, text_layers, batchnorm=batchnorm)\n\n if learnable_drop:\n self.drop_mapping = NonlinMapping(d, drop_layers, batchnorm=batchnorm)\n\n if normalization_dataset is not None:\n norm_params = compute_normalization_parameters(normalization_dataset)\n self.video_mapping.initialize_normalization(norm_params[:2])\n self.text_mapping.initialize_normalization(norm_params[2:])\n\n def map_video(self, x):\n return self.video_mapping(x)\n\n def map_text(self, z):\n return self.text_mapping(z)\n\n def compute_distractors(self, v):\n return self.drop_mapping(v)\n"
] |
[
[
"numpy.concatenate",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load"
],
[
"torch.nn.Linear",
"torch.zeros",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.zeros_like"
]
] |
YuHe0108/cvmodule
|
[
"ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd"
] |
[
"HumeanPoseEstimate/loss.py"
] |
[
"from tensorflow import keras\r\nimport tensorflow as tf\r\n\r\n\r\ndef joint_mse_loss(y_pred, y_true, true_weight):\r\n \"\"\"\r\n 损失函数想要表达的意思: 输出的特征图数量为关键点的数量,意味着输出的是每一个像素属于各个关键点的置信度\r\n \"\"\"\r\n batch_size = y_pred.shape[0]\r\n num_of_joints = y_pred.shape[-1] # 有多少个关键点\r\n y_pred = tf.reshape(y_pred, shape=(batch_size, -1, num_of_joints)) # 合并宽和高\r\n heatmap_pred_list = tf.split(value=y_pred,\r\n num_or_size_splits=num_of_joints,\r\n axis=-1) # 拆分每一个关键点的特征图 [batch_size, -1, 1]\r\n y_true = tf.reshape(y_true, shape=(batch_size, -1, num_of_joints))\r\n heatmap_true_list = tf.split(value=y_true, # y_true执行与y_pred相同的操作\r\n num_or_size_splits=num_of_joints,\r\n axis=-1)\r\n losses = [] # 计算每一个关键点的损失值,并累加求平均\r\n for i in range(num_of_joints):\r\n heatmap_pred = tf.squeeze(heatmap_pred_list[i])\r\n heatmap_true = tf.squeeze(heatmap_true_list[i])\r\n loss = 0.5 * tf.losses.mean_squared_error(y_pred=heatmap_pred * true_weight[:, i],\r\n y_true=heatmap_true * true_weight[:, i])\r\n losses.append(loss)\r\n return tf.reduce_mean(loss)\r\n\r\n\r\nclass JointsMSELoss(object):\r\n def __init__(self):\r\n self.mse = tf.losses.MeanSquaredError()\r\n\r\n def __call__(self, y_pred, target, target_weight):\r\n batch_size = y_pred.shape[0]\r\n num_of_joints = y_pred.shape[-1]\r\n pred = tf.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints))\r\n heatmap_pred_list = tf.split(value=pred, num_or_size_splits=num_of_joints, axis=-1)\r\n gt = tf.reshape(tensor=target, shape=(batch_size, -1, num_of_joints))\r\n heatmap_gt_list = tf.split(value=gt, num_or_size_splits=num_of_joints, axis=-1)\r\n loss = 0.0\r\n for i in range(num_of_joints):\r\n heatmap_pred = tf.squeeze(heatmap_pred_list[i])\r\n heatmap_gt = tf.squeeze(heatmap_gt_list[i])\r\n loss += 0.5 * self.mse(y_true=heatmap_pred * target_weight[:, i],\r\n y_pred=heatmap_gt * target_weight[:, i])\r\n return loss / num_of_joints\r\n"
] |
[
[
"tensorflow.losses.MeanSquaredError",
"tensorflow.reshape",
"tensorflow.squeeze",
"tensorflow.losses.mean_squared_error",
"tensorflow.split",
"tensorflow.reduce_mean"
]
] |
IvanIFChen/Passiotic
|
[
"f6a035fad5c6a6372721e2f74f9abdc98d0ffe67",
"f6a035fad5c6a6372721e2f74f9abdc98d0ffe67"
] |
[
"plots/test 1 total count.py",
"plots/test 2 total and unique count.py"
] |
[
"import matplotlib.pyplot as plt\nimport json\nfrom dateutil import parser\nfrom pprint import pprint\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\n\n\ndef load_from_file(filename):\n data = defaultdict(int)\n raw_data = []\n with open(filename, 'r') as f:\n for line in f.readlines():\n x = json.loads(line)\n date = parser.parse(x['end_time']['s'])\n pi = x['pi_id']['s']\n if pi == 'pi_2':\n date = date - timedelta(minutes=6)\n raw_data.append(date.minute)\n for d in raw_data:\n data[d] += 1\n return data\n\n\nif __name__ == '__main__':\n data = load_from_file('dynamo_exports/First-Test-Snell-2nd-Floor')\n data = [(key, data[key]) for key in data.keys()]\n data.sort(key=lambda x: x[0])\n f, ax = plt.subplots(1)\n ydata = [x[1] for x in data]\n xdata = ['11:{} AM'.format(x[0]) for x in data]\n ax.plot(xdata, ydata, label='total devices')\n ax.set_ylim(bottom=0, top=100)\n plt.legend()\n plt.show()\n",
"import matplotlib.pyplot as plt\nimport json\nfrom dateutil import parser\nfrom pprint import pprint\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\n\n\ndef load_from_file(filename):\n data = defaultdict(list)\n raw_data = []\n with open(filename, 'r') as f:\n for line in f.readlines():\n x = json.loads(line)\n round_id = x['round_id']['n']\n pi = x['pi_id']['s']\n mac = x['device_mac']['s']\n raw_data.append((round_id, pi, mac))\n for d in raw_data:\n data[d[0]] += [d[2]]\n\n unique_data = defaultdict(list)\n for key in data:\n unique_data[key] = list(set(data[key]))\n return data, unique_data\n\n\nif __name__ == '__main__':\n data, unique_data = load_from_file(\n 'dynamo_exports/Second-Test-Snell-2nd-Floor')\n unique_data = sorted([(key, unique_data[key])\n for key in unique_data.keys()],\n key=lambda x: x[0][0])[:-1]\n data = sorted([(key, data[key]) for key in data.keys()],\n key=lambda x: x[0][0])[:-1]\n f, ax = plt.subplots(1)\n ydata_data = [len(x[1]) for x in data]\n ydata_unique = [len(x[1]) for x in unique_data]\n xdata = ['round {}'.format(x[0]) for x in data]\n ax.plot(xdata, ydata_data, label='total devices')\n ax.plot(xdata, ydata_unique, label='unique devices')\n ax.set_ylim(bottom=0, top=100)\n\n for i, j in zip(xdata, ydata_data):\n ax.annotate(str(j), xy=((i, j + 3)))\n\n for i, j in zip(xdata, ydata_unique):\n ax.annotate(str(j), xy=((i, j + 3)))\n\n plt.legend()\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots"
]
] |
VishaalMK/VectorDefense
|
[
"dc488fbf19bc9aefaf58bcc2b89dfe0e5adc3806"
] |
[
"mnist_cnn.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport keras\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Dropout\nfrom keras import backend\nimport tensorflow as tf\nfrom tensorflow.python.platform import flags\nimport scipy.misc\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom cleverhans.utils_mnist import data_mnist\nfrom cleverhans.utils_tf import model_train, model_eval\nfrom cleverhans.attacks import CarliniWagnerL2\nfrom cleverhans.utils import AccuracyReport\nfrom cleverhans.utils_keras import KerasModelWrapper, conv_2d\n\nFLAGS = flags.FLAGS\n\nmodel_path = \"models/mnist\"\n\ndef cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,\n channels=1, nb_filters=64, nb_classes=10):\n \"\"\"\n Defines a CNN model using Keras sequential model\n :param logits: If set to False, returns a Keras model, otherwise will also\n return logits tensor\n :param input_ph: The TensorFlow tensor for the input\n (needed if returning logits)\n (\"ph\" stands for placeholder but it need not actually be a\n placeholder)\n :param img_rows: number of row in the image\n :param img_cols: number of columns in the image\n :param channels: number of color channels (e.g., 1 for MNIST)\n :param nb_filters: number of convolutional filters per layer\n :param nb_classes: the number of output classes\n :return:\n \"\"\"\n model = Sequential()\n\n # Define the layers successively (convolution layers are version dependent)\n if keras.backend.image_dim_ordering() == 'th':\n input_shape = (channels, img_rows, img_cols)\n else:\n input_shape = (img_rows, img_cols, channels)\n\n layers = [conv_2d(nb_filters, (5, 5), (1, 1), \"same\",\n input_shape=input_shape),\n Activation('relu'),\n conv_2d(nb_filters, (5, 5), (1, 1), \"valid\"),\n Activation('relu'),\n\t Flatten(),\n\t Dropout(0.25),\n\t Dense(128),\n\t Activation('relu'),\n\t Dropout(0.5),\n Dense(nb_classes)]\n\n for layer in layers:\n model.add(layer)\n\n if logits:\n logits_tensor = model(input_ph)\n model.add(Activation('softmax'))\n\n if logits:\n return model, logits_tensor\n else:\n return model\n\ndef mnist_fgsm(train_start=0, train_end=60000, test_start=0,\n test_end=10000, nb_epochs=6, batch_size=128,\n learning_rate=0.001, train_dir=model_path,\n filename=\"mnist.ckpt\", load_model=False,\n nb_classes=10, testing=False):\n \"\"\"\n MNIST CleverHans tutorial\n :param train_start: index of first training set example\n :param train_end: index of last training set example\n :param test_start: index of first test set example\n :param test_end: index of last test set example\n :param nb_epochs: number of epochs to train model\n :param batch_size: size of training batches\n :param learning_rate: learning rate for training\n :param train_dir: Directory storing the saved model\n :param filename: Filename to save model under\n :param load_model: True for load, False for not load\n :param testing: if true, test error is calculated\n :return: an AccuracyReport object\n \"\"\"\n keras.layers.core.K.set_learning_phase(0)\n\n # Object used to keep track of (and return) key accuracies\n report = AccuracyReport()\n\n # Set TF random seed to improve reproducibility\n tf.set_random_seed(1234)\n\n if not hasattr(backend, \"tf\"):\n raise RuntimeError(\"This tutorial requires keras to be configured\"\n \" to use the TensorFlow backend.\")\n\n if 
keras.backend.image_dim_ordering() != 'tf':\n keras.backend.set_image_dim_ordering('tf')\n print(\"INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to \"\n \"'th', temporarily setting to 'tf'\")\n\n # Create TF session and set as Keras backend session\n sess = tf.Session()\n keras.backend.set_session(sess)\n\n # Get MNIST test data\n X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,\n train_end=train_end,\n test_start=test_start,\n test_end=test_end)\n\n # Use label smoothing\n assert Y_train.shape[1] == 10\n label_smooth = .1\n Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)\n\n # Define input TF placeholder\n x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))\n y = tf.placeholder(tf.float32, shape=(None, 10))\n\n # Define TF model graph\n model, logits = cnn_model(logits=True, input_ph=x)\n preds = model(x)\n print(\"Defined TensorFlow model graph.\")\n\n def evaluate():\n # Evaluate the accuracy of the MNIST model on legitimate test examples\n eval_params = {'batch_size': batch_size}\n acc = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)\n report.clean_train_clean_eval = acc\n assert X_test.shape[0] == test_end - test_start, X_test.shape\n print('Test accuracy on legitimate examples: %0.4f' % acc)\n\n # Train an MNIST model\n train_params = {\n 'nb_epochs': nb_epochs,\n 'batch_size': batch_size,\n 'learning_rate': learning_rate,\n 'train_dir': train_dir,\n 'filename': filename\n }\n ckpt = tf.train.get_checkpoint_state(train_dir)\n ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path\n\n rng = np.random.RandomState([2017, 8, 30])\n if load_model and ckpt_path:\n saver = tf.train.Saver()\n saver.restore(sess, ckpt_path)\n print(\"Model loaded from: {}\".format(ckpt_path))\n evaluate()\n else:\n print(\"Model was not loaded, training from scratch.\")\n model_train(sess, x, y, preds, X_train, Y_train, evaluate=evaluate,\n args=train_params, save=True, rng=rng)\n\n # Calculate training error\n if testing:\n eval_params = {'batch_size': batch_size}\n acc = model_eval(sess, x, y, preds, X_train, Y_train, args=eval_params)\n report.train_clean_train_clean_eval = acc\n\n return report\n\n\ndef main(argv=None):\n mnist_fgsm(nb_epochs=FLAGS.nb_epochs,\n batch_size=FLAGS.batch_size,\n learning_rate=FLAGS.learning_rate,\n train_dir=FLAGS.train_dir,\n filename=FLAGS.filename,\n\t\t nb_classes=FLAGS.nb_classes,\n load_model=FLAGS.load_model)\n\n\nif __name__ == '__main__':\n flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')\n flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')\n flags.DEFINE_integer('batch_size', 128, 'Size of training batches')\n flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')\n flags.DEFINE_string('train_dir', model_path, 'Directory where to save model.')\n flags.DEFINE_string('filename', 'mnist.ckpt', 'Checkpoint filename.')\n flags.DEFINE_boolean('load_model', True, 'Load saved model or train.')\n tf.app.run()\n\n"
] |
[
[
"matplotlib.use",
"tensorflow.set_random_seed",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.python.platform.flags.DEFINE_boolean",
"numpy.random.RandomState",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.train.get_checkpoint_state",
"tensorflow.python.platform.flags.DEFINE_float",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.placeholder",
"tensorflow.app.run"
]
] |
hangzh-msft/MLOpsDatabricks
|
[
"fc50127b236142a15c102a8c8e44ef1cbb584738"
] |
[
"src/score/score.py"
] |
[
"import json\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom azorean.core.model import Model\nfrom PIL import Image\n\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=5, padding=2),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.fc = nn.Linear(7 * 7 * 32, 10)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n\ndef init():\n global model\n model = CNN()\n model_path = Model.get_model_path(model_name=\"torchcnn\")\n model.load_state_dict(torch.load(model_path))\n model.eval()\n\n\ndef run(raw_data):\n transform = transforms.transforms.Compose([\n transforms.transforms.ToTensor(),\n transforms.transforms.Normalize(\n (0.1307,), (0.3081,))\n ])\n img = Image.frombytes(\n '1', (28, 28), (json.loads(raw_data)['data']).encode())\n input_data = transform(img)\n input_data = input_data.unsqueeze(0)\n classes = ['tshirt', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n output = model(input_data)\n index = torch.argmax(output, 1)\n return classes[index]\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load",
"torch.argmax"
]
] |
guangyizhangbci/EEG_Riemannian
|
[
"2d301bf3d06a192da2829c1c54b24d388ddea1dd"
] |
[
"code/main.py"
] |
[
"from __future__ import print_function, division\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport sys, os\nimport numpy as np\nfrom tqdm import tqdm\nfrom rich.progress import track\nfrom time import time\nimport pyriemann\nimport yaml\nimport argparse\nfrom scipy.stats import pearsonr\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import cohen_kappa_score,accuracy_score\nfrom sklearn.model_selection import KFold\nfrom spatial_embedding import spatial_features\nfrom model.spatial_temporal_information import spatial_temporal_info_stream\nfrom utils import root_mean_squared_error_numpy, load_dataset_signal_addr, load_dataset_feature_addr, parse_valid_data_all, save_test_result\n\nprint('ready')\n\n\nparser = argparse.ArgumentParser(description='Spatial Temporal_Info')\nparser.add_argument('--dataset', default='BCI_IV_2b', type=str,\n help='learning rate')\nparser.add_argument('--cpu-seed', default=0, type=int, metavar='N',\n help='cpu seed')\nparser.add_argument('--gpu-seed', default=12345, type=int, metavar='N',\n help='gpu seed')\nparser.add_argument('--lr', default=0.001, type=float, metavar='N',\n help='learning rate')\nparser.add_argument('--batch-size', default=32, type=int, metavar='N',\n help='train batchsize')\nparser.add_argument('--epochs', default=200, type=int, metavar='N',\n help='training epochs')\nparser.add_argument('--early-stopping', default=200, type=int, metavar='N',\n help='EarlyStopping')\nparser.add_argument('--riemannian_dist', default=True, action='store_false')\nparser.add_argument('--saved-ckpt', default=False, action='store_false')\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\n\n\n\ndef load_config(name):\n with open(os.path.join(sys.path[0], name)) as file:\n config = yaml.safe_load(file)\n\n return config\n\nconfig = load_config('dataset_params.yaml')\n\nnet_params = {'epochs': args.epochs, 'batch_size': args.batch_size, 'early_stopping': args.early_stopping, 'saved_ckpt_flag': args.saved_ckpt}\n\n\n\nclass experiments():\n def __init__(self, dataset_name):\n self.dataset_name = dataset_name\n\n '''\n The main file of experiments, the training loop depends on each dataset\n (e.g., train-test direct split or using k-Fold, session-dependent or not)\n '''\n\n def run_seed(self):\n '''\n Address of filtered EEG in each frequency band and extracted features (DE, PSD)\n '''\n addr_dict = load_dataset_signal_addr(self.dataset_name)\n signal_train_addr, signal_test_addr, label_train_addr, label_test_addr = list(addr_dict.values())\n\n addr_dict = load_dataset_feature_addr(self.dataset_name)\n features_train_addr, features_test_addr, _, _ = list(addr_dict.values())\n\n\n test_acc_result = np.zeros((config[self.dataset_name]['Subject_No'], config[self.dataset_name]['Session_No']))\n test_kap_result = np.zeros((config[self.dataset_name]['Subject_No'], config[self.dataset_name]['Session_No']))\n\n\n for subject_num in track(range(1, config[self.dataset_name]['Subject_No'] +1)):\n for session_num in range(1,config[self.dataset_name]['Session_No']+1):\n #____________________LOAD DATA____________________#\n X_train_signal = np.load(signal_train_addr.format(subject_num, session_num))\n X_test_signal = np.load(signal_test_addr.format(subject_num, session_num))\n X_train_features = np.load(features_train_addr.format(subject_num, session_num))\n X_test_features = np.load(features_test_addr.format(subject_num, session_num))\n Y_train = np.load(label_train_addr.format(subject_num, session_num))\n Y_test = 
np.load(label_test_addr.format(subject_num, session_num))\n ####################################################\n\n\n train_embed, test_embed = spatial_features(config, self.dataset_name, args.riemannian_dist, config[self.dataset_name]['params']['Rank_No']).embedding(X_train_signal, X_test_signal)\n\n Y_pred = spatial_temporal_info_stream(train_embed, test_embed, X_train_features, X_test_features, Y_train, Y_test, seld.dataset_name, net_params)\n\n test_acc_mlp = np.mean(accuracy_score(Y_test, np.argmax(Y_pred, axis=-1)))\n test_kap_mlp = np.mean(cohen_kappa_score(Y_test, np.argmax(Y_pred, axis=-1)))\n\n test_acc_result[subject_num-1,session_num-1] = test_acc_mlp\n test_kap_result[subject_num-1,session_num-1] = test_kap_mlp\n\n\n test_acc_result = np.mean(test_acc_result, axis=2)\n test_kap_result = np.mean(test_kap_result, axis=2)\n\n save_test_result(self.dataset_name, test_acc_result, test_kap_result)\n\n\n def run_seed_vig(self):\n '''\n Address of filtered EEG in each frequency band and extracted features (DE, PSD)\n '''\n addr_dict = load_dataset_signal_addr(self.dataset_name)\n signal_addr, label_addr = list(addr_dict.values())\n\n addr_dict = load_dataset_feature_addr(self.dataset_name)\n features_addr, _ = list(addr_dict.values())\n\n\n test_Fold_No = config[self.dataset_name]['Fold_No']\n test_rmse_result = np.zeros((config[self.dataset_name]['Subject_No'], test_Fold_No))\n test_corr_result = np.zeros((config[self.dataset_name]['Subject_No'], test_Fold_No))\n\n\n for subject_num in track(range(1, config[self.dataset_name]['Subject_No'] +1)):\n #____________________LOAD DATA____________________#\n X_signal = np.load(signal_addr.format(subject_num))\n X_features = np.load(features_addr.format(subject_num))\n Y = np.load(label_addr.format(subject_num))\n ####################################################\n\n test_Fold_count=1\n\n Y_test_total = np.zeros((1,1))\n Y_pred_total = np.zeros((1,1))\n\n\n rmse_array = np.zeros(([config[self.dataset_name]['Subject_No'], test_Fold_No]))\n corr_array = np.zeros(([config[self.dataset_name]['Subject_No'], test_Fold_No]))\n\n\n kfold_test = KFold(test_Fold_No, True, 1)\n # kfold_test = KFold(Fold_No_test, False, None)\n\n\n Y_test_total = np.zeros((0,1))\n Y_pred_total = np.zeros((0,1))\n\n for train_index, test_index in kfold_test.split(X_signal):\n\n print(\"KFold No.\", test_Fold_count)\n\n X_train_signal, X_test_signal, X_train_features, X_test_features, Y_train, Y_test = X_signal[train_index], X_signal[test_index], X_features[train_index], X_features[test_index], Y[train_index], Y[test_index]\n\n\n train_embed, test_embed = spatial_features(config, self.dataset_name, args.riemannian_dist, config[self.dataset_name]['params']['Rank_No']).embedding(X_train_signal, X_test_signal)\n\n\n Y_pred = spatial_temporal_info_stream(train_embed, test_embed, X_train_features, X_test_features, Y_train, Y_test, self.dataset_name, net_params)\n\n\n temp_Y_test = Y_test\n temp_Y_pred = Y_pred\n\n Y_test_total = np.vstack((Y_test_total, temp_Y_test))\n Y_pred_total = np.vstack((Y_pred_total, temp_Y_pred))\n\n Y_test_total = np.ravel(Y_test_total)\n Y_pred_total = np.ravel(Y_pred_total)\n print(Y_test_total.shape, Y_pred_total.shape)\n test_Fold_count += 1\n\n\n rmse_value = root_mean_squared_error_numpy(Y_test_total, Y_pred_total) # RMSE value for all 885 samples\n corcoeff_value, _ = pearsonr(Y_test_total, Y_pred_total)\n\n\n rmse_array[subject_num-1, test_Fold_No-1] = rmse_value\n corr_array[subject_num-1, test_Fold_No-1] = corcoeff_value\n\n 
save_test_result(self.dataset_name, test_acc_result, test_kap_result)\n\n\n\n def run_bci(self):\n\n '''\n Address of filtered EEG in each frequency band and extracted features (DE, PSD)\n '''\n addr_dict = load_dataset_signal_addr(self.dataset_name)\n signal_train_addr, signal_test_addr, label_train_addr, label_test_addr = list(addr_dict.values())\n\n addr_dict = load_dataset_feature_addr(self.dataset_name)\n features_train_addr, features_test_addr, _, _ = list(addr_dict.values())\n\n\n test_acc_result = np.zeros((config[self.dataset_name]['Subject_No']))\n test_kap_result = np.zeros((config[self.dataset_name]['Subject_No']))\n\n\n\n for subject_num in track(range(1, config[self.dataset_name]['Subject_No']+1)):\n # for subject_num in track(range(1, 2)):\n\n #____________________LOAD DATA____________________#\n X_train_signal = np.load(signal_train_addr.format(subject_num))\n X_test_signal = np.load(signal_test_addr.format(subject_num))\n X_train_features = np.load(features_train_addr.format(subject_num))\n X_test_features = np.load(features_test_addr.format(subject_num))\n Y_train = np.load(label_train_addr.format(subject_num))\n Y_test = np.load(label_test_addr.format(subject_num))\n Y_train = np.expand_dims(Y_train, axis=1) -1 #1,2,3,4 ---> 0,1,2,3\n Y_test = np.expand_dims(Y_test, axis=1) -1 #1,2,3,4 ---> 0,1,2,3\n ####################################################\n\n X_train_signal, X_train_features, Y_train = parse_valid_data_all(X_train_signal, X_train_features, Y_train)\n X_test_signal, X_test_features, Y_test = parse_valid_data_all(X_test_signal, X_test_features, Y_test)\n\n train_embed, test_embed = spatial_features(config, self.dataset_name, args.riemannian_dist, config[self.dataset_name]['params']['Rank_No']).embedding(X_train_signal, X_test_signal)\n\n\n Y_pred = spatial_temporal_info_stream(train_embed, test_embed, X_train_features, X_test_features, Y_train, Y_test, self.dataset_name, net_params)\n\n\n '''\n 2a output label in one-hot form, 2b output label in range (0,1)\n '''\n if '2a' in self.dataset_name:\n Y_pred = np.argmax(Y_pred, axis=-1)\n else:\n Y_pred = np.round(Y_pred)\n Y_test = Y_test.squeeze(1)\n\n test_acc_mlp = np.mean(accuracy_score(Y_test, Y_pred))\n test_kap_mlp = np.mean(cohen_kappa_score(Y_test, Y_pred))\n\n\n test_acc_result[subject_num-1] = test_acc_mlp\n test_kap_result[subject_num-1] = test_kap_mlp\n\n save_test_result(self.dataset_name, test_acc_result, test_kap_result)\n\n\n def run(self):\n\n if 'BCI' in self.dataset_name:\n self.run_bci()\n elif self.dataset_name=='SEED':\n self.run_seed()\n elif self.dataset_name=='SEED_VIG':\n self.run_seed_vig()\n else:\n raise Exception('Datasets Name Error')\n\n\n\n\nif __name__ == '__main__':\n\n\n config = load_config('dataset_params.yaml')\n with tf.device(\"gpu:0\"):\n np.random.seed(args.cpu_seed)\n tf.random.set_random_seed(args.gpu_seed)\n experiments(args.dataset).run()\n\n\n\n\n\n#\n"
] |
[
[
"numpy.zeros",
"numpy.random.seed",
"tensorflow.random.set_random_seed",
"numpy.round",
"numpy.mean",
"scipy.stats.pearsonr",
"sklearn.metrics.accuracy_score",
"numpy.ravel",
"numpy.argmax",
"tensorflow.device",
"sklearn.model_selection.KFold",
"numpy.expand_dims",
"sklearn.metrics.cohen_kappa_score",
"numpy.vstack"
]
] |
johnjohndoe/c3nav
|
[
"a17f863a3512e305595c16b0300796b6bae81241",
"a17f863a3512e305595c16b0300796b6bae81241"
] |
[
"src/c3nav/mapdata/render/geometry/altitudearea.py",
"src/c3nav/mapdata/render/engines/svg.py"
] |
[
"from collections import deque\nfrom itertools import chain\n\nimport numpy as np\n\nfrom c3nav.mapdata.models import AltitudeArea\nfrom c3nav.mapdata.render.geometry.hybrid import HybridGeometry\n\n\nclass AltitudeAreaGeometries:\n def __init__(self, altitudearea=None, colors=None, obstacles=None):\n if altitudearea is not None:\n self.geometry = altitudearea.geometry\n self.altitude = int(altitudearea.altitude * 1000)\n self.altitude2 = None if altitudearea.altitude2 is None else int(altitudearea.altitude2 * 1000)\n self.point1 = altitudearea.point1\n self.point2 = altitudearea.point2\n else:\n self.geometry = None\n self.altitude = None\n self.altitude2 = None\n self.point1 = None\n self.point2 = None\n self.base = None\n self.bottom = None\n self.colors = colors\n self.obstacles = obstacles\n\n def get_altitudes(self, points):\n # noinspection PyCallByClass,PyTypeChecker\n return AltitudeArea.get_altitudes(self, points/1000).astype(np.int32)\n\n def create_hybrid_geometries(self, face_centers, vertices_offset, faces_offset):\n self.geometry = HybridGeometry.create(self.geometry, face_centers)\n\n vertices = deque()\n faces = deque()\n\n for color, areas in self.colors.items():\n for height in tuple(areas.keys()):\n faces_offset, vertices_offset = self._call_create_full(areas, height, faces, vertices,\n faces_offset, vertices_offset)\n\n for height_obstacles in self.obstacles.values():\n for color_obstacles in height_obstacles.values():\n for i in range(len(color_obstacles)):\n faces_offset, vertices_offset = self._call_create_full(color_obstacles, i, faces, vertices,\n faces_offset, vertices_offset)\n\n if not vertices:\n return np.empty((0, 2), dtype=np.int32), np.empty((0, 3), dtype=np.uint32)\n return np.vstack(vertices), np.vstack(faces)\n\n def _call_create_full(self, mapping, key, faces, vertices, faces_offset, vertices_offset):\n geom = mapping[key]\n new_geom, new_vertices, new_faces = HybridGeometry.create_full(geom, vertices_offset, faces_offset)\n mapping[key] = new_geom\n vertices_offset += new_vertices.shape[0]\n faces_offset += new_faces.shape[0]\n vertices.append(new_vertices)\n faces.append(new_faces)\n return faces_offset, vertices_offset\n\n def remove_faces(self, faces):\n self.geometry.remove_faces(faces)\n for areas in self.colors.values():\n for area in areas.values():\n area.remove_faces(faces)\n\n def create_polyhedrons(self, create_polyhedron, altitudes, min_altitude, crops):\n if self.altitude2 is None:\n altitudes = self.altitude\n\n self.base = HybridGeometry(self.geometry.geom, self.geometry.faces)\n self.bottom = HybridGeometry(self.geometry.geom, self.geometry.faces)\n self.geometry.build_polyhedron(create_polyhedron,\n lower=altitudes - int(0.7 * 1000),\n upper=altitudes,\n crops=crops)\n self.base.build_polyhedron(create_polyhedron,\n lower=min_altitude - int(0.7 * 1000),\n upper=altitudes - int(0.7 * 1000),\n crops=crops,\n top=False, bottom=False)\n self.bottom.build_polyhedron(create_polyhedron,\n lower=0, upper=1,\n crops=crops,\n top=False)\n\n for geometry in chain(*(areas.values() for areas in self.colors.values())):\n geometry.build_polyhedron(create_polyhedron,\n lower=altitudes,\n upper=altitudes + int(0.001 * 1000),\n crops=crops)\n # todo: treat altitude properly\n for height, height_geometries in self.obstacles.items():\n for color, color_geometries in height_geometries.items():\n for geometry in color_geometries:\n geometry.build_polyhedron(create_polyhedron,\n lower=altitudes,\n upper=altitudes + height,\n crops=crops)\n",
"import io\nimport re\nimport subprocess\nimport zlib\nfrom itertools import chain\nfrom typing import Optional\n\nimport numpy as np\nfrom django.conf import settings\nfrom django.core import checks\nfrom PIL import Image\nfrom shapely.affinity import translate\nfrom shapely.geometry import LineString, Polygon\n# import gobject-inspect, cairo and rsvg if the native rsvg SVG_RENDERER should be used\nfrom shapely.ops import unary_union\n\nfrom c3nav.mapdata.render.engines.base import FillAttribs, RenderEngine, StrokeAttribs\n\nif settings.SVG_RENDERER == 'rsvg':\n import pgi\n import cairocffi\n pgi.require_version('Rsvg', '2.0')\n from pgi.repository import Rsvg\n\n\n@checks.register()\ndef check_svg_renderer(app_configs, **kwargs):\n errors = []\n if settings.SVG_RENDERER not in ('rsvg', 'rsvg-convert', 'inkscape'):\n errors.append(\n checks.Error(\n 'Invalid SVG renderer: '+settings.SVG_RENDERER,\n obj='settings.SVG_RENDERER',\n id='c3nav.mapdata.E002',\n )\n )\n return errors\n\n\nclass SVGEngine(RenderEngine):\n filetype = 'png'\n\n # draw an svg image. supports pseudo-3D shadow-rendering\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # create base elements and counter for clip path ids\n self.g = ''\n self.defs = ''\n self.clip_path_i = 0\n\n # for fast numpy operations\n self.np_scale = np.array((self.scale, -self.scale))\n self.np_offset = np.array((-self.minx * self.scale, self.maxy * self.scale))\n\n # keep track of created blur filters to avoid duplicates\n self.blurs = set()\n\n # keep track which area of the image has which altitude currently\n self.altitudes = {}\n self.last_altitude = None\n\n self._create_geometry_cache = {}\n\n def get_xml(self, buffer=False):\n # get the root <svg> element as an ElementTree element, with or without buffer\n if buffer:\n width_px = self._trim_decimals(str(self.buffered_width))\n height_px = self._trim_decimals(str(self.buffered_height))\n offset_px = self._trim_decimals(str(-self.buffer))\n attribs = ' viewBox=\"' + ' '.join((offset_px, offset_px, width_px, height_px)) + '\"' if buffer else ''\n else:\n width_px = self._trim_decimals(str(self.width))\n height_px = self._trim_decimals(str(self.height))\n attribs = ''\n\n result = ('<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" '\n 'width=\"'+width_px+'\" height=\"'+height_px+'\"'+attribs+'>')\n if self.defs:\n result += '<defs>'+self.defs+'</defs>'\n if self.g:\n result += '<g>'+self.g+'</g>'\n result += '</svg>'\n return result\n\n def render(self, filename=None):\n # render the image to png. 
returns bytes if f is None, otherwise it calls f.write()\n\n if self.width == 256 and self.height == 256 and not self.g:\n # create empty tile png with minimal size, indexed color palette with only one entry\n plte = b'PLTE' + bytearray(tuple(int(i*255) for i in self.background_rgb))\n return (b'\\x89PNG\\r\\n\\x1a\\n' +\n b'\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x01\\x03\\x00\\x00\\x00f\\xbc:%\\x00\\x00\\x00\\x03' +\n plte + zlib.crc32(plte).to_bytes(4, byteorder='big') +\n b'\\x00\\x00\\x00\\x1fIDATh\\xde\\xed\\xc1\\x01\\r\\x00\\x00\\x00\\xc2\\xa0\\xf7Om\\x0e7\\xa0\\x00\\x00\\x00\\x00\\x00' +\n b'\\x00\\x00\\x00\\xbe\\r!\\x00\\x00\\x01\\x7f\\x19\\x9c\\xa7\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82')\n\n if settings.SVG_RENDERER == 'rsvg':\n # create buffered surfaces\n buffered_surface = cairocffi.SVGSurface(None, self.buffered_width, self.buffered_height)\n buffered_context = cairocffi.Context(buffered_surface)\n\n # draw svg with rsvg\n handle = Rsvg.Handle()\n svg = handle.new_from_data(self.get_xml(buffer=True).encode())\n svg.render_cairo(buffered_context)\n\n # create cropped image\n surface = buffered_surface.create_similar(cairocffi.CONTENT_COLOR, self.width, self.height)\n context = cairocffi.Context(surface)\n\n # set background color\n context.set_source(cairocffi.SolidPattern(*self.background_rgb))\n context.paint()\n\n # paste buffered immage with offset\n context.set_source_surface(buffered_surface, -self.buffer, -self.buffer)\n context.paint()\n\n return surface.write_to_png()\n\n elif settings.SVG_RENDERER == 'rsvg-convert':\n p = subprocess.run(('rsvg-convert', '-b', self.background, '--format', 'png'),\n input=self.get_xml(buffer=True).encode(), stdout=subprocess.PIPE, check=True)\n png = io.BytesIO(p.stdout)\n img = Image.open(png)\n img = img.crop((self.buffer, self.buffer,\n self.buffer + self.width,\n self.buffer + self.height))\n\n f = io.BytesIO()\n img.save(f, 'PNG')\n f.seek(0)\n return f.read()\n\n elif settings.SVG_RENDERER == 'inkscape':\n p = subprocess.run(('inkscape', '-z', '-b', self.background, '-e', '/dev/stderr', '/dev/stdin'),\n input=self.get_xml().encode(), stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n check=True)\n png = p.stderr[p.stderr.index(b'\\x89PNG'):]\n return png\n\n def _trim_decimals(self, data):\n # remove trailing zeros from a decimal – yes this is slow, but it greatly speeds up cairo rendering\n return re.sub(r'([0-9]+)((\\.[1-9])[0-9]+|\\.[0-9]+)?', r'\\1\\2', data)\n\n def _geometry_to_svg(self, geom):\n # scale and move geometry and create svg code for it\n if isinstance(geom, Polygon):\n return ('<path d=\"' +\n ' '.join((('M %.1f %.1f L'+(' %.1f %.1f'*(len(ring.coords)-1))+' z') %\n tuple((np.array(ring)*self.np_scale+self.np_offset).flatten()))\n for ring in chain((geom.exterior,), geom.interiors))\n + '\"/>').replace('.0 ', ' ')\n if isinstance(geom, LineString):\n return (('<path d=\"M %.1f %.1f L'+(' %.1f %.1f'*(len(geom.coords)-1))+'\"/>') %\n tuple((np.array(geom)*self.np_scale+self.np_offset).flatten())).replace('.0 ', ' ')\n try:\n geoms = geom.geoms\n except AttributeError:\n return ''\n return ''.join(self._geometry_to_svg(g) for g in geoms)\n\n def _create_geometry(self, geometry, attribs='', tag='g', cache_key=None):\n # convert a shapely geometry into an svg xml element\n result = None\n if cache_key is not None:\n result = self._create_geometry_cache.get(cache_key, None)\n if result is None:\n result = self._geometry_to_svg(geometry)\n if cache_key is not None:\n 
self._create_geometry_cache[cache_key] = result\n return '<'+tag+attribs+'>'+result+'</'+tag+'>'\n\n def register_clip_path(self, geometry):\n defid = 'clip'+str(self.clip_path_i)\n self.defs += self._create_geometry(geometry, ' id=\"'+defid+'\"', tag='clipPath')\n self.clip_path_i += 1\n return defid\n\n def add_shadow(self, geometry, elevation, clip_path=None):\n # add a shadow for the given geometry with the given elevation and, optionally, a clip path\n elevation = float(min(elevation, 2))\n blur_radius = elevation / 3 * 0.25\n\n shadow_geom = translate(geometry.buffer(blur_radius),\n xoff=(elevation / 3 * 0.12), yoff=-(elevation / 3 * 0.12))\n\n if clip_path is not None:\n if shadow_geom.distance(clip_path) >= blur_radius:\n return\n\n blur_id = 'blur'+str(int(elevation*100))\n if elevation not in self.blurs:\n self.defs += ('<filter id=\"'+blur_id+'\" width=\"200%\" height=\"200%\" x=\"-50%\" y=\"-50%\">'\n '<feGaussianBlur stdDeviation=\"'+str(blur_radius * self.scale)+'\"/>'\n '</filter>')\n self.blurs.add(elevation)\n\n attribs = ' filter=\"url(#'+blur_id+')\" fill=\"#000\" fill-opacity=\"0.2\"'\n if clip_path:\n attribs += ' clip-path=\"url(#'+self.register_clip_path(clip_path)+')\"'\n shadow = self._create_geometry(shadow_geom, attribs)\n self.g += shadow\n\n def clip_altitudes(self, new_geometry, new_altitude=None):\n # register new geometry with an altitude\n # a geometry with no altitude will reset the altitude information of its area as if nothing was ever there\n if self.last_altitude is not None and self.last_altitude > new_altitude:\n raise ValueError('Altitudes have to be ascending.')\n\n if new_altitude in self.altitudes:\n self.altitudes[new_altitude] = unary_union([self.altitudes[new_altitude], new_geometry])\n else:\n self.altitudes[new_altitude] = new_geometry\n\n def darken(self, area):\n if area:\n self.add_geometry(geometry=area, fill=FillAttribs('#000000', 0.1), category='darken')\n\n def _add_geometry(self, geometry, fill: Optional[FillAttribs], stroke: Optional[StrokeAttribs],\n altitude=None, height=None, shape_cache_key=None, **kwargs):\n geometry = self.buffered_bbox.intersection(geometry.geom if hasattr(geometry, 'geom') else geometry)\n\n if geometry.is_empty:\n return\n\n if fill:\n attribs = ' fill=\"'+(fill.color)+'\"'\n if fill.opacity:\n attribs += ' fill-opacity=\"'+str(fill.opacity)[:4]+'\"'\n else:\n attribs = ' fill=\"none\"'\n\n if altitude is not None and stroke is None:\n stroke = StrokeAttribs('rgba(0, 0, 0, 0.15)', 0.05, min_px=0.2)\n\n if stroke:\n width = stroke.width*self.scale\n if stroke.min_px:\n width = max(width, stroke.min_px)\n attribs += ' stroke-width=\"' + self._trim_decimals(str(width)) + '\" stroke=\"' + stroke.color + '\"'\n if stroke.opacity:\n attribs += ' stroke-opacity=\"'+str(stroke.opacity)[:4]+'\"'\n\n if geometry is not None:\n\n if False:\n # old shadow rendering. currently needs too much resources\n if altitude is not None or height is not None:\n if height is not None:\n if height:\n self.add_shadow(geometry, height)\n else:\n for other_altitude, other_geom in self.altitudes.items():\n self.add_shadow(geometry, altitude-other_altitude, clip_path=other_geom)\n\n self.clip_altitudes(geometry, altitude)\n else:\n if height is not None:\n self.add_shadow(geometry, height)\n\n element = self._create_geometry(geometry, attribs, cache_key=shape_cache_key)\n\n else:\n element = '<rect width=\"100%\" height=\"100%\"'+attribs+'>'\n\n self.g += element\n"
] |
[
[
"numpy.empty",
"numpy.vstack"
],
[
"numpy.array"
]
] |
ezhaohongwei/jina
|
[
"9769f2e35eb8a196304a145409f959a7beac0432",
"9769f2e35eb8a196304a145409f959a7beac0432"
] |
[
"jina/types/document/graph.py",
"tests/unit/types/document/test_document.py"
] |
[
"from typing import Optional, Iterator, Tuple, Dict, Iterable\n\nimport numpy as np\n\nfrom . import Document, DocumentSourceType\nfrom ..arrays import ChunkArray\nfrom ..struct import StructView\nfrom ..ndarray.sparse.scipy import SparseNdArray\nfrom ...importer import ImportExtensions\nfrom ...logging.predefined import default_logger\n\n__all__ = ['GraphDocument']\n\nif False:\n from scipy.sparse import coo_matrix\n from dgl import DGLGraph\n\n\nclass GraphDocument(Document):\n \"\"\"\n :class:`GraphDocument` is a data type created based on Jina primitive data type :class:`Document`.\n\n It adds functionality that lets you work with a `Document` as a `directed graph` where all its chunks are the nodes in the `graph`.\n\n It exposes functionality to access and manipulate `graph related info` from the `DocumentProto` such as adjacency and edge features.\n\n .. warning::\n - It assumes that every ``chunk`` of a ``document`` is a node of a graph.\n\n :param document: the document to construct from. If ``bytes`` is given\n then deserialize a :class:`DocumentProto`; ``dict`` is given then\n parse a :class:`DocumentProto` from it; ``str`` is given, then consider\n it as a JSON string and parse a :class:`DocumentProto` from it; finally,\n one can also give `DocumentProto` directly, then depending on the ``copy``,\n it builds a view or a copy from it.\n :param copy: when ``document`` is given as a :class:`DocumentProto` object, build a\n view (i.e. weak reference) from it or a deep copy from it.\n :param kwargs: further key value arguments\n \"\"\"\n\n def __init__(\n self,\n document: Optional[DocumentSourceType] = None,\n copy: bool = False,\n **kwargs,\n ):\n self._check_installed_array_packages()\n super().__init__(document=document, copy=copy, **kwargs)\n self._node_id_to_offset = {\n node.id: offset for offset, node in enumerate(self.nodes)\n } # dangerous because document is stateless, try to work only with proto\n\n @staticmethod\n def _check_installed_array_packages():\n from ... 
import JINA_GLOBAL\n\n if JINA_GLOBAL.scipy_installed is None:\n JINA_GLOBAL.scipy_installed = False\n with ImportExtensions(\n required=True,\n pkg_name='scipy',\n help_text=f'GraphDocument requires scipy to be installed for sparse matrix support.',\n ):\n import scipy\n\n JINA_GLOBAL.scipy_installed = True\n\n def add_node(self, node: 'Document'):\n \"\"\"\n Add a a node to the graph\n\n :param node: the node to be added to the graph\n \"\"\"\n if node.id in self._node_id_to_offset:\n default_logger.debug(f'Document {node.id} is already a node of the graph')\n return\n\n self._node_id_to_offset[node.id] = len(self.nodes)\n self.nodes.append(node)\n\n def remove_node(self, node: 'Document'):\n \"\"\"\n Remove a node from the graph along with the edges that may contain it\n\n :param node: the node to be removed from the graph\n \"\"\"\n from scipy.sparse import coo_matrix\n\n if node.id not in self._node_id_to_offset:\n default_logger.debug(\n f'Trying to remove document {node.id} from the graph while is not a node of the graph'\n )\n return\n\n offset = self._node_id_to_offset[node.id]\n\n if self.num_edges > 0:\n edges_to_remove = []\n for edge_id, (row, col) in enumerate(\n zip(self.adjacency.row, self.adjacency.col)\n ):\n if row.item() == offset or col.item() == offset:\n edge_features_keys = (\n f'{self.nodes[row.item()].id}-{self.nodes[col.item()]}'\n )\n edges_to_remove.append((edge_id, edge_features_keys))\n\n for edge_id, edge_features_key in reversed(edges_to_remove):\n self._remove_edge_id(edge_id, edge_features_key)\n\n if self.num_edges > 0:\n row = np.copy(self.adjacency.row)\n col = np.copy(self.adjacency.col)\n data = np.copy(self.adjacency.data)\n for i in range(self.num_edges):\n if self.adjacency.row[i] > offset:\n row[i] = row[i] - 1\n if self.adjacency.col[i] > offset:\n col[i] = col[i] - 1\n SparseNdArray(\n self._pb_body.graph.adjacency, sp_format='coo'\n ).value = coo_matrix((data, (row, col)))\n\n del self.nodes[offset]\n self._node_id_to_offset = {\n node.id: offset for offset, node in enumerate(self.nodes)\n }\n\n def add_edge(\n self, doc1: 'Document', doc2: 'Document', features: Optional[Dict] = None\n ):\n \"\"\"\n Add an edge to the graph connecting `doc1` with `doc2`\n\n :param doc1: the starting node for this edge\n :param doc2: the ending node for this edge\n :param features: Optional features dictionary to be added to this new created edge\n \"\"\"\n from scipy.sparse import coo_matrix\n\n self.add_node(doc1)\n self.add_node(doc2)\n\n current_adjacency = self.adjacency\n doc1_node_offset = self._node_id_to_offset[doc1.id]\n doc2_node_offset = self._node_id_to_offset[doc2.id]\n row = (\n np.append(current_adjacency.row, doc1_node_offset)\n if current_adjacency is not None\n else np.array([doc1_node_offset])\n )\n col = (\n np.append(current_adjacency.col, doc2_node_offset)\n if current_adjacency is not None\n else np.array([doc2_node_offset])\n )\n data = (\n np.append(current_adjacency.data, 1)\n if current_adjacency is not None\n else np.array([1])\n )\n SparseNdArray(\n self._pb_body.graph.adjacency, sp_format='coo'\n ).value = coo_matrix((data, (row, col)))\n if features is not None:\n self.edge_features[f'{doc1.id}-{doc2.id}'] = features\n\n def _remove_edge_id(self, edge_id: int, edge_feature_key: str):\n from scipy.sparse import coo_matrix\n\n if self.adjacency is not None:\n if edge_id > self.num_edges:\n raise Exception(\n f'Trying to remove edge {edge_id} while number of edges is {self.num_edges}'\n )\n row = np.delete(self.adjacency.row, 
edge_id)\n col = np.delete(self.adjacency.col, edge_id)\n data = np.delete(self.adjacency.data, edge_id)\n if row.shape[0] > 0:\n SparseNdArray(\n self._pb_body.graph.adjacency, sp_format='coo'\n ).value = coo_matrix((data, (row, col)))\n else:\n SparseNdArray(\n self._pb_body.graph.adjacency, sp_format='coo'\n ).value = coo_matrix((0, 0))\n\n if edge_feature_key in self.edge_features:\n del self.edge_features[edge_feature_key]\n\n def remove_edge(self, doc1: 'Document', doc2: 'Document'):\n \"\"\"\n Remove a node from the graph along with the edges that may contain it\n\n :param doc1: the starting node for this edge\n :param doc2: the ending node for this edge\n \"\"\"\n offset1 = self._node_id_to_offset[doc1.id]\n offset2 = self._node_id_to_offset[doc2.id]\n for edge_id, (row, col) in enumerate(\n zip(self.adjacency.row, self.adjacency.col)\n ):\n if row.item() == offset1 and col.item() == offset2:\n self._remove_edge_id(edge_id, f'{doc1.id}-{doc2.id}')\n\n @property\n def edge_features(self):\n \"\"\"\n The dictionary of edge features, indexed by `edge_id` in the `edge list`\n\n .. # noqa: DAR201\n \"\"\"\n return StructView(self._pb_body.graph.edge_features)\n\n @property\n def adjacency(self):\n \"\"\"\n The adjacency list for this graph,\n\n .. # noqa: DAR201\n \"\"\"\n return SparseNdArray(self._pb_body.graph.adjacency, sp_format='coo').value\n\n @property\n def num_nodes(self) -> int:\n \"\"\"\n The number of nodes in the graph\n\n .. # noqa: DAR201\n \"\"\"\n return len(self.nodes)\n\n @property\n def num_edges(self) -> int:\n \"\"\"\n The number of edges in the graph\n\n .. # noqa: DAR201\n \"\"\"\n adjacency = self.adjacency\n return adjacency.data.shape[0] if adjacency is not None else 0\n\n @property\n def nodes(self):\n \"\"\"\n The nodes list for this graph\n\n .. # noqa: DAR201\n \"\"\"\n return self.chunks\n\n def get_out_degree(self, doc: 'Document') -> int:\n \"\"\"\n The out degree of the doc node\n\n .. # noqa: DAR201\n :param doc: the document node from which to extract the outdegree.\n \"\"\"\n out_edges = self.get_outgoing_nodes(doc)\n return len(out_edges) if out_edges else 0\n\n def get_in_degree(self, doc: 'Document') -> int:\n \"\"\"\n The in degree of the doc node\n\n .. # noqa: DAR201\n :param doc: the document node from which to extract the indegree.\n \"\"\"\n in_edges = self.get_incoming_nodes(doc)\n return len(in_edges) if in_edges else 0\n\n @nodes.setter\n def nodes(self, value: Iterable['Document']):\n \"\"\"Set all nodes of the current document.\n\n :param value: the array of nodes of this document\n \"\"\"\n self.chunks = value\n\n def get_outgoing_nodes(self, doc: 'Document') -> Optional[ChunkArray]:\n \"\"\"\n Get all the outgoing edges from `doc`\n\n .. # noqa: DAR201\n :param doc: the document node from which to extract the outgoing nodes.\n \"\"\"\n if self.adjacency is not None and doc.id in self._node_id_to_offset:\n offset = self._node_id_to_offset[doc.id]\n return ChunkArray(\n [\n self.nodes[col.item()]\n for (row, col) in zip(self.adjacency.row, self.adjacency.col)\n if row.item() == offset\n ],\n reference_doc=self,\n )\n\n def get_incoming_nodes(self, doc: 'Document') -> Optional[ChunkArray]:\n \"\"\"\n Get all the outgoing edges from `doc`\n\n .. 
# noqa: DAR201\n :param doc: the document node from which to extract the incoming nodes.\n \"\"\"\n if self.adjacency is not None and doc.id in self._node_id_to_offset:\n offset = self._node_id_to_offset[doc.id]\n return ChunkArray(\n [\n self.nodes[row.item()]\n for (row, col) in zip(self.adjacency.row, self.adjacency.col)\n if col.item() == offset\n ],\n reference_doc=self,\n )\n\n @staticmethod\n def load_from_dgl_graph(dgl_graph: 'DGLGraph') -> 'GraphDocument':\n \"\"\"\n Construct a GraphDocument from of graph with type `DGLGraph`\n\n .. # noqa: DAR201\n :param dgl_graph: the graph from which to construct a `GraphDocument`.\n\n .. warning::\n - This method only deals with the graph structure (nodes and conectivity) graph\n features that are task specific are ignored.\n \"\"\"\n jina_graph = GraphDocument()\n nodeid_to_doc = {}\n for node in dgl_graph.nodes():\n node_doc = Document()\n nodeid_to_doc[int(node)] = node_doc\n jina_graph.add_node(node_doc)\n\n for node_source, node_destination in zip(*dgl_graph.edges()):\n jina_graph.add_edge(\n nodeid_to_doc[int(node_source)], nodeid_to_doc[int(node_destination)]\n )\n\n return jina_graph\n\n def to_dgl_graph(self) -> 'DGLGraph':\n \"\"\"\n Construct a `dgl.DGLGraph` from a `GraphDocument` instance.\n\n .. warning::\n - This method only deals with the graph structure (nodes and conectivity) graph\n features that are task specific are ignored.\n\n .. # noqa: DAR201\n \"\"\"\n from ... import JINA_GLOBAL\n\n if JINA_GLOBAL.dgl_installed is None:\n JINA_GLOBAL.dgl_installed = False\n with ImportExtensions(\n required=True,\n pkg_name='dgl',\n help_text=f'to_dgl_graph method requires dgl to be installed',\n ):\n import dgl\n\n JINA_GLOBAL.dgl_installed = True\n\n if JINA_GLOBAL.torch_installed is None:\n JINA_GLOBAL.torch_installed = False\n with ImportExtensions(\n required=True,\n pkg_name='torch',\n help_text=f'to_dgl_graph method requires torch to be installed',\n ):\n import torch\n\n JINA_GLOBAL.torch_installed = True\n\n import torch\n import dgl\n\n if self.adjacency is None:\n default_logger.debug(\n f'Trying to convert to dgl graph without \\\n for GraphDocument.id = {self.id} without adjacency matrix'\n )\n dgl_graph = dgl.DGLGraph()\n dgl_graph.add_nodes(self.num_nodes)\n return dgl_graph\n else:\n source_nodes = torch.tensor(self.adjacency.row.copy())\n destination_nodes = torch.tensor(self.adjacency.col.copy())\n\n return dgl.graph((source_nodes, destination_nodes))\n\n def __iter__(self) -> Iterator[Tuple['Document']]:\n if self.adjacency is not None:\n for (row, col) in zip(self.adjacency.row, self.adjacency.col):\n yield self.nodes[row.item()], self.nodes[col.item()]\n else:\n default_logger.debug(f'Trying to iterate over a graph without edges')\n\n def __mermaid_str__(self):\n\n if len(self.nodes) == 0:\n return super().__mermaid_str__()\n\n results = []\n printed_ids = set()\n _node_id_node_mermaid_id = {}\n\n for node in self.nodes:\n _node_id_node_mermaid_id[node.id] = node._mermaid_id\n\n for in_node, out_node in self:\n\n in_node_mermaid_id = _node_id_node_mermaid_id[in_node.id]\n if in_node_mermaid_id not in printed_ids:\n in_node._mermaid_id = in_node_mermaid_id\n printed_ids.add(in_node_mermaid_id)\n results.append(in_node.__mermaid_str__())\n\n out_node_mermaid_id = _node_id_node_mermaid_id[out_node.id]\n if out_node_mermaid_id not in printed_ids:\n out_node._mermaid_id = out_node_mermaid_id\n printed_ids.add(out_node_mermaid_id)\n results.append(out_node.__mermaid_str__())\n\n 
results.append(f'{in_node_mermaid_id[:3]} --> {out_node_mermaid_id[:3]}')\n\n return '\\n'.join(results)\n",
"import json\nimport os\nfrom contextlib import contextmanager\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nimport torch\nfrom google.protobuf.json_format import MessageToDict\nfrom scipy.sparse import coo_matrix, bsr_matrix, csr_matrix, csc_matrix\n\nfrom jina.proto.jina_pb2 import DocumentProto\nfrom jina.types.document import Document\nfrom jina.types.ndarray.generic import NdArray\nfrom jina.types.request import Request\nfrom jina.types.score import NamedScore\nfrom tests import random_docs\n\n\ndef scipy_sparse_list():\n return [coo_matrix, bsr_matrix, csr_matrix, csc_matrix]\n\n\n@pytest.fixture\ndef row():\n return np.array([0, 0, 1, 2, 2, 2])\n\n\n@pytest.fixture\ndef column():\n return np.array([0, 2, 2, 0, 1, 2])\n\n\n@pytest.fixture\ndef data():\n return np.array([1, 2, 3, 4, 5, 6])\n\n\n@pytest.fixture(params=scipy_sparse_list())\ndef scipy_sparse_matrix(request, row, column, data):\n matrix_type = request.param\n return matrix_type((data, (row, column)), shape=(4, 10))\n\n\n@pytest.fixture\ndef tf_sparse_matrix(row, column, data):\n indices = [(x, y) for x, y in zip(row, column)]\n return tf.SparseTensor(indices=indices, values=data, dense_shape=[4, 10])\n\n\n@pytest.fixture\ndef torch_sparse_matrix(row, column, data):\n shape = [4, 10]\n indices = [list(row), list(column)]\n return torch.sparse_coo_tensor(indices, data, shape)\n\n\n@pytest.mark.parametrize('field', ['blob', 'embedding'])\ndef test_ndarray_get_set(field):\n a = Document()\n b = np.random.random([10, 10])\n setattr(a, field, b)\n np.testing.assert_equal(getattr(a, field), b)\n\n b = np.random.random([10, 10])\n c = NdArray()\n c.value = b\n setattr(a, field, c)\n np.testing.assert_equal(getattr(a, field), b)\n\n b = np.random.random([10, 10])\n c = NdArray()\n c.value = b\n setattr(a, field, c._pb_body)\n np.testing.assert_equal(getattr(a, field), b)\n\n\ndef test_doc_update_fields():\n a = Document()\n b = np.random.random([10, 10])\n c = {'tags': 'string', 'tag-tag': {'tags': 123.45}}\n d = [12, 34, 56]\n e = 'text-mod'\n w = 2.0\n a.set_attributes(embedding=b, tags=c, location=d, modality=e, weight=w)\n np.testing.assert_equal(a.embedding, b)\n assert list(a.location) == d\n assert a.modality == e\n assert a.tags == c\n assert a.weight == w\n\n\ndef test_granularity_get_set():\n d = Document()\n d.granularity = 1\n assert d.granularity == 1\n\n\ndef test_uri_get_set():\n a = Document()\n a.uri = 'https://abc.com/a.jpg'\n assert a.uri == 'https://abc.com/a.jpg'\n assert a.mime_type == 'image/jpeg'\n a.uri = 'abcdefg'\n assert a.uri == 'abcdefg'\n a.content = 'abcdefg'\n assert a.text == 'abcdefg'\n assert not a.uri\n\n\ndef test_set_get_mime():\n a = Document()\n a.mime_type = 'jpg'\n assert a.mime_type == 'image/jpeg'\n b = Document()\n b.mime_type = 'jpeg'\n assert b.mime_type == 'image/jpeg'\n c = Document()\n c.mime_type = '.jpg'\n assert c.mime_type == 'image/jpeg'\n\n\ndef test_no_copy_construct():\n a = DocumentProto()\n b = Document(a, copy=False)\n a.id = '1' * 16\n assert b.id == '1' * 16\n\n b.id = '2' * 16\n assert a.id == '2' * 16\n\n\ndef test_copy_construct():\n a = DocumentProto()\n b = Document(a, copy=True)\n a.id = '1' * 16\n assert b.id != '1' * 16\n\n b.id = '2' * 16\n assert a.id == '1' * 16\n\n\ndef test_bad_good_doc_id():\n b = Document()\n b.id = 'hello'\n b.id = 'abcd' * 4\n b.id = 'de09' * 4\n b.id = 'af54' * 4\n b.id = 'abcdef0123456789'\n\n\ndef test_id_context():\n with Document() as d:\n d.buffer = b'123'\n assert d.id\n\n\ndef test_doc_content():\n d = 
Document()\n assert d.content is None\n d.text = 'abc'\n assert d.content == 'abc'\n c = np.random.random([10, 10])\n d.blob = c\n np.testing.assert_equal(d.content, c)\n d.buffer = b'123'\n assert d.buffer == b'123'\n\n\ndef test_request_docs_mutable_iterator():\n \"\"\"To test the weak reference work in docs\"\"\"\n r = Request()\n r.request_type = 'data'\n for d in random_docs(10):\n r.docs.append(d)\n\n for idx, d in enumerate(r.docs):\n assert isinstance(d, Document)\n d.text = f'look I changed it! {idx}'\n\n # iterate it again should see the change\n doc_pointers = []\n for idx, d in enumerate(r.docs):\n assert isinstance(d, Document)\n assert d.text == f'look I changed it! {idx}'\n doc_pointers.append(d)\n\n # pb-lize it should see the change\n rpb = r.proto\n\n for idx, d in enumerate(rpb.data.docs):\n assert isinstance(d, DocumentProto)\n assert d.text == f'look I changed it! {idx}'\n\n # change again by following the pointers\n for d in doc_pointers:\n d.text = 'now i change it back'\n\n # iterate it again should see the change\n for idx, d in enumerate(rpb.data.docs):\n assert isinstance(d, DocumentProto)\n assert d.text == 'now i change it back'\n\n\ndef test_request_docs_chunks_mutable_iterator():\n \"\"\"Test if weak reference work in nested docs\"\"\"\n r = Request()\n r.request_type = 'data'\n for d in random_docs(10):\n r.docs.append(d)\n\n for d in r.docs:\n assert isinstance(d, Document)\n for idx, c in enumerate(d.chunks):\n assert isinstance(d, Document)\n c.text = f'look I changed it! {idx}'\n\n # iterate it again should see the change\n doc_pointers = []\n for d in r.docs:\n assert isinstance(d, Document)\n for idx, c in enumerate(d.chunks):\n assert c.text == f'look I changed it! {idx}'\n doc_pointers.append(c)\n\n # pb-lize it should see the change\n rpb = r.proto\n\n for d in rpb.data.docs:\n assert isinstance(d, DocumentProto)\n for idx, c in enumerate(d.chunks):\n assert isinstance(c, DocumentProto)\n assert c.text == f'look I changed it! 
{idx}'\n\n # change again by following the pointers\n for d in doc_pointers:\n d.text = 'now i change it back'\n\n # iterate it again should see the change\n for d in rpb.data.docs:\n assert isinstance(d, DocumentProto)\n for c in d.chunks:\n assert c.text == 'now i change it back'\n\n\ndef test_doc_setattr():\n from jina import Document\n\n with Document() as root:\n root.text = 'abc'\n\n assert root.adjacency == 0\n\n with Document() as match:\n match.text = 'def'\n m = root.matches.append(match)\n\n with Document() as chunk:\n chunk.text = 'def'\n c = root.chunks.append(chunk)\n\n assert len(root.matches) == 1\n assert root.matches[0].granularity == 0\n assert root.matches[0].adjacency == 1\n\n assert m.granularity == 0\n assert m.adjacency == 1\n\n assert len(root.chunks) == 1\n assert root.chunks[0].granularity == 1\n assert root.chunks[0].adjacency == 0\n\n assert c.granularity == 1\n assert c.adjacency == 0\n\n\ndef test_doc_score():\n from jina.types.score import NamedScore\n\n with Document() as doc:\n doc.text = 'text'\n\n score = NamedScore(op_name='operation', value=10.0, ref_id=doc.id)\n doc.score = score\n\n assert doc.score.op_name == 'operation'\n assert doc.score.value == 10.0\n assert doc.score.ref_id == doc.id\n\n\ndef test_content_hash_not_dependent_on_chunks_or_matches():\n doc1 = Document()\n doc1.content = 'one'\n doc1.update_content_hash()\n\n doc2 = Document()\n doc2.content = 'one'\n doc2.update_content_hash()\n assert doc1.content_hash == doc2.content_hash\n\n doc3 = Document()\n doc3.content = 'one'\n for _ in range(3):\n with Document() as m:\n m.content = 'some chunk'\n doc3.chunks.append(m)\n doc3.update_content_hash()\n assert doc1.content_hash == doc3.content_hash\n\n doc4 = Document()\n doc4.content = 'one'\n for _ in range(3):\n with Document() as m:\n m.content = 'some match'\n doc4.matches.append(m)\n doc4.update_content_hash()\n assert doc1.content_hash == doc4.content_hash\n\n\ndef test_include_scalar():\n d1 = Document()\n d1.text = 'hello'\n dd1 = Document()\n d1.chunks.append(dd1)\n d1.update_content_hash(include_fields=('text',), exclude_fields=None)\n\n d2 = Document()\n d2.text = 'hello'\n d2.update_content_hash(include_fields=('text',), exclude_fields=None)\n\n assert d1.content_hash == d2.content_hash\n\n # change text should result in diff hash\n d2.text = 'world'\n d2.update_content_hash(include_fields=('text',), exclude_fields=None)\n assert d1.content_hash != d2.content_hash\n\n\ndef test_include_repeated_fields():\n def build_document(chunk=None):\n d = Document()\n d.chunks.append(chunk)\n d.chunks[0].update_content_hash(\n exclude_fields=('parent_id', 'id', 'content_hash')\n )\n d.chunks[0].parent_id = 0\n d.update_content_hash(include_fields=('chunks',), exclude_fields=None)\n return d\n\n c = Document()\n d1 = build_document(chunk=c)\n d2 = build_document(chunk=c)\n\n assert d1.chunks[0].content_hash == d2.chunks[0].content_hash\n assert d1.content_hash == d2.content_hash\n\n # change text should result in same hash\n d2.text = 'world'\n d2.update_content_hash(include_fields=('chunks',), exclude_fields=None)\n assert d1.content_hash == d2.content_hash\n\n # change chunks should result in diff hash\n d2.chunks.clear()\n d2.update_content_hash(include_fields=('chunks',), exclude_fields=None)\n assert d1.content_hash != d2.content_hash\n\n\n@pytest.mark.parametrize('from_str', [True, False])\n@pytest.mark.parametrize(\n 'd_src',\n [\n {\n 'id': '123',\n 'mime_type': 'txt',\n 'parent_id': '456',\n 'tags': {'hello': 'world'},\n },\n {'id': 
'123', 'mimeType': 'txt', 'parentId': '456', 'tags': {'hello': 'world'}},\n {\n 'id': '123',\n 'mimeType': 'txt',\n 'parent_id': '456',\n 'tags': {'hello': 'world'},\n },\n ],\n)\ndef test_doc_from_dict_cases(d_src, from_str):\n # regular case\n if from_str:\n d_src = json.dumps(d_src)\n d = Document(d_src)\n assert d.tags['hello'] == 'world'\n assert d.mime_type == 'txt'\n assert d.id == '123'\n assert d.parent_id == '456'\n\n\n@pytest.mark.parametrize('from_str', [True, False])\ndef test_doc_arbitrary_dict(from_str):\n d_src = {'id': '123', 'hello': 'world', 'tags': {'good': 'bye'}}\n if from_str:\n d_src = json.dumps(d_src)\n d = Document(d_src)\n assert d.id == '123'\n assert d.tags['hello'] == 'world'\n assert d.tags['good'] == 'bye'\n\n d_src = {'hello': 'world', 'good': 'bye'}\n if from_str:\n d_src = json.dumps(d_src)\n d = Document(d_src)\n assert d.tags['hello'] == 'world'\n assert d.tags['good'] == 'bye'\n\n\n@pytest.mark.parametrize('from_str', [True, False])\ndef test_doc_field_resolver(from_str):\n d_src = {'music_id': '123', 'hello': 'world', 'tags': {'good': 'bye'}}\n if from_str:\n d_src = json.dumps(d_src)\n d = Document(d_src)\n assert d.id != '123'\n assert d.tags['hello'] == 'world'\n assert d.tags['good'] == 'bye'\n assert d.tags['music_id'] == '123'\n\n d_src = {'music_id': '123', 'hello': 'world', 'tags': {'good': 'bye'}}\n if from_str:\n d_src = json.dumps(d_src)\n d = Document(d_src, field_resolver={'music_id': 'id'})\n assert d.id == '123'\n assert d.tags['hello'] == 'world'\n assert d.tags['good'] == 'bye'\n assert 'music_id' not in d.tags\n\n\ndef test_doc_plot(tmpdir):\n docs = [\n Document(\n id='🐲',\n embedding=np.array([0, 0]),\n tags={'guardian': 'Azure Dragon', 'position': 'East'},\n ),\n Document(\n id='🐦',\n embedding=np.array([1, 0]),\n tags={'guardian': 'Vermilion Bird', 'position': 'South'},\n ),\n Document(\n id='🐢',\n embedding=np.array([0, 1]),\n tags={'guardian': 'Black Tortoise', 'position': 'North'},\n ),\n Document(\n id='🐯',\n embedding=np.array([1, 1]),\n tags={'guardian': 'White Tiger', 'position': 'West'},\n ),\n ]\n\n docs[0].chunks.append(docs[1])\n docs[0].chunks[0].chunks.append(docs[2])\n docs[0].matches.append(docs[3])\n\n assert docs[0]._mermaid_to_url('svg')\n docs[0].plot(inline_display=True, output=os.path.join(tmpdir, 'doc.svg'))\n assert os.path.exists(os.path.join(tmpdir, 'doc.svg'))\n docs[0].plot()\n\n\n@pytest.fixture\ndef test_docs():\n s = Document(\n id='🐲',\n content='hello-world',\n tags={'a': 'b'},\n embedding=np.array([1, 2, 3]),\n chunks=[Document(id='🐢')],\n )\n d = Document(\n id='🐦',\n content='goodbye-world',\n tags={'c': 'd'},\n embedding=np.array([4, 5, 6]),\n chunks=[Document(id='🐯')],\n )\n return (s, d)\n\n\n@pytest.fixture\ndef expected_doc_fields():\n from jina.proto import jina_pb2\n\n return sorted(set(list(jina_pb2.DocumentProto().DESCRIPTOR.fields_by_name)))\n\n\n@pytest.fixture\ndef ignored_doc_fields():\n return ['embedding', 'score', 'blob', 'buffer', 'text', 'tags', 'uri']\n\n\ndef test_document_to_json(expected_doc_fields, ignored_doc_fields):\n doc = Document()\n doc_dict = json.loads(doc.json())\n present_keys = sorted(doc_dict.keys())\n assert present_keys == ['id']\n\n\ndef test_document_to_dict(expected_doc_fields, ignored_doc_fields):\n doc = Document()\n doc_dict = doc.dict()\n present_keys = sorted(doc_dict.keys())\n assert present_keys == ['id']\n\n\ndef test_non_empty_fields():\n d_score = Document(score=NamedScore(value=42))\n assert d_score.non_empty_fields == ('id', 'score')\n\n d = 
Document()\n assert d.non_empty_fields == ('id',)\n\n d = Document(id='')\n assert not d.non_empty_fields\n\n\ndef test_get_attr_values():\n d = Document(\n {\n 'id': '123',\n 'text': 'document',\n 'feature1': 121,\n 'name': 'name',\n 'tags': {'id': 'identity', 'a': 'b', 'c': 'd'},\n }\n )\n d.score = NamedScore(value=42)\n\n required_keys = [\n 'id',\n 'text',\n 'tags__name',\n 'tags__feature1',\n 'score__value',\n 'tags__c',\n 'tags__id',\n 'tags__inexistant',\n 'inexistant',\n ]\n res = d.get_attributes(*required_keys)\n\n assert len(res) == len(required_keys)\n assert res[required_keys.index('id')] == '123'\n assert res[required_keys.index('tags__feature1')] == 121\n assert res[required_keys.index('tags__name')] == 'name'\n assert res[required_keys.index('text')] == 'document'\n assert res[required_keys.index('tags__c')] == 'd'\n assert res[required_keys.index('tags__id')] == 'identity'\n assert res[required_keys.index('score__value')] == 42\n assert res[required_keys.index('tags__inexistant')] is None\n assert res[required_keys.index('inexistant')] is None\n\n required_keys_2 = ['tags', 'text']\n res2 = d.get_attributes(*required_keys_2)\n assert len(res2) == 2\n assert res2[required_keys_2.index('text')] == 'document'\n assert res2[required_keys_2.index('tags')] == d.tags\n\n d = Document({'id': '123', 'tags': {'outterkey': {'innerkey': 'real_value'}}})\n required_keys_3 = ['tags__outterkey__innerkey']\n res3 = d.get_attributes(*required_keys_3)\n assert res3 == 'real_value'\n\n d = Document(content=np.array([1, 2, 3]))\n res4 = np.stack(d.get_attributes(*['blob']))\n np.testing.assert_equal(res4, np.array([1, 2, 3]))\n\n\ndef test_document_sparse_attributes_scipy(scipy_sparse_matrix):\n d = Document()\n d.embedding = scipy_sparse_matrix\n d.blob = scipy_sparse_matrix\n np.testing.assert_array_equal(d.embedding.todense(), scipy_sparse_matrix.todense())\n np.testing.assert_array_equal(d.blob.todense(), scipy_sparse_matrix.todense())\n\n\ndef test_document_sparse_attributes_tensorflow(tf_sparse_matrix):\n import tensorflow as tf\n\n d = Document()\n d.embedding = tf_sparse_matrix\n d.blob = tf_sparse_matrix\n np.testing.assert_array_equal(\n d.embedding.todense(), tf.sparse.to_dense(tf_sparse_matrix)\n )\n np.testing.assert_array_equal(\n d.blob.todense(), tf.sparse.to_dense(tf_sparse_matrix)\n )\n\n\ndef test_document_sparse_attributes_pytorch(torch_sparse_matrix):\n d = Document()\n d.embedding = torch_sparse_matrix\n d.blob = torch_sparse_matrix\n\n np.testing.assert_array_equal(\n d.embedding.todense(), torch_sparse_matrix.to_dense().numpy()\n )\n np.testing.assert_array_equal(\n d.blob.todense(), torch_sparse_matrix.to_dense().numpy()\n )\n\n\n@pytest.mark.parametrize(\n 'return_sparse_ndarray_cls_type, return_scipy_class_type, return_expected_type',\n [\n ('scipy', 'coo', coo_matrix),\n ('scipy', 'csr', csr_matrix),\n ('scipy', 'csc', csc_matrix),\n ('scipy', 'bsr', bsr_matrix),\n ('torch', None, torch.Tensor),\n ('tf', None, tf.SparseTensor),\n ],\n)\n@pytest.mark.parametrize('field', ['embedding', 'blob'])\ndef test_document_sparse_embedding(\n scipy_sparse_matrix,\n return_sparse_ndarray_cls_type,\n return_scipy_class_type,\n return_expected_type,\n field,\n):\n d = Document()\n setattr(d, field, scipy_sparse_matrix)\n cls_type = None\n sparse_kwargs = {}\n if return_sparse_ndarray_cls_type == 'scipy':\n from jina.types.ndarray.sparse.scipy import SparseNdArray\n\n cls_type = SparseNdArray\n sparse_kwargs['sp_format'] = return_scipy_class_type\n elif 
return_sparse_ndarray_cls_type == 'torch':\n from jina.types.ndarray.sparse.pytorch import SparseNdArray\n\n cls_type = SparseNdArray\n elif return_sparse_ndarray_cls_type == 'tf':\n from jina.types.ndarray.sparse.tensorflow import SparseNdArray\n\n cls_type = SparseNdArray\n\n if field == 'blob':\n field_sparse = d.get_sparse_blob(\n sparse_ndarray_cls_type=cls_type, **sparse_kwargs\n )\n elif field == 'embedding':\n field_sparse = d.get_sparse_embedding(\n sparse_ndarray_cls_type=cls_type, **sparse_kwargs\n )\n\n assert field_sparse is not None\n assert isinstance(field_sparse, return_expected_type)\n if return_sparse_ndarray_cls_type == 'torch':\n assert field_sparse.is_sparse\n\n if return_sparse_ndarray_cls_type == 'scipy':\n np.testing.assert_equal(field_sparse.todense(), scipy_sparse_matrix.todense())\n elif return_sparse_ndarray_cls_type == 'torch':\n np.testing.assert_equal(\n field_sparse.to_dense().numpy(), scipy_sparse_matrix.todense()\n )\n elif return_scipy_class_type == 'tf':\n np.testing.assert_equal(\n tf.sparse.to_dense(field_sparse).numpy(), scipy_sparse_matrix.todense()\n )\n\n\ndef test_evaluations():\n document = Document()\n score = document.evaluations.add()\n score.op_name = 'operation'\n score.value = 10.0\n assert document.evaluations[0].value == 10.0\n assert document.evaluations[0].op_name == 'operation'\n\n\n@contextmanager\ndef does_not_raise():\n yield\n\n\n@pytest.mark.parametrize(\n 'doccontent, expectation',\n [\n ({'content': 'hello', 'uri': 'https://jina.ai'}, pytest.raises(ValueError)),\n ({'content': 'hello', 'text': 'world'}, pytest.raises(ValueError)),\n ({'content': 'hello', 'blob': np.array([1, 2, 3])}, pytest.raises(ValueError)),\n ({'content': 'hello', 'buffer': b'hello'}, pytest.raises(ValueError)),\n ({'buffer': b'hello', 'text': 'world'}, pytest.raises(ValueError)),\n ({'content': 'hello', 'id': 1}, does_not_raise()),\n ],\n)\ndef test_conflicting_doccontent(doccontent, expectation):\n with expectation:\n document = Document(**doccontent)\n assert document.content is not None\n\n\n@pytest.mark.parametrize('val', [1, 1.0, np.float64(1.0)])\ndef test_doc_different_score_value_type(val):\n d = Document()\n d.score = val\n assert int(d.score.value) == 1\n\n\ndef test_doc_match_score_assign():\n d = Document(id='hello')\n d1 = Document(d, copy=True, score=123)\n d.matches = [d1]\n assert d.matches[0].score.value == 123\n\n\ndef test_doc_update_given_empty_fields_and_attributes_identical(test_docs):\n # doc1 and doc2 has the same fields, id, content, tags, embedding and chunks.\n doc1, doc2 = test_docs\n doc1.update(source=doc2)\n assert doc1.id == doc2.id\n assert doc1.content == doc2.content\n assert doc1.tags == {'a': 'b', 'c': 'd'} # tags will be merged.\n assert (doc1.embedding == doc2.embedding).all()\n assert doc1.chunks == doc2.chunks\n\n\ndef test_doc_update_given_empty_fields_and_destination_has_more_attributes(test_docs):\n # doc1 and doc2 has the same fields, id, content, tags, embedding and chunks.\n doc1, doc2 = test_docs\n # remove doc2 content field\n doc2._pb_body.ClearField(\n 'content'\n ) # content of source \"goodbye-world\" was removed, not update this field.\n assert doc2.content is None\n doc1.update(source=doc2)\n assert doc1.id == doc2.id\n assert doc1.content == 'hello-world' # doc1 content remains the same.\n assert doc1.tags == {'a': 'b', 'c': 'd'} # tags will be merged.\n assert (doc1.embedding == doc2.embedding).all()\n assert doc1.chunks == doc2.chunks\n\n\ndef 
test_doc_update_given_empty_fields_and_source_has_more_attributes(test_docs):\n # doc1 and doc2 has the same fields, id, content, tags, embedding and chunks.\n doc1, doc2 = test_docs\n # remove doc2 content field\n doc1._pb_body.ClearField('content') # content of destination was removed.\n assert doc1.content is None\n doc1.update(source=doc2)\n assert doc1.id == doc2.id\n assert (\n doc1.content == doc2.content\n ) # destination content `None` was updated by source's content.\n assert doc1.tags == {'a': 'b', 'c': 'd'} # tags will be merged.\n assert (doc1.embedding == doc2.embedding).all()\n assert doc1.chunks == doc2.chunks\n\n\ndef test_doc_update_given_singular_fields_and_attributes_identical(test_docs):\n # doc1 and doc2 has the same fields, id, content, tags, embedding and chunks.\n doc1, doc2 = test_docs\n # After update, only specified fields are updated.\n doc1.update(source=doc2, fields=['id', 'text'])\n assert doc1.id == doc2.id\n assert doc1.content == doc2.content # None was updated by source's content.\n assert doc1.tags != doc2.tags\n assert doc1.tags == {'a': 'b'}\n assert (doc1.embedding != doc2.embedding).all()\n assert doc1.chunks != doc2.chunks\n\n\ndef test_doc_update_given_nested_fields_and_attributes_identical(test_docs):\n # doc1 and doc2 has the same fields, id, content, tags, embedding and chunks.\n doc1, doc2 = test_docs\n # After update, only specified nested fields are updated.\n doc1.update(source=doc2, fields=['tags', 'embedding', 'chunks'])\n assert doc1.id != doc2.id\n assert doc1.content != doc2.content # None was updated by source's content.\n assert doc1.tags == {'a': 'b', 'c': 'd'} # tags will be merged.\n assert (doc1.embedding == doc2.embedding).all()\n assert (\n doc1.chunks[0].parent_id != doc2.chunks[0].parent_id\n ) # parent id didn't change since id field not updated.\n assert doc1.chunks[0].id == doc2.chunks[0].id\n assert doc1.chunks[0].content_hash == doc2.chunks[0].content_hash\n\n\ndef test_doc_update_given_fields_and_destination_has_more_attributes(test_docs):\n # doc1 and doc2 has the same fields, id, content, tags, embedding and chunks.\n # After update, the specified fields will be cleared.\n doc1, doc2 = test_docs\n # remove doc2 text field\n doc2._pb_body.ClearField('text')\n assert doc2.text == ''\n assert doc2.content is None\n doc1.update(source=doc2, fields=['text'])\n assert doc1.text == ''\n assert doc1.tags != doc2.tags\n assert doc1.tags == {'a': 'b'}\n assert (doc1.embedding != doc2.embedding).all()\n assert doc1.chunks != doc2.chunks\n\n\ndef test_doc_update_given_fields_and_source_has_more_attributes(test_docs):\n # doc1 and doc2 has the same fields, id, content, tags, embedding and chunks.\n # After update, the specified fields will be replaced by source attribuet value.\n doc1, doc2 = test_docs\n # remove doc2 text field\n doc1._pb_body.ClearField('text')\n assert doc1.text == ''\n assert doc1.content is None\n doc1.update(source=doc2, fields=['text'])\n assert doc1.id != doc2.id\n assert doc1.content == doc2.content # None was updated by source's content\n assert doc1.tags != doc2.tags\n assert doc1.tags == {'a': 'b'}\n assert (doc1.embedding != doc2.embedding).all()\n assert doc1.chunks != doc2.chunks\n\n\ndef test_doc_update_given_content_hash_updated(test_docs):\n doc1, doc2 = test_docs\n doc1.update_content_hash()\n doc2.update(doc1)\n assert doc1.content_hash == doc2.content_hash\n\n\ndef test_document_pretty_dict():\n doc = Document(\n blob=np.array([[0, 1, 2], [2, 1, 0]]),\n embedding=np.array([1.0, 2.0, 3.0]),\n 
tags={'hello': 'world'},\n )\n chunk = Document(doc, copy=True)\n chunk.blob = np.array([[3, 4, 5], [5, 4, 3]])\n chunk.embedding = np.array([4.0, 5.0, 6.0])\n match = Document(doc, copy=True)\n match.blob = np.array([[6, 7, 8], [8, 7, 6]])\n match.embedding = np.array([7.0, 8.0, 9.0])\n doc.chunks.append(chunk)\n doc.matches.append(match)\n assert doc.tags == {'hello': 'world'}\n assert doc.blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert doc.embedding.tolist() == [1.0, 2.0, 3.0]\n assert doc.chunks[0].tags == {'hello': 'world'}\n assert doc.chunks[0].blob.tolist() == [[3, 4, 5], [5, 4, 3]]\n assert doc.chunks[0].embedding.tolist() == [4.0, 5.0, 6.0]\n assert doc.matches[0].tags == {'hello': 'world'}\n assert doc.matches[0].blob.tolist() == [[6, 7, 8], [8, 7, 6]]\n assert doc.matches[0].embedding.tolist() == [7.0, 8.0, 9.0]\n\n d = doc.dict(prettify_ndarrays=True)\n assert d['blob'] == [[0, 1, 2], [2, 1, 0]]\n assert d['embedding'] == [1.0, 2.0, 3.0]\n assert d['tags'] == {'hello': 'world'}\n assert d['chunks'][0]['blob'] == [[3, 4, 5], [5, 4, 3]]\n assert d['chunks'][0]['embedding'] == [4.0, 5.0, 6.0]\n assert d['chunks'][0]['tags'] == {'hello': 'world'}\n assert d['matches'][0]['blob'] == [[6, 7, 8], [8, 7, 6]]\n assert d['matches'][0]['embedding'] == [7.0, 8.0, 9.0]\n assert d['matches'][0]['tags'] == {'hello': 'world'}\n\n d_reconstructed = Document(d)\n assert d_reconstructed.tags == {'hello': 'world'}\n assert d_reconstructed.blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert d_reconstructed.embedding.tolist() == [1.0, 2.0, 3.0]\n assert d_reconstructed.chunks[0].tags == {'hello': 'world'}\n assert d_reconstructed.chunks[0].blob.tolist() == [[3, 4, 5], [5, 4, 3]]\n assert d_reconstructed.chunks[0].embedding.tolist() == [4.0, 5.0, 6.0]\n assert d_reconstructed.matches[0].tags == {'hello': 'world'}\n assert d_reconstructed.matches[0].blob.tolist() == [[6, 7, 8], [8, 7, 6]]\n assert d_reconstructed.matches[0].embedding.tolist() == [7.0, 8.0, 9.0]\n\n\ndef test_document_pretty_json():\n doc = Document(\n blob=np.array([[0, 1, 2], [2, 1, 0]]),\n embedding=np.array([1.0, 2.0, 3.0]),\n tags={'hello': 'world'},\n )\n doc.chunks.append(Document(doc, copy=True))\n doc.matches.append(Document(doc, copy=True))\n assert doc.tags == {'hello': 'world'}\n assert doc.blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert doc.embedding.tolist() == [1.0, 2.0, 3.0]\n assert doc.chunks[0].tags == {'hello': 'world'}\n assert doc.chunks[0].blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert doc.chunks[0].embedding.tolist() == [1.0, 2.0, 3.0]\n assert doc.matches[0].tags == {'hello': 'world'}\n assert doc.matches[0].blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert doc.matches[0].embedding.tolist() == [1.0, 2.0, 3.0]\n doc_json = doc.json(prettify_ndarrays=True)\n d = json.loads(doc_json)\n assert d['blob'] == [[0, 1, 2], [2, 1, 0]]\n assert d['embedding'] == [1.0, 2.0, 3.0]\n assert d['tags'] == {'hello': 'world'}\n assert d['chunks'][0]['blob'] == [[0, 1, 2], [2, 1, 0]]\n assert d['chunks'][0]['embedding'] == [1.0, 2.0, 3.0]\n assert d['chunks'][0]['tags'] == {'hello': 'world'}\n assert d['matches'][0]['blob'] == [[0, 1, 2], [2, 1, 0]]\n assert d['matches'][0]['embedding'] == [1.0, 2.0, 3.0]\n assert d['matches'][0]['tags'] == {'hello': 'world'}\n\n d_reconstructed = Document(doc_json)\n assert d_reconstructed.tags == {'hello': 'world'}\n assert d_reconstructed.blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert d_reconstructed.embedding.tolist() == [1.0, 2.0, 3.0]\n assert d_reconstructed.chunks[0].tags == {'hello': 
'world'}\n assert d_reconstructed.chunks[0].blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert d_reconstructed.chunks[0].embedding.tolist() == [1.0, 2.0, 3.0]\n assert d_reconstructed.matches[0].tags == {'hello': 'world'}\n assert d_reconstructed.matches[0].blob.tolist() == [[0, 1, 2], [2, 1, 0]]\n assert d_reconstructed.matches[0].embedding.tolist() == [1.0, 2.0, 3.0]\n"
] |
[
[
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.delete",
"numpy.copy",
"numpy.append"
],
[
"tensorflow.SparseTensor",
"numpy.array",
"numpy.testing.assert_equal",
"tensorflow.sparse.to_dense",
"numpy.float64",
"torch.sparse_coo_tensor",
"numpy.random.random"
]
] |
RayRuizhiLiao/DIM_DEMI
|
[
"6b0cb14f56f1aee232e553e75ea81a722b9e71dd"
] |
[
"cortex_DIM_DEMI/models/coordinates.py"
] |
[
"'''Coordinate task\n\n'''\n\nfrom cortex.plugins import ModelPlugin\nimport torch\nimport torch.nn.functional as F\n\nfrom cortex_DIM_DEMI.nn_modules.mi_networks import MI1x1ConvNet\n\n\nclass CoordinatePredictor(ModelPlugin):\n '''Coordinate prediction\n\n '''\n defaults = dict(\n data=dict(batch_size=dict(train=64, test=64),\n inputs=dict(inputs='data.images'), skip_last_batch=True),\n train=dict(save_on_lowest='losses.encoder', epochs=1000),\n optimizer=dict(learning_rate=1e-4)\n )\n\n def build(self, encoder, config, task_idx=None):\n '''\n\n Args:\n task_idx: Indices for coordinate task.\n\n '''\n\n self.nets.encoder = encoder\n\n if task_idx is not None:\n self.task_idx = task_idx\n elif 'local_task_idx' not in config.keys():\n raise ValueError('')\n else:\n self.task_idx = config['local_task_idx']\n\n # Create MI nn_modules.\n X = self.inputs('data.images')\n outs = self.nets.encoder(X, return_all_activations=True)\n L, G = [outs[i] for i in self.task_idx]\n local_size = L.size()[1:]\n dim_x = local_size[1]\n dim_y = local_size[2]\n n_coords = dim_x + dim_y\n global_size = G.size()[1:]\n n_inputs = global_size[0] + local_size[0]\n if len(global_size) != 1:\n raise NotImplementedError('Global vector must be 1d')\n\n # Set up ground truth labels\n self.labels = torch.zeros((n_coords, dim_x, dim_y)).float().to(L.device)\n for i in range(dim_x):\n for j in range(dim_y):\n self.labels[i, i, j] = 1.\n self.labels[dim_x + j, i, j] = 1.\n\n coord_net = MI1x1ConvNet(n_inputs, n_coords).to(X.device)\n\n def extract(outs, coord_net=None):\n '''Wrapper function to be put in encoder forward for speed.\n\n Args:\n outs (list): List of activations\n coord_net (nn.Module): Network to predict coordinates of every location.\n\n Returns:\n tuple: local, global outputs\n\n '''\n L, G = [outs[i] for i in self.task_idx]\n\n input = torch.cat([L, G[:, :, None, None].expand(-1, -1, L.size(2), L.size(3))], dim=1)\n logits = coord_net(input)\n\n return logits\n\n self.nets.encoder.module.add_network(self.name, extract,\n networks=dict(coord_net=coord_net))\n\n def routine(self, outs=None, scale=1.0):\n '''\n\n Args:\n scale: Scaling term for loss on the encoder.\n\n '''\n logits = outs[self.name]\n\n labels_ex = self.labels[None, :, :, :].expand(logits.size(0), -1, -1, -1)\n\n x_logits, y_logits = torch.chunk(logits, 2, dim=1)\n x_labels, y_labels = torch.chunk(labels_ex, 2, dim=1)\n\n x_sm_out = F.log_softmax(x_logits, dim=1)\n y_sm_out = F.log_softmax(y_logits, dim=1)\n\n x_loss = -(x_labels * x_sm_out).sum(1).mean()\n y_loss = -(y_labels * y_sm_out).sum(1).mean()\n loss = x_loss + y_loss\n\n # Computing accuracies.\n x_labels = torch.max(x_labels.data, 1)[1]\n y_labels = torch.max(y_labels.data, 1)[1]\n\n x_pred = torch.max(x_logits.data, 1)[1]\n y_pred = torch.max(y_logits.data, 1)[1]\n\n x_correct = 100. * x_pred.eq(x_labels.data).float().cpu().mean()\n y_correct = 100. * y_pred.eq(y_labels.data).float().cpu().mean()\n self.add_losses(encoder=scale * loss)\n self.add_results(x_accuracy=x_correct, y_accuracy=y_correct, total_accuracy=0.5 * (x_correct + y_correct))\n"
] |
[
[
"torch.zeros",
"torch.max",
"torch.chunk",
"torch.nn.functional.log_softmax"
]
] |
ljzycmd/SimDeblur
|
[
"dd2f60c41176b75c4eaf80d740f547c206aa8227",
"31d88e1fbec91d5cc9062f4a46538e4ba806ab29"
] |
[
"simdeblur/model/loss/perceptual_loss.py",
"utils/metrics.py"
] |
[
"\"\"\" ************************************************\n* fileName: perceptual_loss.py\n* desc: Perceptual loss using vggnet with conv1_2, conv2_2, conv3_3 feature,\n before relu layer. \n* author: mingdeng_cao\n* date: 2021/07/09 11:08\n* last revised: None\n************************************************ \"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchvision.models import vgg19, vgg16\n\nfrom ..build import LOSS_REGISTRY\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossVGG19(nn.Module):\n def __init__(self, layer_idx=[2, 7, 14], layer_weights=[1, 0.2, 0.04], reduction=\"sum\"):\n super().__init__()\n self.layer_idx = layer_idx\n self.layer_weights = layer_weights\n self.vggnet_feats_layers = vgg19(pretrained=True).features\n\n self.reduction = reduction\n\n def vgg_forward(self, img):\n selected_feats = []\n out = img\n self.vggnet_feats_layers = self.vggnet_feats_layers.to(img)\n for i, layer in enumerate(self.vggnet_feats_layers):\n out = layer(out)\n if i in self.layer_idx:\n selected_feats.append(out)\n if i == self.layer_idx[-1]:\n break\n assert len(selected_feats) == len(self.layer_idx)\n return selected_feats\n\n def forward(self, img1, img2):\n selected_feats1 = self.vgg_forward(img1)\n selected_feats2 = self.vgg_forward(img2)\n\n loss = 0\n for i, (feat1, feat2) in enumerate(zip(selected_feats1, selected_feats2)):\n assert feat1.shape == feat2.shape, \"The input tensor should be in same shape!\"\n loss += F.mse_loss(feat1, feat2, reduction=self.reduction) * self.layer_weights[i]\n\n return loss\n",
"# CMD\n\nimport torch\nimport torch.nn.functional as F\nimport cv2\n\n\ndef calculate_psnr(img1, img2):\n \"\"\"\n data range [0, 1]\n \"\"\"\n img1 = img1.clamp(0, 1)\n img2 = img2.clamp(0, 1)\n\n mse = torch.mean((img1 - img2) ** 2, [1, 2, 3])\n # if mse == 0:\n # return 100\n PIXEL_MAX = 1\n return 20 * torch.mean(torch.log10(PIXEL_MAX / torch.sqrt(mse)))\n\n\ndef calculate_ssim(img1, img2):\n # implemented with pytorch\n assert isinstance(img1, torch.Tensor)\n assert isinstance(img1, torch.Tensor)\n\n img1 = img1.clamp(0, 1)\n img2 = img2.clamp(0, 1)\n \n C1 = (0.01 * 1)**2\n C2 = (0.03 * 1)**2\n\n # img1 = img1.to(torch.float32)\n # img2 = img2.to(torch.float32)\n kernel = gaussian(11, 1.5).to(img1).unsqueeze(1)\n window = kernel.mm(kernel.t()).float().expand(3, 1, 11, 11)\n\n mu1 = F.conv2d(img1, window, groups = 3) # valid\n mu2 = F.conv1d(img2, window, groups = 3)\n mu1_sq = mu1**2\n mu2_sq = mu2**2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = F.conv2d(img1**2, window, groups=3) - mu1_sq\n sigma2_sq = F.conv2d(img2**2, window, groups=3) - mu2_sq\n sigma12 = F.conv2d(img1 * img2, window, groups=3) - mu1_mu2\n\n # mu1 = F.conv2d(img1, window, padding = 11//2, groups = 3) # same\n # mu2 = F.conv1d(img2, window, padding = 11//2, groups = 3)\n # mu1_sq = mu1**2\n # mu2_sq = mu2**2\n # mu1_mu2 = mu1 * mu2\n # sigma1_sq = F.conv2d(img1**2, window, padding=11//2, groups=3) - mu1_sq\n # sigma2_sq = F.conv2d(img2**2, window, padding=11//2, groups=3) - mu2_sq\n # sigma12 = F.conv2d(img1 * img2, window, padding=11//2, groups=3) - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *\n (sigma1_sq + sigma2_sq + C2))\n return ssim_map.mean()\n\ndef gaussian(window_size, sigma):\n gauss = torch.exp(torch.Tensor([-(x - window_size//2)**2/float(2*sigma**2) for x in range(window_size)]).float())\n return gauss/gauss.sum()\n\ndef create_window(window_size, channel):\n _1D_window = gaussian(window_size, 1.5).unsqueeze(1)\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)\n window = (_2D_window.expand(channel, 1, window_size, window_size).contiguous())\n return window\n\ndef _ssim(img1, img2, window, window_size, channel, size_average = True):\n mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)\n mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)\n\n mu1_sq = mu1.pow(2)\n mu2_sq = mu2.pow(2)\n mu1_mu2 = mu1*mu2\n\n sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq\n sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq\n sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2\n\n C1 = 0.01**2\n C2 = 0.03**2\n\n ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))\n\n if size_average:\n return ssim_map.mean()\n else:\n return ssim_map.mean(1).mean(1).mean(1)\n\nclass SSIM(torch.nn.Module):\n def __init__(self, window_size = 11, size_average = True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window(window_size, self.channel)\n\n def forward(self, img1, img2):\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type() == img1.data.type():\n window = self.window\n else:\n window = create_window(self.window_size, channel)\n \n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = 
window.type_as(img1)\n \n self.window = window\n self.channel = channel\n\n\n return _ssim(img1, img2, window, self.window_size, channel, self.size_average)\n\ndef ssim2(img1, img2, window_size = 11, size_average = True):\n (_, channel, _, _) = img1.size()\n window = create_window(window_size, channel)\n \n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n \n return _ssim(img1, img2, window, window_size, channel, size_average)\n\n\nif __name__ == \"__main__\":\n img1 = torch.ones(1, 3, 256, 256)*0.95\n img2 = torch.ones(1, 3, 256, 256) \n print(ssim2(img1, img2))\n print(ssim(img1, img2))\n print(psnr(img1, img2))"
] |
[
[
"torch.nn.functional.mse_loss"
],
[
"torch.sqrt",
"torch.ones",
"torch.nn.functional.conv1d",
"torch.nn.functional.conv2d",
"torch.mean"
]
] |
mleszczy/bootleg
|
[
"162d74001cdfbbe146753393641d549e0328acb1"
] |
[
"bootleg/dataloaders/wiki_dataset.py"
] |
[
"import os\nimport time\nimport ujson as json\nimport torch\nimport sys\nimport pickle\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport torch.distributed as dist\nimport torch.nn.functional as F\n\nfrom bootleg.symbols.alias_entity_table import AliasEntityTable\nfrom bootleg.symbols.constants import *\nfrom bootleg.prep import prep_data\nfrom bootleg.utils import logging_utils, data_utils, train_utils\nfrom bootleg.utils.utils import import_class\nfrom bootleg.utils import utils\n\n# https://github.com/pytorch/pytorch/issues/37581#issuecomment-624516586\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\".*The given NumPy array is not writeable.*\")\n\nclass WikiDataset(Dataset):\n \"\"\"\n Main dataset class that handles preparing a batch of input.\n\n Things to note\n **Input is a sentence with mentions that are both true and false golds. A true gold is one that was directly\n mined with Wikipedia. A false gold is one that was generated by weak labelling.\n **We determine entities that are in a slice by if the true entity index is -1 or not. During train, if use_weak_label is true,\n we allow the model to leverage true and false golds. During eval, we only score true enchors.\n **Some embeddings require more expensive processing. E.g., extracting the pairs of candidate entities that are connected\n in a KG. When this processing is done in the dataloader where is can benefit from multiple dataloader threads,\n the embedding is stored in batch_on_the_fly. This embedding must have a batch_prep method\n When this processing is done during data prep, the embedding is stored in batch_prep.\n **If training a NIL model, we support randomly removing the true entity from the candidate list and setting the true\n entity index to be the NIL entity.\n **We support data slices (subsets of data) for both training (if slice model) and eval. 
If using slices for training model,\n we supports probabilistic slice indices.\n\n Attributes:\n batch_prepped_emb_file_names: embedding that are batch prepped in advance\n batch_on_the_fly_embs: embedding where the batch_prep method is called in the __get_item__ method\n random_nil: whether to do NIL candidate random generation\n\n Batch Inputs:\n start_idx_in_sent: first token index of a mention,\n end_idx_in_sent: last token index of a mention,\n alias_idx: the alias (mention) index in our alias dictionary,\n word_indices: word indexes into the word emeddings (e.g., BERT token indices),\n sent_idx: unique sentence index,\n subsent_idx: unique subsentence index in the case of sentence windowing,\n entity_indices: the entity indices in our entity dictionary,\n alias_list_pos: keeps track of the original alias position in the list of all aliases in case the sentence\n is split via windowing\n true_entity_idx_for_train: entity indices for true and false golds, as seen during train\n slice_indices (optional): if slice dataset, we pass in matrix where each row is alias and each column\n is 0/1 if that mention is in the slice or not\n <ind_task_name> (option): probabilistic labels of if an mention is in a slice or not (used in slicing model)\n <pred_task_name>: NED prediction labels; for slice model, predictions of aliases not in the slice are masked\n <embs>: all batch prep or batch on the fly emeddings\n \"\"\"\n def __init__(self, args, use_weak_label, input_src, dataset_name,\n is_writer, distributed, word_symbols, entity_symbols,\n slice_dataset=None, dataset_is_eval=False):\n # Need to save args to reinstantiate logger\n self.args = args\n self.logger = logging_utils.get_logger(args)\n # Number of candidates, including NIL if a NIL model (train_in_candidates is False)\n self.K = entity_symbols.max_candidates + (not args.data_config.train_in_candidates)\n self.num_entities_with_pad_and_nocand = entity_symbols.num_entities_with_pad_and_nocand\n self.dataset_name = dataset_name\n self.slice_dataset = slice_dataset\n self.dataset_is_eval = dataset_is_eval\n # Slice names used for eval slices and a slicing model\n self.slice_names = train_utils.get_data_slices(args, dataset_is_eval)\n self.storage_type_file = data_utils.get_storage_file(self.dataset_name)\n # Mappings from sent_idx to row_id in dataset\n self.sent_idx_file = os.path.splitext(dataset_name)[0] + \"_sent_idx.json\"\n self.type_pred = False\n if args.data_config.type_prediction.use_type_pred:\n self.type_pred = True\n self.eid2typeid, self.num_types_with_pad = self.load_coarse_type_table(args, entity_symbols)\n # Load memory mapped file\n self.logger.info(\"Loading dataset...\")\n self.logger.debug(\"Seeing if \" + dataset_name + \" exists\")\n if (args.data_config.overwrite_preprocessed_data or\n (not os.path.exists(self.dataset_name)) or\n (not os.path.exists(self.sent_idx_file)) or\n (not os.path.exists(self.storage_type_file)) or\n (not os.path.exists(data_utils.get_batch_prep_config(self.dataset_name)))):\n start = time.time()\n self.logger.debug(f\"Building dataset with {input_src}\")\n # Only prep data once per node\n if is_writer:\n prep_data(args, use_weak_label=use_weak_label, dataset_is_eval=self.dataset_is_eval,\n input_src=input_src, dataset_name=dataset_name,\n prep_dir=data_utils.get_data_prep_dir(args))\n if distributed:\n # Make sure all processes wait for data to be created\n dist.barrier()\n self.logger.debug(f\"Finished building and saving dataset in {round(time.time() - start, 2)}s.\")\n\n start = 
time.time()\n\n # Storage type for loading memory mapped file of dataset\n self.storage_type = pickle.load(open(self.storage_type_file, 'rb'))\n\n self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')\n self.data_len = len(self.data)\n\n # Mapping from sentence idx to rows in the dataset (indices).\n # Needed when sampling sentence indices from slices for evaluation.\n sent_idx_to_idx_str = utils.load_json_file(self.sent_idx_file)\n self.sent_idx_to_idx = {int(i):val for i,val in sent_idx_to_idx_str.items()}\n self.logger.info(f\"Finished loading dataset.\")\n\n # Stores info about the batch prepped embedding memory mapped files and their shapes and datatypes\n # so we can load them\n self.batch_prep_config = utils.load_json_file(data_utils.get_batch_prep_config(self.dataset_name))\n self.batch_prepped_emb_files = {}\n self.batch_prepped_emb_file_names = {}\n for emb in args.data_config.ent_embeddings:\n if 'batch_prep' in emb and emb['batch_prep']:\n assert emb.key in self.batch_prep_config, f'Need to prep {emb.key}. Please call prep instead of run with batch_prep_embeddings set to true.'\n self.batch_prepped_emb_file_names[emb.key] = os.path.join(os.path.dirname(self.dataset_name),\n os.path.basename(self.batch_prep_config[emb.key]['file_name']))\n self.batch_prepped_emb_files[emb.key] = np.memmap(\n self.batch_prepped_emb_file_names[emb.key],\n dtype=self.batch_prep_config[emb.key]['dtype'],\n shape=tuple(self.batch_prep_config[emb.key]['shape']),\n mode='r')\n assert len(self.batch_prepped_emb_files[emb.key]) == self.data_len,\\\n f'Preprocessed emb data file {self.batch_prep_config[emb.key][\"file_name\"]} does not match length of main data file.'\n\n # Stores embeddings that we compute on the fly; these are embeddings where batch_on_the_fly is set to true.\n self.batch_on_the_fly_embs = {}\n for emb in args.data_config.ent_embeddings:\n if 'batch_on_the_fly' in emb and emb['batch_on_the_fly'] is True:\n mod, load_class = import_class(\"bootleg.embeddings\", emb.load_class)\n try:\n self.batch_on_the_fly_embs[emb.key] = getattr(mod, load_class)(main_args=args,\n emb_args=emb['args'], entity_symbols=entity_symbols,\n model_device=None, word_symbols=None, key=emb.key)\n except AttributeError as e:\n self.logger.warning(f'No prep method found for {emb.load_class} with error {e}')\n except Exception as e:\n print(\"ERROR\", e)\n # The data in this table shouldn't be pickled since we delete it in the class __getstate__\n self.alias2entity_table = AliasEntityTable(args=args, entity_symbols=entity_symbols)\n # Random NIL percent\n self.mask_perc = args.train_config.random_nil_perc\n self.random_nil = False\n # Don't want to random mask for eval\n if not dataset_is_eval:\n # Whether to use a random NIL training regime\n self.random_nil = args.train_config.random_nil\n if self.random_nil:\n self.logger.info(f'Using random nils during training with {self.mask_perc} percent')\n\n def __len__(self):\n return self.data_len\n\n def __getitem__(self, key):\n # start = time.time()\n example = self.data[key]\n entity_indices = self.alias2entity_table(example['alias_idx'])\n # True entities will be true and false golds for train (if use_weak_label in config is true) and just true golds for eval\n true_entities = torch.from_numpy(example['true_entity_idx'])\n M = true_entities.shape\n if self.random_nil:\n # example['true_entity_idx'] is M -> we want to sample some % of these and set them to not in candidate list\n # randomly mask each entity embedding\n bern_prob = (torch.ones(M) 
* self.mask_perc)\n keep_mask = torch.bernoulli(bern_prob) < 1\n # whichever we sample, we want to set corresponding true candidate to -1 and mask it out\n # to simulate not being in the candidate list\n # can't have negatives for one hot so we temporarily cast padded values to 0\n padded_entities = true_entities == -1\n true_entities = true_entities.masked_fill(padded_entities, 0)\n one_hot_true_entities = F.one_hot(true_entities, num_classes=self.K)\n one_hot_true_entities[keep_mask.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0\n one_hot_true_entities[padded_entities.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0\n entity_indices = entity_indices.masked_fill(one_hot_true_entities, -1)\n # set new true label to 0 ('not in candidate')\n true_entities = true_entities.masked_fill(~keep_mask, 0)\n # make sure original padded entities are padded\n true_entities = true_entities.masked_fill(padded_entities, -1)\n\n start_idx_in_sent = example['start_idx_in_sent']\n end_idx_in_sent = example['end_idx_in_sent']\n example_dict = {'start_idx_in_sent': start_idx_in_sent,\n 'end_idx_in_sent': end_idx_in_sent,\n 'alias_idx': example['alias_idx'],\n 'word_indices': example['word_indices'],\n 'sent_idx': example['sent_idx'],\n 'subsent_idx': example['subsent_idx'],\n 'entity_indices': entity_indices,\n # due to subsentence split, we need to keep track of the original alias position in the list\n # to do eval over slices when distributed\n # (examples from a sentence may be distributed across different GPUs)\n 'alias_list_pos': example['alias_list_pos'],\n # true entities of the mentions seen during train (true and false golds); in eval, we only keep\n # true entities of true golds\n 'true_entity_idx_for_train': example['true_entity_idx_for_train']}\n\n\n # If this dataset is associated with slices, slice_indices is a incidence matrix indicating\n # for each alias in the batch, which ones participate in which slice (slices keep track of sentence indexes and aliases to predict)\n # Slices are not windowed like that are for training data.\n if self.slice_dataset is not None:\n # -1 is pad and should not be in the mapping from sentence index to row in array.\n assert -1 != self.slice_dataset.sent_idx_arr[example[\"sent_idx\"]]\n # One row per mention and one column per slice\n slice_indices = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example[\"sent_idx\"]]].alias_to_predict.T\n for slice_name in self.slice_names])\n prob_labels_arr = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example[\"sent_idx\"]]].prob_labels.T\n for slice_name in self.slice_names])\n # alias_list_pos will have -1 for no alias; we want these to become zero in slice_indices.\n # Therefore we add a pad row to the bottom of slice_indices\n slice_indices = np.vstack([slice_indices, np.zeros(slice_indices.shape[1])]).astype(int)\n slice_indices = slice_indices[example['alias_list_pos']]\n # Probabilistic slice labels for slice indicator head training\n prob_labels_arr = np.vstack([prob_labels_arr, np.zeros(prob_labels_arr.shape[1])]).astype(float)\n prob_labels_arr = prob_labels_arr[example['alias_list_pos']]\n\n # If this is an eval dataset, keep slice indices intact for eval_wrapper\n example_dict['slice_indices'] = slice_indices\n # Assign true entity idx to -1 if example alias doesn't participate in slice\n for i, slice_name in enumerate(self.slice_names):\n prob_labels = prob_labels_arr[:,i]\n bin_in_slice_labels = slice_indices[:,i]\n\n # NED prediction 
labels; set predictions to be -1 for masking for mentions not in a slice\n pred_labels = np.copy(true_entities)\n pred_labels[~(bin_in_slice_labels).astype(bool)] = -1\n\n # Mask out slice alias labels for which we don't want to make a prediction\n # We need to use true_entity_idx to account for subsentences which indicate\n # which alias to predict\n prob_labels[true_entities == -1] = -1\n\n ind_task_name = train_utils.get_slice_head_ind_name(slice_name)\n pred_task_name = train_utils.get_slice_head_pred_name(slice_name)\n\n # Add indicator head and prediction head labels\n example_dict[ind_task_name] = prob_labels\n example_dict[pred_task_name] = pred_labels\n\n else:\n example_dict[train_utils.get_slice_head_pred_name(FINAL_LOSS)] = example['true_entity_idx']\n # Add type preds\n if self.type_pred:\n example_dict[\"type_labels\"] = self.eid2typeid[true_entities]\n # Add embeddings to example forward\n for emb_name in self.batch_prepped_emb_files:\n example_dict[emb_name] = np.asarray(self.batch_prepped_emb_files[emb_name][key])\n # Prep the embeddings (this will call the batch_prep method for the embedding)\n for emb_name, emb in self.batch_on_the_fly_embs.items():\n example_dict[emb_name] = emb.batch_prep(example['alias_idx'], entity_indices)\n return example_dict\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # Not picklable\n del state['data']\n del state['logger']\n # the sent_idx mapping is expensive to pickle so remove\n # also not needed in dataloader workers so we don't need to setstate for it\n del state['sent_idx_to_idx']\n del state['batch_prepped_emb_files']\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')\n self.batch_prepped_emb_files = {}\n for emb_name, file_name in self.batch_prepped_emb_file_names.items():\n self.batch_prepped_emb_files[emb_name] = np.memmap(self.batch_prepped_emb_file_names[emb_name],\n dtype=self.batch_prep_config[emb_name]['dtype'],\n shape=tuple(self.batch_prep_config[emb_name]['shape']),\n mode='r')\n self.logger = logging_utils.get_logger(self.args)\n\n def __repr__(self):\n return f\"Dataset {self.dataset_name}\"\n\n def load_coarse_type_table(self, args, entity_symbols):\n emb_dir = args.data_config.emb_dir\n coarse_type_file = args.data_config.type_prediction.file\n with open(os.path.join(emb_dir, coarse_type_file)) as in_f:\n # take the first type; UNK type is 0\n qid2type = {}\n max_type = 0\n for k, v in json.load(in_f).items():\n if len(v) > 0:\n qid2type[k] = v[0]+1\n else:\n qid2type[k] = 0\n max_type = max(max_type, qid2type[k])\n # We assume types are indexed from 0. So, 6 types will have indices 0 - 5. Max type will get 5+1 = 6.\n assert max_type == args.data_config.type_prediction.num_types,\\\n f\"{args.data_config.type_prediction.num_types} from args.data_config.type_prediction.num_types must match our computed number {max_type}\"\n # All qids get unk types\n values = [0 for _ in range(self.num_entities_with_pad_and_nocand)]\n for qid in qid2type:\n if entity_symbols.qid_exists(qid):\n values[entity_symbols.get_eid(qid)] = qid2type[qid]\n # Padded eid gets -1\n values[-1] = -1\n num_types_with_pad = max_type+1\n eid2coarsetype = torch.tensor(values)\n return eid2coarsetype, num_types_with_pad\n"
] |
[
[
"torch.nn.functional.one_hot",
"numpy.asarray",
"numpy.zeros",
"numpy.copy",
"torch.from_numpy",
"torch.ones",
"numpy.memmap",
"torch.tensor",
"torch.bernoulli",
"numpy.hstack",
"torch.distributed.barrier"
]
] |
CyberFlameGO/conversationai-models
|
[
"f82f66398b221d9fe3bcfd7641610af454b3db46",
"f82f66398b221d9fe3bcfd7641610af454b3db46",
"f82f66398b221d9fe3bcfd7641610af454b3db46"
] |
[
"kaggle-classification/keras_trainer/custom_metrics.py",
"experiments/tf_trainer/tf_cnn/finetune.py",
"attention-tutorial/visualize_attention.py"
] |
[
"\"\"\"Custom metrics used by Keras models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef auc_roc(y_true, y_pred):\n # any tensorflow metric\n y_true = tf.to_int32(tf.greater(y_true, 0.5))\n value, update_op = tf.metrics.auc(y_true, y_pred)\n\n # find all variables created for this metric\n metric_vars = [\n i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]\n ]\n\n # Add metric variables to GLOBAL_VARIABLES collection.\n # They will be initialized for new session.\n for v in metric_vars:\n tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)\n\n # force update metric values\n with tf.control_dependencies([update_op]):\n value = tf.identity(value)\n return value\n",
"\"\"\"Experiments with many_communities dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport nltk\nimport os\nimport pandas as pd\nimport tensorflow as tf\n\nfrom tf_trainer.common import base_model\nfrom tf_trainer.common import model_trainer\nfrom tf_trainer.common import serving_input\nfrom tf_trainer.common import text_preprocessor\nfrom tf_trainer.common import tfrecord_input\nfrom tf_trainer.common import types\nfrom tf_trainer.tf_cnn import model as tf_cnn\n\nfrom tensorflow.python.lib.io import file_io\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string(\"embeddings_path\",\n \"local_data/glove.6B/glove.6B.100d.txt\",\n \"Path to the embeddings file.\")\n\ntf.app.flags.DEFINE_string(\"tmp_results_path\", None,\n \"Path to the local combined (across communities) results file.\")\n\ntf.app.flags.mark_flag_as_required(\"warm_start_from\")\ntf.app.flags.mark_flag_as_required(\"tmp_results_path\")\n\ndef main(argv):\n del argv # unused\n\n embeddings_path = FLAGS.embeddings_path\n\n preprocessor = text_preprocessor.TextPreprocessor(embeddings_path)\n\n nltk.download(\"punkt\")\n train_preprocess_fn = preprocessor.train_preprocess_fn(nltk.word_tokenize)\n dataset = tfrecord_input.TFRecordInputWithTokenizer(\n train_preprocess_fn=train_preprocess_fn)\n\n # TODO: Move embedding *into* Keras model.\n model_tf = tf_cnn.TFCNNModel(dataset.labels())\n model = preprocessor.add_embedding_to_model(model_tf,\n base_model.TOKENS_FEATURE_KEY)\n\n trainer = model_trainer.ModelTrainer(dataset, model,\n warm_start_from=FLAGS.warm_start_from)\n trainer.train_with_eval()\n\n keys = [(\"label\", \"probabilities\")]\n predictions = list(trainer.predict_on_dev(predict_keys=keys))\n\n valid_path_csv = FLAGS.validate_path.replace(\"..tfrecord\", \".csv\")\n df = pd.read_csv(valid_path_csv)\n labels = df[\"label\"].values\n community = os.path.basename(FLAGS.validate_path).split(\"..\")[0]\n\n assert len(labels) == len(predictions), \\\n \"Labels and predictions must have the same length.\"\n\n d = {\n \"label\" : labels,\n \"prediction\": [p[keys[0]][1] for p in predictions],\n \"community\": [community for p in predictions],\n }\n\n df = pd.DataFrame(data=d)\n df.to_csv(path_or_buf=FLAGS.tmp_results_path, mode='a+',\n index=False, header=False)\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run(main)\n",
"\"\"\"A class to help visualize attention weights.\n\n------------------------------------------------------------------------\n\nCopyright 2018, Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pandas as pd\nimport tensorflow as tf\nimport numpy as np\n\npd.set_option('max_columns', 100)\ntokenizer = tf.contrib.learn.preprocessing.tokenizer\nWORDS_FEATURE = 'words'\nMAX_DOCUMENT_LENGTH = 60\n\n\nclass wordVal(object):\n \"\"\"A helper class that represents a word and value simultaneously.\"\"\"\n\n def __init__(self, word, val):\n self.word = word\n self.val = val\n\n def __str__(self):\n return self.word\n\n\nclass attentionDisplay(object):\n \"\"\"A class to visualize attention weights produced by a classifer on a given string.\"\"\"\n\n def __init__(self, vocab_processor, classifier, words_feature='words'):\n \"\"\"\n Args:\n * vocab_processor: a trained vocabulary processor from\n tf.contrib.learn.preprocessing.VocabularyProcessor\n * classifier: the classifier of class Estimator produced in\n Attention_Model_Codelab.ipynb\n * words_feature (string): if provided, the key for the comments in the\n feed dictionary expected by the classifier\n \"\"\"\n\n self.vocab_processor = vocab_processor\n self.classifier = classifier\n self.words_feature = words_feature\n\n def _rgb_to_hex(self, rgb):\n return '#%02x%02x%02x' % rgb\n\n def _color_wordvals(self, s):\n r = 255 - int(s.val * 255)\n color = self._rgb_to_hex((255, r, r))\n return 'background-color: %s' % color\n\n def _predict_sentence(self, input_string):\n x_test = self.vocab_processor.transform([input_string])\n x_test = np.array(list(x_test))\n\n test_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={self.words_feature: x_test}, num_epochs=1, shuffle=False)\n\n predictions = self.classifier.predict(input_fn=test_input_fn)\n y_predicted = []\n alphas_predicted = []\n for p in predictions:\n y_predicted.append(p['class'])\n alphas_predicted.append(p['attention'])\n return y_predicted, alphas_predicted\n\n def _resize_and_tokenize(self, input_string):\n tokenized_sentence = list(tokenizer([input_string]))[0]\n tokenized_sentence = tokenized_sentence + [''] * (\n MAX_DOCUMENT_LENGTH - len(tokenized_sentence))\n tokenized_sentence = tokenized_sentence[:MAX_DOCUMENT_LENGTH]\n return tokenized_sentence\n\n def display_prediction_attention(self, input_string):\n \"\"\"Visualizes the attention weights of the initialized classifier on the given string.\"\"\"\n pred, attn = self._predict_sentence(input_string)\n if pred[0]:\n print('Toxic')\n else:\n print('Not toxic')\n tokenized_string = self._resize_and_tokenize(input_string)\n wordvals = [wordVal(w, v) for w, v in zip(tokenized_string, attn[0])]\n word_df = pd.DataFrame(wordvals).transpose()\n return word_df.style.applymap(self._color_wordvals)\n"
] |
[
[
"tensorflow.local_variables",
"tensorflow.metrics.auc",
"tensorflow.greater",
"tensorflow.control_dependencies",
"tensorflow.add_to_collection",
"tensorflow.identity"
],
[
"tensorflow.logging.set_verbosity",
"tensorflow.app.flags.DEFINE_string",
"pandas.DataFrame",
"tensorflow.app.run",
"tensorflow.app.flags.mark_flag_as_required",
"pandas.read_csv"
],
[
"tensorflow.estimator.inputs.numpy_input_fn",
"pandas.set_option",
"pandas.DataFrame"
]
] |
zabaras/deep-turbulence
|
[
"0daca5daada449d4ba16bce37b703e20b444b6bc"
] |
[
"tmglow/nn/modules/flowUtils.py"
] |
[
"'''\n=====\nDistributed by: Notre Dame SCAI Lab (MIT Liscense)\n- Associated publication:\nurl: http://aimsciences.org//article/id/3a9f3d14-3421-4947-a45f-a9cc74edd097\ndoi: https://dx.doi.org/10.3934/fods.2020019\ngithub: https://github.com/zabaras/deep-turbulence\n=====\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport math\nfrom torch.autograd import Variable\nfrom torch.nn.modules.utils import _pair, _quadruple\n\nclass Squeeze(nn.Module):\n \"\"\"Squeezes feature map by reducing the dimensions of the feature\n and increasing channel number in chunks.\n\n :param factor: factor to reduce feature dimensions by, defaults to 2\n :type factor: int, optional\n\n :note: This is the squeeze approached used in \"Glow: Generative flow with invertible 1x1 convolutions\" \n by Kingma et al. https://arxiv.org/abs/1807.03039\n \"\"\"\n def __init__(self, factor=2):\n \"\"\"Constructor method\n \"\"\" \n super(Squeeze, self).__init__()\n assert factor >= 1\n if factor == 1:\n Warning('Squeeze factor is 1, this is identity function')\n self.factor = factor\n\n def forward(self, x):\n \"\"\"Forward pass\n\n :param x: [B, in_features, H, W] input feature tensor\n :type x: torch.Tensor\n :returns: \n - y: [B, factor**2 * in_features, H/factor, W/factor] Squeezed output feature tensor\n :rtype: torch.Tensor\n \"\"\"\n if self.factor == 1:\n return x\n # n_channels, height, width\n B, C, H, W = x.shape[:]\n assert H % self.factor == 0 and W % self.factor == 0\n x = x.reshape(-1, C, self.factor, H//self.factor, self.factor, W//self.factor)\n x = x.transpose(3, 4)\n y = x.reshape(-1, C * self.factor ** 2, H//self.factor, W//self.factor)\n\n return y\n\n def reverse(self, y):\n \"\"\"Backward pass\n\n :param y: [B, factor**2 * in_features, H/factor, W/factor] Squeezed input feature tensor\n :type y: torch.Tensor\n :returns: \n - x: [B, in_features, H, W] Output feature tensor\n :rtype: torch.Tensor\n \"\"\"\n if self.factor == 1:\n return y\n B, C, H, W = y.shape[:]\n assert C >= self.factor ** 2 and C % self.factor ** 2 == 0\n y = y.reshape(-1, C // self.factor ** 2, self.factor, self.factor, H, W)\n y = y.transpose(3, 4)\n x = y.reshape(-1, C // self.factor ** 2, H * self.factor, W * self.factor)\n\n return x\n\nclass CheckerSqueeze(nn.Module):\n \"\"\"Squeezes feature map by reducing the dimensions of the feature\n and increasing channel number in a checkered pattern.\n See Fig. 8 of paper: https://arxiv.org/abs/2006.04731\n\n :param factor: factor to reduce feature dimensions by, defaults to 2\n :type factor: int, optional\n\n :note: This is the squeeze approached used in \"Density estimation using real nvp\" \n by Dinh et al. 
https://arxiv.org/abs/1605.08803\n \"\"\"\n def __init__(self, factor=2):\n \"\"\"Constructor method\n \"\"\" \n super(CheckerSqueeze, self).__init__()\n assert factor >= 1\n if factor == 1:\n Warning('Squeeze factor is 1, this is identity function')\n\n # Not tested for other factor values\n factor = 2 \n self.factor = factor\n\n def forward(self, x):\n \"\"\"Forward pass\n\n :param x: [B, in_features, H, W] input feature tensor\n :type x: torch.Tensor\n :returns: \n - y: [B, factor**2 * in_features, H/factor, W/factor] Squeezed output feature tensor\n :rtype: torch.Tensor\n \"\"\"\n if self.factor == 1:\n return x\n # n_channels, height, width\n B, C, H, W = x.shape[:]\n assert H % self.factor == 0 and W % self.factor == 0\n\n y = torch.zeros(B, C * self.factor ** 2, H//self.factor, W//self.factor).type(x.type())\n\n c0 = C\n y[:,:c0,:,:] = x[:,:,::self.factor,::self.factor]\n y[:,c0:2*c0,:,:] = x[:,:,1::self.factor,::self.factor]\n y[:,2*c0:3*c0,:,:] = x[:,:,1::self.factor,1::self.factor]\n y[:,3*c0:,:,:] = x[:,:,::self.factor,1::self.factor]\n\n return y\n\n def reverse(self, y):\n \"\"\"Backward pass\n\n :param y: [B, factor**2 * in_features, H/factor, W/factor] Squeezed input feature tensor\n :type y: torch.Tensor\n :returns: \n - x: [B, in_features, H, W] Output feature tensor\n :rtype: torch.Tensor\n \"\"\"\n if self.factor == 1:\n return y\n B, C, H, W = y.shape[:]\n assert C >= self.factor ** 2 and C % self.factor ** 2 == 0\n x = torch.zeros(B, C//self.factor ** 2, H* self.factor, W* self.factor).type(y.type())\n\n c0 = C//self.factor ** 2\n x[:,:,::self.factor,::self.factor] = y[:,:c0,:,:]\n x[:,:,1::self.factor,::self.factor] = y[:,c0:2*c0,:,:]\n x[:,:,1::self.factor,1::self.factor] = y[:,2*c0:3*c0,:,:]\n x[:,:,::self.factor,1::self.factor] = y[:,3*c0:,:,:]\n\n return x\n\nclass GaussianDiag(object):\n \"\"\"Multi-variate Gaussian class with diagonal covariance\n for representing the latent variables\n\n :param mean: [B, in_features, H, W] tensor of mean values\n :type mean: torch.Tensor\n :param log_stddev: [B, in_features, H, W] tensor of log sandard deviations\n :type log_stddev: torch.Tensor\n \"\"\"\n Log2PI = float(np.log(2 * np.pi))\n\n def __init__(self, mean, log_stddev):\n \"\"\"Constructor method\n \"\"\" \n super().__init__()\n self.mean = mean\n self.log_stddev = log_stddev.clamp_(min=-10., max=math.log(5.))\n # self._backward_hook = self.log_stddev.register_hook(\n # lambda grad: torch.clamp_(grad, -10., 10.))\n\n def likelihood(self, x):\n \"\"\"Computes the Gaussian log-likelihood of each element\n\n :param x: [B, in_features, H, W] input feature tensor\n :type x: torch.Tensor\n :return:\n - like: [B, in_features, H, W] log-likelihood tensor\n :rtype: torch.Tensor\n \"\"\"\n like = -0.5 * (GaussianDiag.Log2PI + self.log_stddev * 2. 
\\\n + (x - self.mean) ** 2 / (self.log_stddev * 2.).exp())\n \n return like\n\n def log_prob(self, x):\n \"\"\"Computes the log product (sum) of Gaussian likelihoods\n over the entire input feature tensor\n\n :param x: [B, in_features, H, W] input feature tensor\n :type x: torch.Tensor\n :return: \n - likelihood: [B] sum log-likelihood over features\n :rtype: torch.Tensor\n \"\"\"\n likelihood = self.likelihood(x)\n return likelihood.view(x.shape[0], -1).sum(1)\n\n def sample(self, eps=None):\n \"\"\"Samples latent variables from learned Gaussian density\n\n :param eps: [B, in_features, H, W] Latent samples from the unit Gaussian to reconstruct specific latent variables.\n If none are provided latent variables are sampled randomly from learned density, defaults to None\n :type eps: torch.Tensor, optional\n :return: \n - z: [B, in_features, H, W] sum log-likelihood over features\n :rtype: torch.Tensor\n \"\"\"\n self.log_stddev.data.clamp_(min=-10., max=math.log(5.))\n if eps is None:\n eps = torch.randn_like(self.log_stddev)\n # print(eps, self.log_stddev.data )\n z = self.mean + self.log_stddev.exp() * eps\n return z\n\nclass Conv2dZeros(nn.Module):\n \"\"\"Convolution with weight and bias initialized to zero followed by channel-wise scaling\n :math:`x*exp(scale * logscale\\_factor)`\n\n :param in_features: Number of input feature channels\n :type in_features: int\n :param out_features: Number of output feature channels\n :type out_features: int\n :param logscale_factor: log factor to scale output tensor by, defaults to 1\n :type logscale_factor: int, optional\n\n :note: This is proposed in \"Glow: Generative flow with invertible 1x1 convolutions\" \n by Kingma et al. https://arxiv.org/abs/1807.03039. Appears to help with stability.\n \"\"\"\n def __init__(self, in_features, out_features, logscale_factor=1):\n \"\"\"Constructor method\n \"\"\"\n super(Conv2dZeros, self).__init__()\n self.conv = nn.Conv2d(in_features, out_features, kernel_size=3, \n stride=1, padding=0, bias=True)\n self.conv.weight.data.zero_()\n self.conv.bias.data.zero_()\n self.scale = nn.Parameter(torch.zeros(1, 1, 1, 1))\n self.logscale_factor = logscale_factor\n # self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n \"\"\"Forward pass.\n\n :param x: [B, in_features, H, W] input feature tensor\n :type x: torch.Tensor\n :return: \n - out: [B, out_features, H, W] output feature tensor\n :rtype: torch.Tensor\n \"\"\"\n x = self.conv(F.pad(x, _quadruple(1), mode='replicate'))\n return x * torch.exp(torch.clamp(self.scale, -4., np.log(4)) * self.logscale_factor)\n\nclass LatentEncoder(nn.Module):\n \"\"\"Latent encoder used to compute mu and std for Gaussian density\n from split feature map. See NN block in Fig. 
8 of paper: \n https://arxiv.org/abs/2006.04731\n\n :param in_features: Number of input feature channels\n :type in_features: int\n \"\"\"\n def __init__(self, in_features):\n \"\"\"Constructor method\n \"\"\"\n super(LatentEncoder, self).__init__()\n self.conv2d = Conv2dZeros(in_features, in_features * 2)\n self.hardtanh = nn.Hardtanh(min_val=-2.0, max_val=np.log(5.0), inplace=False)\n # self.hardtanh = nn.Sigmoid()\n\n def forward(self, x):\n \"\"\"Forward pass\n\n :param x: [B, in_features, H, W] input feature tensor\n :type x: torch.Tensor\n :return: \n - gauss_diag: Gaussian prior\n :rtype: :class:`nn.modules.flowUtils.GaussianDiag`\n \"\"\"\n mean, log_stddev = (self.hardtanh(self.conv2d(x))).chunk(2, 1)\n gauss_diag = GaussianDiag(mean, log_stddev)\n return gauss_diag\n\nclass Split(nn.Module):\n \"\"\"Splits input features into half features that are passed deeper in the model\n and the other half modeled as a Gaussian density.\n See NN block in Fig. 8 of paper: https://arxiv.org/abs/2006.04731\n\n :param in_features: Number of input feature channels\n :type in_features: int\n \"\"\"\n def __init__(self, in_features):\n \"\"\"Constructor method\n \"\"\"\n super(Split, self).__init__()\n self.latent_encoder = LatentEncoder(in_features // 2)\n\n def forward(self, z, return_eps=False):\n \"\"\"Forward split\n\n :param z: [B, in_features, H, W] input feature tensor\n :type z: torch.Tensor\n :param return_eps: Return samples from latent densities, defaults to False\n :type return_eps: bool, optional\n :return: \n - z1: [B, in_features//2, H, W] output feature tensor\n - log_prob_prior: [B] log-likelihood of split features \n - eps: [B, in_features//2, H, W] tensor of sampled latent variables from unit gaussian\n :rtype: (torch.Tensor, torch.Tensor, torch.Tensor)\n \"\"\"\n # split out z2, and evalute log prob at z2 which takes the form of \n # diagonal Gaussian are reparameterized by latent_encoder\n z1, z2 = z.chunk(2, 1)\n prior = self.latent_encoder(z1)\n log_prob_prior = prior.log_prob(z2)\n if return_eps:\n eps = (z2 - prior.mean) / prior.log_stddev.exp()\n else:\n eps = None\n return z1, log_prob_prior, eps\n\n def reverse(self, z1, eps=None):\n \"\"\"Backward split\n\n :param z1: [B, in_features//2, H, W] input split feature tensor\n :type z1: torch.Tensor\n :param eps: [B, in_features//2, H, W] Latent samples from the unit Gaussian to reconstruct specific latent variables.\n If none are provided latent variables are sampled randomly from learned density, defaults to None\n :type eps: torch.Tensor, optional\n :return: \n - z: [B, in_features, H, W] output reconstructed feature tensor\n - log_prob_prior: [B] log-likelihood of split features \n :rtype: (torch.Tensor, torch.Tensor)\n \"\"\"\n # sample z2, then concat with z1\n # intermediate flow, z2 is the split-out latent\n prior = self.latent_encoder(z1)\n z2 = prior.sample(eps)\n z = torch.cat((z1, z2), 1)\n log_prob_prior = prior.log_prob(z2)\n return z, log_prob_prior"
] |
[
[
"torch.zeros",
"torch.cat",
"numpy.log",
"torch.randn_like",
"torch.nn.Conv2d",
"torch.nn.modules.utils._quadruple"
]
] |
tartaruszen/oddstream
|
[
"c3f2a4d6cba9753052acf8be03e5df038d40b745"
] |
[
"build/lib/oddstream/kde_estimation.py"
] |
[
"from fastkde import fastKDE\nimport numpy as np\n\n\"\"\"\n Fast 2D Kernel Density Estimation with simple point evaluation\n\"\"\"\nclass KDEEstimation2D(object):\n def __init__(self, X):\n self.pdf, self.axes = fastKDE.pdf(X[:, 0], X[:, 1])\n\n def evaluate_points(self, X):\n m = X.shape[0]\n values = np.array(range(0, m), dtype=float)\n for i in range(0, m):\n values[i] = self.evaluate_pdf_value(X[i, :])\n return values\n\n def evaluate_pdf_value(self, s):\n x_up = s[0] <= self.axes[0]\n index_up_x = self.get_index_upper(x_up, 0)\n x_low = s[0] >= self.axes[0]\n index_low_x = self.get_index_lower(x_low)\n\n y_up = s[1] <= self.axes[1]\n index_up_y = self.get_index_upper(y_up, 1)\n y_low = s[1] >= self.axes[1]\n index_low_y = self.get_index_lower(y_low)\n\n # TODO\n value = 0.0\n for i in range(index_low_x, index_up_x + 1):\n for j in range(index_low_y, index_up_y + 1):\n value += self.pdf.T[i, j]\n value /= 4\n return value\n\n def get_index_upper(self, values, index):\n c = [i for i in range(0, len(values)) if values[i]]\n if len(c) == 0:\n up = self.pdf.shape[index] - 2\n else:\n up = np.min(c)\n return up\n\n def get_index_lower(self, values):\n c = [i for i in range(0, len(values)) if values[i]]\n if len(c) == 0:\n up = 0\n else:\n up = np.max(c)\n return up"
] |
[
[
"numpy.max",
"numpy.min"
]
] |
iamgroot42/cleverhans
|
[
"53da9cd6daf9d7457800831c3eaa75f729a39145",
"53da9cd6daf9d7457800831c3eaa75f729a39145"
] |
[
"cleverhans/train.py",
"scripts/plot_success_fail_curve.py"
] |
[
"\"\"\"\nMulti-replica synchronous training\n\n\nNOTE: This module is much more free to change than many other modules\nin CleverHans. CleverHans is very conservative about changes to any\ncode that affects the output of benchmark tests (attacks, evaluation\nmethods, etc.). This module provides *model training* functionality\nnot *benchmarks* and thus is free to change rapidly to provide better\nspeed, accuracy, etc.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport time\nimport warnings\n\nimport math\nimport numpy as np\nfrom six.moves import xrange\nimport tensorflow as tf\n\nfrom cleverhans import canary\nfrom cleverhans.utils import _ArgsWrapper, create_logger\nfrom cleverhans.utils import safe_zip\nfrom cleverhans.utils_tf import infer_devices\nfrom cleverhans.utils_tf import initialize_uninitialized_global_variables\n\n\n_logger = create_logger(\"train\")\n_logger.setLevel(logging.INFO)\n\n\ndef train(sess, loss, x_train, y_train,\n init_all=False, evaluate=None, feed=None, args=None,\n rng=None, var_list=None, fprop_args=None, optimizer=None,\n devices=None, x_batch_preprocessor=None, use_ema=False,\n ema_decay=.998, run_canary=None,\n loss_threshold=1e5, dataset_train=None, dataset_size=None):\n \"\"\"\n Run (optionally multi-replica, synchronous) training to minimize `loss`\n :param sess: TF session to use when training the graph\n :param loss: tensor, the loss to minimize\n :param x_train: numpy array with training inputs or tf Dataset\n :param y_train: numpy array with training outputs or tf Dataset\n :param init_all: (boolean) If set to true, all TF variables in the session\n are (re)initialized, otherwise only previously\n uninitialized variables are initialized before training.\n :param evaluate: function that is run after each training iteration\n (typically to display the test/validation accuracy).\n :param feed: An optional dictionary that is appended to the feeding\n dictionary before the session runs. Can be used to feed\n the learning phase of a Keras model for instance.\n :param args: dict or argparse `Namespace` object.\n Should contain `nb_epochs`, `learning_rate`,\n `batch_size`\n :param rng: Instance of numpy.random.RandomState\n :param var_list: Optional list of parameters to train.\n :param fprop_args: dict, extra arguments to pass to fprop (loss and model).\n :param optimizer: Optimizer to be used for training\n :param devices: list of device names to use for training\n If None, defaults to: all GPUs, if GPUs are available\n all devices, if no GPUs are available\n :param x_batch_preprocessor: callable\n Takes a single tensor containing an x_train batch as input\n Returns a single tensor containing an x_train batch as output\n Called to preprocess the data before passing the data to the Loss\n :param use_ema: bool\n If true, uses an exponential moving average of the model parameters\n :param ema_decay: float or callable\n The decay parameter for EMA, if EMA is used\n If a callable rather than a float, this is a callable that takes\n the epoch and batch as arguments and returns the ema_decay for\n the current batch.\n :param loss_threshold: float\n Raise an exception if the loss exceeds this value.\n This is intended to rapidly detect numerical problems.\n Sometimes the loss may legitimately be higher than this value. In\n such cases, raise the value. 
If needed it can be np.inf.\n :param dataset_train: tf Dataset instance.\n Used as a replacement for x_train, y_train for faster performance.\n :param dataset_size: integer, the size of the dataset_train.\n :return: True if model trained\n \"\"\"\n\n # Check whether the hardware is working correctly\n canary.run_canary()\n if run_canary is not None:\n warnings.warn(\"The `run_canary` argument is deprecated. The canary \"\n \"is now much cheaper and thus runs all the time. The \"\n \"canary now uses its own loss function so it is not \"\n \"necessary to turn off the canary when training with \"\n \" a stochastic loss. Simply quit passing `run_canary`.\"\n \"Passing `run_canary` may become an error on or after \"\n \"2019-10-16.\")\n\n args = _ArgsWrapper(args or {})\n fprop_args = fprop_args or {}\n\n # Check that necessary arguments were given (see doc above)\n # Be sure to support 0 epochs for debugging purposes\n if args.nb_epochs is None:\n raise ValueError(\"`args` must specify number of epochs\")\n if optimizer is None:\n if args.learning_rate is None:\n raise ValueError(\"Learning rate was not given in args dict\")\n assert args.batch_size, \"Batch size was not given in args dict\"\n\n if rng is None:\n rng = np.random.RandomState()\n\n if optimizer is None:\n optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n else:\n if not isinstance(optimizer, tf.train.Optimizer):\n raise ValueError(\"optimizer object must be from a child class of \"\n \"tf.train.Optimizer\")\n\n grads = []\n xs = []\n preprocessed_xs = []\n ys = []\n if dataset_train is not None:\n assert x_train is None and y_train is None and x_batch_preprocessor is None\n if dataset_size is None:\n raise ValueError(\"You must provide a dataset size\")\n data_iterator = dataset_train.make_one_shot_iterator().get_next()\n x_train, y_train = sess.run(data_iterator)\n\n devices = infer_devices(devices)\n for device in devices:\n with tf.device(device):\n x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])\n y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])\n xs.append(x)\n ys.append(y)\n\n if x_batch_preprocessor is not None:\n x = x_batch_preprocessor(x)\n\n # We need to keep track of these so that the canary can feed\n # preprocessed values. 
If the canary had to feed raw values,\n # stochastic preprocessing could make the canary fail.\n preprocessed_xs.append(x)\n\n loss_value = loss.fprop(x, y, **fprop_args)\n\n grads.append(optimizer.compute_gradients(\n loss_value, var_list=var_list))\n num_devices = len(devices)\n print(\"num_devices: \", num_devices)\n\n grad = avg_grads(grads)\n # Trigger update operations within the default graph (such as batch_norm).\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_step = optimizer.apply_gradients(grad)\n\n epoch_tf = tf.placeholder(tf.int32, [])\n batch_tf = tf.placeholder(tf.int32, [])\n\n if use_ema:\n if callable(ema_decay):\n ema_decay = ema_decay(epoch_tf, batch_tf)\n ema = tf.train.ExponentialMovingAverage(decay=ema_decay)\n with tf.control_dependencies([train_step]):\n train_step = ema.apply(var_list)\n # Get pointers to the EMA's running average variables\n avg_params = [ema.average(param) for param in var_list]\n # Make temporary buffers used for swapping the live and running average\n # parameters\n tmp_params = [tf.Variable(param, trainable=False)\n for param in var_list]\n # Define the swapping operation\n param_to_tmp = [tf.assign(tmp, param)\n for tmp, param in safe_zip(tmp_params, var_list)]\n with tf.control_dependencies(param_to_tmp):\n avg_to_param = [tf.assign(param, avg)\n for param, avg in safe_zip(var_list, avg_params)]\n with tf.control_dependencies(avg_to_param):\n tmp_to_avg = [tf.assign(avg, tmp)\n for avg, tmp in safe_zip(avg_params, tmp_params)]\n swap = tmp_to_avg\n\n batch_size = args.batch_size\n\n assert batch_size % num_devices == 0\n device_batch_size = batch_size // num_devices\n\n if init_all:\n sess.run(tf.global_variables_initializer())\n else:\n initialize_uninitialized_global_variables(sess)\n\n for epoch in xrange(args.nb_epochs):\n if dataset_train is not None:\n nb_batches = int(math.ceil(float(dataset_size) / batch_size))\n else:\n # Indices to shuffle training set\n index_shuf = list(range(len(x_train)))\n # Randomly repeat a few training examples each epoch to avoid\n # having a too-small batch\n while len(index_shuf) % batch_size != 0:\n index_shuf.append(rng.randint(len(x_train)))\n nb_batches = len(index_shuf) // batch_size\n rng.shuffle(index_shuf)\n # Shuffling here versus inside the loop doesn't seem to affect\n # timing very much, but shuffling here makes the code slightly\n # easier to read\n x_train_shuffled = x_train[index_shuf]\n y_train_shuffled = y_train[index_shuf]\n\n prev = time.time()\n for batch in range(nb_batches):\n if dataset_train is not None:\n x_train_shuffled, y_train_shuffled = sess.run(data_iterator)\n start, end = 0, batch_size\n else:\n # Compute batch start and end indices\n start = batch * batch_size\n end = (batch + 1) * batch_size\n # Perform one training step\n diff = end - start\n assert diff == batch_size\n\n feed_dict = {epoch_tf: epoch, batch_tf: batch}\n for dev_idx in xrange(num_devices):\n cur_start = start + dev_idx * device_batch_size\n cur_end = start + (dev_idx + 1) * device_batch_size\n feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]\n feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]\n if cur_end != end and dataset_train is None:\n msg = (\"batch_size (%d) must be a multiple of num_devices \"\n \"(%d).\\nCUDA_VISIBLE_DEVICES: %s\"\n \"\\ndevices: %s\")\n args = (batch_size, num_devices,\n os.environ['CUDA_VISIBLE_DEVICES'],\n str(devices))\n raise ValueError(msg % args)\n if feed is not None:\n feed_dict.update(feed)\n\n _, loss_numpy 
= sess.run(\n [train_step, loss_value], feed_dict=feed_dict)\n\n if np.abs(loss_numpy) > loss_threshold:\n raise ValueError(\"Extreme loss during training: \", loss_numpy)\n if np.isnan(loss_numpy) or np.isinf(loss_numpy):\n raise ValueError(\"NaN/Inf loss during training\")\n assert (dataset_train is not None or\n end == len(index_shuf)) # Check that all examples were used\n cur = time.time()\n _logger.info(\"Epoch \" + str(epoch) + \" took \" +\n str(cur - prev) + \" seconds\")\n if evaluate is not None:\n if use_ema:\n # Before running evaluation, load the running average\n # parameters into the live slot, so we can see how well\n # the EMA parameters are performing\n sess.run(swap)\n evaluate()\n if use_ema:\n # Swap the parameters back, so that we continue training\n # on the live parameters\n sess.run(swap)\n if use_ema:\n # When training is done, swap the running average parameters into\n # the live slot, so that we use them when we deploy the model\n sess.run(swap)\n\n return True\n\n\ndef avg_grads(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all\n towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been\n averaged across all towers.\n\n Modified from this tutorial: https://tinyurl.com/n3jr2vm\n \"\"\"\n if len(tower_grads) == 1:\n return tower_grads[0]\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = [g for g, _ in grad_and_vars]\n\n # Average over the 'tower' dimension.\n grad = tf.add_n(grads) / len(grads)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n assert all(v is grad_and_var[1] for grad_and_var in grad_and_vars)\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n",
"#!/usr/bin/env python3\n\"\"\"\nPlots a success-fail curve ( https://openreview.net/forum?id=H1g0piA9tQ )\nUsage:\nplot_success_fail_curve.py model.joblib\nplot_success_fail_curve.py model1.joblib model2.joblib\n\nThis script is mostly intended to rapidly visualize success-fail curves\nduring model development and testing.\nTo make nicely labeled plots formatted to fit the page / column of a\npublication, you should probably write your own script that calls some\nof the same plotting commands.\n\"\"\"\nfrom matplotlib import pyplot\nimport tensorflow as tf\nfrom cleverhans.utils_tf import silence\nsilence()\n# silence call must precede this imports. pylint doesn't like that\n# pylint: disable=C0413\nfrom cleverhans.compat import flags\nfrom cleverhans.plot.success_fail import DEFAULT_FAIL_NAMES\nfrom cleverhans.plot.success_fail import plot_report_from_path\nFLAGS = flags.FLAGS\n\ndef main(argv=None):\n \"\"\"Takes the path to a directory with reports and renders success fail plots.\"\"\"\n report_paths = argv[1:]\n\n fail_names = FLAGS.fail_names.split(',')\n\n for report_path in report_paths:\n plot_report_from_path(report_path, label=report_path, fail_names=fail_names)\n pyplot.legend()\n\n pyplot.xlim(-.01, 1.)\n pyplot.ylim(0., 1.)\n\n pyplot.show()\n\nif __name__ == '__main__':\n flags.DEFINE_string('fail_names', ','.join(DEFAULT_FAIL_NAMES), 'Names of adversarial datasets for failure rate')\n tf.app.run()\n"
] |
[
[
"tensorflow.control_dependencies",
"numpy.isinf",
"numpy.isnan",
"tensorflow.train.AdamOptimizer",
"tensorflow.assign",
"numpy.random.RandomState",
"tensorflow.Variable",
"tensorflow.add_n",
"tensorflow.placeholder",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.device",
"numpy.abs",
"tensorflow.global_variables_initializer",
"tensorflow.get_collection"
],
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"tensorflow.app.run"
]
] |
Twizwei/idinvert_pytorch
|
[
"11f1126aab517fbe32b488d92f6fdea339463d04"
] |
[
"utils/inverter.py"
] |
[
"# python 3.7\n\"\"\"Utility functions to invert a given image back to a latent code.\"\"\"\n\nfrom tqdm import tqdm\nimport cv2\nimport numpy as np\n\nimport torch\n\nfrom models.stylegan_generator import StyleGANGenerator\nfrom models.stylegan_encoder import StyleGANEncoder\nfrom models.perceptual_model import PerceptualModel\n\n__all__ = ['StyleGANInverter']\n\n\ndef _softplus(x):\n \"\"\"Implements the softplus function.\"\"\"\n return torch.nn.functional.softplus(x, beta=1, threshold=10000)\n\ndef _get_tensor_value(tensor):\n \"\"\"Gets the value of a torch Tensor.\"\"\"\n return tensor.cpu().detach().numpy()\n\n\nclass StyleGANInverter(object):\n \"\"\"Defines the class for StyleGAN inversion.\n\n Even having the encoder, the output latent code is not good enough to recover\n the target image satisfyingly. To this end, this class optimize the latent\n code based on gradient descent algorithm. In the optimization process,\n following loss functions will be considered:\n\n (1) Pixel-wise reconstruction loss. (required)\n (2) Perceptual loss. (optional, but recommended)\n (3) Regularization loss from encoder. (optional, but recommended for in-domain\n inversion)\n\n NOTE: The encoder can be missing for inversion, in which case the latent code\n will be randomly initialized and the regularization loss will be ignored.\n \"\"\"\n\n def __init__(self,\n model_name,\n learning_rate=1e-2,\n iteration=100,\n reconstruction_loss_weight=1.0,\n perceptual_loss_weight=5e-5,\n regularization_loss_weight=2.0,\n logger=None):\n \"\"\"Initializes the inverter.\n\n NOTE: Only Adam optimizer is supported in the optimization process.\n\n Args:\n model_name: Name of the model on which the inverted is based. The model\n should be first registered in `models/model_settings.py`.\n logger: Logger to record the log message.\n learning_rate: Learning rate for optimization. (default: 1e-2)\n iteration: Number of iterations for optimization. (default: 100)\n reconstruction_loss_weight: Weight for reconstruction loss. Should always\n be a positive number. (default: 1.0)\n perceptual_loss_weight: Weight for perceptual loss. 0 disables perceptual\n loss. (default: 5e-5)\n regularization_loss_weight: Weight for regularization loss from encoder.\n This is essential for in-domain inversion. However, this loss will\n automatically ignored if the generative model does not include a valid\n encoder. 0 disables regularization loss. 
(default: 2.0)\n \"\"\"\n self.logger = logger\n self.model_name = model_name\n self.gan_type = 'stylegan'\n\n self.G = StyleGANGenerator(self.model_name, self.logger)\n self.E = StyleGANEncoder(self.model_name, self.logger)\n self.F = PerceptualModel(min_val=self.G.min_val, max_val=self.G.max_val)\n self.encode_dim = [self.G.num_layers, self.G.w_space_dim]\n self.run_device = self.G.run_device\n assert list(self.encode_dim) == list(self.E.encode_dim)\n\n assert self.G.gan_type == self.gan_type\n assert self.E.gan_type == self.gan_type\n\n self.learning_rate = learning_rate\n self.iteration = iteration\n self.loss_pix_weight = reconstruction_loss_weight\n self.loss_feat_weight = perceptual_loss_weight\n self.loss_reg_weight = regularization_loss_weight\n assert self.loss_pix_weight > 0\n\n\n def preprocess(self, image):\n \"\"\"Preprocesses a single image.\n\n This function assumes the input numpy array is with shape [height, width,\n channel], channel order `RGB`, and pixel range [0, 255].\n\n The returned image is with shape [channel, new_height, new_width], where\n `new_height` and `new_width` are specified by the given generative model.\n The channel order of returned image is also specified by the generative\n model. The pixel range is shifted to [min_val, max_val], where `min_val` and\n `max_val` are also specified by the generative model.\n \"\"\"\n if not isinstance(image, np.ndarray):\n raise ValueError(f'Input image should be with type `numpy.ndarray`!')\n if image.dtype != np.uint8:\n raise ValueError(f'Input image should be with dtype `numpy.uint8`!')\n\n if image.ndim != 3 or image.shape[2] not in [1, 3]:\n raise ValueError(f'Input should be with shape [height, width, channel], '\n f'where channel equals to 1 or 3!\\n'\n f'But {image.shape} is received!')\n if image.shape[2] == 1 and self.G.image_channels == 3:\n image = np.tile(image, (1, 1, 3))\n if image.shape[2] != self.G.image_channels:\n raise ValueError(f'Number of channels of input image, which is '\n f'{image.shape[2]}, is not supported by the current '\n f'inverter, which requires {self.G.image_channels} '\n f'channels!')\n\n if self.G.image_channels == 3 and self.G.channel_order == 'BGR':\n image = image[:, :, ::-1]\n if image.shape[1:3] != [self.G.resolution, self.G.resolution]:\n image = cv2.resize(image, (self.G.resolution, self.G.resolution))\n image = image.astype(np.float32)\n image = image / 255.0 * (self.G.max_val - self.G.min_val) + self.G.min_val\n image = image.astype(np.float32).transpose(2, 0, 1)\n\n return image\n\n def get_init_code(self, image):\n \"\"\"Gets initial latent codes as the start point for optimization.\n\n The input image is assumed to have already been preprocessed, meaning to\n have shape [self.G.image_channels, self.G.resolution, self.G.resolution],\n channel order `self.G.channel_order`, and pixel range [self.G.min_val,\n self.G.max_val].\n \"\"\"\n x = image[np.newaxis]\n x = self.G.to_tensor(x.astype(np.float32))\n z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim))\n return z.astype(np.float32)\n\n def invert(self, image, num_viz=0):\n \"\"\"Inverts the given image to a latent code.\n\n Basically, this function is based on gradient descent algorithm.\n\n Args:\n image: Target image to invert, which is assumed to have already been\n preprocessed.\n num_viz: Number of intermediate outputs to visualize. (default: 0)\n\n Returns:\n A two-element tuple. First one is the inverted code. 
Second one is a list\n of intermediate results, where first image is the input image, second\n one is the reconstructed result from the initial latent code, remainings\n are from the optimization process every `self.iteration // num_viz`\n steps.\n \"\"\"\n x = image[np.newaxis]\n x = self.G.to_tensor(x.astype(np.float32))\n x.requires_grad = False\n init_z = self.get_init_code(image)\n z = torch.Tensor(init_z).to(self.run_device)\n z.requires_grad = True\n\n optimizer = torch.optim.Adam([z], lr=self.learning_rate)\n\n viz_results = []\n viz_results.append(self.G.postprocess(_get_tensor_value(x))[0])\n x_init_inv = self.G.net.synthesis(z)\n viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0])\n pbar = tqdm(range(1, self.iteration + 1), leave=True)\n for step in pbar:\n loss = 0.0\n\n # Reconstruction loss.\n x_rec = self.G.net.synthesis(z)\n loss_pix = torch.mean((x - x_rec) ** 2)\n loss = loss + loss_pix * self.loss_pix_weight\n log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}'\n\n # Perceptual loss.\n if self.loss_feat_weight:\n x_feat = self.F.net(x)\n x_rec_feat = self.F.net(x_rec)\n loss_feat = torch.mean((x_feat - x_rec_feat) ** 2)\n loss = loss + loss_feat * self.loss_feat_weight\n log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}'\n\n # Regularization loss.\n if self.loss_reg_weight:\n z_rec = self.E.net(x_rec).view(1, *self.encode_dim)\n loss_reg = torch.mean((z - z_rec) ** 2)\n loss = loss + loss_reg * self.loss_reg_weight\n log_message += f', loss_reg: {_get_tensor_value(loss_reg):.3f}'\n\n log_message += f', loss: {_get_tensor_value(loss):.3f}'\n pbar.set_description_str(log_message)\n if self.logger:\n self.logger.debug(f'Step: {step:05d}, '\n f'lr: {self.learning_rate:.2e}, '\n f'{log_message}')\n\n # Do optimization.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if num_viz > 0 and step % (self.iteration // num_viz) == 0:\n viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0])\n\n return _get_tensor_value(z), viz_results\n\n def easy_invert(self, image, num_viz=0):\n \"\"\"Wraps functions `preprocess()` and `invert()` together.\"\"\"\n return self.invert(self.preprocess(image), num_viz)\n\n def diffuse(self,\n target,\n context,\n center_x,\n center_y,\n crop_x,\n crop_y,\n num_viz=0):\n \"\"\"Diffuses the target image to a context image.\n\n Basically, this function is a motified version of `self.invert()`. More\n concretely, the encoder regularizer is removed from the objectives and the\n reconstruction loss is computed from the masked region.\n\n Args:\n target: Target image (foreground).\n context: Context image (background).\n center_x: The x-coordinate of the crop center.\n center_y: The y-coordinate of the crop center.\n crop_x: The crop size along the x-axis.\n crop_y: The crop size along the y-axis.\n num_viz: Number of intermediate outputs to visualize. (default: 0)\n\n Returns:\n A two-element tuple. First one is the inverted code. 
Second one is a list\n of intermediate results, where first image is the direct copy-paste\n image, second one is the reconstructed result from the initial latent\n code, remainings are from the optimization process every\n `self.iteration // num_viz` steps.\n \"\"\"\n image_shape = (self.G.image_channels, self.G.resolution, self.G.resolution)\n mask = np.zeros((1, *image_shape), dtype=np.float32)\n xx = center_x - crop_x // 2\n yy = center_y - crop_y // 2\n mask[:, :, yy:yy + crop_y, xx:xx + crop_x] = 1.0\n\n target = target[np.newaxis]\n context = context[np.newaxis]\n x = target * mask + context * (1 - mask)\n x = self.G.to_tensor(x.astype(np.float32))\n x.requires_grad = False\n mask = self.G.to_tensor(mask.astype(np.float32))\n mask.requires_grad = False\n\n init_z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim))\n init_z = init_z.astype(np.float32)\n z = torch.Tensor(init_z).to(self.run_device)\n z.requires_grad = True\n\n optimizer = torch.optim.Adam([z], lr=self.learning_rate)\n\n viz_results = []\n viz_results.append(self.G.postprocess(_get_tensor_value(x))[0])\n x_init_inv = self.G.net.synthesis(z)\n viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0])\n pbar = tqdm(range(1, self.iteration + 1), leave=True)\n for step in pbar:\n loss = 0.0\n\n # Reconstruction loss.\n x_rec = self.G.net.synthesis(z)\n loss_pix = torch.mean(((x - x_rec) * mask) ** 2)\n loss = loss + loss_pix * self.loss_pix_weight\n log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}'\n\n # Perceptual loss.\n if self.loss_feat_weight:\n x_feat = self.F.net(x * mask)\n x_rec_feat = self.F.net(x_rec * mask)\n loss_feat = torch.mean((x_feat - x_rec_feat) ** 2)\n loss = loss + loss_feat * self.loss_feat_weight\n log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}'\n\n log_message += f', loss: {_get_tensor_value(loss):.3f}'\n pbar.set_description_str(log_message)\n if self.logger:\n self.logger.debug(f'Step: {step:05d}, '\n f'lr: {self.learning_rate:.2e}, '\n f'{log_message}')\n\n # Do optimization.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if num_viz > 0 and step % (self.iteration // num_viz) == 0:\n viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0])\n\n return _get_tensor_value(z), viz_results\n\n def easy_diffuse(self, target, context, *args, **kwargs):\n \"\"\"Wraps functions `preprocess()` and `diffuse()` together.\"\"\"\n return self.diffuse(self.preprocess(target),\n self.preprocess(context),\n *args, **kwargs)\n"
] |
[
[
"torch.nn.functional.softplus",
"numpy.zeros",
"torch.optim.Adam",
"numpy.tile",
"torch.Tensor",
"torch.mean"
]
] |
acceleratedmaterials/NUS_workshop
|
[
"8937111a4f4d252ed76e33897fd4be7d9582a491"
] |
[
"gold nanocluster synthesis/own_package/others.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom openpyxl import load_workbook\nimport sys\n\ndef print_array_to_excel(array, first_cell, ws, axis=2):\n '''\n Print an np array to excel using openpyxl\n :param array: np array\n :param first_cell: first cell to start dumping values in\n :param ws: worksheet reference. From openpyxl, ws=wb[sheetname]\n :param axis: to determine if the array is a col vector (0), row vector (1), or 2d matrix (2)\n '''\n if isinstance(array, (list,)):\n array = np.array(array)\n shape = array.shape\n if axis == 0:\n # Treat array as col vector and print along the rows\n array.flatten() # Flatten in case the input array is a nx1 ndarry which acts weird\n for i in range(shape[0]):\n j = 0\n ws.cell(i + first_cell[0], j + first_cell[1]).value = array[i]\n elif axis == 1:\n # Treat array as row vector and print along the columns\n array.flatten() # Flatten in case the input array is a 1xn ndarry which acts weird\n for j in range(shape[0]):\n i = 0\n ws.cell(i + first_cell[0], j + first_cell[1]).value = array[j]\n elif axis == 2:\n # If axis==2, means it is a 2d array\n for i in range(shape[0]):\n for j in range(shape[1]):\n ws.cell(i + first_cell[0], j + first_cell[1]).value = array[i, j]\n\n\nif __name__ == '__main__':\n print('hi')"
] |
[
[
"numpy.array"
]
] |
Juspem1980/privacy
|
[
"d122e2d1c7182ba7195ecbcb1cb8da29b2a14d6f"
] |
[
"tensorflow_privacy/privacy/dp_query/gaussian_query.py"
] |
[
"# Copyright 2018, The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implements DPQuery interface for Gaussian average queries.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom distutils.version import LooseVersion\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow_privacy.privacy.dp_query import dp_query\nfrom tensorflow_privacy.privacy.dp_query import normalized_query\n\n\nclass GaussianSumQuery(dp_query.SumAggregationDPQuery):\n \"\"\"Implements DPQuery interface for Gaussian sum queries.\n\n Accumulates clipped vectors, then adds Gaussian noise to the sum.\n \"\"\"\n\n # pylint: disable=invalid-name\n _GlobalState = collections.namedtuple(\n '_GlobalState', ['l2_norm_clip', 'stddev'])\n\n def __init__(self, l2_norm_clip, stddev):\n \"\"\"Initializes the GaussianSumQuery.\n\n Args:\n l2_norm_clip: The clipping norm to apply to the global norm of each\n record.\n stddev: The stddev of the noise added to the sum.\n \"\"\"\n self._l2_norm_clip = l2_norm_clip\n self._stddev = stddev\n self._ledger = None\n\n def set_ledger(self, ledger):\n self._ledger = ledger\n\n def make_global_state(self, l2_norm_clip, stddev):\n \"\"\"Creates a global state from the given parameters.\"\"\"\n return self._GlobalState(tf.cast(l2_norm_clip, tf.float32),\n tf.cast(stddev, tf.float32))\n\n def initial_global_state(self):\n return self.make_global_state(self._l2_norm_clip, self._stddev)\n\n def derive_sample_params(self, global_state):\n return global_state.l2_norm_clip\n\n def initial_sample_state(self, template):\n return tf.nest.map_structure(\n dp_query.zeros_like, template)\n\n def preprocess_record_impl(self, params, record):\n \"\"\"Clips the l2 norm, returning the clipped record and the l2 norm.\n\n Args:\n params: The parameters for the sample.\n record: The record to be processed.\n\n Returns:\n A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is\n the structure of preprocessed tensors, and l2_norm is the total l2 norm\n before clipping.\n \"\"\"\n l2_norm_clip = params\n record_as_list = tf.nest.flatten(record)\n clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)\n return tf.nest.pack_sequence_as(record, clipped_as_list), norm\n\n def preprocess_record(self, params, record):\n preprocessed_record, _ = self.preprocess_record_impl(params, record)\n return preprocessed_record\n\n def get_noised_result(self, sample_state, global_state):\n \"\"\"See base class.\"\"\"\n if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):\n def add_noise(v):\n return v + tf.random.normal(\n tf.shape(input=v), stddev=global_state.stddev)\n else:\n random_normal = tf.random_normal_initializer(\n stddev=global_state.stddev)\n\n def add_noise(v):\n return v + random_normal(tf.shape(input=v))\n\n if self._ledger:\n dependencies = [\n self._ledger.record_sum_query(\n global_state.l2_norm_clip, global_state.stddev)\n ]\n else:\n dependencies = 
[]\n with tf.control_dependencies(dependencies):\n return tf.nest.map_structure(add_noise, sample_state), global_state\n\n\nclass GaussianAverageQuery(normalized_query.NormalizedQuery):\n \"\"\"Implements DPQuery interface for Gaussian average queries.\n\n Accumulates clipped vectors, adds Gaussian noise, and normalizes.\n\n Note that we use \"fixed-denominator\" estimation: the denominator should be\n specified as the expected number of records per sample. Accumulating the\n denominator separately would also be possible but would be produce a higher\n variance estimator.\n \"\"\"\n\n def __init__(self,\n l2_norm_clip,\n sum_stddev,\n denominator):\n \"\"\"Initializes the GaussianAverageQuery.\n\n Args:\n l2_norm_clip: The clipping norm to apply to the global norm of each\n record.\n sum_stddev: The stddev of the noise added to the sum (before\n normalization).\n denominator: The normalization constant (applied after noise is added to\n the sum).\n \"\"\"\n super(GaussianAverageQuery, self).__init__(\n numerator_query=GaussianSumQuery(l2_norm_clip, sum_stddev),\n denominator=denominator)\n"
] |
[
[
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.clip_by_global_norm",
"tensorflow.compat.v1.nest.map_structure",
"tensorflow.compat.v1.random_normal_initializer",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.nest.flatten",
"tensorflow.compat.v1.nest.pack_sequence_as"
]
] |
jaimefrio/pandas
|
[
"d6a77007b247f3c218ecc38de8130e7d42e1d0e9"
] |
[
"pandas/tests/test_frame.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n# pylint: disable-msg=W0612,E1101\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta, time, date\nimport sys\nimport operator\nimport re\nimport csv\nimport nose\nimport functools\nimport itertools\nfrom itertools import product, permutations\nfrom distutils.version import LooseVersion\n\nfrom pandas.compat import(\n map, zip, range, long, lrange, lmap, lzip,\n OrderedDict, u, StringIO, is_platform_windows\n)\nfrom pandas import compat\n\nfrom numpy import random, nan, inf\nfrom numpy.random import randn\nimport numpy as np\nimport numpy.ma as ma\nimport numpy.ma.mrecords as mrecords\n\nimport pandas.core.nanops as nanops\nimport pandas.core.common as com\nimport pandas.core.format as fmt\nimport pandas.core.datetools as datetools\nfrom pandas import (DataFrame, Index, Series, Panel, notnull, isnull,\n MultiIndex, DatetimeIndex, Timestamp, date_range,\n read_csv, timedelta_range, Timedelta, option_context, period_range)\nfrom pandas.core.dtypes import DatetimeTZDtype\nimport pandas as pd\nfrom pandas.parser import CParserError\nfrom pandas.util.misc import is_little_endian\n\nfrom pandas.util.testing import (assert_almost_equal,\n assert_numpy_array_equal,\n assert_series_equal,\n assert_frame_equal,\n assertRaisesRegexp,\n assertRaises,\n makeCustomDataframe as mkdf,\n ensure_clean,\n SubclassedDataFrame)\nfrom pandas.core.indexing import IndexingError\nfrom pandas.core.common import PandasError\n\nimport pandas.util.testing as tm\nimport pandas.lib as lib\n\nfrom numpy.testing.decorators import slow\n\n#---------------------------------------------------------------------\n# DataFrame test cases\n\nJOIN_TYPES = ['inner', 'outer', 'left', 'right']\nMIXED_FLOAT_DTYPES = ['float16','float32','float64']\nMIXED_INT_DTYPES = ['uint8','uint16','uint32','uint64','int8','int16',\n 'int32','int64']\n\ndef _check_mixed_float(df, dtype = None):\n\n # float16 are most likely to be upcasted to float32\n dtypes = dict(A = 'float32', B = 'float32', C = 'float16', D = 'float64')\n if isinstance(dtype, compat.string_types):\n dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get('A'):\n assert(df.dtypes['A'] == dtypes['A'])\n if dtypes.get('B'):\n assert(df.dtypes['B'] == dtypes['B'])\n if dtypes.get('C'):\n assert(df.dtypes['C'] == dtypes['C'])\n if dtypes.get('D'):\n assert(df.dtypes['D'] == dtypes['D'])\n\n\ndef _check_mixed_int(df, dtype = None):\n dtypes = dict(A = 'int32', B = 'uint64', C = 'uint8', D = 'int64')\n if isinstance(dtype, compat.string_types):\n dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get('A'):\n assert(df.dtypes['A'] == dtypes['A'])\n if dtypes.get('B'):\n assert(df.dtypes['B'] == dtypes['B'])\n if dtypes.get('C'):\n assert(df.dtypes['C'] == dtypes['C'])\n if dtypes.get('D'):\n assert(df.dtypes['D'] == dtypes['D'])\n\n\nclass CheckIndexing(object):\n\n _multiprocess_can_split_ = True\n\n def test_getitem(self):\n # slicing\n sl = self.frame[:20]\n self.assertEqual(20, len(sl.index))\n\n # column access\n\n for _, series in compat.iteritems(sl):\n self.assertEqual(20, len(series.index))\n self.assertTrue(tm.equalContents(series.index, sl.index))\n\n for key, _ in compat.iteritems(self.frame._series):\n self.assertIsNotNone(self.frame[key])\n\n self.assertNotIn('random', self.frame)\n with assertRaisesRegexp(KeyError, 'random'):\n 
self.frame['random']\n\n df = self.frame.copy()\n df['$10'] = randn(len(df))\n ad = randn(len(df))\n df['@awesome_domain'] = ad\n self.assertRaises(KeyError, df.__getitem__, 'df[\"$10\"]')\n res = df['@awesome_domain']\n assert_numpy_array_equal(ad, res.values)\n\n def test_getitem_dupe_cols(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n try:\n df[['baf']]\n except KeyError:\n pass\n else:\n self.fail(\"Dataframe failed to raise KeyError\")\n\n def test_get(self):\n b = self.frame.get('B')\n assert_series_equal(b, self.frame['B'])\n\n self.assertIsNone(self.frame.get('foo'))\n assert_series_equal(self.frame.get('foo', self.frame['B']),\n self.frame['B'])\n # None\n # GH 5652\n for df in [DataFrame(), DataFrame(columns=list('AB')), DataFrame(columns=list('AB'),index=range(3)) ]:\n result = df.get(None)\n self.assertIsNone(result)\n\n def test_getitem_iterator(self):\n idx = iter(['A', 'B', 'C'])\n result = self.frame.ix[:, idx]\n expected = self.frame.ix[:, ['A', 'B', 'C']]\n assert_frame_equal(result, expected)\n\n def test_getitem_list(self):\n self.frame.columns.name = 'foo'\n\n result = self.frame[['B', 'A']]\n result2 = self.frame[Index(['B', 'A'])]\n\n expected = self.frame.ix[:, ['B', 'A']]\n expected.columns.name = 'foo'\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n self.assertEqual(result.columns.name, 'foo')\n\n with assertRaisesRegexp(KeyError, 'not in index'):\n self.frame[['B', 'A', 'food']]\n with assertRaisesRegexp(KeyError, 'not in index'):\n self.frame[Index(['B', 'A', 'foo'])]\n\n # tuples\n df = DataFrame(randn(8, 3),\n columns=Index([('foo', 'bar'), ('baz', 'qux'),\n ('peek', 'aboo')], name=['sth', 'sth2']))\n\n result = df[[('foo', 'bar'), ('baz', 'qux')]]\n expected = df.ix[:, :2]\n assert_frame_equal(result, expected)\n self.assertEqual(result.columns.names, ['sth', 'sth2'])\n\n def test_setitem_list(self):\n\n self.frame['E'] = 'foo'\n data = self.frame[['A', 'B']]\n self.frame[['B', 'A']] = data\n\n assert_series_equal(self.frame['B'], data['A'], check_names=False)\n assert_series_equal(self.frame['A'], data['B'], check_names=False)\n\n with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):\n data[['A']] = self.frame[['A', 'B']]\n with assertRaisesRegexp(ValueError, 'Length of values does not match '\n 'length of index'):\n data['A'] = range(len(data.index) - 1)\n\n df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)\n df.ix[1, ['tt1', 'tt2']] = [1, 2]\n\n result = df.ix[1, ['tt1', 'tt2']]\n expected = Series([1, 2], df.columns, dtype=np.int_, name=1)\n assert_series_equal(result, expected)\n\n df['tt1'] = df['tt2'] = '0'\n df.ix[1, ['tt1', 'tt2']] = ['1', '2']\n result = df.ix[1, ['tt1', 'tt2']]\n expected = Series(['1', '2'], df.columns, name=1)\n assert_series_equal(result, expected)\n\n def test_setitem_list_not_dataframe(self):\n data = np.random.randn(len(self.frame), 2)\n self.frame[['A', 'B']] = data\n assert_almost_equal(self.frame[['A', 'B']].values, data)\n\n def test_setitem_list_of_tuples(self):\n tuples = lzip(self.frame['A'], self.frame['B'])\n self.frame['tuples'] = tuples\n\n result = self.frame['tuples']\n expected = Series(tuples, index=self.frame.index, name='tuples')\n assert_series_equal(result, expected)\n\n def test_setitem_mulit_index(self):\n # GH7655, test that assigning to a sub-frame of a frame\n # with multi-index columns aligns both rows and columns\n it = ['jim', 'joe', 'jolie'], ['first', 'last'], \\\n ['left', 'center', 'right']\n\n cols = 
MultiIndex.from_product(it)\n index = pd.date_range('20141006',periods=20)\n vals = np.random.randint(1, 1000, (len(index), len(cols)))\n df = pd.DataFrame(vals, columns=cols, index=index)\n\n i, j = df.index.values.copy(), it[-1][:]\n\n np.random.shuffle(i)\n df['jim'] = df['jolie'].loc[i, ::-1]\n assert_frame_equal(df['jim'], df['jolie'])\n\n np.random.shuffle(j)\n df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]\n assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])\n\n np.random.shuffle(j)\n df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]\n assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])\n\n def test_inplace_ops_alignment(self):\n\n # inplace ops / ops alignment\n # GH 8511\n\n columns = list('abcdefg')\n X_orig = DataFrame(np.arange(10*len(columns)).reshape(-1,len(columns)), columns=columns, index=range(10))\n Z = 100*X_orig.iloc[:,1:-1].copy()\n block1 = list('bedcf')\n subs = list('bcdef')\n\n # add\n X = X_orig.copy()\n result1 = (X[block1] + Z).reindex(columns=subs)\n\n X[block1] += Z\n result2 = X.reindex(columns=subs)\n\n X = X_orig.copy()\n result3 = (X[block1] + Z[block1]).reindex(columns=subs)\n\n X[block1] += Z[block1]\n result4 = X.reindex(columns=subs)\n\n assert_frame_equal(result1, result2)\n assert_frame_equal(result1, result3)\n assert_frame_equal(result1, result4)\n\n # sub\n X = X_orig.copy()\n result1 = (X[block1] - Z).reindex(columns=subs)\n\n X[block1] -= Z\n result2 = X.reindex(columns=subs)\n\n X = X_orig.copy()\n result3 = (X[block1] - Z[block1]).reindex(columns=subs)\n\n X[block1] -= Z[block1]\n result4 = X.reindex(columns=subs)\n\n assert_frame_equal(result1, result2)\n assert_frame_equal(result1, result3)\n assert_frame_equal(result1, result4)\n\n def test_inplace_ops_identity(self):\n\n # GH 5104\n # make sure that we are actually changing the object\n s_orig = Series([1, 2, 3])\n df_orig = DataFrame(np.random.randint(0,5,size=10).reshape(-1,5))\n\n # no dtype change\n s = s_orig.copy()\n s2 = s\n s += 1\n assert_series_equal(s,s2)\n assert_series_equal(s_orig+1,s)\n self.assertIs(s,s2)\n self.assertIs(s._data,s2._data)\n\n df = df_orig.copy()\n df2 = df\n df += 1\n assert_frame_equal(df,df2)\n assert_frame_equal(df_orig+1,df)\n self.assertIs(df,df2)\n self.assertIs(df._data,df2._data)\n\n # dtype change\n s = s_orig.copy()\n s2 = s\n s += 1.5\n assert_series_equal(s,s2)\n assert_series_equal(s_orig+1.5,s)\n\n df = df_orig.copy()\n df2 = df\n df += 1.5\n assert_frame_equal(df,df2)\n assert_frame_equal(df_orig+1.5,df)\n self.assertIs(df,df2)\n self.assertIs(df._data,df2._data)\n\n # mixed dtype\n arr = np.random.randint(0,10,size=5)\n df_orig = DataFrame({'A' : arr.copy(), 'B' : 'foo'})\n df = df_orig.copy()\n df2 = df\n df['A'] += 1\n expected = DataFrame({'A' : arr.copy()+1, 'B' : 'foo'})\n assert_frame_equal(df,expected)\n assert_frame_equal(df2,expected)\n self.assertIs(df._data,df2._data)\n\n df = df_orig.copy()\n df2 = df\n df['A'] += 1.5\n expected = DataFrame({'A' : arr.copy()+1.5, 'B' : 'foo'})\n assert_frame_equal(df,expected)\n assert_frame_equal(df2,expected)\n self.assertIs(df._data,df2._data)\n\n def test_getitem_boolean(self):\n # boolean indexing\n d = self.tsframe.index[10]\n indexer = self.tsframe.index > d\n indexer_obj = indexer.astype(object)\n\n subindex = self.tsframe.index[indexer]\n subframe = self.tsframe[indexer]\n\n self.assert_numpy_array_equal(subindex, subframe.index)\n with assertRaisesRegexp(ValueError, 'Item wrong length'):\n self.tsframe[indexer[:-1]]\n\n subframe_obj = 
self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n with tm.assertRaisesRegexp(ValueError, 'boolean values only'):\n self.tsframe[self.tsframe]\n\n # test that Series work\n indexer_obj = Series(indexer_obj, self.tsframe.index)\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n # test that Series indexers reindex\n with tm.assert_produces_warning(UserWarning):\n indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n # test df[df > 0]\n for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n data = df._get_numeric_data()\n bif = df[df > 0]\n bifw = DataFrame(dict([ (c,np.where(data[c] > 0, data[c], np.nan)) for c in data.columns ]),\n index=data.index, columns=data.columns)\n\n # add back other columns to compare\n for c in df.columns:\n if c not in bifw:\n bifw[c] = df[c]\n bifw = bifw.reindex(columns = df.columns)\n\n assert_frame_equal(bif, bifw, check_dtype=False)\n for c in df.columns:\n if bif[c].dtype != bifw[c].dtype:\n self.assertEqual(bif[c].dtype, df[c].dtype)\n\n def test_getitem_boolean_casting(self):\n\n # don't upcast if we don't need to\n df = self.tsframe.copy()\n df['E'] = 1\n df['E'] = df['E'].astype('int32')\n df['E1'] = df['E'].copy()\n df['F'] = 1\n df['F'] = df['F'].astype('int64')\n df['F1'] = df['F'].copy()\n\n casted = df[df>0]\n result = casted.get_dtype_counts()\n expected = Series({'float64': 4, 'int32' : 2, 'int64' : 2})\n assert_series_equal(result, expected)\n\n # int block splitting\n df.ix[1:3,['E1','F1']] = 0\n casted = df[df>0]\n result = casted.get_dtype_counts()\n expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})\n assert_series_equal(result, expected)\n\n # where dtype conversions\n # GH 3733\n df = DataFrame(data = np.random.randn(100, 50))\n df = df.where(df > 0) # create nans\n bools = df > 0\n mask = isnull(df)\n expected = bools.astype(float).mask(mask)\n result = bools.mask(mask)\n assert_frame_equal(result,expected)\n\n def test_getitem_boolean_list(self):\n df = DataFrame(np.arange(12).reshape(3, 4))\n\n def _checkit(lst):\n result = df[lst]\n expected = df.ix[df.index[lst]]\n assert_frame_equal(result, expected)\n\n _checkit([True, False, True])\n _checkit([True, True, True])\n _checkit([False, False, False])\n\n def test_getitem_boolean_iadd(self):\n arr = randn(5, 5)\n\n df = DataFrame(arr.copy(), columns = ['A','B','C','D','E'])\n\n df[df < 0] += 1\n arr[arr < 0] += 1\n\n assert_almost_equal(df.values, arr)\n\n def test_boolean_index_empty_corner(self):\n # #2096\n blah = DataFrame(np.empty([0, 1]), columns=['A'],\n index=DatetimeIndex([]))\n\n # both of these should succeed trivially\n k = np.array([], bool)\n\n blah[k]\n blah[k] = 0\n\n def test_getitem_ix_mixed_integer(self):\n df = DataFrame(np.random.randn(4, 3),\n index=[1, 10, 'C', 'E'], columns=[1, 2, 3])\n\n result = df.ix[:-1]\n expected = df.ix[df.index[:-1]]\n assert_frame_equal(result, expected)\n\n result = df.ix[[1, 10]]\n expected = df.ix[Index([1, 10], dtype=object)]\n assert_frame_equal(result, expected)\n\n # 11320\n df = pd.DataFrame({ \"rna\": (1.5,2.2,3.2,4.5),\n -1000: [11,21,36,40],\n 0: [10,22,43,34],\n 1000:[0, 10, 20, 30] },columns=['rna',-1000,0,1000])\n result = df[[1000]]\n expected = df.iloc[:,[3]]\n assert_frame_equal(result, expected)\n result = df[[-1000]]\n expected = df.iloc[:,[1]]\n assert_frame_equal(result, expected)\n\n def 
test_getitem_setitem_ix_negative_integers(self):\n result = self.frame.ix[:, -1]\n assert_series_equal(result, self.frame['D'])\n\n result = self.frame.ix[:, [-1]]\n assert_frame_equal(result, self.frame[['D']])\n\n result = self.frame.ix[:, [-1, -2]]\n assert_frame_equal(result, self.frame[['D', 'C']])\n\n self.frame.ix[:, [-1]] = 0\n self.assertTrue((self.frame['D'] == 0).all())\n\n df = DataFrame(np.random.randn(8, 4))\n self.assertTrue(isnull(df.ix[:, [-1]].values).all())\n\n # #1942\n a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])\n a.ix[-1] = a.ix[-2]\n\n assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)\n self.assertEqual(a.ix[-1].name, 'T')\n self.assertEqual(a.ix[-2].name, 'S')\n\n def test_getattr(self):\n tm.assert_series_equal(self.frame.A, self.frame['A'])\n self.assertRaises(AttributeError, getattr, self.frame,\n 'NONEXISTENT_NAME')\n\n def test_setattr_column(self):\n df = DataFrame({'foobar': 1}, index=lrange(10))\n\n df.foobar = 5\n self.assertTrue((df.foobar == 5).all())\n\n def test_setitem(self):\n # not sure what else to do here\n series = self.frame['A'][::2]\n self.frame['col5'] = series\n self.assertIn('col5', self.frame)\n tm.assert_dict_equal(series, self.frame['col5'],\n compare_keys=False)\n\n series = self.frame['A']\n self.frame['col6'] = series\n tm.assert_dict_equal(series, self.frame['col6'],\n compare_keys=False)\n\n with tm.assertRaises(KeyError):\n self.frame[randn(len(self.frame) + 1)] = 1\n\n # set ndarray\n arr = randn(len(self.frame))\n self.frame['col9'] = arr\n self.assertTrue((self.frame['col9'] == arr).all())\n\n self.frame['col7'] = 5\n assert((self.frame['col7'] == 5).all())\n\n self.frame['col0'] = 3.14\n assert((self.frame['col0'] == 3.14).all())\n\n self.frame['col8'] = 'foo'\n assert((self.frame['col8'] == 'foo').all())\n\n # this is partially a view (e.g. 
some blocks are view)\n # so raise/warn\n smaller = self.frame[:2]\n def f():\n smaller['col10'] = ['1', '2']\n self.assertRaises(com.SettingWithCopyError, f)\n self.assertEqual(smaller['col10'].dtype, np.object_)\n self.assertTrue((smaller['col10'] == ['1', '2']).all())\n\n # with a dtype\n for dtype in ['int32','int64','float32','float64']:\n self.frame[dtype] = np.array(arr,dtype=dtype)\n self.assertEqual(self.frame[dtype].dtype.name, dtype)\n\n # dtype changing GH4204\n df = DataFrame([[0,0]])\n df.iloc[0] = np.nan\n expected = DataFrame([[np.nan,np.nan]])\n assert_frame_equal(df,expected)\n\n df = DataFrame([[0,0]])\n df.loc[0] = np.nan\n assert_frame_equal(df,expected)\n\n def test_setitem_tuple(self):\n self.frame['A', 'B'] = self.frame['A']\n assert_series_equal(self.frame['A', 'B'], self.frame['A'], check_names=False)\n\n def test_setitem_always_copy(self):\n s = self.frame['A'].copy()\n self.frame['E'] = s\n\n self.frame['E'][5:10] = nan\n self.assertTrue(notnull(s[5:10]).all())\n\n def test_setitem_boolean(self):\n df = self.frame.copy()\n values = self.frame.values\n\n df[df['A'] > 0] = 4\n values[values[:, 0] > 0] = 4\n assert_almost_equal(df.values, values)\n\n # test that column reindexing works\n series = df['A'] == 4\n series = series.reindex(df.index[::-1])\n df[series] = 1\n values[values[:, 0] == 4] = 1\n assert_almost_equal(df.values, values)\n\n df[df > 0] = 5\n values[values > 0] = 5\n assert_almost_equal(df.values, values)\n\n df[df == 5] = 0\n values[values == 5] = 0\n assert_almost_equal(df.values, values)\n\n # a df that needs alignment first\n df[df[:-1] < 0] = 2\n np.putmask(values[:-1], values[:-1] < 0, 2)\n assert_almost_equal(df.values, values)\n\n # indexed with same shape but rows-reversed df\n df[df[::-1] == 2] = 3\n values[values == 2] = 3\n assert_almost_equal(df.values, values)\n\n with assertRaisesRegexp(TypeError, 'Must pass DataFrame with boolean '\n 'values only'):\n df[df * 0] = 2\n\n # index with DataFrame\n mask = df > np.abs(df)\n expected = df.copy()\n df[df > np.abs(df)] = nan\n expected.values[mask.values] = nan\n assert_frame_equal(df, expected)\n\n # set from DataFrame\n expected = df.copy()\n df[df > np.abs(df)] = df * 2\n np.putmask(expected.values, mask.values, df.values * 2)\n assert_frame_equal(df, expected)\n\n def test_setitem_cast(self):\n self.frame['D'] = self.frame['D'].astype('i8')\n self.assertEqual(self.frame['D'].dtype, np.int64)\n\n # #669, should not cast?\n # this is now set to int64, which means a replacement of the column to\n # the value dtype (and nothing to do with the existing dtype)\n self.frame['B'] = 0\n self.assertEqual(self.frame['B'].dtype, np.int64)\n\n # cast if pass array of course\n self.frame['B'] = np.arange(len(self.frame))\n self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))\n\n self.frame['foo'] = 'bar'\n self.frame['foo'] = 0\n self.assertEqual(self.frame['foo'].dtype, np.int64)\n\n self.frame['foo'] = 'bar'\n self.frame['foo'] = 2.5\n self.assertEqual(self.frame['foo'].dtype, np.float64)\n\n self.frame['something'] = 0\n self.assertEqual(self.frame['something'].dtype, np.int64)\n self.frame['something'] = 2\n self.assertEqual(self.frame['something'].dtype, np.int64)\n self.frame['something'] = 2.5\n self.assertEqual(self.frame['something'].dtype, np.float64)\n\n # GH 7704\n # dtype conversion on setting\n df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))\n df['event'] = np.nan\n df.loc[10,'event'] = 'foo'\n result = df.get_dtype_counts().sort_values()\n expected = 
Series({'float64' : 3, 'object' : 1 }).sort_values()\n assert_series_equal(result, expected)\n\n def test_setitem_boolean_column(self):\n expected = self.frame.copy()\n mask = self.frame['A'] > 0\n\n self.frame.ix[mask, 'B'] = 0\n expected.values[mask.values, 1] = 0\n\n assert_frame_equal(self.frame, expected)\n\n def test_setitem_corner(self):\n # corner case\n df = DataFrame({'B': [1., 2., 3.],\n 'C': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['B']\n df['B'] = [1., 2., 3.]\n self.assertIn('B', df)\n self.assertEqual(len(df.columns), 2)\n\n df['A'] = 'beginning'\n df['E'] = 'foo'\n df['D'] = 'bar'\n df[datetime.now()] = 'date'\n df[datetime.now()] = 5.\n\n # what to do when empty frame with index\n dm = DataFrame(index=self.frame.index)\n dm['A'] = 'foo'\n dm['B'] = 'bar'\n self.assertEqual(len(dm.columns), 2)\n self.assertEqual(dm.values.dtype, np.object_)\n\n # upcast\n dm['C'] = 1\n self.assertEqual(dm['C'].dtype, np.int64)\n\n dm['E'] = 1.\n self.assertEqual(dm['E'].dtype, np.float64)\n\n # set existing column\n dm['A'] = 'bar'\n self.assertEqual('bar', dm['A'][0])\n\n dm = DataFrame(index=np.arange(3))\n dm['A'] = 1\n dm['foo'] = 'bar'\n del dm['foo']\n dm['foo'] = 'bar'\n self.assertEqual(dm['foo'].dtype, np.object_)\n\n dm['coercable'] = ['1', '2', '3']\n self.assertEqual(dm['coercable'].dtype, np.object_)\n\n def test_setitem_corner2(self):\n data = {\"title\": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,\n \"cruft\": np.random.random(20)}\n\n df = DataFrame(data)\n ix = df[df['title'] == 'bar'].index\n\n df.ix[ix, ['title']] = 'foobar'\n df.ix[ix, ['cruft']] = 0\n\n assert(df.ix[1, 'title'] == 'foobar')\n assert(df.ix[1, 'cruft'] == 0)\n\n def test_setitem_ambig(self):\n # difficulties with mixed-type data\n from decimal import Decimal\n\n # created as float type\n dm = DataFrame(index=lrange(3), columns=lrange(3))\n\n coercable_series = Series([Decimal(1) for _ in range(3)],\n index=lrange(3))\n uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))\n\n dm[0] = np.ones(3)\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNone(dm.objects)\n\n dm[1] = coercable_series\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNone(dm.objects)\n\n dm[2] = uncoercable_series\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNotNone(dm.objects)\n self.assertEqual(dm[2].dtype, np.object_)\n\n def test_setitem_clear_caches(self):\n # GH #304\n df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},\n index=[0, 1, 2, 3])\n df.insert(2, 'z', np.nan)\n\n # cache it\n foo = df['z']\n\n df.ix[2:, 'z'] = 42\n\n expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')\n self.assertIsNot(df['z'], foo)\n assert_series_equal(df['z'], expected)\n\n def test_setitem_None(self):\n # GH #766\n self.frame[None] = self.frame['A']\n assert_series_equal(self.frame.iloc[:,-1], self.frame['A'], check_names=False)\n assert_series_equal(self.frame.loc[:,None], self.frame['A'], check_names=False)\n assert_series_equal(self.frame[None], self.frame['A'], check_names=False)\n repr(self.frame)\n\n def test_setitem_empty(self):\n # GH 9596\n df = pd.DataFrame({'a': ['1', '2', '3'],\n 'b': ['11', '22', '33'],\n 'c': ['111', '222', '333']})\n\n result = df.copy()\n result.loc[result.b.isnull(), 'a'] = result.a\n assert_frame_equal(result, df)\n\n def test_setitem_empty_frame_with_boolean(self):\n # Test for issue #10126\n\n for dtype in ('float', 'int64'):\n for df in [\n pd.DataFrame(dtype=dtype),\n pd.DataFrame(dtype=dtype, index=[1]),\n 
pd.DataFrame(dtype=dtype, columns=['A']),\n ]:\n df2 = df.copy()\n df[df > df2] = 47\n assert_frame_equal(df, df2)\n\n def test_delitem_corner(self):\n f = self.frame.copy()\n del f['D']\n self.assertEqual(len(f.columns), 3)\n self.assertRaises(KeyError, f.__delitem__, 'D')\n del f['B']\n self.assertEqual(len(f.columns), 2)\n\n def test_getitem_fancy_2d(self):\n f = self.frame\n ix = f.ix\n\n assert_frame_equal(ix[:, ['B', 'A']], f.reindex(columns=['B', 'A']))\n\n subidx = self.frame.index[[5, 4, 1]]\n assert_frame_equal(ix[subidx, ['B', 'A']],\n f.reindex(index=subidx, columns=['B', 'A']))\n\n # slicing rows, etc.\n assert_frame_equal(ix[5:10], f[5:10])\n assert_frame_equal(ix[5:10, :], f[5:10])\n assert_frame_equal(ix[:5, ['A', 'B']],\n f.reindex(index=f.index[:5], columns=['A', 'B']))\n\n # slice rows with labels, inclusive!\n expected = ix[5:11]\n result = ix[f.index[5]:f.index[10]]\n assert_frame_equal(expected, result)\n\n # slice columns\n assert_frame_equal(ix[:, :2], f.reindex(columns=['A', 'B']))\n\n # get view\n exp = f.copy()\n ix[5:10].values[:] = 5\n exp.values[5:10] = 5\n assert_frame_equal(f, exp)\n\n self.assertRaises(ValueError, ix.__getitem__, f > 0.5)\n\n def test_slice_floats(self):\n index = [52195.504153, 52196.303147, 52198.369883]\n df = DataFrame(np.random.rand(3, 2), index=index)\n\n s1 = df.ix[52195.1:52196.5]\n self.assertEqual(len(s1), 2)\n\n s1 = df.ix[52195.1:52196.6]\n self.assertEqual(len(s1), 2)\n\n s1 = df.ix[52195.1:52198.9]\n self.assertEqual(len(s1), 3)\n\n def test_getitem_fancy_slice_integers_step(self):\n df = DataFrame(np.random.randn(10, 5))\n\n # this is OK\n result = df.ix[:8:2]\n df.ix[:8:2] = np.nan\n self.assertTrue(isnull(df.ix[:8:2]).values.all())\n\n def test_getitem_setitem_integer_slice_keyerrors(self):\n df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))\n\n # this is OK\n cp = df.copy()\n cp.ix[4:10] = 0\n self.assertTrue((cp.ix[4:10] == 0).values.all())\n\n # so is this\n cp = df.copy()\n cp.ix[3:11] = 0\n self.assertTrue((cp.ix[3:11] == 0).values.all())\n\n result = df.ix[4:10]\n result2 = df.ix[3:11]\n expected = df.reindex([4, 6, 8, 10])\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]\n self.assertRaises(KeyError, df2.ix.__getitem__, slice(3, 11))\n self.assertRaises(KeyError, df2.ix.__setitem__, slice(3, 11), 0)\n\n def test_setitem_fancy_2d(self):\n f = self.frame\n ix = f.ix\n\n # case 1\n frame = self.frame.copy()\n expected = frame.copy()\n frame.ix[:, ['B', 'A']] = 1\n expected['B'] = 1.\n expected['A'] = 1.\n assert_frame_equal(frame, expected)\n\n # case 2\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n\n expected = frame.copy()\n\n subidx = self.frame.index[[5, 4, 1]]\n values = randn(3, 2)\n\n frame.ix[subidx, ['B', 'A']] = values\n frame2.ix[[5, 4, 1], ['B', 'A']] = values\n\n expected['B'].ix[subidx] = values[:, 0]\n expected['A'].ix[subidx] = values[:, 1]\n\n assert_frame_equal(frame, expected)\n assert_frame_equal(frame2, expected)\n\n # case 3: slicing rows, etc.\n frame = self.frame.copy()\n\n expected1 = self.frame.copy()\n frame.ix[5:10] = 1.\n expected1.values[5:10] = 1.\n assert_frame_equal(frame, expected1)\n\n expected2 = self.frame.copy()\n arr = randn(5, len(frame.columns))\n frame.ix[5:10] = arr\n expected2.values[5:10] = arr\n assert_frame_equal(frame, expected2)\n\n # case 4\n frame = self.frame.copy()\n frame.ix[5:10, :] = 1.\n assert_frame_equal(frame, 
expected1)\n frame.ix[5:10, :] = arr\n assert_frame_equal(frame, expected2)\n\n # case 5\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n\n expected = self.frame.copy()\n values = randn(5, 2)\n\n frame.ix[:5, ['A', 'B']] = values\n expected['A'][:5] = values[:, 0]\n expected['B'][:5] = values[:, 1]\n assert_frame_equal(frame, expected)\n\n frame2.ix[:5, [0, 1]] = values\n assert_frame_equal(frame2, expected)\n\n # case 6: slice rows with labels, inclusive!\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[frame.index[5]:frame.index[10]] = 5.\n expected.values[5:11] = 5\n assert_frame_equal(frame, expected)\n\n # case 7: slice columns\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n expected = self.frame.copy()\n\n # slice indices\n frame.ix[:, 1:3] = 4.\n expected.values[:, 1:3] = 4.\n assert_frame_equal(frame, expected)\n\n # slice with labels\n frame.ix[:, 'B':'C'] = 4.\n assert_frame_equal(frame, expected)\n\n # new corner case of boolean slicing / setting\n frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),\n columns=['a', 'b'])\n lst = [100]\n lst.extend([np.nan] * 4)\n expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),\n columns=['a', 'b'])\n frame[frame['a'] == 2] = 100\n assert_frame_equal(frame, expected)\n\n def test_fancy_getitem_slice_mixed(self):\n sliced = self.mixed_frame.ix[:, -3:]\n self.assertEqual(sliced['D'].dtype, np.float64)\n\n # get view with single block\n # setting it triggers setting with copy\n sliced = self.frame.ix[:, -3:]\n def f():\n sliced['C'] = 4.\n self.assertRaises(com.SettingWithCopyError, f)\n self.assertTrue((self.frame['C'] == 4).all())\n\n def test_fancy_setitem_int_labels(self):\n # integer index defers to label-based indexing\n\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[[0, 2, 4]] = 5\n exp.values[:3] = 5\n assert_frame_equal(tmp, exp)\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[6] = 5\n exp.values[3] = 5\n assert_frame_equal(tmp, exp)\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[:, 2] = 5\n\n # tmp correctly sets the dtype\n # so match the exp way\n exp[2] = 5\n assert_frame_equal(tmp, exp)\n\n def test_fancy_getitem_int_labels(self):\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n result = df.ix[[4, 2, 0], [2, 0]]\n expected = df.reindex(index=[4, 2, 0], columns=[2, 0])\n assert_frame_equal(result, expected)\n\n result = df.ix[[4, 2, 0]]\n expected = df.reindex(index=[4, 2, 0])\n assert_frame_equal(result, expected)\n\n result = df.ix[4]\n expected = df.xs(4)\n assert_series_equal(result, expected)\n\n result = df.ix[:, 3]\n expected = df[3]\n assert_series_equal(result, expected)\n\n def test_fancy_index_int_labels_exceptions(self):\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n # labels that aren't contained\n self.assertRaises(KeyError, df.ix.__setitem__,\n ([0, 1, 2], [2, 3, 4]), 5)\n\n # try to set indices not contained in frame\n self.assertRaises(KeyError,\n self.frame.ix.__setitem__,\n ['foo', 'bar', 'baz'], 1)\n self.assertRaises(KeyError,\n self.frame.ix.__setitem__,\n (slice(None, None), ['E']), 1)\n\n # partial setting now allows this GH2578\n #self.assertRaises(KeyError,\n # self.frame.ix.__setitem__,\n # (slice(None, None), 'E'), 1)\n\n def test_setitem_fancy_mixed_2d(self):\n self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5\n result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]\n self.assertTrue((result.values == 5).all())\n\n self.mixed_frame.ix[5] = np.nan\n 
self.assertTrue(isnull(self.mixed_frame.ix[5]).all())\n\n self.mixed_frame.ix[5] = self.mixed_frame.ix[6]\n assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],\n check_names=False)\n\n # #1432\n df = DataFrame({1: [1., 2., 3.],\n 2: [3, 4, 5]})\n self.assertTrue(df._is_mixed_type)\n\n df.ix[1] = [5, 10]\n\n expected = DataFrame({1: [1., 5., 3.],\n 2: [3, 10, 5]})\n\n assert_frame_equal(df, expected)\n\n def test_ix_align(self):\n b = Series(randn(10), name=0).sort_values()\n df_orig = DataFrame(randn(10, 4))\n df = df_orig.copy()\n\n df.ix[:, 0] = b\n assert_series_equal(df.ix[:, 0].reindex(b.index), b)\n\n dft = df_orig.T\n dft.ix[0, :] = b\n assert_series_equal(dft.ix[0, :].reindex(b.index), b)\n\n df = df_orig.copy()\n df.ix[:5, 0] = b\n s = df.ix[:5, 0]\n assert_series_equal(s, b.reindex(s.index))\n\n dft = df_orig.T\n dft.ix[0, :5] = b\n s = dft.ix[0, :5]\n assert_series_equal(s, b.reindex(s.index))\n\n df = df_orig.copy()\n idx = [0, 1, 3, 5]\n df.ix[idx, 0] = b\n s = df.ix[idx, 0]\n assert_series_equal(s, b.reindex(s.index))\n\n dft = df_orig.T\n dft.ix[0, idx] = b\n s = dft.ix[0, idx]\n assert_series_equal(s, b.reindex(s.index))\n\n def test_ix_frame_align(self):\n b = DataFrame(np.random.randn(3, 4))\n df_orig = DataFrame(randn(10, 4))\n df = df_orig.copy()\n\n df.ix[:3] = b\n out = b.ix[:3]\n assert_frame_equal(out, b)\n\n b.sort_index(inplace=True)\n\n df = df_orig.copy()\n df.ix[[0, 1, 2]] = b\n out = df.ix[[0, 1, 2]].reindex(b.index)\n assert_frame_equal(out, b)\n\n df = df_orig.copy()\n df.ix[:3] = b\n out = df.ix[:3]\n assert_frame_equal(out, b.reindex(out.index))\n\n def test_getitem_setitem_non_ix_labels(self):\n df = tm.makeTimeDataFrame()\n\n start, end = df.index[[5, 10]]\n\n result = df.ix[start:end]\n result2 = df[start:end]\n expected = df[5:11]\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n result = df.copy()\n result.ix[start:end] = 0\n result2 = df.copy()\n result2[start:end] = 0\n expected = df.copy()\n expected[5:11] = 0\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n def test_ix_multi_take(self):\n df = DataFrame(np.random.randn(3, 2))\n rs = df.ix[df.index == 0, :]\n xp = df.reindex([0])\n assert_frame_equal(rs, xp)\n\n \"\"\" #1321\n df = DataFrame(np.random.randn(3, 2))\n rs = df.ix[df.index==0, df.columns==1]\n xp = df.reindex([0], [1])\n assert_frame_equal(rs, xp)\n \"\"\"\n\n def test_ix_multi_take_nonint_index(self):\n df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],\n columns=['a', 'b'])\n rs = df.ix[[0], [0]]\n xp = df.reindex(['x'], columns=['a'])\n assert_frame_equal(rs, xp)\n\n def test_ix_multi_take_multiindex(self):\n df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],\n columns=[['a', 'b'], ['1', '2']])\n rs = df.ix[[0], [0]]\n xp = df.reindex(['x'], columns=[('a', '1')])\n assert_frame_equal(rs, xp)\n\n def test_ix_dup(self):\n idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n\n sub = df.ix[:'d']\n assert_frame_equal(sub, df)\n\n sub = df.ix['a':'c']\n assert_frame_equal(sub, df.ix[0:4])\n\n sub = df.ix['b':'d']\n assert_frame_equal(sub, df.ix[2:])\n\n def test_getitem_fancy_1d(self):\n f = self.frame\n ix = f.ix\n\n # return self if no slicing...for now\n self.assertIs(ix[:, :], f)\n\n # low dimensional slice\n xs1 = ix[2, ['C', 'B', 'A']]\n xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])\n assert_series_equal(xs1, xs2)\n\n ts1 = ix[5:10, 2]\n ts2 = f[f.columns[2]][5:10]\n 
assert_series_equal(ts1, ts2)\n\n # positional xs\n xs1 = ix[0]\n xs2 = f.xs(f.index[0])\n assert_series_equal(xs1, xs2)\n\n xs1 = ix[f.index[5]]\n xs2 = f.xs(f.index[5])\n assert_series_equal(xs1, xs2)\n\n # single column\n assert_series_equal(ix[:, 'A'], f['A'])\n\n # return view\n exp = f.copy()\n exp.values[5] = 4\n ix[5][:] = 4\n assert_frame_equal(exp, f)\n\n exp.values[:, 1] = 6\n ix[:, 1][:] = 6\n assert_frame_equal(exp, f)\n\n # slice of mixed-frame\n xs = self.mixed_frame.ix[5]\n exp = self.mixed_frame.xs(self.mixed_frame.index[5])\n assert_series_equal(xs, exp)\n\n def test_setitem_fancy_1d(self):\n\n # case 1: set cross-section for indices\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]\n expected['C'][2] = 1.\n expected['B'][2] = 2.\n expected['A'][2] = 3.\n assert_frame_equal(frame, expected)\n\n frame2 = self.frame.copy()\n frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]\n assert_frame_equal(frame, expected)\n\n # case 2, set a section of a column\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n vals = randn(5)\n expected.values[5:10, 2] = vals\n frame.ix[5:10, 2] = vals\n assert_frame_equal(frame, expected)\n\n frame2 = self.frame.copy()\n frame2.ix[5:10, 'B'] = vals\n assert_frame_equal(frame, expected)\n\n # case 3: full xs\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[4] = 5.\n expected.values[4] = 5.\n assert_frame_equal(frame, expected)\n\n frame.ix[frame.index[4]] = 6.\n expected.values[4] = 6.\n assert_frame_equal(frame, expected)\n\n # single column\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[:, 'A'] = 7.\n expected['A'] = 7.\n assert_frame_equal(frame, expected)\n\n def test_getitem_fancy_scalar(self):\n f = self.frame\n ix = f.ix\n # individual value\n for col in f.columns:\n ts = f[col]\n for idx in f.index[::5]:\n assert_almost_equal(ix[idx, col], ts[idx])\n\n def test_setitem_fancy_scalar(self):\n f = self.frame\n expected = self.frame.copy()\n ix = f.ix\n # individual value\n for j, col in enumerate(f.columns):\n ts = f[col]\n for idx in f.index[::5]:\n i = f.index.get_loc(idx)\n val = randn()\n expected.values[i, j] = val\n ix[idx, col] = val\n assert_frame_equal(f, expected)\n\n def test_getitem_fancy_boolean(self):\n f = self.frame\n ix = f.ix\n\n expected = f.reindex(columns=['B', 'D'])\n result = ix[:, [False, True, False, True]]\n assert_frame_equal(result, expected)\n\n expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])\n result = ix[5:10, [False, True, False, True]]\n assert_frame_equal(result, expected)\n\n boolvec = f.index > f.index[7]\n expected = f.reindex(index=f.index[boolvec])\n result = ix[boolvec]\n assert_frame_equal(result, expected)\n result = ix[boolvec, :]\n assert_frame_equal(result, expected)\n\n result = ix[boolvec, 2:]\n expected = f.reindex(index=f.index[boolvec],\n columns=['C', 'D'])\n assert_frame_equal(result, expected)\n\n def test_setitem_fancy_boolean(self):\n # from 2d, set with booleans\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n mask = frame['A'] > 0\n frame.ix[mask] = 0.\n expected.values[mask.values] = 0.\n assert_frame_equal(frame, expected)\n\n frame = self.frame.copy()\n expected = self.frame.copy()\n frame.ix[mask, ['A', 'B']] = 0.\n expected.values[mask.values, :2] = 0.\n assert_frame_equal(frame, expected)\n\n def test_getitem_fancy_ints(self):\n result = self.frame.ix[[1, 4, 7]]\n expected = self.frame.ix[self.frame.index[[1, 4, 7]]]\n assert_frame_equal(result, 
expected)\n\n result = self.frame.ix[:, [2, 0, 1]]\n expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]\n assert_frame_equal(result, expected)\n\n def test_getitem_setitem_fancy_exceptions(self):\n ix = self.frame.ix\n with assertRaisesRegexp(IndexingError, 'Too many indexers'):\n ix[:, :, :]\n\n with assertRaises(IndexingError):\n ix[:, :, :] = 1\n\n def test_getitem_setitem_boolean_misaligned(self):\n # boolean index misaligned labels\n mask = self.frame['A'][::-1] > 1\n\n result = self.frame.ix[mask]\n expected = self.frame.ix[mask[::-1]]\n assert_frame_equal(result, expected)\n\n cp = self.frame.copy()\n expected = self.frame.copy()\n cp.ix[mask] = 0\n expected.ix[mask] = 0\n assert_frame_equal(cp, expected)\n\n def test_getitem_setitem_boolean_multi(self):\n df = DataFrame(np.random.randn(3, 2))\n\n # get\n k1 = np.array([True, False, True])\n k2 = np.array([False, True])\n result = df.ix[k1, k2]\n expected = df.ix[[0, 2], [1]]\n assert_frame_equal(result, expected)\n\n expected = df.copy()\n df.ix[np.array([True, False, True]),\n np.array([False, True])] = 5\n expected.ix[[0, 2], [1]] = 5\n assert_frame_equal(df, expected)\n\n def test_getitem_setitem_float_labels(self):\n index = Index([1.5, 2, 3, 4, 5])\n df = DataFrame(np.random.randn(5, 5), index=index)\n\n result = df.ix[1.5:4]\n expected = df.reindex([1.5, 2, 3, 4])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.ix[4:5]\n expected = df.reindex([4, 5]) # reindex with int\n assert_frame_equal(result, expected, check_index_type=False)\n self.assertEqual(len(result), 2)\n\n result = df.ix[4:5]\n expected = df.reindex([4.0, 5.0]) # reindex with float\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 2)\n\n # loc_float changes this to work properly\n result = df.ix[1:2]\n expected = df.iloc[0:2]\n assert_frame_equal(result, expected)\n\n df.ix[1:2] = 0\n result = df[1:2]\n self.assertTrue((result==0).all().all())\n\n # #2727\n index = Index([1.0, 2.5, 3.5, 4.5, 5.0])\n df = DataFrame(np.random.randn(5, 5), index=index)\n\n # positional slicing only via iloc!\n # stacklevel=False -> needed stacklevel depends on index type\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = df.iloc[1.0:5]\n\n expected = df.reindex([2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.iloc[4:5]\n expected = df.reindex([5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 1)\n\n # GH 4892, float indexers in iloc are deprecated\n import warnings\n warnings.filterwarnings(action='error', category=FutureWarning)\n\n cp = df.copy()\n def f():\n cp.iloc[1.0:5] = 0\n self.assertRaises(FutureWarning, f)\n def f():\n result = cp.iloc[1.0:5] == 0\n self.assertRaises(FutureWarning, f)\n self.assertTrue(result.values.all())\n self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())\n\n warnings.filterwarnings(action='default', category=FutureWarning)\n\n cp = df.copy()\n cp.iloc[4:5] = 0\n self.assertTrue((cp.iloc[4:5] == 0).values.all())\n self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())\n\n # float slicing\n result = df.ix[1.0:5]\n expected = df\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 5)\n\n result = df.ix[1.1:5]\n expected = df.reindex([2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.ix[4.51:5]\n expected = df.reindex([5.0])\n assert_frame_equal(result, expected)\n 
self.assertEqual(len(result), 1)\n\n result = df.ix[1.0:5.0]\n expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 5)\n\n cp = df.copy()\n cp.ix[1.0:5.0] = 0\n result = cp.ix[1.0:5.0]\n self.assertTrue((result == 0).values.all())\n\n def test_setitem_single_column_mixed(self):\n df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['foo', 'bar', 'baz'])\n df['str'] = 'qux'\n df.ix[::2, 'str'] = nan\n expected = [nan, 'qux', nan, 'qux', nan]\n assert_almost_equal(df['str'].values, expected)\n\n def test_setitem_single_column_mixed_datetime(self):\n df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['foo', 'bar', 'baz'])\n\n df['timestamp'] = Timestamp('20010102')\n\n # check our dtypes\n result = df.get_dtype_counts()\n expected = Series({'float64': 3, 'datetime64[ns]': 1})\n assert_series_equal(result, expected)\n\n # set an allowable datetime64 type\n from pandas import tslib\n df.ix['b', 'timestamp'] = tslib.iNaT\n self.assertTrue(com.isnull(df.ix['b', 'timestamp']))\n\n # allow this syntax\n df.ix['c', 'timestamp'] = nan\n self.assertTrue(com.isnull(df.ix['c', 'timestamp']))\n\n # allow this syntax\n df.ix['d', :] = nan\n self.assertTrue(com.isnull(df.ix['c', :]).all() == False)\n\n # as of GH 3216 this will now work!\n # try to set with a list like item\n #self.assertRaises(\n # Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])\n\n def test_setitem_frame(self):\n piece = self.frame.ix[:2, ['A', 'B']]\n self.frame.ix[-2:, ['A', 'B']] = piece.values\n assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n # GH 3216\n\n # already aligned\n f = self.mixed_frame.copy()\n piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])\n key = (slice(None,2), ['A', 'B'])\n f.ix[key] = piece\n assert_almost_equal(f.ix[0:2, ['A', 'B']].values,\n piece.values)\n\n # rows unaligned\n f = self.mixed_frame.copy()\n piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])\n key = (slice(None,2), ['A', 'B'])\n f.ix[key] = piece\n assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,\n piece.values[0:2])\n\n # key is unaligned with values\n f = self.mixed_frame.copy()\n piece = f.ix[:2, ['A']]\n piece.index = f.index[-2:]\n key = (slice(-2, None), ['A', 'B'])\n f.ix[key] = piece\n piece['B'] = np.nan\n assert_almost_equal(f.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n # ndarray\n f = self.mixed_frame.copy()\n piece = self.mixed_frame.ix[:2, ['A', 'B']]\n key = (slice(-2, None), ['A', 'B'])\n f.ix[key] = piece.values\n assert_almost_equal(f.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n\n # needs upcasting\n df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])\n df2 = df.copy()\n df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5\n expected = df.reindex(columns=['A','B'])\n expected += 0.5\n expected['C'] = df['C']\n assert_frame_equal(df2, expected)\n\n def test_setitem_frame_align(self):\n piece = self.frame.ix[:2, ['A', 'B']]\n piece.index = self.frame.index[-2:]\n piece.columns = ['A', 'B']\n self.frame.ix[-2:, ['A', 'B']] = piece\n assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n def test_setitem_fancy_exceptions(self):\n pass\n\n def test_getitem_boolean_missing(self):\n pass\n\n def test_setitem_boolean_missing(self):\n pass\n\n def test_getitem_setitem_ix_duplicates(self):\n # #1201\n df = DataFrame(np.random.randn(5, 3),\n index=['foo', 'foo', 'bar', 
'baz', 'bar'])\n\n result = df.ix['foo']\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.ix['bar']\n expected = df.ix[[2, 4]]\n assert_frame_equal(result, expected)\n\n result = df.ix['baz']\n expected = df.ix[3]\n assert_series_equal(result, expected)\n\n def test_getitem_ix_boolean_duplicates_multiple(self):\n # #1201\n df = DataFrame(np.random.randn(5, 3),\n index=['foo', 'foo', 'bar', 'baz', 'bar'])\n\n result = df.ix[['bar']]\n exp = df.ix[[2, 4]]\n assert_frame_equal(result, exp)\n\n result = df.ix[df[1] > 0]\n exp = df[df[1] > 0]\n assert_frame_equal(result, exp)\n\n result = df.ix[df[0] > 0]\n exp = df[df[0] > 0]\n assert_frame_equal(result, exp)\n\n def test_getitem_setitem_ix_bool_keyerror(self):\n # #2199\n df = DataFrame({'a': [1, 2, 3]})\n\n self.assertRaises(KeyError, df.ix.__getitem__, False)\n self.assertRaises(KeyError, df.ix.__getitem__, True)\n\n self.assertRaises(KeyError, df.ix.__setitem__, False, 0)\n self.assertRaises(KeyError, df.ix.__setitem__, True, 0)\n\n def test_getitem_list_duplicates(self):\n # #1943\n df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))\n df.columns.name = 'foo'\n\n result = df[['B', 'C']]\n self.assertEqual(result.columns.name, 'foo')\n\n expected = df.ix[:, 2:]\n assert_frame_equal(result, expected)\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n assert_almost_equal(result, expected)\n\n def test_iteritems(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n for k, v in compat.iteritems(df):\n self.assertEqual(type(v), Series)\n\n def test_lookup(self):\n def alt(df, rows, cols):\n result = []\n for r, c in zip(rows, cols):\n result.append(df.get_value(r, c))\n return result\n\n def testit(df):\n rows = list(df.index) * len(df.columns)\n cols = list(df.columns) * len(df.index)\n result = df.lookup(rows, cols)\n expected = alt(df, rows, cols)\n assert_almost_equal(result, expected)\n\n testit(self.mixed_frame)\n testit(self.frame)\n\n df = DataFrame({'label': ['a', 'b', 'a', 'c'],\n 'mask_a': [True, True, False, True],\n 'mask_b': [True, False, False, False],\n 'mask_c': [False, True, False, True]})\n df['mask'] = df.lookup(df.index, 'mask_' + df['label'])\n exp_mask = alt(df, df.index, 'mask_' + df['label'])\n assert_almost_equal(df['mask'], exp_mask)\n self.assertEqual(df['mask'].dtype, np.bool_)\n\n with tm.assertRaises(KeyError):\n self.frame.lookup(['xyz'], ['A'])\n\n with tm.assertRaises(KeyError):\n self.frame.lookup([self.frame.index[0]], ['xyz'])\n\n with tm.assertRaisesRegexp(ValueError, 'same size'):\n self.frame.lookup(['a', 'b', 'c'], ['a'])\n\n def test_set_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n self.frame.set_value(idx, col, 1)\n assert_almost_equal(self.frame[col][idx], 1)\n\n def test_set_value_resize(self):\n\n res = self.frame.set_value('foobar', 'B', 0)\n self.assertIs(res, self.frame)\n self.assertEqual(res.index[-1], 'foobar')\n self.assertEqual(res.get_value('foobar', 'B'), 0)\n\n self.frame.loc['foobar','qux'] = 0\n self.assertEqual(self.frame.get_value('foobar', 'qux'), 0)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', 'sam')\n self.assertEqual(res3['baz'].dtype, np.object_)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', True)\n self.assertEqual(res3['baz'].dtype, np.object_)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', 5)\n 
self.assertTrue(com.is_float_dtype(res3['baz']))\n self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())\n self.assertRaises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')\n\n def test_set_value_with_index_dtype_change(self):\n df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))\n\n # this is actually ambiguous as the 2 is interpreted as a positional\n # so column is not created\n df = df_orig.copy()\n df.set_value('C', 2, 1.0)\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])\n\n df = df_orig.copy()\n df.loc['C', 2] = 1.0\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])\n\n # create both new\n df = df_orig.copy()\n df.set_value('C', 'D', 1.0)\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])\n\n df = df_orig.copy()\n df.loc['C', 'D'] = 1.0\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])\n\n def test_get_set_value_no_partial_indexing(self):\n # partial w/ MultiIndex raise exception\n index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])\n df = DataFrame(index=index, columns=lrange(4))\n self.assertRaises(KeyError, df.get_value, 0, 1)\n # self.assertRaises(KeyError, df.set_value, 0, 1, 0)\n\n def test_single_element_ix_dont_upcast(self):\n self.frame['E'] = 1\n self.assertTrue(issubclass(self.frame['E'].dtype.type,\n (int, np.integer)))\n\n result = self.frame.ix[self.frame.index[5], 'E']\n self.assertTrue(com.is_integer(result))\n\n def test_irow(self):\n df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n df.irow(1)\n\n result = df.iloc[1]\n exp = df.ix[2]\n assert_series_equal(result, exp)\n\n result = df.iloc[2]\n exp = df.ix[4]\n assert_series_equal(result, exp)\n\n # slice\n result = df.iloc[slice(4, 8)]\n expected = df.ix[8:14]\n assert_frame_equal(result, expected)\n\n # verify slice is view\n # setting it makes it raise/warn\n def f():\n result[2] = 0.\n self.assertRaises(com.SettingWithCopyError, f)\n exp_col = df[2].copy()\n exp_col[4:8] = 0.\n assert_series_equal(df[2], exp_col)\n\n # list of integers\n result = df.iloc[[1, 2, 4, 6]]\n expected = df.reindex(df.index[[1, 2, 4, 6]])\n assert_frame_equal(result, expected)\n\n def test_icol(self):\n\n df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n df.icol(1)\n\n result = df.iloc[:, 1]\n exp = df.ix[:, 2]\n assert_series_equal(result, exp)\n\n result = df.iloc[:, 2]\n exp = df.ix[:, 4]\n assert_series_equal(result, exp)\n\n # slice\n result = df.iloc[:, slice(4, 8)]\n expected = df.ix[:, 8:14]\n assert_frame_equal(result, expected)\n\n # verify slice is view\n # and that we are setting a copy\n def f():\n result[8] = 0.\n self.assertRaises(com.SettingWithCopyError, f)\n self.assertTrue((df[8] == 0).all())\n\n # list of integers\n result = df.iloc[:, [1, 2, 4, 6]]\n expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])\n assert_frame_equal(result, expected)\n\n def test_irow_icol_duplicates(self):\n # 10711, deprecated\n\n df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),\n index=list('aab'))\n\n result = df.iloc[0]\n result2 = df.ix[0]\n tm.assertIsInstance(result, Series)\n 
assert_almost_equal(result.values, df.values[0])\n assert_series_equal(result, result2)\n\n result = df.T.iloc[:, 0]\n result2 = df.T.ix[:, 0]\n tm.assertIsInstance(result, Series)\n assert_almost_equal(result.values, df.values[0])\n assert_series_equal(result, result2)\n\n # multiindex\n df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],\n ['A', 'A', 'B']],\n index=[['i', 'i', 'j'], ['X', 'X', 'Y']])\n rs = df.iloc[0]\n xp = df.ix[0]\n assert_series_equal(rs, xp)\n\n rs = df.iloc[:, 0]\n xp = df.T.ix[0]\n assert_series_equal(rs, xp)\n\n rs = df.iloc[:, [0]]\n xp = df.ix[:, [0]]\n assert_frame_equal(rs, xp)\n\n # #2259\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])\n result = df.iloc[:, [0]]\n expected = df.take([0], axis=1)\n assert_frame_equal(result, expected)\n\n def test_icol_sparse_propegate_fill_value(self):\n from pandas.sparse.api import SparseDataFrame\n df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)\n self.assertTrue(len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values))\n\n def test_iget_value(self):\n # 10711 deprecated\n\n with tm.assert_produces_warning(FutureWarning):\n self.frame.iget_value(0,0)\n\n for i, row in enumerate(self.frame.index):\n for j, col in enumerate(self.frame.columns):\n result = self.frame.iat[i,j]\n expected = self.frame.at[row, col]\n assert_almost_equal(result, expected)\n\n def test_nested_exception(self):\n # Ignore the strange way of triggering the problem\n # (which may get fixed), it's just a way to trigger\n # the issue or reraising an outer exception without\n # a named argument\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8,\n 9]}).set_index([\"a\", \"b\"])\n l = list(df.index)\n l[0] = [\"a\", \"b\"]\n df.index = l\n\n try:\n repr(df)\n except Exception as e:\n self.assertNotEqual(type(e), UnboundLocalError)\n\n def test_reindex_methods(self):\n df = pd.DataFrame({'x': list(range(5))})\n target = np.array([-0.1, 0.9, 1.1, 1.5])\n\n for method, expected_values in [('nearest', [0, 1, 1, 2]),\n ('pad', [np.nan, 0, 1, 1]),\n ('backfill', [0, 1, 2, 2])]:\n expected = pd.DataFrame({'x': expected_values}, index=target)\n actual = df.reindex(target, method=method)\n assert_frame_equal(expected, actual)\n\n actual = df.reindex_like(df, method=method, tolerance=0)\n assert_frame_equal(df, actual)\n\n actual = df.reindex(target, method=method, tolerance=1)\n assert_frame_equal(expected, actual)\n\n e2 = expected[::-1]\n actual = df.reindex(target[::-1], method=method)\n assert_frame_equal(e2, actual)\n\n new_order = [3, 0, 2, 1]\n e2 = expected.iloc[new_order]\n actual = df.reindex(target[new_order], method=method)\n assert_frame_equal(e2, actual)\n\n switched_method = ('pad' if method == 'backfill'\n else 'backfill' if method == 'pad'\n else method)\n actual = df[::-1].reindex(target, method=switched_method)\n assert_frame_equal(expected, actual)\n\n expected = pd.DataFrame({'x': [0, 1, 1, np.nan]}, index=target)\n actual = df.reindex(target, method='nearest', tolerance=0.2)\n assert_frame_equal(expected, actual)\n\n def test_non_monotonic_reindex_methods(self):\n dr = pd.date_range('2013-08-01', periods=6, freq='B')\n data = np.random.randn(6,1)\n df = pd.DataFrame(data, index=dr, columns=list('A'))\n df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],\n columns=list('A'))\n # index is not monotonic increasing or decreasing\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='pad')\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='ffill')\n 
self.assertRaises(ValueError, df_rev.reindex, df.index, method='bfill')\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='nearest')\n\n def test_reindex_level(self):\n from itertools import permutations\n icol = ['jim', 'joe', 'jolie']\n\n def verify_first_level(df, level, idx, check_index_type=True):\n f = lambda val: np.nonzero(df[level] == val)[0]\n i = np.concatenate(list(map(f, idx)))\n left = df.set_index(icol).reindex(idx, level=level)\n right = df.iloc[i].set_index(icol)\n assert_frame_equal(left, right, check_index_type=check_index_type)\n\n def verify(df, level, idx, indexer, check_index_type=True):\n left = df.set_index(icol).reindex(idx, level=level)\n right = df.iloc[indexer].set_index(icol)\n assert_frame_equal(left, right, check_index_type=check_index_type)\n\n df = pd.DataFrame({'jim':list('B' * 4 + 'A' * 2 + 'C' * 3),\n 'joe':list('abcdeabcd')[::-1],\n 'jolie':[10, 20, 30] * 3,\n 'joline': np.random.randint(0, 1000, 9)})\n\n target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],\n ['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],\n ['A', 'B'], ['B', 'A', 'C']]\n\n for idx in target:\n verify_first_level(df, 'jim', idx)\n\n # reindex by these causes different MultiIndex levels\n for idx in [['D', 'F'], ['A', 'C', 'B']]:\n verify_first_level(df, 'jim', idx, check_index_type=False)\n\n verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])\n verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])\n verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])\n verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])\n verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])\n verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])\n verify(df, 'joe', list('edwq'), [0, 4, 5])\n verify(df, 'joe', list('wq'), [], check_index_type=False)\n\n df = DataFrame({'jim':['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,\n 'joe':['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +\n ['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +\n ['3rd'] * 3 + ['2nd'] * 2,\n # this needs to be jointly unique with jim and joe or\n # reindexing will fail ~1.5% of the time, this works\n # out to needing unique groups of same size as joe\n 'jolie': np.concatenate([np.random.choice(1000, x, replace=False)\n for x in [2, 3, 3, 2, 3, 2, 3, 2]]),\n 'joline': np.random.randn(20).round(3) * 10})\n\n for idx in permutations(df['jim'].unique()):\n for i in range(3):\n verify_first_level(df, 'jim', idx[:i+1])\n\n i = [2,3,4,0,1,8,9,5,6,7,10,11,12,13,14,18,19,15,16,17]\n verify(df, 'joe', ['1st', '2nd', '3rd'], i)\n\n i = [0,1,2,3,4,10,11,12,5,6,7,8,9,15,16,17,18,19,13,14]\n verify(df, 'joe', ['3rd', '2nd', '1st'], i)\n\n i = [0,1,5,6,7,10,11,12,18,19,15,16,17]\n verify(df, 'joe', ['2nd', '3rd'], i)\n\n i = [0,1,2,3,4,10,11,12,8,9,15,16,17,13,14]\n verify(df, 'joe', ['3rd', '1st'], i)\n\n def test_getitem_ix_float_duplicates(self):\n df = pd.DataFrame(np.random.randn(3, 3),\n index=[0.1, 0.2, 0.2], columns=list('abc'))\n expect = df.iloc[1:]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df.index = [1, 0.2, 0.2]\n expect = df.iloc[1:]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df = pd.DataFrame(np.random.randn(4, 3),\n index=[1, 0.2, 0.2, 1], columns=list('abc'))\n expect = df.iloc[1:-1]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n 
expect = df.iloc[1:-1, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df.index = [0.1, 0.2, 2, 0.2]\n expect = df.iloc[[1, -1]]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[[1, -1], 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n def test_setitem_with_sparse_value(self):\n # GH8131\n df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})\n sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)\n df['new_column'] = sp_series\n tm.assert_series_equal(df['new_column'], sp_series, check_names=False)\n\n def test_setitem_with_unaligned_sparse_value(self):\n df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})\n sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])\n .to_sparse(fill_value=0))\n df['new_column'] = sp_series\n exp = pd.Series([1, 0, 0], name='new_column')\n tm.assert_series_equal(df['new_column'], exp)\n\n\n_seriesd = tm.getSeriesData()\n_tsd = tm.getTimeSeriesData()\n\n_frame = DataFrame(_seriesd)\n_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])\n_intframe = DataFrame(dict((k, v.astype(int))\n for k, v in compat.iteritems(_seriesd)))\n\n_tsframe = DataFrame(_tsd)\n\n_mixed_frame = _frame.copy()\n_mixed_frame['foo'] = 'bar'\n\n\nclass SafeForSparse(object):\n\n _multiprocess_can_split_ = True\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n for attr in ('index', 'columns'):\n ind = getattr(self.frame, attr)\n ind.name = None\n cp = self.frame.copy()\n getattr(cp, attr).name = 'foo'\n self.assertIsNone(getattr(self.frame, attr).name)\n\n def test_getitem_pop_assign_name(self):\n s = self.frame['A']\n self.assertEqual(s.name, 'A')\n\n s = self.frame.pop('A')\n self.assertEqual(s.name, 'A')\n\n s = self.frame.ix[:, 'B']\n self.assertEqual(s.name, 'B')\n\n s2 = s.ix[:]\n self.assertEqual(s2.name, 'B')\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n assert_almost_equal(result, expected)\n\n def test_join_index(self):\n # left / right\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2)\n self.assertTrue(f.index.equals(joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='left')\n self.assertTrue(joined.index.equals(f.index))\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='right')\n self.assertTrue(joined.index.equals(f2.index))\n self.assertEqual(len(joined.columns), 4)\n\n # inner\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='inner')\n self.assertTrue(joined.index.equals(f.index.intersection(f2.index)))\n self.assertEqual(len(joined.columns), 4)\n\n # outer\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='outer')\n self.assertTrue(tm.equalContents(self.frame.index, joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')\n\n # corner case - overlapping columns\n for how in ('outer', 'left', 'inner'):\n with assertRaisesRegexp(ValueError, 'columns overlap but no suffix'):\n self.frame.join(self.frame, how=how)\n\n def test_join_index_more(self):\n af = self.frame.ix[:, ['A', 'B']]\n bf = self.frame.ix[::2, ['C', 
'D']]\n\n expected = af.copy()\n expected['C'] = self.frame['C'][::2]\n expected['D'] = self.frame['D'][::2]\n\n result = af.join(bf)\n assert_frame_equal(result, expected)\n\n result = af.join(bf, how='right')\n assert_frame_equal(result, expected[::2])\n\n result = bf.join(af, how='right')\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n def test_join_index_series(self):\n df = self.frame.copy()\n s = df.pop(self.frame.columns[-1])\n joined = df.join(s)\n\n assert_frame_equal(joined, self.frame, check_names=False) # TODO should this check_names ?\n\n s.name = None\n assertRaisesRegexp(ValueError, 'must have a name', df.join, s)\n\n def test_join_overlap(self):\n df1 = self.frame.ix[:, ['A', 'B', 'C']]\n df2 = self.frame.ix[:, ['B', 'C', 'D']]\n\n joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')\n df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')\n df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')\n no_overlap = self.frame.ix[:, ['A', 'D']]\n expected = df1_suf.join(df2_suf).join(no_overlap)\n\n # column order not necessarily sorted\n assert_frame_equal(joined, expected.ix[:, joined.columns])\n\n def test_add_prefix_suffix(self):\n with_prefix = self.frame.add_prefix('foo#')\n expected = ['foo#%s' % c for c in self.frame.columns]\n self.assert_numpy_array_equal(with_prefix.columns, expected)\n\n with_suffix = self.frame.add_suffix('#foo')\n expected = ['%s#foo' % c for c in self.frame.columns]\n self.assert_numpy_array_equal(with_suffix.columns, expected)\n\n\nclass TestDataFrame(tm.TestCase, CheckIndexing,\n SafeForSparse):\n klass = DataFrame\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n\n self.frame = _frame.copy()\n self.frame2 = _frame2.copy()\n\n # force these all to int64 to avoid platform testing issues\n self.intframe = DataFrame(dict([ (c,s) for c,s in compat.iteritems(_intframe) ]), dtype = np.int64)\n self.tsframe = _tsframe.copy()\n self.mixed_frame = _mixed_frame.copy()\n self.mixed_float = DataFrame({ 'A': _frame['A'].copy().astype('float32'),\n 'B': _frame['B'].copy().astype('float32'),\n 'C': _frame['C'].copy().astype('float16'),\n 'D': _frame['D'].copy().astype('float64') })\n self.mixed_float2 = DataFrame({ 'A': _frame2['A'].copy().astype('float32'),\n 'B': _frame2['B'].copy().astype('float32'),\n 'C': _frame2['C'].copy().astype('float16'),\n 'D': _frame2['D'].copy().astype('float64') })\n self.mixed_int = DataFrame({ 'A': _intframe['A'].copy().astype('int32'),\n 'B': np.ones(len(_intframe['B']),dtype='uint64'),\n 'C': _intframe['C'].copy().astype('uint8'),\n 'D': _intframe['D'].copy().astype('int64') })\n self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),\n 'int32' : np.array([1]*10,dtype='int32'),\n }, index=np.arange(10))\n self.tzframe = DataFrame({'A' : date_range('20130101',periods=3),\n 'B' : date_range('20130101',periods=3,tz='US/Eastern'),\n 'C' : date_range('20130101',periods=3,tz='CET')})\n self.tzframe.iloc[1,1] = pd.NaT\n self.tzframe.iloc[1,2] = pd.NaT\n\n self.ts1 = tm.makeTimeSeries()\n self.ts2 = tm.makeTimeSeries()[5:]\n self.ts3 = tm.makeTimeSeries()[-5:]\n self.ts4 = tm.makeTimeSeries()[1:-1]\n\n self.ts_dict = {\n 'col1': self.ts1,\n 'col2': self.ts2,\n 'col3': self.ts3,\n 'col4': self.ts4,\n }\n self.empty = DataFrame({})\n\n arr = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n\n self.simple = DataFrame(arr, columns=['one', 'two', 'three'],\n index=['a', 'b', 'c'])\n\n def test_get_axis(self):\n f = self.frame\n 
self.assertEqual(f._get_axis_number(0), 0)\n self.assertEqual(f._get_axis_number(1), 1)\n self.assertEqual(f._get_axis_number('index'), 0)\n self.assertEqual(f._get_axis_number('rows'), 0)\n self.assertEqual(f._get_axis_number('columns'), 1)\n\n self.assertEqual(f._get_axis_name(0), 'index')\n self.assertEqual(f._get_axis_name(1), 'columns')\n self.assertEqual(f._get_axis_name('index'), 'index')\n self.assertEqual(f._get_axis_name('rows'), 'index')\n self.assertEqual(f._get_axis_name('columns'), 'columns')\n\n self.assertIs(f._get_axis(0), f.index)\n self.assertIs(f._get_axis(1), f.columns)\n\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)\n assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')\n assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, None)\n\n def test_set_index(self):\n idx = Index(np.arange(len(self.mixed_frame)))\n\n # cache it\n _ = self.mixed_frame['foo']\n self.mixed_frame.index = idx\n self.assertIs(self.mixed_frame['foo'].index, idx)\n with assertRaisesRegexp(ValueError, 'Length mismatch'):\n self.mixed_frame.index = idx[::2]\n\n def test_set_index_cast(self):\n\n # issue casting an index then set_index\n df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]},\n index = [2010,2011,2012])\n expected = df.ix[2010]\n new_index = df.index.astype(np.int32)\n df.index = new_index\n result = df.ix[2010]\n assert_series_equal(result,expected)\n\n def test_set_index2(self):\n df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],\n 'B': ['one', 'two', 'three', 'one', 'two'],\n 'C': ['a', 'b', 'c', 'd', 'e'],\n 'D': np.random.randn(5),\n 'E': np.random.randn(5)})\n\n # new object, single-column\n result = df.set_index('C')\n result_nodrop = df.set_index('C', drop=False)\n\n index = Index(df['C'], name='C')\n\n expected = df.ix[:, ['A', 'B', 'D', 'E']]\n expected.index = index\n\n expected_nodrop = df.copy()\n expected_nodrop.index = index\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result_nodrop, expected_nodrop)\n self.assertEqual(result.index.name, index.name)\n\n # inplace, single\n df2 = df.copy()\n\n df2.set_index('C', inplace=True)\n\n assert_frame_equal(df2, expected)\n\n df3 = df.copy()\n df3.set_index('C', drop=False, inplace=True)\n\n assert_frame_equal(df3, expected_nodrop)\n\n # create new object, multi-column\n result = df.set_index(['A', 'B'])\n result_nodrop = df.set_index(['A', 'B'], drop=False)\n\n index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])\n\n expected = df.ix[:, ['C', 'D', 'E']]\n expected.index = index\n\n expected_nodrop = df.copy()\n expected_nodrop.index = index\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result_nodrop, expected_nodrop)\n self.assertEqual(result.index.names, index.names)\n\n # inplace\n df2 = df.copy()\n df2.set_index(['A', 'B'], inplace=True)\n assert_frame_equal(df2, expected)\n\n df3 = df.copy()\n df3.set_index(['A', 'B'], drop=False, inplace=True)\n assert_frame_equal(df3, expected_nodrop)\n\n # corner case\n with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):\n df.set_index('A', verify_integrity=True)\n\n # append\n result = df.set_index(['A', 'B'], append=True)\n xp = df.reset_index().set_index(['index', 'A', 'B'])\n xp.index.names = [None, 'A', 'B']\n assert_frame_equal(result, xp)\n\n # append to existing multiindex\n rdf = df.set_index(['A'], append=True)\n rdf = rdf.set_index(['B', 'C'], append=True)\n expected = 
df.set_index(['A', 'B', 'C'], append=True)\n assert_frame_equal(rdf, expected)\n\n # Series\n result = df.set_index(df.C)\n self.assertEqual(result.index.name, 'C')\n\n def test_set_index_nonuniq(self):\n df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],\n 'B': ['one', 'two', 'three', 'one', 'two'],\n 'C': ['a', 'b', 'c', 'd', 'e'],\n 'D': np.random.randn(5),\n 'E': np.random.randn(5)})\n with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):\n df.set_index('A', verify_integrity=True, inplace=True)\n self.assertIn('A', df)\n\n def test_set_index_bug(self):\n # GH1590\n df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})\n df2 = df.select(lambda indx: indx >= 1)\n rs = df2.set_index('key')\n xp = DataFrame({'val': [1, 2]},\n Index(['b', 'c'], name='key'))\n assert_frame_equal(rs, xp)\n\n def test_set_index_pass_arrays(self):\n df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8)})\n\n # multiple columns\n result = df.set_index(['A', df['B'].values], drop=False)\n expected = df.set_index(['A', 'B'], drop=False)\n assert_frame_equal(result, expected, check_names=False) # TODO should set_index check_names ?\n\n def test_construction_with_categorical_index(self):\n\n ci = tm.makeCategoricalIndex(10)\n\n # with Categorical\n df = DataFrame({'A' : np.random.randn(10),\n 'B' : ci.values })\n idf = df.set_index('B')\n str(idf)\n tm.assert_index_equal(idf.index, ci, check_names=False)\n self.assertEqual(idf.index.name, 'B')\n\n # from a CategoricalIndex\n df = DataFrame({'A' : np.random.randn(10),\n 'B' : ci })\n idf = df.set_index('B')\n str(idf)\n tm.assert_index_equal(idf.index, ci, check_names=False)\n self.assertEqual(idf.index.name, 'B')\n\n idf = df.set_index('B').reset_index().set_index('B')\n str(idf)\n tm.assert_index_equal(idf.index, ci, check_names=False)\n self.assertEqual(idf.index.name, 'B')\n\n new_df = idf.reset_index()\n new_df.index = df.B\n tm.assert_index_equal(new_df.index, ci, check_names=False)\n self.assertEqual(idf.index.name, 'B')\n\n def test_set_index_cast_datetimeindex(self):\n df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)\n for i in range(1000)],\n 'B': np.random.randn(1000)})\n\n idf = df.set_index('A')\n tm.assertIsInstance(idf.index, DatetimeIndex)\n\n # don't cast a DatetimeIndex WITH a tz, leave as object\n # GH 6032\n i = pd.DatetimeIndex(pd.tseries.tools.to_datetime(['2013-1-1 13:00','2013-1-2 14:00'], errors=\"raise\")).tz_localize('US/Pacific')\n df = DataFrame(np.random.randn(2,1),columns=['A'])\n\n expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),\n pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype=\"object\"))\n\n # convert index to series\n result = Series(i)\n assert_series_equal(result, expected)\n\n # assignt to frame\n df['B'] = i\n result = df['B']\n assert_series_equal(result, expected, check_names=False)\n self.assertEqual(result.name, 'B')\n\n # keep the timezone\n result = i.to_series(keep_tz=True)\n assert_series_equal(result.reset_index(drop=True), expected)\n\n # convert to utc\n df['C'] = i.to_series().reset_index(drop=True)\n result = df['C']\n comp = DatetimeIndex(expected.values).copy()\n comp.tz = None\n self.assert_numpy_array_equal(result.values, comp.values)\n\n # list of datetimes with a tz\n df['D'] = i.to_pydatetime()\n result = df['D']\n assert_series_equal(result, expected, check_names=False)\n 
self.assertEqual(result.name, 'D')\n\n # GH 6785\n # set the index manually\n import pytz\n df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}])\n expected = df.set_index('ts')\n df.index = df['ts']\n df.pop('ts')\n assert_frame_equal(df, expected)\n\n # GH 3950\n # reset_index with single level\n for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:\n idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx')\n df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)\n\n expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2),\n datetime(2011, 1, 3), datetime(2011, 1, 4),\n datetime(2011, 1, 5)],\n 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},\n columns=['idx', 'a', 'b'])\n expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz))\n assert_frame_equal(df.reset_index(), expected)\n\n def test_set_index_multiindexcolumns(self):\n columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])\n df = DataFrame(np.random.randn(3, 3), columns=columns)\n rs = df.set_index(df.columns[0])\n xp = df.ix[:, 1:]\n xp.index = df.ix[:, 0].values\n xp.index.names = [df.columns[0]]\n assert_frame_equal(rs, xp)\n\n def test_set_index_empty_column(self):\n # #1971\n df = DataFrame([\n dict(a=1, p=0),\n dict(a=2, m=10),\n dict(a=3, m=11, p=20),\n dict(a=4, m=12, p=21)\n ], columns=('a', 'm', 'p', 'x'))\n\n # it works!\n result = df.set_index(['a', 'x'])\n repr(result)\n\n def test_set_columns(self):\n cols = Index(np.arange(len(self.mixed_frame.columns)))\n self.mixed_frame.columns = cols\n with assertRaisesRegexp(ValueError, 'Length mismatch'):\n self.mixed_frame.columns = cols[::2]\n\n def test_keys(self):\n getkeys = self.frame.keys\n self.assertIs(getkeys(), self.frame.columns)\n\n def test_column_contains_typeerror(self):\n try:\n self.frame.columns in self.frame\n except TypeError:\n pass\n\n def test_constructor(self):\n df = DataFrame()\n self.assertEqual(len(df.index), 0)\n\n df = DataFrame(data={})\n self.assertEqual(len(df.index), 0)\n\n def test_constructor_mixed(self):\n index, data = tm.getMixedTypeDict()\n\n indexed_frame = DataFrame(data, index=index)\n unindexed_frame = DataFrame(data)\n\n self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)\n\n def test_constructor_cast_failure(self):\n foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)\n self.assertEqual(foo['a'].dtype, object)\n\n # GH 3010, constructing with odd arrays\n df = DataFrame(np.ones((4,2)))\n\n # this is ok\n df['foo'] = np.ones((4,2)).tolist()\n\n # this is not ok\n self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))\n\n # this is ok\n df['foo2'] = np.ones((4,2)).tolist()\n\n def test_constructor_dtype_copy(self):\n orig_df = DataFrame({\n 'col1': [1.],\n 'col2': [2.],\n 'col3': [3.]})\n\n new_df = pd.DataFrame(orig_df, dtype=float, copy=True)\n\n new_df['col1'] = 200.\n self.assertEqual(orig_df['col1'][0], 1.)\n\n def test_constructor_dtype_nocast_view(self):\n df = DataFrame([[1, 2]])\n should_be_view = DataFrame(df, dtype=df[0].dtype)\n should_be_view[0][0] = 99\n self.assertEqual(df.values[0, 0], 99)\n\n should_be_view = DataFrame(df.values, dtype=df[0].dtype)\n should_be_view[0][0] = 97\n self.assertEqual(df.values[0, 0], 97)\n\n def test_constructor_dtype_list_data(self):\n df = DataFrame([[1, '2'],\n [None, 'a']], dtype=object)\n self.assertIsNone(df.ix[1, 0])\n self.assertEqual(df.ix[0, 1], '2')\n\n def test_constructor_list_frames(self):\n\n # GH 3243\n result = 
DataFrame([DataFrame([])])\n self.assertEqual(result.shape, (1,0))\n\n result = DataFrame([DataFrame(dict(A = lrange(5)))])\n tm.assertIsInstance(result.iloc[0,0], DataFrame)\n\n def test_constructor_mixed_dtypes(self):\n\n def _make_mixed_dtypes_df(typ, ad = None):\n\n if typ == 'int':\n dtypes = MIXED_INT_DTYPES\n arrays = [ np.array(np.random.rand(10), dtype = d) for d in dtypes ]\n elif typ == 'float':\n dtypes = MIXED_FLOAT_DTYPES\n arrays = [ np.array(np.random.randint(10, size=10), dtype = d) for d in dtypes ]\n\n zipper = lzip(dtypes,arrays)\n for d,a in zipper:\n assert(a.dtype == d)\n if ad is None:\n ad = dict()\n ad.update(dict([ (d,a) for d,a in zipper ]))\n return DataFrame(ad)\n\n def _check_mixed_dtypes(df, dtypes = None):\n if dtypes is None:\n dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES\n for d in dtypes:\n if d in df:\n assert(df.dtypes[d] == d)\n\n # mixed floating and integer coexinst in the same frame\n df = _make_mixed_dtypes_df('float')\n _check_mixed_dtypes(df)\n\n # add lots of types\n df = _make_mixed_dtypes_df('float', dict(A = 1, B = 'foo', C = 'bar'))\n _check_mixed_dtypes(df)\n\n # GH 622\n df = _make_mixed_dtypes_df('int')\n _check_mixed_dtypes(df)\n\n def test_constructor_complex_dtypes(self):\n # GH10952\n a = np.random.rand(10).astype(np.complex64)\n b = np.random.rand(10).astype(np.complex128)\n\n df = DataFrame({'a': a, 'b': b})\n self.assertEqual(a.dtype, df.a.dtype)\n self.assertEqual(b.dtype, df.b.dtype)\n\n def test_constructor_rec(self):\n rec = self.frame.to_records(index=False)\n\n # Assigning causes segfault in NumPy < 1.5.1\n # rec.dtype.names = list(rec.dtype.names)[::-1]\n\n index = self.frame.index\n\n df = DataFrame(rec)\n self.assert_numpy_array_equal(df.columns, rec.dtype.names)\n\n df2 = DataFrame(rec, index=index)\n self.assert_numpy_array_equal(df2.columns, rec.dtype.names)\n self.assertTrue(df2.index.equals(index))\n\n rng = np.arange(len(rec))[::-1]\n df3 = DataFrame(rec, index=rng, columns=['C', 'B'])\n expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])\n assert_frame_equal(df3, expected)\n\n def test_constructor_bool(self):\n df = DataFrame({0: np.ones(10, dtype=bool),\n 1: np.zeros(10, dtype=bool)})\n self.assertEqual(df.values.dtype, np.bool_)\n\n def test_constructor_overflow_int64(self):\n values = np.array([2 ** 64 - i for i in range(1, 10)],\n dtype=np.uint64)\n\n result = DataFrame({'a': values})\n self.assertEqual(result['a'].dtype, object)\n\n # #2355\n data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),\n (8921811264899370420, 45), (long(17019687244989530680), 270),\n (long(9930107427299601010), 273)]\n dtype = [('uid', 'u8'), ('score', 'u8')]\n data = np.zeros((len(data_scores),), dtype=dtype)\n data[:] = data_scores\n df_crawls = DataFrame(data)\n self.assertEqual(df_crawls['uid'].dtype, object)\n\n def test_constructor_ordereddict(self):\n import random\n nitems = 100\n nums = lrange(nitems)\n random.shuffle(nums)\n expected = ['A%d' % i for i in nums]\n df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))\n self.assertEqual(expected, list(df.columns))\n\n def test_constructor_dict(self):\n frame = DataFrame({'col1': self.ts1,\n 'col2': self.ts2})\n\n tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)\n tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)\n\n frame = DataFrame({'col1': self.ts1,\n 'col2': self.ts2},\n columns=['col2', 'col3', 'col4'])\n\n self.assertEqual(len(frame), len(self.ts2))\n self.assertNotIn('col1', frame)\n 
self.assertTrue(isnull(frame['col3']).all())\n\n # Corner cases\n self.assertEqual(len(DataFrame({})), 0)\n\n # mix dict and array, wrong size - no spec for which error should raise\n # first\n with tm.assertRaises(ValueError):\n DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})\n\n # Length-one dict micro-optimization\n frame = DataFrame({'A': {'1': 1, '2': 2}})\n self.assert_numpy_array_equal(frame.index, ['1', '2'])\n\n # empty dict plus index\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx)\n self.assertIs(frame.index, idx)\n\n # empty with index and columns\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx, columns=idx)\n self.assertIs(frame.index, idx)\n self.assertIs(frame.columns, idx)\n self.assertEqual(len(frame._series), 3)\n\n # with dict of empty list and Series\n frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])\n self.assertTrue(frame.index.equals(Index([])))\n\n # GH10856\n # dict with scalar values should raise error, even if columns passed\n with tm.assertRaises(ValueError):\n DataFrame({'a': 0.7})\n\n with tm.assertRaises(ValueError):\n DataFrame({'a': 0.7}, columns=['a'])\n\n with tm.assertRaises(ValueError):\n DataFrame({'a': 0.7}, columns=['b'])\n\n def test_constructor_multi_index(self):\n # GH 4078\n # construction error with mi and all-nan frame\n tuples = [(2, 3), (3, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi,columns=mi)\n self.assertTrue(pd.isnull(df).values.ravel().all())\n\n tuples = [(3, 3), (2, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi,columns=mi)\n self.assertTrue(pd.isnull(df).values.ravel().all())\n\n def test_constructor_error_msgs(self):\n msg = \"Mixing dicts with non-Series may lead to ambiguous ordering.\"\n # mix dict and array, wrong size\n with assertRaisesRegexp(ValueError, msg):\n DataFrame({'A': {'a': 'a', 'b': 'b'},\n 'B': ['a', 'b', 'c']})\n\n # wrong size ndarray, GH 3105\n msg = \"Shape of passed values is \\(3, 4\\), indices imply \\(3, 3\\)\"\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(np.arange(12).reshape((4, 3)),\n columns=['foo', 'bar', 'baz'],\n index=date_range('2000-01-01', periods=3))\n\n\n # higher dim raise exception\n with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):\n DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])\n\n # wrong size axis labels\n with assertRaisesRegexp(ValueError, \"Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)\"):\n DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])\n\n with assertRaisesRegexp(ValueError, \"Shape of passed values is \\(3, 2\\), indices imply \\(2, 2\\)\"):\n DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])\n\n with assertRaisesRegexp(ValueError, 'If using all scalar values, you must pass an index'):\n DataFrame({'a': False, 'b': True})\n\n def test_constructor_with_embedded_frames(self):\n\n # embedded data frames\n df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]})\n df2 = DataFrame([df1, df1+10])\n\n df2.dtypes\n str(df2)\n\n result = df2.loc[0,0]\n assert_frame_equal(result,df1)\n\n result = df2.loc[1,0]\n assert_frame_equal(result,df1+10)\n\n def test_insert_error_msmgs(self):\n\n # GH 7432\n df = DataFrame({'foo':['a', 'b', 'c'], 'bar':[1,2,3], 'baz':['d','e','f']}).set_index('foo')\n s = DataFrame({'foo':['a', 'b', 'c', 'a'], 'fiz':['g','h','i','j']}).set_index('foo')\n msg = 'cannot reindex from a duplicate axis'\n with assertRaisesRegexp(ValueError, msg):\n df['newcol'] = s\n\n # GH 4107, more 
descriptive error message\n df = DataFrame(np.random.randint(0,2,(4,4)),\n columns=['a', 'b', 'c', 'd'])\n\n msg = 'incompatible index of inserted column with frame index'\n with assertRaisesRegexp(TypeError, msg):\n df['gr'] = df.groupby(['b', 'c']).count()\n\n def test_frame_subclassing_and_slicing(self):\n # Subclass frame and ensure it returns the right class on slicing it\n # In reference to PR 9632\n\n class CustomSeries(Series):\n @property\n def _constructor(self):\n return CustomSeries\n\n def custom_series_function(self):\n return 'OK'\n\n class CustomDataFrame(DataFrame):\n \"Subclasses pandas DF, fills DF with simulation results, adds some custom plotting functions.\"\n\n def __init__(self, *args, **kw):\n super(CustomDataFrame, self).__init__(*args, **kw)\n\n @property\n def _constructor(self):\n return CustomDataFrame\n\n _constructor_sliced = CustomSeries\n\n def custom_frame_function(self):\n return 'OK'\n\n data = {'col1': range(10),\n 'col2': range(10)}\n cdf = CustomDataFrame(data)\n\n # Did we get back our own DF class?\n self.assertTrue(isinstance(cdf, CustomDataFrame))\n\n # Do we get back our own Series class after selecting a column?\n cdf_series = cdf.col1\n self.assertTrue(isinstance(cdf_series, CustomSeries))\n self.assertEqual(cdf_series.custom_series_function(), 'OK')\n\n # Do we get back our own DF class after slicing row-wise?\n cdf_rows = cdf[1:5]\n self.assertTrue(isinstance(cdf_rows, CustomDataFrame))\n self.assertEqual(cdf_rows.custom_frame_function(), 'OK')\n\n # Make sure sliced part of multi-index frame is custom class\n mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')])\n cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)\n self.assertTrue(isinstance(cdf_multi['A'], CustomDataFrame))\n\n mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')])\n cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)\n self.assertTrue(isinstance(cdf_multi2['A'], CustomSeries))\n\n def test_constructor_subclass_dict(self):\n # Test for passing dict subclass to constructor\n data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),\n 'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}\n df = DataFrame(data)\n refdf = DataFrame(dict((col, dict(compat.iteritems(val)))\n for col, val in compat.iteritems(data)))\n assert_frame_equal(refdf, df)\n\n data = tm.TestSubDict(compat.iteritems(data))\n df = DataFrame(data)\n assert_frame_equal(refdf, df)\n\n # try with defaultdict\n from collections import defaultdict\n data = {}\n self.frame['B'][:10] = np.nan\n for k, v in compat.iteritems(self.frame):\n dct = defaultdict(dict)\n dct.update(v.to_dict())\n data[k] = dct\n frame = DataFrame(data)\n assert_frame_equal(self.frame.sort_index(), frame)\n\n def test_constructor_dict_block(self):\n expected = [[4., 3., 2., 1.]]\n df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},\n columns=['d', 'c', 'b', 'a'])\n assert_almost_equal(df.values, expected)\n\n def test_constructor_dict_cast(self):\n # cast float tests\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n frame = DataFrame(test_data, dtype=float)\n self.assertEqual(len(frame), 3)\n self.assertEqual(frame['B'].dtype, np.float64)\n self.assertEqual(frame['A'].dtype, np.float64)\n\n frame = DataFrame(test_data)\n self.assertEqual(len(frame), 3)\n self.assertEqual(frame['B'].dtype, np.object_)\n self.assertEqual(frame['A'].dtype, np.float64)\n\n # can't cast to float\n test_data = {\n 'A': dict(zip(range(20), tm.makeStringIndex(20))),\n 'B': 
dict(zip(range(15), randn(15)))\n }\n frame = DataFrame(test_data, dtype=float)\n self.assertEqual(len(frame), 20)\n self.assertEqual(frame['A'].dtype, np.object_)\n self.assertEqual(frame['B'].dtype, np.float64)\n\n def test_constructor_dict_dont_upcast(self):\n d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}\n df = DataFrame(d)\n tm.assertIsInstance(df['Col1']['Row2'], float)\n\n dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])\n tm.assertIsInstance(dm[1][1], int)\n\n def test_constructor_dict_of_tuples(self):\n # GH #1491\n data = {'a': (1, 2, 3), 'b': (4, 5, 6)}\n\n result = DataFrame(data)\n expected = DataFrame(dict((k, list(v)) for k, v in compat.iteritems(data)))\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_dict_multiindex(self):\n check = lambda result, expected: tm.assert_frame_equal(\n result, expected, check_dtype=True, check_index_type=True,\n check_column_type=True, check_names=True)\n d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},\n ('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},\n ('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}\n _d = sorted(d.items())\n df = DataFrame(d)\n expected = DataFrame(\n [x[1] for x in _d],\n index=MultiIndex.from_tuples([x[0] for x in _d])).T\n expected.index = MultiIndex.from_tuples(expected.index)\n check(df, expected)\n\n d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}\n _d.insert(0, ('z', d['z']))\n expected = DataFrame(\n [x[1] for x in _d],\n index=Index([x[0] for x in _d], tupleize_cols=False)).T\n expected.index = Index(expected.index, tupleize_cols=False)\n df = DataFrame(d)\n df = df.reindex(columns=expected.columns, index=expected.index)\n check(df, expected)\n\n def test_constructor_dict_datetime64_index(self):\n # GH 10160\n dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']\n\n def create_data(constructor):\n return dict((i, {constructor(s): 2*i}) for i, s in enumerate(dates_as_str))\n\n data_datetime64 = create_data(np.datetime64)\n data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))\n data_Timestamp = create_data(Timestamp)\n\n expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},\n {0: None, 1: 2, 2: None, 3: None},\n {0: None, 1: None, 2: 4, 3: None},\n {0: None, 1: None, 2: None, 3: 6}],\n index=[Timestamp(dt) for dt in dates_as_str])\n\n result_datetime64 = DataFrame(data_datetime64)\n result_datetime = DataFrame(data_datetime)\n result_Timestamp = DataFrame(data_Timestamp)\n assert_frame_equal(result_datetime64, expected)\n assert_frame_equal(result_datetime, expected)\n assert_frame_equal(result_Timestamp, expected)\n\n def test_constructor_dict_timedelta64_index(self):\n # GH 10160\n td_as_int = [1, 2, 3, 4]\n\n def create_data(constructor):\n return dict((i, {constructor(s): 2*i}) for i, s in enumerate(td_as_int))\n\n data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))\n data_timedelta = create_data(lambda x: timedelta(days=x))\n data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))\n\n expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},\n {0: None, 1: 2, 2: None, 3: None},\n {0: None, 1: None, 2: 4, 3: None},\n {0: None, 1: None, 2: None, 3: 6}],\n index=[Timedelta(td, 'D') for td in td_as_int])\n\n result_timedelta64 = DataFrame(data_timedelta64)\n result_timedelta = DataFrame(data_timedelta)\n result_Timedelta = DataFrame(data_Timedelta)\n assert_frame_equal(result_timedelta64, expected)\n assert_frame_equal(result_timedelta, 
expected)\n assert_frame_equal(result_Timedelta, expected)\n\n def test_nested_dict_frame_constructor(self):\n rng = period_range('1/1/2000', periods=5)\n df = DataFrame(randn(10, 5), columns=rng)\n\n data = {}\n for col in df.columns:\n for row in df.index:\n data.setdefault(col, {})[row] = df.get_value(row, col)\n\n result = DataFrame(data, columns=rng)\n tm.assert_frame_equal(result, df)\n\n data = {}\n for col in df.columns:\n for row in df.index:\n data.setdefault(row, {})[col] = df.get_value(row, col)\n\n result = DataFrame(data, index=rng).T\n tm.assert_frame_equal(result, df)\n\n\n def _check_basic_constructor(self, empty):\n \"mat: 2d matrix with shpae (3, 2) to input. empty - makes sized objects\"\n mat = empty((2, 3), dtype=float)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n\n # 1-D input\n frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])\n self.assertEqual(len(frame.index), 3)\n self.assertEqual(len(frame.columns), 1)\n\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.int64)\n self.assertEqual(frame.values.dtype, np.int64)\n\n # wrong size axis labels\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(mat, columns=['A', 'B', 'C'], index=[1])\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(2, 2\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(mat, columns=['A', 'B'], index=[1, 2])\n\n # higher dim raise exception\n with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):\n DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],\n index=[1])\n\n # automatic labeling\n frame = DataFrame(mat)\n self.assert_numpy_array_equal(frame.index, lrange(2))\n self.assert_numpy_array_equal(frame.columns, lrange(3))\n\n frame = DataFrame(mat, index=[1, 2])\n self.assert_numpy_array_equal(frame.columns, lrange(3))\n\n frame = DataFrame(mat, columns=['A', 'B', 'C'])\n self.assert_numpy_array_equal(frame.index, lrange(2))\n\n # 0-length axis\n frame = DataFrame(empty((0, 3)))\n self.assertEqual(len(frame.index), 0)\n\n frame = DataFrame(empty((3, 0)))\n self.assertEqual(len(frame.columns), 0)\n\n def test_constructor_ndarray(self):\n mat = np.zeros((2, 3), dtype=float)\n self._check_basic_constructor(np.ones)\n\n frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])\n self.assertEqual(len(frame), 2)\n\n def test_constructor_maskedarray(self):\n self._check_basic_constructor(ma.masked_all)\n\n # Check non-masked values\n mat = ma.masked_all((2, 3), dtype=float)\n mat[0, 0] = 1.0\n mat[1, 2] = 2.0\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1.0, frame['A'][1])\n self.assertEqual(2.0, frame['C'][2])\n\n # what is this even checking??\n mat = ma.masked_all((2, 3), dtype=float)\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n def test_constructor_maskedarray_nonfloat(self):\n # masked int promoted to float\n mat = ma.masked_all((2, 3), dtype=int)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.float64)\n self.assertEqual(frame.values.dtype, 
np.float64)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1, frame['A'][1])\n self.assertEqual(2, frame['C'][2])\n\n # masked np.datetime64 stays (use lib.NaT as null)\n mat = ma.masked_all((2, 3), dtype='M8[ns]')\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(isnull(frame).values.all())\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.int64)\n self.assertEqual(frame.values.dtype, np.int64)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1, frame['A'].view('i8')[1])\n self.assertEqual(2, frame['C'].view('i8')[2])\n\n # masked bool promoted to object\n mat = ma.masked_all((2, 3), dtype=bool)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=object)\n self.assertEqual(frame.values.dtype, object)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = True\n mat2[1, 2] = False\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(True, frame['A'][1])\n self.assertEqual(False, frame['C'][2])\n\n def test_constructor_mrecarray(self):\n # Ensure mrecarray produces frame identical to dict of masked arrays\n # from GH3479\n\n assert_fr_equal = functools.partial(assert_frame_equal,\n check_index_type=True,\n check_column_type=True,\n check_frame_type=True)\n arrays = [\n ('float', np.array([1.5, 2.0])),\n ('int', np.array([1, 2])),\n ('str', np.array(['abc', 'def'])),\n ]\n for name, arr in arrays[:]:\n arrays.append(('masked1_' + name,\n np.ma.masked_array(arr, mask=[False, True])))\n arrays.append(('masked_all', np.ma.masked_all((2,))))\n arrays.append(('masked_none',\n np.ma.masked_array([1.0, 2.5], mask=False)))\n\n # call assert_frame_equal for all selections of 3 arrays\n for comb in itertools.combinations(arrays, 3):\n names, data = zip(*comb)\n mrecs = mrecords.fromarrays(data, names=names)\n\n # fill the comb\n comb = dict([ (k, v.filled()) if hasattr(v,'filled') else (k, v) for k, v in comb ])\n\n expected = DataFrame(comb,columns=names)\n result = DataFrame(mrecs)\n assert_fr_equal(result,expected)\n\n # specify columns\n expected = DataFrame(comb,columns=names[::-1])\n result = DataFrame(mrecs, columns=names[::-1])\n assert_fr_equal(result,expected)\n\n # specify index\n expected = DataFrame(comb,columns=names,index=[1,2])\n result = DataFrame(mrecs, index=[1,2])\n assert_fr_equal(result,expected)\n\n def test_constructor_corner(self):\n df = DataFrame(index=[])\n self.assertEqual(df.values.shape, (0, 0))\n\n # empty but with specified dtype\n df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)\n self.assertEqual(df.values.dtype, np.object_)\n\n # does not error but ends up float\n df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)\n self.assertEqual(df.values.dtype, np.object_)\n\n # #1783 empty dtype object\n df = DataFrame({}, columns=['foo', 'bar'])\n self.assertEqual(df.values.dtype, np.object_)\n\n df = DataFrame({'b': 1}, index=lrange(10), 
columns=list('abc'),\n dtype=int)\n self.assertEqual(df.values.dtype, np.object_)\n\n\n def test_constructor_scalar_inference(self):\n data = {'int': 1, 'bool': True,\n 'float': 3., 'complex': 4j, 'object': 'foo'}\n df = DataFrame(data, index=np.arange(10))\n\n self.assertEqual(df['int'].dtype, np.int64)\n self.assertEqual(df['bool'].dtype, np.bool_)\n self.assertEqual(df['float'].dtype, np.float64)\n self.assertEqual(df['complex'].dtype, np.complex128)\n self.assertEqual(df['object'].dtype, np.object_)\n\n def test_constructor_arrays_and_scalars(self):\n df = DataFrame({'a': randn(10), 'b': True})\n exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})\n\n assert_frame_equal(df, exp)\n with tm.assertRaisesRegexp(ValueError, 'must pass an index'):\n DataFrame({'a': False, 'b': True})\n\n def test_constructor_DataFrame(self):\n df = DataFrame(self.frame)\n assert_frame_equal(df, self.frame)\n\n df_casted = DataFrame(self.frame, dtype=np.int64)\n self.assertEqual(df_casted.values.dtype, np.int64)\n\n def test_constructor_more(self):\n # used to be in test_matrix.py\n arr = randn(10)\n dm = DataFrame(arr, columns=['A'], index=np.arange(10))\n self.assertEqual(dm.values.ndim, 2)\n\n arr = randn(0)\n dm = DataFrame(arr)\n self.assertEqual(dm.values.ndim, 2)\n self.assertEqual(dm.values.ndim, 2)\n\n # no data specified\n dm = DataFrame(columns=['A', 'B'], index=np.arange(10))\n self.assertEqual(dm.values.shape, (10, 2))\n\n dm = DataFrame(columns=['A', 'B'])\n self.assertEqual(dm.values.shape, (0, 2))\n\n dm = DataFrame(index=np.arange(10))\n self.assertEqual(dm.values.shape, (10, 0))\n\n # corner, silly\n # TODO: Fix this Exception to be better...\n with assertRaisesRegexp(PandasError, 'constructor not properly called'):\n DataFrame((1, 2, 3))\n\n # can't cast\n mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)\n with assertRaisesRegexp(ValueError, 'cast'):\n DataFrame(mat, index=[0, 1], columns=[0], dtype=float)\n\n dm = DataFrame(DataFrame(self.frame._series))\n tm.assert_frame_equal(dm, self.frame)\n\n # int cast\n dm = DataFrame({'A': np.ones(10, dtype=int),\n 'B': np.ones(10, dtype=np.float64)},\n index=np.arange(10))\n\n self.assertEqual(len(dm.columns), 2)\n self.assertEqual(dm.values.dtype, np.float64)\n\n def test_constructor_empty_list(self):\n df = DataFrame([], index=[])\n expected = DataFrame(index=[])\n assert_frame_equal(df, expected)\n\n # GH 9939\n df = DataFrame([], columns=['A', 'B'])\n expected = DataFrame({}, columns=['A', 'B'])\n assert_frame_equal(df, expected)\n\n # Empty generator: list(empty_gen()) == []\n def empty_gen():\n return\n yield\n\n df = DataFrame(empty_gen(), columns=['A', 'B'])\n assert_frame_equal(df, expected)\n\n def test_constructor_list_of_lists(self):\n # GH #484\n l = [[1, 'a'], [2, 'b']]\n df = DataFrame(data=l, columns=[\"num\", \"str\"])\n self.assertTrue(com.is_integer_dtype(df['num']))\n self.assertEqual(df['str'].dtype, np.object_)\n\n # GH 4851\n # list of 0-dim ndarrays\n expected = DataFrame({ 0: range(10) })\n data = [np.array(x) for x in range(10)]\n result = DataFrame(data)\n assert_frame_equal(result, expected)\n\n def test_constructor_sequence_like(self):\n # GH 3783\n # collections.Squence like\n import collections\n\n class DummyContainer(collections.Sequence):\n def __init__(self, lst):\n self._lst = lst\n def __getitem__(self, n):\n return self._lst.__getitem__(n)\n def __len__(self, n):\n return self._lst.__len__()\n\n l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]\n columns = [\"num\", \"str\"]\n result = 
DataFrame(l, columns=columns)\n expected = DataFrame([[1,'a'],[2,'b']],columns=columns)\n assert_frame_equal(result, expected, check_dtype=False)\n\n # GH 4297\n # support Array\n import array\n result = DataFrame.from_items([('A', array.array('i', range(10)))])\n expected = DataFrame({ 'A' : list(range(10)) })\n assert_frame_equal(result, expected, check_dtype=False)\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ array.array('i', range(10)), array.array('i',range(10)) ])\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_iterator(self):\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ range(10), range(10) ])\n assert_frame_equal(result, expected)\n\n def test_constructor_generator(self):\n #related #2305\n\n gen1 = (i for i in range(10))\n gen2 = (i for i in range(10))\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ gen1, gen2 ])\n assert_frame_equal(result, expected)\n\n gen = ([ i, 'a'] for i in range(10))\n result = DataFrame(gen)\n expected = DataFrame({ 0 : range(10), 1 : 'a' })\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_list_of_dicts(self):\n data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),\n OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),\n OrderedDict([['a', 1.5], ['d', 6]]),\n OrderedDict(),\n OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),\n OrderedDict([['b', 3], ['c', 4], ['d', 6]])]\n\n result = DataFrame(data)\n expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),\n orient='index')\n assert_frame_equal(result, expected.reindex(result.index))\n\n result = DataFrame([{}])\n expected = DataFrame(index=[0])\n assert_frame_equal(result, expected)\n\n def test_constructor_list_of_series(self):\n data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),\n OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]\n sdict = OrderedDict(zip(['x', 'y'], data))\n idx = Index(['a', 'b', 'c'])\n\n # all named\n data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),\n Series([1.5, 3, 6], idx, name='y')]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected)\n\n # some unnamed\n data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),\n Series([1.5, 3, 6], idx)]\n result = DataFrame(data2)\n\n sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result.sort_index(), expected)\n\n # none named\n data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),\n OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),\n OrderedDict([['a', 1.5], ['d', 6]]),\n OrderedDict(),\n OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),\n OrderedDict([['b', 3], ['c', 4], ['d', 6]])]\n data = [Series(d) for d in data]\n\n result = DataFrame(data)\n sdict = OrderedDict(zip(range(len(data)), data))\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected.reindex(result.index))\n\n result2 = DataFrame(data, index=np.arange(6))\n assert_frame_equal(result, result2)\n\n result = DataFrame([Series({})])\n expected = DataFrame(index=[0])\n assert_frame_equal(result, expected)\n\n data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),\n OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]\n sdict = OrderedDict(zip(range(len(data)), data))\n\n idx = Index(['a', 'b', 'c'])\n data2 = [Series([1.5, 3, 4], idx, dtype='O'),\n Series([1.5, 3, 6], 
idx)]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected)\n\n def test_constructor_list_of_derived_dicts(self):\n class CustomDict(dict):\n pass\n d = {'a': 1.5, 'b': 3}\n\n data_custom = [CustomDict(d)]\n data = [d]\n\n result_custom = DataFrame(data_custom)\n result = DataFrame(data)\n assert_frame_equal(result, result_custom)\n\n def test_constructor_ragged(self):\n data = {'A': randn(10),\n 'B': randn(8)}\n with assertRaisesRegexp(ValueError, 'arrays must all be same length'):\n DataFrame(data)\n\n def test_constructor_scalar(self):\n idx = Index(lrange(3))\n df = DataFrame({\"a\": 0}, index=idx)\n expected = DataFrame({\"a\": [0, 0, 0]}, index=idx)\n assert_frame_equal(df, expected, check_dtype=False)\n\n def test_constructor_Series_copy_bug(self):\n df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])\n df.copy()\n\n def test_constructor_mixed_dict_and_Series(self):\n data = {}\n data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}\n data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])\n\n result = DataFrame(data)\n self.assertTrue(result.index.is_monotonic)\n\n # ordering ambiguous, raise exception\n with assertRaisesRegexp(ValueError, 'ambiguous ordering'):\n DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})\n\n # this is OK though\n result = DataFrame({'A': ['a', 'b'],\n 'B': Series(['a', 'b'], index=['a', 'b'])})\n expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},\n index=['a', 'b'])\n assert_frame_equal(result, expected)\n\n def test_constructor_tuples(self):\n result = DataFrame({'A': [(1, 2), (3, 4)]})\n expected = DataFrame({'A': Series([(1, 2), (3, 4)])})\n assert_frame_equal(result, expected)\n\n def test_constructor_namedtuples(self):\n # GH11181\n from collections import namedtuple\n named_tuple = namedtuple(\"Pandas\", list('ab'))\n tuples = [named_tuple(1, 3), named_tuple(2, 4)]\n expected = DataFrame({'a': [1, 2], 'b': [3, 4]})\n result = DataFrame(tuples)\n assert_frame_equal(result, expected)\n\n # with columns\n expected = DataFrame({'y': [1, 2], 'z': [3, 4]})\n result = DataFrame(tuples, columns=['y', 'z'])\n assert_frame_equal(result, expected)\n\n def test_constructor_orient(self):\n data_dict = self.mixed_frame.T._series\n recons = DataFrame.from_dict(data_dict, orient='index')\n expected = self.mixed_frame.sort_index()\n assert_frame_equal(recons, expected)\n\n # dict of sequence\n a = {'hi': [32, 3, 3],\n 'there': [3, 5, 3]}\n rs = DataFrame.from_dict(a, orient='index')\n xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))\n assert_frame_equal(rs, xp)\n\n def test_constructor_Series_named(self):\n a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')\n df = DataFrame(a)\n self.assertEqual(df.columns[0], 'x')\n self.assertTrue(df.index.equals(a.index))\n\n # ndarray like\n arr = np.random.randn(10)\n s = Series(arr,name='x')\n df = DataFrame(s)\n expected = DataFrame(dict(x = s))\n assert_frame_equal(df,expected)\n\n s = Series(arr,index=range(3,13))\n df = DataFrame(s)\n expected = DataFrame({ 0 : s })\n assert_frame_equal(df,expected)\n\n self.assertRaises(ValueError, DataFrame, s, columns=[1,2])\n\n # #2234\n a = Series([], name='x')\n df = DataFrame(a)\n self.assertEqual(df.columns[0], 'x')\n\n # series with name and w/o\n s1 = Series(arr,name='x')\n df = DataFrame([s1, arr]).T\n expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0'])\n assert_frame_equal(df,expected)\n\n # this is a bit non-intuitive here; the series 
collapse down to arrays\n df = DataFrame([arr, s1]).T\n expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1])\n assert_frame_equal(df,expected)\n\n def test_constructor_Series_differently_indexed(self):\n # name\n s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')\n\n # no name\n s2 = Series([1, 2, 3], index=['a', 'b', 'c'])\n\n other_index = Index(['a', 'b'])\n\n df1 = DataFrame(s1, index=other_index)\n exp1 = DataFrame(s1.reindex(other_index))\n self.assertEqual(df1.columns[0], 'x')\n assert_frame_equal(df1, exp1)\n\n df2 = DataFrame(s2, index=other_index)\n exp2 = DataFrame(s2.reindex(other_index))\n self.assertEqual(df2.columns[0], 0)\n self.assertTrue(df2.index.equals(other_index))\n assert_frame_equal(df2, exp2)\n\n def test_constructor_manager_resize(self):\n index = list(self.frame.index[:5])\n columns = list(self.frame.columns[:3])\n\n result = DataFrame(self.frame._data, index=index,\n columns=columns)\n self.assert_numpy_array_equal(result.index, index)\n self.assert_numpy_array_equal(result.columns, columns)\n\n def test_constructor_from_items(self):\n items = [(c, self.frame[c]) for c in self.frame.columns]\n recons = DataFrame.from_items(items)\n assert_frame_equal(recons, self.frame)\n\n # pass some columns\n recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])\n assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])\n\n # orient='index'\n\n row_items = [(idx, self.mixed_frame.xs(idx))\n for idx in self.mixed_frame.index]\n\n recons = DataFrame.from_items(row_items,\n columns=self.mixed_frame.columns,\n orient='index')\n assert_frame_equal(recons, self.mixed_frame)\n self.assertEqual(recons['A'].dtype, np.float64)\n\n with tm.assertRaisesRegexp(TypeError,\n \"Must pass columns with orient='index'\"):\n DataFrame.from_items(row_items, orient='index')\n\n # orient='index', but thar be tuples\n arr = lib.list_to_object_array(\n [('bar', 'baz')] * len(self.mixed_frame))\n self.mixed_frame['foo'] = arr\n row_items = [(idx, list(self.mixed_frame.xs(idx)))\n for idx in self.mixed_frame.index]\n recons = DataFrame.from_items(row_items,\n columns=self.mixed_frame.columns,\n orient='index')\n assert_frame_equal(recons, self.mixed_frame)\n tm.assertIsInstance(recons['foo'][0], tuple)\n\n rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],\n orient='index', columns=['one', 'two', 'three'])\n xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],\n columns=['one', 'two', 'three'])\n assert_frame_equal(rs, xp)\n\n def test_constructor_mix_series_nonseries(self):\n df = DataFrame({'A': self.frame['A'],\n 'B': list(self.frame['B'])}, columns=['A', 'B'])\n assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])\n\n with tm.assertRaisesRegexp(ValueError, 'does not match index length'):\n DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})\n\n def test_constructor_miscast_na_int_dtype(self):\n df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)\n expected = DataFrame([[np.nan, 1], [1, 0]])\n assert_frame_equal(df, expected)\n\n def test_constructor_iterator_failure(self):\n with assertRaisesRegexp(TypeError, 'iterator'):\n df = DataFrame(iter([1, 2, 3]))\n\n def test_constructor_column_duplicates(self):\n # it works! 
#2079\n df = DataFrame([[8, 5]], columns=['a', 'a'])\n edf = DataFrame([[8, 5]])\n edf.columns = ['a', 'a']\n\n assert_frame_equal(df, edf)\n\n idf = DataFrame.from_items(\n [('a', [8]), ('a', [5])], columns=['a', 'a'])\n assert_frame_equal(idf, edf)\n\n self.assertRaises(ValueError, DataFrame.from_items,\n [('a', [8]), ('a', [5]), ('b', [6])],\n columns=['b', 'a', 'a'])\n\n def test_constructor_empty_with_string_dtype(self):\n # GH 9428\n expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)\n\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)\n assert_frame_equal(df, expected)\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)\n assert_frame_equal(df, expected)\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)\n assert_frame_equal(df, expected)\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')\n assert_frame_equal(df, expected)\n\n\n def test_column_dups_operations(self):\n\n def check(result, expected=None):\n if expected is not None:\n assert_frame_equal(result,expected)\n result.dtypes\n str(result)\n\n # assignment\n # GH 3687\n arr = np.random.randn(3, 2)\n idx = lrange(2)\n df = DataFrame(arr, columns=['A', 'A'])\n df.columns = idx\n expected = DataFrame(arr,columns=idx)\n check(df,expected)\n\n idx = date_range('20130101',periods=4,freq='Q-NOV')\n df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])\n df.columns = idx\n expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)\n check(df,expected)\n\n # insert\n df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])\n df['string'] = 'bah'\n expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])\n check(df,expected)\n with assertRaisesRegexp(ValueError, 'Length of value'):\n df.insert(0, 'AnotherColumn', range(len(df.index) - 1))\n\n # insert same dtype\n df['foo2'] = 3\n expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])\n check(df,expected)\n\n # set (non-dup)\n df['foo2'] = 4\n expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])\n check(df,expected)\n df['foo2'] = 3\n\n # delete (non dup)\n del df['bar']\n expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])\n check(df,expected)\n\n # try to delete again (its not consolidated)\n del df['hello']\n expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])\n check(df,expected)\n\n # consolidate\n df = df.consolidate()\n expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])\n check(df,expected)\n\n # insert\n df.insert(2,'new_col',5.)\n expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])\n check(df,expected)\n\n # insert a dup\n assertRaisesRegexp(ValueError, 'cannot insert', df.insert, 2, 'new_col', 4.)\n df.insert(2,'new_col',4.,allow_duplicates=True)\n expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])\n check(df,expected)\n\n # delete (dup)\n del df['foo']\n expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])\n assert_frame_equal(df,expected)\n\n # dup across dtypes\n df = 
DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])\n check(df)\n\n df['foo2'] = 7.\n expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])\n check(df,expected)\n\n result = df['foo']\n expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])\n check(result,expected)\n\n # multiple replacements\n df['foo'] = 'string'\n expected = DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])\n check(df,expected)\n\n del df['foo']\n expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])\n check(df,expected)\n\n # values\n df = DataFrame([[1,2.5],[3,4.5]], index=[1,2], columns=['x','x'])\n result = df.values\n expected = np.array([[1,2.5],[3,4.5]])\n self.assertTrue((result == expected).all().all())\n\n # rename, GH 4403\n df4 = DataFrame({'TClose': [22.02],\n 'RT': [0.0454],\n 'TExg': [0.0422]},\n index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date']))\n\n df5 = DataFrame({'STK_ID': [600809] * 3,\n 'RPT_Date': [20120930,20121231,20130331],\n 'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],\n 'TClose': [38.05, 41.66, 30.01]},\n index=MultiIndex.from_tuples([(600809, 20120930), (600809, 20121231),(600809,20130331)], names=['STK_ID', 'RPT_Date']))\n\n k = pd.merge(df4,df5,how='inner',left_index=True,right_index=True)\n result = k.rename(columns={'TClose_x':'TClose', 'TClose_y':'QT_Close'})\n str(result)\n result.dtypes\n\n expected = DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809, u('饡驦'), 30.01 ]],\n columns=['RT','TClose','TExg','RPT_Date','STK_ID','STK_Name','QT_Close']).set_index(['STK_ID','RPT_Date'],drop=False)\n assert_frame_equal(result,expected)\n\n # reindex is invalid!\n df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])\n self.assertRaises(ValueError, df.reindex, columns=['bar'])\n self.assertRaises(ValueError, df.reindex, columns=['bar','foo'])\n\n # drop\n df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])\n result = df.drop(['a'],axis=1)\n expected = DataFrame([[1],[1],[1]],columns=['bar'])\n check(result,expected)\n result = df.drop('a',axis=1)\n check(result,expected)\n\n # describe\n df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64')\n result = df.describe()\n s = df.iloc[:,0].describe()\n expected = pd.concat([ s, s, s],keys=df.columns,axis=1)\n check(result,expected)\n\n # check column dups with index equal and not equal to df's index\n df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'A'])\n for index in [df.index, pd.Index(list('edcba'))]:\n this_df = df.copy()\n expected_ser = pd.Series(index.values, index=this_df.index)\n expected_df = DataFrame.from_items([('A', expected_ser),\n ('B', this_df['B']),\n ('A', expected_ser)])\n this_df['A'] = index\n check(this_df, expected_df)\n\n # operations\n for op in ['__add__','__mul__','__sub__','__truediv__']:\n df = DataFrame(dict(A = np.arange(10), B = np.random.rand(10)))\n expected = getattr(df,op)(df)\n expected.columns = ['A','A']\n df.columns = ['A','A']\n result = getattr(df,op)(df)\n check(result,expected)\n\n # multiple assignments that change dtypes\n # the location indexer is a slice\n # GH 6120\n df = DataFrame(np.random.randn(5,2), columns=['that', 'that'])\n expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])\n\n df['that'] = 1.0\n check(df, expected)\n\n df = 
DataFrame(np.random.rand(5,2), columns=['that', 'that'])\n expected = DataFrame(1, index=range(5), columns=['that', 'that'])\n\n df['that'] = 1\n check(df, expected)\n\n def test_column_dups2(self):\n\n # drop buggy GH 6240\n df = DataFrame({'A' : np.random.randn(5),\n 'B' : np.random.randn(5),\n 'C' : np.random.randn(5),\n 'D' : ['a','b','c','d','e'] })\n\n expected = df.take([0,1,1], axis=1)\n df2 = df.take([2,0,1,2,1], axis=1)\n result = df2.drop('C',axis=1)\n assert_frame_equal(result, expected)\n\n # dropna\n df = DataFrame({'A' : np.random.randn(5),\n 'B' : np.random.randn(5),\n 'C' : np.random.randn(5),\n 'D' : ['a','b','c','d','e'] })\n df.iloc[2,[0,1,2]] = np.nan\n df.iloc[0,0] = np.nan\n df.iloc[1,1] = np.nan\n df.iloc[:,3] = np.nan\n expected = df.dropna(subset=['A','B','C'],how='all')\n expected.columns = ['A','A','B','C']\n\n df.columns = ['A','A','B','C']\n\n result = df.dropna(subset=['A','C'],how='all')\n assert_frame_equal(result, expected)\n\n def test_column_dups_indexing(self):\n def check(result, expected=None):\n if expected is not None:\n assert_frame_equal(result,expected)\n result.dtypes\n str(result)\n\n # boolean indexing\n # GH 4879\n dups = ['A', 'A', 'C', 'D']\n df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')\n expected = df[df.C > 6]\n expected.columns = dups\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n result = df[df.C > 6]\n check(result,expected)\n\n # where\n df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')\n expected = df[df > 6]\n expected.columns = dups\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n result = df[df > 6]\n check(result,expected)\n\n # boolean with the duplicate raises\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n self.assertRaises(ValueError, lambda : df[df.A > 6])\n\n # dup aligining operations should work\n # GH 5185\n df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])\n df2 = DataFrame([1, 2, 3], index=[1, 2, 3])\n expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3])\n result = df1.sub(df2)\n assert_frame_equal(result,expected)\n\n # equality\n df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B'])\n df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A'])\n\n # not-comparing like-labelled\n self.assertRaises(ValueError, lambda : df1 == df2)\n\n df1r = df1.reindex_like(df2)\n result = df1r == df2\n expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])\n assert_frame_equal(result,expected)\n\n # mixed column selection\n # GH 5639\n dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),\n 'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),\n 'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})\n expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)\n result = dfbool[['one', 'three', 'one']]\n check(result,expected)\n\n # multi-axis dups\n # GH 6121\n df = DataFrame(np.arange(25.).reshape(5,5),\n index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'C', 'D', 'E'])\n z = df[['A', 'C', 'A']].copy()\n expected = z.ix[['a', 'c', 'a']]\n\n df = DataFrame(np.arange(25.).reshape(5,5),\n index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'C', 'D', 'E'])\n z = df[['A', 'C', 'A']]\n result = z.ix[['a', 'c', 'a']]\n check(result,expected)\n\n\n def test_column_dups_indexing2(self):\n\n # GH 8363\n # datetime ops with a 
non-unique index\n df = DataFrame({'A' : np.arange(5,dtype='int64'),\n 'B' : np.arange(1,6,dtype='int64')},\n index=[2,2,3,3,4])\n result = df.B-df.A\n expected = Series(1,index=[2,2,3,3,4])\n assert_series_equal(result,expected)\n\n df = DataFrame({'A' : date_range('20130101',periods=5), 'B' : date_range('20130101 09:00:00', periods=5)},index=[2,2,3,3,4])\n result = df.B-df.A\n expected = Series(Timedelta('9 hours'),index=[2,2,3,3,4])\n assert_series_equal(result,expected)\n\n def test_insert_benchmark(self):\n # from the vb_suite/frame_methods/frame_insert_columns\n N = 10\n K = 5\n df = DataFrame(index=lrange(N))\n new_col = np.random.randn(N)\n for i in range(K):\n df[i] = new_col\n expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=lrange(N))\n assert_frame_equal(df,expected)\n\n def test_constructor_single_value(self):\n\n # expecting single value upcasting here\n df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])\n assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('float64'), df.index,\n df.columns))\n\n df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])\n assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'), df.index,\n df.columns))\n\n\n df = DataFrame('a', index=[1, 2], columns=['a', 'c'])\n assert_frame_equal(df, DataFrame(np.array([['a', 'a'],\n ['a', 'a']],\n dtype=object),\n index=[1, 2],\n columns=['a', 'c']))\n\n self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])\n self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])\n with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):\n DataFrame('a', [1, 2], ['a', 'c'], float)\n\n def test_constructor_with_datetimes(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n # single item\n df = DataFrame({'A' : 1, 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp(\"20010101\"), 'E' : datetime(2001,1,2,0,0) },\n index=np.arange(10))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, datetime64name: 2, objectname : 2})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # check with ndarray construction ndim==0 (e.g. 
we are passing a ndim 0 ndarray with a dtype specified)\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array(1.,dtype=floatname),\n intname : np.array(1,dtype=intname)}, index=np.arange(10))\n result = df.get_dtype_counts()\n expected = { objectname : 1 }\n if intname == 'int64':\n expected['int64'] = 2\n else:\n expected['int64'] = 1\n expected[intname] = 1\n if floatname == 'float64':\n expected['float64'] = 2\n else:\n expected['float64'] = 1\n expected[floatname] = 1\n\n result.sort_index()\n expected = Series(expected)\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # check with ndarray construction ndim>0\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname),\n intname : np.array([1]*10,dtype=intname)}, index=np.arange(10))\n result = df.get_dtype_counts()\n result.sort_index()\n assert_series_equal(result, expected)\n\n # GH 2809\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n datetime_s = Series(datetimes)\n self.assertEqual(datetime_s.dtype, 'M8[ns]')\n df = DataFrame({'datetime_s':datetime_s})\n result = df.get_dtype_counts()\n expected = Series({ datetime64name : 1 })\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # GH 2810\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n dates = [ts.date() for ts in ind]\n df = DataFrame({'datetimes': datetimes, 'dates':dates})\n result = df.get_dtype_counts()\n expected = Series({ datetime64name : 1, objectname : 1 })\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # GH 7594\n # don't coerce tz-aware\n import pytz\n tz = pytz.timezone('US/Eastern')\n dt = tz.localize(datetime(2012, 1, 1))\n\n df = DataFrame({'End Date': dt}, index=[0])\n self.assertEqual(df.iat[0,0],dt)\n assert_series_equal(df.dtypes,Series({'End Date' : 'datetime64[ns, US/Eastern]' }))\n\n df = DataFrame([{'End Date': dt}])\n self.assertEqual(df.iat[0,0],dt)\n assert_series_equal(df.dtypes,Series({'End Date' : 'datetime64[ns, US/Eastern]' }))\n\n # tz-aware (UTC and other tz's)\n # GH 8411\n dr = date_range('20130101',periods=3)\n df = DataFrame({ 'value' : dr})\n self.assertTrue(df.iat[0,0].tz is None)\n dr = date_range('20130101',periods=3,tz='UTC')\n df = DataFrame({ 'value' : dr})\n self.assertTrue(str(df.iat[0,0].tz) == 'UTC')\n dr = date_range('20130101',periods=3,tz='US/Eastern')\n df = DataFrame({ 'value' : dr})\n self.assertTrue(str(df.iat[0,0].tz) == 'US/Eastern')\n\n # GH 7822\n # preserver an index with a tz on dict construction\n i = date_range('1/1/2011', periods=5, freq='10s', tz = 'US/Eastern')\n\n expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True) })\n df = DataFrame()\n df['a'] = i\n assert_frame_equal(df, expected)\n\n df = DataFrame( {'a' : i } )\n assert_frame_equal(df, expected)\n\n # multiples\n i_no_tz = date_range('1/1/2011', periods=5, freq='10s')\n df = DataFrame( {'a' : i, 'b' : i_no_tz } )\n expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True), 'b': i_no_tz })\n assert_frame_equal(df, expected)\n\n def test_constructor_with_datetime_tz(self):\n\n # 8260\n # support datetime64 with tz\n\n idx = Index(date_range('20130101',periods=3,tz='US/Eastern'),\n name='foo')\n dr = date_range('20130110',periods=3)\n\n # construction\n df = DataFrame({'A' : idx, 'B' : dr})\n self.assertTrue(df['A'].dtype,'M8[ns, 
US/Eastern')\n self.assertTrue(df['A'].name == 'A')\n assert_series_equal(df['A'],Series(idx,name='A'))\n assert_series_equal(df['B'],Series(dr,name='B'))\n\n # construction from dict\n df2 = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5))\n assert_series_equal(df2.dtypes, Series(['datetime64[ns, US/Eastern]', 'datetime64[ns, CET]'], index=['A','B']))\n\n # dtypes\n tzframe = DataFrame({'A' : date_range('20130101',periods=3),\n 'B' : date_range('20130101',periods=3,tz='US/Eastern'),\n 'C' : date_range('20130101',periods=3,tz='CET')})\n tzframe.iloc[1,1] = pd.NaT\n tzframe.iloc[1,2] = pd.NaT\n result = tzframe.dtypes.sort_index()\n expected = Series([ np.dtype('datetime64[ns]'),\n DatetimeTZDtype('datetime64[ns, US/Eastern]'),\n DatetimeTZDtype('datetime64[ns, CET]') ],\n ['A','B','C'])\n\n # concat\n df3 = pd.concat([df2.A.to_frame(),df2.B.to_frame()],axis=1)\n assert_frame_equal(df2, df3)\n\n # select_dtypes\n result = df3.select_dtypes(include=['datetime64[ns]'])\n expected = df3.reindex(columns=[])\n assert_frame_equal(result, expected)\n\n # this will select based on issubclass, and these are the same class\n result = df3.select_dtypes(include=['datetime64[ns, CET]'])\n expected = df3\n assert_frame_equal(result, expected)\n\n # from index\n idx2 = date_range('20130101',periods=3,tz='US/Eastern',name='foo')\n df2 = DataFrame(idx2)\n assert_series_equal(df2['foo'],Series(idx2,name='foo'))\n df2 = DataFrame(Series(idx2))\n assert_series_equal(df2['foo'],Series(idx2,name='foo'))\n\n idx2 = date_range('20130101',periods=3,tz='US/Eastern')\n df2 = DataFrame(idx2)\n assert_series_equal(df2[0],Series(idx2,name=0))\n df2 = DataFrame(Series(idx2))\n assert_series_equal(df2[0],Series(idx2,name=0))\n\n # interleave with object\n result = self.tzframe.assign(D = 'foo').values\n expected = np.array([[Timestamp('2013-01-01 00:00:00'),\n Timestamp('2013-01-02 00:00:00'),\n Timestamp('2013-01-03 00:00:00')],\n [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),\n pd.NaT,\n Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],\n [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),\n pd.NaT,\n Timestamp('2013-01-03 00:00:00+0100', tz='CET')],\n ['foo','foo','foo']], dtype=object).T\n self.assert_numpy_array_equal(result, expected)\n\n # interleave with only datetime64[ns]\n result = self.tzframe.values\n expected = np.array([[Timestamp('2013-01-01 00:00:00'),\n Timestamp('2013-01-02 00:00:00'),\n Timestamp('2013-01-03 00:00:00')],\n [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),\n pd.NaT,\n Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],\n [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),\n pd.NaT,\n Timestamp('2013-01-03 00:00:00+0100', tz='CET')]], dtype=object).T\n self.assert_numpy_array_equal(result, expected)\n\n # astype\n expected = np.array([[Timestamp('2013-01-01 00:00:00'),\n Timestamp('2013-01-02 00:00:00'),\n Timestamp('2013-01-03 00:00:00')],\n [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),\n pd.NaT,\n Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],\n [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),\n pd.NaT,\n Timestamp('2013-01-03 00:00:00+0100', tz='CET')]], dtype=object).T\n result = self.tzframe.astype(object)\n assert_frame_equal(result, DataFrame(expected, index=self.tzframe.index, columns=self.tzframe.columns))\n\n result = self.tzframe.astype('datetime64[ns]')\n expected = DataFrame({'A' : date_range('20130101',periods=3),\n 'B' : 
date_range('20130101',periods=3,tz='US/Eastern').tz_convert('UTC').tz_localize(None),\n 'C' : date_range('20130101',periods=3,tz='CET').tz_convert('UTC').tz_localize(None)})\n expected.iloc[1,1] = pd.NaT\n expected.iloc[1,2] = pd.NaT\n assert_frame_equal(result, expected)\n\n # str formatting\n result = self.tzframe.astype(str)\n expected = np.array([['2013-01-01', '2013-01-01 00:00:00-05:00',\n '2013-01-01 00:00:00+01:00'],\n ['2013-01-02', 'NaT', 'NaT'],\n ['2013-01-03', '2013-01-03 00:00:00-05:00',\n '2013-01-03 00:00:00+01:00']], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n result = str(self.tzframe)\n self.assertTrue('0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00' in result)\n self.assertTrue('1 2013-01-02 NaT NaT' in result)\n self.assertTrue('2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00' in result)\n\n # setitem\n df['C'] = idx\n assert_series_equal(df['C'],Series(idx,name='C'))\n\n df['D'] = 'foo'\n df['D'] = idx\n assert_series_equal(df['D'],Series(idx,name='D'))\n del df['D']\n\n # assert that A & C are not sharing the same base (e.g. they\n # are copies)\n b1 = df._data.blocks[1]\n b2 = df._data.blocks[2]\n self.assertTrue(b1.values.equals(b2.values))\n self.assertFalse(id(b1.values.values.base) == id(b2.values.values.base))\n\n # with nan\n df2 = df.copy()\n df2.iloc[1,1] = pd.NaT\n df2.iloc[1,2] = pd.NaT\n result = df2['B']\n assert_series_equal(notnull(result), Series([True,False,True],name='B'))\n assert_series_equal(df2.dtypes, df.dtypes)\n\n # set/reset\n df = DataFrame({'A' : [0,1,2] }, index=idx)\n result = df.reset_index()\n self.assertTrue(result['foo'].dtype,'M8[ns, US/Eastern')\n\n result = result.set_index('foo')\n tm.assert_index_equal(df.index,idx)\n\n def test_constructor_for_list_with_dtypes(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n # test list of lists/ndarrays\n df = DataFrame([np.arange(5) for x in range(5)])\n result = df.get_dtype_counts()\n expected = Series({'int64' : 5})\n\n df = DataFrame([np.array(np.arange(5),dtype='int32') for x in range(5)])\n result = df.get_dtype_counts()\n expected = Series({'int32' : 5})\n\n # overflow issue? (we always expecte int64 upcasting here)\n df = DataFrame({'a' : [2**31,2**31+1]})\n result = df.get_dtype_counts()\n expected = Series({'int64' : 1 })\n assert_series_equal(result, expected)\n\n # GH #2751 (construction with no index specified), make sure we cast to platform values\n df = DataFrame([1, 2])\n result = df.get_dtype_counts()\n expected = Series({'int64': 1 })\n assert_series_equal(result, expected)\n\n df = DataFrame([1.,2.])\n result = df.get_dtype_counts()\n expected = Series({'float64' : 1 })\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : [1, 2]})\n result = df.get_dtype_counts()\n expected = Series({'int64' : 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : [1., 2.]})\n result = df.get_dtype_counts()\n expected = Series({'float64' : 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : 1 }, index=lrange(3))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : 1. 
}, index=lrange(3))\n result = df.get_dtype_counts()\n expected = Series({'float64': 1 })\n assert_series_equal(result, expected)\n\n # with object list\n df = DataFrame({'a':[1,2,4,7], 'b':[1.2, 2.3, 5.1, 6.3],\n 'c':list('abcd'), 'd':[datetime(2000,1,1) for i in range(4)],\n 'e' : [1.,2,4.,7]})\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n def test_not_hashable(self):\n df = pd.DataFrame([1])\n self.assertRaises(TypeError, hash, df)\n self.assertRaises(TypeError, hash, self.empty)\n\n def test_timedeltas(self):\n\n df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),\n B = Series([ timedelta(days=i) for i in range(3) ])))\n result = df.get_dtype_counts().sort_values()\n expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 }).sort_values()\n assert_series_equal(result, expected)\n\n df['C'] = df['A'] + df['B']\n expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 }).sort_values()\n result = df.get_dtype_counts().sort_values()\n assert_series_equal(result, expected)\n\n # mixed int types\n df['D'] = 1\n expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 }).sort_values()\n result = df.get_dtype_counts().sort_values()\n assert_series_equal(result, expected)\n\n def test_operators_timedelta64(self):\n\n from datetime import timedelta\n df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),\n B = date_range('2012-1-2', periods=3, freq='D'),\n C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))\n\n diffs = DataFrame(dict(A = df['A']-df['C'],\n B = df['A']-df['B']))\n\n\n # min\n result = diffs.min()\n self.assertEqual(result[0], diffs.ix[0,'A'])\n self.assertEqual(result[1], diffs.ix[0,'B'])\n\n result = diffs.min(axis=1)\n self.assertTrue((result == diffs.ix[0,'B']).all() == True)\n\n # max\n result = diffs.max()\n self.assertEqual(result[0], diffs.ix[2,'A'])\n self.assertEqual(result[1], diffs.ix[2,'B'])\n\n result = diffs.max(axis=1)\n self.assertTrue((result == diffs['A']).all() == True)\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = DataFrame(dict(A = df['A']-df['C'],\n B = df['B']-df['A']))\n assert_frame_equal(result,expected)\n assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed['C'] = 'foo'\n mixed['D'] = 1\n mixed['E'] = 1.\n mixed['F'] = Timestamp('20130101')\n\n # results in an object array\n from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type\n result = mixed.min()\n expected = Series([_coerce_scalar_to_timedelta_type(timedelta(seconds=5*60+5)),\n _coerce_scalar_to_timedelta_type(timedelta(days=-1)),\n 'foo',\n 1,\n 1.0,\n Timestamp('20130101')],\n index=mixed.columns)\n assert_series_equal(result,expected)\n\n # excludes numeric\n result = mixed.min(axis=1)\n expected = Series([1, 1, 1.],index=[0, 1, 2])\n assert_series_equal(result,expected)\n\n # works when only those columns are selected\n result = mixed[['A','B']].min(1)\n expected = Series([ timedelta(days=-1) ] * 3)\n assert_series_equal(result,expected)\n\n result = mixed[['A','B']].min()\n expected = Series([ timedelta(seconds=5*60+5), timedelta(days=-1) ],index=['A','B'])\n assert_series_equal(result,expected)\n\n # GH 3106\n df = DataFrame({'time' : date_range('20130102',periods=5),\n 'time2' : date_range('20130105',periods=5) })\n df['off1'] = df['time2']-df['time']\n 
self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')\n\n df['off2'] = df['time']-df['time2']\n df._consolidate_inplace()\n self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')\n self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')\n\n def test_datetimelike_setitem_with_inference(self):\n # GH 7592\n # assignment of timedeltas with NaT\n\n one_hour = timedelta(hours=1)\n df = DataFrame(index=date_range('20130101',periods=4))\n df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]')\n df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]')\n df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]')\n df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]')\n df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]')\n df['F'] = np.timedelta64('NaT')\n df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]')\n df.ix[-3:,'G'] = date_range('20130101',periods=3)\n df['H'] = np.datetime64('NaT')\n result = df.dtypes\n expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))\n assert_series_equal(result,expected)\n\n def test_setitem_datetime_coercion(self):\n # GH 1048\n df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')]*3})\n df.loc[0:1, 'c'] = np.datetime64('2008-08-08')\n self.assertEqual(pd.Timestamp('2008-08-08'), df.loc[0, 'c'])\n self.assertEqual(pd.Timestamp('2008-08-08'), df.loc[1, 'c'])\n df.loc[2, 'c'] = date(2005, 5, 5)\n self.assertEqual(pd.Timestamp('2005-05-05'), df.loc[2, 'c'])\n\n\n def test_new_empty_index(self):\n df1 = DataFrame(randn(0, 3))\n df2 = DataFrame(randn(0, 3))\n df1.index.name = 'foo'\n self.assertIsNone(df2.index.name)\n\n def test_astype(self):\n casted = self.frame.astype(int)\n expected = DataFrame(self.frame.values.astype(int),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n casted = self.frame.astype(np.int32)\n expected = DataFrame(self.frame.values.astype(np.int32),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n self.frame['foo'] = '5'\n casted = self.frame.astype(int)\n expected = DataFrame(self.frame.values.astype(int),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n # mixed casting\n def _check_cast(df, v):\n self.assertEqual(list(set([ s.dtype.name for _, s in compat.iteritems(df) ]))[0], v)\n\n mn = self.all_mixed._get_numeric_data().copy()\n mn['little_float'] = np.array(12345.,dtype='float16')\n mn['big_float'] = np.array(123456789101112.,dtype='float64')\n\n casted = mn.astype('float64')\n _check_cast(casted, 'float64')\n\n casted = mn.astype('int64')\n _check_cast(casted, 'int64')\n\n casted = self.mixed_float.reindex(columns = ['A','B']).astype('float32')\n _check_cast(casted, 'float32')\n\n casted = mn.reindex(columns = ['little_float']).astype('float16')\n _check_cast(casted, 'float16')\n\n casted = self.mixed_float.reindex(columns = ['A','B']).astype('float16')\n _check_cast(casted, 'float16')\n\n casted = mn.astype('float32')\n _check_cast(casted, 'float32')\n\n casted = mn.astype('int32')\n _check_cast(casted, 'int32')\n\n # to object\n casted = mn.astype('O')\n _check_cast(casted, 'object')\n\n def test_astype_with_exclude_string(self):\n df = self.frame.copy()\n expected = self.frame.astype(int)\n df['string'] = 'foo'\n casted = df.astype(int, raise_on_error = False)\n\n expected['string'] = 'foo'\n assert_frame_equal(casted, expected)\n\n df = self.frame.copy()\n expected = self.frame.astype(np.int32)\n df['string'] = 'foo'\n casted = 
df.astype(np.int32, raise_on_error = False)\n\n expected['string'] = 'foo'\n assert_frame_equal(casted, expected)\n\n def test_astype_with_view(self):\n\n tf = self.mixed_float.reindex(columns = ['A','B','C'])\n\n casted = tf.astype(np.int64)\n\n casted = tf.astype(np.float32)\n\n # this is the only real reason to do it this way\n tf = np.round(self.frame).astype(np.int32)\n casted = tf.astype(np.float32, copy = False)\n\n tf = self.frame.astype(np.float64)\n casted = tf.astype(np.int64, copy = False)\n\n def test_astype_cast_nan_int(self):\n df = DataFrame(data={\"Values\": [1.0, 2.0, 3.0, np.nan]})\n self.assertRaises(ValueError, df.astype, np.int64)\n\n def test_astype_str(self):\n # GH9757\n a = Series(date_range('2010-01-04', periods=5))\n b = Series(date_range('3/6/2012 00:00', periods=5, tz='US/Eastern'))\n c = Series([Timedelta(x, unit='d') for x in range(5)])\n d = Series(range(5))\n e = Series([0.0, 0.2, 0.4, 0.6, 0.8])\n\n df = DataFrame({'a' : a, 'b' : b, 'c' : c, 'd' : d, 'e' : e})\n\n # datetimelike\n # Test str and unicode on python 2.x and just str on python 3.x\n for tt in set([str, compat.text_type]):\n result = df.astype(tt)\n\n expected = DataFrame({\n 'a' : list(map(tt, map(lambda x: Timestamp(x)._date_repr, a._values))),\n 'b' : list(map(tt, map(Timestamp, b._values))),\n 'c' : list(map(tt, map(lambda x: Timedelta(x)._repr_base(format='all'), c._values))),\n 'd' : list(map(tt, d._values)),\n 'e' : list(map(tt, e._values)),\n })\n\n assert_frame_equal(result, expected)\n\n # float/nan\n # 11302\n # consistency in astype(str)\n for tt in set([str, compat.text_type]):\n result = DataFrame([np.NaN]).astype(tt)\n expected = DataFrame(['nan'])\n assert_frame_equal(result, expected)\n\n result = DataFrame([1.12345678901234567890]).astype(tt)\n expected = DataFrame(['1.12345678901'])\n assert_frame_equal(result, expected)\n\n def test_array_interface(self):\n result = np.sqrt(self.frame)\n tm.assertIsInstance(result, type(self.frame))\n self.assertIs(result.index, self.frame.index)\n self.assertIs(result.columns, self.frame.columns)\n\n assert_frame_equal(result, self.frame.apply(np.sqrt))\n\n def test_pickle(self):\n unpickled = self.round_trip_pickle(self.mixed_frame)\n assert_frame_equal(self.mixed_frame, unpickled)\n\n # buglet\n self.mixed_frame._data.ndim\n\n # empty\n unpickled = self.round_trip_pickle(self.empty)\n repr(unpickled)\n\n # tz frame\n unpickled = self.round_trip_pickle(self.tzframe)\n assert_frame_equal(self.tzframe, unpickled)\n\n def test_to_dict(self):\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n recons_data = DataFrame(test_data).to_dict()\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][k2])\n\n recons_data = DataFrame(test_data).to_dict(\"l\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][int(k2) - 1])\n\n recons_data = DataFrame(test_data).to_dict(\"s\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][k2])\n\n recons_data = DataFrame(test_data).to_dict(\"sp\")\n\n expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],\n 'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}\n\n tm.assert_almost_equal(recons_data, expected_split)\n\n recons_data = DataFrame(test_data).to_dict(\"r\")\n\n expected_records = [{'A': 1.0, 'B': '1'},\n {'A': 2.0, 'B': '2'},\n {'A': nan, 'B': '3'}]\n\n 
tm.assert_almost_equal(recons_data, expected_records)\n\n # GH10844\n recons_data = DataFrame(test_data).to_dict(\"i\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k2][k])\n\n def test_to_dict_timestamp(self):\n\n # GH11247\n # split/records producing np.datetime64 rather than Timestamps\n # on datetime64[ns] dtypes only\n\n tsmp = Timestamp('20130101')\n test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})\n test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})\n\n expected_records = [{'A': tsmp, 'B': tsmp},\n {'A': tsmp, 'B': tsmp}]\n expected_records_mixed = [{'A': tsmp, 'B': 1},\n {'A': tsmp, 'B': 2}]\n\n tm.assert_almost_equal(test_data.to_dict(\n orient='records'), expected_records)\n tm.assert_almost_equal(test_data_mixed.to_dict(\n orient='records'), expected_records_mixed)\n\n expected_series = {\n 'A': Series([tsmp, tsmp]),\n 'B': Series([tsmp, tsmp]),\n }\n expected_series_mixed = {\n 'A': Series([tsmp, tsmp]),\n 'B': Series([1, 2]),\n }\n\n tm.assert_almost_equal(test_data.to_dict(\n orient='series'), expected_series)\n tm.assert_almost_equal(test_data_mixed.to_dict(\n orient='series'), expected_series_mixed)\n\n expected_split = {\n 'index': [0, 1],\n 'data': [[tsmp, tsmp],\n [tsmp, tsmp]],\n 'columns': ['A', 'B']\n }\n expected_split_mixed = {\n 'index': [0, 1],\n 'data': [[tsmp, 1],\n [tsmp, 2]],\n 'columns': ['A', 'B']\n }\n\n tm.assert_almost_equal(test_data.to_dict(\n orient='split'), expected_split)\n tm.assert_almost_equal(test_data_mixed.to_dict(\n orient='split'), expected_split_mixed)\n\n def test_to_dict_invalid_orient(self):\n df = DataFrame({'A':[0, 1]})\n self.assertRaises(ValueError, df.to_dict, orient='xinvalid')\n\n def test_to_records_dt64(self):\n df = DataFrame([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]],\n index=date_range(\"2012-01-01\", \"2012-01-02\"))\n self.assertEqual(df.to_records()['index'][0], df.index[0])\n\n rs = df.to_records(convert_datetime64=False)\n self.assertEqual(rs['index'][0], df.index.values[0])\n\n def test_to_records_with_multindex(self):\n # GH3189\n index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n data = np.zeros((8, 4))\n df = DataFrame(data, index=index)\n r = df.to_records(index=True)['level_0']\n self.assertTrue('bar' in r)\n self.assertTrue('one' not in r)\n\n def test_to_records_with_Mapping_type(self):\n import email\n from email.parser import Parser\n import collections\n\n collections.Mapping.register(email.message.Message)\n\n headers = Parser().parsestr('From: <user@example.com>\\n'\n 'To: <someone_else@example.com>\\n'\n 'Subject: Test message\\n'\n '\\n'\n 'Body would go here\\n')\n\n frame = DataFrame.from_records([headers])\n all( x in frame for x in ['Type','Subject','From'])\n\n def test_from_records_to_records(self):\n # from numpy documentation\n arr = np.zeros((2,), dtype=('i4,f4,a10'))\n arr[:] = [(1, 2., 'Hello'), (2, 3., \"World\")]\n\n frame = DataFrame.from_records(arr)\n\n index = np.arange(len(arr))[::-1]\n indexed_frame = DataFrame.from_records(arr, index=index)\n self.assert_numpy_array_equal(indexed_frame.index, index)\n\n # without names, it should go to last ditch\n arr2 = np.zeros((2,3))\n tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))\n\n # wrong length\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)'\n with assertRaisesRegexp(ValueError, msg):\n 
DataFrame.from_records(arr, index=index[:-1])\n\n indexed_frame = DataFrame.from_records(arr, index='f1')\n\n # what to do?\n records = indexed_frame.to_records()\n self.assertEqual(len(records.dtype.names), 3)\n\n records = indexed_frame.to_records(index=False)\n self.assertEqual(len(records.dtype.names), 2)\n self.assertNotIn('index', records.dtype.names)\n\n def test_from_records_nones(self):\n tuples = [(1, 2, None, 3),\n (1, 2, None, 3),\n (None, 2, 5, 3)]\n\n df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])\n self.assertTrue(np.isnan(df['c'][0]))\n\n def test_from_records_iterator(self):\n arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6), (7., 7., 8, 8)],\n dtype=[('x', np.float64), ('u', np.float32), ('y', np.int64), ('z', np.int32) ])\n df = DataFrame.from_records(iter(arr), nrows=2)\n xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),\n 'u': np.array([1.0, 3.0], dtype=np.float32),\n 'y': np.array([2, 4], dtype=np.int64),\n 'z': np.array([2, 4], dtype=np.int32)})\n assert_frame_equal(df.reindex_like(xp), xp)\n\n # no dtypes specified here, so just compare with the default\n arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]\n df = DataFrame.from_records(iter(arr), columns=['x', 'y'],\n nrows=2)\n assert_frame_equal(df, xp.reindex(columns=['x','y']), check_dtype=False)\n\n def test_from_records_tuples_generator(self):\n def tuple_generator(length):\n for i in range(length):\n letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n yield (i, letters[i % len(letters)], i/length)\n\n columns_names = ['Integer', 'String', 'Float']\n columns = [[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))]\n data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = tuple_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n assert_frame_equal(result, expected)\n\n def test_from_records_lists_generator(self):\n def list_generator(length):\n for i in range(length):\n letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n yield [i, letters[i % len(letters)], i/length]\n\n columns_names = ['Integer', 'String', 'Float']\n columns = [[i[j] for i in list_generator(10)] for j in range(len(columns_names))]\n data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = list_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n assert_frame_equal(result, expected)\n\n def test_from_records_columns_not_modified(self):\n tuples = [(1, 2, 3),\n (1, 2, 3),\n (2, 5, 3)]\n\n columns = ['a', 'b', 'c']\n original_columns = list(columns)\n df = DataFrame.from_records(tuples, columns=columns, index='a')\n self.assertEqual(columns, original_columns)\n\n def test_from_records_decimal(self):\n from decimal import Decimal\n\n tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]\n\n df = DataFrame.from_records(tuples, columns=['a'])\n self.assertEqual(df['a'].dtype, object)\n\n df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)\n self.assertEqual(df['a'].dtype, np.float64)\n self.assertTrue(np.isnan(df['a'].values[-1]))\n\n def test_from_records_duplicates(self):\n result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],\n columns=['a', 'b', 'a'])\n\n expected = DataFrame([(1, 2, 3), (4, 5, 6)],\n columns=['a', 'b', 'a'])\n\n assert_frame_equal(result, expected)\n\n def test_from_records_set_index_name(self):\n def create_dict(order_id):\n return 
{'order_id': order_id, 'quantity': np.random.randint(1, 10),\n 'price': np.random.randint(1, 10)}\n documents = [create_dict(i) for i in range(10)]\n # demo missing data\n documents.append({'order_id': 10, 'quantity': 5})\n\n result = DataFrame.from_records(documents, index='order_id')\n self.assertEqual(result.index.name, 'order_id')\n\n # MultiIndex\n result = DataFrame.from_records(documents,\n index=['order_id', 'quantity'])\n self.assertEqual(result.index.names, ('order_id', 'quantity'))\n\n def test_from_records_misc_brokenness(self):\n # #2179\n\n data = {1: ['foo'], 2: ['bar']}\n\n result = DataFrame.from_records(data, columns=['a', 'b'])\n exp = DataFrame(data, columns=['a', 'b'])\n assert_frame_equal(result, exp)\n\n # overlap in index/index_names\n\n data = {'a': [1, 2, 3], 'b': [4, 5, 6]}\n\n result = DataFrame.from_records(data, index=['a', 'b', 'c'])\n exp = DataFrame(data, index=['a', 'b', 'c'])\n assert_frame_equal(result, exp)\n\n\n # GH 2623\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj\n df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])\n results = df2_obj.get_dtype_counts()\n expected = Series({ 'datetime64[ns]' : 1, 'object' : 1 })\n\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 1])\n df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])\n results = df2_obj.get_dtype_counts()\n expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })\n\n def test_from_records_empty(self):\n # 3562\n result = DataFrame.from_records([], columns=['a','b','c'])\n expected = DataFrame(columns=['a','b','c'])\n assert_frame_equal(result, expected)\n\n result = DataFrame.from_records([], columns=['a','b','b'])\n expected = DataFrame(columns=['a','b','b'])\n assert_frame_equal(result, expected)\n\n def test_from_records_empty_with_nonempty_fields_gh3682(self):\n a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])\n df = DataFrame.from_records(a, index='id')\n assert_numpy_array_equal(df.index, Index([1], name='id'))\n self.assertEqual(df.index.name, 'id')\n assert_numpy_array_equal(df.columns, Index(['value']))\n\n b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])\n df = DataFrame.from_records(b, index='id')\n assert_numpy_array_equal(df.index, Index([], name='id'))\n self.assertEqual(df.index.name, 'id')\n\n def test_from_records_with_datetimes(self):\n if sys.version < LooseVersion('2.7'):\n raise nose.SkipTest('rec arrays dont work properly with py2.6')\n\n # this may fail on certain platforms because of a numpy issue\n # related GH6140\n if not is_little_endian():\n raise nose.SkipTest(\"known failure of test on non-little endian\")\n\n # construction with a null in a recarray\n # GH 6140\n expected = DataFrame({ 'EXPIRY' : [datetime(2005, 3, 1, 0, 0), None ]})\n\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [('EXPIRY', '<M8[ns]')]\n\n try:\n recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)\n except (ValueError):\n raise nose.SkipTest(\"known failure of numpy rec array creation\")\n\n result = DataFrame.from_records(recarray)\n assert_frame_equal(result,expected)\n\n # coercion should work too\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [('EXPIRY', '<M8[m]')]\n recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)\n result = DataFrame.from_records(recarray)\n assert_frame_equal(result,expected)\n\n def test_to_records_floats(self):\n df = 
DataFrame(np.random.rand(10, 10))\n df.to_records()\n\n def test_to_recods_index_name(self):\n df = DataFrame(np.random.randn(3, 3))\n df.index.name = 'X'\n rs = df.to_records()\n self.assertIn('X', rs.dtype.fields)\n\n df = DataFrame(np.random.randn(3, 3))\n rs = df.to_records()\n self.assertIn('index', rs.dtype.fields)\n\n df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])\n df.index.names = ['A', None]\n rs = df.to_records()\n self.assertIn('level_0', rs.dtype.fields)\n\n def test_join_str_datetime(self):\n str_dates = ['20120209', '20120222']\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n A = DataFrame(str_dates, index=lrange(2), columns=['aa'])\n C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)\n\n tst = A.join(C, on='aa')\n\n self.assertEqual(len(tst.columns), 3)\n\n def test_join_multiindex_leftright(self):\n # GH 10741\n df1 = pd.DataFrame([['a', 'x', 0.471780], ['a','y', 0.774908],\n ['a', 'z', 0.563634], ['b', 'x', -0.353756],\n ['b', 'y', 0.368062], ['b', 'z', -1.721840],\n ['c', 'x', 1], ['c', 'y', 2], ['c', 'z', 3]],\n columns=['first', 'second', 'value1']).set_index(['first', 'second'])\n df2 = pd.DataFrame([['a', 10], ['b', 20]], columns=['first', 'value2']).set_index(['first'])\n\n exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],\n [-0.353756, 20], [0.368062, 20], [-1.721840, 20],\n [1.000000, np.nan], [2.000000, np.nan], [3.000000, np.nan]],\n index=df1.index, columns=['value1', 'value2'])\n\n # these must be the same results (but columns are flipped)\n tm.assert_frame_equal(df1.join(df2, how='left'), exp)\n tm.assert_frame_equal(df2.join(df1, how='right'), exp[['value2', 'value1']])\n\n exp_idx = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y', 'z']],\n names=['first', 'second'])\n exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],\n [-0.353756, 20], [0.368062, 20], [-1.721840, 20]],\n index=exp_idx, columns=['value1', 'value2'])\n\n tm.assert_frame_equal(df1.join(df2, how='right'), exp)\n tm.assert_frame_equal(df2.join(df1, how='left'), exp[['value2', 'value1']])\n\n def test_from_records_sequencelike(self):\n df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),\n 'A1': np.array(np.random.randn(6), dtype = np.float64),\n 'B' : np.array(np.arange(6), dtype = np.int64),\n 'C' : ['foo'] * 6,\n 'D' : np.array([True, False] * 3, dtype=bool),\n 'E' : np.array(np.random.randn(6), dtype = np.float32),\n 'E1': np.array(np.random.randn(6), dtype = np.float32),\n 'F' : np.array(np.arange(6), dtype = np.int32) })\n\n # this is actually tricky to create the recordlike arrays and have the dtypes be intact\n blocks = df.blocks\n tuples = []\n columns = []\n dtypes = []\n for dtype, b in compat.iteritems(blocks):\n columns.extend(b.columns)\n dtypes.extend([ (c,np.dtype(dtype).descr[0][1]) for c in b.columns ])\n for i in range(len(df.index)):\n tup = []\n for _, b in compat.iteritems(blocks):\n tup.extend(b.iloc[i].values)\n tuples.append(tuple(tup))\n\n recarray = np.array(tuples, dtype=dtypes).view(np.recarray)\n recarray2 = df.to_records()\n lists = [list(x) for x in tuples]\n\n # tuples (lose the dtype info)\n result = DataFrame.from_records(tuples, columns=columns).reindex(columns=df.columns)\n\n # created recarray and with to_records recarray (have dtype info)\n result2 = DataFrame.from_records(recarray, columns=columns).reindex(columns=df.columns)\n result3 = DataFrame.from_records(recarray2, columns=columns).reindex(columns=df.columns)\n\n # list of tupels (no dtype 
info)\n result4 = DataFrame.from_records(lists, columns=columns).reindex(columns=df.columns)\n\n assert_frame_equal(result, df, check_dtype=False)\n assert_frame_equal(result2, df)\n assert_frame_equal(result3, df)\n assert_frame_equal(result4, df, check_dtype=False)\n\n # tuples is in the order of the columns\n result = DataFrame.from_records(tuples)\n self.assert_numpy_array_equal(result.columns, lrange(8))\n\n # test exclude parameter & we are casting the results here (as we don't have dtype info to recover)\n columns_to_test = [ columns.index('C'), columns.index('E1') ]\n\n exclude = list(set(range(8))-set(columns_to_test))\n result = DataFrame.from_records(tuples, exclude=exclude)\n result.columns = [ columns[i] for i in sorted(columns_to_test) ]\n assert_series_equal(result['C'], df['C'])\n assert_series_equal(result['E1'], df['E1'].astype('float64'))\n\n # empty case\n result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])\n self.assertEqual(len(result), 0)\n self.assert_numpy_array_equal(result.columns, ['foo', 'bar', 'baz'])\n\n result = DataFrame.from_records([])\n self.assertEqual(len(result), 0)\n self.assertEqual(len(result.columns), 0)\n\n def test_from_records_dictlike(self):\n\n # test the dict methods\n df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),\n 'A1': np.array(np.random.randn(6), dtype = np.float64),\n 'B' : np.array(np.arange(6), dtype = np.int64),\n 'C' : ['foo'] * 6,\n 'D' : np.array([True, False] * 3, dtype=bool),\n 'E' : np.array(np.random.randn(6), dtype = np.float32),\n 'E1': np.array(np.random.randn(6), dtype = np.float32),\n 'F' : np.array(np.arange(6), dtype = np.int32) })\n\n # columns is in a different order here than the actual items iterated from the dict\n columns = []\n for dtype, b in compat.iteritems(df.blocks):\n columns.extend(b.columns)\n\n asdict = dict((x, y) for x, y in compat.iteritems(df))\n asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))\n\n # dict of series & dict of ndarrays (have dtype info)\n results = []\n results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))\n results.append(DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns))\n results.append(DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns))\n\n for r in results:\n assert_frame_equal(r, df)\n\n def test_from_records_with_index_data(self):\n df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n\n data = np.random.randn(10)\n df1 = DataFrame.from_records(df, index=data)\n assert(df1.index.equals(Index(data)))\n\n def test_from_records_bad_index_column(self):\n df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n\n # should pass\n df1 = DataFrame.from_records(df, index=['C'])\n assert(df1.index.equals(Index(df.C)))\n\n df1 = DataFrame.from_records(df, index='C')\n assert(df1.index.equals(Index(df.C)))\n\n # should fail\n self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])\n self.assertRaises(KeyError, DataFrame.from_records, df, index=2)\n\n def test_from_records_non_tuple(self):\n class Record(object):\n\n def __init__(self, *args):\n self.args = args\n\n def __getitem__(self, i):\n return self.args[i]\n\n def __iter__(self):\n return iter(self.args)\n\n recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]\n tups = lmap(tuple, recs)\n\n result = DataFrame.from_records(recs)\n expected = DataFrame.from_records(tups)\n assert_frame_equal(result, expected)\n\n def test_from_records_len0_with_columns(self):\n # #2633\n 
result = DataFrame.from_records([], index='foo',\n columns=['foo', 'bar'])\n\n self.assertTrue(np.array_equal(result.columns, ['bar']))\n self.assertEqual(len(result), 0)\n self.assertEqual(result.index.name, 'foo')\n\n def test_get_agg_axis(self):\n cols = self.frame._get_agg_axis(0)\n self.assertIs(cols, self.frame.columns)\n\n idx = self.frame._get_agg_axis(1)\n self.assertIs(idx, self.frame.index)\n\n self.assertRaises(ValueError, self.frame._get_agg_axis, 2)\n\n def test_nonzero(self):\n self.assertTrue(self.empty.empty)\n\n self.assertFalse(self.frame.empty)\n self.assertFalse(self.mixed_frame.empty)\n\n # corner case\n df = DataFrame({'A': [1., 2., 3.],\n 'B': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['A']\n self.assertFalse(df.empty)\n\n def test_repr_empty(self):\n buf = StringIO()\n\n # empty\n foo = repr(self.empty)\n\n # empty with index\n frame = DataFrame(index=np.arange(1000))\n foo = repr(frame)\n\n def test_repr_mixed(self):\n buf = StringIO()\n\n # mixed\n foo = repr(self.mixed_frame)\n self.mixed_frame.info(verbose=False, buf=buf)\n\n @slow\n def test_repr_mixed_big(self):\n # big mixed\n biggie = DataFrame({'A': randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n biggie.loc[:20,'A'] = nan\n biggie.loc[:20,'B'] = nan\n\n foo = repr(biggie)\n\n def test_repr(self):\n buf = StringIO()\n\n # small one\n foo = repr(self.frame)\n self.frame.info(verbose=False, buf=buf)\n\n # even smaller\n self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)\n self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)\n\n # exhausting cases in DataFrame.info\n\n # columns but no index\n no_index = DataFrame(columns=[0, 1, 3])\n foo = repr(no_index)\n\n # no columns or index\n self.empty.info(buf=buf)\n\n df = DataFrame([\"a\\n\\r\\tb\"], columns=[\"a\\n\\r\\td\"], index=[\"a\\n\\r\\tf\"])\n self.assertFalse(\"\\t\" in repr(df))\n self.assertFalse(\"\\r\" in repr(df))\n self.assertFalse(\"a\\n\" in repr(df))\n\n def test_repr_dimensions(self):\n df = DataFrame([[1, 2,], [3, 4]])\n with option_context('display.show_dimensions', True):\n self.assertTrue(\"2 rows x 2 columns\" in repr(df))\n\n with option_context('display.show_dimensions', False):\n self.assertFalse(\"2 rows x 2 columns\" in repr(df))\n\n with option_context('display.show_dimensions', 'truncate'):\n self.assertFalse(\"2 rows x 2 columns\" in repr(df))\n\n @slow\n def test_repr_big(self):\n buf = StringIO()\n\n # big one\n biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),\n index=lrange(200))\n foo = repr(biggie)\n\n def test_repr_unsortable(self):\n # columns are not sortable\n import warnings\n warn_filters = warnings.filters\n warnings.filterwarnings('ignore',\n category=FutureWarning,\n module=\".*format\")\n\n unsortable = DataFrame({'foo': [1] * 50,\n datetime.today(): [1] * 50,\n 'bar': ['bar'] * 50,\n datetime.today(\n ) + timedelta(1): ['bar'] * 50},\n index=np.arange(50))\n foo = repr(unsortable)\n\n fmt.set_option('display.precision', 3, 'display.column_space', 10)\n repr(self.frame)\n\n fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)\n repr(self.frame)\n\n fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)\n repr(self.frame)\n\n self.reset_display_options()\n\n warnings.filters = warn_filters\n\n def test_repr_unicode(self):\n uval = u('\\u03c3\\u03c3\\u03c3\\u03c3')\n bval = uval.encode('utf-8')\n df = DataFrame({'A': [uval, uval]})\n\n result = repr(df)\n ex_top = ' A'\n self.assertEqual(result.split('\\n')[0].rstrip(), ex_top)\n\n 
df = DataFrame({'A': [uval, uval]})\n result = repr(df)\n self.assertEqual(result.split('\\n')[0].rstrip(), ex_top)\n\n def test_unicode_string_with_unicode(self):\n df = DataFrame({'A': [u(\"\\u05d0\")]})\n\n if compat.PY3:\n str(df)\n else:\n compat.text_type(df)\n\n def test_bytestring_with_unicode(self):\n df = DataFrame({'A': [u(\"\\u05d0\")]})\n if compat.PY3:\n bytes(df)\n else:\n str(df)\n\n def test_very_wide_info_repr(self):\n df = DataFrame(np.random.randn(10, 20),\n columns=tm.rands_array(10, 20))\n repr(df)\n\n def test_repr_column_name_unicode_truncation_bug(self):\n # #1906\n df = DataFrame({'Id': [7117434],\n 'StringCol': ('Is it possible to modify drop plot code'\n ' so that the output graph is displayed '\n 'in iphone simulator, Is it possible to '\n 'modify drop plot code so that the '\n 'output graph is \\xe2\\x80\\xa8displayed '\n 'in iphone simulator.Now we are adding '\n 'the CSV file externally. I want to Call'\n ' the File through the code..')})\n\n result = repr(df)\n self.assertIn('StringCol', result)\n\n def test_head_tail(self):\n assert_frame_equal(self.frame.head(), self.frame[:5])\n assert_frame_equal(self.frame.tail(), self.frame[-5:])\n assert_frame_equal(self.frame.head(0), self.frame)\n assert_frame_equal(self.frame.tail(0), self.frame)\n assert_frame_equal(self.frame.head(-1), self.frame[:-1])\n assert_frame_equal(self.frame.tail(-1), self.frame[1:])\n assert_frame_equal(self.frame.head(1), self.frame[:1])\n assert_frame_equal(self.frame.tail(1), self.frame[-1:])\n # with a float index\n df = self.frame.copy()\n df.index = np.arange(len(self.frame)) + 0.1\n assert_frame_equal(df.head(), df.iloc[:5])\n assert_frame_equal(df.tail(), df.iloc[-5:])\n assert_frame_equal(df.head(0), df)\n assert_frame_equal(df.tail(0), df)\n assert_frame_equal(df.head(-1), df.iloc[:-1])\n assert_frame_equal(df.tail(-1), df.iloc[1:])\n #test empty dataframe\n empty_df = DataFrame()\n assert_frame_equal(empty_df.tail(), empty_df)\n assert_frame_equal(empty_df.head(), empty_df)\n\n def test_insert(self):\n df = DataFrame(np.random.randn(5, 3), index=np.arange(5),\n columns=['c', 'b', 'a'])\n\n df.insert(0, 'foo', df['a'])\n self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])\n assert_almost_equal(df['a'], df['foo'])\n\n df.insert(2, 'bar', df['c'])\n self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'bar', 'b', 'a'])\n assert_almost_equal(df['c'], df['bar'])\n\n # diff dtype\n\n # new item\n df['x'] = df['a'].astype('float32')\n result = Series(dict(float64 = 5, float32 = 1))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n # replacing current (in different block)\n df['a'] = df['a'].astype('float32')\n result = Series(dict(float64 = 4, float32 = 2))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n df['y'] = df['a'].astype('int32')\n result = Series(dict(float64 = 4, float32 = 2, int32 = 1))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n with assertRaisesRegexp(ValueError, 'already exists'):\n df.insert(1, 'a', df['b'])\n self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])\n\n df.columns.name = 'some_name'\n # preserve columns name field\n df.insert(0, 'baz', df['c'])\n self.assertEqual(df.columns.name, 'some_name')\n\n def test_delitem(self):\n del self.frame['A']\n self.assertNotIn('A', self.frame)\n\n def test_pop(self):\n self.frame.columns.name = 'baz'\n\n A = self.frame.pop('A')\n self.assertNotIn('A', self.frame)\n\n self.frame['foo'] = 'bar'\n foo = self.frame.pop('foo')\n self.assertNotIn('foo', 
self.frame)\n # TODO self.assertEqual(self.frame.columns.name, 'baz')\n\n # 10912\n # inplace ops cause caching issue\n a = DataFrame([[1,2,3],[4,5,6]], columns=['A','B','C'], index=['X','Y'])\n b = a.pop('B')\n b += 1\n\n # original frame\n expected = DataFrame([[1,3],[4,6]], columns=['A','C'], index=['X','Y'])\n assert_frame_equal(a, expected)\n\n # result\n expected = Series([2,5],index=['X','Y'],name='B')+1\n assert_series_equal(b, expected)\n\n def test_pop_non_unique_cols(self):\n df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})\n df.columns = [\"a\", \"b\", \"a\"]\n\n res = df.pop(\"a\")\n self.assertEqual(type(res), DataFrame)\n self.assertEqual(len(res), 2)\n self.assertEqual(len(df.columns), 1)\n self.assertTrue(\"b\" in df.columns)\n self.assertFalse(\"a\" in df.columns)\n self.assertEqual(len(df.index), 2)\n\n def test_iter(self):\n self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))\n\n def test_iterrows(self):\n for i, (k, v) in enumerate(self.frame.iterrows()):\n exp = self.frame.xs(self.frame.index[i])\n assert_series_equal(v, exp)\n\n for i, (k, v) in enumerate(self.mixed_frame.iterrows()):\n exp = self.mixed_frame.xs(self.mixed_frame.index[i])\n assert_series_equal(v, exp)\n\n def test_itertuples(self):\n for i, tup in enumerate(self.frame.itertuples()):\n s = Series(tup[1:])\n s.name = tup[0]\n expected = self.frame.ix[i, :].reset_index(drop=True)\n assert_series_equal(s, expected)\n\n df = DataFrame({'floats': np.random.randn(5),\n 'ints': lrange(5)}, columns=['floats', 'ints'])\n\n for tup in df.itertuples(index=False):\n tm.assertIsInstance(tup[1], np.integer)\n\n df = DataFrame(data={\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n dfaa = df[['a', 'a']]\n self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])\n\n tup = next(df.itertuples(name='TestName'))\n\n # no support for field renaming in Python 2.6, regular tuples are returned\n if sys.version >= LooseVersion('2.7'):\n self.assertEqual(tup._fields, ('Index', 'a', 'b'))\n self.assertEqual((tup.Index, tup.a, tup.b), tup)\n self.assertEqual(type(tup).__name__, 'TestName')\n\n df.columns = ['def', 'return']\n tup2 = next(df.itertuples(name='TestName'))\n self.assertEqual(tup2, (0, 1, 4))\n\n if sys.version >= LooseVersion('2.7'):\n self.assertEqual(tup2._fields, ('Index', '_1', '_2'))\n\n df3 = DataFrame(dict(('f'+str(i), [i]) for i in range(1024)))\n # will raise SyntaxError if trying to create namedtuple\n tup3 = next(df3.itertuples())\n self.assertFalse(hasattr(tup3, '_fields'))\n self.assertIsInstance(tup3, tuple)\n\n def test_len(self):\n self.assertEqual(len(self.frame), len(self.frame.index))\n\n def test_operators(self):\n garbage = random.random(4)\n colSeries = Series(garbage, index=np.array(self.frame.columns))\n\n idSum = self.frame + self.frame\n seriesSum = self.frame + colSeries\n\n for col, series in compat.iteritems(idSum):\n for idx, val in compat.iteritems(series):\n origVal = self.frame[col][idx] * 2\n if not np.isnan(val):\n self.assertEqual(val, origVal)\n else:\n self.assertTrue(np.isnan(origVal))\n\n for col, series in compat.iteritems(seriesSum):\n for idx, val in compat.iteritems(series):\n origVal = self.frame[col][idx] + colSeries[col]\n if not np.isnan(val):\n self.assertEqual(val, origVal)\n else:\n self.assertTrue(np.isnan(origVal))\n\n added = self.frame2 + self.frame2\n expected = self.frame2 * 2\n assert_frame_equal(added, expected)\n\n df = DataFrame({'a': ['a', None, 'b']})\n assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))\n\n 
# Test for issue #10181\n for dtype in ('float', 'int64'):\n frames = [\n DataFrame(dtype=dtype),\n DataFrame(columns=['A'], dtype=dtype),\n DataFrame(index=[0], dtype=dtype),\n ]\n for df in frames:\n self.assertTrue((df + df).equals(df))\n assert_frame_equal(df + df, df)\n\n def test_ops_np_scalar(self):\n vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]\n f = lambda x: DataFrame(x, index=list('ABCDE'),\n columns=['jim', 'joe', 'jolie'])\n\n df = f(vals)\n\n for x in xs:\n assert_frame_equal(df / np.array(x), f(vals / x))\n assert_frame_equal(np.array(x) * df, f(vals * x))\n assert_frame_equal(df + np.array(x), f(vals + x))\n assert_frame_equal(np.array(x) - df, f(x - vals))\n\n def test_operators_boolean(self):\n\n # GH 5808\n # empty frames, non-mixed dtype\n\n result = DataFrame(index=[1]) & DataFrame(index=[1])\n assert_frame_equal(result,DataFrame(index=[1]))\n\n result = DataFrame(index=[1]) | DataFrame(index=[1])\n assert_frame_equal(result,DataFrame(index=[1]))\n\n result = DataFrame(index=[1]) & DataFrame(index=[1,2])\n assert_frame_equal(result,DataFrame(index=[1,2]))\n\n result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(index=[1],columns=['A']))\n\n result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))\n\n result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))\n\n # boolean ops\n result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))\n\n def f():\n DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n self.assertRaises(TypeError, f)\n\n def f():\n DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n self.assertRaises(TypeError, f)\n\n def test_operators_none_as_na(self):\n df = DataFrame({\"col1\": [2, 5.0, 123, None],\n \"col2\": [1, 2, 3, 4]}, dtype=object)\n\n ops = [operator.add, operator.sub, operator.mul, operator.truediv]\n\n # since filling converts dtypes from object, changed expected to be object\n for op in ops:\n filled = df.fillna(np.nan)\n result = op(df, 3)\n expected = op(filled, 3).astype(object)\n expected[com.isnull(expected)] = None\n assert_frame_equal(result, expected)\n\n result = op(df, df)\n expected = op(filled, filled).astype(object)\n expected[com.isnull(expected)] = None\n assert_frame_equal(result, expected)\n\n result = op(df, df.fillna(7))\n assert_frame_equal(result, expected)\n\n result = op(df.fillna(7), df)\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_comparison_invalid(self):\n\n def check(df,df2):\n\n for (x, y) in [(df,df2),(df2,df)]:\n self.assertRaises(TypeError, lambda : x == y)\n self.assertRaises(TypeError, lambda : x != y)\n self.assertRaises(TypeError, lambda : x >= y)\n self.assertRaises(TypeError, lambda : x > y)\n self.assertRaises(TypeError, lambda : x < y)\n self.assertRaises(TypeError, lambda : x <= y)\n\n # GH4968\n # invalid date/int comparisons\n df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])\n df['dates'] = date_range('20010101', periods=len(df))\n\n df2 = df.copy()\n df2['dates'] = df['a']\n check(df,df2)\n\n df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])\n df2 = DataFrame({'a': 
date_range('20010101', periods=len(df)), 'b': date_range('20100101', periods=len(df))})\n check(df,df2)\n\n def test_timestamp_compare(self):\n # make sure we can compare Timestamps on the right AND left hand side\n # GH4982\n df = DataFrame({'dates1': date_range('20010101', periods=10),\n 'dates2': date_range('20010102', periods=10),\n 'intcol': np.random.randint(1000000000, size=10),\n 'floatcol': np.random.randn(10),\n 'stringcol': list(tm.rands(10))})\n df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT\n ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',\n 'ne': 'ne'}\n for left, right in ops.items():\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # no nats\n expected = left_f(df, Timestamp('20010109'))\n result = right_f(Timestamp('20010109'), df)\n tm.assert_frame_equal(result, expected)\n\n # nats\n expected = left_f(df, Timestamp('nat'))\n result = right_f(Timestamp('nat'), df)\n tm.assert_frame_equal(result, expected)\n\n def test_modulo(self):\n\n # GH3590, modulo as ints\n p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })\n\n ### this is technically wrong as the integer portion is coerced to float ###\n expected = DataFrame({ 'first' : Series([0,0,0,0],dtype='float64'), 'second' : Series([np.nan,np.nan,np.nan,0]) })\n result = p % p\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')\n result2.iloc[0:3,1] = np.nan\n assert_frame_equal(result2,expected)\n\n result = p % 0\n expected = DataFrame(np.nan,index=p.index,columns=p.columns)\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)\n assert_frame_equal(result2,expected)\n\n # not commutative with series\n p = DataFrame(np.random.randn(10, 5))\n s = p[0]\n res = s % p\n res2 = p % s\n self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))\n\n def test_div(self):\n\n # integer div, but deal with the 0's (GH 9144)\n p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })\n result = p / p\n\n expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),\n 'second': Series([nan, nan, nan, 1])})\n assert_frame_equal(result,expected)\n\n result2 = DataFrame(p.values.astype('float') / p.values, index=p.index,\n columns=p.columns)\n assert_frame_equal(result2,expected)\n\n result = p / 0\n expected = DataFrame(inf, index=p.index, columns=p.columns)\n expected.iloc[0:3, 1] = nan\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values.astype('float64') / 0, index=p.index,\n columns=p.columns)\n assert_frame_equal(result2,expected)\n\n p = DataFrame(np.random.randn(10, 5))\n s = p[0]\n res = s / p\n res2 = p / s\n self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))\n\n def test_logical_operators(self):\n\n def _check_bin_op(op):\n result = op(df1, df2)\n expected = DataFrame(op(df1.values, df2.values), index=df1.index,\n columns=df1.columns)\n self.assertEqual(result.values.dtype, np.bool_)\n assert_frame_equal(result, expected)\n\n def _check_unary_op(op):\n result = op(df1)\n expected = DataFrame(op(df1.values), index=df1.index,\n columns=df1.columns)\n self.assertEqual(result.values.dtype, np.bool_)\n assert_frame_equal(result, expected)\n\n df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},\n 'b': {'a': 
False, 'b': True, 'c': False,\n 'd': False, 'e': False},\n 'c': {'a': False, 'b': False, 'c': True,\n 'd': False, 'e': False},\n 'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},\n 'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}\n\n df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},\n 'b': {'a': False, 'b': True, 'c': False,\n 'd': False, 'e': False},\n 'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},\n 'd': {'a': False, 'b': False, 'c': False,\n 'd': True, 'e': False},\n 'e': {'a': False, 'b': False, 'c': False,\n 'd': False, 'e': True}}\n\n df1 = DataFrame(df1)\n df2 = DataFrame(df2)\n\n _check_bin_op(operator.and_)\n _check_bin_op(operator.or_)\n _check_bin_op(operator.xor)\n\n # operator.neg is deprecated in numpy >= 1.9\n _check_unary_op(operator.inv)\n\n def test_logical_typeerror(self):\n if not compat.PY3:\n self.assertRaises(TypeError, self.frame.__eq__, 'foo')\n self.assertRaises(TypeError, self.frame.__lt__, 'foo')\n self.assertRaises(TypeError, self.frame.__gt__, 'foo')\n self.assertRaises(TypeError, self.frame.__ne__, 'foo')\n else:\n raise nose.SkipTest('test_logical_typeerror not tested on PY3')\n\n def test_constructor_lists_to_object_dtype(self):\n # from #1074\n d = DataFrame({'a': [np.nan, False]})\n self.assertEqual(d['a'].dtype, np.object_)\n self.assertFalse(d['a'][1])\n\n def test_constructor_with_nas(self):\n # GH 5016\n # na's in indicies\n\n def check(df):\n for i in range(len(df.columns)):\n df.iloc[:,i]\n\n # allow single nans to succeed\n indexer = np.arange(len(df.columns))[isnull(df.columns)]\n\n if len(indexer) == 1:\n assert_series_equal(df.iloc[:,indexer[0]],df.loc[:,np.nan])\n\n\n # multiple nans should fail\n else:\n\n def f():\n df.loc[:,np.nan]\n self.assertRaises(TypeError, f)\n\n\n df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])\n check(df)\n\n df = DataFrame([[1,2,3],[4,5,6]], columns=[1.1,2.2,np.nan])\n check(df)\n\n df = DataFrame([[0,1,2,3],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])\n check(df)\n\n df = DataFrame([[0.0,1,2,3.0],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])\n check(df)\n\n def test_logical_with_nas(self):\n d = DataFrame({'a': [np.nan, False], 'b': [True, True]})\n\n # GH4947\n # bool comparisons should return bool\n result = d['a'] | d['b']\n expected = Series([False, True])\n assert_series_equal(result, expected)\n\n # GH4604, automatic casting here\n result = d['a'].fillna(False) | d['b']\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n result = d['a'].fillna(False,downcast=False) | d['b']\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n def test_neg(self):\n # what to do?\n assert_frame_equal(-self.frame, -1 * self.frame)\n\n def test_invert(self):\n assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))\n\n def test_first_last_valid(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n mat[-5:] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n index = frame.first_valid_index()\n\n self.assertEqual(index, frame.index[5])\n\n index = frame.last_valid_index()\n self.assertEqual(index, frame.index[-6])\n\n def test_arith_flex_frame(self):\n ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']\n if not compat.PY3:\n aliases = {}\n else:\n aliases = {'div': 'truediv'}\n\n for op in ops:\n try:\n alias = aliases.get(op, op)\n f = getattr(operator, alias)\n result = getattr(self.frame, op)(2 * self.frame)\n exp = f(self.frame, 2 * self.frame)\n 
assert_frame_equal(result, exp)\n\n # vs mix float\n result = getattr(self.mixed_float, op)(2 * self.mixed_float)\n exp = f(self.mixed_float, 2 * self.mixed_float)\n assert_frame_equal(result, exp)\n _check_mixed_float(result, dtype = dict(C = None))\n\n # vs mix int\n if op in ['add','sub','mul']:\n result = getattr(self.mixed_int, op)(2 + self.mixed_int)\n exp = f(self.mixed_int, 2 + self.mixed_int)\n\n # overflow in the uint\n dtype = None\n if op in ['sub']:\n dtype = dict(B = 'object', C = None)\n elif op in ['add','mul']:\n dtype = dict(C = None)\n assert_frame_equal(result, exp)\n _check_mixed_int(result, dtype = dtype)\n\n # rops\n r_f = lambda x, y: f(y, x)\n result = getattr(self.frame, 'r' + op)(2 * self.frame)\n exp = r_f(self.frame, 2 * self.frame)\n assert_frame_equal(result, exp)\n\n # vs mix float\n result = getattr(self.mixed_float, op)(2 * self.mixed_float)\n exp = f(self.mixed_float, 2 * self.mixed_float)\n assert_frame_equal(result, exp)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = getattr(self.intframe, op)(2 * self.intframe)\n exp = f(self.intframe, 2 * self.intframe)\n assert_frame_equal(result, exp)\n\n # vs mix int\n if op in ['add','sub','mul']:\n result = getattr(self.mixed_int, op)(2 + self.mixed_int)\n exp = f(self.mixed_int, 2 + self.mixed_int)\n\n # overflow in the uint\n dtype = None\n if op in ['sub']:\n dtype = dict(B = 'object', C = None)\n elif op in ['add','mul']:\n dtype = dict(C = None)\n assert_frame_equal(result, exp)\n _check_mixed_int(result, dtype = dtype)\n except:\n com.pprint_thing(\"Failing operation %r\" % op)\n raise\n\n # ndim >= 3\n ndim_5 = np.ones(self.frame.shape + (3, 4, 5))\n with assertRaisesRegexp(ValueError, 'shape'):\n f(self.frame, ndim_5)\n\n with assertRaisesRegexp(ValueError, 'shape'):\n getattr(self.frame, op)(ndim_5)\n\n\n # res_add = self.frame.add(self.frame)\n # res_sub = self.frame.sub(self.frame)\n # res_mul = self.frame.mul(self.frame)\n # res_div = self.frame.div(2 * self.frame)\n\n # assert_frame_equal(res_add, self.frame + self.frame)\n # assert_frame_equal(res_sub, self.frame - self.frame)\n # assert_frame_equal(res_mul, self.frame * self.frame)\n # assert_frame_equal(res_div, self.frame / (2 * self.frame))\n\n const_add = self.frame.add(1)\n assert_frame_equal(const_add, self.frame + 1)\n\n # corner cases\n result = self.frame.add(self.frame[:0])\n assert_frame_equal(result, self.frame * np.nan)\n\n result = self.frame[:0].add(self.frame)\n assert_frame_equal(result, self.frame * np.nan)\n with assertRaisesRegexp(NotImplementedError, 'fill_value'):\n self.frame.add(self.frame.iloc[0], fill_value=3)\n with assertRaisesRegexp(NotImplementedError, 'fill_value'):\n self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)\n\n def test_binary_ops_align(self):\n\n # test aligning binary ops\n\n # GH 6681\n index=MultiIndex.from_product([list('abc'),\n ['one','two','three'],\n [1,2,3]],\n names=['first','second','third'])\n\n df = DataFrame(np.arange(27*3).reshape(27,3),\n index=index,\n columns=['value1','value2','value3']).sortlevel()\n\n idx = pd.IndexSlice\n for op in ['add','sub','mul','div','truediv']:\n opa = getattr(operator,op,None)\n if opa is None:\n continue\n\n x = Series([ 1.0, 10.0, 100.0], [1,2,3])\n result = getattr(df,op)(x,level='third',axis=0)\n\n expected = pd.concat([ opa(df.loc[idx[:,:,i],:],v) for i, v in x.iteritems() ]).sortlevel()\n assert_frame_equal(result, expected)\n\n x = Series([ 1.0, 10.0], ['two','three'])\n result = getattr(df,op)(x,level='second',axis=0)\n\n 
expected = pd.concat([ opa(df.loc[idx[:,i],:],v) for i, v in x.iteritems() ]).reindex_like(df).sortlevel()\n assert_frame_equal(result, expected)\n\n ## GH9463 (alignment level of dataframe with series)\n\n midx = MultiIndex.from_product([['A', 'B'],['a', 'b']])\n df = DataFrame(np.ones((2,4), dtype='int64'), columns=midx)\n s = pd.Series({'a':1, 'b':2})\n\n df2 = df.copy()\n df2.columns.names = ['lvl0', 'lvl1']\n s2 = s.copy()\n s2.index.name = 'lvl1'\n\n # different cases of integer/string level names:\n res1 = df.mul(s, axis=1, level=1)\n res2 = df.mul(s2, axis=1, level=1)\n res3 = df2.mul(s, axis=1, level=1)\n res4 = df2.mul(s2, axis=1, level=1)\n res5 = df2.mul(s, axis=1, level='lvl1')\n res6 = df2.mul(s2, axis=1, level='lvl1')\n\n exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),\n columns=midx)\n\n for res in [res1, res2]:\n assert_frame_equal(res, exp)\n\n exp.columns.names = ['lvl0', 'lvl1']\n for res in [res3, res4, res5, res6]:\n assert_frame_equal(res, exp)\n\n def test_arith_mixed(self):\n\n left = DataFrame({'A': ['a', 'b', 'c'],\n 'B': [1, 2, 3]})\n\n result = left + left\n expected = DataFrame({'A': ['aa', 'bb', 'cc'],\n 'B': [2, 4, 6]})\n assert_frame_equal(result, expected)\n\n def test_arith_getitem_commute(self):\n df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})\n\n self._test_op(df, operator.add)\n self._test_op(df, operator.sub)\n self._test_op(df, operator.mul)\n self._test_op(df, operator.truediv)\n self._test_op(df, operator.floordiv)\n self._test_op(df, operator.pow)\n\n self._test_op(df, lambda x, y: y + x)\n self._test_op(df, lambda x, y: y - x)\n self._test_op(df, lambda x, y: y * x)\n self._test_op(df, lambda x, y: y / x)\n self._test_op(df, lambda x, y: y ** x)\n\n self._test_op(df, lambda x, y: x + y)\n self._test_op(df, lambda x, y: x - y)\n self._test_op(df, lambda x, y: x * y)\n self._test_op(df, lambda x, y: x / y)\n self._test_op(df, lambda x, y: x ** y)\n\n @staticmethod\n def _test_op(df, op):\n result = op(df, 1)\n\n if not df.columns.is_unique:\n raise ValueError(\"Only unique columns supported by this test\")\n\n for col in result.columns:\n assert_series_equal(result[col], op(df[col], 1))\n\n def test_bool_flex_frame(self):\n data = np.random.randn(5, 3)\n other_data = np.random.randn(5, 3)\n df = DataFrame(data)\n other = DataFrame(other_data)\n ndim_5 = np.ones(df.shape + (1, 3))\n\n # Unaligned\n def _check_unaligned_frame(meth, op, df, other):\n part_o = other.ix[3:, 1:].copy()\n rs = meth(part_o)\n xp = op(df, part_o.reindex(index=df.index, columns=df.columns))\n assert_frame_equal(rs, xp)\n\n # DataFrame\n self.assertTrue(df.eq(df).values.all())\n self.assertFalse(df.ne(df).values.any())\n for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:\n f = getattr(df, op)\n o = getattr(operator, op)\n # No NAs\n assert_frame_equal(f(other), o(df, other))\n _check_unaligned_frame(f, o, df, other)\n # ndarray\n assert_frame_equal(f(other.values), o(df, other.values))\n # scalar\n assert_frame_equal(f(0), o(df, 0))\n # NAs\n assert_frame_equal(f(np.nan), o(df, np.nan))\n with assertRaisesRegexp(ValueError, 'shape'):\n f(ndim_5)\n\n # Series\n def _test_seq(df, idx_ser, col_ser):\n idx_eq = df.eq(idx_ser, axis=0)\n col_eq = df.eq(col_ser)\n idx_ne = df.ne(idx_ser, axis=0)\n col_ne = df.ne(col_ser)\n assert_frame_equal(col_eq, df == Series(col_ser))\n assert_frame_equal(col_eq, -col_ne)\n assert_frame_equal(idx_eq, -idx_ne)\n assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)\n assert_frame_equal(col_eq, df.eq(list(col_ser)))\n 
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))\n assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))\n\n idx_gt = df.gt(idx_ser, axis=0)\n col_gt = df.gt(col_ser)\n idx_le = df.le(idx_ser, axis=0)\n col_le = df.le(col_ser)\n\n assert_frame_equal(col_gt, df > Series(col_ser))\n assert_frame_equal(col_gt, -col_le)\n assert_frame_equal(idx_gt, -idx_le)\n assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)\n\n idx_ge = df.ge(idx_ser, axis=0)\n col_ge = df.ge(col_ser)\n idx_lt = df.lt(idx_ser, axis=0)\n col_lt = df.lt(col_ser)\n assert_frame_equal(col_ge, df >= Series(col_ser))\n assert_frame_equal(col_ge, -col_lt)\n assert_frame_equal(idx_ge, -idx_lt)\n assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)\n\n idx_ser = Series(np.random.randn(5))\n col_ser = Series(np.random.randn(3))\n _test_seq(df, idx_ser, col_ser)\n\n\n # list/tuple\n _test_seq(df, idx_ser.values, col_ser.values)\n\n # NA\n df.ix[0, 0] = np.nan\n rs = df.eq(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.ne(df)\n self.assertTrue(rs.ix[0, 0])\n rs = df.gt(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.lt(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.ge(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.le(df)\n self.assertFalse(rs.ix[0, 0])\n\n\n\n # complex\n arr = np.array([np.nan, 1, 6, np.nan])\n arr2 = np.array([2j, np.nan, 7, None])\n df = DataFrame({'a': arr})\n df2 = DataFrame({'a': arr2})\n rs = df.gt(df2)\n self.assertFalse(rs.values.any())\n rs = df.ne(df2)\n self.assertTrue(rs.values.all())\n\n arr3 = np.array([2j, np.nan, None])\n df3 = DataFrame({'a': arr3})\n rs = df3.gt(2j)\n self.assertFalse(rs.values.any())\n\n # corner, dtype=object\n df1 = DataFrame({'col': ['foo', np.nan, 'bar']})\n df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})\n result = df1.ne(df2)\n exp = DataFrame({'col': [False, True, False]})\n assert_frame_equal(result, exp)\n\n def test_arith_flex_series(self):\n df = self.simple\n\n row = df.xs('a')\n col = df['two']\n # after arithmetic refactor, add truediv here\n ops = ['add', 'sub', 'mul', 'mod']\n for op in ops:\n f = getattr(df, op)\n op = getattr(operator, op)\n assert_frame_equal(f(row), op(df, row))\n assert_frame_equal(f(col, axis=0), op(df.T, col).T)\n\n # special case for some reason\n assert_frame_equal(df.add(row, axis=None), df + row)\n\n # cases which will be refactored after big arithmetic refactor\n assert_frame_equal(df.div(row), df / row)\n assert_frame_equal(df.div(col, axis=0), (df.T / col).T)\n\n # broadcasting issue in GH7325\n df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64')\n expected = DataFrame([[nan, inf], [1.0, 1.5], [1.0, 1.25]])\n result = df.div(df[0],axis='index')\n assert_frame_equal(result,expected)\n\n df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64')\n expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]])\n result = df.div(df[0],axis='index')\n assert_frame_equal(result,expected)\n\n def test_arith_non_pandas_object(self):\n df = self.simple\n\n val1 = df.xs('a').values\n added = DataFrame(df.values + val1, index=df.index, columns=df.columns)\n assert_frame_equal(df + val1, added)\n\n added = DataFrame((df.values.T + val1).T,\n index=df.index, columns=df.columns)\n assert_frame_equal(df.add(val1, axis=0), added)\n\n val2 = list(df['two'])\n\n added = DataFrame(df.values + val2, index=df.index, columns=df.columns)\n assert_frame_equal(df + val2, added)\n\n added = DataFrame((df.values.T + val2).T, index=df.index,\n columns=df.columns)\n assert_frame_equal(df.add(val2, axis='index'), added)\n\n val3 = 
np.random.rand(*df.shape)\n added = DataFrame(df.values + val3, index=df.index, columns=df.columns)\n assert_frame_equal(df.add(val3), added)\n\n def test_combineFrame(self):\n frame_copy = self.frame.reindex(self.frame.index[::2])\n\n del frame_copy['D']\n frame_copy['C'][:5] = nan\n\n added = self.frame + frame_copy\n tm.assert_dict_equal(added['A'].valid(),\n self.frame['A'] * 2,\n compare_keys=False)\n\n self.assertTrue(np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())\n\n # assert(False)\n\n self.assertTrue(np.isnan(added['D']).all())\n\n self_added = self.frame + self.frame\n self.assertTrue(self_added.index.equals(self.frame.index))\n\n added_rev = frame_copy + self.frame\n self.assertTrue(np.isnan(added['D']).all())\n\n # corner cases\n\n # empty\n plus_empty = self.frame + self.empty\n self.assertTrue(np.isnan(plus_empty.values).all())\n\n empty_plus = self.empty + self.frame\n self.assertTrue(np.isnan(empty_plus.values).all())\n\n empty_empty = self.empty + self.empty\n self.assertTrue(empty_empty.empty)\n\n # out of order\n reverse = self.frame.reindex(columns=self.frame.columns[::-1])\n\n assert_frame_equal(reverse + self.frame, self.frame * 2)\n\n # mix vs float64, upcast\n added = self.frame + self.mixed_float\n _check_mixed_float(added, dtype = 'float64')\n added = self.mixed_float + self.frame\n _check_mixed_float(added, dtype = 'float64')\n\n # mix vs mix\n added = self.mixed_float + self.mixed_float2\n _check_mixed_float(added, dtype = dict(C = None))\n added = self.mixed_float2 + self.mixed_float\n _check_mixed_float(added, dtype = dict(C = None))\n\n # with int\n added = self.frame + self.mixed_int\n _check_mixed_float(added, dtype = 'float64')\n\n def test_combineSeries(self):\n\n # Series\n series = self.frame.xs(self.frame.index[0])\n\n added = self.frame + series\n\n for key, s in compat.iteritems(added):\n assert_series_equal(s, self.frame[key] + series[key])\n\n larger_series = series.to_dict()\n larger_series['E'] = 1\n larger_series = Series(larger_series)\n larger_added = self.frame + larger_series\n\n for key, s in compat.iteritems(self.frame):\n assert_series_equal(larger_added[key], s + series[key])\n self.assertIn('E', larger_added)\n self.assertTrue(np.isnan(larger_added['E']).all())\n\n # vs mix (upcast) as needed\n added = self.mixed_float + series\n _check_mixed_float(added, dtype = 'float64')\n added = self.mixed_float + series.astype('float32')\n _check_mixed_float(added, dtype = dict(C = None))\n added = self.mixed_float + series.astype('float16')\n _check_mixed_float(added, dtype = dict(C = None))\n\n #### these raise with numexpr.....as we are adding an int64 to an uint64....weird\n # vs int\n #added = self.mixed_int + (100*series).astype('int64')\n #_check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C = 'int64', D = 'int64'))\n #added = self.mixed_int + (100*series).astype('int32')\n #_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64'))\n\n\n # TimeSeries\n ts = self.tsframe['A']\n\n # 10890\n # we no longer allow auto timeseries broadcasting\n # and require explict broadcasting\n added = self.tsframe.add(ts, axis='index')\n\n for key, col in compat.iteritems(self.tsframe):\n result = col + ts\n assert_series_equal(added[key], result, check_names=False)\n self.assertEqual(added[key].name, key)\n if col.name == ts.name:\n self.assertEqual(result.name, 'A')\n else:\n self.assertTrue(result.name is None)\n\n smaller_frame = self.tsframe[:-5]\n smaller_added = smaller_frame.add(ts, 
axis='index')\n\n self.assertTrue(smaller_added.index.equals(self.tsframe.index))\n\n smaller_ts = ts[:-5]\n smaller_added2 = self.tsframe.add(smaller_ts, axis='index')\n assert_frame_equal(smaller_added, smaller_added2)\n\n # length 0, result is all-nan\n result = self.tsframe.add(ts[:0], axis='index')\n expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns)\n assert_frame_equal(result, expected)\n\n # Frame is all-nan\n result = self.tsframe[:0].add(ts, axis='index')\n expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns)\n assert_frame_equal(result, expected)\n\n # empty but with non-empty index\n frame = self.tsframe[:1].reindex(columns=[])\n result = frame.mul(ts,axis='index')\n self.assertEqual(len(result), len(ts))\n\n def test_combineFunc(self):\n result = self.frame * 2\n self.assert_numpy_array_equal(result.values, self.frame.values * 2)\n\n # vs mix\n result = self.mixed_float * 2\n for c, s in compat.iteritems(result):\n self.assert_numpy_array_equal(s.values, self.mixed_float[c].values * 2)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = self.empty * 2\n self.assertIs(result.index, self.empty.index)\n self.assertEqual(len(result.columns), 0)\n\n def test_comparisons(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n\n row = self.simple.xs('a')\n ndim_5 = np.ones(df1.shape + (1, 1, 1))\n\n def test_comp(func):\n result = func(df1, df2)\n self.assert_numpy_array_equal(result.values,\n func(df1.values, df2.values))\n with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):\n func(df1, ndim_5)\n\n result2 = func(self.simple, row)\n self.assert_numpy_array_equal(result2.values,\n func(self.simple.values, row.values))\n\n result3 = func(self.frame, 0)\n self.assert_numpy_array_equal(result3.values,\n func(self.frame.values, 0))\n\n\n with assertRaisesRegexp(ValueError, 'Can only compare '\n 'identically-labeled DataFrame'):\n func(self.simple, self.simple[:2])\n\n test_comp(operator.eq)\n test_comp(operator.ne)\n test_comp(operator.lt)\n test_comp(operator.gt)\n test_comp(operator.ge)\n test_comp(operator.le)\n\n def test_string_comparison(self):\n df = DataFrame([{\"a\": 1, \"b\": \"foo\"}, {\"a\": 2, \"b\": \"bar\"}])\n mask_a = df.a > 1\n assert_frame_equal(df[mask_a], df.ix[1:1, :])\n assert_frame_equal(df[-mask_a], df.ix[0:0, :])\n\n mask_b = df.b == \"foo\"\n assert_frame_equal(df[mask_b], df.ix[0:0, :])\n assert_frame_equal(df[-mask_b], df.ix[1:1, :])\n\n def test_float_none_comparison(self):\n df = DataFrame(np.random.randn(8, 3), index=lrange(8),\n columns=['A', 'B', 'C'])\n\n self.assertRaises(TypeError, df.__eq__, None)\n\n def test_boolean_comparison(self):\n\n # GH 4576\n # boolean comparisons with a tuple/list give unexpected results\n df = DataFrame(np.arange(6).reshape((3,2)))\n b = np.array([2, 2])\n b_r = np.atleast_2d([2,2])\n b_c = b_r.T\n l = (2,2,2)\n tup = tuple(l)\n\n # gt\n expected = DataFrame([[False,False],[False,True],[True,True]])\n result = df>b\n assert_frame_equal(result,expected)\n\n result = df.values>b\n assert_numpy_array_equal(result,expected.values)\n\n result = df>l\n assert_frame_equal(result,expected)\n\n result = df>tup\n assert_frame_equal(result,expected)\n\n result = df>b_r\n assert_frame_equal(result,expected)\n\n result = df.values>b_r\n assert_numpy_array_equal(result,expected.values)\n\n self.assertRaises(ValueError, df.__gt__, b_c)\n self.assertRaises(ValueError, df.values.__gt__, b_c)\n\n # ==\n expected = 
DataFrame([[False,False],[True,False],[False,False]])\n result = df == b\n assert_frame_equal(result,expected)\n\n result = df==l\n assert_frame_equal(result,expected)\n\n result = df==tup\n assert_frame_equal(result,expected)\n\n result = df == b_r\n assert_frame_equal(result,expected)\n\n result = df.values == b_r\n assert_numpy_array_equal(result,expected.values)\n\n self.assertRaises(ValueError, lambda : df == b_c)\n self.assertFalse((df.values == b_c))\n\n # with alignment\n df = DataFrame(np.arange(6).reshape((3,2)),columns=list('AB'),index=list('abc'))\n expected.index=df.index\n expected.columns=df.columns\n\n result = df==l\n assert_frame_equal(result,expected)\n\n result = df==tup\n assert_frame_equal(result,expected)\n\n # not shape compatible\n self.assertRaises(ValueError, lambda : df == (2,2))\n self.assertRaises(ValueError, lambda : df == [2,2])\n\n def test_equals_different_blocks(self):\n # GH 9330\n df0 = pd.DataFrame({\"A\": [\"x\",\"y\"], \"B\": [1,2],\n \"C\": [\"w\",\"z\"]})\n df1 = df0.reset_index()[[\"A\",\"B\",\"C\"]]\n # this assert verifies that the above operations have\n # induced a block rearrangement\n self.assertTrue(df0._data.blocks[0].dtype !=\n df1._data.blocks[0].dtype)\n # do the real tests\n assert_frame_equal(df0, df1)\n self.assertTrue(df0.equals(df1))\n self.assertTrue(df1.equals(df0))\n\n def test_copy_blocks(self):\n # API/ENH 9607\n df = DataFrame(self.frame, copy=True)\n column = df.columns[0]\n\n # use the default copy=True, change a column\n blocks = df.as_blocks()\n for dtype, _df in blocks.items():\n if column in _df:\n _df.ix[:, column] = _df[column] + 1\n\n # make sure we did not change the original DataFrame\n self.assertFalse(_df[column].equals(df[column]))\n\n def test_no_copy_blocks(self):\n # API/ENH 9607\n df = DataFrame(self.frame, copy=True)\n column = df.columns[0]\n\n # use the copy=False, change a column\n blocks = df.as_blocks(copy=False)\n for dtype, _df in blocks.items():\n if column in _df:\n _df.ix[:, column] = _df[column] + 1\n\n # make sure we did change the original DataFrame\n self.assertTrue(_df[column].equals(df[column]))\n\n def test_to_csv_from_csv(self):\n\n pname = '__tmp_to_csv_from_csv__'\n with ensure_clean(pname) as path:\n\n self.frame['A'][:5] = nan\n\n self.frame.to_csv(path)\n self.frame.to_csv(path, columns=['A', 'B'])\n self.frame.to_csv(path, header=False)\n self.frame.to_csv(path, index=False)\n\n # test roundtrip\n self.tsframe.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.tsframe, recons)\n\n self.tsframe.to_csv(path, index_label='index')\n recons = DataFrame.from_csv(path, index_col=None)\n assert(len(recons.columns) == len(self.tsframe.columns) + 1)\n\n # no index\n self.tsframe.to_csv(path, index=False)\n recons = DataFrame.from_csv(path, index_col=None)\n assert_almost_equal(self.tsframe.values, recons.values)\n\n # corner case\n dm = DataFrame({'s1': Series(lrange(3), lrange(3)),\n 's2': Series(lrange(2), lrange(2))})\n dm.to_csv(path)\n recons = DataFrame.from_csv(path)\n assert_frame_equal(dm, recons)\n\n with ensure_clean(pname) as path:\n\n # duplicate index\n df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],\n columns=['x', 'y', 'z'])\n df.to_csv(path)\n result = DataFrame.from_csv(path)\n assert_frame_equal(result, df)\n\n midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])\n df = DataFrame(np.random.randn(3, 3), index=midx,\n columns=['x', 'y', 'z'])\n df.to_csv(path)\n result = DataFrame.from_csv(path, index_col=[0, 1, 2],\n 
parse_dates=False)\n assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?\n\n # column aliases\n col_aliases = Index(['AA', 'X', 'Y', 'Z'])\n self.frame2.to_csv(path, header=col_aliases)\n rs = DataFrame.from_csv(path)\n xp = self.frame2.copy()\n xp.columns = col_aliases\n\n assert_frame_equal(xp, rs)\n\n self.assertRaises(ValueError, self.frame2.to_csv, path,\n header=['AA', 'X'])\n\n with ensure_clean(pname) as path:\n df1 = DataFrame(np.random.randn(3, 1))\n df2 = DataFrame(np.random.randn(3, 1))\n\n df1.to_csv(path)\n df2.to_csv(path,mode='a',header=False)\n xp = pd.concat([df1,df2])\n rs = pd.read_csv(path,index_col=0)\n rs.columns = lmap(int,rs.columns)\n xp.columns = lmap(int,xp.columns)\n assert_frame_equal(xp,rs)\n\n with ensure_clean() as path:\n # GH 10833 (TimedeltaIndex formatting)\n dt = pd.Timedelta(seconds=1)\n df = pd.DataFrame({'dt_data': [i*dt for i in range(3)]},\n index=pd.Index([i*dt for i in range(3)],\n name='dt_index'))\n df.to_csv(path)\n\n result = pd.read_csv(path, index_col='dt_index')\n result.index = pd.to_timedelta(result.index)\n # TODO: remove renaming when GH 10875 is solved\n result.index = result.index.rename('dt_index')\n result['dt_data'] = pd.to_timedelta(result['dt_data'])\n\n assert_frame_equal(df, result, check_index_type=True)\n\n # tz, 8260\n with ensure_clean(pname) as path:\n\n self.tzframe.to_csv(path)\n result = pd.read_csv(path, index_col=0, parse_dates=['A'])\n\n converter = lambda c: pd.to_datetime(result[c]).dt.tz_localize('UTC').dt.tz_convert(self.tzframe[c].dt.tz)\n result['B'] = converter('B')\n result['C'] = converter('C')\n assert_frame_equal(result, self.tzframe)\n\n def test_to_csv_cols_reordering(self):\n # GH3454\n import pandas as pd\n\n chunksize=5\n N = int(chunksize*2.5)\n\n df= mkdf(N, 3)\n cs = df.columns\n cols = [cs[2],cs[0]]\n\n with ensure_clean() as path:\n df.to_csv(path,columns = cols,chunksize=chunksize)\n rs_c = pd.read_csv(path,index_col=0)\n\n assert_frame_equal(df[cols],rs_c,check_names=False)\n\n def test_to_csv_legacy_raises_on_dupe_cols(self):\n df= mkdf(10, 3)\n df.columns = ['a','a','b']\n with ensure_clean() as path:\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')\n\n def test_to_csv_new_dupe_cols(self):\n import pandas as pd\n def _check_df(df,cols=None):\n with ensure_clean() as path:\n df.to_csv(path,columns = cols,chunksize=chunksize)\n rs_c = pd.read_csv(path,index_col=0)\n\n # we wrote them in a different order\n # so compare them in that order\n if cols is not None:\n\n if df.columns.is_unique:\n rs_c.columns = cols\n else:\n indexer, missing = df.columns.get_indexer_non_unique(cols)\n rs_c.columns = df.columns.take(indexer)\n\n for c in cols:\n obj_df = df[c]\n obj_rs = rs_c[c]\n if isinstance(obj_df,Series):\n assert_series_equal(obj_df,obj_rs)\n else:\n assert_frame_equal(obj_df,obj_rs,check_names=False)\n\n # wrote in the same order\n else:\n rs_c.columns = df.columns\n assert_frame_equal(df,rs_c,check_names=False)\n\n chunksize=5\n N = int(chunksize*2.5)\n\n # dupe cols\n df= mkdf(N, 3)\n df.columns = ['a','a','b']\n _check_df(df,None)\n\n # dupe cols with selection\n cols = ['b','a']\n _check_df(df,cols)\n\n @slow\n def test_to_csv_moar(self):\n path = '__tmp_to_csv_moar__'\n\n def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,\n dupe_col=False):\n\n kwargs = dict(parse_dates=False)\n if cnlvl:\n if rnlvl is not 
None:\n kwargs['index_col'] = lrange(rnlvl)\n kwargs['header'] = lrange(cnlvl)\n with ensure_clean(path) as path:\n df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)\n recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)\n else:\n kwargs['header'] = 0\n with ensure_clean(path) as path:\n df.to_csv(path,encoding='utf8',chunksize=chunksize)\n recons = DataFrame.from_csv(path,**kwargs)\n\n def _to_uni(x):\n if not isinstance(x, compat.text_type):\n return x.decode('utf8')\n return x\n if dupe_col:\n # read_Csv disambiguates the columns by\n # labeling them dupe.1,dupe.2, etc'. monkey patch columns\n recons.columns = df.columns\n if rnlvl and not cnlvl:\n delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl-1)]\n ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)\n recons.index = ix\n recons = recons.iloc[:,rnlvl-1:]\n\n type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')\n if r_dtype:\n if r_dtype == 'u': # unicode\n r_dtype='O'\n recons.index = np.array(lmap(_to_uni,recons.index),\n dtype=r_dtype)\n df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)\n elif r_dtype == 'dt': # unicode\n r_dtype='O'\n recons.index = np.array(lmap(Timestamp,recons.index),\n dtype=r_dtype)\n df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)\n elif r_dtype == 'p':\n r_dtype='O'\n recons.index = np.array(list(map(Timestamp,\n recons.index.to_datetime())),\n dtype=r_dtype)\n df.index = np.array(list(map(Timestamp,\n df.index.to_datetime())),\n dtype=r_dtype)\n else:\n r_dtype= type_map.get(r_dtype)\n recons.index = np.array(recons.index,dtype=r_dtype )\n df.index = np.array(df.index,dtype=r_dtype )\n if c_dtype:\n if c_dtype == 'u':\n c_dtype='O'\n recons.columns = np.array(lmap(_to_uni,recons.columns),\n dtype=c_dtype)\n df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )\n elif c_dtype == 'dt':\n c_dtype='O'\n recons.columns = np.array(lmap(Timestamp,recons.columns),\n dtype=c_dtype )\n df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)\n elif c_dtype == 'p':\n c_dtype='O'\n recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),\n dtype=c_dtype)\n df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )\n else:\n c_dtype= type_map.get(c_dtype)\n recons.columns = np.array(recons.columns,dtype=c_dtype )\n df.columns = np.array(df.columns,dtype=c_dtype )\n\n assert_frame_equal(df,recons,check_names=False,check_less_precise=True)\n\n N = 100\n chunksize=1000\n\n # GH3437\n from pandas import NaT\n def make_dtnat_arr(n,nnat=None):\n if nnat is None:\n nnat= int(n*0.1) # 10%\n s=list(date_range('2000',freq='5min',periods=n))\n if nnat:\n for i in np.random.randint(0,len(s),nnat):\n s[i] = NaT\n i = np.random.randint(100)\n s[-i] = NaT\n s[i] = NaT\n return s\n\n # N=35000\n s1=make_dtnat_arr(chunksize+5)\n s2=make_dtnat_arr(chunksize+5,0)\n path = '1.csv'\n\n # s3=make_dtnjat_arr(chunksize+5,0)\n with ensure_clean('.csv') as pth:\n df=DataFrame(dict(a=s1,b=s2))\n df.to_csv(pth,chunksize=chunksize)\n recons = DataFrame.from_csv(pth)._convert(datetime=True,\n coerce=True)\n assert_frame_equal(df, recons,check_names=False,check_less_precise=True)\n\n for ncols in [4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type='dt',\n c_idx_type='s'),path, 'dt','s')\n\n\n for ncols in [4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in 
[2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type='dt',\n c_idx_type='s'),path, 'dt','s')\n pass\n\n for r_idx_type,c_idx_type in [('i','i'),('s','s'),('u','dt'),('p','p')]:\n for ncols in [1,2,3,4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type=r_idx_type,\n c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)\n\n for ncols in [1,2,3,4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols),path)\n\n for nrows in [10,N-2,N-1,N,N+1,N+2]:\n df = mkdf(nrows, 3)\n cols = list(df.columns)\n cols[:2] = [\"dupe\",\"dupe\"]\n cols[-2:] = [\"dupe\",\"dupe\"]\n ix = list(df.index)\n ix[:2] = [\"rdupe\",\"rdupe\"]\n ix[-2:] = [\"rdupe\",\"rdupe\"]\n df.index=ix\n df.columns=cols\n _do_test(df,path,dupe_col=True)\n\n\n _do_test(DataFrame(index=lrange(10)),path)\n _do_test(mkdf(chunksize//2+1, 2,r_idx_nlevels=2),path,rnlvl=2)\n for ncols in [2,3,4]:\n base = int(chunksize//ncols)\n for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)\n _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)\n _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),\n path,rnlvl=2,cnlvl=2)\n\n def test_to_csv_from_csv_w_some_infs(self):\n\n # test roundtrip with inf, -inf, nan, as full columns and mix\n self.frame['G'] = np.nan\n f = lambda x: [np.inf, np.nan][np.random.rand() < .5]\n self.frame['H'] = self.frame.index.map(f)\n\n with ensure_clean() as path:\n self.frame.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name\n assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)\n\n def test_to_csv_from_csv_w_all_infs(self):\n\n # test roundtrip with inf, -inf, nan, as full columns and mix\n self.frame['E'] = np.inf\n self.frame['F'] = -np.inf\n\n with ensure_clean() as path:\n self.frame.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name\n assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)\n\n def test_to_csv_no_index(self):\n # GH 3624, after appending columns, to_csv fails\n pname = '__tmp_to_csv_no_index__'\n with ensure_clean(pname) as path:\n df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})\n df.to_csv(path, index=False)\n result = read_csv(path)\n assert_frame_equal(df,result)\n df['c3'] = Series([7,8,9],dtype='int64')\n df.to_csv(path, index=False)\n result = read_csv(path)\n assert_frame_equal(df,result)\n\n def test_to_csv_headers(self):\n # GH6186, the presence or absence of `index` incorrectly\n # causes to_csv to have different header semantics.\n pname = '__tmp_to_csv_headers__'\n from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])\n to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])\n with ensure_clean(pname) as path:\n from_df.to_csv(path, header=['X', 'Y'])\n recons = DataFrame.from_csv(path)\n assert_frame_equal(to_df, recons)\n\n from_df.to_csv(path, index=False, header=['X', 'Y'])\n recons = DataFrame.from_csv(path)\n recons.reset_index(inplace=True)\n assert_frame_equal(to_df, recons)\n\n def test_to_csv_multiindex(self):\n\n pname = '__tmp_to_csv_multiindex__'\n frame = 
self.frame\n old_index = frame.index\n arrays = np.arange(len(old_index) * 2).reshape(2, -1)\n new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])\n frame.index = new_index\n\n with ensure_clean(pname) as path:\n\n frame.to_csv(path, header=False)\n frame.to_csv(path, columns=['A', 'B'])\n\n # round trip\n frame.to_csv(path)\n df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)\n\n assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name\n self.assertEqual(frame.index.names, df.index.names)\n self.frame.index = old_index # needed if setUP becomes a classmethod\n\n # try multiindex with dates\n tsframe = self.tsframe\n old_index = tsframe.index\n new_index = [old_index, np.arange(len(old_index))]\n tsframe.index = MultiIndex.from_arrays(new_index)\n\n tsframe.to_csv(path, index_label=['time', 'foo'])\n recons = DataFrame.from_csv(path, index_col=[0, 1])\n assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name\n\n # do not load index\n tsframe.to_csv(path)\n recons = DataFrame.from_csv(path, index_col=None)\n np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)\n\n # no index\n tsframe.to_csv(path, index=False)\n recons = DataFrame.from_csv(path, index_col=None)\n assert_almost_equal(recons.values, self.tsframe.values)\n self.tsframe.index = old_index # needed if setUP becomes classmethod\n\n with ensure_clean(pname) as path:\n # GH3571, GH1651, GH3141\n\n def _make_frame(names=None):\n if names is True:\n names = ['first','second']\n return DataFrame(np.random.randint(0,10,size=(3,3)),\n columns=MultiIndex.from_tuples([('bah', 'foo'),\n ('bah', 'bar'),\n ('ban', 'baz')],\n names=names),\n dtype='int64')\n\n # column & index are multi-index\n df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # column is mi\n df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # dup column names?\n df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # writing with no index\n df = _make_frame()\n df.to_csv(path,tupleize_cols=False,index=False)\n result = read_csv(path,header=[0,1],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # we lose the names here\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False,index=False)\n result = read_csv(path,header=[0,1],tupleize_cols=False)\n self.assertTrue(all([ x is None for x in result.columns.names ]))\n result.columns.names = df.columns.names\n assert_frame_equal(df,result)\n\n # tupleize_cols=True and index=False\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=True,index=False)\n result = read_csv(path,header=0,tupleize_cols=True,index_col=None)\n result.columns = df.columns\n assert_frame_equal(df,result)\n\n # whatsnew example\n df = _make_frame()\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # column & index are multi-index 
(compatibility)\n df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=True)\n result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)\n result.columns = df.columns\n assert_frame_equal(df,result)\n\n # invalid options\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False)\n\n # catch invalid headers\n with assertRaisesRegexp(CParserError, 'Passed header=\\[0,1,2\\] are too many rows for this multi_index of columns'):\n read_csv(path,tupleize_cols=False,header=lrange(3),index_col=0)\n\n with assertRaisesRegexp(CParserError, 'Passed header=\\[0,1,2,3,4,5,6\\], len of 7, but only 6 lines in file'):\n read_csv(path,tupleize_cols=False,header=lrange(7),index_col=0)\n\n for i in [4,5,6]:\n with tm.assertRaises(CParserError):\n read_csv(path, tupleize_cols=False, header=lrange(i), index_col=0)\n\n # write with cols\n with assertRaisesRegexp(TypeError, 'cannot specify cols with a MultiIndex'):\n df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])\n\n with ensure_clean(pname) as path:\n # empty\n tsframe[:0].to_csv(path)\n recons = DataFrame.from_csv(path)\n exp = tsframe[:0]\n exp.index = []\n\n self.assertTrue(recons.columns.equals(exp.columns))\n self.assertEqual(len(recons), 0)\n\n def test_to_csv_float32_nanrep(self):\n df = DataFrame(np.random.randn(1, 4).astype(np.float32))\n df[1] = np.nan\n\n with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:\n df.to_csv(path, na_rep=999)\n\n with open(path) as f:\n lines = f.readlines()\n self.assertEqual(lines[1].split(',')[2], '999')\n\n def test_to_csv_withcommas(self):\n\n # Commas inside fields should be correctly escaped when saving as CSV.\n df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})\n\n with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:\n df.to_csv(path)\n df2 = DataFrame.from_csv(path)\n assert_frame_equal(df2, df)\n\n def test_to_csv_mixed(self):\n\n def create_cols(name):\n return [ \"%s%03d\" % (name,i) for i in range(5) ]\n\n df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))\n df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))\n df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))\n df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))\n\n # add in some nans\n df_float.ix[30:50,1:3] = np.nan\n\n #### this is a bug in read_csv right now ####\n #df_dt.ix[30:50,1:3] = np.nan\n\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)\n\n # dtype\n dtypes = dict()\n for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:\n for c in create_cols(n):\n dtypes[c] = dtype\n\n with ensure_clean() as filename:\n df.to_csv(filename)\n rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))\n assert_frame_equal(rs, df)\n\n def test_to_csv_dups_cols(self):\n\n df = DataFrame(np.random.randn(1000, 30),columns=lrange(15)+lrange(15),dtype='float64')\n\n with ensure_clean() as filename:\n df.to_csv(filename) # single dtype, fine\n result = read_csv(filename,index_col=0)\n result.columns = df.columns\n assert_frame_equal(result,df)\n\n df_float = DataFrame(np.random.randn(1000, 3),dtype='float64')\n df_int = DataFrame(np.random.randn(1000, 3),dtype='int64')\n df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))\n df_object = 
DataFrame('foo',index=df_float.index,columns=lrange(3))\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)\n\n cols = []\n for i in range(5):\n cols.extend([0,1,2])\n df.columns = cols\n\n from pandas import to_datetime\n with ensure_clean() as filename:\n df.to_csv(filename)\n result = read_csv(filename,index_col=0)\n\n # date cols\n for i in ['0.4','1.4','2.4']:\n result[i] = to_datetime(result[i])\n\n result.columns = df.columns\n assert_frame_equal(result,df)\n\n # GH3457\n from pandas.util.testing import makeCustomDataframe as mkdf\n\n N=10\n df= mkdf(N, 3)\n df.columns = ['a','a','b']\n\n with ensure_clean() as filename:\n df.to_csv(filename)\n\n # read_csv will rename the dups columns\n result = read_csv(filename,index_col=0)\n result = result.rename(columns={ 'a.1' : 'a' })\n assert_frame_equal(result,df)\n\n def test_to_csv_chunking(self):\n\n aa=DataFrame({'A':lrange(100000)})\n aa['B'] = aa.A + 1.0\n aa['C'] = aa.A + 2.0\n aa['D'] = aa.A + 3.0\n\n for chunksize in [10000,50000,100000]:\n with ensure_clean() as filename:\n aa.to_csv(filename,chunksize=chunksize)\n rs = read_csv(filename,index_col=0)\n assert_frame_equal(rs, aa)\n\n @slow\n def test_to_csv_wide_frame_formatting(self):\n # Issue #8621\n df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)\n with ensure_clean() as filename:\n df.to_csv(filename, header=False, index=False)\n rs = read_csv(filename, header=None)\n assert_frame_equal(rs, df)\n\n def test_to_csv_bug(self):\n f1 = StringIO('a,1.0\\nb,2.0')\n df = DataFrame.from_csv(f1, header=None)\n newdf = DataFrame({'t': df[df.columns[0]]})\n\n with ensure_clean() as path:\n newdf.to_csv(path)\n\n recons = read_csv(path, index_col=0)\n assert_frame_equal(recons, newdf, check_names=False) # don't check_names as t != 1\n\n def test_to_csv_unicode(self):\n\n df = DataFrame({u('c/\\u03c3'): [1, 2, 3]})\n with ensure_clean() as path:\n\n df.to_csv(path, encoding='UTF-8')\n df2 = read_csv(path, index_col=0, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n df.to_csv(path, encoding='UTF-8', index=False)\n df2 = read_csv(path, index_col=None, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n def test_to_csv_unicode_index_col(self):\n buf = StringIO('')\n df = DataFrame(\n [[u(\"\\u05d0\"), \"d2\", \"d3\", \"d4\"], [\"a1\", \"a2\", \"a3\", \"a4\"]],\n columns=[u(\"\\u05d0\"),\n u(\"\\u05d1\"), u(\"\\u05d2\"), u(\"\\u05d3\")],\n index=[u(\"\\u05d0\"), u(\"\\u05d1\")])\n\n df.to_csv(buf, encoding='UTF-8')\n buf.seek(0)\n\n df2 = read_csv(buf, index_col=0, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n def test_to_csv_stringio(self):\n buf = StringIO()\n self.frame.to_csv(buf)\n buf.seek(0)\n recons = read_csv(buf, index_col=0)\n assert_frame_equal(recons, self.frame, check_names=False) # TODO to_csv drops column name\n\n def test_to_csv_float_format(self):\n\n df = DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with ensure_clean() as filename:\n\n df.to_csv(filename, float_format='%.2f')\n\n rs = read_csv(filename, index_col=0)\n xp = DataFrame([[0.12, 0.23, 0.57],\n [12.32, 123123.20, 321321.20]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n assert_frame_equal(rs, xp)\n\n def test_to_csv_quoting(self):\n df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})\n\n buf = StringIO()\n df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC)\n\n 
result = buf.getvalue()\n expected = ('\"A\",\"B\"\\n'\n '1,\"foo\"\\n'\n '2,\"bar\"\\n'\n '3,\"baz\"\\n')\n\n self.assertEqual(result, expected)\n\n # quoting windows line terminators, presents with encoding?\n # #3503\n text = 'a,b,c\\n1,\"test \\r\\n\",3\\n'\n df = pd.read_csv(StringIO(text))\n buf = StringIO()\n df.to_csv(buf, encoding='utf-8', index=False)\n self.assertEqual(buf.getvalue(), text)\n\n # testing if quoting parameter is passed through with multi-indexes\n # related to issue #7791\n df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})\n df = df.set_index(['a', 'b'])\n expected = '\"a\",\"b\",\"c\"\\n\"1\",\"3\",\"5\"\\n\"2\",\"4\",\"6\"\\n'\n self.assertEqual(df.to_csv(quoting=csv.QUOTE_ALL), expected)\n\n def test_to_csv_unicodewriter_quoting(self):\n df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})\n\n buf = StringIO()\n df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,\n encoding='utf-8')\n\n result = buf.getvalue()\n expected = ('\"A\",\"B\"\\n'\n '1,\"foo\"\\n'\n '2,\"bar\"\\n'\n '3,\"baz\"\\n')\n\n self.assertEqual(result, expected)\n\n def test_to_csv_quote_none(self):\n # GH4328\n df = DataFrame({'A': ['hello', '{\"hello\"}']})\n for encoding in (None, 'utf-8'):\n buf = StringIO()\n df.to_csv(buf, quoting=csv.QUOTE_NONE,\n encoding=encoding, index=False)\n result = buf.getvalue()\n expected = 'A\\nhello\\n{\"hello\"}\\n'\n self.assertEqual(result, expected)\n\n def test_to_csv_index_no_leading_comma(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['one', 'two', 'three'])\n\n buf = StringIO()\n df.to_csv(buf, index_label=False)\n expected = ('A,B\\n'\n 'one,1,4\\n'\n 'two,2,5\\n'\n 'three,3,6\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n def test_to_csv_line_terminators(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['one', 'two', 'three'])\n\n buf = StringIO()\n df.to_csv(buf, line_terminator='\\r\\n')\n expected = (',A,B\\r\\n'\n 'one,1,4\\r\\n'\n 'two,2,5\\r\\n'\n 'three,3,6\\r\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n buf = StringIO()\n df.to_csv(buf) # The default line terminator remains \\n\n expected = (',A,B\\n'\n 'one,1,4\\n'\n 'two,2,5\\n'\n 'three,3,6\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n def test_to_csv_from_csv_categorical(self):\n\n # CSV with categoricals should result in the same output as when one would add a \"normal\"\n # Series/DataFrame.\n s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))\n s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])\n res = StringIO()\n s.to_csv(res)\n exp = StringIO()\n s2.to_csv(exp)\n self.assertEqual(res.getvalue(), exp.getvalue())\n\n df = DataFrame({\"s\":s})\n df2 = DataFrame({\"s\":s2})\n res = StringIO()\n df.to_csv(res)\n exp = StringIO()\n df2.to_csv(exp)\n self.assertEqual(res.getvalue(), exp.getvalue())\n\n def test_to_csv_path_is_none(self):\n # GH 8215\n # Make sure we return string for consistency with\n # Series.to_csv()\n csv_str = self.frame.to_csv(path=None)\n self.assertIsInstance(csv_str, str)\n recons = pd.read_csv(StringIO(csv_str), index_col=0)\n assert_frame_equal(self.frame, recons)\n\n def test_to_csv_compression_gzip(self):\n ## GH7615\n ## use the compression kw in to_csv\n df = DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with ensure_clean() as filename:\n\n df.to_csv(filename, compression=\"gzip\")\n\n # test the round trip - to_csv -> read_csv\n rs = read_csv(filename, 
compression=\"gzip\", index_col=0)\n assert_frame_equal(df, rs)\n\n # explicitly make sure file is gziped\n import gzip\n f = gzip.open(filename, 'rb')\n text = f.read().decode('utf8')\n f.close()\n for col in df.columns:\n self.assertIn(col, text)\n\n def test_to_csv_compression_bz2(self):\n ## GH7615\n ## use the compression kw in to_csv\n df = DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with ensure_clean() as filename:\n\n df.to_csv(filename, compression=\"bz2\")\n\n # test the round trip - to_csv -> read_csv\n rs = read_csv(filename, compression=\"bz2\", index_col=0)\n assert_frame_equal(df, rs)\n\n # explicitly make sure file is bz2ed\n import bz2\n f = bz2.BZ2File(filename, 'rb')\n text = f.read().decode('utf8')\n f.close()\n for col in df.columns:\n self.assertIn(col, text)\n\n def test_to_csv_compression_value_error(self):\n ## GH7615\n ## use the compression kw in to_csv\n df = DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with ensure_clean() as filename:\n # zip compression is not supported and should raise ValueError\n self.assertRaises(ValueError, df.to_csv, filename, compression=\"zip\")\n\n def test_info(self):\n io = StringIO()\n self.frame.info(buf=io)\n self.tsframe.info(buf=io)\n\n frame = DataFrame(np.random.randn(5, 3))\n\n import sys\n sys.stdout = StringIO()\n frame.info()\n frame.info(verbose=False)\n sys.stdout = sys.__stdout__\n\n def test_info_wide(self):\n from pandas import set_option, reset_option\n io = StringIO()\n df = DataFrame(np.random.randn(5, 101))\n df.info(buf=io)\n\n io = StringIO()\n df.info(buf=io, max_cols=101)\n rs = io.getvalue()\n self.assertTrue(len(rs.splitlines()) > 100)\n xp = rs\n\n set_option('display.max_info_columns', 101)\n io = StringIO()\n df.info(buf=io)\n self.assertEqual(rs, xp)\n reset_option('display.max_info_columns')\n\n def test_info_duplicate_columns(self):\n io = StringIO()\n\n # it works!\n frame = DataFrame(np.random.randn(1500, 4),\n columns=['a', 'a', 'b', 'b'])\n frame.info(buf=io)\n\n def test_info_shows_column_dtypes(self):\n dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',\n 'complex128', 'object', 'bool']\n data = {}\n n = 10\n for i, dtype in enumerate(dtypes):\n data[i] = np.random.randint(2, size=n).astype(dtype)\n df = DataFrame(data)\n buf = StringIO()\n df.info(buf=buf)\n res = buf.getvalue()\n for i, dtype in enumerate(dtypes):\n name = '%d %d non-null %s' % (i, n, dtype)\n assert name in res\n\n def test_info_max_cols(self):\n df = DataFrame(np.random.randn(10, 5))\n for len_, verbose in [(5, None), (5, False), (10, True)]:\n # For verbose always ^ setting ^ summarize ^ full output\n with option_context('max_info_columns', 4):\n buf = StringIO()\n df.info(buf=buf, verbose=verbose)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n for len_, verbose in [(10, None), (5, False), (10, True)]:\n\n # max_cols no exceeded\n with option_context('max_info_columns', 5):\n buf = StringIO()\n df.info(buf=buf, verbose=verbose)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n for len_, max_cols in [(10, 5), (5, 4)]:\n # setting truncates\n with option_context('max_info_columns', 4):\n buf = StringIO()\n df.info(buf=buf, max_cols=max_cols)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n # setting wouldn't truncate\n with 
option_context('max_info_columns', 5):\n buf = StringIO()\n df.info(buf=buf, max_cols=max_cols)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n def test_info_memory_usage(self):\n # Ensure memory usage is displayed, when asserted, on the last line\n dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',\n 'complex128', 'object', 'bool']\n data = {}\n n = 10\n for i, dtype in enumerate(dtypes):\n data[i] = np.random.randint(2, size=n).astype(dtype)\n df = DataFrame(data)\n buf = StringIO()\n # display memory usage case\n df.info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n self.assertTrue(\"memory usage: \" in res[-1])\n # do not display memory usage cas\n df.info(buf=buf, memory_usage=False)\n res = buf.getvalue().splitlines()\n self.assertTrue(\"memory usage: \" not in res[-1])\n\n df.info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n # memory usage is a lower bound, so print it as XYZ+ MB\n self.assertTrue(re.match(r\"memory usage: [^+]+\\+\", res[-1]))\n\n df.iloc[:, :5].info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n # excluded column with object dtype, so estimate is accurate\n self.assertFalse(re.match(r\"memory usage: [^+]+\\+\", res[-1]))\n\n df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])\n df_with_object_index.info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n self.assertTrue(re.match(r\"memory usage: [^+]+\\+\", res[-1]))\n\n df_with_object_index.info(buf=buf, memory_usage='deep')\n res = buf.getvalue().splitlines()\n self.assertTrue(re.match(r\"memory usage: [^+]+$\", res[-1]))\n\n self.assertTrue(df_with_object_index.memory_usage(index=True, deep=True).sum() \\\n > df_with_object_index.memory_usage(index=True).sum())\n\n df_object = pd.DataFrame({'a': ['a']})\n self.assertTrue(df_object.memory_usage(deep=True).sum() \\\n > df_object.memory_usage().sum())\n\n # Test a DataFrame with duplicate columns\n dtypes = ['int64', 'int64', 'int64', 'float64']\n data = {}\n n = 100\n for i, dtype in enumerate(dtypes):\n data[i] = np.random.randint(2, size=n).astype(dtype)\n df = DataFrame(data)\n df.columns = dtypes\n # Ensure df size is as expected\n df_size = df.memory_usage().sum()\n exp_size = len(dtypes) * n * 8 # cols * rows * bytes\n self.assertEqual(df_size, exp_size)\n # Ensure number of cols in memory_usage is the same as df\n size_df = np.size(df.columns.values) # index=False; default\n self.assertEqual(size_df, np.size(df.memory_usage()))\n\n # assert deep works only on object\n self.assertEqual(df.memory_usage().sum(),df.memory_usage(deep=True).sum())\n\n # test for validity\n DataFrame(1,index=['a'],columns=['A']).memory_usage(index=True)\n DataFrame(1,index=['a'],columns=['A']).index.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).memory_usage(index=True)\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes\n\n def test_dtypes(self):\n self.mixed_frame['bool'] = self.mixed_frame['A'] > 0\n result = self.mixed_frame.dtypes\n expected = Series(dict((k, v.dtype)\n for k, v in compat.iteritems(self.mixed_frame)),\n index=result.index)\n 
assert_series_equal(result, expected)\n\n # compat, GH 8722\n with option_context('use_inf_as_null',True):\n df = DataFrame([[1]])\n result = df.dtypes\n assert_series_equal(result,Series({0:np.dtype('int64')}))\n\n def test_convert_objects(self):\n\n oops = self.mixed_frame.T.T\n converted = oops._convert(datetime=True)\n assert_frame_equal(converted, self.mixed_frame)\n self.assertEqual(converted['A'].dtype, np.float64)\n\n # force numeric conversion\n self.mixed_frame['H'] = '1.'\n self.mixed_frame['I'] = '1'\n\n # add in some items that will be nan\n l = len(self.mixed_frame)\n self.mixed_frame['J'] = '1.'\n self.mixed_frame['K'] = '1'\n self.mixed_frame.ix[0:5,['J','K']] = 'garbled'\n converted = self.mixed_frame._convert(datetime=True, numeric=True)\n self.assertEqual(converted['H'].dtype, 'float64')\n self.assertEqual(converted['I'].dtype, 'int64')\n self.assertEqual(converted['J'].dtype, 'float64')\n self.assertEqual(converted['K'].dtype, 'float64')\n self.assertEqual(len(converted['J'].dropna()), l-5)\n self.assertEqual(len(converted['K'].dropna()), l-5)\n\n # via astype\n converted = self.mixed_frame.copy()\n converted['H'] = converted['H'].astype('float64')\n converted['I'] = converted['I'].astype('int64')\n self.assertEqual(converted['H'].dtype, 'float64')\n self.assertEqual(converted['I'].dtype, 'int64')\n\n # via astype, but errors\n converted = self.mixed_frame.copy()\n with assertRaisesRegexp(ValueError, 'invalid literal'):\n converted['H'].astype('int32')\n\n # mixed in a single column\n df = DataFrame(dict(s = Series([1, 'na', 3 ,4])))\n result = df._convert(datetime=True, numeric=True)\n expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4])))\n assert_frame_equal(result, expected)\n\n def test_convert_objects_no_conversion(self):\n mixed1 = DataFrame(\n {'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})\n mixed2 = mixed1._convert(datetime=True)\n assert_frame_equal(mixed1, mixed2)\n\n def test_append_series_dict(self):\n df = DataFrame(np.random.randn(5, 4),\n columns=['foo', 'bar', 'baz', 'qux'])\n\n series = df.ix[4]\n with assertRaisesRegexp(ValueError, 'Indexes have overlapping values'):\n df.append(series, verify_integrity=True)\n series.name = None\n with assertRaisesRegexp(TypeError, 'Can only append a Series if '\n 'ignore_index=True'):\n df.append(series, verify_integrity=True)\n\n result = df.append(series[::-1], ignore_index=True)\n expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T,\n ignore_index=True)\n assert_frame_equal(result, expected)\n\n # dict\n result = df.append(series.to_dict(), ignore_index=True)\n assert_frame_equal(result, expected)\n\n result = df.append(series[::-1][:3], ignore_index=True)\n expected = df.append(DataFrame({0: series[::-1][:3]}).T,\n ignore_index=True)\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n # can append when name set\n row = df.ix[4]\n row.name = 5\n result = df.append(row)\n expected = df.append(df[-1:], ignore_index=True)\n assert_frame_equal(result, expected)\n\n def test_append_list_of_series_dicts(self):\n df = DataFrame(np.random.randn(5, 4),\n columns=['foo', 'bar', 'baz', 'qux'])\n\n dicts = [x.to_dict() for idx, x in df.iterrows()]\n\n result = df.append(dicts, ignore_index=True)\n expected = df.append(df, ignore_index=True)\n assert_frame_equal(result, expected)\n\n # different columns\n dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},\n {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]\n result = df.append(dicts, ignore_index=True)\n expected = 
df.append(DataFrame(dicts), ignore_index=True)\n assert_frame_equal(result, expected)\n\n def test_append_empty_dataframe(self):\n\n # Empty df append empty df\n df1 = DataFrame([])\n df2 = DataFrame([])\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Non-empty df append empty df\n df1 = DataFrame(np.random.randn(5, 2))\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Empty df with columns append empty df\n df1 = DataFrame(columns=['bar', 'foo'])\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Non-Empty df with columns append empty df\n df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n def test_append_dtypes(self):\n\n # GH 5754\n # row appends of different dtypes (so need to do by-item)\n # can sometimes infer the correct type\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))\n df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })\n assert_frame_equal(result, expected)\n\n def test_asfreq(self):\n offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)\n rule_monthly = self.tsframe.asfreq('BM')\n\n assert_almost_equal(offset_monthly['A'], rule_monthly['A'])\n\n filled = rule_monthly.asfreq('B', method='pad')\n # TODO: actually check that this worked.\n\n # don't forget!\n filled_dep = rule_monthly.asfreq('B', method='pad')\n\n # test does not blow up on length-0 DataFrame\n zero_length = self.tsframe.reindex([])\n result = zero_length.asfreq('BM')\n self.assertIsNot(result, zero_length)\n\n def test_asfreq_datetimeindex(self):\n df = DataFrame({'A': [1, 2, 3]},\n index=[datetime(2011, 11, 1), datetime(2011, 11, 2),\n datetime(2011, 11, 3)])\n df = df.asfreq('B')\n tm.assertIsInstance(df.index, DatetimeIndex)\n\n ts = df['A'].asfreq('B')\n tm.assertIsInstance(ts.index, DatetimeIndex)\n\n def test_at_time_between_time_datetimeindex(self):\n index = 
date_range(\"2012-01-01\", \"2012-01-05\", freq='30min')\n df = DataFrame(randn(len(index), 5), index=index)\n akey = time(12, 0, 0)\n bkey = slice(time(13, 0, 0), time(14, 0, 0))\n ainds = [24, 72, 120, 168]\n binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]\n\n result = df.at_time(akey)\n expected = df.ix[akey]\n expected2 = df.ix[ainds]\n assert_frame_equal(result, expected)\n assert_frame_equal(result, expected2)\n self.assertEqual(len(result), 4)\n\n result = df.between_time(bkey.start, bkey.stop)\n expected = df.ix[bkey]\n expected2 = df.ix[binds]\n assert_frame_equal(result, expected)\n assert_frame_equal(result, expected2)\n self.assertEqual(len(result), 12)\n\n result = df.copy()\n result.ix[akey] = 0\n result = result.ix[akey]\n expected = df.ix[akey].copy()\n expected.ix[:] = 0\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.ix[akey] = 0\n result.ix[akey] = df.ix[ainds]\n assert_frame_equal(result, df)\n\n result = df.copy()\n result.ix[bkey] = 0\n result = result.ix[bkey]\n expected = df.ix[bkey].copy()\n expected.ix[:] = 0\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.ix[bkey] = 0\n result.ix[bkey] = df.ix[binds]\n assert_frame_equal(result, df)\n\n def test_as_matrix(self):\n frame = self.frame\n mat = frame.as_matrix()\n\n frameCols = frame.columns\n for i, row in enumerate(mat):\n for j, value in enumerate(row):\n col = frameCols[j]\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][i]))\n else:\n self.assertEqual(value, frame[col][i])\n\n # mixed type\n mat = self.mixed_frame.as_matrix(['foo', 'A'])\n self.assertEqual(mat[0, 0], 'bar')\n\n df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})\n mat = df.as_matrix()\n self.assertEqual(mat[0, 0], 1j)\n\n # single block corner case\n mat = self.frame.as_matrix(['A', 'B'])\n expected = self.frame.reindex(columns=['A', 'B']).values\n assert_almost_equal(mat, expected)\n\n def test_as_matrix_duplicates(self):\n df = DataFrame([[1, 2, 'a', 'b'],\n [1, 2, 'a', 'b']],\n columns=['one', 'one', 'two', 'two'])\n\n result = df.values\n expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],\n dtype=object)\n\n self.assertTrue(np.array_equal(result, expected))\n\n def test_ftypes(self):\n frame = self.mixed_float\n expected = Series(dict(A = 'float32:dense',\n B = 'float32:dense',\n C = 'float16:dense',\n D = 'float64:dense')).sort_values()\n result = frame.ftypes.sort_values()\n assert_series_equal(result,expected)\n\n def test_values(self):\n self.frame.values[:, 0] = 5.\n self.assertTrue((self.frame.values[:, 0] == 5).all())\n\n def test_deepcopy(self):\n cp = deepcopy(self.frame)\n series = cp['A']\n series[:] = 10\n for idx, value in compat.iteritems(series):\n self.assertNotEqual(self.frame['A'][idx], value)\n\n def test_copy(self):\n cop = self.frame.copy()\n cop['E'] = cop['A']\n self.assertNotIn('E', self.frame)\n\n # copy objects\n copy = self.mixed_frame.copy()\n self.assertIsNot(copy._data, self.mixed_frame._data)\n\n def _check_method(self, method='pearson', check_minp=False):\n if not check_minp:\n correls = self.frame.corr(method=method)\n exp = self.frame['A'].corr(self.frame['C'], method=method)\n assert_almost_equal(correls['A']['C'], exp)\n else:\n result = self.frame.corr(min_periods=len(self.frame) - 8)\n expected = self.frame.corr()\n expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan\n\n def test_corr_pearson(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('pearson')\n\n 
def test_corr_kendall(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('kendall')\n\n def test_corr_spearman(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('spearman')\n\n def test_corr_non_numeric(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n # exclude non-numeric types\n result = self.mixed_frame.corr()\n expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()\n assert_frame_equal(result, expected)\n\n def test_corr_nooverlap(self):\n tm._skip_if_no_scipy()\n\n # nothing in common\n for meth in ['pearson', 'kendall', 'spearman']:\n df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],\n 'C': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]})\n rs = df.corr(meth)\n self.assertTrue(isnull(rs.ix['A', 'B']))\n self.assertTrue(isnull(rs.ix['B', 'A']))\n self.assertEqual(rs.ix['A', 'A'], 1)\n self.assertEqual(rs.ix['B', 'B'], 1)\n self.assertTrue(isnull(rs.ix['C', 'C']))\n\n def test_corr_constant(self):\n tm._skip_if_no_scipy()\n\n # constant --> all NA\n\n for meth in ['pearson', 'spearman']:\n df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1, 1]})\n rs = df.corr(meth)\n self.assertTrue(isnull(rs.values).all())\n\n def test_corr_int(self):\n # dtypes other than float64 #1761\n df3 = DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1, 2, 3, 4]})\n\n # it works!\n df3.cov()\n df3.corr()\n\n def test_corr_int_and_boolean(self):\n tm._skip_if_no_scipy()\n\n # when dtypes of pandas series are different\n # then ndarray will have dtype=object,\n # so it need to be properly handled\n df = DataFrame({\"a\": [True, False], \"b\": [1, 0]})\n\n expected = DataFrame(np.ones((2, 2)), index=['a', 'b'], columns=['a', 'b'])\n for meth in ['pearson', 'kendall', 'spearman']:\n assert_frame_equal(df.corr(meth), expected)\n\n def test_cov(self):\n # min_periods no NAs (corner case)\n expected = self.frame.cov()\n result = self.frame.cov(min_periods=len(self.frame))\n\n assert_frame_equal(expected, result)\n\n result = self.frame.cov(min_periods=len(self.frame) + 1)\n self.assertTrue(isnull(result.values).all())\n\n # with NAs\n frame = self.frame.copy()\n frame['A'][:5] = nan\n frame['B'][5:10] = nan\n result = self.frame.cov(min_periods=len(self.frame) - 8)\n expected = self.frame.cov()\n expected.ix['A', 'B'] = np.nan\n expected.ix['B', 'A'] = np.nan\n\n # regular\n self.frame['A'][:5] = nan\n self.frame['B'][:10] = nan\n cov = self.frame.cov()\n\n assert_almost_equal(cov['A']['C'],\n self.frame['A'].cov(self.frame['C']))\n\n # exclude non-numeric types\n result = self.mixed_frame.cov()\n expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()\n assert_frame_equal(result, expected)\n\n # Single column frame\n df = DataFrame(np.linspace(0.0,1.0,10))\n result = df.cov()\n expected = DataFrame(np.cov(df.values.T).reshape((1,1)),\n index=df.columns,columns=df.columns)\n assert_frame_equal(result, expected)\n df.ix[0] = np.nan\n result = df.cov()\n expected = DataFrame(np.cov(df.values[1:].T).reshape((1,1)),\n index=df.columns,columns=df.columns)\n assert_frame_equal(result, expected)\n\n def test_corrwith(self):\n a = self.tsframe\n noise = Series(randn(len(a)), index=a.index)\n\n b = self.tsframe + noise\n\n # make sure order does not matter\n b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])\n del b['B']\n\n colcorr = 
a.corrwith(b, axis=0)\n assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))\n\n rowcorr = a.corrwith(b, axis=1)\n assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))\n\n dropped = a.corrwith(b, axis=0, drop=True)\n assert_almost_equal(dropped['A'], a['A'].corr(b['A']))\n self.assertNotIn('B', dropped)\n\n dropped = a.corrwith(b, axis=1, drop=True)\n self.assertNotIn(a.index[-1], dropped.index)\n\n # non time-series data\n index = ['a', 'b', 'c', 'd', 'e']\n columns = ['one', 'two', 'three', 'four']\n df1 = DataFrame(randn(5, 4), index=index, columns=columns)\n df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)\n correls = df1.corrwith(df2, axis=1)\n for row in index[:4]:\n assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))\n\n def test_corrwith_with_objects(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n cols = ['A', 'B', 'C', 'D']\n\n df1['obj'] = 'foo'\n df2['obj'] = 'bar'\n\n result = df1.corrwith(df2)\n expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])\n assert_series_equal(result, expected)\n\n result = df1.corrwith(df2, axis=1)\n expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)\n assert_series_equal(result, expected)\n\n def test_corrwith_series(self):\n result = self.tsframe.corrwith(self.tsframe['A'])\n expected = self.tsframe.apply(self.tsframe['A'].corr)\n\n assert_series_equal(result, expected)\n\n def test_corrwith_matches_corrcoef(self):\n df1 = DataFrame(np.arange(10000), columns=['a'])\n df2 = DataFrame(np.arange(10000)**2, columns=['a'])\n c1 = df1.corrwith(df2)['a']\n c2 = np.corrcoef(df1['a'],df2['a'])[0][1]\n\n assert_almost_equal(c1, c2)\n self.assertTrue(c1 < 1)\n\n def test_drop_names(self):\n df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],\n columns=['d', 'e', 'f'])\n df.index.name, df.columns.name = 'first', 'second'\n df_dropped_b = df.drop('b')\n df_dropped_e = df.drop('e', axis=1)\n df_inplace_b, df_inplace_e = df.copy(), df.copy()\n df_inplace_b.drop('b', inplace=True)\n df_inplace_e.drop('e', axis=1, inplace=True)\n for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):\n self.assertEqual(obj.index.name, 'first')\n self.assertEqual(obj.columns.name, 'second')\n self.assertEqual(list(df.columns), ['d', 'e', 'f'])\n\n self.assertRaises(ValueError, df.drop, ['g'])\n self.assertRaises(ValueError, df.drop, ['g'], 1)\n\n # errors = 'ignore'\n dropped = df.drop(['g'], errors='ignore')\n expected = Index(['a', 'b', 'c'], name='first')\n self.assert_index_equal(dropped.index, expected)\n\n dropped = df.drop(['b', 'g'], errors='ignore')\n expected = Index(['a', 'c'], name='first')\n self.assert_index_equal(dropped.index, expected)\n\n dropped = df.drop(['g'], axis=1, errors='ignore')\n expected = Index(['d', 'e', 'f'], name='second')\n self.assert_index_equal(dropped.columns, expected)\n\n dropped = df.drop(['d', 'g'], axis=1, errors='ignore')\n expected = Index(['e', 'f'], name='second')\n self.assert_index_equal(dropped.columns, expected)\n\n def test_dropEmptyRows(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n original = Series(mat, index=self.frame.index, name='foo')\n expected = original.dropna()\n inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()\n\n smaller_frame = frame.dropna(how='all')\n # check that original was preserved\n assert_series_equal(frame['foo'], original)\n inplace_frame1.dropna(how='all', inplace=True)\n assert_series_equal(smaller_frame['foo'], 
expected)\n assert_series_equal(inplace_frame1['foo'], expected)\n\n smaller_frame = frame.dropna(how='all', subset=['foo'])\n inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)\n assert_series_equal(smaller_frame['foo'], expected)\n assert_series_equal(inplace_frame2['foo'], expected)\n\n def test_dropIncompleteRows(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n frame['bar'] = 5\n original = Series(mat, index=self.frame.index, name='foo')\n inp_frame1, inp_frame2 = frame.copy(), frame.copy()\n\n smaller_frame = frame.dropna()\n assert_series_equal(frame['foo'], original)\n inp_frame1.dropna(inplace=True)\n self.assert_numpy_array_equal(smaller_frame['foo'], mat[5:])\n self.assert_numpy_array_equal(inp_frame1['foo'], mat[5:])\n\n samesize_frame = frame.dropna(subset=['bar'])\n assert_series_equal(frame['foo'], original)\n self.assertTrue((frame['bar'] == 5).all())\n inp_frame2.dropna(subset=['bar'], inplace=True)\n self.assertTrue(samesize_frame.index.equals(self.frame.index))\n self.assertTrue(inp_frame2.index.equals(self.frame.index))\n\n def test_dropna(self):\n df = DataFrame(np.random.randn(6, 4))\n df[2][:2] = nan\n\n dropped = df.dropna(axis=1)\n expected = df.ix[:, [0, 1, 3]]\n inp = df.copy()\n inp.dropna(axis=1, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=0)\n expected = df.ix[lrange(2, 6)]\n inp = df.copy()\n inp.dropna(axis=0, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n # threshold\n dropped = df.dropna(axis=1, thresh=5)\n expected = df.ix[:, [0, 1, 3]]\n inp = df.copy()\n inp.dropna(axis=1, thresh=5, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=0, thresh=4)\n expected = df.ix[lrange(2, 6)]\n inp = df.copy()\n inp.dropna(axis=0, thresh=4, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=1, thresh=4)\n assert_frame_equal(dropped, df)\n\n dropped = df.dropna(axis=1, thresh=3)\n assert_frame_equal(dropped, df)\n\n # subset\n dropped = df.dropna(axis=0, subset=[0, 1, 3])\n inp = df.copy()\n inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)\n assert_frame_equal(dropped, df)\n assert_frame_equal(inp, df)\n\n # all\n dropped = df.dropna(axis=1, how='all')\n assert_frame_equal(dropped, df)\n\n df[2] = nan\n dropped = df.dropna(axis=1, how='all')\n expected = df.ix[:, [0, 1, 3]]\n assert_frame_equal(dropped, expected)\n\n # bad input\n self.assertRaises(ValueError, df.dropna, axis=3)\n\n\n def test_drop_and_dropna_caching(self):\n # tst that cacher updates\n original = Series([1, 2, np.nan], name='A')\n expected = Series([1, 2], dtype=original.dtype, name='A')\n df = pd.DataFrame({'A': original.values.copy()})\n df2 = df.copy()\n df['A'].dropna()\n assert_series_equal(df['A'], original)\n df['A'].dropna(inplace=True)\n assert_series_equal(df['A'], expected)\n df2['A'].drop([1])\n assert_series_equal(df2['A'], original)\n df2['A'].drop([1], inplace=True)\n assert_series_equal(df2['A'], original.drop([1]))\n\n def test_dropna_corner(self):\n # bad input\n self.assertRaises(ValueError, self.frame.dropna, how='foo')\n self.assertRaises(TypeError, self.frame.dropna, how=None)\n # non-existent column - 8303\n self.assertRaises(KeyError, self.frame.dropna, subset=['A','X'])\n\n def test_dropna_multiple_axes(self):\n df = DataFrame([[1, 
np.nan, 2, 3],\n [4, np.nan, 5, 6],\n [np.nan, np.nan, np.nan, np.nan],\n [7, np.nan, 8, 9]])\n cp = df.copy()\n result = df.dropna(how='all', axis=[0, 1])\n result2 = df.dropna(how='all', axis=(0, 1))\n expected = df.dropna(how='all').dropna(how='all', axis=1)\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n assert_frame_equal(df, cp)\n\n inp = df.copy()\n inp.dropna(how='all', axis=(0, 1), inplace=True)\n assert_frame_equal(inp, expected)\n\n def test_drop_duplicates(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('AAA')\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('AAA', keep='last')\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('AAA', keep=False)\n expected = df.ix[[]]\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 0)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates('AAA', take_last=True)\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n expected = df.ix[[0, 1, 2, 3]]\n result = df.drop_duplicates(np.array(['AAA', 'B']))\n assert_frame_equal(result, expected)\n result = df.drop_duplicates(['AAA', 'B'])\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AAA', 'B'), keep='last')\n expected = df.ix[[0, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AAA', 'B'), keep=False)\n expected = df.ix[[0]]\n assert_frame_equal(result, expected)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(('AAA', 'B'), take_last=True)\n expected = df.ix[[0, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n # consider everything\n df2 = df.ix[:, ['AAA', 'B', 'C']]\n\n result = df2.drop_duplicates()\n # in this case only\n expected = df2.drop_duplicates(['AAA', 'B'])\n assert_frame_equal(result, expected)\n\n result = df2.drop_duplicates(keep='last')\n expected = df2.drop_duplicates(['AAA', 'B'], keep='last')\n assert_frame_equal(result, expected)\n\n result = df2.drop_duplicates(keep=False)\n expected = df2.drop_duplicates(['AAA', 'B'], keep=False)\n assert_frame_equal(result, expected)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df2.drop_duplicates(take_last=True)\n with tm.assert_produces_warning(FutureWarning):\n expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)\n assert_frame_equal(result, expected)\n\n # integers\n result = df.drop_duplicates('C')\n expected = df.iloc[[0,2]]\n assert_frame_equal(result, expected)\n result = df.drop_duplicates('C',keep='last')\n expected = df.iloc[[-2,-1]]\n assert_frame_equal(result, expected)\n\n df['E'] = df['C'].astype('int8')\n result = df.drop_duplicates('E')\n expected = df.iloc[[0,2]]\n assert_frame_equal(result, expected)\n result = df.drop_duplicates('E',keep='last')\n expected = df.iloc[[-2,-1]]\n assert_frame_equal(result, expected)\n\n # GH 11376\n df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],\n 'y': [0, 6, 5, 5, 9, 1, 2]})\n expected = df.loc[df.index != 3]\n assert_frame_equal(df.drop_duplicates(), expected)\n\n df = pd.DataFrame([[1 , 0], [0, 2]])\n assert_frame_equal(df.drop_duplicates(), df)\n\n df = pd.DataFrame([[-2, 0], [0, -4]])\n 
assert_frame_equal(df.drop_duplicates(), df)\n\n x = np.iinfo(np.int64).max / 3 * 2\n df = pd.DataFrame([[-x, x], [0, x + 4]])\n assert_frame_equal(df.drop_duplicates(), df)\n\n df = pd.DataFrame([[-x, x], [x, x + 4]])\n assert_frame_equal(df.drop_duplicates(), df)\n\n def test_drop_duplicates_for_take_all(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',\n 'foo', 'bar', 'qux', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('AAA')\n expected = df.iloc[[0, 1, 2, 6]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('AAA', keep='last')\n expected = df.iloc[[2, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('AAA', keep=False)\n expected = df.iloc[[2, 6]]\n assert_frame_equal(result, expected)\n\n # multiple columns\n result = df.drop_duplicates(['AAA', 'B'])\n expected = df.iloc[[0, 1, 2, 3, 4, 6]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['AAA', 'B'], keep='last')\n expected = df.iloc[[0, 1, 2, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['AAA', 'B'], keep=False)\n expected = df.iloc[[0, 1, 2, 6]]\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_deprecated_warning(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n expected = df[:2]\n\n # Raises warning\n with tm.assert_produces_warning(False):\n result = df.drop_duplicates(subset='AAA')\n assert_frame_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(cols='AAA')\n assert_frame_equal(result, expected)\n\n # Does not allow both subset and cols\n self.assertRaises(TypeError, df.drop_duplicates,\n kwargs={'cols': 'AAA', 'subset': 'B'})\n\n # Does not allow unknown kwargs\n self.assertRaises(TypeError, df.drop_duplicates,\n kwargs={'subset': 'AAA', 'bad_arg': True})\n\n # deprecate take_last\n # Raises warning\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(take_last=False, subset='AAA')\n assert_frame_equal(result, expected)\n\n self.assertRaises(ValueError, df.drop_duplicates, keep='invalid_name')\n\n def test_drop_duplicates_tuple(self):\n df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates(('AA', 'AB'))\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AA', 'AB'), keep='last')\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AA', 'AB'), keep=False)\n expected = df.ix[[]] # empty df\n self.assertEqual(len(result), 0)\n assert_frame_equal(result, expected)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(('AA', 'AB'), take_last=True)\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n expected = df.ix[[0, 1, 2, 3]]\n result = df.drop_duplicates((('AA', 'AB'), 'B'))\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_NA(self):\n # none\n df = DataFrame({'A': [None, None, 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': 
['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('A')\n expected = df.ix[[0, 2, 3]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('A', keep='last')\n expected = df.ix[[1, 6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('A', keep=False)\n expected = df.ix[[]] # empty df\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 0)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates('A', take_last=True)\n expected = df.ix[[1, 6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n result = df.drop_duplicates(['A', 'B'])\n expected = df.ix[[0, 2, 3, 6]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['A', 'B'], keep='last')\n expected = df.ix[[1, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['A', 'B'], keep=False)\n expected = df.ix[[6]]\n assert_frame_equal(result, expected)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(['A', 'B'], take_last=True)\n expected = df.ix[[1, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n # nan\n df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('C')\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('C', keep='last')\n expected = df.ix[[3, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('C', keep=False)\n expected = df.ix[[]] # empty df\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 0)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates('C', take_last=True)\n expected = df.ix[[3, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n result = df.drop_duplicates(['C', 'B'])\n expected = df.ix[[0, 1, 2, 4]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['C', 'B'], keep='last')\n expected = df.ix[[1, 3, 6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['C', 'B'], keep=False)\n expected = df.ix[[1]]\n assert_frame_equal(result, expected)\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(['C', 'B'], take_last=True)\n expected = df.ix[[1, 3, 6, 7]]\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_NA_for_take_all(self):\n # none\n df = DataFrame({'A': [None, None, 'foo', 'bar',\n 'foo', 'baz', 'bar', 'qux'],\n 'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})\n\n # single column\n result = df.drop_duplicates('A')\n expected = df.iloc[[0, 2, 3, 5, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('A', keep='last')\n expected = df.iloc[[1, 4, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('A', keep=False)\n expected = df.iloc[[5, 7]]\n assert_frame_equal(result, expected)\n\n # nan\n\n # single column\n result = df.drop_duplicates('C')\n expected = df.iloc[[0, 1, 5, 6]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('C', keep='last')\n expected = df.iloc[[3, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n 
result = df.drop_duplicates('C', keep=False)\n expected = df.iloc[[5, 6]]\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_inplace(self):\n orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n df = orig.copy()\n df.drop_duplicates('A', inplace=True)\n expected = orig[:2]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n df.drop_duplicates('A', keep='last', inplace=True)\n expected = orig.ix[[6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n df.drop_duplicates('A', keep=False, inplace=True)\n expected = orig.ix[[]]\n result = df\n assert_frame_equal(result, expected)\n self.assertEqual(len(df), 0)\n\n # deprecate take_last\n df = orig.copy()\n with tm.assert_produces_warning(FutureWarning):\n df.drop_duplicates('A', take_last=True, inplace=True)\n expected = orig.ix[[6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n # multi column\n df = orig.copy()\n df.drop_duplicates(['A', 'B'], inplace=True)\n expected = orig.ix[[0, 1, 2, 3]]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n df.drop_duplicates(['A', 'B'], keep='last', inplace=True)\n expected = orig.ix[[0, 5, 6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n df.drop_duplicates(['A', 'B'], keep=False, inplace=True)\n expected = orig.ix[[0]]\n result = df\n assert_frame_equal(result, expected)\n\n # deprecate take_last\n df = orig.copy()\n with tm.assert_produces_warning(FutureWarning):\n df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)\n expected = orig.ix[[0, 5, 6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n # consider everything\n orig2 = orig.ix[:, ['A', 'B', 'C']].copy()\n\n df2 = orig2.copy()\n df2.drop_duplicates(inplace=True)\n # in this case only\n expected = orig2.drop_duplicates(['A', 'B'])\n result = df2\n assert_frame_equal(result, expected)\n\n df2 = orig2.copy()\n df2.drop_duplicates(keep='last', inplace=True)\n expected = orig2.drop_duplicates(['A', 'B'], keep='last')\n result = df2\n assert_frame_equal(result, expected)\n\n df2 = orig2.copy()\n df2.drop_duplicates(keep=False, inplace=True)\n expected = orig2.drop_duplicates(['A', 'B'], keep=False)\n result = df2\n assert_frame_equal(result, expected)\n\n # deprecate take_last\n df2 = orig2.copy()\n with tm.assert_produces_warning(FutureWarning):\n df2.drop_duplicates(take_last=True, inplace=True)\n with tm.assert_produces_warning(FutureWarning):\n expected = orig2.drop_duplicates(['A', 'B'], take_last=True)\n result = df2\n assert_frame_equal(result, expected)\n\n def test_duplicated_deprecated_warning(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # Raises warning\n with tm.assert_produces_warning(False):\n result = df.duplicated(subset='AAA')\n\n with tm.assert_produces_warning(FutureWarning):\n result = df.duplicated(cols='AAA')\n\n # Does not allow both subset and cols\n self.assertRaises(TypeError, df.duplicated,\n kwargs={'cols': 'AAA', 'subset': 'B'})\n\n # Does not allow unknown kwargs\n self.assertRaises(TypeError, df.duplicated,\n kwargs={'subset': 'AAA', 'bad_arg': True})\n\n def test_drop_col_still_multiindex(self):\n arrays = [['a', 'b', 'c', 
'top'],\n ['', '', '', 'OD'],\n ['', '', '', 'wx']]\n\n tuples = sorted(zip(*arrays))\n index = MultiIndex.from_tuples(tuples)\n\n df = DataFrame(randn(3, 4), columns=index)\n del df[('a', '', '')]\n assert(isinstance(df.columns, MultiIndex))\n\n def test_drop(self):\n simple = DataFrame({\"A\": [1, 2, 3, 4], \"B\": [0, 1, 2, 3]})\n assert_frame_equal(simple.drop(\"A\", axis=1), simple[['B']])\n assert_frame_equal(simple.drop([\"A\", \"B\"], axis='columns'),\n simple[[]])\n assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])\n assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :])\n\n self.assertRaises(ValueError, simple.drop, 5)\n self.assertRaises(ValueError, simple.drop, 'C', 1)\n self.assertRaises(ValueError, simple.drop, [1, 5])\n self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)\n\n # errors = 'ignore'\n assert_frame_equal(simple.drop(5, errors='ignore'), simple)\n assert_frame_equal(simple.drop([0, 5], errors='ignore'),\n simple.ix[[1, 2, 3], :])\n assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)\n assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),\n simple[['B']])\n\n #non-unique - wheee!\n nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),\n columns=['a', 'a', 'b'])\n assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])\n assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])\n\n nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))\n nu_df.columns = list('abc')\n assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[[\"Y\"], :])\n assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])\n\n # inplace cache issue\n # GH 5628\n df = pd.DataFrame(np.random.randn(10,3), columns=list('abc'))\n expected = df[~(df.b>0)]\n df.drop(labels=df[df.b>0].index, inplace=True)\n assert_frame_equal(df,expected)\n\n def test_fillna(self):\n self.tsframe.ix[:5,'A'] = nan\n self.tsframe.ix[-5:,'A'] = nan\n\n zero_filled = self.tsframe.fillna(0)\n self.assertTrue((zero_filled.ix[:5,'A'] == 0).all())\n\n padded = self.tsframe.fillna(method='pad')\n self.assertTrue(np.isnan(padded.ix[:5,'A']).all())\n self.assertTrue((padded.ix[-5:,'A'] == padded.ix[-5,'A']).all())\n\n # mixed type\n self.mixed_frame.ix[5:20,'foo'] = nan\n self.mixed_frame.ix[-10:,'A'] = nan\n result = self.mixed_frame.fillna(value=0)\n result = self.mixed_frame.fillna(method='pad')\n\n self.assertRaises(ValueError, self.tsframe.fillna)\n self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')\n\n # mixed numeric (but no float16)\n mf = self.mixed_float.reindex(columns=['A','B','D'])\n mf.ix[-10:,'A'] = nan\n result = mf.fillna(value=0)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = mf.fillna(method='pad')\n _check_mixed_float(result, dtype = dict(C = None))\n\n # empty frame (GH #2778)\n df = DataFrame(columns=['x'])\n for m in ['pad','backfill']:\n df.x.fillna(method=m,inplace=1)\n df.x.fillna(method=m)\n\n # with different dtype (GH3386)\n df = DataFrame([['a','a',np.nan,'a'],['b','b',np.nan,'b'],['c','c',np.nan,'c']])\n\n result = df.fillna({ 2: 'foo' })\n expected = DataFrame([['a','a','foo','a'],['b','b','foo','b'],['c','c','foo','c']])\n assert_frame_equal(result, expected)\n\n df.fillna({ 2: 'foo' }, inplace=True)\n assert_frame_equal(df, expected)\n\n # limit and value\n df = DataFrame(np.random.randn(10,3))\n df.iloc[2:7,0] = np.nan\n df.iloc[3:5,2] = np.nan\n\n expected = df.copy()\n expected.iloc[2,0] = 999\n expected.iloc[3,2] = 999\n result = df.fillna(999,limit=1)\n 
assert_frame_equal(result, expected)\n\n # with datelike\n # GH 6344\n df = DataFrame({\n 'Date':[pd.NaT, Timestamp(\"2014-1-1\")],\n 'Date2':[ Timestamp(\"2013-1-1\"), pd.NaT]\n })\n\n expected = df.copy()\n expected['Date'] = expected['Date'].fillna(df.ix[0,'Date2'])\n result = df.fillna(value={'Date':df['Date2']})\n assert_frame_equal(result, expected)\n\n def test_fillna_dtype_conversion(self):\n # make sure that fillna on an empty frame works\n df = DataFrame(index=[\"A\",\"B\",\"C\"], columns = [1,2,3,4,5])\n result = df.get_dtype_counts().sort_values()\n expected = Series({ 'object' : 5 })\n assert_series_equal(result, expected)\n\n result = df.fillna(1)\n expected = DataFrame(1, index=[\"A\",\"B\",\"C\"], columns = [1,2,3,4,5])\n result = result.get_dtype_counts().sort_values()\n expected = Series({ 'int64' : 5 })\n assert_series_equal(result, expected)\n\n # empty block\n df = DataFrame(index=lrange(3),columns=['A','B'],dtype='float64')\n result = df.fillna('nan')\n expected = DataFrame('nan',index=lrange(3),columns=['A','B'])\n assert_frame_equal(result, expected)\n\n # equiv of replace\n df = DataFrame(dict(A = [1,np.nan], B = [1.,2.]))\n for v in ['',1,np.nan,1.0]:\n expected = df.replace(np.nan,v)\n result = df.fillna(v)\n assert_frame_equal(result, expected)\n\n def test_fillna_datetime_columns(self):\n # GH 7095\n df = pd.DataFrame({'A': [-1, -2, np.nan],\n 'B': date_range('20130101', periods=3),\n 'C': ['foo', 'bar', None],\n 'D': ['foo2', 'bar2', None]},\n index=date_range('20130110', periods=3))\n result = df.fillna('?')\n expected = pd.DataFrame({'A': [-1, -2, '?'],\n 'B': date_range('20130101', periods=3),\n 'C': ['foo', 'bar', '?'],\n 'D': ['foo2', 'bar2', '?']},\n index=date_range('20130110', periods=3))\n self.assert_frame_equal(result, expected)\n\n df = pd.DataFrame({'A': [-1, -2, np.nan],\n 'B': [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), pd.NaT],\n 'C': ['foo', 'bar', None],\n 'D': ['foo2', 'bar2', None]},\n index=date_range('20130110', periods=3))\n result = df.fillna('?')\n expected = pd.DataFrame({'A': [-1, -2, '?'],\n 'B': [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), '?'],\n 'C': ['foo', 'bar', '?'],\n 'D': ['foo2', 'bar2', '?']},\n index=date_range('20130110', periods=3))\n self.assert_frame_equal(result, expected)\n\n def test_ffill(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n assert_frame_equal(self.tsframe.ffill(),\n self.tsframe.fillna(method='ffill'))\n\n def test_bfill(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n assert_frame_equal(self.tsframe.bfill(),\n self.tsframe.fillna(method='bfill'))\n\n def test_fillna_skip_certain_blocks(self):\n # don't try to fill boolean, int blocks\n\n df = DataFrame(np.random.randn(10, 4).astype(int))\n\n # it works!\n df.fillna(np.nan)\n\n def test_fillna_inplace(self):\n df = DataFrame(np.random.randn(10, 4))\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n\n expected = df.fillna(value=0)\n self.assertIsNot(expected, df)\n\n df.fillna(value=0, inplace=True)\n assert_frame_equal(df, expected)\n\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n expected = df.fillna(method='ffill')\n self.assertIsNot(expected, df)\n\n df.fillna(method='ffill', inplace=True)\n assert_frame_equal(df, expected)\n\n def test_fillna_dict_series(self):\n df = DataFrame({'a': [nan, 1, 2, nan, nan],\n 'b': [1, 2, 3, nan, nan],\n 'c': [nan, 1, 2, 3, 4]})\n\n result = df.fillna({'a': 0, 'b': 5})\n\n expected = df.copy()\n expected['a'] = expected['a'].fillna(0)\n expected['b'] = 
expected['b'].fillna(5)\n assert_frame_equal(result, expected)\n\n # it works\n result = df.fillna({'a': 0, 'b': 5, 'd': 7})\n\n # Series treated same as dict\n result = df.fillna(df.max())\n expected = df.fillna(df.max().to_dict())\n assert_frame_equal(result, expected)\n\n # disable this for now\n with assertRaisesRegexp(NotImplementedError, 'column by column'):\n df.fillna(df.max(1), axis=1)\n\n def test_fillna_dataframe(self):\n # GH 8377\n df = DataFrame({'a': [nan, 1, 2, nan, nan],\n 'b': [1, 2, 3, nan, nan],\n 'c': [nan, 1, 2, 3, 4]},\n index = list('VWXYZ'))\n\n # df2 may have different index and columns\n df2 = DataFrame({'a': [nan, 10, 20, 30, 40],\n 'b': [50, 60, 70, 80, 90],\n 'foo': ['bar']*5},\n index = list('VWXuZ'))\n\n result = df.fillna(df2)\n\n # only those columns and indices which are shared get filled\n expected = DataFrame({'a': [nan, 1, 2, nan, 40],\n 'b': [1, 2, 3, nan, 90],\n 'c': [nan, 1, 2, 3, 4]},\n index = list('VWXYZ'))\n\n assert_frame_equal(result, expected)\n\n def test_fillna_columns(self):\n df = DataFrame(np.random.randn(10, 10))\n df.values[:, ::2] = np.nan\n\n result = df.fillna(method='ffill', axis=1)\n expected = df.T.fillna(method='pad').T\n assert_frame_equal(result, expected)\n\n df.insert(6, 'foo', 5)\n result = df.fillna(method='ffill', axis=1)\n expected = df.astype(float).fillna(method='ffill', axis=1)\n assert_frame_equal(result, expected)\n\n\n def test_fillna_invalid_method(self):\n with assertRaisesRegexp(ValueError, 'ffil'):\n self.frame.fillna(method='ffil')\n\n def test_fillna_invalid_value(self):\n # list\n self.assertRaises(TypeError, self.frame.fillna, [1, 2])\n # tuple\n self.assertRaises(TypeError, self.frame.fillna, (1, 2))\n # frame with series\n self.assertRaises(ValueError, self.frame.iloc[:,0].fillna, self.frame)\n\n def test_replace_inplace(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n tsframe = self.tsframe.copy()\n tsframe.replace(nan, 0, inplace=True)\n assert_frame_equal(tsframe, self.tsframe.fillna(0))\n\n self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)\n self.assertRaises(TypeError, self.tsframe.replace, nan)\n\n # mixed type\n self.mixed_frame.ix[5:20,'foo'] = nan\n self.mixed_frame.ix[-10:,'A'] = nan\n\n result = self.mixed_frame.replace(np.nan, 0)\n expected = self.mixed_frame.fillna(value=0)\n assert_frame_equal(result, expected)\n\n tsframe = self.tsframe.copy()\n tsframe.replace([nan], [0], inplace=True)\n assert_frame_equal(tsframe, self.tsframe.fillna(0))\n\n def test_regex_replace_scalar(self):\n obj = {'a': list('ab..'), 'b': list('efgh')}\n dfobj = DataFrame(obj)\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ### simplest cases\n ## regex -> value\n # obj frame\n res = dfobj.replace(r'\\s*\\.\\s*', nan, regex=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.replace(r'\\s*\\.\\s*', nan, regex=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True)\n assert_frame_equal(dfobj, 
res.fillna('.'))\n\n # mixed\n res = dfmix.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1')\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfmix.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfmix.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n def test_regex_replace_scalar_inplace(self):\n obj = {'a': list('ab..'), 'b': list('efgh')}\n dfobj = DataFrame(obj)\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ### simplest cases\n ## regex -> value\n # obj frame\n res = dfobj.copy()\n res.replace(r'\\s*\\.\\s*', nan, regex=True, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(r'\\s*\\.\\s*', nan, regex=True, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True, inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True, inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.copy()\n res.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1', regex=True,\n inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1', regex=True,\n inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfobj.copy()\n res.replace(regex=r'\\s*\\.\\s*', value=nan, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(regex=r'\\s*\\.\\s*', value=nan, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1', inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1', inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs 
as well\n res = dfobj.copy()\n res.replace(regex=re.compile(r'\\s*\\.\\s*'), value=nan, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(regex=re.compile(r'\\s*\\.\\s*'), value=nan, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1',\n inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1',\n inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_obj(self):\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'e|f|g']\n values = [nan, 'crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +\n ['h'], 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(e|f|g)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',\n 'f_crap',\n 'g_crap', 'h'],\n 'c': ['h', 'e_crap', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.replace(value=values, regex=to_replace_res)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_obj_inplace(self):\n ### same as above with inplace=True\n ## lists of regexes and values\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'e|f|g']\n values = [nan, 'crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +\n ['h'], 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(e|f|g)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',\n 'f_crap',\n 'g_crap', 'h'],\n 'c': ['h', 'e_crap', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, 
regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.copy()\n res.replace(value=values, regex=to_replace_res, inplace=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_mixed(self):\n ## mixed frame to make sure this doesn't break things\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'a']\n values = [nan, 'crap']\n mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}\n dfmix2 = DataFrame(mix2)\n res = dfmix2.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(a|b)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',\n '..']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.replace(regex=to_replace_res, value=values)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_mixed_inplace(self):\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n # the same inplace\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'a']\n values = [nan, 'crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(a|b)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',\n '..']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.copy()\n res.replace(regex=to_replace_res, value=values, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_dict_mixed(self):\n mix = {'a': 
lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n dfmix = DataFrame(mix)\n\n ## dicts\n # single dict {re1: v1}, search the whole frame\n # need test for this...\n\n # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole\n # frame\n res = dfmix.replace({'b': r'\\s*\\.\\s*'}, {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace({'b': r'\\s*\\.\\s*'}, {'b': nan}, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the\n # whole frame\n res = dfmix.replace({'b': r'\\s*(\\.)\\s*'}, {'b': r'\\1ty'}, regex=True)\n res2 = dfmix.copy()\n res2.replace({'b': r'\\s*(\\.)\\s*'}, {'b': r'\\1ty'}, inplace=True,\n regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n res = dfmix.replace(regex={'b': r'\\s*(\\.)\\s*'}, value={'b': r'\\1ty'})\n res2 = dfmix.copy()\n res2.replace(regex={'b': r'\\s*(\\.)\\s*'}, value={'b': r'\\1ty'},\n inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n # scalar -> dict\n # to_replace regex, {value: value}\n expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':\n mix['c']})\n res = dfmix.replace('a', {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace('a', {'b': nan}, regex=True, inplace=True)\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n res = dfmix.replace('a', {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace(regex='a', value={'b': nan}, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n def test_regex_replace_dict_nested(self):\n # nested dicts will not work until this is implemented for Series\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n dfmix = DataFrame(mix)\n res = dfmix.replace({'b': {r'\\s*\\.\\s*': nan}}, regex=True)\n res2 = dfmix.copy()\n res4 = dfmix.copy()\n res2.replace({'b': {r'\\s*\\.\\s*': nan}}, inplace=True, regex=True)\n res3 = dfmix.replace(regex={'b': {r'\\s*\\.\\s*': nan}})\n res4.replace(regex={'b': {r'\\s*\\.\\s*': nan}}, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n assert_frame_equal(res4, expec)\n\n def test_regex_replace_dict_nested_gh4115(self):\n df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})\n expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})\n result = df.replace({'Type': {'Q':0,'T':1}})\n assert_frame_equal(result, expected)\n\n def test_regex_replace_list_to_scalar(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),\n 'c': [nan, nan, nan, 'd']})\n\n res = df.replace([r'\\s*\\.\\s*', 'a|b'], nan, regex=True)\n res2 = df.copy()\n res3 = df.copy()\n res2.replace([r'\\s*\\.\\s*', 'a|b'], nan, regex=True, inplace=True)\n res3.replace(regex=[r'\\s*\\.\\s*', 'a|b'], value=nan, inplace=True)\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def 
test_regex_replace_str_to_numeric(self):\n # what happens when you try to replace a numeric value with a regex?\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace(r'\\s*\\.\\s*', 0, regex=True)\n res2 = df.copy()\n res2.replace(r'\\s*\\.\\s*', 0, inplace=True, regex=True)\n res3 = df.copy()\n res3.replace(regex=r'\\s*\\.\\s*', value=0, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_regex_list_to_numeric(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace([r'\\s*\\.\\s*', 'b'], 0, regex=True)\n res2 = df.copy()\n res2.replace([r'\\s*\\.\\s*', 'b'], 0, regex=True, inplace=True)\n res3 = df.copy()\n res3.replace(regex=[r'\\s*\\.\\s*', 'b'], value=0, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,\n nan,\n 'd']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_series_of_regexes(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n s1 = Series({'b': r'\\s*\\.\\s*'})\n s2 = Series({'b': nan})\n res = df.replace(s1, s2, regex=True)\n res2 = df.copy()\n res2.replace(s1, s2, inplace=True, regex=True)\n res3 = df.copy()\n res3.replace(regex=s1, value=s2, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_numeric_to_object_conversion(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})\n res = df.replace(0, 'a')\n assert_frame_equal(res, expec)\n self.assertEqual(res.a.dtype, np.object_)\n\n def test_replace_regex_metachar(self):\n metachars = '[]', '()', '\\d', '\\w', '\\s'\n\n for metachar in metachars:\n df = DataFrame({'a': [metachar, 'else']})\n result = df.replace({'a': {metachar: 'paren'}})\n expected = DataFrame({'a': ['paren', 'else']})\n tm.assert_frame_equal(result, expected)\n\n def test_replace(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n zero_filled = self.tsframe.replace(nan, -1e8)\n assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))\n assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)\n\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n self.tsframe['B'][:5] = -1e8\n\n # empty\n df = DataFrame(index=['a', 'b'])\n assert_frame_equal(df, df.replace(5, 7))\n\n def test_replace_list(self):\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]\n to_replace_res = [r'.', r'e']\n values = [nan, 'crap']\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame({'a': ['a', 'b', nan, nan],\n 'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',\n 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [v1, v2, ..., vN] -> [v1, v2, .., vN]\n to_replace_res = [r'.', r'f']\n values = [r'..', r'crap']\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',\n 'h'],\n 'c': ['h', 'e', 'l', 'o']})\n\n 
assert_frame_equal(res, expec)\n\n def test_replace_series_dict(self):\n # from GH 3064\n df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})\n result = df.replace(0, {'zero': 0.5, 'one': 1.0})\n expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})\n assert_frame_equal(result, expected)\n\n result = df.replace(0, df.mean())\n assert_frame_equal(result, expected)\n\n # series to series/dict\n df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})\n s = Series({'zero': 0.0, 'one': 2.0})\n result = df.replace(s, {'zero': 0.5, 'one': 1.0})\n expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})\n assert_frame_equal(result, expected)\n\n result = df.replace(s, df.mean())\n assert_frame_equal(result, expected)\n\n def test_replace_convert(self):\n # gh 3907\n df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])\n m = {'foo': 1, 'bar': 2, 'bah': 3}\n rep = df.replace(m)\n expec = Series([ np.int64] * 3)\n res = rep.dtypes\n assert_series_equal(expec, res)\n\n def test_replace_mixed(self):\n self.mixed_frame.ix[5:20,'foo'] = nan\n self.mixed_frame.ix[-10:,'A'] = nan\n\n result = self.mixed_frame.replace(np.nan, -18)\n expected = self.mixed_frame.fillna(value=-18)\n assert_frame_equal(result, expected)\n assert_frame_equal(result.replace(-18, nan), self.mixed_frame)\n\n result = self.mixed_frame.replace(np.nan, -1e8)\n expected = self.mixed_frame.fillna(value=-1e8)\n assert_frame_equal(result, expected)\n assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)\n\n # int block upcasting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })\n result = df.replace(0, 0.5)\n assert_frame_equal(result,expected)\n\n df.replace(0, 0.5, inplace=True)\n assert_frame_equal(df,expected)\n\n # int block splitting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 'C' : Series([1,2],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })\n result = df.replace(0, 0.5)\n assert_frame_equal(result,expected)\n\n # to object block upcasting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })\n result = df.replace(2, 'foo')\n assert_frame_equal(result,expected)\n\n expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })\n result = df.replace([1,2], ['foo','bar'])\n assert_frame_equal(result,expected)\n\n # test case from\n df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })\n result = df.replace(3, df.mean().to_dict())\n expected = df.copy().astype('float64')\n m = df.mean()\n expected.iloc[0,0] = m[0]\n expected.iloc[1,1] = m[1]\n assert_frame_equal(result,expected)\n\n def test_replace_simple_nested_dict(self):\n df = DataFrame({'col': range(1, 5)})\n expected = DataFrame({'col': ['a', 2, 3, 'b']})\n\n result = df.replace({'col': {1: 'a', 4: 'b'}})\n tm.assert_frame_equal(expected, result)\n\n # in this case, should be the same as the not nested version\n result = df.replace({1: 'a', 4: 'b'})\n tm.assert_frame_equal(expected, result)\n\n def 
test_replace_simple_nested_dict_with_nonexistent_value(self):\n df = DataFrame({'col': range(1, 5)})\n expected = DataFrame({'col': ['a', 2, 3, 'b']})\n\n result = df.replace({-1: '-', 1: 'a', 4: 'b'})\n tm.assert_frame_equal(expected, result)\n\n result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})\n tm.assert_frame_equal(expected, result)\n\n def test_interpolate(self):\n pass\n\n def test_replace_value_is_none(self):\n self.assertRaises(TypeError, self.tsframe.replace, nan)\n orig_value = self.tsframe.iloc[0, 0]\n orig2 = self.tsframe.iloc[1, 0]\n\n self.tsframe.iloc[0, 0] = nan\n self.tsframe.iloc[1, 0] = 1\n\n result = self.tsframe.replace(to_replace={nan: 0})\n expected = self.tsframe.T.replace(to_replace={nan: 0}).T\n assert_frame_equal(result, expected)\n\n result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})\n tsframe = self.tsframe.copy()\n tsframe.iloc[0, 0] = 0\n tsframe.iloc[1, 0] = -1e8\n expected = tsframe\n assert_frame_equal(expected, result)\n self.tsframe.iloc[0, 0] = orig_value\n self.tsframe.iloc[1, 0] = orig2\n\n def test_replace_for_new_dtypes(self):\n\n # dtypes\n tsframe = self.tsframe.copy().astype(np.float32)\n tsframe['A'][:5] = nan\n tsframe['A'][-5:] = nan\n\n zero_filled = tsframe.replace(nan, -1e8)\n assert_frame_equal(zero_filled, tsframe.fillna(-1e8))\n assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)\n\n tsframe['A'][:5] = nan\n tsframe['A'][-5:] = nan\n tsframe['B'][:5] = -1e8\n\n b = tsframe['B']\n b[b == -1e8] = nan\n tsframe['B'] = b\n result = tsframe.fillna(method='bfill')\n assert_frame_equal(result, tsframe.fillna(method='bfill'))\n\n def test_replace_dtypes(self):\n # int\n df = DataFrame({'ints': [1, 2, 3]})\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]})\n assert_frame_equal(result, expected)\n\n df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)\n assert_frame_equal(result, expected)\n\n df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)\n assert_frame_equal(result, expected)\n\n # bools\n df = DataFrame({'bools': [True, False, True]})\n result = df.replace(False, True)\n self.assertTrue(result.values.all())\n\n # complex blocks\n df = DataFrame({'complex': [1j, 2j, 3j]})\n result = df.replace(1j, 0j)\n expected = DataFrame({'complex': [0j, 2j, 3j]})\n assert_frame_equal(result, expected)\n\n # datetime blocks\n prev = datetime.today()\n now = datetime.today()\n df = DataFrame({'datetime64': Index([prev, now, prev])})\n result = df.replace(prev, now)\n expected = DataFrame({'datetime64': Index([now] * 3)})\n assert_frame_equal(result, expected)\n\n def test_replace_input_formats(self):\n # both dicts\n to_rep = {'A': np.nan, 'B': 0, 'C': ''}\n values = {'A': 0, 'B': -1, 'C': 'missing'}\n df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],\n 'C': ['', 'asdf', 'fd']})\n filled = df.replace(to_rep, values)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(to_rep[k], values[k])\n assert_frame_equal(filled, DataFrame(expected))\n\n result = df.replace([0, 2, 5], [5, 2, 0])\n expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],\n 'C': ['', 'asdf', 'fd']})\n assert_frame_equal(result, expected)\n\n # dict to scalar\n filled = df.replace(to_rep, 0)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(to_rep[k], 0)\n assert_frame_equal(filled, 
DataFrame(expected))\n\n self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])\n\n # scalar to dict\n values = {'A': 0, 'B': -1, 'C': 'missing'}\n df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],\n 'C': ['', 'asdf', 'fd']})\n filled = df.replace(np.nan, values)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(np.nan, values[k])\n assert_frame_equal(filled, DataFrame(expected))\n\n # list to list\n to_rep = [np.nan, 0, '']\n values = [-2, -1, 'missing']\n result = df.replace(to_rep, values)\n expected = df.copy()\n for i in range(len(to_rep)):\n expected.replace(to_rep[i], values[i], inplace=True)\n assert_frame_equal(result, expected)\n\n self.assertRaises(ValueError, df.replace, to_rep, values[1:])\n\n # list to scalar\n to_rep = [np.nan, 0, '']\n result = df.replace(to_rep, -1)\n expected = df.copy()\n for i in range(len(to_rep)):\n expected.replace(to_rep[i], -1, inplace=True)\n assert_frame_equal(result, expected)\n\n def test_replace_limit(self):\n pass\n\n def test_replace_dict_no_regex(self):\n answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:\n 'Disagree', 4: 'Strongly Disagree'})\n weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':\n 5, 'Strongly Disagree': 1}\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_series_no_regex(self):\n answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:\n 'Disagree', 4: 'Strongly Disagree'})\n weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,\n 'Strongly Agree': 5, 'Strongly Disagree': 1})\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_dict_tuple_list_ordering_remains_the_same(self):\n df = DataFrame(dict(A=[nan, 1]))\n res1 = df.replace(to_replace={nan: 0, 1: -1e8})\n res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])\n res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])\n\n expected = DataFrame({'A': [0, -1e8]})\n tm.assert_frame_equal(res1, res2)\n tm.assert_frame_equal(res2, res3)\n tm.assert_frame_equal(res3, expected)\n\n def test_replace_doesnt_replace_without_regex(self):\n from pandas.compat import StringIO\n raw = \"\"\"fol T_opp T_Dir T_Enh\n 0 1 0 0 vo\n 1 2 vr 0 0\n 2 2 0 0 0\n 3 3 0 bt 0\"\"\"\n df = read_csv(StringIO(raw), sep=r'\\s+')\n res = df.replace({'\\D': 1})\n tm.assert_frame_equal(df, res)\n\n def test_replace_bool_with_string(self):\n df = DataFrame({'a': [True, False], 'b': list('ab')})\n result = df.replace(True, 'a')\n expected = DataFrame({'a': ['a', False], 'b': df.b})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_pure_bool_with_string_no_op(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace('asdf', 'fdsa')\n tm.assert_frame_equal(df, result)\n\n def test_replace_bool_with_bool(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace(False, True)\n expected = DataFrame(np.ones((2, 2), dtype=bool))\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_dict_with_bool_keys(self):\n df = DataFrame({0: [True, False], 1: [False, True]})\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n df.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_replace_truthy(self):\n df = DataFrame({'a': [True, True]})\n r = df.replace([np.inf, -np.inf], np.nan)\n e = df\n tm.assert_frame_equal(r, e)\n\n def 
test_replace_int_to_int_chain(self):\n df = DataFrame({'a': lrange(1, 5)})\n with tm.assertRaisesRegexp(ValueError, \"Replacement not allowed .+\"):\n df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})\n\n def test_replace_str_to_str_chain(self):\n a = np.arange(1, 5)\n astr = a.astype(str)\n bstr = np.arange(2, 6).astype(str)\n df = DataFrame({'a': astr})\n with tm.assertRaisesRegexp(ValueError, \"Replacement not allowed .+\"):\n df.replace({'a': dict(zip(astr, bstr))})\n\n def test_replace_swapping_bug(self):\n df = pd.DataFrame({'a': [True, False, True]})\n res = df.replace({'a': {True: 'Y', False: 'N'}})\n expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})\n tm.assert_frame_equal(res, expect)\n\n df = pd.DataFrame({'a': [0, 1, 0]})\n res = df.replace({'a': {0: 'Y', 1: 'N'}})\n expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})\n tm.assert_frame_equal(res, expect)\n\n def test_replace_period(self):\n d = {'fname':\n {'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'),\n 'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'),\n 'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'),\n 'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'),\n 'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'),\n 'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'),\n 'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}}\n\n df = pd.DataFrame(['out_augmented_AUG_2012.json',\n 'out_augmented_SEP_2013.json',\n 'out_augmented_SUBSIDY_WEEK.json',\n 'out_augmented_MAY_2012.json',\n 'out_augmented_MAY_2011.json',\n 'out_augmented_AUG_2011.json',\n 'out_augmented_JAN_2011.json'], columns=['fname'])\n tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))\n expected = DataFrame({'fname': [d['fname'][k]\n for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetime(self):\n d = {'fname':\n {'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),\n 'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),\n 'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),\n 'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),\n 'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),\n 'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),\n 'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}\n\n df = pd.DataFrame(['out_augmented_AUG_2012.json',\n 'out_augmented_SEP_2013.json',\n 'out_augmented_SUBSIDY_WEEK.json',\n 'out_augmented_MAY_2012.json',\n 'out_augmented_MAY_2011.json',\n 'out_augmented_AUG_2011.json',\n 'out_augmented_JAN_2011.json'], columns=['fname'])\n tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))\n expected = DataFrame({'fname': [d['fname'][k]\n for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetimetz(self):\n\n # GH 11326\n # behaving poorly when presented with a datetime64[ns, tz]\n df = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),\n 'B' : [0, np.nan, 2]})\n result = df.replace(np.nan,1)\n expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),\n 'B' : Series([0, 1, 2],dtype='float64')})\n assert_frame_equal(result, expected)\n\n result = df.fillna(1)\n assert_frame_equal(result, expected)\n\n result = df.replace(0,np.nan)\n expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),\n 'B' : [np.nan, np.nan, 2]})\n assert_frame_equal(result, expected)\n\n result = 
df.replace(Timestamp('20130102',tz='US/Eastern'),Timestamp('20130104',tz='US/Eastern'))\n expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),\n Timestamp('20130104',tz='US/Eastern'),\n Timestamp('20130103',tz='US/Eastern')],\n 'B' : [0, np.nan, 2]})\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.iloc[1,0] = np.nan\n result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Eastern'))\n assert_frame_equal(result, expected)\n\n # coerce to object\n result = df.copy()\n result.iloc[1,0] = np.nan\n result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Pacific'))\n expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),\n Timestamp('20130104',tz='US/Pacific'),\n Timestamp('20130103',tz='US/Eastern')],\n 'B' : [0, np.nan, 2]})\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.iloc[1,0] = np.nan\n result = result.replace({'A' : np.nan }, Timestamp('20130104'))\n expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),\n Timestamp('20130104'),\n Timestamp('20130103',tz='US/Eastern')],\n 'B' : [0, np.nan, 2]})\n assert_frame_equal(result, expected)\n\n def test_combine_multiple_frames_dtypes(self):\n\n # GH 2759\n A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)\n B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)\n results = pd.concat((A, B), axis=1).get_dtype_counts()\n expected = Series(dict( float64 = 2, float32 = 2 ))\n assert_series_equal(results,expected)\n\n def test_ops(self):\n\n # tst ops and reversed ops in evaluation\n # GH7198\n\n # smaller hits python, larger hits numexpr\n for n in [ 4, 4000 ]:\n\n df = DataFrame(1,index=range(n),columns=list('abcd'))\n df.iloc[0] = 2\n m = df.mean()\n\n for op_str, op, rop in [('+','__add__','__radd__'),\n ('-','__sub__','__rsub__'),\n ('*','__mul__','__rmul__'),\n ('/','__truediv__','__rtruediv__')]:\n\n base = DataFrame(np.tile(m.values,n).reshape(n,-1),columns=list('abcd'))\n expected = eval(\"base{op}df\".format(op=op_str))\n\n # ops as strings\n result = eval(\"m{op}df\".format(op=op_str))\n assert_frame_equal(result,expected)\n\n # these are commutative\n if op in ['+','*']:\n result = getattr(df,op)(m)\n assert_frame_equal(result,expected)\n\n # these are not\n elif op in ['-','/']:\n result = getattr(df,rop)(m)\n assert_frame_equal(result,expected)\n\n # GH7192\n df = DataFrame(dict(A=np.random.randn(25000)))\n df.iloc[0:5] = np.nan\n expected = (1-np.isnan(df.iloc[0:25]))\n result = (1-np.isnan(df)).iloc[0:25]\n assert_frame_equal(result,expected)\n\n def test_truncate(self):\n offset = datetools.bday\n\n ts = self.tsframe[::3]\n\n start, end = self.tsframe.index[3], self.tsframe.index[6]\n\n start_missing = self.tsframe.index[2]\n end_missing = self.tsframe.index[7]\n\n # neither specified\n truncated = ts.truncate()\n assert_frame_equal(truncated, ts)\n\n # both specified\n expected = ts[1:3]\n\n truncated = ts.truncate(start, end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(start_missing, end_missing)\n assert_frame_equal(truncated, expected)\n\n # start specified\n expected = ts[1:]\n\n truncated = ts.truncate(before=start)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(before=start_missing)\n assert_frame_equal(truncated, expected)\n\n # end specified\n expected = ts[:3]\n\n truncated = ts.truncate(after=end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(after=end_missing)\n assert_frame_equal(truncated, expected)\n\n 
self.assertRaises(ValueError, ts.truncate,\n before=ts.index[-1] - 1,\n after=ts.index[0] +1)\n\n def test_truncate_copy(self):\n index = self.tsframe.index\n truncated = self.tsframe.truncate(index[5], index[10])\n truncated.values[:] = 5.\n self.assertFalse((self.tsframe.values[5:11] == 5).any())\n\n def test_xs(self):\n idx = self.frame.index[5]\n xs = self.frame.xs(idx)\n for item, value in compat.iteritems(xs):\n if np.isnan(value):\n self.assertTrue(np.isnan(self.frame[item][idx]))\n else:\n self.assertEqual(value, self.frame[item][idx])\n\n # mixed-type xs\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n frame = DataFrame(test_data)\n xs = frame.xs('1')\n self.assertEqual(xs.dtype, np.object_)\n self.assertEqual(xs['A'], 1)\n self.assertEqual(xs['B'], '1')\n\n with tm.assertRaises(KeyError):\n self.tsframe.xs(self.tsframe.index[0] - datetools.bday)\n\n # xs get column\n series = self.frame.xs('A', axis=1)\n expected = self.frame['A']\n assert_series_equal(series, expected)\n\n # view is returned if possible\n series = self.frame.xs('A', axis=1)\n series[:] = 5\n self.assertTrue((expected == 5).all())\n\n def test_xs_corner(self):\n # pathological mixed-type reordering case\n df = DataFrame(index=[0])\n df['A'] = 1.\n df['B'] = 'foo'\n df['C'] = 2.\n df['D'] = 'bar'\n df['E'] = 3.\n\n xs = df.xs(0)\n assert_almost_equal(xs, [1., 'foo', 2., 'bar', 3.])\n\n # no columns but Index(dtype=object)\n df = DataFrame(index=['a', 'b', 'c'])\n result = df.xs('a')\n expected = Series([], name='a', index=pd.Index([], dtype=object))\n assert_series_equal(result, expected)\n\n def test_xs_duplicates(self):\n df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])\n\n cross = df.xs('c')\n exp = df.iloc[2]\n assert_series_equal(cross, exp)\n\n def test_xs_keep_level(self):\n df = DataFrame({'day': {0: 'sat', 1: 'sun'},\n 'flavour': {0: 'strawberry', 1: 'strawberry'},\n 'sales': {0: 10, 1: 12},\n 'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day'])\n result = df.xs('sat', level='day', drop_level=False)\n expected = df[:1]\n assert_frame_equal(result, expected)\n\n result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)\n assert_frame_equal(result, expected)\n\n def test_pivot(self):\n data = {\n 'index': ['A', 'B', 'C', 'C', 'B', 'A'],\n 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],\n 'values': [1., 2., 3., 3., 2., 1.]\n }\n\n frame = DataFrame(data)\n pivoted = frame.pivot(\n index='index', columns='columns', values='values')\n\n expected = DataFrame({\n 'One': {'A': 1., 'B': 2., 'C': 3.},\n 'Two': {'A': 1., 'B': 2., 'C': 3.}\n })\n expected.index.name, expected.columns.name = 'index', 'columns'\n\n assert_frame_equal(pivoted, expected)\n\n # name tracking\n self.assertEqual(pivoted.index.name, 'index')\n self.assertEqual(pivoted.columns.name, 'columns')\n\n # don't specify values\n pivoted = frame.pivot(index='index', columns='columns')\n self.assertEqual(pivoted.index.name, 'index')\n self.assertEqual(pivoted.columns.names, (None, 'columns'))\n\n # pivot multiple columns\n wp = tm.makePanel()\n lp = wp.to_frame()\n df = lp.reset_index()\n assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())\n\n def test_pivot_duplicates(self):\n data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],\n 'b': ['one', 'two', 'one', 'one', 'two'],\n 'c': [1., 2., 3., 3., 4.]})\n with assertRaisesRegexp(ValueError, 'duplicate entries'):\n data.pivot('a', 'b', 'c')\n\n def test_pivot_empty(self):\n df = DataFrame({}, columns=['a', 
'b', 'c'])\n result = df.pivot('a', 'b', 'c')\n expected = DataFrame({})\n assert_frame_equal(result, expected, check_names=False)\n\n def test_pivot_integer_bug(self):\n df = DataFrame(data=[(\"A\", \"1\", \"A1\"), (\"B\", \"2\", \"B2\")])\n\n result = df.pivot(index=1, columns=0, values=2)\n repr(result)\n self.assert_numpy_array_equal(result.columns, ['A', 'B'])\n\n def test_pivot_index_none(self):\n # gh-3962\n data = {\n 'index': ['A', 'B', 'C', 'C', 'B', 'A'],\n 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],\n 'values': [1., 2., 3., 3., 2., 1.]\n }\n\n frame = DataFrame(data).set_index('index')\n result = frame.pivot(columns='columns', values='values')\n expected = DataFrame({\n 'One': {'A': 1., 'B': 2., 'C': 3.},\n 'Two': {'A': 1., 'B': 2., 'C': 3.}\n })\n\n expected.index.name, expected.columns.name = 'index', 'columns'\n assert_frame_equal(result, expected)\n\n # omit values\n result = frame.pivot(columns='columns')\n\n expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),\n ('values', 'Two')],\n names=[None, 'columns'])\n expected.index.name = 'index'\n assert_frame_equal(result, expected, check_names=False)\n self.assertEqual(result.index.name, 'index',)\n self.assertEqual(result.columns.names, (None, 'columns'))\n expected.columns = expected.columns.droplevel(0)\n\n data = {\n 'index': range(7),\n 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],\n 'values': [1., 2., 3., 3., 2., 1.]\n }\n\n result = frame.pivot(columns='columns', values='values')\n\n expected.columns.name = 'columns'\n assert_frame_equal(result, expected)\n\n def test_reindex(self):\n newFrame = self.frame.reindex(self.ts1.index)\n\n for col in newFrame.columns:\n for idx, val in compat.iteritems(newFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(newFrame):\n self.assertTrue(tm.equalContents(series.index, newFrame.index))\n emptyFrame = self.frame.reindex(Index([]))\n self.assertEqual(len(emptyFrame.index), 0)\n\n # Cython code should be unit-tested directly\n nonContigFrame = self.frame.reindex(self.ts1.index[::2])\n\n for col in nonContigFrame.columns:\n for idx, val in compat.iteritems(nonContigFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(nonContigFrame):\n self.assertTrue(tm.equalContents(series.index,\n nonContigFrame.index))\n\n # corner cases\n\n # Same index, copies values but not index if copy=False\n newFrame = self.frame.reindex(self.frame.index, copy=False)\n self.assertIs(newFrame.index, self.frame.index)\n\n # length zero\n newFrame = self.frame.reindex([])\n self.assertTrue(newFrame.empty)\n self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # length zero with columns reindexed with non-empty index\n newFrame = self.frame.reindex([])\n newFrame = newFrame.reindex(self.frame.index)\n self.assertEqual(len(newFrame.index), len(self.frame.index))\n self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # pass non-Index\n newFrame = self.frame.reindex(list(self.ts1.index))\n self.assertTrue(newFrame.index.equals(self.ts1.index))\n\n # copy with no axes\n result = self.frame.reindex()\n assert_frame_equal(result,self.frame)\n self.assertFalse(result is 
self.frame)\n\n def test_reindex_nan(self):\n df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],\n index=[2, np.nan, 1, 5], columns=['joe', 'jim'])\n\n i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]\n tm.assert_frame_equal(df.reindex(i), df.iloc[j])\n\n df.index = df.index.astype('object')\n tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)\n\n # GH10388\n df = pd.DataFrame({'other':['a', 'b', np.nan, 'c'],\n 'date':['2015-03-22', np.nan, '2012-01-08', np.nan],\n 'amount':[2, 3, 4, 5]})\n\n df['date'] = pd.to_datetime(df.date)\n df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)\n\n left = df.set_index(['delta', 'other', 'date']).reset_index()\n right = df.reindex(columns=['delta', 'other', 'date', 'amount'])\n assert_frame_equal(left, right)\n\n def test_reindex_name_remains(self):\n s = Series(random.rand(10))\n df = DataFrame(s, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n\n df = df.reindex(i)\n self.assertEqual(df.index.name, 'iname')\n\n df = df.reindex(Index(np.arange(10), name='tmpname'))\n self.assertEqual(df.index.name, 'tmpname')\n\n s = Series(random.rand(10))\n df = DataFrame(s.T, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n df = df.reindex(columns=i)\n self.assertEqual(df.columns.name, 'iname')\n\n def test_reindex_int(self):\n smaller = self.intframe.reindex(self.intframe.index[::2])\n\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n bigger = smaller.reindex(self.intframe.index)\n self.assertEqual(bigger['A'].dtype, np.float64)\n\n smaller = self.intframe.reindex(columns=['A', 'B'])\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n def test_reindex_like(self):\n other = self.frame.reindex(index=self.frame.index[:10],\n columns=['C', 'B'])\n\n assert_frame_equal(other, self.frame.reindex_like(other))\n\n def test_reindex_columns(self):\n newFrame = self.frame.reindex(columns=['A', 'B', 'E'])\n\n assert_series_equal(newFrame['B'], self.frame['B'])\n self.assertTrue(np.isnan(newFrame['E']).all())\n self.assertNotIn('C', newFrame)\n\n # length zero\n newFrame = self.frame.reindex(columns=[])\n self.assertTrue(newFrame.empty)\n\n def test_reindex_axes(self):\n\n # GH 3317, reindexing by both axes loses freq of the index\n from datetime import datetime\n df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], columns=['a', 'b', 'c'])\n time_freq = date_range('2012-01-01', '2012-01-03', freq='d')\n some_cols = ['a', 'b']\n\n index_freq = df.reindex(index=time_freq).index.freq\n both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq\n seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq\n self.assertEqual(index_freq, both_freq)\n self.assertEqual(index_freq, seq_freq)\n\n def test_reindex_fill_value(self):\n df = DataFrame(np.random.randn(10, 4))\n\n # axis=0\n result = df.reindex(lrange(15))\n self.assertTrue(np.isnan(result.values[-5:]).all())\n\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n # axis=1\n result = df.reindex(columns=lrange(5), fill_value=0.)\n expected = df.copy()\n expected[4] = 0.\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value=0)\n expected = df.copy()\n expected[4] = 0\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value='foo')\n expected = df.copy()\n expected[4] = 'foo'\n 
assert_frame_equal(result, expected)\n\n # reindex_axis\n result = df.reindex_axis(lrange(15), fill_value=0., axis=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n result = df.reindex_axis(lrange(5), fill_value=0., axis=1)\n expected = df.reindex(columns=lrange(5)).fillna(0)\n assert_frame_equal(result, expected)\n\n # other dtypes\n df['foo'] = 'foo'\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n def test_reindex_dups(self):\n\n # GH4746, reindex on duplicate index error messages\n arr = np.random.randn(10)\n df = DataFrame(arr,index=[1,2,3,4,5,1,2,3,4,5])\n\n # set index is ok\n result = df.copy()\n result.index = list(range(len(df)))\n expected = DataFrame(arr,index=list(range(len(df))))\n assert_frame_equal(result,expected)\n\n # reindex fails\n self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))\n\n def test_align(self):\n af, bf = self.frame.align(self.frame)\n self.assertIsNot(af._data, self.frame._data)\n\n af, bf = self.frame.align(self.frame, copy=False)\n self.assertIs(af._data, self.frame._data)\n\n # axis = 0\n other = self.frame.ix[:-5, :3]\n af, bf = self.frame.align(other, axis=0, fill_value=-1)\n self.assertTrue(bf.columns.equals(other.columns))\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = self.frame.index.difference(join_idx)\n diff_b = other.index.difference(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n diff_b_vals = bf.reindex(diff_b).values\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='right', axis=0)\n self.assertTrue(bf.columns.equals(other.columns))\n self.assertTrue(bf.index.equals(other.index))\n self.assertTrue(af.index.equals(other.index))\n\n # axis = 1\n other = self.frame.ix[:-5, :3].copy()\n af, bf = self.frame.align(other, axis=1)\n self.assertTrue(bf.columns.equals(self.frame.columns))\n self.assertTrue(bf.index.equals(other.index))\n\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = self.frame.index.difference(join_idx)\n diff_b = other.index.difference(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n diff_b_vals = bf.reindex(diff_b).values\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='inner', axis=1)\n self.assertTrue(bf.columns.equals(other.columns))\n\n af, bf = self.frame.align(other, join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(other.columns))\n\n # test other non-float types\n af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(other.columns))\n\n af, bf = self.mixed_frame.align(self.mixed_frame,\n join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(self.mixed_frame.columns))\n\n af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=None)\n self.assertTrue(bf.index.equals(Index([])))\n\n af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n # mixed floats/ints\n af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n # try to align dataframe to series along bad axis\n 
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],\n join='inner', axis=2)\n\n # align dataframe to series with broadcast or not\n idx = self.frame.index\n s = Series(range(len(idx)), index=idx)\n\n left, right = self.frame.align(s, axis=0)\n tm.assert_index_equal(left.index, self.frame.index)\n tm.assert_index_equal(right.index, self.frame.index)\n self.assertTrue(isinstance(right, Series))\n\n left, right = self.frame.align(s, broadcast_axis=1)\n tm.assert_index_equal(left.index, self.frame.index)\n expected = {}\n for c in self.frame.columns:\n expected[c] = s\n expected = DataFrame(expected, index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(right, expected)\n\n # GH 9558\n df = DataFrame({'a':[1,2,3], 'b':[4,5,6]})\n result = df[df['a'] == 2]\n expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])\n assert_frame_equal(result, expected)\n\n result = df.where(df['a'] == 2, 0)\n expected = DataFrame({'a':[0, 2, 0], 'b':[0, 5, 0]})\n assert_frame_equal(result, expected)\n\n def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):\n aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,\n fill_axis=fill_axis)\n\n join_index, join_columns = None, None\n\n ea, eb = a, b\n if axis is None or axis == 0:\n join_index = a.index.join(b.index, how=how)\n ea = ea.reindex(index=join_index)\n eb = eb.reindex(index=join_index)\n\n if axis is None or axis == 1:\n join_columns = a.columns.join(b.columns, how=how)\n ea = ea.reindex(columns=join_columns)\n eb = eb.reindex(columns=join_columns)\n\n ea = ea.fillna(axis=fill_axis, method=method, limit=limit)\n eb = eb.fillna(axis=fill_axis, method=method, limit=limit)\n\n assert_frame_equal(aa, ea)\n assert_frame_equal(ab, eb)\n\n def test_align_fill_method_inner(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('inner', meth, ax, fax)\n\n def test_align_fill_method_outer(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('outer', meth, ax, fax)\n\n def test_align_fill_method_left(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('left', meth, ax, fax)\n\n def test_align_fill_method_right(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('right', meth, ax, fax)\n\n def _check_align_fill(self, kind, meth, ax, fax):\n left = self.frame.ix[0:4, :10]\n right = self.frame.ix[2:, 6:]\n empty = self.frame.ix[:0, :0]\n\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty left\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty right\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # both empty\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n def test_align_int_fill_bug(self):\n # GH #910\n X = np.arange(10*10, dtype='float64').reshape(10, 10)\n Y = np.ones((10, 1), dtype=int)\n\n df1 = DataFrame(X)\n df1['0.X'] = Y.squeeze()\n\n df2 = 
df1.astype(float)\n\n result = df1 - df1.mean()\n expected = df2 - df2.mean()\n assert_frame_equal(result, expected)\n\n def test_align_multiindex(self):\n # GH 10665\n # same test cases as test_align_multiindex in test_series.py\n\n midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],\n names=('a', 'b', 'c'))\n idx = pd.Index(range(2), name='b')\n df1 = pd.DataFrame(np.arange(12,dtype='int64'), index=midx)\n df2 = pd.DataFrame(np.arange(2,dtype='int64'), index=idx)\n\n # these must be the same results (but flipped)\n res1l, res1r = df1.align(df2, join='left')\n res2l, res2r = df2.align(df1, join='right')\n\n expl = df1\n tm.assert_frame_equal(expl, res1l)\n tm.assert_frame_equal(expl, res2r)\n expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)\n tm.assert_frame_equal(expr, res1r)\n tm.assert_frame_equal(expr, res2l)\n\n res1l, res1r = df1.align(df2, join='right')\n res2l, res2r = df2.align(df1, join='left')\n\n exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],\n names=('a', 'b', 'c'))\n expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)\n tm.assert_frame_equal(expl, res1l)\n tm.assert_frame_equal(expl, res2r)\n expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)\n tm.assert_frame_equal(expr, res1r)\n tm.assert_frame_equal(expr, res2l)\n\n def test_where(self):\n default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])\n\n def _safe_add(df):\n # only add to the numeric items\n def is_ok(s):\n return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'\n return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in compat.iteritems(df) ]))\n\n def _check_get(df, cond, check_dtypes = True):\n other1 = _safe_add(df)\n rs = df.where(cond, other1)\n rs2 = df.where(cond.values, other1)\n for k, v in rs.iteritems():\n exp = Series(np.where(cond[k], df[k], other1[k]),index=v.index)\n assert_series_equal(v, exp, check_names=False)\n assert_frame_equal(rs, rs2)\n\n # dtypes\n if check_dtypes:\n self.assertTrue((rs.dtypes == df.dtypes).all() == True)\n\n # check getting\n for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n cond = df > 0\n _check_get(df, cond)\n\n\n # upcasting case (GH # 2794)\n df = DataFrame(dict([ (c,Series([1]*3,dtype=c)) for c in ['int64','int32','float32','float64'] ]))\n df.ix[1,:] = 0\n result = df.where(df>=0).get_dtype_counts()\n\n #### when we don't preserve boolean casts ####\n #expected = Series({ 'float32' : 1, 'float64' : 3 })\n\n expected = Series({ 'float32' : 1, 'float64' : 1, 'int32' : 1, 'int64' : 1 })\n assert_series_equal(result, expected)\n\n # aligning\n def _check_align(df, cond, other, check_dtypes = True):\n rs = df.where(cond, other)\n for i, k in enumerate(rs.columns):\n result = rs[k]\n d = df[k].values\n c = cond[k].reindex(df[k].index).fillna(False).values\n\n if np.isscalar(other):\n o = other\n else:\n if isinstance(other,np.ndarray):\n o = Series(other[:,i],index=result.index).values\n else:\n o = other[k].values\n\n new_values = d if c.all() else np.where(c, d, o)\n expected = Series(new_values, index=result.index, name=k)\n\n # since we can't always have the correct numpy dtype\n # as numpy doesn't know how to downcast, don't check\n assert_series_equal(result, expected, check_dtype=False)\n\n # dtypes\n # can't check dtype when other is an ndarray\n\n if check_dtypes and not isinstance(other,np.ndarray):\n self.assertTrue((rs.dtypes == df.dtypes).all() == True)\n\n for df in [ self.mixed_frame, self.mixed_float, 
self.mixed_int ]:\n\n # other is a frame\n cond = (df > 0)[1:]\n _check_align(df, cond, _safe_add(df))\n\n # check other is ndarray\n cond = df > 0\n _check_align(df, cond, (_safe_add(df).values))\n\n # integers are upcast, so don't check the dtypes\n cond = df > 0\n check_dtypes = all([ not issubclass(s.type,np.integer) for s in df.dtypes ])\n _check_align(df, cond, np.nan, check_dtypes = check_dtypes)\n\n # invalid conditions\n df = default_frame\n err1 = (df + 1).values[0:2, :]\n self.assertRaises(ValueError, df.where, cond, err1)\n\n err2 = cond.ix[:2, :].values\n other1 = _safe_add(df)\n self.assertRaises(ValueError, df.where, err2, other1)\n\n self.assertRaises(ValueError, df.mask, True)\n self.assertRaises(ValueError, df.mask, 0)\n\n # where inplace\n def _check_set(df, cond, check_dtypes = True):\n dfi = df.copy()\n econd = cond.reindex_like(df).fillna(True)\n expected = dfi.mask(~econd)\n\n dfi.where(cond, np.nan, inplace=True)\n assert_frame_equal(dfi, expected)\n\n # dtypes (and confirm upcasts)x\n if check_dtypes:\n for k, v in compat.iteritems(df.dtypes):\n if issubclass(v.type,np.integer) and not cond[k].all():\n v = np.dtype('float64')\n self.assertEqual(dfi[k].dtype, v)\n\n for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n cond = df > 0\n _check_set(df, cond)\n\n cond = df >= 0\n _check_set(df, cond)\n\n # aligining\n cond = (df >= 0)[1:]\n _check_set(df, cond)\n\n # GH 10218\n # test DataFrame.where with Series slicing\n df = DataFrame({'a': range(3), 'b': range(4, 7)})\n result = df.where(df['a'] == 1)\n expected = df[df['a'] == 1].reindex(df.index)\n assert_frame_equal(result, expected)\n\n def test_where_bug(self):\n\n # GH 2793\n\n df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]}, dtype = 'float64')\n expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')\n result = df.where(df > 2, np.nan)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(result > 2, np.nan, inplace=True)\n assert_frame_equal(result, expected)\n\n # mixed\n for dtype in ['int16','int8','int32','int64']:\n df = DataFrame({'a': np.array([1, 2, 3, 4],dtype=dtype), 'b': np.array([4.0, 3.0, 2.0, 1.0], dtype = 'float64') })\n expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')\n result = df.where(df > 2, np.nan)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(result > 2, np.nan, inplace=True)\n assert_frame_equal(result, expected)\n\n # transpositional issue\n # GH7506\n a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]})\n b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]})\n do_not_replace = b.isnull() | (a > b)\n\n expected = a.copy()\n expected[~do_not_replace] = b\n\n result = a.where(do_not_replace,b)\n assert_frame_equal(result,expected)\n\n a = DataFrame({ 0 : [4,6], 1 : [1,0]})\n b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]})\n do_not_replace = b.isnull() | (a > b)\n\n expected = a.copy()\n expected[~do_not_replace] = b\n\n result = a.where(do_not_replace,b)\n assert_frame_equal(result,expected)\n\n def test_where_datetime(self):\n\n # GH 3311\n df = DataFrame(dict(A = date_range('20130102',periods=5),\n B = date_range('20130104',periods=5),\n C = np.random.randn(5)))\n\n stamp = datetime(2013,1,3)\n result = df[df>stamp]\n expected = df.copy()\n expected.loc[[0,1],'A'] = np.nan\n assert_frame_equal(result,expected)\n\n def test_where_none(self):\n # GH 4667\n # setting 
with None changes dtype\n df = DataFrame({'series': Series(range(10))}).astype(float)\n df[df > 7] = None\n expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) })\n assert_frame_equal(df, expected)\n\n # GH 7656\n df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}])\n expected = df.where(~isnull(df), None)\n with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):\n df.where(~isnull(df), None, inplace=True)\n\n def test_where_align(self):\n\n def create():\n df = DataFrame(np.random.randn(10,3))\n df.iloc[3:5,0] = np.nan\n df.iloc[4:6,1] = np.nan\n df.iloc[5:8,2] = np.nan\n return df\n\n # series\n df = create()\n expected = df.fillna(df.mean())\n result = df.where(pd.notnull(df),df.mean(),axis='columns')\n assert_frame_equal(result, expected)\n\n df.where(pd.notnull(df),df.mean(),inplace=True,axis='columns')\n assert_frame_equal(df, expected)\n\n df = create().fillna(0)\n expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0])\n result = df.where(df>0,df[0],axis='index')\n assert_frame_equal(result, expected)\n result = df.where(df>0,df[0],axis='rows')\n assert_frame_equal(result, expected)\n\n # frame\n df = create()\n expected = df.fillna(1)\n result = df.where(pd.notnull(df),DataFrame(1,index=df.index,columns=df.columns))\n assert_frame_equal(result, expected)\n\n def test_where_complex(self):\n # GH 6345\n expected = DataFrame([[1+1j, 2], [np.nan, 4+1j]], columns=['a', 'b'])\n df = DataFrame([[1+1j, 2], [5+1j, 4+1j]], columns=['a', 'b'])\n df[df.abs() >= 5] = np.nan\n assert_frame_equal(df,expected)\n\n def test_where_axis(self):\n # GH 9736\n df = DataFrame(np.random.randn(2, 2))\n mask = DataFrame([[False, False], [False, False]])\n s = Series([0, 1])\n\n expected = DataFrame([[0, 0], [1, 1]], dtype='float64')\n result = df.where(mask, s, axis='index')\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(mask, s, axis='index', inplace=True)\n assert_frame_equal(result, expected)\n\n expected = DataFrame([[0, 1], [0, 1]], dtype='float64')\n result = df.where(mask, s, axis='columns')\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(mask, s, axis='columns', inplace=True)\n assert_frame_equal(result, expected)\n\n # Upcast needed\n df = DataFrame([[1, 2], [3, 4]], dtype='int64')\n mask = DataFrame([[False, False], [False, False]])\n s = Series([0, np.nan])\n\n expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')\n result = df.where(mask, s, axis='index')\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(mask, s, axis='index', inplace=True)\n assert_frame_equal(result, expected)\n\n expected = DataFrame([[0, np.nan], [0, np.nan]], dtype='float64')\n result = df.where(mask, s, axis='columns')\n assert_frame_equal(result, expected)\n\n expected = DataFrame({0 : np.array([0, 0], dtype='int64'),\n 1 : np.array([np.nan, np.nan], dtype='float64')})\n result = df.copy()\n result.where(mask, s, axis='columns', inplace=True)\n assert_frame_equal(result, expected)\n\n # Multiple dtypes (=> multiple Blocks)\n df = pd.concat([DataFrame(np.random.randn(10, 2)),\n DataFrame(np.random.randint(0, 10, size=(10, 2)))],\n ignore_index=True, axis=1)\n mask = DataFrame(False, columns=df.columns, index=df.index)\n s1 = Series(1, index=df.columns)\n s2 = Series(2, index=df.index)\n\n result = df.where(mask, s1, axis='columns')\n expected = DataFrame(1.0, columns=df.columns, index=df.index)\n expected[2] = expected[2].astype(int)\n expected[3] 
= expected[3].astype(int)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(mask, s1, axis='columns', inplace=True)\n assert_frame_equal(result, expected)\n\n result = df.where(mask, s2, axis='index')\n expected = DataFrame(2.0, columns=df.columns, index=df.index)\n expected[2] = expected[2].astype(int)\n expected[3] = expected[3].astype(int)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(mask, s2, axis='index', inplace=True)\n assert_frame_equal(result, expected)\n\n # DataFrame vs DataFrame\n d1 = df.copy().drop(1, axis=0)\n expected = df.copy()\n expected.loc[1, :] = np.nan\n\n result = df.where(mask, d1)\n assert_frame_equal(result, expected)\n result = df.where(mask, d1, axis='index')\n assert_frame_equal(result, expected)\n result = df.copy()\n result.where(mask, d1, inplace=True)\n assert_frame_equal(result, expected)\n result = df.copy()\n result.where(mask, d1, inplace=True, axis='index')\n assert_frame_equal(result, expected)\n\n d2 = df.copy().drop(1, axis=1)\n expected = df.copy()\n expected.loc[:, 1] = np.nan\n\n result = df.where(mask, d2)\n assert_frame_equal(result, expected)\n result = df.where(mask, d2, axis='columns')\n assert_frame_equal(result, expected)\n result = df.copy()\n result.where(mask, d2, inplace=True)\n assert_frame_equal(result, expected)\n result = df.copy()\n result.where(mask, d2, inplace=True, axis='columns')\n assert_frame_equal(result, expected)\n\n def test_mask(self):\n df = DataFrame(np.random.randn(5, 3))\n cond = df > 0\n\n rs = df.where(cond, np.nan)\n assert_frame_equal(rs, df.mask(df <= 0))\n assert_frame_equal(rs, df.mask(~cond))\n\n other = DataFrame(np.random.randn(5, 3))\n rs = df.where(cond, other)\n assert_frame_equal(rs, df.mask(df <= 0, other))\n assert_frame_equal(rs, df.mask(~cond, other))\n\n def test_mask_inplace(self):\n # GH8801\n df = DataFrame(np.random.randn(5, 3))\n cond = df > 0\n\n rdf = df.copy()\n\n rdf.where(cond, inplace=True)\n assert_frame_equal(rdf, df.where(cond))\n assert_frame_equal(rdf, df.mask(~cond))\n\n rdf = df.copy()\n rdf.where(cond, -df, inplace=True)\n assert_frame_equal(rdf, df.where(cond, -df))\n assert_frame_equal(rdf, df.mask(~cond, -df))\n\n def test_mask_edge_case_1xN_frame(self):\n # GH4071\n df = DataFrame([[1, 2]])\n res = df.mask(DataFrame([[True, False]]))\n expec = DataFrame([[nan, 2]])\n assert_frame_equal(res, expec)\n\n #----------------------------------------------------------------------\n # Transposing\n\n def test_transpose(self):\n frame = self.frame\n dft = frame.T\n for idx, series in compat.iteritems(dft):\n for col, value in compat.iteritems(series):\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][idx]))\n else:\n self.assertEqual(value, frame[col][idx])\n\n # mixed type\n index, data = tm.getMixedTypeDict()\n mixed = DataFrame(data, index=index)\n\n mixed_T = mixed.T\n for col, s in compat.iteritems(mixed_T):\n self.assertEqual(s.dtype, np.object_)\n\n def test_transpose_get_view(self):\n dft = self.frame.T\n dft.values[:, 5:10] = 5\n\n self.assertTrue((self.frame.values[5:10] == 5).all())\n\n #----------------------------------------------------------------------\n # Renaming\n\n def test_rename(self):\n mapping = {\n 'A': 'a',\n 'B': 'b',\n 'C': 'c',\n 'D': 'd'\n }\n\n renamed = self.frame.rename(columns=mapping)\n renamed2 = self.frame.rename(columns=str.lower)\n\n assert_frame_equal(renamed, renamed2)\n assert_frame_equal(renamed2.rename(columns=str.upper),\n self.frame, check_names=False)\n\n # index\n 
data = {\n 'A': {'foo': 0, 'bar': 1}\n }\n\n # gets sorted alphabetical\n df = DataFrame(data)\n renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})\n self.assert_numpy_array_equal(renamed.index, ['foo', 'bar'])\n\n renamed = df.rename(index=str.upper)\n self.assert_numpy_array_equal(renamed.index, ['BAR', 'FOO'])\n\n # have to pass something\n self.assertRaises(TypeError, self.frame.rename)\n\n # partial columns\n renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})\n self.assert_numpy_array_equal(renamed.columns, ['A', 'B', 'foo', 'bar'])\n\n # other axis\n renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})\n self.assert_numpy_array_equal(renamed.index, ['A', 'B', 'foo', 'bar'])\n\n # index with name\n index = Index(['foo', 'bar'], name='name')\n renamer = DataFrame(data, index=index)\n renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})\n self.assert_numpy_array_equal(renamed.index, ['bar', 'foo'])\n self.assertEqual(renamed.index.name, renamer.index.name)\n\n # MultiIndex\n tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]\n tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]\n index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])\n columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz'])\n renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns)\n renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},\n columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})\n new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])\n new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')])\n self.assert_numpy_array_equal(renamed.index, new_index)\n self.assert_numpy_array_equal(renamed.columns, new_columns)\n self.assertEqual(renamed.index.names, renamer.index.names)\n self.assertEqual(renamed.columns.names, renamer.columns.names)\n\n def test_rename_nocopy(self):\n renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)\n renamed['foo'] = 1.\n self.assertTrue((self.frame['C'] == 1.).all())\n\n def test_rename_inplace(self):\n self.frame.rename(columns={'C': 'foo'})\n self.assertIn('C', self.frame)\n self.assertNotIn('foo', self.frame)\n\n c_id = id(self.frame['C'])\n frame = self.frame.copy()\n frame.rename(columns={'C': 'foo'}, inplace=True)\n\n self.assertNotIn('C', frame)\n self.assertIn('foo', frame)\n self.assertNotEqual(id(frame['foo']), c_id)\n\n def test_rename_bug(self):\n # GH 5344\n # rename set ref_locs, and set_index was not resetting\n df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]})\n df = df.rename(columns={0 : 'a'})\n df = df.rename(columns={1 : 'b'})\n df = df.set_index(['a','b'])\n df.columns = ['2001-01-01']\n expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')],\n names=['a','b']),\n columns=['2001-01-01'])\n assert_frame_equal(df,expected)\n\n #----------------------------------------------------------------------\n # Time series related\n def test_diff(self):\n the_diff = self.tsframe.diff(1)\n\n assert_series_equal(the_diff['A'],\n self.tsframe['A'] - self.tsframe['A'].shift(1))\n\n # int dtype\n a = 10000000000000000\n b = a + 1\n s = Series([a, b])\n\n rs = DataFrame({'s': s}).diff()\n self.assertEqual(rs.s[1], 1)\n\n # mixed numeric\n tf = self.tsframe.astype('float32')\n the_diff = tf.diff(1)\n assert_series_equal(the_diff['A'],\n tf['A'] - tf['A'].shift(1))\n\n # issue 10907\n df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})\n df.insert(0, 'x', 1)\n result = df.diff(axis=1)\n expected 
= pd.DataFrame({'x':np.nan, 'y':pd.Series(1), 'z':pd.Series(1)}).astype('float64')\n assert_frame_equal(result, expected)\n\n\n def test_diff_timedelta(self):\n # GH 4533\n df = DataFrame(dict(time=[Timestamp('20130101 9:01'),\n Timestamp('20130101 9:02')],\n value=[1.0,2.0]))\n\n res = df.diff()\n exp = DataFrame([[pd.NaT, np.nan],\n [Timedelta('00:01:00'), 1]],\n columns=['time', 'value'])\n assert_frame_equal(res, exp)\n\n def test_diff_mixed_dtype(self):\n df = DataFrame(np.random.randn(5, 3))\n df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)\n\n result = df.diff()\n self.assertEqual(result[0].dtype, np.float64)\n\n def test_diff_neg_n(self):\n rs = self.tsframe.diff(-1)\n xp = self.tsframe - self.tsframe.shift(-1)\n assert_frame_equal(rs, xp)\n\n def test_diff_float_n(self):\n rs = self.tsframe.diff(1.)\n xp = self.tsframe.diff(1)\n assert_frame_equal(rs, xp)\n\n def test_diff_axis(self):\n # GH 9727\n df = DataFrame([[1., 2.], [3., 4.]])\n assert_frame_equal(df.diff(axis=1), DataFrame([[np.nan, 1.], [np.nan, 1.]]))\n assert_frame_equal(df.diff(axis=0), DataFrame([[np.nan, np.nan], [2., 2.]]))\n\n def test_pct_change(self):\n rs = self.tsframe.pct_change(fill_method=None)\n assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)\n\n rs = self.tsframe.pct_change(2)\n filled = self.tsframe.fillna(method='pad')\n assert_frame_equal(rs, filled / filled.shift(2) - 1)\n\n rs = self.tsframe.pct_change(fill_method='bfill', limit=1)\n filled = self.tsframe.fillna(method='bfill', limit=1)\n assert_frame_equal(rs, filled / filled.shift(1) - 1)\n\n rs = self.tsframe.pct_change(freq='5D')\n filled = self.tsframe.fillna(method='pad')\n assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1., 1.5, np.nan, 2.5, 3.])\n\n df = DataFrame({'a': s, 'b': s})\n\n chg = df.pct_change()\n expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])\n edf = DataFrame({'a': expected, 'b': expected})\n assert_frame_equal(chg, edf)\n\n def test_shift(self):\n # naive shift\n shiftedFrame = self.tsframe.shift(5)\n self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))\n\n shiftedSeries = self.tsframe['A'].shift(5)\n assert_series_equal(shiftedFrame['A'], shiftedSeries)\n\n shiftedFrame = self.tsframe.shift(-5)\n self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))\n\n shiftedSeries = self.tsframe['A'].shift(-5)\n assert_series_equal(shiftedFrame['A'], shiftedSeries)\n\n # shift by 0\n unshifted = self.tsframe.shift(0)\n assert_frame_equal(unshifted, self.tsframe)\n\n # shift by DateOffset\n shiftedFrame = self.tsframe.shift(5, freq=datetools.BDay())\n self.assertEqual(len(shiftedFrame), len(self.tsframe))\n\n shiftedFrame2 = self.tsframe.shift(5, freq='B')\n assert_frame_equal(shiftedFrame, shiftedFrame2)\n\n d = self.tsframe.index[0]\n shifted_d = d + datetools.BDay(5)\n assert_series_equal(self.tsframe.xs(d),\n shiftedFrame.xs(shifted_d), check_names=False)\n\n # shift int frame\n int_shifted = self.intframe.shift(1)\n\n # Shifting with PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.shift(1)\n unshifted = shifted.shift(-1)\n self.assertTrue(shifted.index.equals(ps.index))\n\n tm.assert_dict_equal(unshifted.ix[:, 0].valid(), ps.ix[:, 0],\n compare_keys=False)\n\n shifted2 = ps.shift(1, 'B')\n shifted3 = ps.shift(1, datetools.bday)\n assert_frame_equal(shifted2, shifted3)\n assert_frame_equal(ps, shifted2.shift(-1, 'B'))\n\n assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',\n ps.shift, 
freq='D')\n\n\n # shift other axis\n # GH 6371\n df = DataFrame(np.random.rand(10,5))\n expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)\n result = df.shift(1,axis=1)\n assert_frame_equal(result,expected)\n\n # shift named axis\n df = DataFrame(np.random.rand(10,5))\n expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)\n result = df.shift(1,axis='columns')\n assert_frame_equal(result,expected)\n\n def test_shift_bool(self):\n df = DataFrame({'high': [True, False],\n 'low': [False, False]})\n rs = df.shift(1)\n xp = DataFrame(np.array([[np.nan, np.nan],\n [True, False]], dtype=object),\n columns=['high', 'low'])\n assert_frame_equal(rs, xp)\n\n def test_shift_categorical(self):\n # GH 9416\n s1 = pd.Series(['a', 'b', 'c'], dtype='category')\n s2 = pd.Series(['A', 'B', 'C'], dtype='category')\n df = DataFrame({'one': s1, 'two': s2})\n rs = df.shift(1)\n xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})\n assert_frame_equal(rs, xp)\n\n def test_shift_empty(self):\n # Regression test for #8019\n df = DataFrame({'foo': []})\n rs = df.shift(-1)\n\n assert_frame_equal(df, rs)\n\n def test_tshift(self):\n # PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(unshifted, ps)\n\n shifted2 = ps.tshift(freq='B')\n assert_frame_equal(shifted, shifted2)\n\n shifted3 = ps.tshift(freq=datetools.bday)\n assert_frame_equal(shifted, shifted3)\n\n assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')\n\n # DatetimeIndex\n shifted = self.tsframe.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(self.tsframe, unshifted)\n\n shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)\n assert_frame_equal(shifted, shifted2)\n\n inferred_ts = DataFrame(self.tsframe.values,\n Index(np.asarray(self.tsframe.index)),\n columns=self.tsframe.columns)\n shifted = inferred_ts.tshift(1)\n unshifted = shifted.tshift(-1)\n assert_frame_equal(shifted, self.tsframe.tshift(1))\n assert_frame_equal(unshifted, inferred_ts)\n\n no_freq = self.tsframe.ix[[0, 5, 7], :]\n self.assertRaises(ValueError, no_freq.tshift)\n\n def test_apply(self):\n # ufunc\n applied = self.frame.apply(np.sqrt)\n assert_series_equal(np.sqrt(self.frame['A']), applied['A'])\n\n # aggregator\n applied = self.frame.apply(np.mean)\n self.assertEqual(applied['A'], np.mean(self.frame['A']))\n\n d = self.frame.index[0]\n applied = self.frame.apply(np.mean, axis=1)\n self.assertEqual(applied[d], np.mean(self.frame.xs(d)))\n self.assertIs(applied.index, self.frame.index) # want this\n\n # invalid axis\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])\n self.assertRaises(ValueError, df.apply, lambda x: x, 2)\n\n # GH9573\n df = DataFrame({'c0':['A','A','B','B'], 'c1':['C','C','D','D']})\n df = df.apply(lambda ts: ts.astype('category'))\n self.assertEqual(df.shape, (4, 2))\n self.assertTrue(isinstance(df['c0'].dtype, com.CategoricalDtype))\n self.assertTrue(isinstance(df['c1'].dtype, com.CategoricalDtype))\n\n def test_apply_mixed_datetimelike(self):\n # mixed datetimelike\n # GH 7778\n df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') })\n result = df.apply(lambda x: x, axis=1)\n assert_frame_equal(result, df)\n\n def test_apply_empty(self):\n # empty\n applied = self.empty.apply(np.sqrt)\n self.assertTrue(applied.empty)\n\n applied = self.empty.apply(np.mean)\n 
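# ---------------------------------------------------------------------
# Illustrative aside (not part of the original pandas test file): a small
# self-contained sketch of the DataFrame.apply axis semantics relied on by
# the surrounding apply tests. The frame below is invented for
# illustration only.
import numpy as np
import pandas as pd

demo = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [4.0, 5.0, 6.0]})
print(demo.apply(np.mean))            # axis=0 (default): one value per column
print(demo.apply(np.mean, axis=1))    # axis=1: one value per row
# returning a Series from the applied function yields a DataFrame
print(demo.apply(lambda s: pd.Series({'min': s.min(), 'max': s.max()})))
# ---------------------------------------------------------------------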
self.assertTrue(applied.empty)\n\n no_rows = self.frame[:0]\n result = no_rows.apply(lambda x: x.mean())\n expected = Series(np.nan, index=self.frame.columns)\n assert_series_equal(result, expected)\n\n no_cols = self.frame.ix[:, []]\n result = no_cols.apply(lambda x: x.mean(), axis=1)\n expected = Series(np.nan, index=self.frame.index)\n assert_series_equal(result, expected)\n\n # 2476\n xp = DataFrame(index=['a'])\n rs = xp.apply(lambda x: x['a'], axis=1)\n assert_frame_equal(xp, rs)\n\n # reduce with an empty DataFrame\n x = []\n result = self.empty.apply(x.append, axis=1, reduce=False)\n assert_frame_equal(result, self.empty)\n result = self.empty.apply(x.append, axis=1, reduce=True)\n assert_series_equal(result, Series([], index=pd.Index([], dtype=object)))\n\n empty_with_cols = DataFrame(columns=['a', 'b', 'c'])\n result = empty_with_cols.apply(x.append, axis=1, reduce=False)\n assert_frame_equal(result, empty_with_cols)\n result = empty_with_cols.apply(x.append, axis=1, reduce=True)\n assert_series_equal(result, Series([], index=pd.Index([], dtype=object)))\n\n # Ensure that x.append hasn't been called\n self.assertEqual(x, [])\n\n def test_apply_standard_nonunique(self):\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])\n rs = df.apply(lambda s: s[0], axis=1)\n xp = Series([1, 4, 7], ['a', 'a', 'c'])\n assert_series_equal(rs, xp)\n\n rs = df.T.apply(lambda s: s[0], axis=0)\n assert_series_equal(rs, xp)\n\n def test_apply_broadcast(self):\n broadcasted = self.frame.apply(np.mean, broadcast=True)\n agged = self.frame.apply(np.mean)\n\n for col, ts in compat.iteritems(broadcasted):\n self.assertTrue((ts == agged[col]).all())\n\n broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)\n agged = self.frame.apply(np.mean, axis=1)\n for idx in broadcasted.index:\n self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())\n\n def test_apply_raw(self):\n result0 = self.frame.apply(np.mean, raw=True)\n result1 = self.frame.apply(np.mean, axis=1, raw=True)\n\n expected0 = self.frame.apply(lambda x: x.values.mean())\n expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)\n\n assert_series_equal(result0, expected0)\n assert_series_equal(result1, expected1)\n\n # no reduction\n result = self.frame.apply(lambda x: x * 2, raw=True)\n expected = self.frame * 2\n assert_frame_equal(result, expected)\n\n def test_apply_axis1(self):\n d = self.frame.index[0]\n tapplied = self.frame.apply(np.mean, axis=1)\n self.assertEqual(tapplied[d], np.mean(self.frame.xs(d)))\n\n def test_apply_ignore_failures(self):\n result = self.mixed_frame._apply_standard(np.mean, 0,\n ignore_failures=True)\n expected = self.mixed_frame._get_numeric_data().apply(np.mean)\n assert_series_equal(result, expected)\n\n def test_apply_mixed_dtype_corner(self):\n df = DataFrame({'A': ['foo'],\n 'B': [1.]})\n result = df[:0].apply(np.mean, axis=1)\n # the result here is actually kind of ambiguous, should it be a Series\n # or a DataFrame?\n expected = Series(np.nan, index=pd.Index([], dtype='int64'))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A': ['foo'],\n 'B': [1.]})\n result = df.apply(lambda x: x['A'], axis=1)\n expected = Series(['foo'],index=[0])\n assert_series_equal(result, expected)\n\n result = df.apply(lambda x: x['B'], axis=1)\n expected = Series([1.],index=[0])\n assert_series_equal(result, expected)\n\n def test_apply_empty_infer_type(self):\n no_cols = DataFrame(index=['a', 'b', 'c'])\n no_index = DataFrame(columns=['a', 'b', 'c'])\n\n def _check(df, f):\n 
test_res = f(np.array([], dtype='f8'))\n is_reduction = not isinstance(test_res, np.ndarray)\n\n def _checkit(axis=0, raw=False):\n res = df.apply(f, axis=axis, raw=raw)\n if is_reduction:\n agg_axis = df._get_agg_axis(axis)\n tm.assertIsInstance(res, Series)\n self.assertIs(res.index, agg_axis)\n else:\n tm.assertIsInstance(res, DataFrame)\n\n _checkit()\n _checkit(axis=1)\n _checkit(raw=True)\n _checkit(axis=0, raw=True)\n\n _check(no_cols, lambda x: x)\n _check(no_cols, lambda x: x.mean())\n _check(no_index, lambda x: x)\n _check(no_index, lambda x: x.mean())\n\n result = no_cols.apply(lambda x: x.mean(), broadcast=True)\n tm.assertIsInstance(result, DataFrame)\n\n def test_apply_with_args_kwds(self):\n def add_some(x, howmuch=0):\n return x + howmuch\n\n def agg_and_add(x, howmuch=0):\n return x.mean() + howmuch\n\n def subtract_and_divide(x, sub, divide=1):\n return (x - sub) / divide\n\n result = self.frame.apply(add_some, howmuch=2)\n exp = self.frame.apply(lambda x: x + 2)\n assert_frame_equal(result, exp)\n\n result = self.frame.apply(agg_and_add, howmuch=2)\n exp = self.frame.apply(lambda x: x.mean() + 2)\n assert_series_equal(result, exp)\n\n res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)\n exp = self.frame.apply(lambda x: (x - 2.) / 2.)\n assert_frame_equal(res, exp)\n\n def test_apply_yield_list(self):\n result = self.frame.apply(list)\n assert_frame_equal(result, self.frame)\n\n def test_apply_reduce_Series(self):\n self.frame.ix[::2, 'A'] = np.nan\n expected = self.frame.mean(1)\n result = self.frame.apply(np.mean, axis=1)\n assert_series_equal(result, expected)\n\n def test_apply_differently_indexed(self):\n df = DataFrame(np.random.randn(20, 10))\n\n result0 = df.apply(Series.describe, axis=0)\n expected0 = DataFrame(dict((i, v.describe())\n for i, v in compat.iteritems(df)),\n columns=df.columns)\n assert_frame_equal(result0, expected0)\n\n result1 = df.apply(Series.describe, axis=1)\n expected1 = DataFrame(dict((i, v.describe())\n for i, v in compat.iteritems(df.T)),\n columns=df.index).T\n assert_frame_equal(result1, expected1)\n\n def test_apply_modify_traceback(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n data.loc[4,'C'] = np.nan\n\n def transform(row):\n if row['C'].startswith('shin') and row['A'] == 'foo':\n row['D'] = 7\n return row\n\n def transform2(row):\n if (notnull(row['C']) and row['C'].startswith('shin')\n and row['A'] == 'foo'):\n row['D'] = 7\n return row\n\n try:\n transformed = data.apply(transform, axis=1)\n except AttributeError as e:\n self.assertEqual(len(e.args), 2)\n self.assertEqual(e.args[1], 'occurred at index 4')\n self.assertEqual(e.args[0], \"'float' object has no attribute 'startswith'\")\n\n def test_apply_bug(self):\n\n # GH 6125\n import datetime\n positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],\n [1, 'DEF0', 20], [2, 'ABC1', 50],\n [2, 'YUM1', 20], [2, 'DEF1', 20]],\n columns=['a', 'market', 'position'])\n def f(r):\n return r['market']\n expected = positions.apply(f, axis=1)\n\n positions = DataFrame([[datetime.datetime(2013, 1, 1), 'ABC0', 50],\n [datetime.datetime(2013, 1, 2), 'YUM0', 20],\n [datetime.datetime(2013, 1, 3), 'DEF0', 20],\n 
[datetime.datetime(2013, 1, 4), 'ABC1', 50],\n [datetime.datetime(2013, 1, 5), 'YUM1', 20],\n [datetime.datetime(2013, 1, 6), 'DEF1', 20]],\n columns=['a', 'market', 'position'])\n result = positions.apply(f, axis=1)\n assert_series_equal(result,expected)\n\n def test_swapaxes(self):\n df = DataFrame(np.random.randn(10, 5))\n assert_frame_equal(df.T, df.swapaxes(0, 1))\n assert_frame_equal(df.T, df.swapaxes(1, 0))\n assert_frame_equal(df, df.swapaxes(0, 0))\n self.assertRaises(ValueError, df.swapaxes, 2, 5)\n\n def test_apply_convert_objects(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n result = data.apply(lambda x: x, axis=1)\n assert_frame_equal(result._convert(datetime=True), data)\n\n def test_apply_attach_name(self):\n result = self.frame.apply(lambda x: x.name)\n expected = Series(self.frame.columns, index=self.frame.columns)\n assert_series_equal(result, expected)\n\n result = self.frame.apply(lambda x: x.name, axis=1)\n expected = Series(self.frame.index, index=self.frame.index)\n assert_series_equal(result, expected)\n\n # non-reductions\n result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))\n expected = DataFrame(np.tile(self.frame.columns,\n (len(self.frame.index), 1)),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(result, expected)\n\n result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),\n axis=1)\n expected = DataFrame(np.tile(self.frame.index,\n (len(self.frame.columns), 1)).T,\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(result, expected)\n\n def test_apply_multi_index(self):\n s = DataFrame([[1,2], [3,4], [5,6]])\n s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])\n s.columns = ['col1','col2']\n res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)\n tm.assertIsInstance(res.index, MultiIndex)\n\n def test_apply_dict(self):\n\n # GH 8735\n A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])\n A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),\n dict([(0, 'bar'), (1, 'eggs')])])\n B = DataFrame([[0, 1], [2, 3]])\n B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])\n fn = lambda x: x.to_dict()\n\n for df, dicts in [(A, A_dicts), (B, B_dicts)]:\n reduce_true = df.apply(fn, reduce=True)\n reduce_false = df.apply(fn, reduce=False)\n reduce_none = df.apply(fn, reduce=None)\n\n assert_series_equal(reduce_true, dicts)\n assert_frame_equal(reduce_false, df)\n assert_series_equal(reduce_none, dicts)\n\n def test_applymap(self):\n applied = self.frame.applymap(lambda x: x * 2)\n assert_frame_equal(applied, self.frame * 2)\n result = self.frame.applymap(type)\n\n # GH #465, function returning tuples\n result = self.frame.applymap(lambda x: (x, x))\n tm.assertIsInstance(result['A'][0], tuple)\n\n # GH 2909, object conversion to float in constructor?\n df = DataFrame(data=[1,'a'])\n result = df.applymap(lambda x: x)\n self.assertEqual(result.dtypes[0], object)\n\n df = DataFrame(data=[1.,'a'])\n result = df.applymap(lambda x: x)\n self.assertEqual(result.dtypes[0], object)\n\n # GH2786\n df = DataFrame(np.random.random((3,4)))\n df2 = df.copy()\n cols = ['a','a','a','a']\n df.columns = cols\n\n expected = 
df2.applymap(str)\n expected.columns = cols\n result = df.applymap(str)\n assert_frame_equal(result,expected)\n\n # datetime/timedelta\n df['datetime'] = Timestamp('20130101')\n df['timedelta'] = Timedelta('1 min')\n result = df.applymap(str)\n for f in ['datetime','timedelta']:\n self.assertEqual(result.loc[0,f],str(df.loc[0,f]))\n\n def test_filter(self):\n # items\n filtered = self.frame.filter(['A', 'B', 'E'])\n self.assertEqual(len(filtered.columns), 2)\n self.assertNotIn('E', filtered)\n\n filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')\n self.assertEqual(len(filtered.columns), 2)\n self.assertNotIn('E', filtered)\n\n # other axis\n idx = self.frame.index[0:4]\n filtered = self.frame.filter(idx, axis='index')\n expected = self.frame.reindex(index=idx)\n assert_frame_equal(filtered, expected)\n\n # like\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n filtered = fcopy.filter(like='A')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # like with ints in column names\n df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])\n filtered = df.filter(like='_')\n self.assertEqual(len(filtered.columns), 2)\n\n # regex with ints in column names\n # from PR #10384\n df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])\n expected = DataFrame(0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))\n filtered = df.filter(regex='^[0-9]+$')\n assert_frame_equal(filtered, expected)\n\n expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])\n filtered = expected.filter(regex='^[0-9]+$') # shouldn't remove anything\n assert_frame_equal(filtered, expected)\n\n # pass in None\n with assertRaisesRegexp(TypeError, 'Must pass'):\n self.frame.filter(items=None)\n\n # objects\n filtered = self.mixed_frame.filter(like='foo')\n self.assertIn('foo', filtered)\n\n # unicode columns, won't ascii-encode\n df = self.frame.rename(columns={'B': u('\\u2202')})\n filtered = df.filter(like='C')\n self.assertTrue('C' in filtered)\n\n def test_filter_regex_search(self):\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n # regex\n filtered = fcopy.filter(regex='[A]+')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # doesn't have to be at beginning\n df = DataFrame({'aBBa': [1, 2],\n 'BBaBB': [1, 2],\n 'aCCa': [1, 2],\n 'aCCaBB': [1, 2]})\n\n result = df.filter(regex='BB')\n exp = df[[x for x in df.columns if 'BB' in x]]\n assert_frame_equal(result, exp)\n\n def test_filter_corner(self):\n empty = DataFrame()\n\n result = empty.filter([])\n assert_frame_equal(result, empty)\n\n result = empty.filter(like='foo')\n assert_frame_equal(result, empty)\n\n def test_select(self):\n f = lambda x: x.weekday() == 2\n result = self.tsframe.select(f, axis=0)\n expected = self.tsframe.reindex(\n index=self.tsframe.index[[f(x) for x in self.tsframe.index]])\n assert_frame_equal(result, expected)\n\n result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)\n expected = self.frame.reindex(columns=['B', 'D'])\n\n assert_frame_equal(result, expected, check_names=False) # TODO should reindex check_names?\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)\n\n # no change, position\n result = df.reorder_levels([0, 1, 2])\n assert_frame_equal(df, result)\n\n # no change, labels\n result = 
df.reorder_levels(['L0', 'L1', 'L2'])\n assert_frame_equal(df, result)\n\n # rotate, position\n result = df.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},\n index=e_idx)\n assert_frame_equal(result, expected)\n\n result = df.reorder_levels([0, 0, 0])\n e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n names=['L0', 'L0', 'L0'])\n expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},\n index=e_idx)\n assert_frame_equal(result, expected)\n\n result = df.reorder_levels(['L0', 'L0', 'L0'])\n assert_frame_equal(result, expected)\n\n def test_sort_values(self):\n\n # API for 9816\n\n # sort_index\n frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n # 9816 deprecated\n with tm.assert_produces_warning(FutureWarning):\n frame.sort(columns='A')\n with tm.assert_produces_warning(FutureWarning):\n frame.sort()\n\n unordered = frame.ix[[3, 2, 4, 1]]\n expected = unordered.sort_index()\n\n result = unordered.sort_index(axis=0)\n assert_frame_equal(result, expected)\n\n unordered = frame.ix[:, [2, 1, 3, 0]]\n expected = unordered.sort_index(axis=1)\n\n result = unordered.sort_index(axis=1)\n assert_frame_equal(result, expected)\n assert_frame_equal(result, expected)\n\n # sortlevel\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n df = DataFrame([[1, 2], [3, 4]], mi)\n\n result = df.sort_index(level='A', sort_remaining=False)\n expected = df.sortlevel('A', sort_remaining=False)\n assert_frame_equal(result, expected)\n\n df = df.T\n result = df.sort_index(level='A', axis=1, sort_remaining=False)\n expected = df.sortlevel('A', axis=1, sort_remaining=False)\n assert_frame_equal(result, expected)\n\n # MI sort, but no by\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n df = DataFrame([[1, 2], [3, 4]], mi)\n result = df.sort_index(sort_remaining=False)\n expected = df.sort_index()\n assert_frame_equal(result, expected)\n\n def test_sort_index(self):\n frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n # axis=0\n unordered = frame.ix[[3, 2, 4, 1]]\n sorted_df = unordered.sort_index(axis=0)\n expected = frame\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = unordered.sort_index(ascending=False)\n expected = frame[::-1]\n assert_frame_equal(sorted_df, expected)\n\n # axis=1\n unordered = frame.ix[:, ['D', 'B', 'C', 'A']]\n sorted_df = unordered.sort_index(axis=1)\n expected = frame\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = unordered.sort_index(axis=1, ascending=False)\n expected = frame.ix[:, ::-1]\n assert_frame_equal(sorted_df, expected)\n\n # by column\n sorted_df = frame.sort_values(by='A')\n indexer = frame['A'].argsort().values\n expected = frame.ix[frame.index[indexer]]\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort_values(by='A', ascending=False)\n indexer = indexer[::-1]\n expected = frame.ix[frame.index[indexer]]\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort_values(by='A', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n # GH4839\n sorted_df = frame.sort_values(by=['A'], ascending=[False])\n assert_frame_equal(sorted_df, expected)\n\n # check for now\n sorted_df = 
frame.sort_values(by='A')\n assert_frame_equal(sorted_df, expected[::-1])\n expected = frame.sort_values(by='A')\n assert_frame_equal(sorted_df, expected)\n\n expected = frame.sort_values(by=['A', 'B'], ascending=False)\n sorted_df = frame.sort_values(by=['A', 'B'])\n assert_frame_equal(sorted_df, expected[::-1])\n\n self.assertRaises(ValueError, lambda : frame.sort_values(by=['A','B'], axis=2, inplace=True))\n\n msg = 'When sorting by column, axis must be 0'\n with assertRaisesRegexp(ValueError, msg):\n frame.sort_values(by='A', axis=1)\n\n msg = r'Length of ascending \\(5\\) != length of by \\(2\\)'\n with assertRaisesRegexp(ValueError, msg):\n frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)\n\n def test_sort_index_categorical_index(self):\n\n df = DataFrame({'A' : np.arange(6,dtype='int64'),\n 'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B')\n\n result = df.sort_index()\n expected = df.iloc[[4,0,1,5,2,3]]\n assert_frame_equal(result, expected)\n\n result = df.sort_index(ascending=False)\n expected = df.iloc[[3,2,5,1,0,4]]\n assert_frame_equal(result, expected)\n\n def test_sort_nan(self):\n # GH3917\n nan = np.nan\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]})\n\n # sort one column only\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n sorted_df = df.sort_values(['A'], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n sorted_df = df.sort_values(['A'], na_position='first', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n # na_position='last', order\n expected = DataFrame(\n {'A': [1, 1, 2, 4, 6, 8, nan],\n 'B': [2, 9, nan, 5, 5, 4, 5]},\n index=[3, 0, 1, 6, 4, 5, 2])\n sorted_df = df.sort_values(['A','B'])\n assert_frame_equal(sorted_df, expected)\n\n # na_position='first', order\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 2, 9, nan, 5, 5, 4]},\n index=[2, 3, 0, 1, 6, 4, 5])\n sorted_df = df.sort_values(['A','B'], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n # na_position='first', not order\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n sorted_df = df.sort_values(['A','B'], ascending=[1,0], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n # na_position='last', not order\n expected = DataFrame(\n {'A': [8, 6, 4, 2, 1, 1, nan],\n 'B': [4, 5, 5, nan, 2, 9, 5]},\n index=[5, 4, 6, 1, 3, 0, 2])\n sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='last')\n assert_frame_equal(sorted_df, expected)\n\n # Test DataFrame with nan label\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]},\n index = [1, 2, 3, 4, 5, 6, nan])\n\n # NaN label, ascending=True, na_position='last'\n sorted_df = df.sort_index(kind='quicksort', ascending=True, na_position='last')\n expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]},\n index = [1, 2, 3, 4, 5, 6, nan])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=True, na_position='first'\n sorted_df = df.sort_index(na_position='first')\n expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],\n 'B': [5, 9, nan, 5, 2, 5, 4]},\n index = [nan, 1, 2, 3, 4, 5, 6])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=False, 
na_position='last'\n sorted_df = df.sort_index(kind='quicksort', ascending=False)\n expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],\n 'B': [4, 5, 2, 5, nan, 9, 5]},\n index = [6, 5, 4, 3, 2, 1, nan])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=False, na_position='first'\n sorted_df = df.sort_index(kind='quicksort', ascending=False, na_position='first')\n expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],\n 'B': [5, 4, 5, 2, 5, nan, 9]},\n index = [nan, 6, 5, 4, 3, 2, 1])\n assert_frame_equal(sorted_df, expected)\n\n def test_stable_descending_sort(self):\n # GH #6399\n df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],\n columns=['sort_col', 'order'])\n sorted_df = df.sort_values(by='sort_col', kind='mergesort',\n ascending=False)\n assert_frame_equal(df, sorted_df)\n\n def test_stable_descending_multicolumn_sort(self):\n nan = np.nan\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]})\n # test stable mergesort\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 2, 9]},\n index=[2, 5, 4, 6, 1, 3, 0])\n sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='first',\n kind='mergesort')\n assert_frame_equal(sorted_df, expected)\n\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n sorted_df = df.sort_values(['A','B'], ascending=[0,0], na_position='first',\n kind='mergesort')\n assert_frame_equal(sorted_df, expected)\n\n def test_sort_index_multicolumn(self):\n import random\n A = np.arange(5).repeat(20)\n B = np.tile(np.arange(5), 20)\n random.shuffle(A)\n random.shuffle(B)\n frame = DataFrame({'A': A, 'B': B,\n 'C': np.random.randn(100)})\n\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n frame.sort_index(by=['A', 'B'])\n result = frame.sort_values(by=['A', 'B'])\n indexer = np.lexsort((frame['B'], frame['A']))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n frame.sort_index(by=['A', 'B'], ascending=False)\n result = frame.sort_values(by=['A', 'B'], ascending=False)\n indexer = np.lexsort((frame['B'].rank(ascending=False),\n frame['A'].rank(ascending=False)))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n frame.sort_index(by=['B', 'A'])\n result = frame.sort_values(by=['B', 'A'])\n indexer = np.lexsort((frame['A'], frame['B']))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n def test_sort_index_inplace(self):\n frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n # axis=0\n unordered = frame.ix[[3, 2, 4, 1]]\n a_id = id(unordered['A'])\n df = unordered.copy()\n df.sort_index(inplace=True)\n expected = frame\n assert_frame_equal(df, expected)\n self.assertNotEqual(a_id, id(df['A']))\n\n df = unordered.copy()\n df.sort_index(ascending=False, inplace=True)\n expected = frame[::-1]\n assert_frame_equal(df, expected)\n\n # axis=1\n unordered = frame.ix[:, ['D', 'B', 'C', 'A']]\n df = unordered.copy()\n df.sort_index(axis=1, inplace=True)\n expected = frame\n assert_frame_equal(df, expected)\n\n df = unordered.copy()\n df.sort_index(axis=1, ascending=False, inplace=True)\n expected = frame.ix[:, ::-1]\n assert_frame_equal(df, expected)\n\n def test_sort_index_different_sortorder(self):\n A = 
np.arange(20).repeat(5)\n B = np.tile(np.arange(5), 20)\n\n indexer = np.random.permutation(100)\n A = A.take(indexer)\n B = B.take(indexer)\n\n df = DataFrame({'A': A, 'B': B,\n 'C': np.random.randn(100)})\n\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n df.sort_index(by=['A', 'B'], ascending=[1, 0])\n result = df.sort_values(by=['A', 'B'], ascending=[1, 0])\n\n ex_indexer = np.lexsort((df.B.max() - df.B, df.A))\n expected = df.take(ex_indexer)\n assert_frame_equal(result, expected)\n\n # test with multiindex, too\n idf = df.set_index(['A', 'B'])\n\n result = idf.sort_index(ascending=[1, 0])\n expected = idf.take(ex_indexer)\n assert_frame_equal(result, expected)\n\n # also, Series!\n result = idf['C'].sort_index(ascending=[1, 0])\n assert_series_equal(result, expected['C'])\n\n def test_sort_inplace(self):\n frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n sorted_df = frame.copy()\n sorted_df.sort_values(by='A', inplace=True)\n expected = frame.sort_values(by='A')\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.copy()\n sorted_df.sort_values(by='A', ascending=False, inplace=True)\n expected = frame.sort_values(by='A', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.copy()\n sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)\n expected = frame.sort_values(by=['A', 'B'], ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n def test_sort_index_duplicates(self):\n\n ### with 9816, these are all translated to .sort_values\n\n df = DataFrame([lrange(5,9), lrange(4)],\n columns=['a', 'a', 'b', 'b'])\n\n with assertRaisesRegexp(ValueError, 'duplicate'):\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n df.sort_index(by='a')\n with assertRaisesRegexp(ValueError, 'duplicate'):\n df.sort_values(by='a')\n\n with assertRaisesRegexp(ValueError, 'duplicate'):\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n df.sort_index(by=['a'])\n with assertRaisesRegexp(ValueError, 'duplicate'):\n df.sort_values(by=['a'])\n\n with assertRaisesRegexp(ValueError, 'duplicate'):\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n # multi-column 'by' is separate codepath\n df.sort_index(by=['a', 'b'])\n with assertRaisesRegexp(ValueError, 'duplicate'):\n # multi-column 'by' is separate codepath\n df.sort_values(by=['a', 'b'])\n\n # with multi-index\n # GH4370\n df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))\n with assertRaisesRegexp(ValueError, 'levels'):\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n df.sort_index(by='a')\n with assertRaisesRegexp(ValueError, 'levels'):\n df.sort_values(by='a')\n\n # convert tuples to a list of tuples\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n df.sort_index(by=[('a',1)])\n expected = df.sort_values(by=[('a',1)])\n\n # use .sort_values #9816\n with tm.assert_produces_warning(FutureWarning):\n df.sort_index(by=('a',1))\n result = df.sort_values(by=('a',1))\n assert_frame_equal(result, expected)\n\n def test_sortlevel(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n df = DataFrame([[1, 2], [3, 4]], mi)\n res = df.sortlevel('A', sort_remaining=False)\n assert_frame_equal(df, res)\n\n res = df.sortlevel(['A', 'B'], sort_remaining=False)\n assert_frame_equal(df, res)\n\n def test_sort_datetimes(self):\n\n # GH 3461, 
argsort / lexsort differences for a datetime column\n df = DataFrame(['a','a','a','b','c','d','e','f','g'],\n columns=['A'],\n index=date_range('20130101',periods=9))\n dts = [Timestamp(x)\n for x in ['2004-02-11','2004-01-21','2004-01-26',\n '2005-09-20','2010-10-04','2009-05-12',\n '2008-11-12','2010-09-28','2010-09-28']]\n df['B'] = dts[::2] + dts[1::2]\n df['C'] = 2.\n df['A1'] = 3.\n\n df1 = df.sort_values(by='A')\n df2 = df.sort_values(by=['A'])\n assert_frame_equal(df1,df2)\n\n df1 = df.sort_values(by='B')\n df2 = df.sort_values(by=['B'])\n assert_frame_equal(df1,df2)\n\n def test_frame_column_inplace_sort_exception(self):\n s = self.frame['A']\n with assertRaisesRegexp(ValueError, \"This Series is a view\"):\n s.sort_values(inplace=True)\n\n cp = s.copy()\n cp.sort_values() # it works!\n\n def test_combine_first(self):\n # disjoint\n head, tail = self.frame[:5], self.frame[5:]\n\n combined = head.combine_first(tail)\n reordered_frame = self.frame.reindex(combined.index)\n assert_frame_equal(combined, reordered_frame)\n self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))\n assert_series_equal(combined['A'], reordered_frame['A'])\n\n # same index\n fcopy = self.frame.copy()\n fcopy['A'] = 1\n del fcopy['C']\n\n fcopy2 = self.frame.copy()\n fcopy2['B'] = 0\n del fcopy2['D']\n\n combined = fcopy.combine_first(fcopy2)\n\n self.assertTrue((combined['A'] == 1).all())\n assert_series_equal(combined['B'], fcopy['B'])\n assert_series_equal(combined['C'], fcopy2['C'])\n assert_series_equal(combined['D'], fcopy['D'])\n\n # overlap\n head, tail = reordered_frame[:10].copy(), reordered_frame\n head['A'] = 1\n\n combined = head.combine_first(tail)\n self.assertTrue((combined['A'][:10] == 1).all())\n\n # reverse overlap\n tail['A'][:10] = 0\n combined = tail.combine_first(head)\n self.assertTrue((combined['A'][:10] == 0).all())\n\n # no overlap\n f = self.frame[:10]\n g = self.frame[10:]\n combined = f.combine_first(g)\n assert_series_equal(combined['A'].reindex(f.index), f['A'])\n assert_series_equal(combined['A'].reindex(g.index), g['A'])\n\n # corner cases\n comb = self.frame.combine_first(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combine_first(self.frame)\n assert_frame_equal(comb, self.frame)\n\n comb = self.frame.combine_first(DataFrame(index=[\"faz\", \"boo\"]))\n self.assertTrue(\"faz\" in comb.index)\n\n # #2525\n df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])\n df2 = DataFrame({}, columns=['b'])\n result = df.combine_first(df2)\n self.assertTrue('b' in result)\n\n def test_combine_first_mixed_bug(self):\n idx = Index(['a', 'b', 'c', 'e'])\n ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)\n ser2 = Series(['a', 'b', 'c', 'e'], index=idx)\n ser3 = Series([12, 4, 5, 97], index=idx)\n\n frame1 = DataFrame({\"col0\": ser1,\n \"col2\": ser2,\n \"col3\": ser3})\n\n idx = Index(['a', 'b', 'c', 'f'])\n ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)\n ser2 = Series(['a', 'b', 'c', 'f'], index=idx)\n ser3 = Series([12, 4, 5, 97], index=idx)\n\n frame2 = DataFrame({\"col1\": ser1,\n \"col2\": ser2,\n \"col5\": ser3})\n\n combined = frame1.combine_first(frame2)\n self.assertEqual(len(combined.columns), 5)\n\n # gh 3016 (same as in update)\n df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n\n other = DataFrame([[45,45]],index=[0],columns=['A','B'])\n result = df.combine_first(other)\n assert_frame_equal(result, df)\n\n df.ix[0,'A'] = np.nan\n result = df.combine_first(other)\n df.ix[0,'A'] = 
45\n assert_frame_equal(result, df)\n\n # doc example\n df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],\n 'B' : [np.nan, 2., 3., np.nan, 6.]})\n\n df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],\n 'B' : [np.nan, np.nan, 3., 4., 6., 8.]})\n\n result = df1.combine_first(df2)\n expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })\n assert_frame_equal(result,expected)\n\n # GH3552, return object dtype with bools\n df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])\n df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])\n\n result = df1.combine_first(df2)[2]\n expected = Series([True, True, False], name=2)\n assert_series_equal(result, expected)\n\n # GH 3593, converting datetime64[ns] incorrecly\n df0 = DataFrame({\"a\":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})\n df1 = DataFrame({\"a\":[None, None, None]})\n df2 = df1.combine_first(df0)\n assert_frame_equal(df2, df0)\n\n df2 = df0.combine_first(df1)\n assert_frame_equal(df2, df0)\n\n df0 = DataFrame({\"a\":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})\n df1 = DataFrame({\"a\":[datetime(2000, 1, 2), None, None]})\n df2 = df1.combine_first(df0)\n result = df0.copy()\n result.iloc[0,:] = df1.iloc[0,:]\n assert_frame_equal(df2, result)\n\n df2 = df0.combine_first(df1)\n assert_frame_equal(df2, df0)\n\n def test_update(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other)\n\n expected = DataFrame([[1.5, nan, 3],\n [3.6, 2, 3],\n [1.5, nan, 3],\n [1.5, nan, 7.]])\n assert_frame_equal(df, expected)\n\n def test_update_dtypes(self):\n\n # gh 3016\n df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n\n other = DataFrame([[45,45]],index=[0],columns=['A','B'])\n df.update(other)\n\n expected = DataFrame([[45.,45.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n assert_frame_equal(df, expected)\n\n def test_update_nooverwrite(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other, overwrite=False)\n\n expected = DataFrame([[1.5, nan, 3],\n [1.5, 2, 3],\n [1.5, nan, 3],\n [1.5, nan, 3.]])\n assert_frame_equal(df, expected)\n\n def test_update_filtered(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other, filter_func=lambda x: x > 2)\n\n expected = DataFrame([[1.5, nan, 3],\n [1.5, nan, 3],\n [1.5, nan, 3],\n [1.5, nan, 7.]])\n assert_frame_equal(df, expected)\n\n def test_update_raise(self):\n df = DataFrame([[1.5, 1, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[2., nan],\n [nan, 7]], index=[1, 3], columns=[1, 2])\n with assertRaisesRegexp(ValueError, \"Data overlaps\"):\n df.update(other, raise_conflict=True)\n\n def test_update_from_non_df(self):\n d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}\n df = DataFrame(d)\n\n d['a'] = Series([5, 6, 7, 8])\n df.update(d)\n\n expected = DataFrame(d)\n\n assert_frame_equal(df, expected)\n\n d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}\n df = DataFrame(d)\n\n d['a'] = [5, 6, 7, 8]\n df.update(d)\n\n expected = DataFrame(d)\n\n assert_frame_equal(df, expected)\n\n 
def test_combineAdd(self):\n\n with tm.assert_produces_warning(FutureWarning):\n # trivial\n comb = self.frame.combineAdd(self.frame)\n assert_frame_equal(comb, self.frame * 2)\n\n # more rigorous\n a = DataFrame([[1., nan, nan, 2., nan]],\n columns=np.arange(5))\n b = DataFrame([[2., 3., nan, 2., 6., nan]],\n columns=np.arange(6))\n expected = DataFrame([[3., 3., nan, 4., 6., nan]],\n columns=np.arange(6))\n\n result = a.combineAdd(b)\n assert_frame_equal(result, expected)\n result2 = a.T.combineAdd(b.T)\n assert_frame_equal(result2, expected.T)\n\n expected2 = a.combine(b, operator.add, fill_value=0.)\n assert_frame_equal(expected, expected2)\n\n # corner cases\n comb = self.frame.combineAdd(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combineAdd(self.frame)\n assert_frame_equal(comb, self.frame)\n\n # integer corner case\n df1 = DataFrame({'x': [5]})\n df2 = DataFrame({'x': [1]})\n df3 = DataFrame({'x': [6]})\n comb = df1.combineAdd(df2)\n assert_frame_equal(comb, df3)\n\n # mixed type GH2191\n df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})\n df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})\n rs = df1.combineAdd(df2)\n xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})\n assert_frame_equal(xp, rs)\n\n # TODO: test integer fill corner?\n\n def test_combineMult(self):\n\n with tm.assert_produces_warning(FutureWarning):\n # trivial\n comb = self.frame.combineMult(self.frame)\n\n assert_frame_equal(comb, self.frame ** 2)\n\n # corner cases\n comb = self.frame.combineMult(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combineMult(self.frame)\n assert_frame_equal(comb, self.frame)\n\n def test_combine_generic(self):\n df1 = self.frame\n df2 = self.frame.ix[:-5, ['A', 'B', 'C']]\n\n combined = df1.combine(df2, np.add)\n combined2 = df2.combine(df1, np.add)\n self.assertTrue(combined['D'].isnull().all())\n self.assertTrue(combined2['D'].isnull().all())\n\n chunk = combined.ix[:-5, ['A', 'B', 'C']]\n chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]\n\n exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2\n assert_frame_equal(chunk, exp)\n assert_frame_equal(chunk2, exp)\n\n def test_clip(self):\n median = self.frame.median().median()\n\n capped = self.frame.clip_upper(median)\n self.assertFalse((capped.values > median).any())\n\n floored = self.frame.clip_lower(median)\n self.assertFalse((floored.values < median).any())\n\n double = self.frame.clip(upper=median, lower=median)\n self.assertFalse((double.values != median).any())\n\n def test_dataframe_clip(self):\n\n # GH #2747\n df = DataFrame(np.random.randn(1000,2))\n\n for lb, ub in [(-1,1),(1,-1)]:\n clipped_df = df.clip(lb, ub)\n\n lb, ub = min(lb,ub), max(ub,lb)\n lb_mask = df.values <= lb\n ub_mask = df.values >= ub\n mask = ~lb_mask & ~ub_mask\n self.assertTrue((clipped_df.values[lb_mask] == lb).all() == True)\n self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True)\n self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True)\n\n def test_clip_against_series(self):\n # GH #6966\n\n df = DataFrame(np.random.randn(1000, 2))\n lb = Series(np.random.randn(1000))\n ub = lb + 1\n\n clipped_df = df.clip(lb, ub, axis=0)\n\n for i in range(2):\n lb_mask = df.iloc[:, i] <= lb\n ub_mask = df.iloc[:, i] >= ub\n mask = ~lb_mask & ~ub_mask\n\n result = clipped_df.loc[lb_mask, i]\n assert_series_equal(result, lb[lb_mask], check_names=False)\n self.assertEqual(result.name, i)\n\n result = clipped_df.loc[ub_mask, i]\n assert_series_equal(result, ub[ub_mask], 
check_names=False)\n self.assertEqual(result.name, i)\n\n assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])\n\n def test_clip_against_frame(self):\n df = DataFrame(np.random.randn(1000, 2))\n lb = DataFrame(np.random.randn(1000, 2))\n ub = lb + 1\n\n clipped_df = df.clip(lb, ub)\n\n lb_mask = df <= lb\n ub_mask = df >= ub\n mask = ~lb_mask & ~ub_mask\n\n assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])\n assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])\n assert_frame_equal(clipped_df[mask], df[mask])\n\n def test_get_X_columns(self):\n # numeric and object columns\n\n df = DataFrame({'a': [1, 2, 3],\n 'b' : [True, False, True],\n 'c': ['foo', 'bar', 'baz'],\n 'd': [None, None, None],\n 'e': [3.14, 0.577, 2.773]})\n\n self.assert_numpy_array_equal(df._get_numeric_data().columns,\n ['a', 'b', 'e'])\n\n def test_is_mixed_type(self):\n self.assertFalse(self.frame._is_mixed_type)\n self.assertTrue(self.mixed_frame._is_mixed_type)\n\n def test_get_numeric_data(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'f' : Timestamp('20010102')},\n index=np.arange(10))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',\n 'd' : np.array([1.]*10,dtype='float32'),\n 'e' : np.array([1]*10,dtype='int32'),\n 'f' : np.array([1]*10,dtype='int16'),\n 'g' : Timestamp('20010102')},\n index=np.arange(10))\n\n result = df._get_numeric_data()\n expected = df.ix[:, ['a', 'b','d','e','f']]\n assert_frame_equal(result, expected)\n\n only_obj = df.ix[:, ['c','g']]\n result = only_obj._get_numeric_data()\n expected = df.ix[:, []]\n assert_frame_equal(result, expected)\n\n df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]})\n result = df._get_numeric_data()\n expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]})\n assert_frame_equal(result, expected)\n\n df = result.copy()\n result = df._get_numeric_data()\n expected = df\n assert_frame_equal(result, expected)\n\n def test_bool_describe_in_mixed_frame(self):\n df = DataFrame({\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n })\n\n # Boolean data and integer data is included in .describe() output, string data isn't\n self.assert_numpy_array_equal(df.describe().columns, ['bool_data', 'int_data'])\n\n bool_describe = df.describe()['bool_data']\n\n # Both the min and the max values should stay booleans\n self.assertEqual(bool_describe['min'].dtype, np.bool_)\n self.assertEqual(bool_describe['max'].dtype, np.bool_)\n\n self.assertFalse(bool_describe['min'])\n self.assertTrue(bool_describe['max'])\n\n # For numeric operations, like mean or median, the values True/False are cast to\n # the integer values 1 and 0\n assert_almost_equal(bool_describe['mean'], 0.4)\n assert_almost_equal(bool_describe['50%'], 0)\n\n def test_reduce_mixed_frame(self):\n # GH 6806\n df = DataFrame({\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n })\n df.reindex(columns=['bool_data', 'int_data', 'string_data'])\n test = df.sum(axis=0)\n assert_almost_equal(test.values, [2, 150, 'abcde'])\n assert_series_equal(test, 
df.T.sum(axis=1))\n\n def test_count(self):\n f = lambda s: notnull(s).sum()\n self._check_stat_op('count', f,\n has_skipna=False,\n has_numeric_only=True,\n check_dtype=False,\n check_dates=True)\n\n # corner case\n frame = DataFrame()\n ct1 = frame.count(1)\n tm.assertIsInstance(ct1, Series)\n\n ct2 = frame.count(0)\n tm.assertIsInstance(ct2, Series)\n\n # GH #423\n df = DataFrame(index=lrange(10))\n result = df.count(1)\n expected = Series(0, index=df.index)\n assert_series_equal(result, expected)\n\n df = DataFrame(columns=lrange(10))\n result = df.count(0)\n expected = Series(0, index=df.columns)\n assert_series_equal(result, expected)\n\n df = DataFrame()\n result = df.count()\n expected = Series(0, index=[])\n assert_series_equal(result, expected)\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum, has_numeric_only=True)\n\n # mixed types (with upcasting happening)\n self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),\n has_numeric_only=True, check_dtype=False, check_less_precise=True)\n\n def test_stat_operators_attempt_obj_array(self):\n data = {\n 'a': [-0.00049987540199591344, -0.0016467257772919831,\n 0.00067695870775883013],\n 'b': [-0, -0, 0.0],\n 'c': [0.00031111847529610595, 0.0014902627951905339,\n -0.00094099200035979691]\n }\n df1 = DataFrame(data, index=['foo', 'bar', 'baz'],\n dtype='O')\n methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']\n\n # GH #676\n df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],\n 2: [np.nan, 4]}, dtype=object)\n\n for df in [df1, df2]:\n for meth in methods:\n self.assertEqual(df.values.dtype, np.object_)\n result = getattr(df, meth)(1)\n expected = getattr(df.astype('f8'), meth)(1)\n\n if not tm._incompat_bottleneck_version(meth):\n assert_series_equal(result, expected)\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean, check_dates=True)\n\n def test_product(self):\n self._check_stat_op('product', np.prod)\n\n def test_median(self):\n def wrapper(x):\n if isnull(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, check_dates=True)\n\n def test_min(self):\n self._check_stat_op('min', np.min, check_dates=True)\n self._check_stat_op('min', np.min, frame=self.intframe)\n\n def test_cummin(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cummin = self.tsframe.cummin()\n expected = self.tsframe.apply(Series.cummin)\n assert_frame_equal(cummin, expected)\n\n # axis = 1\n cummin = self.tsframe.cummin(axis=1)\n expected = self.tsframe.apply(Series.cummin, axis=1)\n assert_frame_equal(cummin, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummin()\n\n # fix issue\n cummin_xs = self.tsframe.cummin(axis=1)\n self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))\n\n def test_cummax(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cummax = self.tsframe.cummax()\n expected = self.tsframe.apply(Series.cummax)\n assert_frame_equal(cummax, expected)\n\n # axis = 1\n cummax = self.tsframe.cummax(axis=1)\n expected = self.tsframe.apply(Series.cummax, axis=1)\n assert_frame_equal(cummax, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummax()\n\n # fix issue\n cummax_xs = self.tsframe.cummax(axis=1)\n self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))\n\n def test_max(self):\n 
self._check_stat_op('max', np.max, check_dates=True)\n self._check_stat_op('max', np.max, frame=self.intframe)\n\n def test_mad(self):\n f = lambda x: np.abs(x - x.mean()).mean()\n self._check_stat_op('mad', f)\n\n def test_var_std(self):\n alt = lambda x: np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n result = self.tsframe.std(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4))\n assert_almost_equal(result, expected)\n\n result = self.tsframe.var(ddof=4)\n expected = self.tsframe.apply(lambda x: x.var(ddof=4))\n assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nanvar(arr, axis=0)\n self.assertFalse((result < 0).any())\n if nanops._USE_BOTTLENECK:\n nanops._USE_BOTTLENECK = False\n result = nanops.nanvar(arr, axis=0)\n self.assertFalse((result < 0).any())\n nanops._USE_BOTTLENECK = True\n\n def test_numeric_only_flag(self):\n # GH #9201\n methods = ['sem', 'var', 'std']\n df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])\n # set one entry to a number in str format\n df1.ix[0, 'foo'] = '100'\n\n df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])\n # set one entry to a non-number str\n df2.ix[0, 'foo'] = 'a'\n\n for meth in methods:\n result = getattr(df1, meth)(axis=1, numeric_only=True)\n expected = getattr(df1[['bar', 'baz']], meth)(axis=1)\n assert_series_equal(expected, result)\n\n result = getattr(df2, meth)(axis=1, numeric_only=True)\n expected = getattr(df2[['bar', 'baz']], meth)(axis=1)\n assert_series_equal(expected, result)\n\n # df1 has all numbers, df2 has a letter inside\n self.assertRaises(TypeError, lambda : getattr(df1, meth)(axis=1, numeric_only=False))\n self.assertRaises(TypeError, lambda : getattr(df2, meth)(axis=1, numeric_only=False))\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.tsframe.sem(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x)))\n assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nansem(arr, axis=0)\n self.assertFalse((result < 0).any())\n if nanops._USE_BOTTLENECK:\n nanops._USE_BOTTLENECK = False\n result = nanops.nansem(arr, axis=0)\n self.assertFalse((result < 0).any())\n nanops._USE_BOTTLENECK = True\n\n def test_skew(self):\n tm._skip_if_no_scipy()\n from scipy.stats import skew\n\n def alt(x):\n if len(x) < 3:\n return np.nan\n return skew(x, bias=False)\n\n self._check_stat_op('skew', alt)\n\n def test_kurt(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import kurtosis\n\n def alt(x):\n if len(x) < 4:\n return np.nan\n return kurtosis(x, bias=False)\n\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n df = DataFrame(np.random.randn(6, 3), index=index)\n\n kurt = df.kurt()\n kurt2 = df.kurt(level=0).xs('bar')\n assert_series_equal(kurt, kurt2, check_names=False)\n self.assertTrue(kurt.name is None)\n self.assertEqual(kurt2.name, 'bar')\n\n def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,\n has_numeric_only=False, check_dtype=True, check_dates=False,\n check_less_precise=False):\n if frame is None:\n frame = self.frame\n # set some NAs\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n\n f = getattr(frame, 
name)\n\n if check_dates:\n df = DataFrame({'b': date_range('1/1/2001', periods=2)})\n _f = getattr(df, name)\n result = _f()\n self.assertIsInstance(result, Series)\n\n df['a'] = lrange(len(df))\n result = getattr(df, name)()\n self.assertIsInstance(result, Series)\n self.assertTrue(len(result))\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna()\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n assert_series_equal(result0, frame.apply(wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n assert_series_equal(result0, frame.apply(skipna_wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n if not tm._incompat_bottleneck_version(name):\n assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise)\n\n # check dtypes\n if check_dtype:\n lcd_dtype = frame.values.dtype\n self.assertEqual(lcd_dtype, result0.dtype)\n self.assertEqual(lcd_dtype, result1.dtype)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)\n # make sure works on mixed-type frame\n getattr(self.mixed_frame, name)(axis=0)\n getattr(self.mixed_frame, name)(axis=1)\n\n if has_numeric_only:\n getattr(self.mixed_frame, name)(axis=0, numeric_only=True)\n getattr(self.mixed_frame, name)(axis=1, numeric_only=True)\n getattr(self.frame, name)(axis=0, numeric_only=False)\n getattr(self.frame, name)(axis=1, numeric_only=False)\n\n # all NA case\n if has_skipna:\n all_na = self.frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if not tm._incompat_bottleneck_version(name):\n self.assertTrue(np.isnan(r0).all())\n self.assertTrue(np.isnan(r1).all())\n\n def test_mode(self):\n df = pd.DataFrame({\"A\": [12, 12, 11, 12, 19, 11],\n \"B\": [10, 10, 10, np.nan, 3, 4],\n \"C\": [8, 8, 8, 9, 9, 9],\n \"D\": np.arange(6,dtype='int64'),\n \"E\": [8, 8, 1, 1, 3, 3]})\n assert_frame_equal(df[[\"A\"]].mode(),\n pd.DataFrame({\"A\": [12]}))\n expected = pd.Series([], dtype='int64', name='D').to_frame()\n assert_frame_equal(df[[\"D\"]].mode(), expected)\n expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()\n assert_frame_equal(df[[\"E\"]].mode(), expected)\n assert_frame_equal(df[[\"A\", \"B\"]].mode(),\n pd.DataFrame({\"A\": [12], \"B\": [10.]}))\n assert_frame_equal(df.mode(),\n pd.DataFrame({\"A\": [12, np.nan, np.nan],\n \"B\": [10, np.nan, np.nan],\n \"C\": [8, 9, np.nan],\n \"D\": [np.nan, np.nan, np.nan],\n \"E\": [1, 3, 8]}))\n\n # outputs in sorted order\n df[\"C\"] = list(reversed(df[\"C\"]))\n com.pprint_thing(df[\"C\"])\n com.pprint_thing(df[\"C\"].mode())\n a, b = (df[[\"A\", \"B\", \"C\"]].mode(),\n pd.DataFrame({\"A\": [12, np.nan],\n \"B\": [10, np.nan],\n \"C\": [8, 9]}))\n com.pprint_thing(a)\n com.pprint_thing(b)\n assert_frame_equal(a, b)\n # should work with heterogeneous types\n df = pd.DataFrame({\"A\": np.arange(6,dtype='int64'),\n \"B\": pd.date_range('2011', periods=6),\n \"C\": list('abcdef')})\n exp = 
pd.DataFrame({\"A\": pd.Series([], dtype=df[\"A\"].dtype),\n \"B\": pd.Series([], dtype=df[\"B\"].dtype),\n \"C\": pd.Series([], dtype=df[\"C\"].dtype)})\n assert_frame_equal(df.mode(), exp)\n\n # and also when not empty\n df.loc[1, \"A\"] = 0\n df.loc[4, \"B\"] = df.loc[3, \"B\"]\n df.loc[5, \"C\"] = 'e'\n exp = pd.DataFrame({\"A\": pd.Series([0], dtype=df[\"A\"].dtype),\n \"B\": pd.Series([df.loc[3, \"B\"]], dtype=df[\"B\"].dtype),\n \"C\": pd.Series(['e'], dtype=df[\"C\"].dtype)})\n\n assert_frame_equal(df.mode(), exp)\n\n def test_sum_corner(self):\n axis0 = self.empty.sum(0)\n axis1 = self.empty.sum(1)\n tm.assertIsInstance(axis0, Series)\n tm.assertIsInstance(axis1, Series)\n self.assertEqual(len(axis0), 0)\n self.assertEqual(len(axis1), 0)\n\n def test_sum_object(self):\n values = self.frame.values.astype(int)\n frame = DataFrame(values, index=self.frame.index,\n columns=self.frame.columns)\n deltas = frame * timedelta(1)\n deltas.sum()\n\n def test_sum_bool(self):\n # ensure this works, bug report\n bools = np.isnan(self.frame)\n bools.sum(1)\n bools.sum(0)\n\n def test_mean_corner(self):\n # unit test when have object data\n the_mean = self.mixed_frame.mean(axis=0)\n the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)\n self.assertTrue(the_sum.index.equals(the_mean.index))\n self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))\n\n # xs sum mixed type, just want to know it works...\n the_mean = self.mixed_frame.mean(axis=1)\n the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)\n self.assertTrue(the_sum.index.equals(the_mean.index))\n\n # take mean of boolean column\n self.frame['bool'] = self.frame['A'] > 0\n means = self.frame.mean(0)\n self.assertEqual(means['bool'], self.frame['bool'].values.mean())\n\n def test_stats_mixed_type(self):\n # don't blow up\n self.mixed_frame.std(1)\n self.mixed_frame.var(1)\n self.mixed_frame.mean(1)\n self.mixed_frame.skew(1)\n\n def test_median_corner(self):\n def wrapper(x):\n if isnull(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, frame=self.intframe,\n check_dtype=False, check_dates=True)\n\n def test_quantile(self):\n from numpy import percentile\n\n q = self.tsframe.quantile(0.1, axis=0)\n self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))\n q = self.tsframe.quantile(0.9, axis=1)\n q = self.intframe.quantile(0.1)\n self.assertEqual(q['A'], percentile(self.intframe['A'], 10))\n\n # test degenerate case\n q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)\n assert(np.isnan(q['x']) and np.isnan(q['y']))\n\n # non-numeric exclusion\n df = DataFrame({'col1':['A','A','B','B'], 'col2':[1,2,3,4]})\n rs = df.quantile(0.5)\n xp = df.median()\n assert_series_equal(rs, xp)\n\n # axis\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [2, 3, 4]}, index=[1, 2, 3])\n result = df.quantile(.5, axis=1)\n expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n result = df.quantile([.5, .75], axis=1)\n expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],\n 3: [3.5, 3.75]}, index=[0.5, 0.75])\n assert_frame_equal(result, expected, check_index_type=True)\n\n # We may want to break API in the future to change this\n # so that we exclude non-numeric along the same axis\n # See GH #7312\n df = DataFrame([[1, 2, 3],\n ['a', 'b', 4]])\n result = df.quantile(.5, axis=1)\n expected = Series([3., 4.], index=[0, 1])\n assert_series_equal(result, expected)\n\n def test_quantile_axis_parameter(self):\n # GH 9543/9544\n\n df = DataFrame({\"A\": [1, 2, 3], 
\"B\": [2, 3, 4]}, index=[1, 2, 3])\n\n result = df.quantile(.5, axis=0)\n\n expected = Series([2., 3.], index=[\"A\", \"B\"])\n assert_series_equal(result, expected)\n\n expected = df.quantile(.5, axis=\"index\")\n assert_series_equal(result, expected)\n\n result = df.quantile(.5, axis=1)\n\n expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n result = df.quantile(.5, axis=\"columns\")\n assert_series_equal(result, expected)\n\n self.assertRaises(ValueError, df.quantile, 0.1, axis=-1)\n self.assertRaises(ValueError, df.quantile, 0.1, axis=\"column\")\n\n def test_quantile_multi(self):\n df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n columns=['a', 'b', 'c'])\n result = df.quantile([.25, .5])\n expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],\n index=[.25, .5], columns=['a', 'b', 'c'])\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.quantile([.25, .5], axis=1)\n expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],\n index=[.25, .5], columns=[0, 1, 2])\n\n # empty\n result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)\n expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},\n index=[.1, .9])\n assert_frame_equal(result, expected)\n\n def test_quantile_datetime(self):\n df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})\n\n # exclude datetime\n result = df.quantile(.5)\n expected = Series([2.5], index=['b'])\n\n # datetime\n result = df.quantile(.5, numeric_only=False)\n expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],\n index=['a', 'b'])\n assert_series_equal(result, expected)\n\n # datetime w/ multi\n result = df.quantile([.5], numeric_only=False)\n expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],\n index=[.5], columns=['a', 'b'])\n assert_frame_equal(result, expected)\n\n # axis = 1\n df['c'] = pd.to_datetime(['2011', '2012'])\n result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)\n expected = Series([Timestamp('2010-07-02 12:00:00'),\n Timestamp('2011-07-02 12:00:00')],\n index=[0, 1])\n assert_series_equal(result, expected)\n\n result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)\n expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),\n Timestamp('2011-07-02 12:00:00')]],\n index=[0.5], columns=[0, 1])\n assert_frame_equal(result, expected)\n\n def test_quantile_invalid(self):\n msg = 'percentiles should all be in the interval \\\\[0, 1\\\\]'\n for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:\n with tm.assertRaisesRegexp(ValueError, msg):\n self.tsframe.quantile(invalid)\n\n def test_cumsum(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cumsum = self.tsframe.cumsum()\n expected = self.tsframe.apply(Series.cumsum)\n assert_frame_equal(cumsum, expected)\n\n # axis = 1\n cumsum = self.tsframe.cumsum(axis=1)\n expected = self.tsframe.apply(Series.cumsum, axis=1)\n assert_frame_equal(cumsum, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cumsum()\n\n # fix issue\n cumsum_xs = self.tsframe.cumsum(axis=1)\n self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))\n\n def test_cumprod(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cumprod = self.tsframe.cumprod()\n expected = self.tsframe.apply(Series.cumprod)\n assert_frame_equal(cumprod, expected)\n\n # axis = 1\n cumprod = self.tsframe.cumprod(axis=1)\n expected = 
self.tsframe.apply(Series.cumprod, axis=1)\n assert_frame_equal(cumprod, expected)\n\n # fix issue\n cumprod_xs = self.tsframe.cumprod(axis=1)\n self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))\n\n # ints\n df = self.tsframe.fillna(0).astype(int)\n df.cumprod(0)\n df.cumprod(1)\n\n # ints32\n df = self.tsframe.fillna(0).astype(np.int32)\n df.cumprod(0)\n df.cumprod(1)\n\n def test_rank(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n ranks0 = self.frame.rank()\n ranks1 = self.frame.rank(1)\n mask = np.isnan(self.frame.values)\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp0[mask] = np.nan\n\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n exp1[mask] = np.nan\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # integers\n df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))\n\n result = df.rank()\n exp = df.astype(float).rank()\n assert_frame_equal(result, exp)\n\n result = df.rank(1)\n exp = df.astype(float).rank(1)\n assert_frame_equal(result, exp)\n\n def test_rank2(self):\n from datetime import datetime\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0\n result = df.rank(1, pct=True)\n assert_frame_equal(result, expected)\n\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = df.rank(0) / 2.0\n result = df.rank(0, pct=True)\n assert_frame_equal(result, expected)\n\n\n\n df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n\n expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])\n result = df.rank(0, numeric_only=False)\n assert_frame_equal(result, expected)\n\n df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])\n result = df.rank(0, numeric_only=False)\n assert_frame_equal(result, expected)\n\n # f7u12, this does not work without extensive workaround\n data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],\n [datetime(2000, 1, 2), datetime(2000, 1, 3),\n datetime(2000, 1, 1)]]\n df = DataFrame(data)\n\n # check the rank\n expected = DataFrame([[2., nan, 1.],\n [2., 3., 1.]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n # mixed-type frames\n self.mixed_frame['datetime'] = datetime.now()\n self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)\n\n result = self.mixed_frame.rank(1)\n expected = self.mixed_frame.rank(1, numeric_only=True)\n assert_frame_equal(result, expected)\n\n df = DataFrame({\"a\":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]})\n exp = DataFrame({\"a\":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. 
]})\n assert_frame_equal(df.rank(), exp)\n\n def test_rank_na_option(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n # bottom\n ranks0 = self.frame.rank(na_option='bottom')\n ranks1 = self.frame.rank(1, na_option='bottom')\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # top\n ranks0 = self.frame.rank(na_option='top')\n ranks1 = self.frame.rank(1, na_option='top')\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fval0)\n exp1 = np.apply_along_axis(rankdata, 1, fval1)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # bottom\n ranks0 = self.frame.rank(na_option='top', ascending=False)\n ranks1 = self.frame.rank(1, na_option='top', ascending=False)\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fvals)\n exp1 = np.apply_along_axis(rankdata, 1, -fvals)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # top\n ranks0 = self.frame.rank(na_option='bottom', ascending=False)\n ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fval0)\n exp1 = np.apply_along_axis(rankdata, 1, -fval1)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n def test_axis_aliases(self):\n\n f = self.frame\n\n # reg name\n expected = f.sum(axis=0)\n result = f.sum(axis='index')\n assert_series_equal(result, expected)\n\n expected = f.sum(axis=1)\n result = f.sum(axis='columns')\n assert_series_equal(result, expected)\n\n def test_combine_first_mixed(self):\n a = Series(['a', 'b'], index=lrange(2))\n b = Series(lrange(2), index=lrange(2))\n f = DataFrame({'A': a, 'B': b})\n\n a = Series(['a', 'b'], index=lrange(5, 7))\n b = Series(lrange(2), index=lrange(5, 7))\n g = DataFrame({'A': a, 'B': b})\n\n combined = f.combine_first(g)\n\n def test_more_asMatrix(self):\n values = self.mixed_frame.as_matrix()\n self.assertEqual(values.shape[1], len(self.mixed_frame.columns))\n\n def test_reindex_boolean(self):\n frame = DataFrame(np.ones((10, 2), dtype=bool),\n index=np.arange(0, 20, 2),\n columns=[0, 2])\n\n reindexed = frame.reindex(np.arange(10))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[0][1]))\n\n reindexed = frame.reindex(columns=lrange(3))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[1]).all())\n\n def test_reindex_objects(self):\n reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])\n self.assertIn('foo', reindexed)\n\n reindexed = self.mixed_frame.reindex(columns=['A', 'B'])\n self.assertNotIn('foo', reindexed)\n\n def test_reindex_corner(self):\n index = Index(['a', 'b', 'c'])\n dm = self.empty.reindex(index=[1, 2, 3])\n reindexed = dm.reindex(columns=index)\n 
self.assertTrue(reindexed.columns.equals(index))\n\n # ints are weird\n\n smaller = self.intframe.reindex(columns=['A', 'B', 'E'])\n self.assertEqual(smaller['E'].dtype, np.float64)\n\n def test_reindex_axis(self):\n cols = ['A', 'B', 'E']\n reindexed1 = self.intframe.reindex_axis(cols, axis=1)\n reindexed2 = self.intframe.reindex(columns=cols)\n assert_frame_equal(reindexed1, reindexed2)\n\n rows = self.intframe.index[0:5]\n reindexed1 = self.intframe.reindex_axis(rows, axis=0)\n reindexed2 = self.intframe.reindex(index=rows)\n assert_frame_equal(reindexed1, reindexed2)\n\n self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)\n\n # no-op case\n cols = self.frame.columns.copy()\n newFrame = self.frame.reindex_axis(cols, axis=1)\n assert_frame_equal(newFrame, self.frame)\n\n def test_reindex_with_nans(self):\n df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],\n columns=['a', 'b'],\n index=[100.0, 101.0, np.nan, 102.0, 103.0])\n\n result = df.reindex(index=[101.0, 102.0, 103.0])\n expected = df.iloc[[1, 3, 4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[103.0])\n expected = df.iloc[[4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[101.0])\n expected = df.iloc[[1]]\n assert_frame_equal(result, expected)\n\n def test_reindex_multi(self):\n df = DataFrame(np.random.randn(3, 3))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(2), lrange(2))\n expected = df.reindex(lrange(2)).reindex(columns=lrange(2))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])\n\n result = df.reindex(index=[0, 1], columns=['a', 'b'])\n expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])\n\n assert_frame_equal(result, expected)\n\n def test_rename_objects(self):\n renamed = self.mixed_frame.rename(columns=str.upper)\n self.assertIn('FOO', renamed)\n self.assertNotIn('foo', renamed)\n\n def test_fill_corner(self):\n self.mixed_frame.ix[5:20,'foo'] = nan\n self.mixed_frame.ix[-10:,'A'] = nan\n\n filled = self.mixed_frame.fillna(value=0)\n self.assertTrue((filled.ix[5:20,'foo'] == 0).all())\n del self.mixed_frame['foo']\n\n empty_float = self.frame.reindex(columns=[])\n result = empty_float.fillna(value=0)\n\n def test_count_objects(self):\n dm = DataFrame(self.mixed_frame._series)\n df = DataFrame(self.mixed_frame._series)\n\n tm.assert_series_equal(dm.count(), df.count())\n tm.assert_series_equal(dm.count(1), df.count(1))\n\n def test_cumsum_corner(self):\n dm = DataFrame(np.arange(20).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n result = dm.cumsum()\n\n #----------------------------------------------------------------------\n # Stacking / unstacking\n\n def test_stack_unstack(self):\n stacked = self.frame.stack()\n stacked_df = DataFrame({'foo': stacked, 'bar': stacked})\n\n unstacked = stacked.unstack()\n unstacked_df = stacked_df.unstack()\n\n assert_frame_equal(unstacked, self.frame)\n assert_frame_equal(unstacked_df['bar'], self.frame)\n\n unstacked_cols = stacked.unstack(0)\n unstacked_cols_df = stacked_df.unstack(0)\n assert_frame_equal(unstacked_cols.T, self.frame)\n 
assert_frame_equal(unstacked_cols_df['bar'].T, self.frame)\n\n def test_stack_ints(self):\n df = DataFrame(\n np.random.randn(30, 27),\n columns=MultiIndex.from_tuples(\n list(itertools.product(range(3), repeat=3))\n )\n )\n assert_frame_equal(\n df.stack(level=[1, 2]),\n df.stack(level=1).stack(level=1)\n )\n assert_frame_equal(\n df.stack(level=[-2, -1]),\n df.stack(level=1).stack(level=1)\n )\n\n df_named = df.copy()\n df_named.columns.set_names(range(3), inplace=True)\n assert_frame_equal(\n df_named.stack(level=[1, 2]),\n df_named.stack(level=1).stack(level=1)\n )\n\n def test_stack_mixed_levels(self):\n columns = MultiIndex.from_tuples(\n [('A', 'cat', 'long'), ('B', 'cat', 'long'),\n ('A', 'dog', 'short'), ('B', 'dog', 'short')],\n names=['exp', 'animal', 'hair_length']\n )\n df = DataFrame(randn(4, 4), columns=columns)\n\n animal_hair_stacked = df.stack(level=['animal', 'hair_length'])\n exp_hair_stacked = df.stack(level=['exp', 'hair_length'])\n\n # GH #8584: Need to check that stacking works when a number\n # is passed that is both a level name and in the range of\n # the level numbers\n df2 = df.copy()\n df2.columns.names = ['exp', 'animal', 1]\n assert_frame_equal(df2.stack(level=['animal', 1]),\n animal_hair_stacked, check_names=False)\n assert_frame_equal(df2.stack(level=['exp', 1]),\n exp_hair_stacked, check_names=False)\n\n # When mixed types are passed and the ints are not level\n # names, raise\n self.assertRaises(ValueError, df2.stack, level=['animal', 0])\n\n # GH #8584: Having 0 in the level names could raise a\n # strange error about lexsort depth\n df3 = df.copy()\n df3.columns.names = ['exp', 'animal', 0]\n assert_frame_equal(df3.stack(level=['animal', 0]),\n animal_hair_stacked, check_names=False)\n\n def test_stack_int_level_names(self):\n columns = MultiIndex.from_tuples(\n [('A', 'cat', 'long'), ('B', 'cat', 'long'),\n ('A', 'dog', 'short'), ('B', 'dog', 'short')],\n names=['exp', 'animal', 'hair_length']\n )\n df = DataFrame(randn(4, 4), columns=columns)\n\n exp_animal_stacked = df.stack(level=['exp', 'animal'])\n animal_hair_stacked = df.stack(level=['animal', 'hair_length'])\n exp_hair_stacked = df.stack(level=['exp', 'hair_length'])\n\n df2 = df.copy()\n df2.columns.names = [0, 1, 2]\n assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,\n check_names=False )\n assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,\n check_names=False)\n assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,\n check_names=False)\n\n # Out-of-order int column names\n df3 = df.copy()\n df3.columns.names = [2, 0, 1]\n assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,\n check_names=False)\n assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,\n check_names=False)\n assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,\n check_names=False)\n\n\n def test_unstack_bool(self):\n df = DataFrame([False, False],\n index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),\n columns=['col'])\n rs = df.unstack()\n xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],\n dtype=object),\n index=['a', 'b'],\n columns=MultiIndex.from_arrays([['col', 'col'],\n ['c', 'l']]))\n assert_frame_equal(rs, xp)\n\n def test_unstack_level_binding(self):\n # GH9856\n mi = pd.MultiIndex(\n levels=[[u('foo'), u('bar')], [u('one'), u('two')],\n [u('a'), u('b')]],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],\n names=[u('first'), u('second'), u('third')])\n s = pd.Series(0, index=mi)\n result = s.unstack([1, 2]).stack(0)\n\n expected_mi 
= pd.MultiIndex(\n levels=[['foo', 'bar'], ['one', 'two']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=['first', 'second'])\n\n expected = pd.DataFrame(np.array([[np.nan, 0],\n [0, np.nan],\n [np.nan, 0],\n [0, np.nan]],\n dtype=np.float64),\n index=expected_mi,\n columns=pd.Index(['a', 'b'], name='third'))\n\n assert_frame_equal(result, expected)\n\n def test_unstack_to_series(self):\n # check reversibility\n data = self.frame.unstack()\n\n self.assertTrue(isinstance(data, Series))\n undo = data.unstack().T\n assert_frame_equal(undo, self.frame)\n\n # check NA handling\n data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})\n data.index = Index(['a', 'b', 'c'])\n result = data.unstack()\n\n midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],\n labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])\n expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)\n\n assert_series_equal(result, expected)\n\n # check composability of unstack\n old_data = data.copy()\n for _ in range(4):\n data = data.unstack()\n assert_frame_equal(old_data, data)\n\n def test_unstack_dtypes(self):\n\n # GH 2929\n rows = [[1, 1, 3, 4],\n [1, 2, 3, 4],\n [2, 1, 3, 4],\n [2, 2, 3, 4]]\n\n df = DataFrame(rows, columns=list('ABCD'))\n result = df.get_dtype_counts()\n expected = Series({'int64' : 4})\n assert_series_equal(result, expected)\n\n # single dtype\n df2 = df.set_index(['A','B'])\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'int64' : 4})\n assert_series_equal(result, expected)\n\n # mixed\n df2 = df.set_index(['A','B'])\n df2['C'] = 3.\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'int64' : 2, 'float64' : 2})\n assert_series_equal(result, expected)\n\n df2['D'] = 'foo'\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'float64' : 2, 'object' : 2})\n assert_series_equal(result, expected)\n\n # GH7405\n for c, d in (np.zeros(5), np.zeros(5)), \\\n (np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):\n\n df = DataFrame({'A': ['a']*5, 'C':c, 'D':d,\n 'B':pd.date_range('2012-01-01', periods=5)})\n\n right = df.iloc[:3].copy(deep=True)\n\n df = df.set_index(['A', 'B'])\n df['D'] = df['D'].astype('int64')\n\n left = df.iloc[:3].unstack(0)\n right = right.set_index(['A', 'B']).unstack(0)\n right[('D', 'a')] = right[('D', 'a')].astype('int64')\n\n self.assertEqual(left.shape, (3, 2))\n tm.assert_frame_equal(left, right)\n\n def test_unstack_non_unique_index_names(self):\n idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],\n names=['c1', 'c1'])\n df = DataFrame([1, 2], index=idx)\n with tm.assertRaises(ValueError):\n df.unstack('c1')\n\n with tm.assertRaises(ValueError):\n df.T.stack('c1')\n\n def test_unstack_nan_index(self): # GH7466\n cast = lambda val: '{0:1}'.format('' if val != val else val)\n nan = np.nan\n\n def verify(df):\n mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]\n rows, cols = df.notnull().values.nonzero()\n for i, j in zip(rows, cols):\n left = sorted(df.iloc[i, j].split('.'))\n right = mk_list(df.index[i]) + mk_list(df.columns[j])\n right = sorted(list(map(cast, right)))\n self.assertEqual(left, right)\n\n df = DataFrame({'jim':['a', 'b', nan, 'd'],\n 'joe':['w', 'x', 'y', 'z'],\n 'jolie':['a.w', 'b.x', ' .y', 'd.z']})\n\n left = df.set_index(['jim', 'joe']).unstack()['jolie']\n right = df.set_index(['joe', 'jim']).unstack()['jolie'].T\n assert_frame_equal(left, right)\n\n for idx in permutations(df.columns[:2]):\n mi = df.set_index(list(idx))\n for lev in 
range(2):\n udf = mi.unstack(level=lev)\n self.assertEqual(udf.notnull().values.sum(), len(df))\n verify(udf['jolie'])\n\n df = DataFrame({'1st':['d'] * 3 + [nan] * 5 + ['a'] * 2 +\n ['c'] * 3 + ['e'] * 2 + ['b'] * 5,\n '2nd':['y'] * 2 + ['w'] * 3 + [nan] * 3 +\n ['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,\n '3rd':[67,39,53,72,57,80,31,18,11,30,59,\n 50,62,59,76,52,14,53,60,51]})\n\n df['4th'], df['5th'] = \\\n df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \\\n df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)\n\n for idx in permutations(['1st', '2nd', '3rd']):\n mi = df.set_index(list(idx))\n for lev in range(3):\n udf = mi.unstack(level=lev)\n self.assertEqual(udf.notnull().values.sum(), 2 * len(df))\n for col in ['4th', '5th']:\n verify(udf[col])\n\n # GH7403\n df = pd.DataFrame({'A': list('aaaabbbb'),'B':range(8), 'C':range(8)})\n df.iloc[3, 1] = np.NaN\n left = df.set_index(['A', 'B']).unstack(0)\n\n vals = [[3, 0, 1, 2, nan, nan, nan, nan],\n [nan, nan, nan, nan, 4, 5, 6, 7]]\n vals = list(map(list, zip(*vals)))\n idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')\n cols = MultiIndex(levels=[['C'], ['a', 'b']],\n labels=[[0, 0], [0, 1]],\n names=[None, 'A'])\n\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n df = DataFrame({'A': list('aaaabbbb'), 'B':list(range(4))*2,\n 'C':range(8)})\n df.iloc[2,1] = np.NaN\n left = df.set_index(['A', 'B']).unstack(0)\n\n vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]\n cols = MultiIndex(levels=[['C'], ['a', 'b']],\n labels=[[0, 0], [0, 1]],\n names=[None, 'A'])\n idx = Index([nan, 0, 1, 2, 3], name='B')\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n df = pd.DataFrame({'A': list('aaaabbbb'),'B':list(range(4))*2,\n 'C':range(8)})\n df.iloc[3,1] = np.NaN\n left = df.set_index(['A', 'B']).unstack(0)\n\n vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]\n cols = MultiIndex(levels=[['C'], ['a', 'b']],\n labels=[[0, 0], [0, 1]],\n names=[None, 'A'])\n idx = Index([nan, 0, 1, 2, 3], name='B')\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n # GH7401\n df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C':np.arange(10),\n 'B':date_range('2012-01-01', periods=5).tolist()*2 })\n\n df.iloc[3,1] = np.NaN\n left = df.set_index(['A', 'B']).unstack()\n\n vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])\n idx = Index(['a', 'b'], name='A')\n cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],\n labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],\n names=[None, 'B'])\n\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n # GH4862\n vals = [['Hg', nan, nan, 680585148],\n ['U', 0.0, nan, 680585148],\n ['Pb', 7.07e-06, nan, 680585148],\n ['Sn', 2.3614e-05, 0.0133, 680607017],\n ['Ag', 0.0, 0.0133, 680607017],\n ['Hg', -0.00015, 0.0133, 680607017]]\n df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],\n index=[17263, 17264, 17265, 17266, 17267, 17268])\n\n left = df.copy().set_index(['s_id','dosage','agent']).unstack()\n\n vals = [[nan, nan, 7.07e-06, nan, 0.0],\n [0.0, -0.00015, nan, 2.3614e-05, nan]]\n\n idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],\n labels=[[0, 1], [-1, 0]],\n names=['s_id', 'dosage'])\n\n cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],\n labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],\n names=[None, 'agent'])\n\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n 
left = df.ix[17264:].copy().set_index(['s_id','dosage','agent'])\n assert_frame_equal(left.unstack(), right)\n\n # GH9497 - multiple unstack with nulls\n df = DataFrame({'1st':[1, 2, 1, 2, 1, 2],\n '2nd':pd.date_range('2014-02-01', periods=6, freq='D'),\n 'jim':100 + np.arange(6),\n 'joe':(np.random.randn(6) * 10).round(2)})\n\n df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')\n df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan\n df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan\n\n left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])\n self.assertEqual(left.notnull().values.sum(), 2 * len(df))\n\n for col in ['jim', 'joe']:\n for _, r in df.iterrows():\n key = r['1st'], (col, r['2nd'], r['3rd'])\n self.assertEqual(r[col], left.loc[key])\n\n def test_stack_datetime_column_multiIndex(self):\n # GH 8039\n t = datetime(2014, 1, 1)\n df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))\n result = df.stack()\n\n eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])\n ecols = MultiIndex.from_tuples([(t, 'A')])\n expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)\n assert_frame_equal(result, expected)\n\n def test_stack_partial_multiIndex(self):\n # GH 8844\n def _test_stack_with_multiindex(multiindex):\n df = DataFrame(np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),\n columns=multiindex)\n for level in (-1, 0, 1, [0, 1], [1, 0]):\n result = df.stack(level=level, dropna=False)\n\n if isinstance(level, int):\n # Stacking a single level should not make any all-NaN rows,\n # so df.stack(level=level, dropna=False) should be the same\n # as df.stack(level=level, dropna=True).\n expected = df.stack(level=level, dropna=True)\n if isinstance(expected, Series):\n assert_series_equal(result, expected)\n else:\n assert_frame_equal(result, expected)\n\n df.columns = MultiIndex.from_tuples(df.columns.get_values(),\n names=df.columns.names)\n expected = df.stack(level=level, dropna=False)\n if isinstance(expected, Series):\n assert_series_equal(result, expected)\n else:\n assert_frame_equal(result, expected)\n\n full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),\n ('A', 'y'),\n ('C', 'x'), ('C', 'u')],\n names=['Upper', 'Lower'])\n for multiindex_columns in ([0, 1, 2, 3, 4],\n [0, 1, 2, 3], [0, 1, 2, 4],\n [0, 1, 2], [1, 2, 3], [2, 3, 4],\n [0, 1], [0, 2], [0, 3],\n [0], [2], [4]):\n _test_stack_with_multiindex(full_multiindex[multiindex_columns])\n if len(multiindex_columns) > 1:\n multiindex_columns.reverse()\n _test_stack_with_multiindex(full_multiindex[multiindex_columns])\n\n df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])\n result = df.stack(dropna=False)\n expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],\n index=MultiIndex(levels=[[0, 1], ['u', 'x', 'y', 'z']],\n labels=[[0, 0, 1, 1], [1, 3, 1, 3]],\n names=[None, 'Lower']),\n columns=Index(['B', 'C'], name='Upper'),\n dtype=df.dtypes[0])\n assert_frame_equal(result, expected)\n\n def test_repr_with_mi_nat(self):\n df = DataFrame({'X': [1, 2]},\n index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])\n res = repr(df)\n exp = ' X\\nNaT a 1\\n2013-01-01 b 2'\n nose.tools.assert_equal(res, exp)\n\n def test_reset_index(self):\n stacked = self.frame.stack()[::2]\n stacked = DataFrame({'foo': stacked, 'bar': stacked})\n\n names = ['first', 'second']\n stacked.index.names = names\n deleveled = stacked.reset_index()\n for i, (lev, lab) in enumerate(zip(stacked.index.levels,\n stacked.index.labels)):\n values = lev.take(lab)\n name = names[i]\n 
assert_almost_equal(values, deleveled[name])\n\n stacked.index.names = [None, None]\n deleveled2 = stacked.reset_index()\n self.assert_numpy_array_equal(deleveled['first'],\n deleveled2['level_0'])\n self.assert_numpy_array_equal(deleveled['second'],\n deleveled2['level_1'])\n\n # default name assigned\n rdf = self.frame.reset_index()\n self.assert_numpy_array_equal(rdf['index'], self.frame.index.values)\n\n # default name assigned, corner case\n df = self.frame.copy()\n df['index'] = 'foo'\n rdf = df.reset_index()\n self.assert_numpy_array_equal(rdf['level_0'], self.frame.index.values)\n\n # but this is ok\n self.frame.index.name = 'index'\n deleveled = self.frame.reset_index()\n self.assert_numpy_array_equal(deleveled['index'],\n self.frame.index.values)\n self.assert_numpy_array_equal(deleveled.index,\n np.arange(len(deleveled)))\n\n # preserve column names\n self.frame.columns.name = 'columns'\n resetted = self.frame.reset_index()\n self.assertEqual(resetted.columns.name, 'columns')\n\n # only remove certain columns\n frame = self.frame.reset_index().set_index(['index', 'A', 'B'])\n rs = frame.reset_index(['A', 'B'])\n\n assert_frame_equal(rs, self.frame, check_names=False) # TODO should reset_index check_names ?\n\n rs = frame.reset_index(['index', 'A', 'B'])\n assert_frame_equal(rs, self.frame.reset_index(), check_names=False)\n\n rs = frame.reset_index(['index', 'A', 'B'])\n assert_frame_equal(rs, self.frame.reset_index(), check_names=False)\n\n rs = frame.reset_index('A')\n xp = self.frame.reset_index().set_index(['index', 'B'])\n assert_frame_equal(rs, xp, check_names=False)\n\n # test resetting in place\n df = self.frame.copy()\n resetted = self.frame.reset_index()\n df.reset_index(inplace=True)\n assert_frame_equal(df, resetted, check_names=False)\n\n frame = self.frame.reset_index().set_index(['index', 'A', 'B'])\n rs = frame.reset_index('A', drop=True)\n xp = self.frame.copy()\n del xp['A']\n xp = xp.set_index(['B'], append=True)\n assert_frame_equal(rs, xp, check_names=False)\n\n def test_reset_index_right_dtype(self):\n time = np.arange(0.0, 10, np.sqrt(2) / 2)\n s1 = Series((9.81 * time ** 2) / 2,\n index=Index(time, name='time'),\n name='speed')\n df = DataFrame(s1)\n\n resetted = s1.reset_index()\n self.assertEqual(resetted['time'].dtype, np.float64)\n\n resetted = df.reset_index()\n self.assertEqual(resetted['time'].dtype, np.float64)\n\n def test_reset_index_multiindex_col(self):\n vals = np.random.randn(3, 3).astype(object)\n idx = ['x', 'y', 'z']\n full = np.hstack(([[x] for x in idx], vals))\n df = DataFrame(vals, Index(idx, name='a'),\n columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])\n rs = df.reset_index()\n xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],\n ['', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_fill=None)\n xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_level=1, col_fill='blah')\n xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n df = DataFrame(vals,\n MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],\n names=['d', 'a']),\n columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])\n rs = df.reset_index('a', )\n xp = DataFrame(full, Index([0, 1, 2], name='d'),\n columns=[['a', 'b', 'b', 'c'],\n ['', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index('a', col_fill=None)\n xp = DataFrame(full, 
Index(lrange(3), name='d'),\n columns=[['a', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index('a', col_fill='blah', col_level=1)\n xp = DataFrame(full, Index(lrange(3), name='d'),\n columns=[['blah', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n def test_reset_index_with_datetimeindex_cols(self):\n # GH5818\n #\n df = pd.DataFrame([[1, 2], [3, 4]],\n columns=pd.date_range('1/1/2013', '1/2/2013'),\n index=['A', 'B'])\n\n result = df.reset_index()\n expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],\n columns=['index', datetime(2013, 1, 1),\n datetime(2013, 1, 2)])\n assert_frame_equal(result, expected)\n\n #----------------------------------------------------------------------\n # Tests to cope with refactored internals\n def test_as_matrix_numeric_cols(self):\n self.frame['foo'] = 'bar'\n\n values = self.frame.as_matrix(['A', 'B', 'C', 'D'])\n self.assertEqual(values.dtype, np.float64)\n\n def test_as_matrix_lcd(self):\n\n # mixed lcd\n values = self.mixed_float.as_matrix(['A', 'B', 'C', 'D'])\n self.assertEqual(values.dtype, np.float64)\n\n values = self.mixed_float.as_matrix(['A', 'B', 'C' ])\n self.assertEqual(values.dtype, np.float32)\n\n values = self.mixed_float.as_matrix(['C'])\n self.assertEqual(values.dtype, np.float16)\n\n values = self.mixed_int.as_matrix(['A','B','C','D'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A','D'])\n self.assertEqual(values.dtype, np.int64)\n\n # guess all ints are cast to uints....\n values = self.mixed_int.as_matrix(['A','B','C'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A','C'])\n self.assertEqual(values.dtype, np.int32)\n\n values = self.mixed_int.as_matrix(['C','D'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A'])\n self.assertEqual(values.dtype, np.int32)\n\n values = self.mixed_int.as_matrix(['C'])\n self.assertEqual(values.dtype, np.uint8)\n\n def test_constructor_with_convert(self):\n # this is actually mostly a test of lib.maybe_convert_objects\n # #2845\n df = DataFrame({'A' : [2**63-1] })\n result = df['A']\n expected = Series(np.asarray([2**63-1], np.int64), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2**63] })\n result = df['A']\n expected = Series(np.asarray([2**63], np.object_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [datetime(2005, 1, 1), True] })\n result = df['A']\n expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),\n name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [None, 1] })\n result = df['A']\n expected = Series(np.asarray([np.nan, 1], np.float_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0, 2] })\n result = df['A']\n expected = Series(np.asarray([1.0, 2], np.float_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, 3] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, 3], np.complex_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, 3.0] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, 3.0], np.complex_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, True] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, True], np.object_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0, 
None] })\n result = df['A']\n expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, None] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, np.nan], np.complex_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2.0, 1, True, None] })\n result = df['A']\n expected = Series(np.asarray([2.0, 1, True, None], np.object_), name='A')\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2.0, 1, datetime(2006, 1, 1), None] })\n result = df['A']\n expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),\n None], np.object_), name='A')\n assert_series_equal(result, expected)\n\n def test_construction_with_mixed(self):\n # test construction edge cases with mixed types\n\n # f7u12, this does not work without extensive workaround\n data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],\n [datetime(2000, 1, 2), datetime(2000, 1, 3),\n datetime(2000, 1, 1)]]\n df = DataFrame(data)\n\n # check dtypes\n result = df.get_dtype_counts().sort_values()\n expected = Series({ 'datetime64[ns]' : 3 })\n\n # mixed-type frames\n self.mixed_frame['datetime'] = datetime.now()\n self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)\n self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')\n self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')\n result = self.mixed_frame.get_dtype_counts().sort_values()\n expected = Series({ 'float64' : 4,\n 'object' : 1,\n 'datetime64[ns]' : 1,\n 'timedelta64[ns]' : 1}).sort_values()\n assert_series_equal(result,expected)\n\n def test_construction_with_conversions(self):\n\n # convert from a numpy array of non-ns timedelta64\n arr = np.array([1,2,3],dtype='timedelta64[s]')\n s = Series(arr)\n expected = Series(timedelta_range('00:00:01',periods=3,freq='s'))\n assert_series_equal(s,expected)\n\n df = DataFrame(index=range(3))\n df['A'] = arr\n expected = DataFrame({'A' : timedelta_range('00:00:01',periods=3,freq='s')},\n index=range(3))\n assert_frame_equal(df,expected)\n\n # convert from a numpy array of non-ns datetime64\n #### note that creating a numpy datetime64 is in LOCAL time!!!!\n #### seems to work for M8[D], but not for M8[s]\n\n s = Series(np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]'))\n assert_series_equal(s,Series(date_range('20130101',periods=3,freq='D')))\n #s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))\n #assert_series_equal(s,date_range('20130101 00:00:01',period=3,freq='s'))\n\n expected = DataFrame({\n 'dt1' : Timestamp('20130101'),\n 'dt2' : date_range('20130101',periods=3),\n #'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),\n },index=range(3))\n\n\n df = DataFrame(index=range(3))\n df['dt1'] = np.datetime64('2013-01-01')\n df['dt2'] = np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]')\n #df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')\n assert_frame_equal(df, expected)\n\n def test_constructor_frame_copy(self):\n cop = DataFrame(self.frame, copy=True)\n cop['A'] = 5\n self.assertTrue((cop['A'] == 5).all())\n self.assertFalse((self.frame['A'] == 5).all())\n\n def test_constructor_ndarray_copy(self):\n df = DataFrame(self.frame.values)\n\n self.frame.values[5] = 5\n self.assertTrue((df.values[5] == 5).all())\n\n df = DataFrame(self.frame.values, copy=True)\n self.frame.values[6] = 6\n 
self.assertFalse((df.values[6] == 6).all())\n\n def test_constructor_series_copy(self):\n series = self.frame._series\n\n df = DataFrame({'A': series['A']})\n df['A'][:] = 5\n\n self.assertFalse((series['A'] == 5).all())\n\n def test_constructor_compound_dtypes(self):\n # GH 5191\n # compound dtypes should raise not-implementederror\n\n def f(dtype):\n return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), \"aa\", 20), 9)),\n columns=[\"A\", \"B\", \"C\"], dtype=dtype)\n\n self.assertRaises(NotImplementedError, f, [(\"A\",\"datetime64[h]\"), (\"B\",\"str\"), (\"C\",\"int32\")])\n\n # these work (though results may be unexpected)\n f('int64')\n f('float64')\n\n # 10822\n # invalid error message on dt inference\n if not is_platform_windows():\n f('M8[ns]')\n\n def test_assign_columns(self):\n self.frame['hi'] = 'there'\n\n frame = self.frame.copy()\n frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']\n assert_series_equal(self.frame['C'], frame['baz'], check_names=False)\n assert_series_equal(self.frame['hi'], frame['foo2'], check_names=False)\n\n def test_columns_with_dups(self):\n\n # GH 3468 related\n\n # basic\n df = DataFrame([[1,2]], columns=['a','a'])\n df.columns = ['a','a.1']\n str(df)\n expected = DataFrame([[1,2]], columns=['a','a.1'])\n assert_frame_equal(df, expected)\n\n df = DataFrame([[1,2,3]], columns=['b','a','a'])\n df.columns = ['b','a','a.1']\n str(df)\n expected = DataFrame([[1,2,3]], columns=['b','a','a.1'])\n assert_frame_equal(df, expected)\n\n # with a dup index\n df = DataFrame([[1,2]], columns=['a','a'])\n df.columns = ['b','b']\n str(df)\n expected = DataFrame([[1,2]], columns=['b','b'])\n assert_frame_equal(df, expected)\n\n # multi-dtype\n df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c'])\n df.columns = list('ABCDEFG')\n str(df)\n expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG'))\n assert_frame_equal(df, expected)\n\n # this is an error because we cannot disambiguate the dup columns\n self.assertRaises(Exception, lambda x: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a']))\n\n # dups across blocks\n df_float = DataFrame(np.random.randn(10, 3),dtype='float64')\n df_int = DataFrame(np.random.randn(10, 3),dtype='int64')\n df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)\n df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)\n\n self.assertEqual(len(df._data._blknos), len(df.columns))\n self.assertEqual(len(df._data._blklocs), len(df.columns))\n\n # testing iget\n for i in range(len(df.columns)):\n df.iloc[:,i]\n\n # dup columns across dtype GH 2079/2194\n vals = [[1, -1, 2.], [2, -2, 3.]]\n rs = DataFrame(vals, columns=['A', 'A', 'B'])\n xp = DataFrame(vals)\n xp.columns = ['A', 'A', 'B']\n assert_frame_equal(rs, xp)\n\n def test_insert_column_bug_4032(self):\n\n # GH4032, inserting a column and renaming causing errors\n df = DataFrame({'b': [1.1, 2.2]})\n df = df.rename(columns={})\n df.insert(0, 'a', [1, 2])\n\n result = df.rename(columns={})\n str(result)\n expected = DataFrame([[1,1.1],[2, 2.2]],columns=['a','b'])\n assert_frame_equal(result,expected)\n df.insert(0, 'c', [1.3, 2.3])\n\n result = df.rename(columns={})\n str(result)\n\n expected = DataFrame([[1.3,1,1.1],[2.3,2, 2.2]],columns=['c','a','b'])\n assert_frame_equal(result,expected)\n\n def 
test_cast_internals(self):\n casted = DataFrame(self.frame._data, dtype=int)\n expected = DataFrame(self.frame._series, dtype=int)\n assert_frame_equal(casted, expected)\n\n casted = DataFrame(self.frame._data, dtype=np.int32)\n expected = DataFrame(self.frame._series, dtype=np.int32)\n assert_frame_equal(casted, expected)\n\n def test_consolidate(self):\n self.frame['E'] = 7.\n consolidated = self.frame.consolidate()\n self.assertEqual(len(consolidated._data.blocks), 1)\n\n # Ensure copy, do I want this?\n recons = consolidated.consolidate()\n self.assertIsNot(recons, consolidated)\n assert_frame_equal(recons, consolidated)\n\n self.frame['F'] = 8.\n self.assertEqual(len(self.frame._data.blocks), 3)\n self.frame.consolidate(inplace=True)\n self.assertEqual(len(self.frame._data.blocks), 1)\n\n def test_consolidate_inplace(self):\n frame = self.frame.copy()\n\n # triggers in-place consolidation\n for letter in range(ord('A'), ord('Z')):\n self.frame[chr(letter)] = chr(letter)\n\n def test_as_matrix_consolidate(self):\n self.frame['E'] = 7.\n self.assertFalse(self.frame._data.is_consolidated())\n _ = self.frame.as_matrix()\n self.assertTrue(self.frame._data.is_consolidated())\n\n def test_modify_values(self):\n self.frame.values[5] = 5\n self.assertTrue((self.frame.values[5] == 5).all())\n\n # unconsolidated\n self.frame['E'] = 7.\n self.frame.values[6] = 6\n self.assertTrue((self.frame.values[6] == 6).all())\n\n def test_boolean_set_uncons(self):\n self.frame['E'] = 7.\n\n expected = self.frame.values.copy()\n expected[expected > 1] = 2\n\n self.frame[self.frame > 1] = 2\n assert_almost_equal(expected, self.frame.values)\n\n def test_xs_view(self):\n \"\"\"\n in 0.14 this will return a view if possible\n a copy otherwise, but this is numpy dependent\n \"\"\"\n\n dm = DataFrame(np.arange(20.).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n\n dm.xs(2)[:] = 10\n self.assertTrue((dm.xs(2) == 10).all())\n\n def test_boolean_indexing(self):\n idx = lrange(3)\n cols = ['A','B','C']\n df1 = DataFrame(index=idx, columns=cols,\n data=np.array([[0.0, 0.5, 1.0],\n [1.5, 2.0, 2.5],\n [3.0, 3.5, 4.0]],\n dtype=float))\n df2 = DataFrame(index=idx, columns=cols,\n data=np.ones((len(idx), len(cols))))\n\n expected = DataFrame(index=idx, columns=cols,\n data=np.array([[0.0, 0.5, 1.0],\n [1.5, 2.0, -1],\n [-1, -1, -1]], dtype=float))\n\n df1[df1 > 2.0 * df2] = -1\n assert_frame_equal(df1, expected)\n with assertRaisesRegexp(ValueError, 'Item wrong length'):\n df1[df1.index[:-1] > 2] = -1\n\n def test_boolean_indexing_mixed(self):\n df = DataFrame(\n {long(0): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n long(1): {35: np.nan,\n 40: 0.32632316859446198,\n 43: np.nan,\n 49: 0.32632316859446198,\n 50: 0.39114724480578139},\n long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},\n long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n 'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})\n\n # mixed int/float ok\n df2 = df.copy()\n df2[df2>0.3] = 1\n expected = df.copy()\n expected.loc[40,1] = 1\n expected.loc[49,1] = 1\n expected.loc[50,1] = 1\n expected.loc[35,4] = 1\n assert_frame_equal(df2,expected)\n\n df['foo'] = 'test'\n with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):\n df[df > 0.3] = 1\n\n def test_sum_bools(self):\n df = DataFrame(index=lrange(1), columns=lrange(10))\n bools = isnull(df)\n self.assertEqual(bools.sum(axis=1)[0], 10)\n\n 
def test_fillna_col_reordering(self):\n idx = lrange(20)\n cols = [\"COL.\" + str(i) for i in range(5, 0, -1)]\n data = np.random.rand(20, 5)\n df = DataFrame(index=lrange(20), columns=cols, data=data)\n filled = df.fillna(method='ffill')\n self.assertEqual(df.columns.tolist(), filled.columns.tolist())\n\n def test_take(self):\n\n # homogeneous\n #----------------------------------------\n order = [3, 1, 2, 0]\n for df in [self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['D', 'B', 'C', 'A']]\n assert_frame_equal(result, expected, check_names=False)\n\n # neg indices\n order = [2,1,-1]\n for df in [self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['C', 'B', 'D']]\n assert_frame_equal(result, expected, check_names=False)\n\n # illegal indices\n self.assertRaises(IndexError, df.take, [3,1,2,30], axis=0)\n self.assertRaises(IndexError, df.take, [3,1,2,-31], axis=0)\n self.assertRaises(IndexError, df.take, [3,1,2,5], axis=1)\n self.assertRaises(IndexError, df.take, [3,1,2,-5], axis=1)\n\n # mixed-dtype\n #----------------------------------------\n order = [4, 1, 2, 0, 3]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n # neg indices\n order = [4,1,-2]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['foo', 'B', 'D']]\n assert_frame_equal(result, expected)\n\n # by dtype\n order = [1, 2, 0, 3]\n for df in [self.mixed_float,self.mixed_int]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n def test_iterkv_deprecation(self):\n with tm.assert_produces_warning(FutureWarning):\n self.mixed_float.iterkv()\n\n def test_iterkv_names(self):\n for k, v in compat.iteritems(self.mixed_frame):\n self.assertEqual(v.name, k)\n\n def test_series_put_names(self):\n series = self.mixed_frame._series\n for k, v in compat.iteritems(series):\n self.assertEqual(v.name, k)\n\n def test_dot(self):\n a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],\n columns=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],\n columns=['one', 'two'])\n\n result = a.dot(b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n # Check alignment (use the reindexed frame so alignment is actually exercised)\n b1 = b.reindex(index=reversed(b.index))\n result = a.dot(b1)\n assert_frame_equal(result, expected)\n\n # Check series argument\n result = a.dot(b['one'])\n assert_series_equal(result, expected['one'], check_names=False)\n self.assertTrue(result.name is None)\n\n result = a.dot(b1['one'])\n assert_series_equal(result, expected['one'], check_names=False)\n self.assertTrue(result.name is None)\n\n # can pass correct-length arrays\n row = a.ix[0].values\n\n result = 
a.dot(row)\n exp = a.dot(a.ix[0])\n assert_series_equal(result, exp)\n\n with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):\n a.dot(row[:-1])\n\n a = np.random.rand(1, 5)\n b = np.random.rand(5, 1)\n A = DataFrame(a)\n B = DataFrame(b)\n\n # it works\n result = A.dot(b)\n\n # unaligned\n df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))\n df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])\n\n assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)\n\n def test_idxmin(self):\n frame = self.frame\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmin(axis=axis, skipna=skipna)\n expected = df.apply(\n Series.idxmin, axis=axis, skipna=skipna)\n assert_series_equal(result, expected)\n\n self.assertRaises(ValueError, frame.idxmin, axis=2)\n\n def test_idxmax(self):\n frame = self.frame\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmax(axis=axis, skipna=skipna)\n expected = df.apply(\n Series.idxmax, axis=axis, skipna=skipna)\n assert_series_equal(result, expected)\n\n self.assertRaises(ValueError, frame.idxmax, axis=2)\n\n def test_stale_cached_series_bug_473(self):\n\n # this is chained, but ok\n with option_context('chained_assignment',None):\n Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),\n columns=('e', 'f', 'g', 'h'))\n repr(Y)\n Y['e'] = Y['e'].astype('object')\n Y['g']['c'] = np.NaN\n repr(Y)\n result = Y.sum()\n exp = Y['g'].sum()\n self.assertTrue(isnull(Y['g']['c']))\n\n def test_index_namedtuple(self):\n from collections import namedtuple\n IndexType = namedtuple(\"IndexType\", [\"a\", \"b\"])\n idx1 = IndexType(\"foo\", \"bar\")\n idx2 = IndexType(\"baz\", \"bof\")\n index = Index([idx1, idx2],\n name=\"composite_index\", tupleize_cols=False)\n df = DataFrame([(1, 2), (3, 4)], index=index, columns=[\"A\", \"B\"])\n result = df.ix[IndexType(\"foo\", \"bar\")][\"A\"]\n self.assertEqual(result, 1)\n\n def test_empty_nonzero(self):\n df = DataFrame([1, 2, 3])\n self.assertFalse(df.empty)\n df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()\n self.assertTrue(df.empty)\n self.assertTrue(df.T.empty)\n\n def test_any_all(self):\n\n self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)\n self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)\n\n df = DataFrame(randn(10, 4)) > 0\n df.any(1)\n df.all(1)\n df.any(1, bool_only=True)\n df.all(1, bool_only=True)\n\n # skip pathological failure cases\n # class CantNonzero(object):\n\n # def __nonzero__(self):\n # raise ValueError\n\n # df[4] = CantNonzero()\n\n # it works!\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n # df[4][4] = np.nan\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n def test_consolidate_datetime64(self):\n # numpy vstack bug\n\n data = \"\"\"\\\nstarting,ending,measure\n2012-06-21 00:00,2012-06-23 07:00,77\n2012-06-23 07:00,2012-06-23 16:30,65\n2012-06-23 16:30,2012-06-25 08:00,77\n2012-06-25 08:00,2012-06-26 12:00,0\n2012-06-26 12:00,2012-06-27 08:00,77\n\"\"\"\n df = read_csv(StringIO(data), parse_dates=[0, 1])\n\n ser_starting = df.starting\n ser_starting.index = ser_starting.values\n ser_starting = ser_starting.tz_localize('US/Eastern')\n ser_starting = ser_starting.tz_convert('UTC')\n\n ser_ending = 
df.ending\n ser_ending.index = ser_ending.values\n ser_ending = ser_ending.tz_localize('US/Eastern')\n ser_ending = ser_ending.tz_convert('UTC')\n\n df.starting = ser_starting.index\n df.ending = ser_ending.index\n\n tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)\n tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)\n\n def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,\n has_bool_only=False):\n if frame is None:\n frame = self.frame > 0\n # set some NAs\n frame = DataFrame(frame.values.astype(object), frame.index,\n frame.columns)\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna().values\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n assert_series_equal(result0, frame.apply(wrapper))\n assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n assert_series_equal(result0, frame.apply(skipna_wrapper))\n assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),\n check_dtype=False)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n self.assertRaises(ValueError, f, axis=2)\n\n # make sure works on mixed-type frame\n mixed = self.mixed_frame\n mixed['_bool_'] = np.random.randn(len(mixed)) > 0\n getattr(mixed, name)(axis=0)\n getattr(mixed, name)(axis=1)\n\n class NonzeroFail:\n\n def __nonzero__(self):\n raise ValueError\n\n mixed['_nonzero_fail_'] = NonzeroFail()\n\n if has_bool_only:\n getattr(mixed, name)(axis=0, bool_only=True)\n getattr(mixed, name)(axis=1, bool_only=True)\n getattr(frame, name)(axis=0, bool_only=False)\n getattr(frame, name)(axis=1, bool_only=False)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if name == 'any':\n self.assertFalse(r0.any())\n self.assertFalse(r1.any())\n else:\n self.assertTrue(r0.all())\n self.assertTrue(r1.all())\n\n def test_strange_column_corruption_issue(self):\n\n df = DataFrame(index=[0, 1])\n df[0] = nan\n wasCol = {}\n # uncommenting these makes the results match\n # for col in xrange(100, 200):\n # wasCol[col] = 1\n # df[col] = nan\n\n for i, dt in enumerate(df.index):\n for col in range(100, 200):\n if not col in wasCol:\n wasCol[col] = 1\n df[col] = nan\n df[col][dt] = i\n\n myid = 100\n\n first = len(df.ix[isnull(df[myid]), [myid]])\n second = len(df.ix[isnull(df[myid]), [myid]])\n self.assertTrue(first == second == 0)\n\n def test_inplace_return_self(self):\n # re #1893\n\n data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],\n 'b': [0, 0, 1, 1],\n 'c': [1, 2, 3, 4]})\n\n def _check_f(base, f):\n result = f(base)\n self.assertTrue(result is None)\n\n # -----DataFrame-----\n\n # set_index\n f = lambda x: x.set_index('a', inplace=True)\n _check_f(data.copy(), f)\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True)\n _check_f(data.set_index('a'), f)\n\n # drop_duplicates\n f = lambda x: x.drop_duplicates(inplace=True)\n _check_f(data.copy(), f)\n\n # sort\n f = lambda x: x.sort_values('b', inplace=True)\n _check_f(data.copy(), f)\n\n # sort_index\n f = lambda x: x.sort_index(inplace=True)\n _check_f(data.copy(), f)\n\n # sortlevel\n 
f = lambda x: x.sortlevel(0, inplace=True)\n _check_f(data.set_index(['a', 'b']), f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(data.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(data.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(data.copy(), f)\n\n # -----Series-----\n d = data.copy()['c']\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True, drop=True)\n _check_f(data.set_index('a')['c'], f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(d.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(d.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(d.copy(), f)\n\n def test_isin(self):\n # GH #4211\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n other = ['a', 'b', 'c']\n\n result = df.isin(other)\n expected = DataFrame([df.loc[s].isin(other) for s in df.index])\n assert_frame_equal(result, expected)\n\n def test_isin_empty(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n result = df.isin([])\n expected = pd.DataFrame(False, df.index, df.columns)\n assert_frame_equal(result, expected)\n\n def test_isin_dict(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n d = {'A': ['a']}\n\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n\n result = df.isin(d)\n assert_frame_equal(result, expected)\n\n # non unique columns\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n df.columns = ['A', 'A']\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n result = df.isin(d)\n assert_frame_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n #GH4763\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n with tm.assertRaises(TypeError):\n df.isin('a')\n\n with tm.assertRaises(TypeError):\n df.isin('aaa')\n\n def test_isin_df(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})\n expected = DataFrame(False, df1.index, df1.columns)\n result = df1.isin(df2)\n expected['A'].loc[[1, 3]] = True\n expected['B'].loc[[0, 2]] = True\n assert_frame_equal(result, expected)\n\n # partial overlapping columns\n df2.columns = ['A', 'C']\n result = df1.isin(df2)\n expected['B'] = False\n assert_frame_equal(result, expected)\n\n def test_isin_df_dupe_values(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n # just cols duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['B', 'B'])\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n # just index duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['A', 'B'], index=[0, 0, 1, 1])\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n # cols and index:\n df2.columns = ['B', 'B']\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n def test_isin_dupe_self(self):\n other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})\n df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A','A'])\n result = df.isin(other)\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected.loc[0] = True\n expected.iloc[1, 1] = True\n assert_frame_equal(result, expected)\n\n def test_isin_against_series(self):\n df = pd.DataFrame({'A': [1, 2, 
3, 4], 'B': [2, np.nan, 4, 4]},\n index=['a', 'b', 'c', 'd'])\n s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected['A'].loc['a'] = True\n expected.loc['d'] = True\n result = df.isin(s)\n assert_frame_equal(result, expected)\n\n def test_isin_multiIndex(self):\n idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),\n (0, 'b', 'bar'), (0, 'b', 'baz'),\n (2, 'a', 'foo'), (2, 'a', 'bar'),\n (2, 'c', 'bar'), (2, 'c', 'baz'),\n (1, 'b', 'foo'), (1, 'b', 'bar'),\n (1, 'c', 'bar'), (1, 'c', 'baz')])\n df1 = DataFrame({'A': np.ones(12),\n 'B': np.zeros(12)}, index=idx)\n df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n 'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})\n # against regular index\n expected = DataFrame(False, index=df1.index, columns=df1.columns)\n result = df1.isin(df2)\n assert_frame_equal(result, expected)\n\n df2.index = idx\n expected = df2.values.astype(np.bool)\n expected[:, 1] = ~expected[:, 1]\n expected = DataFrame(expected, columns=['A', 'B'], index=idx)\n\n result = df1.isin(df2)\n assert_frame_equal(result, expected)\n\n def test_to_csv_date_format(self):\n from pandas import to_datetime\n pname = '__tmp_to_csv_date_format__'\n with ensure_clean(pname) as path:\n for engine in [None, 'python']:\n w = FutureWarning if engine == 'python' else None\n\n dt_index = self.tsframe.index\n datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)\n\n with tm.assert_produces_warning(w, check_stacklevel=False):\n datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)\n\n # Check that the data was put in the specified format\n test = read_csv(path, index_col=0)\n\n datetime_frame_int = datetime_frame.applymap(lambda x: int(x.strftime('%Y%m%d')))\n datetime_frame_int.index = datetime_frame_int.index.map(lambda x: int(x.strftime('%Y%m%d')))\n\n assert_frame_equal(test, datetime_frame_int)\n\n with tm.assert_produces_warning(w, check_stacklevel=False):\n datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)\n\n # Check that the data was put in the specified format\n test = read_csv(path, index_col=0)\n datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))\n datetime_frame_str.index = datetime_frame_str.index.map(lambda x: x.strftime('%Y-%m-%d'))\n\n assert_frame_equal(test, datetime_frame_str)\n\n # Check that columns get converted\n datetime_frame_columns = datetime_frame.T\n\n with tm.assert_produces_warning(w, check_stacklevel=False):\n datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)\n\n test = read_csv(path, index_col=0)\n\n datetime_frame_columns = datetime_frame_columns.applymap(lambda x: int(x.strftime('%Y%m%d')))\n # Columns don't get converted to ints by read_csv\n datetime_frame_columns.columns = datetime_frame_columns.columns.map(lambda x: x.strftime('%Y%m%d'))\n\n assert_frame_equal(test, datetime_frame_columns)\n\n # test NaTs\n nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])\n nat_frame = DataFrame({'A': nat_index}, index=nat_index)\n\n with tm.assert_produces_warning(w, check_stacklevel=False):\n nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)\n\n test = read_csv(path, parse_dates=[0, 1], index_col=0)\n\n assert_frame_equal(test, nat_frame)\n\n def test_to_csv_with_dst_transitions(self):\n\n with ensure_clean('csv_date_format_with_dst') as path:\n # make sure we are not failing on transitions\n times = pd.date_range(\"2013-10-26 
23:00\", \"2013-10-27 01:00\",\n tz=\"Europe/London\",\n freq=\"H\",\n ambiguous='infer')\n\n for i in [times, times+pd.Timedelta('10s')]:\n time_range = np.array(range(len(i)), dtype='int64')\n df = DataFrame({'A' : time_range}, index=i)\n df.to_csv(path,index=True)\n\n # we have to reconvert the index as we\n # don't parse the tz's\n result = read_csv(path,index_col=0)\n result.index = pd.to_datetime(result.index).tz_localize('UTC').tz_convert('Europe/London')\n assert_frame_equal(result,df)\n\n # GH11619\n idx = pd.date_range('2015-01-01', '2015-12-31', freq = 'H', tz='Europe/Paris')\n df = DataFrame({'values' : 1, 'idx' : idx},\n index=idx)\n with ensure_clean('csv_date_format_with_dst') as path:\n df.to_csv(path,index=True)\n result = read_csv(path,index_col=0)\n result.index = pd.to_datetime(result.index).tz_localize('UTC').tz_convert('Europe/Paris')\n result['idx'] = pd.to_datetime(result['idx']).astype('datetime64[ns, Europe/Paris]')\n assert_frame_equal(result,df)\n\n # assert working\n df.astype(str)\n\n with ensure_clean('csv_date_format_with_dst') as path:\n df.to_pickle(path)\n result = pd.read_pickle(path)\n assert_frame_equal(result,df)\n\n\n def test_concat_empty_dataframe_dtypes(self):\n df = DataFrame(columns=list(\"abc\"))\n df['a'] = df['a'].astype(np.bool_)\n df['b'] = df['b'].astype(np.int32)\n df['c'] = df['c'].astype(np.float64)\n\n result = pd.concat([df, df])\n self.assertEqual(result['a'].dtype, np.bool_)\n self.assertEqual(result['b'].dtype, np.int32)\n self.assertEqual(result['c'].dtype, np.float64)\n\n result = pd.concat([df, df.astype(np.float64)])\n self.assertEqual(result['a'].dtype, np.object_)\n self.assertEqual(result['b'].dtype, np.float64)\n self.assertEqual(result['c'].dtype, np.float64)\n\n def test_empty_frame_dtypes_ftypes(self):\n empty_df = pd.DataFrame()\n assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))\n assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))\n\n nocols_df = pd.DataFrame(index=[1,2,3])\n assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))\n assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))\n\n norows_df = pd.DataFrame(columns=list(\"abc\"))\n assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list(\"abc\")))\n assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list(\"abc\")))\n\n norows_int_df = pd.DataFrame(columns=list(\"abc\")).astype(np.int32)\n assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list(\"abc\")))\n assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list(\"abc\")))\n\n odict = OrderedDict\n df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])\n assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),\n ('b', np.bool),\n ('c', np.float64)])))\n assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),\n ('b', 'bool:dense'),\n ('c', 'float64:dense')])))\n\n # same but for empty slice of df\n assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int64),\n ('b', np.bool),\n ('c', np.float64)])))\n assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),\n ('b', 'bool:dense'),\n ('c', 'float64:dense')])))\n\n def test_dtypes_are_correct_after_column_slice(self):\n # GH6525\n df = pd.DataFrame(index=range(5), columns=list(\"abc\"), dtype=np.float_)\n odict = OrderedDict\n assert_series_equal(df.dtypes,\n pd.Series(odict([('a', np.float_), ('b', np.float_),\n ('c', np.float_),])))\n 
assert_series_equal(df.iloc[:,2:].dtypes,\n pd.Series(odict([('c', np.float_)])))\n assert_series_equal(df.dtypes,\n pd.Series(odict([('a', np.float_), ('b', np.float_),\n ('c', np.float_),])))\n\n def test_set_index_names(self):\n df = pd.util.testing.makeDataFrame()\n df.index.name = 'name'\n\n self.assertEqual(df.set_index(df.index).index.names, ['name'])\n\n mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])\n mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,\n names=['A', 'B', 'A', 'B'])\n\n df = df.set_index(['A', 'B'])\n\n self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])\n\n # Check that set_index isn't converting a MultiIndex into an Index\n self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))\n\n # Check actual equality\n tm.assert_index_equal(df.set_index(df.index).index, mi)\n\n # Check that [MultiIndex, MultiIndex] yields a MultiIndex rather\n # than a pair of tuples\n self.assertTrue(isinstance(df.set_index([df.index, df.index]).index, MultiIndex))\n\n # Check equality\n tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)\n\n def test_select_dtypes_include(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.Categorical(list('abc'))})\n ri = df.select_dtypes(include=[np.number])\n ei = df[['b', 'c', 'd']]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[np.number,'category'])\n ei = df[['b', 'c', 'd', 'f']]\n tm.assert_frame_equal(ri, ei)\n\n def test_select_dtypes_exclude(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True]})\n re = df.select_dtypes(exclude=[np.number])\n ee = df[['a', 'e']]\n tm.assert_frame_equal(re, ee)\n\n def test_select_dtypes_exclude_include(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n exclude = np.datetime64,\n include = np.bool_, 'integer'\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[['b', 'c', 'e']]\n tm.assert_frame_equal(r, e)\n\n exclude = 'datetime',\n include = 'bool', 'int64', 'int32'\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[['b', 'e']]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n df['g'] = df.f.diff()\n assert not hasattr(np, 'u8')\n r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])\n e = df[['a', 'b']]\n tm.assert_frame_equal(r, e)\n\n r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])\n e = df[['a', 'b', 'g']]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_empty(self):\n df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})\n with tm.assertRaisesRegexp(ValueError, 'at least one of include or '\n 'exclude must be nonempty'):\n df.select_dtypes()\n\n def test_select_dtypes_raises_on_string(self):\n df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n 
df.select_dtypes(include='object')\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(exclude='object')\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(include=int, exclude='object')\n\n def test_select_dtypes_bad_datetime64(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):\n df.select_dtypes(include=['datetime64[D]'])\n\n with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):\n df.select_dtypes(exclude=['datetime64[as]'])\n\n def test_select_dtypes_str_raises(self):\n df = DataFrame({'a': list('abc'),\n 'g': list(u('abc')),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n string_dtypes = set((str, 'str', np.string_, 'S1',\n 'unicode', np.unicode_, 'U1'))\n try:\n string_dtypes.add(unicode)\n except NameError:\n pass\n for dt in string_dtypes:\n with tm.assertRaisesRegexp(TypeError,\n 'string dtypes are not allowed'):\n df.select_dtypes(include=[dt])\n with tm.assertRaisesRegexp(TypeError,\n 'string dtypes are not allowed'):\n df.select_dtypes(exclude=[dt])\n\n def test_select_dtypes_bad_arg_raises(self):\n df = DataFrame({'a': list('abc'),\n 'g': list(u('abc')),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'):\n df.select_dtypes(['blargy, blarg, blarg'])\n\n def test_assign(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})\n original = df.copy()\n result = df.assign(C=df.B / df.A)\n expected = df.copy()\n expected['C'] = [4, 2.5, 2]\n assert_frame_equal(result, expected)\n\n # lambda syntax\n result = df.assign(C=lambda x: x.B / x.A)\n assert_frame_equal(result, expected)\n\n # original is unmodified\n assert_frame_equal(df, original)\n\n # Non-Series array-like\n result = df.assign(C=[4, 2.5, 2])\n assert_frame_equal(result, expected)\n # original is unmodified\n assert_frame_equal(df, original)\n\n result = df.assign(B=df.B / df.A)\n expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})\n assert_frame_equal(result, expected)\n\n # overwrite\n result = df.assign(A=df.A + df.B)\n expected = df.copy()\n expected['A'] = [5, 7, 9]\n assert_frame_equal(result, expected)\n\n # lambda\n result = df.assign(A=lambda x: x.A + x.B)\n assert_frame_equal(result, expected)\n\n def test_assign_multiple(self):\n df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])\n result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)\n expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],\n [3, 6, 9, 3, 6]], columns=list('ABCDE'))\n assert_frame_equal(result, expected)\n\n def test_assign_alphabetical(self):\n # GH 9818\n df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])\n result = df.assign(D=df.A + df.B, C=df.A - df.B)\n expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],\n columns=list('ABCD'))\n assert_frame_equal(result, expected)\n result = df.assign(C=df.A - df.B, D=df.A + df.B)\n assert_frame_equal(result, expected)\n\n def test_assign_bad(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})\n # non-keyword 
argument\n with tm.assertRaises(TypeError):\n df.assign(lambda x: x.A)\n with tm.assertRaises(AttributeError):\n df.assign(C=df.A, D=df.A + df.C)\n with tm.assertRaises(KeyError):\n df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])\n with tm.assertRaises(KeyError):\n df.assign(C=df.A, D=lambda x: x['A'] + x['C'])\n\n def test_dataframe_metadata(self):\n\n df = SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},\n index=['a', 'b', 'c'])\n df.testattr = 'XXX'\n\n self.assertEqual(df.testattr, 'XXX')\n self.assertEqual(df[['X']].testattr, 'XXX')\n self.assertEqual(df.loc[['a', 'b'], :].testattr, 'XXX')\n self.assertEqual(df.iloc[[0, 1], :].testattr, 'XXX')\n\n # GH9776\n self.assertEqual(df.iloc[0:1, :].testattr, 'XXX')\n\n # GH10553\n unpickled = self.round_trip_pickle(df)\n assert_frame_equal(df, unpickled)\n self.assertEqual(df._metadata, unpickled._metadata)\n self.assertEqual(df.testattr, unpickled.testattr)\n\n def test_nlargest(self):\n # GH10393\n from string import ascii_lowercase\n df = pd.DataFrame({'a': np.random.permutation(10),\n 'b': list(ascii_lowercase[:10])})\n result = df.nlargest(5, 'a')\n expected = df.sort_values('a', ascending=False).head(5)\n tm.assert_frame_equal(result, expected)\n\n def test_nlargest_multiple_columns(self):\n from string import ascii_lowercase\n df = pd.DataFrame({'a': np.random.permutation(10),\n 'b': list(ascii_lowercase[:10]),\n 'c': np.random.permutation(10).astype('float64')})\n result = df.nlargest(5, ['a', 'b'])\n expected = df.sort_values(['a', 'b'], ascending=False).head(5)\n tm.assert_frame_equal(result, expected)\n\n def test_nsmallest(self):\n from string import ascii_lowercase\n df = pd.DataFrame({'a': np.random.permutation(10),\n 'b': list(ascii_lowercase[:10])})\n result = df.nsmallest(5, 'a')\n expected = df.sort_values('a').head(5)\n tm.assert_frame_equal(result, expected)\n\n def test_nsmallest_multiple_columns(self):\n from string import ascii_lowercase\n df = pd.DataFrame({'a': np.random.permutation(10),\n 'b': list(ascii_lowercase[:10]),\n 'c': np.random.permutation(10).astype('float64')})\n result = df.nsmallest(5, ['a', 'c'])\n expected = df.sort_values(['a', 'c']).head(5)\n tm.assert_frame_equal(result, expected)\n\n def test_to_panel_expanddim(self):\n # GH 9762\n\n class SubclassedFrame(DataFrame):\n @property\n def _constructor_expanddim(self):\n return SubclassedPanel\n\n class SubclassedPanel(Panel):\n pass\n\n index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)])\n df = SubclassedFrame({'X':[1, 2, 3], 'Y': [4, 5, 6]}, index=index)\n result = df.to_panel()\n self.assertTrue(isinstance(result, SubclassedPanel))\n expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]],\n items=['X', 'Y'], major_axis=[0],\n minor_axis=[0, 1, 2],\n dtype='int64')\n tm.assert_panel_equal(result, expected)\n\n\ndef skip_if_no_ne(engine='numexpr'):\n if engine == 'numexpr':\n try:\n import numexpr as ne\n except ImportError:\n raise nose.SkipTest(\"cannot query engine numexpr when numexpr not \"\n \"installed\")\n\n\ndef skip_if_no_pandas_parser(parser):\n if parser != 'pandas':\n raise nose.SkipTest(\"cannot evaluate with parser {0!r}\".format(parser))\n\n\nclass TestDataFrameQueryWithMultiIndex(object):\n def check_query_with_named_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = tm.choice(['eggs', 'ham'], size=10)\n index = MultiIndex.from_arrays([a, b], names=['color', 'food'])\n df = DataFrame(randn(10, 2), index=index)\n ind = 
Series(df.index.get_level_values('color').values, index=index,\n name='color')\n\n # equality\n res1 = df.query('color == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == color', parser=parser, engine=engine)\n exp = df[ind == 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('color != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != color', parser=parser, engine=engine)\n exp = df[ind != 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('color == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == color', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('color != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != color', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" in color', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in color', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n def test_query_with_named_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_named_multiindex, parser, engine\n\n def check_query_with_unnamed_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = tm.choice(['eggs', 'ham'], size=10)\n index = MultiIndex.from_arrays([a, b])\n df = DataFrame(randn(10, 2), index=index)\n ind = Series(df.index.get_level_values(0).values, index=index)\n\n res1 = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == ilevel_0', parser=parser, engine=engine)\n exp = df[ind == 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != ilevel_0', parser=parser, engine=engine)\n exp = df[ind != 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_0 == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_0 != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" in ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, 
exp)\n assert_frame_equal(res2, exp)\n\n #### LEVEL 1 ####\n ind = Series(df.index.get_level_values(1).values, index=index)\n res1 = df.query('ilevel_1 == \"eggs\"', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" == ilevel_1', parser=parser, engine=engine)\n exp = df[ind == 'eggs']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_1 != \"eggs\"', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" != ilevel_1', parser=parser, engine=engine)\n exp = df[ind != 'eggs']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_1 == [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] == ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_1 != [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] != ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"eggs\"] in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" in ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"eggs\"] not in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" not in ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n def test_query_with_unnamed_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_unnamed_multiindex, parser, engine\n\n def check_query_with_partially_named_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = np.arange(10)\n index = MultiIndex.from_arrays([a, b])\n index.names = [None, 'rating']\n df = DataFrame(randn(10, 2), index=index)\n res = df.query('rating == 1', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values('rating').values, index=index,\n name='rating')\n exp = df[ind == 1]\n assert_frame_equal(res, exp)\n\n res = df.query('rating != 1', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values('rating').values, index=index,\n name='rating')\n exp = df[ind != 1]\n assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind == \"red\"]\n assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind != \"red\"]\n assert_frame_equal(res, exp)\n\n def test_query_with_partially_named_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_partially_named_multiindex, parser, engine\n\n def test_query_multiindex_get_index_resolvers(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_multiindex_get_index_resolvers, parser, engine\n\n def check_query_multiindex_get_index_resolvers(self, parser, engine):\n df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])\n resolvers = df._get_index_resolvers()\n\n def to_series(mi, level):\n level_values = mi.get_level_values(level)\n s = 
level_values.to_series()\n s.index = mi\n return s\n\n col_series = df.columns.to_series()\n expected = {'index': df.index,\n 'columns': col_series,\n 'spam': to_series(df.index, 'spam'),\n 'eggs': to_series(df.index, 'eggs'),\n 'C0': col_series}\n for k, v in resolvers.items():\n if isinstance(v, Index):\n assert v.is_(expected[k])\n elif isinstance(v, Series):\n tm.assert_series_equal(v, expected[k])\n else:\n raise AssertionError(\"object must be a Series or Index\")\n\n def test_raise_on_panel_with_multiindex(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_raise_on_panel_with_multiindex, parser, engine\n\n def check_raise_on_panel_with_multiindex(self, parser, engine):\n tm.skip_if_no_ne()\n p = tm.makePanel(7)\n p.items = tm.makeCustomIndex(len(p.items), nlevels=2)\n with tm.assertRaises(NotImplementedError):\n pd.eval('p + 1', parser=parser, engine=engine)\n\n def test_raise_on_panel4d_with_multiindex(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_raise_on_panel4d_with_multiindex, parser, engine\n\n def check_raise_on_panel4d_with_multiindex(self, parser, engine):\n tm.skip_if_no_ne()\n p4d = tm.makePanel4D(7)\n p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)\n with tm.assertRaises(NotImplementedError):\n pd.eval('p4d + 1', parser=parser, engine=engine)\n\n\nclass TestDataFrameQueryNumExprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryNumExprPandas, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n tm.skip_if_no_ne(cls.engine)\n\n @classmethod\n def tearDownClass(cls):\n super(TestDataFrameQueryNumExprPandas, cls).tearDownClass()\n del cls.engine, cls.parser\n\n def test_date_query_with_attribute_access(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('dates1 < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates2'] = date_range('1/1/2013', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT\n res = df.query('dates1 < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < 
dates3', engine=engine,\n parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.iloc[0, 0] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n d = {}\n d['dates1'] = date_range('1/1/2012', periods=n)\n d['dates3'] = date_range('1/1/2014', periods=n)\n df = DataFrame(d)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine, parser=parser)\n expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_query_with_non_date(self):\n engine, parser = self.engine, self.parser\n\n n = 10\n df = DataFrame({'dates': date_range('1/1/2012', periods=n),\n 'nondate': np.arange(n)})\n\n ops = '==', '!=', '<', '>', '<=', '>='\n\n for op in ops:\n with tm.assertRaises(TypeError):\n df.query('dates %s nondate' % op, parser=parser, engine=engine)\n\n def test_query_syntax_error(self):\n engine, parser = self.engine, self.parser\n df = DataFrame({\"i\": lrange(10), \"+\": lrange(3, 13),\n \"r\": lrange(4, 14)})\n with tm.assertRaises(SyntaxError):\n df.query('i - +', engine=engine, parser=parser)\n\n def test_query_scope(self):\n from pandas.computation.ops import UndefinedVariableError\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(20, 2), columns=list('ab'))\n\n a, b = 1, 2\n res = df.query('a > b', engine=engine, parser=parser)\n expected = df[df.a > df.b]\n tm.assert_frame_equal(res, expected)\n\n res = df.query('@a > b', engine=engine, parser=parser)\n expected = df[a > df.b]\n tm.assert_frame_equal(res, expected)\n\n # no local variable c\n with tm.assertRaises(UndefinedVariableError):\n df.query('@a > b > @c', engine=engine, parser=parser)\n\n # no column named 'c'\n with tm.assertRaises(UndefinedVariableError):\n df.query('@a > b > c', engine=engine, parser=parser)\n\n def test_query_doesnt_pickup_local(self):\n from pandas.computation.ops import UndefinedVariableError\n\n engine, parser = self.engine, self.parser\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n # we don't pick up the local 'sin'\n with tm.assertRaises(UndefinedVariableError):\n df.query('sin > 5', engine=engine, parser=parser)\n\n def test_query_builtin(self):\n from pandas.computation.engines import NumExprClobberingError\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n with tm.assertRaisesRegexp(NumExprClobberingError,\n 'Variables in expression.+'):\n df.query('sin > 5', engine=engine, parser=parser)\n\n def test_query(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])\n\n assert_frame_equal(df.query('a < b', engine=engine, parser=parser),\n df[df.a < df.b])\n assert_frame_equal(df.query('a + b > b * 
c', engine=engine,\n parser=parser),\n df[df.a + df.b > df.b * df.c])\n\n def test_query_index_with_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randint(10, size=(10, 3)),\n index=Index(range(10), name='blob'),\n columns=['a', 'b', 'c'])\n res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)\n expec = df[(df.index < 5) & (df.a < df.b)]\n assert_frame_equal(res, expec)\n\n res = df.query('blob < b', engine=engine, parser=parser)\n expec = df[df.index < df.b]\n\n assert_frame_equal(res, expec)\n\n def test_query_index_without_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randint(10, size=(10, 3)),\n index=range(10), columns=['a', 'b', 'c'])\n\n # \"index\" should refer to the index\n res = df.query('index < b', engine=engine, parser=parser)\n expec = df[df.index < df.b]\n assert_frame_equal(res, expec)\n\n # test against a scalar\n res = df.query('index < 5', engine=engine, parser=parser)\n expec = df[df.index < 5]\n assert_frame_equal(res, expec)\n\n def test_nested_scope(self):\n engine = self.engine\n parser = self.parser\n\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n expected = df[(df > 0) & (df2 > 0)]\n\n result = df.query('(@df > 0) & (@df2 > 0)', engine=engine, parser=parser)\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,\n parser=parser)\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',\n engine=engine, parser=parser)\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)\n expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)\n assert_frame_equal(result, expected)\n\n def test_nested_raises_on_local_self_reference(self):\n from pandas.computation.ops import UndefinedVariableError\n\n df = DataFrame(np.random.randn(5, 3))\n\n # can't reference ourself b/c we're a local so @ is necessary\n with tm.assertRaises(UndefinedVariableError):\n df.query('df > 0', engine=self.engine, parser=self.parser)\n\n def test_local_syntax(self):\n skip_if_no_pandas_parser(self.parser)\n\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(100, 10), columns=list('abcdefghij'))\n b = 1\n expect = df[df.a < b]\n result = df.query('a < @b', engine=engine, parser=parser)\n assert_frame_equal(result, expect)\n\n expect = df[df.a < df.b]\n result = df.query('a < b', engine=engine, parser=parser)\n assert_frame_equal(result, expect)\n\n def test_chained_cmp_and_in(self):\n skip_if_no_pandas_parser(self.parser)\n engine, parser = self.engine, self.parser\n cols = list('abc')\n df = DataFrame(randn(100, len(cols)), columns=cols)\n res = df.query('a < b < c and a not in b not in c', engine=engine,\n parser=parser)\n ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)\n expec = df[ind]\n assert_frame_equal(res, expec)\n\n def test_local_variable_with_in(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n a = Series(np.random.randint(3, size=15), name='a')\n b = Series(np.random.randint(10, size=15), name='b')\n df = DataFrame({'a': a, 'b': b})\n\n expected = df.loc[(df.b - 1).isin(a)]\n result = df.query('b - 1 in a', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n b = Series(np.random.randint(10, size=15), 
name='b')\n expected = df.loc[(b - 1).isin(a)]\n result = df.query('@b - 1 in a', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n def test_at_inside_string(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n c = 1\n df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})\n result = df.query('a == \"@c\"', engine=engine, parser=parser)\n expected = df[df.a == \"@c\"]\n tm.assert_frame_equal(result, expected)\n\n def test_query_undefined_local(self):\n from pandas.computation.ops import UndefinedVariableError\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n df = DataFrame(np.random.rand(10, 2), columns=list('ab'))\n with tm.assertRaisesRegexp(UndefinedVariableError,\n \"local variable 'c' is not defined\"):\n df.query('a == @c', engine=engine, parser=parser)\n\n def test_index_resolvers_come_after_columns_with_the_same_name(self):\n n = 1\n a = np.r_[20:101:20]\n\n df = DataFrame({'index': a, 'b': np.random.randn(a.size)})\n df.index.name = 'index'\n result = df.query('index > 5', engine=self.engine, parser=self.parser)\n expected = df[df['index'] > 5]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({'index': a, 'b': np.random.randn(a.size)})\n result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({'a': a, 'b': np.random.randn(a.size)})\n df.index.name = 'a'\n result = df.query('a > 5', engine=self.engine, parser=self.parser)\n expected = df[df.a > 5]\n tm.assert_frame_equal(result, expected)\n\n result = df.query('index > 5', engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n def test_inf(self):\n n = 10\n df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})\n df.loc[::2, 0] = np.inf\n ops = '==', '!='\n d = dict(zip(ops, (operator.eq, operator.ne)))\n for op, f in d.items():\n q = 'a %s inf' % op\n expected = df[f(df.a, np.inf)]\n result = df.query(q, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n tm.skip_if_no_ne(cls.engine)\n cls.frame = _frame.copy()\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('(dates1 < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n tm.assert_frame_equal(res, expec)\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates2'] = date_range('1/1/2013', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT\n res = df.query('(dates1 < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def 
test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('(index < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.iloc[0, 0] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('(index < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n with tm.assertRaises(NotImplementedError):\n df.query('index < 20130101 < dates3', engine=engine, parser=parser)\n\n def test_nested_scope(self):\n from pandas.computation.ops import UndefinedVariableError\n engine = self.engine\n parser = self.parser\n # smoke test\n x = 1\n result = pd.eval('x + 1', engine=engine, parser=parser)\n self.assertEqual(result, 2)\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n\n # don't have the pandas parser\n with tm.assertRaises(SyntaxError):\n df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)\n\n with tm.assertRaises(UndefinedVariableError):\n df.query('(df>0) & (df2>0)', engine=engine, parser=parser)\n\n expected = df[(df > 0) & (df2 > 0)]\n result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,\n parser=parser)\n tm.assert_frame_equal(expected, result)\n\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',\n engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n cls.frame = _frame.copy()\n\n def test_query_builtin(self):\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n expected = df[df.index > 5]\n result = df.query('sin > 5', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryPythonPython, cls).setUpClass()\n cls.engine = cls.parser = 'python'\n cls.frame = _frame.copy()\n\n def test_query_builtin(self):\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n expected = df[df.index > 5]\n result = df.query('sin > 5', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nPARSERS = 'python', 'pandas'\nENGINES = 'python', 
'numexpr'\n\n\nclass TestDataFrameQueryStrings(object):\n def check_str_query_method(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame(randn(10, 1), columns=['b'])\n df['strings'] = Series(list('aabbccddee'))\n expect = df[df.strings == 'a']\n\n if parser != 'pandas':\n col = 'strings'\n lst = '\"a\"'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = '==', '!='\n ops = 2 * ([eq] + [ne])\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)\n assertRaises(NotImplementedError, df.query, ex, engine=engine,\n parser=parser, local_dict={'strings': df.strings})\n else:\n res = df.query('\"a\" == strings', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('strings == \"a\"', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n assert_frame_equal(res, df[df.strings.isin(['a'])])\n\n expect = df[df.strings != 'a']\n res = df.query('strings != \"a\"', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('\"a\" != strings', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n assert_frame_equal(res, df[~df.strings.isin(['a'])])\n\n def test_str_query_method(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_str_query_method, parser, engine\n\n def test_str_list_query_method(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_str_list_query_method, parser, engine\n\n def check_str_list_query_method(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame(randn(10, 1), columns=['b'])\n df['strings'] = Series(list('aabbccddee'))\n expect = df[df.strings.isin(['a', 'b'])]\n\n if parser != 'pandas':\n col = 'strings'\n lst = '[\"a\", \"b\"]'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = '==', '!='\n ops = 2 * ([eq] + [ne])\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)\n with tm.assertRaises(NotImplementedError):\n df.query(ex, engine=engine, parser=parser)\n else:\n res = df.query('strings == [\"a\", \"b\"]', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] == strings', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n expect = df[~df.strings.isin(['a', 'b'])]\n\n res = df.query('strings != [\"a\", \"b\"]', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] != strings', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n def check_query_with_string_columns(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': list('aaaabbbbcccc'),\n 'b': list('aabbccddeeff'),\n 'c': np.random.randint(5, size=12),\n 'd': np.random.randint(9, size=12)})\n if parser == 'pandas':\n res = df.query('a in b', parser=parser, engine=engine)\n expec = df[df.a.isin(df.b)]\n assert_frame_equal(res, expec)\n\n res = df.query('a in b and c < d', parser=parser, engine=engine)\n expec = df[df.a.isin(df.b) & (df.c < df.d)]\n assert_frame_equal(res, expec)\n else:\n with assertRaises(NotImplementedError):\n df.query('a in b', parser=parser, engine=engine)\n\n with assertRaises(NotImplementedError):\n df.query('a in b and c < d', parser=parser, engine=engine)\n\n def test_query_with_string_columns(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_string_columns, parser, engine\n\n def check_object_array_eq_ne(self, parser, engine):\n 
tm.skip_if_no_ne(engine)\n df = DataFrame({'a': list('aaaabbbbcccc'),\n 'b': list('aabbccddeeff'),\n 'c': np.random.randint(5, size=12),\n 'd': np.random.randint(9, size=12)})\n res = df.query('a == b', parser=parser, engine=engine)\n exp = df[df.a == df.b]\n assert_frame_equal(res, exp)\n\n res = df.query('a != b', parser=parser, engine=engine)\n exp = df[df.a != df.b]\n assert_frame_equal(res, exp)\n\n def test_object_array_eq_ne(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_object_array_eq_ne, parser, engine\n\n def check_query_with_nested_strings(self, parser, engine):\n tm.skip_if_no_ne(engine)\n skip_if_no_pandas_parser(parser)\n from pandas.compat import StringIO\n raw = \"\"\"id event timestamp\n 1 \"page 1 load\" 1/1/2014 0:00:01\n 1 \"page 1 exit\" 1/1/2014 0:00:31\n 2 \"page 2 load\" 1/1/2014 0:01:01\n 2 \"page 2 exit\" 1/1/2014 0:01:31\n 3 \"page 3 load\" 1/1/2014 0:02:01\n 3 \"page 3 exit\" 1/1/2014 0:02:31\n 4 \"page 1 load\" 2/1/2014 1:00:01\n 4 \"page 1 exit\" 2/1/2014 1:00:31\n 5 \"page 2 load\" 2/1/2014 1:01:01\n 5 \"page 2 exit\" 2/1/2014 1:01:31\n 6 \"page 3 load\" 2/1/2014 1:02:01\n 6 \"page 3 exit\" 2/1/2014 1:02:31\n \"\"\"\n df = pd.read_csv(StringIO(raw), sep=r'\\s{2,}', engine='python',\n parse_dates=['timestamp'])\n expected = df[df.event == '\"page 1 load\"']\n res = df.query(\"\"\"'\"page 1 load\"' in event\"\"\", parser=parser,\n engine=engine)\n tm.assert_frame_equal(expected, res)\n\n def test_query_with_nested_string(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_nested_strings, parser, engine\n\n def check_query_with_nested_special_character(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': ['a', 'b', 'test & test'],\n 'b': [1, 2, 3]})\n res = df.query('a == \"test & test\"', parser=parser, engine=engine)\n expec = df[df.a == 'test & test']\n tm.assert_frame_equal(res, expec)\n\n def test_query_with_nested_special_character(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_nested_special_character, parser, engine\n\n def check_query_lex_compare_strings(self, parser, engine):\n tm.skip_if_no_ne(engine=engine)\n import operator as opr\n\n a = Series(tm.choice(list('abcde'), 20))\n b = Series(np.arange(a.size))\n df = DataFrame({'X': a, 'Y': b})\n\n ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}\n\n for op, func in ops.items():\n res = df.query('X %s \"d\"' % op, engine=engine, parser=parser)\n expected = df[func(df.X, 'd')]\n assert_frame_equal(res, expected)\n\n def test_query_lex_compare_strings(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_lex_compare_strings, parser, engine\n\n def check_query_single_element_booleans(self, parser, engine):\n tm.skip_if_no_ne(engine)\n columns = 'bid', 'bidsize', 'ask', 'asksize'\n data = np.random.randint(2, size=(1, len(columns))).astype(bool)\n df = DataFrame(data, columns=columns)\n res = df.query('bid & ask', engine=engine, parser=parser)\n expected = df[df.bid & df.ask]\n assert_frame_equal(res, expected)\n\n def test_query_single_element_booleans(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_single_element_booleans, parser, engine\n\n def check_query_string_scalar_variable(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],\n 'Price': [109.70, 109.72, 183.30, 183.35]})\n e = df[df.Symbol == 
'BUD US']\n symb = 'BUD US'\n r = df.query('Symbol == @symb', parser=parser, engine=engine)\n tm.assert_frame_equal(e, r)\n\n def test_query_string_scalar_variable(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_string_scalar_variable, parser, engine\n\n\nclass TestDataFrameEvalNumExprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalNumExprPandas, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n tm.skip_if_no_ne()\n\n def setUp(self):\n self.frame = DataFrame(randn(10, 3), columns=list('abc'))\n\n def tearDown(self):\n del self.frame\n\n def test_simple_expr(self):\n res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)\n expect = self.frame.a + self.frame.b\n assert_series_equal(res, expect)\n\n def test_bool_arith_expr(self):\n res = self.frame.eval('a[a < 1] + b', engine=self.engine,\n parser=self.parser)\n expect = self.frame.a[self.frame.a < 1] + self.frame.b\n assert_series_equal(res, expect)\n\n def test_invalid_type_for_operator_raises(self):\n df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})\n ops = '+', '-', '*', '/'\n for op in ops:\n with tm.assertRaisesRegexp(TypeError,\n \"unsupported operand type\\(s\\) for \"\n \".+: '.+' and '.+'\"):\n df.eval('a {0} b'.format(op), engine=self.engine,\n parser=self.parser)\n\n\nclass TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n tm.skip_if_no_ne(cls.engine)\n\n\nclass TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n\n\nclass TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalPythonPython, cls).tearDownClass()\n cls.engine = cls.parser = 'python'\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] |
[
[
"numpy.ma.masked_all",
"numpy.random.rand",
"numpy.array_equal",
"numpy.random.choice",
"numpy.median",
"pandas.core.nanops.nansem",
"pandas.compat.OrderedDict",
"numpy.tile",
"scipy.stats.skew",
"pandas.core.common.pprint_thing",
"pandas.util.testing.getSeriesData",
"pandas.util.testing.makeStringIndex",
"numpy.random.random",
"pandas.compat.lzip",
"pandas.core.common.isnull",
"pandas.read_pickle",
"pandas.notnull",
"numpy.empty",
"pandas.set_option",
"pandas.compat.iteritems",
"pandas.DataFrame",
"pandas.util.testing.rands_array",
"pandas.core.dtypes.DatetimeTZDtype",
"pandas.sparse.api.SparseDataFrame",
"numpy.nonzero",
"pandas.core.nanops.nanvar",
"pandas.util.testing.skip_if_no_ne",
"pandas.util.testing.makeTimeSeries",
"pandas.util.testing.makeDataFrame",
"pandas.Period",
"numpy.percentile",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.makePeriodFrame",
"pandas.tseries.tools.to_datetime",
"numpy.iinfo",
"numpy.isinf",
"pandas.Index",
"pandas.eval",
"numpy.cov",
"pandas.util.testing.getMixedTypeDict",
"numpy.random.permutation",
"pandas.util.testing.makePanel4D",
"pandas.util.testing.assert_dict_equal",
"pandas.util.testing.assert_series_equal",
"numpy.dot",
"pandas.core.common.is_float_dtype",
"pandas.DatetimeIndex",
"pandas.DataFrame.from_items",
"numpy.mean",
"pandas.concat",
"pandas.compat.range",
"pandas.period_range",
"numpy.arange",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.equalContents",
"scipy.stats.kurtosis",
"numpy.array",
"numpy.zeros",
"numpy.lexsort",
"numpy.round",
"numpy.random.randn",
"pandas.MultiIndex.from_tuples",
"numpy.random.shuffle",
"pandas.util.testing.rands",
"numpy.ma.masked_array",
"pandas.isnull",
"pandas.util.testing.assert_frame_equal",
"numpy.ma.mrecords.fromarrays",
"pandas.DataFrame.from_dict",
"pandas.date_range",
"numpy.ones",
"pandas.option_context",
"pandas.Series",
"numpy.repeat",
"pandas.util.testing.assertIsInstance",
"pandas.compat.u",
"numpy.where",
"pandas.util.testing.choice",
"pandas.Timedelta",
"pandas.compat.is_platform_windows",
"pandas.timedelta_range",
"pandas.util.testing.assert_panel_equal",
"numpy.atleast_2d",
"pandas.to_datetime",
"pandas.compat.text_type",
"pandas.MultiIndex.from_arrays",
"numpy.std",
"numpy.isscalar",
"numpy.corrcoef",
"numpy.datetime64",
"pandas.util.testing.makeCategoricalIndex",
"pandas.util.testing.makeTimeDataFrame",
"numpy.core.records.fromarrays",
"pandas.compat.lmap",
"numpy.asarray",
"pandas.util.testing.makePanel",
"numpy.abs",
"numpy.linspace",
"pandas.compat.StringIO",
"pandas.util.testing.makeCustomDataframe",
"pandas.reset_option",
"pandas.Timestamp",
"pandas.util.testing._skip_if_no_scipy",
"numpy.apply_along_axis",
"pandas.compat.map",
"numpy.size",
"pandas.read_csv",
"numpy.dtype",
"pandas.util.testing.ensure_clean",
"pandas.compat.long",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.DataFrame.from_records",
"pandas.core.common.is_integer_dtype",
"pandas.merge",
"numpy.putmask",
"pandas.util.testing.assertRaises",
"pandas.util.testing.assert_index_equal",
"numpy.ma.copy",
"numpy.random.randint",
"pandas.MultiIndex",
"numpy.sqrt",
"pandas.util.testing._incompat_bottleneck_version",
"pandas.core.format.set_option",
"pandas.DataFrame.from_csv",
"numpy.shape",
"pandas.compat.lrange",
"pandas.core.common.is_integer",
"pandas.MultiIndex.from_product",
"numpy.timedelta64",
"pandas.util.testing.getTimeSeriesData",
"numpy.hstack",
"numpy.isnan",
"pandas.compat.zip",
"pandas.util.misc.is_little_endian",
"pandas.to_timedelta",
"pandas.util.testing.SubclassedDataFrame",
"pandas.Categorical",
"pandas.util.testing.assertRaisesRegexp",
"pandas.core.datetools.BDay",
"numpy.var"
]
] |
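
The record above stores pandas' DataFrame.query/eval test module, whose cases revolve around resolving bare column names versus `@`-prefixed Python locals under the various engine/parser combinations. A minimal sketch of that core pattern, assuming only pandas and numpy; the column names and cutoff value are illustrative, not taken from the record:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"])
cutoff = 0.5  # plain Python local, referenced with '@' inside the expression

# bare names resolve to columns; '@cutoff' pulls in the local variable
res = df.query("a < b and a < @cutoff")
expected = df[(df.a < df.b) & (df.a < cutoff)]
pd.testing.assert_frame_equal(res, expected)
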
leonsariel/python
|
[
"dd68c21a02417341031b40c945152a61be12e3eb"
] |
[
"finance/tutorial/tester.py"
] |
[
"# _*_ coding: utf-8 _*_\n__author__ = 'Di Meng'\n__date__ = '1/3/2018 10:16 PM'\n\n# _*_ coding: utf-8 _*_\n__author__ = 'Di Meng'\n__date__ = '1/3/2018 9:26 PM'\n\nfrom tutorial.feature_functions import *\nimport pandas as pd\nimport plotly as py\nimport json\n\nfrom plotly import tools\nimport plotly.graph_objs as go\n\n#loading our data\ndf = pd.read_csv('EURUSD_hours.csv')\ndf.columns = ['date','open','high','low','close','volume']\ndf.date = pd.to_datetime(df.date,format='%d.%m.%Y %H:%M:%S.%f')\ndf = df.set_index(df.date)\ndf = df[['open','high','low','close','volume']]\ndf.drop_duplicates(keep=False)\ndf = df.iloc[:500]\n\n#moving average\nma = df.close.rolling(center=False, window=30).mean()\n\n\n# detrended = detrend(df, method='difference')\n\n# f = fourier(df, [10, 15],method='difference')\n\n#HA\n# HAresults = candles(df, [1])\n# HA = HAresults.candles[1]\n\n#wad\nresults = wadl(df, [15])\nline = results.wadl[15]\nprint(line['close'])\n\n# draw grarphs\ntrace = go.Ohlc(x=df.index, open=df.open, high=df.high, low=df.low, close=df.close, name='Currency Quote')\ntrace1 = go.Scatter(x=df.index, y=ma)\n\ntrace2 = go.Scatter(x=df.index, y=(line.close.to_json()))\n\n# linear detrand plot\n# trace2 = go.Scatter(x=df.index, y=detrended)\n\n# difference detrand plot\n# trace2 = go.Scatter(x=df.index, y=detrended)\n\n\n\n\ndata = [trace, trace1, trace2]\nfig = tools.make_subplots(rows=2,cols=1,shared_xaxes=True)\nfig.append_trace(trace,1,1)\nfig.append_trace(trace1,1,1)\nfig.append_trace(trace2,2,1)\n\npy.offline.plot(fig, filename=\"test.html\")"
] |
[
[
"pandas.to_datetime",
"pandas.read_csv"
]
] |
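
The tester script in the preceding record smooths the close price with a 30-bar rolling mean before charting it with plotly. A tiny self-contained sketch of that smoothing step on synthetic data (the repo's feature_functions helpers such as wadl are not reproduced here):

import numpy as np
import pandas as pd

# synthetic close prices standing in for the EURUSD hourly quotes the script reads
close = pd.Series(np.cumsum(np.random.randn(500)) * 0.001 + 1.10, name="close")

# 30-period simple moving average, mirroring df.close.rolling(center=False, window=30).mean()
ma = close.rolling(window=30).mean()
print(ma.dropna().head())
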
KevinLee3627/pi-temp-monitor
|
[
"0ab519f19693a201fa5a49e58cfa7e73becd7206"
] |
[
"monitor_temp.py"
] |
[
"from gpiozero import CPUTemperature\nfrom tabulate import tabulate\nfrom math import floor\nimport numpy as np\nimport termplotlib as tpl\nimport time\nimport shutil\n\ndef roundNum(num, digits):\n return floor(num * 10 ** digits) / (10 ** digits)\n\ndef CtoF(temp):\n fahrenheit = (temp + 1.8) + 32\n rounded = roundNum(fahrenheit, 3)\n return str(rounded)\n\ncpu = CPUTemperature()\ncolors = {\n 'HEADER': '\\033[95m',\n 'OKBLUE': '\\033[94m',\n 'OKCYAN': '\\033[96m',\n 'OKGREEN': '\\033[92m',\n 'WARNING': '\\033[93m',\n 'FAIL': '\\033[91m',\n 'ENDC': '\\033[0m',\n 'BOLD': '\\033[1m',\n 'UNDERLINE': '\\033[4m',\n}\n\ntimes = [0]\ntemps = [cpu.temperature]\n\n\nwhile True:\n tickRate = 2 #takes data every {tickRate} seconds\n minutes = 5\n numPoints = int(60 / tickRate * minutes)\n width, height = shutil.get_terminal_size()\n\n if len(temps) > numPoints:\n temps = temps[-numPoints:]\n times = times[-numPoints:]\n\n temps.append(cpu.temperature)\n times.append(times[-1] + tickRate)\n\n averageTemp = roundNum(np.average(temps), 3)\n\n cpuTempColor = ''\n if cpu.temperature < 50:\n cpuTempColor = colors['OKBLUE']\n elif cpu.temperature < 65:\n cpuTempColor = colors['OKCYAN']\n elif cpu.temperature < 80:\n cpuTempColor = colors['OKGREEN']\n else:\n cpuTempColor = colors['FAIL'] + colors['BOLD']\n\n table = [[\n f\"{cpuTempColor}{str(cpu.temperature)}\\N{DEGREE SIGN}C / {CtoF(cpu.temperature)}\\N{DEGREE SIGN}F\\n\",\n f\"{colors['OKGREEN']}{averageTemp} / {CtoF(averageTemp)}\\N{DEGREE SIGN}F\\n\",\n f\"{colors['OKGREEN']}{np.amax(temps)} / {CtoF(np.amax(temps))}\\N{DEGREE SIGN}F\\n\",\n f\"{colors['OKGREEN']}{np.amin(temps)} / {CtoF(np.amin(temps))}\\N{DEGREE SIGN}F\"\n ]]\n\n headers = [\n f\"{colors['OKGREEN']}CPU TEMPERATURE\",\n f\"{colors['OKGREEN']}Average Temperature (last {minutes} minutes)\",\n f\"{colors['FAIL']}Peak Temperature (last {minutes} minutes)\",\n f\"{colors['OKCYAN']}Lowest Temperature (last {minutes} minutes){colors['OKGREEN']}\", #OKGREEN at end is to make sure table lines are green, not cyan\n ]\n\n print('\\n')\n fig = tpl.figure()\n plotConfig = {\n 'width': width-2,\n 'height': height-5,\n 'label': 'CPU Temperature',\n 'xlabel': 'Time (s)',\n 'xlim': [times[0], times[-1:]],\n 'ylim': [np.amin(temps)-2, np.amax(temps)+2],\n 'title': f\"CPU Temperature over last {minutes} minutes\",\n }\n fig.plot(times, temps, **plotConfig)\n fig.show()\n # width=width-2, height=height-5, label='CPU Temperature', xlabel='Time (s)', , ylim=[np.amin(temps)-2, np.amax(temps)+2], title='CPU Temperature over last 5 minutes'\n print('\\n')\n print(tabulate(table, headers=headers))\n\n time.sleep(tickRate)"
] |
[
[
"numpy.average",
"numpy.amax",
"numpy.amin"
]
] |
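
The monitoring script in the record above converts CPU temperature with `(temp + 1.8) + 32`, which looks like a typo for the standard Celsius-to-Fahrenheit formula `C * 9/5 + 32`. A minimal corrected sketch of that helper, keeping the truncating round-off used elsewhere in the script:

from math import floor

def round_num(num, digits):
    # truncate to the given number of decimal places, as the original roundNum does
    return floor(num * 10 ** digits) / (10 ** digits)

def c_to_f(temp_c):
    # F = C * 9/5 + 32 (the recorded version adds 1.8 instead of multiplying)
    return str(round_num(temp_c * 9 / 5 + 32, 3))

print(c_to_f(45.0))  # '113.0'
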
ShuanDeMorian/deepspeech.pytorch
|
[
"58d7a693447ead632ef9b625681790ee8b5f6b82"
] |
[
"data/data_loader.py"
] |
[
"import os\nimport subprocess\nfrom tempfile import NamedTemporaryFile\n\nfrom torch.distributed import get_rank\nfrom torch.distributed import get_world_size\nfrom torch.utils.data.sampler import Sampler\n\nimport librosa\nimport numpy as np\nimport scipy.signal\nimport torch\nfrom scipy.io.wavfile import read\nimport math\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom .spec_augment import spec_augment\n\nfrom hangul_utils import split_syllable_char, split_syllables, join_jamos\n\nwindows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman,\n 'bartlett': scipy.signal.bartlett}\n\n\ndef load_audio(path):\n# sample_rate, sound = read(path)\n sound, sr = librosa.load(path, sr=16000)\n \n# librosa.output.write_wav('org.wav', sound, sr)\n# print('save 1')\n \n# sound = sound.astype('float32') / 32767 # normalize audio\n sound = librosa.util.normalize(sound) # normalize audio\n sound = sound.astype('float32')\n \n# librosa.output.write_wav('norm.wav', sound, sr)\n# print('save 2')\n \n if len(sound.shape) > 1:\n if sound.shape[1] == 1:\n sound = sound.squeeze()\n else:\n sound = sound.mean(axis=1) # multiple channels, average\n return sound\n\n\nclass AudioParser(object):\n def parse_transcript(self, transcript_path):\n \"\"\"\n :param transcript_path: Path where transcript is stored from the manifest file\n :return: Transcript in training/testing format\n \"\"\"\n raise NotImplementedError\n\n def parse_audio(self, audio_path):\n \"\"\"\n :param audio_path: Path where audio is stored from the manifest file\n :return: Audio in training/testing format\n \"\"\"\n raise NotImplementedError\n\n\nclass NoiseInjection(object):\n def __init__(self,\n path=None,\n sample_rate=16000,\n noise_levels=(0, 0.5)):\n \"\"\"\n Adds noise to an input signal with specific SNR. 
Higher the noise level, the more noise added.\n Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py\n \"\"\"\n if path is not None and not os.path.exists(path):\n print(\"Directory doesn't exist: {}\".format(path))\n raise IOError\n self.paths = path is not None and librosa.util.find_files(path)\n self.sample_rate = sample_rate\n self.noise_levels = noise_levels\n\n def inject_noise(self, data):\n noise_path = np.random.choice(self.paths)\n noise_level = np.random.uniform(*self.noise_levels)\n return self.inject_noise_sample(data, noise_path, noise_level)\n\n def inject_noise_sample(self, data, noise_path, noise_level):\n noise_len = get_audio_length(noise_path)\n data_len = len(data) / self.sample_rate\n noise_start = np.random.rand() * (noise_len - data_len)\n noise_end = noise_start + data_len\n noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)\n assert len(data) == len(noise_dst)\n noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)\n data_energy = np.sqrt(data.dot(data) / data.size)\n data += noise_level * noise_dst * data_energy / noise_energy\n return data\n\n\nclass SpectrogramParser(AudioParser):\n def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False):\n \"\"\"\n Parses audio file into spectrogram with optional normalization and various augmentations\n :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds\n :param normalize(default False): Apply standard mean and deviation normalization to audio tensor\n :param speed_volume_perturb(default False): Apply random tempo and gain perturbations\n :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms\n \"\"\"\n super(SpectrogramParser, self).__init__()\n self.window_stride = audio_conf['window_stride']\n self.window_size = audio_conf['window_size']\n self.sample_rate = audio_conf['sample_rate']\n self.window = windows.get(audio_conf['window'], windows['hamming'])\n self.normalize = normalize\n self.speed_volume_perturb = speed_volume_perturb\n self.spec_augment = spec_augment\n self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,\n audio_conf['noise_levels']) if audio_conf.get(\n 'noise_dir') is not None else None\n self.noise_prob = audio_conf.get('noise_prob')\n\n def parse_audio(self, audio_path,audio=None,change_speed=None):\n if audio is not None:\n y = audio\n elif self.speed_volume_perturb:\n y = load_randomly_augmented_audio(audio_path, self.sample_rate)\n# librosa.output.write_wav('test.wav', y, sr=16000, norm=False)\n# print('test')\n else:\n y = load_audio(audio_path)\n \n# librosa.output.write_wav('y1.wav', y, sr=16000)\n# print('save@@@@@@@@@@@@') \n \n # change audio speed\n if change_speed is not None:\n y = librosa.effects.time_stretch(y, change_speed)\n \n if self.noiseInjector:\n add_noise = np.random.binomial(1, self.noise_prob)\n if add_noise:\n y = self.noiseInjector.inject_noise(y)\n \n# librosa.output.write_wav('y2.wav', y, sr=16000)\n# print('save@@@@@@@@@@@@') \n# import sys\n# sys.exit()\n \n n_fft = int(self.sample_rate * self.window_size)\n win_length = n_fft\n hop_length = int(self.sample_rate * self.window_stride)\n # STFT\n D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, window=self.window)\n spect, phase = librosa.magphase(D)\n # S = log(S+1)\n spect = np.log1p(spect)\n spect = torch.FloatTensor(spect)\n if self.normalize:\n mean = 
spect.mean()\n std = spect.std()\n spect.add_(-mean)\n spect.div_(std)\n\n if self.spec_augment:\n spect = spec_augment(spect)\n\n return spect\n\n def parse_transcript(self, transcript_path):\n raise NotImplementedError\n\n\nclass SpectrogramDataset(Dataset, SpectrogramParser):\n def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):\n \"\"\"\n Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by\n a comma. Each new line is a different sample. Example below:\n\n /path/to/audio.wav,/path/to/audio.txt\n ...\n\n :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds\n :param manifest_filepath: Path to manifest csv as describe above\n :param labels: String containing all the possible characters to map to\n :param normalize: Apply standard mean and deviation normalization to audio tensor\n :param speed_volume_perturb(default False): Apply random tempo and gain perturbations\n :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms\n \"\"\"\n with open(manifest_filepath) as f:\n ids = f.readlines()\n ids = [x.strip().split(',') for x in ids]\n self.ids = ids\n self.size = len(ids)\n self.labels_map = dict([(labels[i], i) for i in range(len(labels))])\n\n try:\n self.use_jamo = audio_conf['use_jamo']\n except:\n self.use_jamo = False\n \n super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment)\n\n def __getitem__(self, index):\n sample = self.ids[index]\n audio_path, transcript_path = sample[0], sample[1]\n spect = self.parse_audio(audio_path)\n transcript = self.parse_transcript(transcript_path)\n \n return spect, transcript\n\n def parse_transcript(self, transcript_path):\n with open(transcript_path, 'r', encoding='utf8') as transcript_file:\n# with open(transcript_path, 'r', encoding='utf-16') as transcript_file:\n transcript = transcript_file.read().replace('\\n', '')\n \n if self.use_jamo:\n transcript = split_syllables(transcript)\n \n transcript = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))\n return transcript\n\n def __len__(self):\n return self.size\n\n\ndef _collate_fn(batch):\n def func(p):\n return p[0].size(1)\n\n batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True)\n longest_sample = max(batch, key=func)[0]\n freq_size = longest_sample.size(0)\n minibatch_size = len(batch)\n max_seqlength = longest_sample.size(1)\n inputs = torch.zeros(minibatch_size, 1, freq_size, max_seqlength)\n input_percentages = torch.FloatTensor(minibatch_size)\n target_sizes = torch.IntTensor(minibatch_size)\n targets = []\n for x in range(minibatch_size):\n sample = batch[x]\n tensor = sample[0]\n target = sample[1]\n seq_length = tensor.size(1)\n inputs[x][0].narrow(1, 0, seq_length).copy_(tensor)\n input_percentages[x] = seq_length / float(max_seqlength)\n target_sizes[x] = len(target)\n targets.extend(target)\n targets = torch.IntTensor(targets)\n return inputs, targets, input_percentages, target_sizes\n\n\nclass AudioDataLoader(DataLoader):\n def __init__(self, *args, **kwargs):\n \"\"\"\n Creates a data loader for AudioDatasets.\n \"\"\"\n super(AudioDataLoader, self).__init__(*args, **kwargs)\n self.collate_fn = _collate_fn\n\n\nclass BucketingSampler(Sampler):\n def __init__(self, data_source, batch_size=1):\n \"\"\"\n Samples batches assuming they are in order of size to batch similarly sized samples 
together.\n \"\"\"\n super(BucketingSampler, self).__init__(data_source)\n self.data_source = data_source\n ids = list(range(0, len(data_source)))\n self.bins = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]\n\n def __iter__(self):\n for ids in self.bins:\n np.random.shuffle(ids)\n yield ids\n\n def __len__(self):\n return len(self.bins)\n\n def shuffle(self, epoch):\n np.random.shuffle(self.bins)\n\n\nclass DistributedBucketingSampler(Sampler):\n def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None):\n \"\"\"\n Samples batches assuming they are in order of size to batch similarly sized samples together.\n \"\"\"\n super(DistributedBucketingSampler, self).__init__(data_source)\n if num_replicas is None:\n num_replicas = get_world_size()\n if rank is None:\n rank = get_rank()\n self.data_source = data_source\n self.ids = list(range(0, len(data_source)))\n self.batch_size = batch_size\n self.bins = [self.ids[i:i + batch_size] for i in range(0, len(self.ids), batch_size)]\n self.num_replicas = num_replicas\n self.rank = rank\n self.num_samples = int(math.ceil(len(self.bins) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n\n def __iter__(self):\n offset = self.rank\n # add extra samples to make it evenly divisible\n bins = self.bins + self.bins[:(self.total_size - len(self.bins))]\n assert len(bins) == self.total_size\n samples = bins[offset::self.num_replicas] # Get every Nth bin, starting from rank\n return iter(samples)\n\n def __len__(self):\n return self.num_samples\n\n def shuffle(self, epoch):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(epoch)\n bin_ids = list(torch.randperm(len(self.bins), generator=g))\n self.bins = [self.bins[i] for i in bin_ids]\n\n\ndef get_audio_length(path):\n output = subprocess.check_output(['soxi -D \\\"%s\\\"' % path.strip()], shell=True)\n return float(output)\n\n\ndef audio_with_sox(path, sample_rate, start_time, end_time):\n \"\"\"\n crop and resample the recording with sox and loads it.\n \"\"\"\n with NamedTemporaryFile(suffix=\".wav\") as tar_file:\n tar_filename = tar_file.name\n sox_params = \"sox \\\"{}\\\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1\".format(path, sample_rate,\n tar_filename, start_time,\n end_time)\n os.system(sox_params)\n y = load_audio(tar_filename)\n return y\n\n\ndef augment_audio_with_sox(path, sample_rate, tempo, gain):\n \"\"\"\n Changes tempo and gain of the recording with sox and loads it.\n \"\"\"\n with NamedTemporaryFile(suffix=\".wav\") as augmented_file:\n augmented_filename = augmented_file.name\n sox_augment_params = [\"tempo\", \"{:.3f}\".format(tempo), \"gain\", \"{:.3f}\".format(gain)]\n sox_params = \"sox \\\"{}\\\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1\".format(path, sample_rate,\n augmented_filename,\n \" \".join(sox_augment_params))\n os.system(sox_params)\n y = load_audio(augmented_filename)\n return y\n\n\n# original tempo_range=(0.85,1.15)\n# original gain_range=(-6,8)\ndef load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85,1.15),\n gain_range=(-6, 8)):\n \"\"\"\n Picks tempo and gain uniformly, applies it to the utterance by using sox utility.\n Returns the augmented utterance.\n \"\"\"\n low_tempo, high_tempo = tempo_range\n tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)\n low_gain, high_gain = gain_range\n gain_value = np.random.uniform(low=low_gain, high=high_gain)\n audio = augment_audio_with_sox(path=path, 
sample_rate=sample_rate,\n tempo=tempo_value, gain=gain_value)\n \n return audio\n"
] |
[
[
"torch.zeros",
"torch.distributed.get_world_size",
"numpy.random.binomial",
"numpy.random.choice",
"numpy.random.rand",
"torch.IntTensor",
"torch.FloatTensor",
"torch.Generator",
"numpy.random.shuffle",
"numpy.log1p",
"numpy.random.uniform",
"torch.distributed.get_rank"
]
] |
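
The loader in the record above converts audio to a log-magnitude STFT spectrogram (librosa.stft, magphase, log1p, then a torch FloatTensor with optional per-utterance normalization). A minimal sketch of that pipeline on a synthetic signal, assuming librosa and torch are installed; the 20 ms window / 10 ms stride values are typical settings, not read from the repo's audio_conf:

import numpy as np
import librosa
import torch

sample_rate = 16000
y = np.random.randn(sample_rate).astype("float32")  # one second of noise standing in for speech

n_fft = int(sample_rate * 0.02)        # 20 ms window
hop_length = int(sample_rate * 0.01)   # 10 ms stride

# STFT -> magnitude -> log(1 + S), as in SpectrogramParser.parse_audio
D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, win_length=n_fft)
spect, _ = librosa.magphase(D)
spect = torch.FloatTensor(np.log1p(spect))

# per-utterance normalization, applied when normalize=True
spect = (spect - spect.mean()) / spect.std()
print(spect.shape)  # (freq_bins, frames)
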
kuanhanl/k_aug
|
[
"5ceaccbf9e699a9dffe284de686f1b623cafbec5"
] |
[
"Reduce_hessian/tests/B1.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 12 14:25:43 2020\n\n@author: greg6\n\"\"\"\n\nimport numpy as np\n\nt = [i for i in range(3)]\nlam = [100+i*10 for i in range(2)]\ncom = [\"A\",\"B\",\"C\"]\n\nS = dict()\nfor l in lam:\n for u,c in enumerate(com):\n S[(l,c)] = l+0.1*u\n\nC = dict()\nfor i in t:\n for u,c in enumerate(com):\n C[(i,c)] = (i+0.1*u)\n\nnt = len(t)\nnw = len(lam)\nnc = len(com)\nnparams = 2\n\nnd = nw*nt\nntheta = nc*(nw+nt)+nparams\n\nB_matrix = np.zeros((ntheta,nw*nt))\nfor i, t in enumerate(t):\n for j, l in enumerate(lam):\n for k, c in enumerate(com):\n # r_idx1 = k*nt+i\n r_idx1 = i * nc + k\n r_idx2 = j * nc + k + nc * nt\n # r_idx2 = j * nc + k + nc * nw\n # c_idx = i+j*nt\n c_idx = i * nw + j\n # print(j, k, r_idx2)\n B_matrix[r_idx1, c_idx] = S[l, c]\n # try:\n B_matrix[r_idx2, c_idx] = C[t, c]"
] |
[
[
"numpy.zeros"
]
] |
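
B1.py in the record above assembles the B matrix for the reduced-Hessian calculation from what look like spectra S(lambda, c) and concentrations C(t, c): an S-block in the first nc*nt rows stacked over a C-block in the next nc*nw rows, one column per (time, wavelength) pair, plus two trailing zero rows for the parameters. A sketch of the same construction that avoids reusing the name `t` for both the time list and the loop element:

import numpy as np

times = list(range(3))
lams = [100 + 10 * i for i in range(2)]
comps = ["A", "B", "C"]

S = {(l, c): l + 0.1 * u for l in lams for u, c in enumerate(comps)}
C = {(ti, c): ti + 0.1 * u for ti in times for u, c in enumerate(comps)}

nt, nw, nc, nparams = len(times), len(lams), len(comps), 2
ntheta = nc * (nw + nt) + nparams

B = np.zeros((ntheta, nw * nt))
for i, ti in enumerate(times):
    for j, l in enumerate(lams):
        for k, c in enumerate(comps):
            col = i * nw + j                          # column per (time, wavelength) pair
            B[i * nc + k, col] = S[l, c]              # S-block: rows 0 .. nc*nt - 1
            B[j * nc + k + nc * nt, col] = C[ti, c]   # C-block: rows nc*nt .. nc*(nt+nw) - 1
print(B.shape)  # (17, 6)
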
vpeterson/mne-python
|
[
"a6e2222a7e76f5b13a371697b1b61d22ac5bf67d"
] |
[
"mne/io/kit/kit.py"
] |
[
"\"\"\"Conversion tool from SQD to FIF.\n\nRawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py.\n\"\"\"\n\n# Authors: Teon Brooks <teon.brooks@gmail.com>\n# Joan Massich <mailsik@gmail.com>\n# Christian Brodbeck <christianbrodbeck@nyu.edu>\n#\n# License: BSD (3-clause)\n\nfrom collections import defaultdict, OrderedDict\nfrom math import sin, cos\nfrom os import SEEK_CUR, path as op\nfrom struct import unpack\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..pick import pick_types\nfrom ...utils import (verbose, logger, warn, fill_doc, _check_option,\n _stamp_to_dt)\nfrom ...transforms import apply_trans, als_ras_trans\nfrom ..base import BaseRaw\nfrom ..utils import _mult_cal_one\nfrom ...epochs import BaseEpochs\nfrom ..constants import FIFF\nfrom ..meas_info import _empty_info\nfrom .constants import KIT, LEGACY_AMP_PARAMS\nfrom .coreg import read_mrk\nfrom ...event import read_events\n\nfrom .._digitization import _set_dig_kit\n\n\ndef _call_digitization(info, mrk, elp, hsp, kit_info):\n # Use values from kit_info only if all others are None\n if mrk is None and elp is None and hsp is None:\n mrk = kit_info.get('mrk', None)\n elp = kit_info.get('elp', None)\n hsp = kit_info.get('hsp', None)\n\n # prepare mrk\n if isinstance(mrk, list):\n mrk = [read_mrk(marker) if isinstance(marker, str)\n else marker for marker in mrk]\n mrk = np.mean(mrk, axis=0)\n\n # setup digitization\n if mrk is not None and elp is not None and hsp is not None:\n dig_points, dev_head_t = _set_dig_kit(\n mrk, elp, hsp, kit_info['eeg_dig'])\n info['dig'] = dig_points\n info['dev_head_t'] = dev_head_t\n elif mrk is not None or elp is not None or hsp is not None:\n raise ValueError(\"mrk, elp and hsp need to be provided as a group \"\n \"(all or none)\")\n\n return info\n\n\nclass UnsupportedKITFormat(ValueError):\n \"\"\"Our reader is not guaranteed to work with old files.\"\"\"\n\n def __init__(self, sqd_version, *args, **kwargs): # noqa: D102\n self.sqd_version = sqd_version\n ValueError.__init__(self, *args, **kwargs)\n\n\n@fill_doc\nclass RawKIT(BaseRaw):\n \"\"\"Raw object from KIT SQD file.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n stim : list of int | '<' | '>' | None\n Channel-value correspondence when converting KIT trigger channels to a\n Neuromag-style stim channel. For '<', the largest values are assigned\n to the first channel (default). For '>', the largest values are\n assigned to the last channel. Can also be specified as a list of\n trigger channel indexes. If None, no synthesized channel is generated.\n slope : '+' | '-'\n How to interpret values on KIT trigger channels when synthesizing a\n Neuromag-style stim channel. With '+', a positive slope (low-to-high)\n is interpreted as an event. 
With '-', a negative slope (high-to-low)\n is interpreted as an event.\n stimthresh : float\n The threshold level for accepting voltage changes in KIT trigger\n channels as a trigger event. If None, stim must also be set to None.\n %(preload)s\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Notes\n -----\n ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the\n Polhemus FastScan system. hsp refers to the headshape surface points. elp\n refers to the points in head-space that corresponds to the HPI points.\n Currently, '*.elp' and '*.hsp' files are NOT supported.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n\n @verbose\n def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',\n slope='-', stimthresh=1, preload=False, stim_code='binary',\n allow_unknown_format=False, standardize_names=None,\n verbose=None): # noqa: D102\n logger.info('Extracting SQD Parameters from %s...' % input_fname)\n input_fname = op.abspath(input_fname)\n self.preload = False\n logger.info('Creating Raw.info structure...')\n info, kit_info = get_kit_info(\n input_fname, allow_unknown_format, standardize_names)\n kit_info['slope'] = slope\n kit_info['stimthresh'] = stimthresh\n if kit_info['acq_type'] != KIT.CONTINUOUS:\n raise TypeError('SQD file contains epochs, not raw data. Wrong '\n 'reader.')\n logger.info('Creating Info structure...')\n\n last_samps = [kit_info['n_samples'] - 1]\n self._raw_extras = [kit_info]\n self._set_stimchannels(info, stim, stim_code)\n super(RawKIT, self).__init__(\n info, preload, last_samps=last_samps, filenames=[input_fname],\n raw_extras=self._raw_extras, verbose=verbose)\n self.info = _call_digitization(\n info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)\n logger.info('Ready.')\n\n def read_stim_ch(self, buffer_size=1e5):\n \"\"\"Read events from data.\n\n Parameter\n ---------\n buffer_size : int\n The size of chunk to by which the data are scanned.\n\n Returns\n -------\n events : array, [samples]\n The event vector (1 x samples).\n \"\"\"\n buffer_size = int(buffer_size)\n start = int(self.first_samp)\n stop = int(self.last_samp + 1)\n\n pick = pick_types(self.info, meg=False, ref_meg=False,\n stim=True, exclude=[])\n stim_ch = np.empty((1, stop), dtype=np.int64)\n for b_start in range(start, stop, buffer_size):\n b_stop = b_start + buffer_size\n x = self[pick, b_start:b_stop][0]\n stim_ch[:, b_start:b_start + x.shape[1]] = x\n\n return stim_ch\n\n def _set_stimchannels(self, info, stim, stim_code):\n \"\"\"Specify how the trigger channel is synthesized from analog channels.\n\n Has to be done before loading data. 
For a RawKIT instance that has been\n created with preload=True, this method will raise a\n NotImplementedError.\n\n Parameters\n ----------\n info : instance of MeasInfo\n The measurement info.\n stim : list of int | '<' | '>'\n Can be submitted as list of trigger channels.\n If a list is not specified, the default triggers extracted from\n misc channels will be used with specified directionality.\n '<' means that largest values assigned to the first channel\n in sequence.\n '>' means the largest trigger assigned to the last channel\n in sequence.\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n \"\"\"\n if self.preload:\n raise NotImplementedError(\"Can't change stim channel after \"\n \"loading data\")\n _check_option('stim_code', stim_code, ['binary', 'channel'])\n\n if stim is not None:\n if isinstance(stim, str):\n picks = _default_stim_chs(info)\n if stim == '<':\n stim = picks[::-1]\n elif stim == '>':\n stim = picks\n else:\n raise ValueError(\"stim needs to be list of int, '>' or \"\n \"'<', not %r\" % str(stim))\n else:\n stim = np.asarray(stim, int)\n if stim.max() >= self._raw_extras[0]['nchan']:\n raise ValueError(\n 'Got stim=%s, but sqd file only has %i channels' %\n (stim, self._raw_extras[0]['nchan']))\n\n # modify info\n nchan = self._raw_extras[0]['nchan'] + 1\n info['chs'].append(dict(\n cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0,\n unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE,\n ch_name='STI 014',\n coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan),\n kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN))\n info._update_redundant()\n\n self._raw_extras[0]['stim'] = stim\n self._raw_extras[0]['stim_code'] = stim_code\n\n def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):\n \"\"\"Read a chunk of raw data.\"\"\"\n sqd = self._raw_extras[fi]\n nchan = sqd['nchan']\n data_left = (stop - start) * nchan\n conv_factor = sqd['conv_factor']\n\n n_bytes = sqd['dtype'].itemsize\n assert n_bytes in (2, 4)\n # Read up to 100 MB of data at a time.\n blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan)\n with open(self._filenames[fi], 'rb', buffering=0) as fid:\n # extract data\n pointer = start * nchan * n_bytes\n fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer)\n stim = sqd['stim']\n for blk_start in np.arange(0, data_left, blk_size) // nchan:\n blk_size = min(blk_size, data_left - blk_start * nchan)\n block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size)\n block = block.reshape(nchan, -1, order='F').astype(float)\n blk_stop = blk_start + block.shape[1]\n data_view = data[:, blk_start:blk_stop]\n block *= conv_factor\n\n # Create a synthetic stim channel\n if stim is not None:\n stim_ch = _make_stim_channel(\n block[stim, :], sqd['slope'], sqd['stimthresh'],\n sqd['stim_code'], stim)\n block = np.vstack((block, stim_ch))\n\n _mult_cal_one(data_view, block, idx, cals, mult)\n # cals are all unity, so can be ignored\n\n\ndef _default_stim_chs(info):\n \"\"\"Return default stim channels for SQD files.\"\"\"\n return pick_types(info, meg=False, ref_meg=False, misc=True,\n exclude=[])[:8]\n\n\ndef _make_stim_channel(trigger_chs, slope, threshold, stim_code,\n trigger_values):\n \"\"\"Create synthetic stim channel from multiple trigger channels.\"\"\"\n if slope == '+':\n trig_chs_bin = trigger_chs > threshold\n elif slope == '-':\n trig_chs_bin = trigger_chs < 
threshold\n else:\n raise ValueError(\"slope needs to be '+' or '-'\")\n # trigger value\n if stim_code == 'binary':\n trigger_values = 2 ** np.arange(len(trigger_chs))\n elif stim_code != 'channel':\n raise ValueError(\"stim_code must be 'binary' or 'channel', got %s\" %\n repr(stim_code))\n trig_chs = trig_chs_bin * trigger_values[:, np.newaxis]\n return np.array(trig_chs.sum(axis=0), ndmin=2)\n\n\nclass EpochsKIT(BaseEpochs):\n \"\"\"Epochs Array object from KIT SQD file.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n events : str | array, shape (n_events, 3)\n Path to events file. If array, it is the events typically returned\n by the read_events function. If some events don't match the events\n of interest as specified by event_id,they will be marked as 'IGNORED'\n in the drop log.\n event_id : int | list of int | dict | None\n The id of the event to consider. If dict,\n the keys can later be used to access associated events. Example:\n dict(auditory=1, visual=3). If int, a dict will be created with\n the id as string. If a list, all events with the IDs specified\n in the list are used. If None, all events will be used with\n and a dict is created with string integer names corresponding\n to the event id integers.\n tmin : float\n Start time before event.\n baseline : None or tuple of length 2 (default (None, 0))\n The time interval to apply baseline correction.\n If None do not apply it. If baseline is (a, b)\n the interval is between \"a (s)\" and \"b (s)\".\n If a is None the beginning of the data is used\n and if b is None then b is set to the end of the interval.\n If baseline is equal to (None, None) all the time\n interval is used.\n The baseline (a, b) includes both endpoints, i.e. all\n timepoints t such that a <= t <= b.\n reject : dict | None\n Rejection parameters based on peak-to-peak amplitude.\n Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.\n If reject is None then no rejection is done. Example::\n\n reject = dict(grad=4000e-13, # T / m (gradiometers)\n mag=4e-12, # T (magnetometers)\n eeg=40e-6, # V (EEG channels)\n eog=250e-6 # V (EOG channels)\n )\n flat : dict | None\n Rejection parameters based on flatness of signal.\n Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values\n are floats that set the minimum acceptable peak-to-peak amplitude.\n If flat is None then no rejection is done.\n reject_tmin : scalar | None\n Start of the time window used to reject epochs (with the default None,\n the window will start with tmin).\n reject_tmax : scalar | None\n End of the time window used to reject epochs (with the default None,\n the window will end with tmax).\n mrk : None | str | array_like, shape = (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape = (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape = (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10`000 points are in the head shape, they are automatically decimated.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. 
Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Notes\n -----\n ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the\n Polhemus FastScan system. hsp refers to the headshape surface points. elp\n refers to the points in head-space that corresponds to the HPI points.\n Currently, '*.elp' and '*.hsp' files are NOT supported.\n\n See Also\n --------\n mne.Epochs : Documentation of attribute and methods.\n \"\"\"\n\n @verbose\n def __init__(self, input_fname, events, event_id=None, tmin=0,\n baseline=None, reject=None, flat=None, reject_tmin=None,\n reject_tmax=None, mrk=None, elp=None, hsp=None,\n allow_unknown_format=False, standardize_names=None,\n verbose=None): # noqa: D102\n\n if isinstance(events, str):\n events = read_events(events)\n\n logger.info('Extracting KIT Parameters from %s...' % input_fname)\n input_fname = op.abspath(input_fname)\n self.info, kit_info = get_kit_info(\n input_fname, allow_unknown_format, standardize_names)\n kit_info.update(filename=input_fname)\n self._raw_extras = [kit_info]\n self._filenames = []\n if len(events) != self._raw_extras[0]['n_epochs']:\n raise ValueError('Event list does not match number of epochs.')\n\n if self._raw_extras[0]['acq_type'] == KIT.EPOCHS:\n self._raw_extras[0]['data_length'] = KIT.INT\n else:\n raise TypeError('SQD file contains raw data, not epochs or '\n 'average. Wrong reader.')\n\n if event_id is None: # convert to int to make typing-checks happy\n event_id = {str(e): int(e) for e in np.unique(events[:, 2])}\n\n for key, val in event_id.items():\n if val not in events[:, 2]:\n raise ValueError('No matching events found for %s '\n '(event id %i)' % (key, val))\n\n data = self._read_kit_data()\n assert data.shape == (self._raw_extras[0]['n_epochs'],\n self.info['nchan'],\n self._raw_extras[0]['frame_length'])\n tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin\n super(EpochsKIT, self).__init__(\n self.info, data, events, event_id, tmin, tmax, baseline,\n reject=reject, flat=flat, reject_tmin=reject_tmin,\n reject_tmax=reject_tmax, filename=input_fname, verbose=verbose)\n self.info = _call_digitization(\n info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)\n logger.info('Ready.')\n\n def _read_kit_data(self):\n \"\"\"Read epochs data.\n\n Returns\n -------\n data : array, [channels x samples]\n the data matrix (channels x samples).\n times : array, [samples]\n returns the time values corresponding to the samples.\n \"\"\"\n info = self._raw_extras[0]\n epoch_length = info['frame_length']\n n_epochs = info['n_epochs']\n n_samples = info['n_samples']\n filename = info['filename']\n dtype = info['dtype']\n nchan = info['nchan']\n\n with open(filename, 'rb', buffering=0) as fid:\n fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'])\n count = n_samples * nchan\n data = np.fromfile(fid, dtype=dtype, count=count)\n data = data.reshape((n_samples, nchan)).T\n data = data * info['conv_factor']\n data = data.reshape((nchan, n_epochs, epoch_length))\n data = data.transpose((1, 0, 2))\n\n return data\n\n\ndef _read_dir(fid):\n return dict(offset=np.fromfile(fid, np.uint32, 1)[0],\n size=np.fromfile(fid, np.int32, 1)[0],\n max_count=np.fromfile(fid, np.int32, 1)[0],\n count=np.fromfile(fid, np.int32, 1)[0])\n\n\n@verbose\ndef get_kit_info(rawfile, allow_unknown_format, standardize_names=None,\n verbose=None):\n \"\"\"Extract all the information from the sqd/con file.\n\n Parameters\n ----------\n rawfile : str\n KIT file to 
be read.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n info : instance of Info\n An Info for the instance.\n sqd : dict\n A dict containing all the sqd parameter settings.\n \"\"\"\n sqd = dict()\n sqd['rawfile'] = rawfile\n unsupported_format = False\n sqd['dirs'] = dirs = list()\n with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug\n #\n # directories (0)\n #\n dirs.append(_read_dir(fid))\n dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1))\n assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count']\n\n #\n # system (1)\n #\n fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset'])\n # check file format version\n version, revision = unpack('2i', fid.read(2 * KIT.INT))\n if version < 2 or (version == 2 and revision < 3):\n version_string = \"V%iR%03i\" % (version, revision)\n if allow_unknown_format:\n unsupported_format = True\n logger.warning(\"Force loading KIT format %s\", version_string)\n else:\n raise UnsupportedKITFormat(\n version_string,\n \"SQD file format %s is not officially supported. \"\n \"Set allow_unknown_format=True to load it anyways.\" %\n (version_string,))\n\n sysid = unpack('i', fid.read(KIT.INT))[0]\n # basic info\n system_name = unpack('128s', fid.read(128))[0].decode()\n # model name\n model_name = unpack('128s', fid.read(128))[0].decode()\n # channels\n sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0]\n comment = unpack('256s', fid.read(256))[0].decode()\n create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT))\n fid.seek(KIT.INT * 3, SEEK_CUR) # reserved\n dewar_style = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 3, SEEK_CUR) # spare\n fll_type = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 3, SEEK_CUR) # spare\n trigger_type = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 3, SEEK_CUR) # spare\n adboard_type = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 29, SEEK_CUR) # reserved\n\n if version < 2 or (version == 2 and revision <= 3):\n adc_range = float(unpack('i', fid.read(KIT.INT))[0])\n else:\n adc_range = unpack('d', fid.read(KIT.DOUBLE))[0]\n adc_polarity, adc_allocated, adc_stored = unpack('3i',\n fid.read(3 * KIT.INT))\n system_name = system_name.replace('\\x00', '')\n system_name = system_name.strip().replace('\\n', '/')\n model_name = model_name.replace('\\x00', '')\n model_name = model_name.strip().replace('\\n', '/')\n\n full_version = f'V{version:d}R{revision:03d}'\n logger.debug(\"SQD file basic information:\")\n logger.debug(\"Meg160 version = %s\", full_version)\n logger.debug(\"System ID = %i\", sysid)\n logger.debug(\"System name = %s\", system_name)\n logger.debug(\"Model name = %s\", model_name)\n logger.debug(\"Channel count = %i\", channel_count)\n logger.debug(\"Comment = %s\", comment)\n logger.debug(\"Dewar style = %i\", dewar_style)\n logger.debug(\"FLL type = %i\", fll_type)\n logger.debug(\"Trigger type = %i\", trigger_type)\n logger.debug(\"A/D board type = %i\", adboard_type)\n logger.debug(\"ADC range = +/-%s[V]\", adc_range / 2.)\n logger.debug(\"ADC allocate = %i[bit]\", adc_allocated)\n logger.debug(\"ADC bit = %i[bit]\", adc_stored)\n # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH'\n description = \\\n f'{system_name} ({sysid}) {full_version} {model_name}'\n sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}'))\n\n # check 
that we can read this file\n if fll_type not in KIT.FLL_SETTINGS:\n fll_types = sorted(KIT.FLL_SETTINGS.keys())\n use_fll_type = fll_types[\n np.searchsorted(fll_types, fll_type) - 1]\n warn('Unknown site filter settings (FLL) for system '\n '\"%s\" model \"%s\" (ID %s), will assume FLL %d->%d, check '\n 'your data for correctness, including channel scales and '\n 'filter settings!'\n % (system_name, model_name, sysid, fll_type, use_fll_type))\n fll_type = use_fll_type\n\n #\n # channel information (4)\n #\n chan_dir = dirs[KIT.DIR_INDEX_CHANNELS]\n chan_offset, chan_size = chan_dir['offset'], chan_dir['size']\n sqd['channels'] = channels = []\n exg_gains = list()\n for i in range(channel_count):\n fid.seek(chan_offset + chan_size * i)\n channel_type, = unpack('i', fid.read(KIT.INT))\n # System 52 mislabeled reference channels as NULL. This was fixed\n # in system 53; not sure about 51...\n if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL:\n channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE\n\n if channel_type in KIT.CHANNELS_MEG:\n if channel_type not in KIT.CH_TO_FIFF_COIL:\n raise NotImplementedError(\n \"KIT channel type %i can not be read. Please contact \"\n \"the mne-python developers.\" % channel_type)\n channels.append({\n 'type': channel_type,\n # (x, y, z, theta, phi) for all MEG channels. Some channel\n # types have additional information which we're not using.\n 'loc': np.fromfile(fid, dtype='d', count=5),\n })\n if channel_type in KIT.CHANNEL_NAME_NCHAR:\n fid.seek(16, SEEK_CUR) # misc fields\n channels[-1]['name'] = _read_name(fid, channel_type)\n elif channel_type in KIT.CHANNELS_MISC:\n channel_no, = unpack('i', fid.read(KIT.INT))\n fid.seek(4, SEEK_CUR)\n name = _read_name(fid, channel_type)\n channels.append({\n 'type': channel_type,\n 'no': channel_no,\n 'name': name,\n })\n if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG):\n offset = 6 if channel_type == KIT.CHANNEL_EEG else 8\n fid.seek(offset, SEEK_CUR)\n exg_gains.append(np.fromfile(fid, 'd', 1)[0])\n elif channel_type == KIT.CHANNEL_NULL:\n channels.append({'type': channel_type})\n else:\n raise IOError(\"Unknown KIT channel type: %i\" % channel_type)\n exg_gains = np.array(exg_gains)\n\n #\n # Channel sensitivity information: (5)\n #\n\n # only sensor channels requires gain. 
the additional misc channels\n # (trigger channels, audio and voice channels) are passed\n # through unaffected\n fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset'])\n # (offset [Volt], gain [Tesla/Volt]) for each channel\n sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2)\n sensitivity.shape = (channel_count, 2)\n channel_offset, channel_gain = sensitivity.T\n assert (channel_offset == 0).all() # otherwise we have a problem\n\n #\n # amplifier gain (7)\n #\n fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset'])\n amp_data = unpack('i', fid.read(KIT.INT))[0]\n if fll_type >= 100: # Kapper Type\n # gain: mask bit\n gain1 = (amp_data & 0x00007000) >> 12\n gain2 = (amp_data & 0x70000000) >> 28\n gain3 = (amp_data & 0x07000000) >> 24\n amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3])\n # filter settings\n hpf = (amp_data & 0x00000700) >> 8\n lpf = (amp_data & 0x00070000) >> 16\n bef = (amp_data & 0x00000003) >> 0\n else: # Hanger Type\n # gain\n input_gain = (amp_data & 0x1800) >> 11\n output_gain = (amp_data & 0x0007) >> 0\n amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain]\n # filter settings\n hpf = (amp_data & 0x007) >> 4\n lpf = (amp_data & 0x0700) >> 8\n bef = (amp_data & 0xc000) >> 14\n hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type]\n sqd['highpass'] = KIT.HPFS[hpf_options][hpf]\n sqd['lowpass'] = KIT.LPFS[lpf_options][lpf]\n sqd['notch'] = KIT.BEFS[bef_options][bef]\n\n #\n # Acquisition Parameters (8)\n #\n fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset'])\n sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT))\n sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE))\n if acq_type == KIT.CONTINUOUS:\n # samples_count, = unpack('i', fid.read(KIT.INT))\n fid.seek(KIT.INT, SEEK_CUR)\n sqd['n_samples'], = unpack('i', fid.read(KIT.INT))\n elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS:\n sqd['frame_length'], = unpack('i', fid.read(KIT.INT))\n sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT))\n sqd['average_count'], = unpack('i', fid.read(KIT.INT))\n sqd['n_epochs'], = unpack('i', fid.read(KIT.INT))\n if acq_type == KIT.EVOKED:\n sqd['n_samples'] = sqd['frame_length']\n else:\n sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']\n else:\n raise IOError(\"Invalid acquisition type: %i. Your file is neither \"\n \"continuous nor epoched data.\" % (acq_type,))\n\n #\n # digitization information (12 and 26)\n #\n dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS]\n cor_dir = dirs[KIT.DIR_INDEX_COREG]\n dig = dict()\n hsp = list()\n if dig_dir['count'] > 0 and cor_dir['count'] > 0:\n # directories (0)\n fid.seek(dig_dir['offset'])\n for _ in range(dig_dir['count']):\n name = _read_name(fid, n=8).strip()\n # Sometimes there are mismatches (e.g., AFz vs AFZ) between\n # the channel name and its digitized, name, so let's be case\n # insensitive. 
It will also prevent collisions with HSP\n name = name.lower()\n rr = np.fromfile(fid, 'd', 3)\n if name:\n assert name not in dig\n dig[name] = rr\n else:\n hsp.append(rr)\n\n # nasion, lpa, rpa, HPI in native space\n elp = [dig.pop(key) for key in (\n 'fidnz', 'fidt9', 'fidt10',\n 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')]\n if 'hpi_5' in dig and dig['hpi_5'].any():\n elp.append(dig.pop('hpi_5'))\n elp = np.array(elp)\n hsp = np.array(hsp, float).reshape(-1, 3)\n assert elp.shape in ((7, 3), (8, 3))\n # coregistration\n fid.seek(cor_dir['offset'])\n mrk = np.zeros((elp.shape[0] - 3, 3))\n for _ in range(cor_dir['count']):\n done = np.fromfile(fid, np.int32, 1)[0]\n fid.seek(16 * KIT.DOUBLE + # meg_to_mri\n 16 * KIT.DOUBLE, # mri_to_meg\n SEEK_CUR)\n marker_count = np.fromfile(fid, np.int32, 1)[0]\n if not done:\n continue\n assert marker_count >= len(mrk)\n for mi in range(len(mrk)):\n mri_type, meg_type, mri_done, meg_done = \\\n np.fromfile(fid, np.int32, 4)\n assert meg_done\n fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos\n mrk[mi] = np.fromfile(fid, 'd', 3)\n fid.seek(256, SEEK_CUR) # marker_file (char)\n sqd.update(hsp=hsp, elp=elp, mrk=mrk)\n\n all_names = set(ch.get('name', '') for ch in channels)\n if standardize_names is None and all_names.difference({'', 'EEG'}):\n standardize_names = True\n warn('standardize_names defaults to True in 0.21 but will change '\n 'to False in 0.22', DeprecationWarning)\n\n # precompute conversion factor for reading data\n if unsupported_format:\n if sysid not in LEGACY_AMP_PARAMS:\n raise IOError(\"Legacy parameters for system ID %i unavailable\" %\n (sysid,))\n adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid]\n is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels])\n ad_to_volt = adc_range / (2 ** adc_stored)\n ad_to_tesla = ad_to_volt / amp_gain * channel_gain\n conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt)\n # XXX this is a bit of a hack. Should probably do this more cleanly at\n # some point... the 2 ** (adc_stored - 14) was emperically determined using\n # the test files with known amplitudes. 
The conv_factors need to be\n # replaced by these values otherwise we're off by a factor off 5000.0\n # for the EEG data.\n is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)\n for ch in channels]\n exg_gains /= 2 ** (adc_stored - 14)\n conv_factor[is_exg] = exg_gains\n sqd['conv_factor'] = conv_factor[:, np.newaxis]\n\n # Create raw.info dict for raw fif object with SQD data\n info = _empty_info(float(sqd['sfreq']))\n info.update(meas_date=_stamp_to_dt((create_time, 0)),\n lowpass=sqd['lowpass'],\n highpass=sqd['highpass'], kit_system_id=sysid,\n description=description)\n\n # Creates a list of dicts of meg channels for raw.info\n logger.info('Setting channel info structure...')\n info['chs'] = fiff_channels = []\n channel_index = defaultdict(lambda: 0)\n sqd['eeg_dig'] = OrderedDict()\n for idx, ch in enumerate(channels, 1):\n if ch['type'] in KIT.CHANNELS_MEG:\n ch_name = ch.get('name', '')\n if ch_name == '' or standardize_names:\n ch_name = 'MEG %03d' % idx\n # create three orthogonal vector\n # ch_angles[0]: theta, ch_angles[1]: phi\n theta, phi = np.radians(ch['loc'][3:])\n x = sin(theta) * cos(phi)\n y = sin(theta) * sin(phi)\n z = cos(theta)\n vec_z = np.array([x, y, z])\n vec_z /= linalg.norm(vec_z)\n vec_x = np.zeros(vec_z.size, dtype=np.float64)\n if vec_z[1] < vec_z[2]:\n if vec_z[0] < vec_z[1]:\n vec_x[0] = 1.0\n else:\n vec_x[1] = 1.0\n elif vec_z[0] < vec_z[2]:\n vec_x[0] = 1.0\n else:\n vec_x[2] = 1.0\n vec_x -= np.sum(vec_x * vec_z) * vec_z\n vec_x /= linalg.norm(vec_x)\n vec_y = np.cross(vec_z, vec_x)\n # transform to Neuromag like coordinate space\n vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z))\n vecs = apply_trans(als_ras_trans, vecs)\n unit = FIFF.FIFF_UNIT_T\n loc = vecs.ravel()\n else:\n ch_type_label = KIT.CH_LABEL[ch['type']]\n channel_index[ch_type_label] += 1\n ch_type_index = channel_index[ch_type_label]\n ch_name = ch.get('name', '')\n eeg_name = ch_name.lower()\n # some files have all EEG labeled as EEG\n if ch_name in ('', 'EEG') or standardize_names:\n ch_name = '%s %03i' % (ch_type_label, ch_type_index)\n unit = FIFF.FIFF_UNIT_V\n loc = np.zeros(12)\n if eeg_name and eeg_name in dig:\n loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name]\n fiff_channels.append(dict(\n cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE,\n unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name,\n coord_frame=FIFF.FIFFV_COORD_DEVICE,\n coil_type=KIT.CH_TO_FIFF_COIL[ch['type']],\n kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc))\n info._update_redundant()\n return info, sqd\n\n\ndef _read_name(fid, ch_type=None, n=None):\n n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type]\n return fid.read(n).split(b'\\x00')[0].decode('utf-8')\n\n\n@fill_doc\ndef read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',\n slope='-', stimthresh=1, preload=False, stim_code='binary',\n allow_unknown_format=False, standardize_names=None,\n verbose=None):\n \"\"\"Reader function for Ricoh/KIT conversion to FIF.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | 
str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n stim : list of int | '<' | '>'\n Channel-value correspondence when converting KIT trigger channels to a\n Neuromag-style stim channel. For '<', the largest values are assigned\n to the first channel (default). For '>', the largest values are\n assigned to the last channel. Can also be specified as a list of\n trigger channel indexes.\n slope : '+' | '-'\n How to interpret values on KIT trigger channels when synthesizing a\n Neuromag-style stim channel. With '+', a positive slope (low-to-high)\n is interpreted as an event. With '-', a negative slope (high-to-low)\n is interpreted as an event.\n stimthresh : float\n The threshold level for accepting voltage changes in KIT trigger\n channels as a trigger event.\n %(preload)s\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n raw : instance of RawKIT\n A Raw object containing KIT data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n\n Notes\n -----\n If mrk, hsp or elp are array_like inputs, then the numbers in xyz\n coordinates should be in units of meters.\n \"\"\"\n return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,\n stim=stim, slope=slope, stimthresh=stimthresh,\n preload=preload, stim_code=stim_code,\n allow_unknown_format=allow_unknown_format,\n standardize_names=standardize_names, verbose=verbose)\n\n\n@fill_doc\ndef read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None,\n hsp=None, allow_unknown_format=False,\n standardize_names=None, verbose=None):\n \"\"\"Reader function for Ricoh/KIT epochs files.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n events : array, shape (n_events, 3)\n The events typically returned by the read_events function.\n If some events don't match the events of interest as specified\n by event_id, they will be marked as 'IGNORED' in the drop log.\n event_id : int | list of int | dict | None\n The id of the event to consider. If dict,\n the keys can later be used to access associated events. Example:\n dict(auditory=1, visual=3). If int, a dict will be created with\n the id as string. If a list, all events with the IDs specified\n in the list are used. If None, all events will be used with\n and a dict is created with string integer names corresponding\n to the event id integers.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. 
If more than\n 10,000 points are in the head shape, they are automatically decimated.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n epochs : instance of Epochs\n The epochs.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n epochs = EpochsKIT(input_fname=input_fname, events=events,\n event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,\n allow_unknown_format=allow_unknown_format,\n standardize_names=standardize_names,\n verbose=verbose)\n return epochs\n"
] |
[
[
"numpy.full",
"numpy.array",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.sum",
"numpy.mean",
"numpy.where",
"numpy.radians",
"numpy.arange",
"numpy.fromfile",
"numpy.unique",
"numpy.searchsorted",
"scipy.linalg.norm",
"numpy.cross",
"numpy.vstack"
]
] |
NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding
|
[
"b43fb91cf99ee3ffaf137cd0be87b67448995c9b"
] |
[
"models/transformer.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\r\n# All rights reserved.\r\n#\r\n# This source code is licensed under the license found in the LICENSE file in\r\n# the root directory of this source tree. An additional grant of patent rights\r\n# can be found in the PATENTS file in the same directory.\r\n\r\nimport math\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom numpy.random import uniform\r\nfrom fairseq import options, utils\r\nfrom fairseq.models import (\r\n FairseqEncoder,\r\n FairseqIncrementalDecoder,\r\n FairseqEncoderDecoderModel,\r\n register_model,\r\n register_model_architecture,\r\n)\r\nfrom fairseq.modules import (\r\n AdaptiveSoftmax,\r\n LayerNorm,\r\n MultiheadAttention,\r\n PositionalEmbedding,\r\n SinusoidalPositionalEmbedding,\r\n)\r\nfrom bert import BertTokenizer\r\nDEFAULT_MAX_SOURCE_POSITIONS = 1024\r\nDEFAULT_MAX_TARGET_POSITIONS = 1024\r\n\r\nfrom bert import BertModel\r\n\r\n@register_model('transformer')\r\nclass TransformerModel(FairseqEncoderDecoderModel):\r\n \"\"\"\r\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\r\n <https://arxiv.org/abs/1706.03762>`_.\r\n\r\n Args:\r\n encoder (TransformerEncoder): the encoder\r\n decoder (TransformerDecoder): the decoder\r\n\r\n The Transformer model provides the following named architectures and\r\n command-line arguments:\r\n\r\n .. argparse::\r\n :ref: fairseq.models.transformer_parser\r\n :prog:\r\n \"\"\"\r\n\r\n def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):\r\n super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add model-specific arguments to the parser.\"\"\"\r\n # fmt: off\r\n parser.add_argument('--activation-fn',\r\n choices=utils.get_available_activation_fns(),\r\n help='activation function to use')\r\n parser.add_argument('--dropout', type=float, metavar='D',\r\n help='dropout probability')\r\n parser.add_argument('--attention-dropout', type=float, metavar='D',\r\n help='dropout probability for attention weights')\r\n parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',\r\n help='dropout probability after activation in FFN.')\r\n parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\r\n help='path to pre-trained encoder embedding')\r\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\r\n help='encoder embedding dimension')\r\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\r\n help='encoder embedding dimension for FFN')\r\n parser.add_argument('--encoder-layers', type=int, metavar='N',\r\n help='num encoder layers')\r\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\r\n help='num encoder attention heads')\r\n parser.add_argument('--encoder-normalize-before', action='store_true',\r\n help='apply layernorm before each encoder block')\r\n parser.add_argument('--encoder-learned-pos', action='store_true',\r\n help='use learned positional embeddings in the encoder')\r\n parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\r\n help='path to pre-trained decoder embedding')\r\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\r\n help='decoder embedding dimension')\r\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\r\n help='decoder embedding dimension for FFN')\r\n parser.add_argument('--decoder-layers', type=int, 
metavar='N',\r\n help='num decoder layers')\r\n parser.add_argument('--decoder-attention-heads', type=int, metavar='N',\r\n help='num decoder attention heads')\r\n parser.add_argument('--decoder-learned-pos', action='store_true',\r\n help='use learned positional embeddings in the decoder')\r\n parser.add_argument('--decoder-normalize-before', action='store_true',\r\n help='apply layernorm before each decoder block')\r\n parser.add_argument('--share-decoder-input-output-embed', action='store_true',\r\n help='share decoder input and output embeddings')\r\n parser.add_argument('--share-all-embeddings', action='store_true',\r\n help='share encoder, decoder and output embeddings'\r\n ' (requires shared dictionary and embed dim)')\r\n parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',\r\n help='if set, disables positional embeddings (outside self attention)')\r\n parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',\r\n help='comma separated list of adaptive softmax cutoff points. '\r\n 'Must be used with adaptive_loss criterion'),\r\n parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',\r\n help='sets adaptive softmax dropout for the tail projections')\r\n # fmt: on\r\n\r\n @classmethod\r\n def build_model(cls, args, task):\r\n \"\"\"Build a new model instance.\"\"\"\r\n\r\n # make sure all arguments are present in older models\r\n base_architecture(args)\r\n\r\n if not hasattr(args, 'max_source_positions'):\r\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\r\n if not hasattr(args, 'max_target_positions'):\r\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\r\n\r\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\r\n if len(task.datasets) > 0:\r\n src_berttokenizer = next(iter(task.datasets.values())).berttokenizer\r\n else:\r\n src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)\r\n\r\n def build_embedding(dictionary, embed_dim, path=None):\r\n num_embeddings = len(dictionary)\r\n padding_idx = dictionary.pad()\r\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\r\n # if provided, load from preloaded dictionaries\r\n if path:\r\n embed_dict = utils.parse_embedding(path)\r\n utils.load_embedding(embed_dict, dictionary, emb)\r\n return emb\r\n\r\n if args.share_all_embeddings:\r\n if src_dict != tgt_dict:\r\n raise ValueError('--share-all-embeddings requires a joined dictionary')\r\n if args.encoder_embed_dim != args.decoder_embed_dim:\r\n raise ValueError(\r\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\r\n if args.decoder_embed_path and (\r\n args.decoder_embed_path != args.encoder_embed_path):\r\n raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')\r\n encoder_embed_tokens = build_embedding(\r\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\r\n )\r\n decoder_embed_tokens = encoder_embed_tokens\r\n args.share_decoder_input_output_embed = True\r\n else:\r\n encoder_embed_tokens = build_embedding(\r\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\r\n )\r\n decoder_embed_tokens = build_embedding(\r\n tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\r\n )\r\n bertencoder = BertModel.from_pretrained(args.bert_model_name)\r\n args.bert_out_dim = bertencoder.hidden_size\r\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\r\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\r\n\r\n return TransformerModel(encoder, decoder, 
bertencoder, src_berttokenizer, args.mask_cls_sep, args)\r\n\r\n @classmethod\r\n def build_encoder(cls, args, src_dict, embed_tokens):\r\n return TransformerEncoder(args, src_dict, embed_tokens)\r\n\r\n @classmethod\r\n def build_decoder(cls, args, tgt_dict, embed_tokens):\r\n return TransformerDecoder(args, tgt_dict, embed_tokens)\r\n\r\n@register_model('transformers2')\r\nclass TransformerS2Model(FairseqEncoderDecoderModel):\r\n \"\"\"\r\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\r\n <https://arxiv.org/abs/1706.03762>`_.\r\n\r\n Args:\r\n encoder (TransformerEncoder): the encoder\r\n decoder (TransformerDecoder): the decoder\r\n\r\n The Transformer model provides the following named architectures and\r\n command-line arguments:\r\n\r\n .. argparse::\r\n :ref: fairseq.models.transformer_parser\r\n :prog:\r\n \"\"\"\r\n\r\n def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):\r\n super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add model-specific arguments to the parser.\"\"\"\r\n # fmt: off\r\n parser.add_argument('--activation-fn',\r\n choices=utils.get_available_activation_fns(),\r\n help='activation function to use')\r\n parser.add_argument('--dropout', type=float, metavar='D',\r\n help='dropout probability')\r\n parser.add_argument('--attention-dropout', type=float, metavar='D',\r\n help='dropout probability for attention weights')\r\n parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',\r\n help='dropout probability after activation in FFN.')\r\n parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\r\n help='path to pre-trained encoder embedding')\r\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\r\n help='encoder embedding dimension')\r\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\r\n help='encoder embedding dimension for FFN')\r\n parser.add_argument('--encoder-layers', type=int, metavar='N',\r\n help='num encoder layers')\r\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\r\n help='num encoder attention heads')\r\n parser.add_argument('--encoder-normalize-before', action='store_true',\r\n help='apply layernorm before each encoder block')\r\n parser.add_argument('--encoder-learned-pos', action='store_true',\r\n help='use learned positional embeddings in the encoder')\r\n parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\r\n help='path to pre-trained decoder embedding')\r\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\r\n help='decoder embedding dimension')\r\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\r\n help='decoder embedding dimension for FFN')\r\n parser.add_argument('--decoder-layers', type=int, metavar='N',\r\n help='num decoder layers')\r\n parser.add_argument('--decoder-attention-heads', type=int, metavar='N',\r\n help='num decoder attention heads')\r\n parser.add_argument('--decoder-learned-pos', action='store_true',\r\n help='use learned positional embeddings in the decoder')\r\n parser.add_argument('--decoder-normalize-before', action='store_true',\r\n help='apply layernorm before each decoder block')\r\n parser.add_argument('--share-decoder-input-output-embed', action='store_true',\r\n help='share decoder input and output embeddings')\r\n parser.add_argument('--share-all-embeddings', action='store_true',\r\n 
help='share encoder, decoder and output embeddings'\r\n ' (requires shared dictionary and embed dim)')\r\n parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',\r\n help='if set, disables positional embeddings (outside self attention)')\r\n parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',\r\n help='comma separated list of adaptive softmax cutoff points. '\r\n 'Must be used with adaptive_loss criterion'),\r\n parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',\r\n help='sets adaptive softmax dropout for the tail projections')\r\n # fmt: on\r\n\r\n @classmethod\r\n def build_model(cls, args, task):\r\n \"\"\"Build a new model instance.\"\"\"\r\n\r\n # make sure all arguments are present in older models\r\n base_architecture(args)\r\n\r\n if not hasattr(args, 'max_source_positions'):\r\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\r\n if not hasattr(args, 'max_target_positions'):\r\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\r\n\r\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\r\n if len(task.datasets) > 0:\r\n src_berttokenizer = next(iter(task.datasets.values())).berttokenizer\r\n else:\r\n src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)\r\n\r\n def build_embedding(dictionary, embed_dim, path=None):\r\n num_embeddings = len(dictionary)\r\n padding_idx = dictionary.pad()\r\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\r\n # if provided, load from preloaded dictionaries\r\n if path:\r\n embed_dict = utils.parse_embedding(path)\r\n utils.load_embedding(embed_dict, dictionary, emb)\r\n return emb\r\n\r\n if args.share_all_embeddings:\r\n if src_dict != tgt_dict:\r\n raise ValueError('--share-all-embeddings requires a joined dictionary')\r\n if args.encoder_embed_dim != args.decoder_embed_dim:\r\n raise ValueError(\r\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\r\n if args.decoder_embed_path and (\r\n args.decoder_embed_path != args.encoder_embed_path):\r\n raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')\r\n encoder_embed_tokens = build_embedding(\r\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\r\n )\r\n decoder_embed_tokens = encoder_embed_tokens\r\n args.share_decoder_input_output_embed = True\r\n else:\r\n encoder_embed_tokens = build_embedding(\r\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\r\n )\r\n decoder_embed_tokens = build_embedding(\r\n tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\r\n )\r\n bertencoder = BertModel.from_pretrained(args.bert_model_name)\r\n args.bert_out_dim = bertencoder.hidden_size\r\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\r\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\r\n\r\n return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)\r\n\r\n @classmethod\r\n def build_encoder(cls, args, src_dict, embed_tokens):\r\n return TransformerS2Encoder(args, src_dict, embed_tokens)\r\n\r\n @classmethod\r\n def build_decoder(cls, args, tgt_dict, embed_tokens):\r\n return TransformerDecoder(args, tgt_dict, embed_tokens)\r\n\r\n def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs):\r\n \"\"\"\r\n Run the forward pass for an encoder-decoder model.\r\n\r\n First feed a batch of source tokens through the encoder. 
Then, feed the\r\n encoder output and previous decoder outputs (i.e., input feeding/teacher\r\n forcing) to the decoder to produce the next outputs::\r\n\r\n encoder_out = self.encoder(src_tokens, src_lengths)\r\n return self.decoder(prev_output_tokens, encoder_out)\r\n\r\n Args:\r\n src_tokens (LongTensor): tokens in the source language of shape\r\n `(batch, src_len)`\r\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\r\n prev_output_tokens (LongTensor): previous decoder outputs of shape\r\n `(batch, tgt_len)`, for input feeding/teacher forcing\r\n\r\n Returns:\r\n tuple:\r\n - the decoder's output of shape `(batch, tgt_len, vocab)`\r\n - a dictionary with any model-specific outputs\r\n \"\"\"\r\n bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())\r\n bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask)\r\n bert_encoder_out = bert_encoder_out[self.bert_output_layer]\r\n if self.mask_cls_sep:\r\n bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())\r\n bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())\r\n bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous()\r\n bert_encoder_out = {\r\n 'bert_encoder_out': bert_encoder_out,\r\n 'bert_encoder_padding_mask': bert_encoder_padding_mask,\r\n }\r\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)\r\n decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs)\r\n return decoder_out\r\n\r\n@register_model('transformerstack')\r\nclass TransformerModelStack(FairseqEncoderDecoderModel):\r\n \"\"\"\r\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\r\n <https://arxiv.org/abs/1706.03762>`_.\r\n\r\n Args:\r\n encoder (TransformerEncoder): the encoder\r\n decoder (TransformerDecoder): the decoder\r\n\r\n The Transformer model provides the following named architectures and\r\n command-line arguments:\r\n\r\n .. 
argparse::\r\n :ref: fairseq.models.transformer_parser\r\n :prog:\r\n \"\"\"\r\n\r\n def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False):\r\n super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep)\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add model-specific arguments to the parser.\"\"\"\r\n # fmt: off\r\n parser.add_argument('--activation-fn',\r\n choices=utils.get_available_activation_fns(),\r\n help='activation function to use')\r\n parser.add_argument('--dropout', type=float, metavar='D',\r\n help='dropout probability')\r\n parser.add_argument('--attention-dropout', type=float, metavar='D',\r\n help='dropout probability for attention weights')\r\n parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',\r\n help='dropout probability after activation in FFN.')\r\n parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\r\n help='path to pre-trained encoder embedding')\r\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\r\n help='encoder embedding dimension')\r\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\r\n help='encoder embedding dimension for FFN')\r\n parser.add_argument('--encoder-layers', type=int, metavar='N',\r\n help='num encoder layers')\r\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\r\n help='num encoder attention heads')\r\n parser.add_argument('--encoder-normalize-before', action='store_true',\r\n help='apply layernorm before each encoder block')\r\n parser.add_argument('--encoder-learned-pos', action='store_true',\r\n help='use learned positional embeddings in the encoder')\r\n parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\r\n help='path to pre-trained decoder embedding')\r\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\r\n help='decoder embedding dimension')\r\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\r\n help='decoder embedding dimension for FFN')\r\n parser.add_argument('--decoder-layers', type=int, metavar='N',\r\n help='num decoder layers')\r\n parser.add_argument('--decoder-attention-heads', type=int, metavar='N',\r\n help='num decoder attention heads')\r\n parser.add_argument('--decoder-learned-pos', action='store_true',\r\n help='use learned positional embeddings in the decoder')\r\n parser.add_argument('--decoder-normalize-before', action='store_true',\r\n help='apply layernorm before each decoder block')\r\n parser.add_argument('--share-decoder-input-output-embed', action='store_true',\r\n help='share decoder input and output embeddings')\r\n parser.add_argument('--share-all-embeddings', action='store_true',\r\n help='share encoder, decoder and output embeddings'\r\n ' (requires shared dictionary and embed dim)')\r\n parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',\r\n help='if set, disables positional embeddings (outside self attention)')\r\n parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',\r\n help='comma separated list of adaptive softmax cutoff points. 
'\r\n 'Must be used with adaptive_loss criterion'),\r\n parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',\r\n help='sets adaptive softmax dropout for the tail projections')\r\n\r\n # fmt: on\r\n\r\n @classmethod\r\n def build_model(cls, args, task):\r\n \"\"\"Build a new model instance.\"\"\"\r\n\r\n # make sure all arguments are present in older models\r\n base_architecture(args)\r\n\r\n if not hasattr(args, 'max_source_positions'):\r\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\r\n if not hasattr(args, 'max_target_positions'):\r\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\r\n\r\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\r\n if len(task.datasets) > 0:\r\n src_berttokenizer = next(iter(task.datasets.values())).berttokenizer\r\n else:\r\n src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)\r\n\r\n def build_embedding(dictionary, embed_dim, path=None):\r\n num_embeddings = len(dictionary)\r\n padding_idx = dictionary.pad()\r\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\r\n # if provided, load from preloaded dictionaries\r\n if path:\r\n embed_dict = utils.parse_embedding(path)\r\n utils.load_embedding(embed_dict, dictionary, emb)\r\n return emb\r\n\r\n if args.share_all_embeddings:\r\n if src_dict != tgt_dict:\r\n raise ValueError('--share-all-embeddings requires a joined dictionary')\r\n if args.encoder_embed_dim != args.decoder_embed_dim:\r\n raise ValueError(\r\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\r\n if args.decoder_embed_path and (\r\n args.decoder_embed_path != args.encoder_embed_path):\r\n raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')\r\n encoder_embed_tokens = build_embedding(\r\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\r\n )\r\n decoder_embed_tokens = encoder_embed_tokens\r\n args.share_decoder_input_output_embed = True\r\n else:\r\n encoder_embed_tokens = build_embedding(\r\n src_dict, args.encoder_embed_dim, args.encoder_embed_path\r\n )\r\n decoder_embed_tokens = build_embedding(\r\n tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\r\n )\r\n bertencoder = BertModel.from_pretrained(args.bert_model_name)\r\n args.bert_out_dim = bertencoder.hidden_size\r\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\r\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\r\n\r\n return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep)\r\n\r\n @classmethod\r\n def build_encoder(cls, args, src_dict, embed_tokens):\r\n return TransformerEncoder(args, src_dict, embed_tokens)\r\n\r\n @classmethod\r\n def build_decoder(cls, args, tgt_dict, embed_tokens):\r\n return TransformerDecoderStack(args, tgt_dict, embed_tokens)\r\n\r\nclass TransformerEncoder(FairseqEncoder):\r\n \"\"\"\r\n Transformer encoder consisting of *args.encoder_layers* layers. 
Each layer\r\n is a :class:`TransformerEncoderLayer`.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n dictionary (~fairseq.data.Dictionary): encoding dictionary\r\n embed_tokens (torch.nn.Embedding): input embedding\r\n \"\"\"\r\n\r\n def __init__(self, args, dictionary, embed_tokens):\r\n super().__init__(dictionary)\r\n self.register_buffer('version', torch.Tensor([3]))\r\n\r\n self.dropout = args.dropout\r\n\r\n embed_dim = embed_tokens.embedding_dim\r\n self.padding_idx = embed_tokens.padding_idx\r\n self.max_source_positions = args.max_source_positions\r\n\r\n self.embed_tokens = embed_tokens\r\n self.embed_scale = math.sqrt(embed_dim)\r\n self.embed_positions = PositionalEmbedding(\r\n args.max_source_positions, embed_dim, self.padding_idx,\r\n learned=args.encoder_learned_pos,\r\n ) if not args.no_token_positional_embeddings else None\r\n\r\n self.layers = nn.ModuleList([])\r\n self.layers.extend([\r\n TransformerEncoderLayer(args)\r\n for i in range(args.encoder_layers)\r\n ])\r\n\r\n if args.encoder_normalize_before:\r\n self.layer_norm = LayerNorm(embed_dim)\r\n else:\r\n self.layer_norm = None\r\n\r\n def forward(self, src_tokens, src_lengths):\r\n \"\"\"\r\n Args:\r\n src_tokens (LongTensor): tokens in the source language of shape\r\n `(batch, src_len)`\r\n src_lengths (torch.LongTensor): lengths of each source sentence of\r\n shape `(batch)`\r\n\r\n Returns:\r\n dict:\r\n - **encoder_out** (Tensor): the last encoder layer's output of\r\n shape `(src_len, batch, embed_dim)`\r\n - **encoder_padding_mask** (ByteTensor): the positions of\r\n padding elements of shape `(batch, src_len)`\r\n \"\"\"\r\n # embed tokens and positions\r\n x = self.embed_scale * self.embed_tokens(src_tokens)\r\n if self.embed_positions is not None:\r\n x += self.embed_positions(src_tokens)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n\r\n # compute padding mask\r\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\r\n if not encoder_padding_mask.any():\r\n encoder_padding_mask = None\r\n\r\n # encoder layers\r\n for layer in self.layers:\r\n x = layer(x, encoder_padding_mask)\r\n\r\n if self.layer_norm:\r\n x = self.layer_norm(x)\r\n\r\n return {\r\n 'encoder_out': x, # T x B x C\r\n 'encoder_padding_mask': encoder_padding_mask, # B x T\r\n }\r\n\r\n def reorder_encoder_out(self, encoder_out, bert_outs, new_order):\r\n \"\"\"\r\n Reorder encoder output according to *new_order*.\r\n\r\n Args:\r\n encoder_out: output from the ``forward()`` method\r\n new_order (LongTensor): desired order\r\n\r\n Returns:\r\n *encoder_out* rearranged according to *new_order*\r\n \"\"\"\r\n if encoder_out['encoder_out'] is not None:\r\n encoder_out['encoder_out'] = \\\r\n encoder_out['encoder_out'].index_select(1, new_order)\r\n if encoder_out['encoder_padding_mask'] is not None:\r\n encoder_out['encoder_padding_mask'] = \\\r\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\r\n if bert_outs['bert_encoder_out'] is not None:\r\n bert_outs['bert_encoder_out'] = \\\r\n bert_outs['bert_encoder_out'].index_select(1, new_order)\r\n if bert_outs['bert_encoder_padding_mask'] is not None:\r\n bert_outs['bert_encoder_padding_mask'] = \\\r\n bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)\r\n return encoder_out, bert_outs\r\n\r\n def max_positions(self):\r\n \"\"\"Maximum input length supported by the encoder.\"\"\"\r\n if self.embed_positions is None:\r\n return 
self.max_source_positions\r\n return min(self.max_source_positions, self.embed_positions.max_positions())\r\n\r\n def upgrade_state_dict_named(self, state_dict, name):\r\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\r\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\r\n weights_key = '{}.embed_positions.weights'.format(name)\r\n if weights_key in state_dict:\r\n del state_dict[weights_key]\r\n state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\r\n for i in range(len(self.layers)):\r\n # update layer norms\r\n self.layers[i].upgrade_state_dict_named(state_dict, \"{}.layers.{}\".format(name, i))\r\n\r\n version_key = '{}.version'.format(name)\r\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\r\n # earlier checkpoints did not normalize after the stack of layers\r\n self.layer_norm = None\r\n self.normalize = False\r\n state_dict[version_key] = torch.Tensor([1])\r\n return state_dict\r\n\r\n\r\nclass TransformerS2Encoder(FairseqEncoder):\r\n \"\"\"\r\n Transformer encoder consisting of *args.encoder_layers* layers. Each layer\r\n is a :class:`TransformerEncoderLayer`.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n dictionary (~fairseq.data.Dictionary): encoding dictionary\r\n embed_tokens (torch.nn.Embedding): input embedding\r\n \"\"\"\r\n\r\n def __init__(self, args, dictionary, embed_tokens):\r\n super().__init__(dictionary)\r\n self.register_buffer('version', torch.Tensor([3]))\r\n\r\n self.dropout = args.dropout\r\n \r\n self.output_mask = nn.Softmax(dim = 0)\r\n self.t_layer = nn.Linear(512, 1)\r\n self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings)\r\n \r\n embed_dim = embed_tokens.embedding_dim\r\n self.padding_idx = embed_tokens.padding_idx\r\n self.max_source_positions = args.max_source_positions\r\n\r\n self.embed_tokens = embed_tokens\r\n self.embed_scale = math.sqrt(embed_dim)\r\n self.embed_positions = PositionalEmbedding(\r\n args.max_source_positions, embed_dim, self.padding_idx,\r\n learned=args.encoder_learned_pos,\r\n ) if not args.no_token_positional_embeddings else None\r\n\r\n bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])\r\n bert_gates = [x == 1 for x in bert_gates]\r\n assert len(bert_gates) == args.encoder_layers\r\n\r\n self.layers = nn.ModuleList([])\r\n self.layers.extend([\r\n TransformerS2EncoderLayer(args, bert_gate=bert_gates[i])\r\n for i in range(args.encoder_layers)\r\n ])\r\n \r\n if args.encoder_normalize_before:\r\n self.layer_norm = LayerNorm(embed_dim)\r\n else:\r\n self.layer_norm = None\r\n\r\n self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim))))\r\n \r\n self.mask_layers = nn.ModuleList([])\r\n self.mask_layers.extend([\r\n TransformerEncoderLayer(args)\r\n for i in range(2)\r\n ])\r\n\r\n if args.encoder_normalize_before:\r\n self.mask_layer_norm = LayerNorm(embed_dim)\r\n else:\r\n self.layer_norm = None\r\n ''' \r\n self.x = None\r\n self.unmask_output = None\r\n self.mask_output = None\r\n self.encoder_vocab_output = None\r\n\r\n self.backwards = 0\r\n '''\r\n self.i = 0\r\n\r\n def forward(self, src_tokens, src_lengths, bert_encoder_out):\r\n \"\"\"\r\n Args:\r\n src_tokens (LongTensor): tokens in the source language of shape\r\n `(batch, src_len)`\r\n src_lengths (torch.LongTensor): lengths of each source sentence of\r\n shape `(batch)`\r\n\r\n Returns:\r\n dict:\r\n - **encoder_out** (Tensor): the last encoder layer's output of\r\n shape 
`(src_len, batch, embed_dim)`\r\n - **encoder_padding_mask** (ByteTensor): the positions of\r\n padding elements of shape `(batch, src_len)`\r\n \"\"\"\r\n # embed tokens and positions\r\n x = self.embed_scale * self.embed_tokens(src_tokens)\r\n\r\n if self.embed_positions is not None:\r\n x += self.embed_positions(src_tokens)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # B x T x C -> T x B x C\r\n \r\n \r\n # T x B mask model\r\n \r\n\r\n ###########\r\n ###########\r\n ###########\r\n '''\r\n mask_output = self.mask(src_tokens , x)\r\n p = mask_output\r\n p = p.transpose(0, 1)\r\n t_p = torch.argsort(p,dim=1)\r\n ratio = 0.2\r\n self.ratio = ratio\r\n p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p))\r\n self.p_mask = p_mask\r\n p_mask = p_mask.unsqueeze(-1).transpose(0,1)\r\n\r\n self.mask_output = p\r\n \r\n \r\n if self.training:\r\n x = x * p_mask.detach()\r\n else:\r\n x = x\r\n ###########\r\n ###########\r\n ###########\r\n # t_p[t_p>t_p.size*ratio] = 1\r\n # t_p[t_p<=t_p.size*ratio] = 0\r\n # t_p.permute(1,0)\r\n \r\n \r\n # model.encoder.mask_output \r\n '''\r\n\r\n x = x.transpose(0, 1)\r\n # compute padding mask\r\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\r\n if not encoder_padding_mask.any():\r\n encoder_padding_mask = None\r\n\r\n # encoder layers\r\n for layer in self.layers:\r\n x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])\r\n\r\n if self.layer_norm:\r\n x = self.layer_norm(x)\r\n\r\n # if self.training:\r\n '''\r\n self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)\r\n '''\r\n '''\r\n ##########################\r\n if self.i%1==0:\r\n import scipy.io as scio\r\n self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)\r\n scio.savemat(\"/home/iojhui/bert-nmt/data\"+str(self.i)+\".mat\", {'mask_output':self.mask_output.detach().cpu().numpy(),\"src_tokens\":src_tokens.cpu().numpy()})\r\n \r\n\r\n\r\n self.i+=1\r\n ########################\r\n '''\r\n return {\r\n 'encoder_out': x, # T x B x C\r\n 'encoder_padding_mask': encoder_padding_mask, # B x T\r\n }\r\n def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out):\r\n \"\"\"\r\n Args:\r\n src_tokens (LongTensor): tokens in the source language of shape\r\n `(batch, src_len)`\r\n src_lengths (torch.LongTensor): lengths of each source sentence of\r\n shape `(batch)`\r\n\r\n Returns:\r\n dict:\r\n - **encoder_out** (Tensor): the last encoder layer's output of\r\n shape `(src_len, batch, embed_dim)`\r\n - **encoder_padding_mask** (ByteTensor): the positions of\r\n padding elements of shape `(batch, src_len)`\r\n \"\"\"\r\n # embed tokens and positions\r\n self.src_tokens = src_tokens\r\n x = self.embed_scale * self.embed_tokens(src_tokens)\r\n '''\r\n ratio = 0.3\r\n mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False)\r\n \r\n \r\n if mask is not None:\r\n '''\r\n '''\r\n if x.size(1)<10:\r\n mask = [4]\r\n else:\r\n mask = [7,9]\r\n x[:, mask] = self.mask_embedding\r\n \r\n '''\r\n mask_output = self.mask(src_tokens , x)\r\n p = mask_output\r\n p = p\r\n t_p = torch.argsort(p,dim=1)\r\n ratio = 0.2\r\n self.ratio = ratio\r\n p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p))\r\n self.p_mask = p_mask\r\n p_mask = p_mask.unsqueeze(-1)\r\n\r\n self.mask_output = p\r\n \r\n \r\n x = x * p_mask.detach()\r\n \r\n\r\n if 
self.embed_positions is not None:\r\n x += self.embed_positions(src_tokens)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n \r\n \r\n # compute padding mask\r\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\r\n if not encoder_padding_mask.any():\r\n encoder_padding_mask = None\r\n\r\n # encoder layers\r\n for layer in self.layers:\r\n x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])\r\n\r\n if self.layer_norm:\r\n x = self.layer_norm(x)\r\n encoder_vocab_output = self.output_vocab_linear(x)\r\n self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1)\r\n self.token = src_tokens\r\n return encoder_vocab_output\r\n \r\n def mask(self, src_tokens, x):\r\n x = x.transpose(0, 1)\r\n # compute padding mask\r\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\r\n if not encoder_padding_mask.any():\r\n encoder_padding_mask = None\r\n\r\n # encoder layers\r\n for layer in self.mask_layers:\r\n x = layer(x, encoder_padding_mask)\r\n\r\n if self.layer_norm:\r\n x = self.mask_layer_norm(x)\r\n x = self.t_layer(x).squeeze(-1)\r\n if encoder_padding_mask is not None:\r\n x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf')))\r\n return self.output_mask(x).transpose(0, 1)\r\n \r\n def reorder_encoder_out(self, encoder_out, bert_outs, new_order):\r\n \"\"\"\r\n Reorder encoder output according to *new_order*.\r\n\r\n Args:\r\n encoder_out: output from the ``forward()`` method\r\n new_order (LongTensor): desired order\r\n\r\n Returns:\r\n *encoder_out* rearranged according to *new_order*\r\n \"\"\"\r\n if encoder_out['encoder_out'] is not None:\r\n encoder_out['encoder_out'] = \\\r\n encoder_out['encoder_out'].index_select(1, new_order)\r\n if encoder_out['encoder_padding_mask'] is not None:\r\n encoder_out['encoder_padding_mask'] = \\\r\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\r\n if bert_outs['bert_encoder_out'] is not None:\r\n bert_outs['bert_encoder_out'] = \\\r\n bert_outs['bert_encoder_out'].index_select(1, new_order)\r\n if bert_outs['bert_encoder_padding_mask'] is not None:\r\n bert_outs['bert_encoder_padding_mask'] = \\\r\n bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)\r\n return encoder_out, bert_outs\r\n\r\n def max_positions(self):\r\n \"\"\"Maximum input length supported by the encoder.\"\"\"\r\n if self.embed_positions is None:\r\n return self.max_source_positions\r\n return min(self.max_source_positions, self.embed_positions.max_positions())\r\n\r\n def upgrade_state_dict_named(self, state_dict, name):\r\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\r\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\r\n weights_key = '{}.embed_positions.weights'.format(name)\r\n if weights_key in state_dict:\r\n del state_dict[weights_key]\r\n state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\r\n for i in range(len(self.layers)):\r\n # update layer norms\r\n self.layers[i].upgrade_state_dict_named(state_dict, \"{}.layers.{}\".format(name, i))\r\n\r\n version_key = '{}.version'.format(name)\r\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\r\n # earlier checkpoints did not normalize after the stack of layers\r\n self.layer_norm = None\r\n self.normalize = False\r\n state_dict[version_key] = torch.Tensor([1])\r\n return 
state_dict\r\n\r\n\r\n\r\nclass TransformerDecoder(FairseqIncrementalDecoder):\r\n \"\"\"\r\n Transformer decoder consisting of *args.decoder_layers* layers. Each layer\r\n is a :class:`TransformerDecoderLayer`.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n dictionary (~fairseq.data.Dictionary): decoding dictionary\r\n embed_tokens (torch.nn.Embedding): output embedding\r\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\r\n (default: False).\r\n \"\"\"\r\n\r\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\r\n super().__init__(dictionary)\r\n self.register_buffer('version', torch.Tensor([3]))\r\n\r\n self.dropout = args.dropout\r\n self.share_input_output_embed = args.share_decoder_input_output_embed\r\n\r\n input_embed_dim = embed_tokens.embedding_dim\r\n embed_dim = args.decoder_embed_dim\r\n self.output_embed_dim = args.decoder_output_dim\r\n\r\n padding_idx = embed_tokens.padding_idx\r\n self.max_target_positions = args.max_target_positions\r\n\r\n self.embed_tokens = embed_tokens\r\n self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim\r\n\r\n self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None\r\n\r\n self.embed_positions = PositionalEmbedding(\r\n args.max_target_positions, embed_dim, padding_idx,\r\n learned=args.decoder_learned_pos,\r\n ) if not args.no_token_positional_embeddings else None\r\n\r\n bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])\r\n bert_gates = [x == 1 for x in bert_gates]\r\n assert len(bert_gates) == args.decoder_layers\r\n print('bert_gates', bert_gates)\r\n self.layers = nn.ModuleList([])\r\n decoder_no_bert = getattr(args, 'decoder_no_bert', False)\r\n if decoder_no_bert:\r\n self.layers.extend([\r\n TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])\r\n for i in range(args.decoder_layers)\r\n ])\r\n else:\r\n self.layers.extend([\r\n TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])\r\n for i in range(args.decoder_layers)\r\n ])\r\n\r\n self.adaptive_softmax = None\r\n\r\n self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \\\r\n if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None\r\n\r\n if args.adaptive_softmax_cutoff is not None:\r\n self.adaptive_softmax = AdaptiveSoftmax(\r\n len(dictionary),\r\n self.output_embed_dim,\r\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\r\n dropout=args.adaptive_softmax_dropout,\r\n adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,\r\n factor=args.adaptive_softmax_factor,\r\n tie_proj=args.tie_adaptive_proj,\r\n )\r\n elif not self.share_input_output_embed:\r\n self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))\r\n nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)\r\n\r\n if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):\r\n self.layer_norm = LayerNorm(embed_dim)\r\n else:\r\n self.layer_norm = None\r\n \r\n\r\n def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):\r\n \"\"\"\r\n Args:\r\n prev_output_tokens (LongTensor): previous decoder outputs of shape\r\n `(batch, tgt_len)`, for input feeding/teacher forcing\r\n encoder_out (Tensor, optional): output from the encoder, used for\r\n encoder-side attention\r\n incremental_state (dict): dictionary used 
for storing state during\r\n :ref:`Incremental decoding`\r\n\r\n Returns:\r\n tuple:\r\n - the decoder's output of shape `(batch, tgt_len, vocab)`\r\n - a dictionary with any model-specific outputs\r\n \"\"\"\r\n x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)\r\n x = self.output_layer(x)\r\n return x, extra\r\n\r\n def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):\r\n \"\"\"\r\n Similar to *forward* but only return features.\r\n\r\n Returns:\r\n tuple:\r\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\r\n - a dictionary with any model-specific outputs\r\n \"\"\"\r\n # embed positions\r\n positions = self.embed_positions(\r\n prev_output_tokens,\r\n incremental_state=incremental_state,\r\n ) if self.embed_positions is not None else None\r\n\r\n if incremental_state is not None:\r\n prev_output_tokens = prev_output_tokens[:, -1:]\r\n if positions is not None:\r\n positions = positions[:, -1:]\r\n\r\n # embed tokens and positions\r\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\r\n\r\n if self.project_in_dim is not None:\r\n x = self.project_in_dim(x)\r\n\r\n if positions is not None:\r\n x += positions\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n attn = None\r\n\r\n inner_states = [x]\r\n\r\n # decoder layers\r\n for layer in self.layers:\r\n x, attn = layer(\r\n x,\r\n encoder_out['encoder_out'] if encoder_out is not None else None,\r\n encoder_out['encoder_padding_mask'] if encoder_out is not None else None,\r\n bert_encoder_out['bert_encoder_out'],\r\n bert_encoder_out['bert_encoder_padding_mask'],\r\n incremental_state,\r\n self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,\r\n )\r\n inner_states.append(x)\r\n\r\n if self.layer_norm:\r\n x = self.layer_norm(x)\r\n\r\n # T x B x C -> B x T x C\r\n x = x.transpose(0, 1)\r\n\r\n if self.project_out_dim is not None:\r\n x = self.project_out_dim(x)\r\n\r\n return x, {'attn': attn, 'inner_states': inner_states}\r\n\r\n def output_layer(self, features, **kwargs):\r\n \"\"\"Project features to the vocabulary size.\"\"\"\r\n if self.adaptive_softmax is None:\r\n # project back to size of vocabulary\r\n if self.share_input_output_embed:\r\n return F.linear(features, self.embed_tokens.weight)\r\n else:\r\n return F.linear(features, self.embed_out)\r\n else:\r\n return features\r\n\r\n def max_positions(self):\r\n \"\"\"Maximum output length supported by the decoder.\"\"\"\r\n if self.embed_positions is None:\r\n return self.max_target_positions\r\n return min(self.max_target_positions, self.embed_positions.max_positions())\r\n\r\n def buffered_future_mask(self, tensor):\r\n dim = tensor.size(0)\r\n if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:\r\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\r\n if self._future_mask.size(0) < dim:\r\n self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)\r\n return self._future_mask[:dim, :dim]\r\n\r\n def upgrade_state_dict_named(self, state_dict, name):\r\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\r\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\r\n weights_key = '{}.embed_positions.weights'.format(name)\r\n if weights_key in state_dict:\r\n 
del state_dict[weights_key]\r\n state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\r\n\r\n for i in range(len(self.layers)):\r\n # update layer norms\r\n layer_norm_map = {\r\n '0': 'self_attn_layer_norm',\r\n '1': 'encoder_attn_layer_norm',\r\n '2': 'final_layer_norm'\r\n }\r\n for old, new in layer_norm_map.items():\r\n for m in ('weight', 'bias'):\r\n k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)\r\n if k in state_dict:\r\n state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]\r\n del state_dict[k]\r\n\r\n version_key = '{}.version'.format(name)\r\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\r\n # earlier checkpoints did not normalize after the stack of layers\r\n self.layer_norm = None\r\n self.normalize = False\r\n state_dict[version_key] = torch.Tensor([1])\r\n\r\n return state_dict\r\n\r\nclass TransformerDecoderStack(FairseqIncrementalDecoder):\r\n \"\"\"\r\n Transformer decoder consisting of *args.decoder_layers* layers. Each layer\r\n is a :class:`TransformerDecoderLayer`.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n dictionary (~fairseq.data.Dictionary): decoding dictionary\r\n embed_tokens (torch.nn.Embedding): output embedding\r\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\r\n (default: False).\r\n \"\"\"\r\n\r\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\r\n super().__init__(dictionary)\r\n self.register_buffer('version', torch.Tensor([3]))\r\n\r\n self.dropout = args.dropout\r\n self.share_input_output_embed = args.share_decoder_input_output_embed\r\n\r\n input_embed_dim = embed_tokens.embedding_dim\r\n embed_dim = args.decoder_embed_dim\r\n self.output_embed_dim = args.decoder_output_dim\r\n\r\n padding_idx = embed_tokens.padding_idx\r\n self.max_target_positions = args.max_target_positions\r\n\r\n self.embed_tokens = embed_tokens\r\n self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim\r\n\r\n self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None\r\n\r\n self.embed_positions = PositionalEmbedding(\r\n args.max_target_positions, embed_dim, padding_idx,\r\n learned=args.decoder_learned_pos,\r\n ) if not args.no_token_positional_embeddings else None\r\n\r\n self.layers = nn.ModuleList([])\r\n self.layers.extend([\r\n TransformerDecoderLayerStack(args, no_encoder_attn)\r\n for _ in range(args.decoder_layers)\r\n ])\r\n\r\n self.adaptive_softmax = None\r\n\r\n self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \\\r\n if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None\r\n\r\n if args.adaptive_softmax_cutoff is not None:\r\n self.adaptive_softmax = AdaptiveSoftmax(\r\n len(dictionary),\r\n self.output_embed_dim,\r\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\r\n dropout=args.adaptive_softmax_dropout,\r\n adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,\r\n factor=args.adaptive_softmax_factor,\r\n tie_proj=args.tie_adaptive_proj,\r\n )\r\n elif not self.share_input_output_embed:\r\n self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))\r\n nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)\r\n\r\n if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):\r\n self.layer_norm = LayerNorm(embed_dim)\r\n else:\r\n self.layer_norm = None\r\n\r\n\r\n 
def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):\r\n \"\"\"\r\n Args:\r\n prev_output_tokens (LongTensor): previous decoder outputs of shape\r\n `(batch, tgt_len)`, for input feeding/teacher forcing\r\n encoder_out (Tensor, optional): output from the encoder, used for\r\n encoder-side attention\r\n incremental_state (dict): dictionary used for storing state during\r\n :ref:`Incremental decoding`\r\n\r\n Returns:\r\n tuple:\r\n - the decoder's output of shape `(batch, tgt_len, vocab)`\r\n - a dictionary with any model-specific outputs\r\n \"\"\"\r\n x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)\r\n x = self.output_layer(x)\r\n return x, extra\r\n\r\n def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):\r\n \"\"\"\r\n Similar to *forward* but only return features.\r\n\r\n Returns:\r\n tuple:\r\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\r\n - a dictionary with any model-specific outputs\r\n \"\"\"\r\n # embed positions\r\n positions = self.embed_positions(\r\n prev_output_tokens,\r\n incremental_state=incremental_state,\r\n ) if self.embed_positions is not None else None\r\n\r\n if incremental_state is not None:\r\n prev_output_tokens = prev_output_tokens[:, -1:]\r\n if positions is not None:\r\n positions = positions[:, -1:]\r\n\r\n # embed tokens and positions\r\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\r\n\r\n if self.project_in_dim is not None:\r\n x = self.project_in_dim(x)\r\n\r\n if positions is not None:\r\n x += positions\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n attn = None\r\n\r\n inner_states = [x]\r\n\r\n # decoder layers\r\n for layer in self.layers:\r\n x, attn = layer(\r\n x,\r\n encoder_out['encoder_out'] if encoder_out is not None else None,\r\n encoder_out['encoder_padding_mask'] if encoder_out is not None else None,\r\n bert_encoder_out['bert_encoder_out'],\r\n bert_encoder_out['bert_encoder_padding_mask'],\r\n incremental_state,\r\n self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,\r\n )\r\n inner_states.append(x)\r\n\r\n if self.layer_norm:\r\n x = self.layer_norm(x)\r\n\r\n # T x B x C -> B x T x C\r\n x = x.transpose(0, 1)\r\n\r\n if self.project_out_dim is not None:\r\n x = self.project_out_dim(x)\r\n\r\n return x, {'attn': attn, 'inner_states': inner_states}\r\n\r\n def output_layer(self, features, **kwargs):\r\n \"\"\"Project features to the vocabulary size.\"\"\"\r\n if self.adaptive_softmax is None:\r\n # project back to size of vocabulary\r\n if self.share_input_output_embed:\r\n return F.linear(features, self.embed_tokens.weight)\r\n else:\r\n return F.linear(features, self.embed_out)\r\n else:\r\n return features\r\n\r\n def max_positions(self):\r\n \"\"\"Maximum output length supported by the decoder.\"\"\"\r\n if self.embed_positions is None:\r\n return self.max_target_positions\r\n return min(self.max_target_positions, self.embed_positions.max_positions())\r\n\r\n def buffered_future_mask(self, tensor):\r\n dim = tensor.size(0)\r\n if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:\r\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\r\n if self._future_mask.size(0) < dim:\r\n self._future_mask = 
torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)\r\n return self._future_mask[:dim, :dim]\r\n\r\n def upgrade_state_dict_named(self, state_dict, name):\r\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\r\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\r\n weights_key = '{}.embed_positions.weights'.format(name)\r\n if weights_key in state_dict:\r\n del state_dict[weights_key]\r\n state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)\r\n\r\n for i in range(len(self.layers)):\r\n # update layer norms\r\n layer_norm_map = {\r\n '0': 'self_attn_layer_norm',\r\n '1': 'encoder_attn_layer_norm',\r\n '2': 'final_layer_norm'\r\n }\r\n for old, new in layer_norm_map.items():\r\n for m in ('weight', 'bias'):\r\n k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)\r\n if k in state_dict:\r\n state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]\r\n del state_dict[k]\r\n\r\n version_key = '{}.version'.format(name)\r\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\r\n # earlier checkpoints did not normalize after the stack of layers\r\n self.layer_norm = None\r\n self.normalize = False\r\n state_dict[version_key] = torch.Tensor([1])\r\n\r\n return state_dict\r\n\r\nclass TransformerEncoderLayer(nn.Module):\r\n \"\"\"Encoder layer block.\r\n\r\n In the original paper each operation (multi-head attention or FFN) is\r\n postprocessed with: `dropout -> add residual -> layernorm`. In the\r\n tensor2tensor code they suggest that learning is more robust when\r\n preprocessing each layer with layernorm and postprocessing with:\r\n `dropout -> add residual`. We default to the approach in the paper, but the\r\n tensor2tensor approach can be enabled by setting\r\n *args.encoder_normalize_before* to ``True``.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n \"\"\"\r\n\r\n def __init__(self, args):\r\n super().__init__()\r\n self.embed_dim = args.encoder_embed_dim\r\n self.self_attn = MultiheadAttention(\r\n self.embed_dim, args.encoder_attention_heads,\r\n dropout=args.attention_dropout, self_attention=True\r\n )\r\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\r\n self.dropout = args.dropout\r\n self.activation_fn = utils.get_activation_fn(\r\n activation=getattr(args, 'activation_fn', 'relu')\r\n )\r\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\r\n if self.activation_dropout == 0:\r\n # for backwards compatibility with models that use args.relu_dropout\r\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\r\n self.normalize_before = args.encoder_normalize_before\r\n self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)\r\n self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)\r\n self.final_layer_norm = LayerNorm(self.embed_dim)\r\n\r\n def upgrade_state_dict_named(self, state_dict, name):\r\n \"\"\"\r\n Rename layer norm states from `...layer_norms.0.weight` to\r\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\r\n `...final_layer_norm.weight`\r\n \"\"\"\r\n layer_norm_map = {\r\n '0': 'self_attn_layer_norm',\r\n '1': 'final_layer_norm'\r\n }\r\n for old, new in layer_norm_map.items():\r\n for m in ('weight', 'bias'):\r\n k = '{}.layer_norms.{}.{}'.format(name, old, m)\r\n if k in state_dict:\r\n state_dict[\r\n '{}.{}.{}'.format(name, new, m)\r\n ] = state_dict[k]\r\n del state_dict[k]\r\n\r\n def forward(self, x, encoder_padding_mask):\r\n \"\"\"\r\n 
Args:\r\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\r\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\r\n `(batch, src_len)` where padding elements are indicated by ``1``.\r\n\r\n Returns:\r\n encoded output of shape `(batch, src_len, embed_dim)`\r\n \"\"\"\r\n residual = x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\r\n x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\r\n \r\n self.attn_weight = attn_weight\r\n\r\n residual = x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\r\n x = self.activation_fn(self.fc1(x))\r\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\r\n x = self.fc2(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\r\n return x\r\n\r\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\r\n assert before ^ after\r\n if after ^ self.normalize_before:\r\n return layer_norm(x)\r\n else:\r\n return x\r\n\r\nclass TransformerS2EncoderLayer(nn.Module):\r\n \"\"\"Encoder layer block.\r\n\r\n In the original paper each operation (multi-head attention or FFN) is\r\n postprocessed with: `dropout -> add residual -> layernorm`. In the\r\n tensor2tensor code they suggest that learning is more robust when\r\n preprocessing each layer with layernorm and postprocessing with:\r\n `dropout -> add residual`. We default to the approach in the paper, but the\r\n tensor2tensor approach can be enabled by setting\r\n *args.encoder_normalize_before* to ``True``.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n \"\"\"\r\n\r\n def __init__(self, args, bert_gate=True):\r\n super().__init__()\r\n self.embed_dim = args.encoder_embed_dim\r\n self.self_attn = MultiheadAttention(\r\n self.embed_dim, args.encoder_attention_heads,\r\n dropout=args.attention_dropout, self_attention=True\r\n )\r\n bert_out_dim = args.bert_out_dim\r\n self.bert_attn = MultiheadAttention(\r\n self.embed_dim, args.encoder_attention_heads,\r\n kdim=bert_out_dim, vdim=bert_out_dim,\r\n dropout=args.attention_dropout,\r\n )\r\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\r\n self.dropout = args.dropout\r\n self.activation_fn = utils.get_activation_fn(\r\n activation=getattr(args, 'activation_fn', 'relu')\r\n )\r\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\r\n if self.activation_dropout == 0:\r\n # for backwards compatibility with models that use args.relu_dropout\r\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\r\n self.normalize_before = args.encoder_normalize_before\r\n self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)\r\n self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)\r\n self.final_layer_norm = LayerNorm(self.embed_dim)\r\n self.encoder_ratio = args.encoder_ratio\r\n self.bert_ratio = args.bert_ratio\r\n\r\n self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)\r\n self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)\r\n assert self.encoder_bert_dropout_ratio >= 0. 
and self.encoder_bert_dropout_ratio <= 0.5\r\n self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)\r\n\r\n if not bert_gate:\r\n self.bert_ratio = 0.\r\n self.encoder_bert_dropout = False\r\n self.encoder_bert_mixup = False\r\n\r\n def upgrade_state_dict_named(self, state_dict, name):\r\n \"\"\"\r\n Rename layer norm states from `...layer_norms.0.weight` to\r\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\r\n `...final_layer_norm.weight`\r\n \"\"\"\r\n layer_norm_map = {\r\n '0': 'self_attn_layer_norm',\r\n '1': 'final_layer_norm'\r\n }\r\n for old, new in layer_norm_map.items():\r\n for m in ('weight', 'bias'):\r\n k = '{}.layer_norms.{}.{}'.format(name, old, m)\r\n if k in state_dict:\r\n state_dict[\r\n '{}.{}.{}'.format(name, new, m)\r\n ] = state_dict[k]\r\n del state_dict[k]\r\n\r\n def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask):\r\n \"\"\"\r\n Args:\r\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\r\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\r\n `(batch, src_len)` where padding elements are indicated by ``1``.\r\n\r\n Returns:\r\n encoded output of shape `(batch, src_len, embed_dim)`\r\n \"\"\"\r\n residual = x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\r\n x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)\r\n x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask)\r\n x1 = F.dropout(x1, p=self.dropout, training=self.training)\r\n x2 = F.dropout(x2, p=self.dropout, training=self.training)\r\n ratios = self.get_ratio()\r\n x = residual + ratios[0] * x1 + ratios[1] * x2\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\r\n\r\n residual = x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\r\n x = self.activation_fn(self.fc1(x))\r\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\r\n x = self.fc2(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\r\n return x\r\n\r\n def get_ratio(self):\r\n if self.encoder_bert_dropout:\r\n frand = float(uniform(0, 1))\r\n if self.encoder_bert_mixup and self.training:\r\n return [frand, 1 - frand]\r\n if frand < self.encoder_bert_dropout_ratio and self.training:\r\n return [1, 0]\r\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\r\n return [0, 1]\r\n else:\r\n return [0.5, 0.5]\r\n else:\r\n return [self.encoder_ratio, self.bert_ratio]\r\n\r\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\r\n assert before ^ after\r\n if after ^ self.normalize_before:\r\n return layer_norm(x)\r\n else:\r\n return x\r\n\r\nclass TransformerDecoderLayer(nn.Module):\r\n \"\"\"Decoder layer block.\r\n\r\n In the original paper each operation (multi-head attention, encoder\r\n attention or FFN) is postprocessed with: `dropout -> add residual ->\r\n layernorm`. In the tensor2tensor code they suggest that learning is more\r\n robust when preprocessing each layer with layernorm and postprocessing with:\r\n `dropout -> add residual`. 
We default to the approach in the paper, but the\r\n tensor2tensor approach can be enabled by setting\r\n *args.decoder_normalize_before* to ``True``.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\r\n (default: False).\r\n \"\"\"\r\n\r\n def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):\r\n super().__init__()\r\n self.embed_dim = args.decoder_embed_dim\r\n self.self_attn = MultiheadAttention(\r\n embed_dim=self.embed_dim,\r\n num_heads=args.decoder_attention_heads,\r\n dropout=args.attention_dropout,\r\n add_bias_kv=add_bias_kv,\r\n add_zero_attn=add_zero_attn,\r\n self_attention=True\r\n )\r\n self.dropout = args.dropout\r\n self.activation_fn = utils.get_activation_fn(\r\n activation=getattr(args, 'activation_fn', 'relu')\r\n )\r\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\r\n if self.activation_dropout == 0:\r\n # for backwards compatibility with models that use args.relu_dropout\r\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\r\n self.normalize_before = args.decoder_normalize_before\r\n\r\n # use layerNorm rather than FusedLayerNorm for exporting.\r\n # char_inputs can be used to determint this.\r\n # TODO remove this once we update apex with the fix\r\n export = getattr(args, 'char_inputs', False)\r\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n\r\n if no_encoder_attn:\r\n self.encoder_attn = None\r\n self.encoder_attn_layer_norm = None\r\n else:\r\n self.encoder_attn = MultiheadAttention(\r\n self.embed_dim, args.decoder_attention_heads,\r\n dropout=args.attention_dropout, encoder_decoder_attention=True\r\n )\r\n bert_out_dim = args.bert_out_dim\r\n self.bert_attn = MultiheadAttention(\r\n self.embed_dim, args.decoder_attention_heads,\r\n kdim=bert_out_dim, vdim=bert_out_dim,\r\n dropout=args.attention_dropout, encoder_decoder_attention=True\r\n )\r\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n\r\n self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)\r\n self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)\r\n\r\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n self.need_attn = True\r\n\r\n self.onnx_trace = False\r\n self.encoder_ratio = args.encoder_ratio\r\n self.bert_ratio = args.bert_ratio\r\n\r\n self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)\r\n self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)\r\n assert self.encoder_bert_dropout_ratio >= 0. 
and self.encoder_bert_dropout_ratio <= 0.5\r\n self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)\r\n\r\n if not bert_gate:\r\n self.bert_ratio = 0.\r\n self.encoder_bert_dropout = False\r\n self.encoder_bert_mixup = False\r\n\r\n def prepare_for_onnx_export_(self):\r\n self.onnx_trace = True\r\n\r\n def forward(\r\n self,\r\n x,\r\n encoder_out=None,\r\n encoder_padding_mask=None,\r\n bert_encoder_out=None,\r\n bert_encoder_padding_mask=None,\r\n incremental_state=None,\r\n prev_self_attn_state=None,\r\n prev_attn_state=None,\r\n self_attn_mask=None,\r\n self_attn_padding_mask=None,\r\n ):\r\n \"\"\"\r\n Args:\r\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\r\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\r\n `(batch, src_len)` where padding elements are indicated by ``1``.\r\n\r\n Returns:\r\n encoded output of shape `(batch, src_len, embed_dim)`\r\n \"\"\"\r\n residual = x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\r\n if prev_self_attn_state is not None:\r\n if incremental_state is None:\r\n incremental_state = {}\r\n prev_key, prev_value = prev_self_attn_state\r\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\r\n self.self_attn._set_input_buffer(incremental_state, saved_state)\r\n x, attn = self.self_attn(\r\n query=x,\r\n key=x,\r\n value=x,\r\n key_padding_mask=self_attn_padding_mask,\r\n incremental_state=incremental_state,\r\n need_weights=False,\r\n attn_mask=self_attn_mask,\r\n )\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\r\n\r\n if self.encoder_attn is not None:\r\n residual = x\r\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)\r\n if prev_attn_state is not None:\r\n if incremental_state is None:\r\n incremental_state = {}\r\n prev_key, prev_value = prev_attn_state\r\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\r\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\r\n x1, attn = self.encoder_attn(\r\n query=x,\r\n key=encoder_out,\r\n value=encoder_out,\r\n key_padding_mask=encoder_padding_mask,\r\n incremental_state=incremental_state,\r\n static_kv=True,\r\n need_weights=(not self.training and self.need_attn),\r\n )\r\n x2, _ = self.bert_attn(\r\n query=x,\r\n key=bert_encoder_out,\r\n value=bert_encoder_out,\r\n key_padding_mask=bert_encoder_padding_mask,\r\n incremental_state=incremental_state,\r\n static_kv=True,\r\n need_weights=(not self.training and self.need_attn),\r\n )\r\n x1 = F.dropout(x1, p=self.dropout, training=self.training)\r\n x2 = F.dropout(x2, p=self.dropout, training=self.training)\r\n ratios = self.get_ratio()\r\n x = residual + ratios[0] * x1 + ratios[1] * x2\r\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)\r\n\r\n residual = x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\r\n x = self.activation_fn(self.fc1(x))\r\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\r\n x = self.fc2(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\r\n if self.onnx_trace and incremental_state is not None:\r\n saved_state = self.self_attn._get_input_buffer(incremental_state)\r\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"]\r\n return x, attn, self_attn_state\r\n return x, attn\r\n\r\n def 
get_ratio(self):\r\n if self.encoder_bert_dropout:\r\n frand = float(uniform(0, 1))\r\n if self.encoder_bert_mixup and self.training:\r\n return [frand, 1 - frand]\r\n if frand < self.encoder_bert_dropout_ratio and self.training:\r\n return [1, 0]\r\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\r\n return [0, 1]\r\n else:\r\n return [0.5, 0.5]\r\n else:\r\n return [self.encoder_ratio, self.bert_ratio]\r\n\r\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\r\n assert before ^ after\r\n if after ^ self.normalize_before:\r\n return layer_norm(x)\r\n else:\r\n return x\r\n\r\n def make_generation_fast_(self, need_attn=False, **kwargs):\r\n self.need_attn = need_attn\r\n\r\nclass TransformerStandardDecoderLayer(nn.Module):\r\n \"\"\"Decoder layer block.\r\n\r\n In the original paper each operation (multi-head attention, encoder\r\n attention or FFN) is postprocessed with: `dropout -> add residual ->\r\n layernorm`. In the tensor2tensor code they suggest that learning is more\r\n robust when preprocessing each layer with layernorm and postprocessing with:\r\n `dropout -> add residual`. We default to the approach in the paper, but the\r\n tensor2tensor approach can be enabled by setting\r\n *args.decoder_normalize_before* to ``True``.\r\n\r\n Args:\r\n args (argparse.Namespace): parsed command-line arguments\r\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\r\n (default: False).\r\n \"\"\"\r\n\r\n def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):\r\n super().__init__()\r\n self.embed_dim = args.decoder_embed_dim\r\n self.self_attn = MultiheadAttention(\r\n embed_dim=self.embed_dim,\r\n num_heads=args.decoder_attention_heads,\r\n dropout=args.attention_dropout,\r\n add_bias_kv=add_bias_kv,\r\n add_zero_attn=add_zero_attn,\r\n self_attention=True\r\n )\r\n self.dropout = args.dropout\r\n self.activation_fn = utils.get_activation_fn(\r\n activation=getattr(args, 'activation_fn', 'relu')\r\n )\r\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\r\n if self.activation_dropout == 0:\r\n # for backwards compatibility with models that use args.relu_dropout\r\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\r\n self.normalize_before = args.decoder_normalize_before\r\n\r\n # use layerNorm rather than FusedLayerNorm for exporting.\r\n # char_inputs can be used to determint this.\r\n # TODO remove this once we update apex with the fix\r\n export = getattr(args, 'char_inputs', False)\r\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n\r\n if no_encoder_attn:\r\n self.encoder_attn = None\r\n self.encoder_attn_layer_norm = None\r\n else:\r\n self.encoder_attn = MultiheadAttention(\r\n self.embed_dim, args.decoder_attention_heads,\r\n dropout=args.attention_dropout, encoder_decoder_attention=True\r\n )\r\n # bert_out_dim = args.bert_out_dim\r\n # self.bert_attn = MultiheadAttention(\r\n # self.embed_dim, args.decoder_attention_heads,\r\n # kdim=bert_out_dim, vdim=bert_out_dim,\r\n # dropout=args.attention_dropout, encoder_decoder_attention=True\r\n # )\r\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n\r\n self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)\r\n self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)\r\n\r\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n self.need_attn = True\r\n\r\n self.onnx_trace = False\r\n self.encoder_ratio = 
args.encoder_ratio\r\n self.bert_ratio = args.bert_ratio\r\n if not bert_gate:\r\n self.bert_ratio = 0.\r\n self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)\r\n self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)\r\n assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5\r\n self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)\r\n\r\n def prepare_for_onnx_export_(self):\r\n self.onnx_trace = True\r\n\r\n def forward(\r\n self,\r\n x,\r\n encoder_out=None,\r\n encoder_padding_mask=None,\r\n bert_encoder_out=None,\r\n bert_encoder_padding_mask=None,\r\n incremental_state=None,\r\n prev_self_attn_state=None,\r\n prev_attn_state=None,\r\n self_attn_mask=None,\r\n self_attn_padding_mask=None,\r\n ):\r\n \"\"\"\r\n Args:\r\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\r\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\r\n `(batch, src_len)` where padding elements are indicated by ``1``.\r\n\r\n Returns:\r\n encoded output of shape `(batch, src_len, embed_dim)`\r\n \"\"\"\r\n residual = x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\r\n if prev_self_attn_state is not None:\r\n if incremental_state is None:\r\n incremental_state = {}\r\n prev_key, prev_value = prev_self_attn_state\r\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\r\n self.self_attn._set_input_buffer(incremental_state, saved_state)\r\n x, attn = self.self_attn(\r\n query=x,\r\n key=x,\r\n value=x,\r\n key_padding_mask=self_attn_padding_mask,\r\n incremental_state=incremental_state,\r\n need_weights=False,\r\n attn_mask=self_attn_mask,\r\n )\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\r\n\r\n if self.encoder_attn is not None:\r\n residual = x\r\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)\r\n if prev_attn_state is not None:\r\n if incremental_state is None:\r\n incremental_state = {}\r\n prev_key, prev_value = prev_attn_state\r\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\r\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\r\n x1, attn = self.encoder_attn(\r\n query=x,\r\n key=encoder_out,\r\n value=encoder_out,\r\n key_padding_mask=encoder_padding_mask,\r\n incremental_state=incremental_state,\r\n static_kv=True,\r\n need_weights=(not self.training and self.need_attn),\r\n )\r\n # x2, _ = self.bert_attn(\r\n # query=x,\r\n # key=bert_encoder_out,\r\n # value=bert_encoder_out,\r\n # key_padding_mask=bert_encoder_padding_mask,\r\n # incremental_state=incremental_state,\r\n # static_kv=True,\r\n # need_weights=(not self.training and self.need_attn),\r\n # )\r\n x1 = F.dropout(x1, p=self.dropout, training=self.training)\r\n # x2 = F.dropout(x2, p=self.dropout, training=self.training)\r\n # ratios = self.get_ratio()\r\n x = residual + x1\r\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)\r\n\r\n residual = x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\r\n x = self.activation_fn(self.fc1(x))\r\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\r\n x = self.fc2(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\r\n if self.onnx_trace and incremental_state is not None:\r\n saved_state = 
self.self_attn._get_input_buffer(incremental_state)\r\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"]\r\n return x, attn, self_attn_state\r\n return x, attn\r\n\r\n def get_ratio(self):\r\n if self.encoder_bert_dropout:\r\n frand = float(uniform(0, 1))\r\n if self.encoder_bert_mixup and self.training:\r\n return [frand, 1 - frand]\r\n if frand < self.encoder_bert_dropout_ratio and self.training:\r\n return [1, 0]\r\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\r\n return [0, 1]\r\n else:\r\n return [0.5, 0.5]\r\n else:\r\n return [self.encoder_ratio, self.bert_ratio]\r\n\r\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\r\n assert before ^ after\r\n if after ^ self.normalize_before:\r\n return layer_norm(x)\r\n else:\r\n return x\r\n\r\n def make_generation_fast_(self, need_attn=False, **kwargs):\r\n self.need_attn = need_attn\r\nclass TransformerDecoderLayerStack(nn.Module):\r\n def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):\r\n super().__init__()\r\n self.embed_dim = args.decoder_embed_dim\r\n self.self_attn = MultiheadAttention(\r\n embed_dim=self.embed_dim,\r\n num_heads=args.decoder_attention_heads,\r\n dropout=args.attention_dropout,\r\n add_bias_kv=add_bias_kv,\r\n add_zero_attn=add_zero_attn,\r\n )\r\n self.dropout = args.dropout\r\n self.activation_fn = utils.get_activation_fn(\r\n activation=getattr(args, 'activation_fn', 'relu')\r\n )\r\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\r\n if self.activation_dropout == 0:\r\n # for backwards compatibility with models that use args.relu_dropout\r\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\r\n self.normalize_before = args.decoder_normalize_before\r\n\r\n # use layerNorm rather than FusedLayerNorm for exporting.\r\n # char_inputs can be used to determint this.\r\n # TODO remove this once we update apex with the fix\r\n export = getattr(args, 'char_inputs', False)\r\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n\r\n if no_encoder_attn:\r\n self.encoder_attn = None\r\n self.encoder_attn_layer_norm = None\r\n else:\r\n self.encoder_attn = MultiheadAttention(\r\n self.embed_dim, args.decoder_attention_heads,\r\n dropout=args.attention_dropout, encoder_decoder_attention=True\r\n )\r\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n bert_out_dim = args.bert_out_dim\r\n self.bert_attn = MultiheadAttention(\r\n self.embed_dim, args.decoder_attention_heads,\r\n kdim=bert_out_dim, vdim=bert_out_dim,\r\n dropout=args.attention_dropout, encoder_decoder_attention=True\r\n )\r\n self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n self.bert_first = args.bert_first\r\n self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)\r\n self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)\r\n\r\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\r\n self.need_attn = True\r\n\r\n self.onnx_trace = False\r\n\r\n def prepare_for_onnx_export_(self):\r\n self.onnx_trace = True\r\n\r\n def forward(\r\n self,\r\n x,\r\n encoder_out=None,\r\n encoder_padding_mask=None,\r\n bert_encoder_out=None,\r\n bert_encoder_padding_mask=None,\r\n incremental_state=None,\r\n prev_self_attn_state=None,\r\n prev_attn_state=None,\r\n self_attn_mask=None,\r\n self_attn_padding_mask=None,\r\n ):\r\n \"\"\"\r\n Args:\r\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\r\n encoder_padding_mask (ByteTensor): 
binary ByteTensor of shape\r\n `(batch, src_len)` where padding elements are indicated by ``1``.\r\n Returns:\r\n encoded output of shape `(batch, src_len, embed_dim)`\r\n \"\"\"\r\n residual = x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\r\n if prev_self_attn_state is not None:\r\n if incremental_state is None:\r\n incremental_state = {}\r\n prev_key, prev_value = prev_self_attn_state\r\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\r\n self.self_attn._set_input_buffer(incremental_state, saved_state)\r\n x, attn = self.self_attn(\r\n query=x,\r\n key=x,\r\n value=x,\r\n key_padding_mask=self_attn_padding_mask,\r\n incremental_state=incremental_state,\r\n need_weights=False,\r\n attn_mask=self_attn_mask,\r\n )\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\r\n\r\n if self.encoder_attn is not None:\r\n\r\n if prev_attn_state is not None:\r\n if incremental_state is None:\r\n incremental_state = {}\r\n prev_key, prev_value = prev_attn_state\r\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\r\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\r\n def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state):\r\n residual = x\r\n x = self.maybe_layer_norm(layer_norm, x, before=True)\r\n x, attn = attnlayer(\r\n query=x,\r\n key=keyorvalue,\r\n value=keyorvalue,\r\n key_padding_mask=key_padding,\r\n incremental_state=incremental_state,\r\n static_kv=True,\r\n need_weights=(not self.training and self.need_attn),\r\n )\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(layer_norm, x, after=True)\r\n return x, attn\r\n if self.bert_first:\r\n x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,\r\n bert_encoder_padding_mask, incremental_state)\r\n x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,\r\n incremental_state)\r\n else:\r\n x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,\r\n incremental_state)\r\n x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,\r\n bert_encoder_padding_mask, incremental_state)\r\n\r\n\r\n residual = x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\r\n x = self.activation_fn(self.fc1(x))\r\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\r\n x = self.fc2(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = residual + x\r\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\r\n if self.onnx_trace and incremental_state is not None:\r\n saved_state = self.self_attn._get_input_buffer(incremental_state)\r\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"]\r\n return x, attn, self_attn_state\r\n return x, attn\r\n\r\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\r\n assert before ^ after\r\n if after ^ self.normalize_before:\r\n return layer_norm(x)\r\n else:\r\n return x\r\n\r\n def make_generation_fast_(self, need_attn=False, **kwargs):\r\n self.need_attn = need_attn\r\n\r\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\r\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\r\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\r\n 
nn.init.constant_(m.weight[padding_idx], 0)\r\n return m\r\n\r\n\r\ndef Linear(in_features, out_features, bias=True):\r\n m = nn.Linear(in_features, out_features, bias)\r\n nn.init.xavier_uniform_(m.weight)\r\n if bias:\r\n nn.init.constant_(m.bias, 0.)\r\n return m\r\n\r\n\r\n@register_model_architecture('transformer', 'transformer')\r\ndef base_architecture(args):\r\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\r\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\r\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\r\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\r\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)\r\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\r\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)\r\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\r\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\r\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\r\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\r\n args.dropout = getattr(args, 'dropout', 0.1)\r\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\r\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\r\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)\r\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\r\n args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)\r\n args.adaptive_input = getattr(args, 'adaptive_input', False)\r\n\r\n args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)\r\n args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)\r\n\r\n\r\n@register_model_architecture('transformers2', 'transformers2')\r\ndef base_architecture_s2(args):\r\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\r\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\r\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\r\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\r\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)\r\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\r\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)\r\n args.decoder_learned_pos = getattr(args, 
'decoder_learned_pos', False)\r\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\r\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\r\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\r\n args.dropout = getattr(args, 'dropout', 0.1)\r\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\r\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\r\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)\r\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\r\n args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)\r\n args.adaptive_input = getattr(args, 'adaptive_input', False)\r\n\r\n args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)\r\n args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)\r\n\r\n@register_model_architecture('transformerstack', 'transformerstack')\r\ndef base_stack_architecture(args):\r\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\r\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\r\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\r\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\r\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)\r\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\r\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)\r\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\r\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\r\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\r\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\r\n args.dropout = getattr(args, 'dropout', 0.1)\r\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\r\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\r\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)\r\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\r\n args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)\r\n args.adaptive_input = getattr(args, 'adaptive_input', False)\r\n\r\n args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)\r\n args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)\r\n\r\n\r\n\r\n\r\n@register_model_architecture('transformer', 'transformer_iwslt_de_en')\r\ndef transformer_iwslt_de_en(args):\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\r\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\r\n args.decoder_embed_dim = getattr(args, 
'decoder_embed_dim', 512)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\r\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\r\n base_architecture(args)\r\n\r\n@register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en')\r\ndef transformer_s2_iwslt_de_en(args):\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\r\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\r\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\r\n base_architecture_s2(args)\r\n\r\n@register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en')\r\ndef transformerstack_iwslt_de_en(args):\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\r\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\r\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\r\n base_stack_architecture(args)\r\n\r\n@register_model_architecture('transformers2', 'transformer_wmt_en_de')\r\ndef transformer_wmt_en_de(args):\r\n base_architecture_s2(args)\r\n\r\n\r\n# parameters used in the \"Attention Is All You Need\" paper (Vaswani et al., 2017)\r\n@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')\r\ndef transformer_vaswani_wmt_en_de_big(args):\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\r\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\r\n args.dropout = getattr(args, 'dropout', 0.3)\r\n base_architecture(args)\r\n\r\n@register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big')\r\ndef transformer_s2_vaswani_wmt_en_de_big(args):\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\r\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\r\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\r\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\r\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\r\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\r\n args.dropout = getattr(args, 'dropout', 0.3)\r\n base_architecture_s2(args)\r\n\r\n@register_model_architecture('transformer', 
'transformer_vaswani_wmt_en_fr_big')\r\ndef transformer_vaswani_wmt_en_fr_big(args):\r\n args.dropout = getattr(args, 'dropout', 0.1)\r\n transformer_vaswani_wmt_en_de_big(args)\r\n\r\n\r\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big')\r\ndef transformer_wmt_en_de_big(args):\r\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\r\n transformer_vaswani_wmt_en_de_big(args)\r\n\r\n\r\n# default parameters used in tensor2tensor implementation\r\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')\r\ndef transformer_wmt_en_de_big_t2t(args):\r\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)\r\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)\r\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\r\n args.activation_dropout = getattr(args, 'activation_dropout', 0.1)\r\n transformer_vaswani_wmt_en_de_big(args)\r\n"
] |
[
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.ModuleList",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"torch.nn.functional.dropout",
"torch.argsort",
"torch.nn.init.xavier_uniform_",
"torch.FloatTensor",
"torch.nn.init.normal_",
"torch.nn.functional.linear",
"numpy.random.uniform",
"torch.ones_like",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.Tensor",
"torch.nn.Embedding"
]
] |
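The fairseq-derived model code embedded in the record above fuses two attention branches in each layer: the usual encoder (or encoder-decoder) attention and an extra attention over BERT outputs, combined as `residual + ratios[0] * x1 + ratios[1] * x2`. The sketch below restates that gating rule ("drop-net") from the embedded `get_ratio` method as a standalone Python function; the function name and keyword arguments here are illustrative only and not part of the original class API.

```python
# Minimal sketch of the drop-net gating used by the BERT-fused layers above.
# During training the layer either mixes the two attention outputs with fixed
# ratios, randomly keeps only one branch, or (with mixup) draws a random
# convex combination.
from random import uniform


def get_ratio(training, encoder_ratio=1.0, bert_ratio=1.0,
              bert_dropout=False, bert_dropout_ratio=0.25, bert_mixup=False):
    """Return [weight_for_encoder_attn, weight_for_bert_attn]."""
    if bert_dropout:
        frand = float(uniform(0, 1))
        if bert_mixup and training:
            return [frand, 1 - frand]      # mixup: random convex combination
        if frand < bert_dropout_ratio and training:
            return [1, 0]                  # keep only the encoder-attention branch
        elif frand > 1 - bert_dropout_ratio and training:
            return [0, 1]                  # keep only the BERT-attention branch
        else:
            return [0.5, 0.5]              # otherwise average the two branches
    return [encoder_ratio, bert_ratio]     # gating disabled / inference


# usage inside a layer's forward pass (x1: encoder attention, x2: BERT attention):
#   r = get_ratio(self.training, ...)
#   x = residual + r[0] * x1 + r[1] * x2
```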
OrenBochman/probability
|
[
"eb4cff2c441e52f0604236b30d422577e498349c"
] |
[
"tensorflow_probability/python/distributions/zipf_test.py"
] |
[
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nfrom scipy import stats\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\ntfd = tfp.distributions\n\n\n@test_util.test_all_tf_execution_regimes\nclass ZipfTest(test_util.TestCase):\n\n def assertBetween(self, x, minimum, maximum):\n self.assertGreaterEqual(x, minimum)\n self.assertLessEqual(x, maximum)\n\n def assertAllBetween(self, a, minval, maxval, atol=1e-6):\n a = self._GetNdArray(a)\n minval = self._GetNdArray(minval)\n maxval = self._GetNdArray(maxval)\n\n self.assertEqual(a.shape, minval.shape)\n self.assertEqual(a.shape, maxval.shape)\n\n for idx, _ in np.ndenumerate(a):\n self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)\n\n def testZipfShape(self):\n power = tf.constant([3.0] * 5)\n zipf = tfd.Zipf(power=power, validate_args=True)\n\n self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))\n self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))\n self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])\n self.assertEqual(zipf.event_shape, tf.TensorShape([]))\n\n def testInvalidPower(self):\n invalid_powers = [-.02, 0.5, -2., .99, 1.]\n for power in invalid_powers:\n with self.assertRaisesOpError(\"Condition x > y\"):\n zipf = tfd.Zipf(power=power, validate_args=True)\n self.evaluate(zipf.mean())\n\n def testNanPower(self):\n zipf = tfd.Zipf(power=np.nan, validate_args=False)\n self.assertAllNan(self.evaluate(zipf.power))\n\n def testValidPower_ImplicitlyConvertsToFloat32(self):\n powers = [2, 10, 1.1]\n for power in powers:\n zipf = tfd.Zipf(power=power, validate_args=True)\n self.assertEqual(zipf.power.dtype, tf.float32)\n\n def testEventDtype(self):\n for power_dtype in [tf.float32, tf.float64]:\n for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:\n power_dtype = tf.float32\n event_dtype = tf.int32\n power = tf.constant(5., dtype=power_dtype)\n zipf = tfd.Zipf(power=power, dtype=event_dtype, validate_args=True)\n self.assertEqual(zipf.dtype, event_dtype)\n self.assertEqual(\n zipf.dtype, zipf.sample(10, seed=test_util.test_seed()).dtype)\n self.assertEqual(\n zipf.dtype, zipf.sample(1, seed=test_util.test_seed()).dtype)\n self.assertEqual(zipf.dtype, zipf.mode().dtype)\n\n def testInvalidEventDtype(self):\n with self.assertRaisesWithPredicateMatch(\n TypeError, \"power.dtype .* not a supported .* type\"):\n power = tf.constant(5., dtype=tf.float16)\n zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)\n self.evaluate(zipf.sample(seed=test_util.test_seed()))\n\n def testZipfLogPmf_InvalidArgs(self):\n power = tf.constant([4.0])\n # Non-integer samples are rejected if validate_args is True and\n 
# interpolate_nondiscrete is False.\n zipf = tfd.Zipf(\n power=power, interpolate_nondiscrete=False, validate_args=True)\n non_integer_samples = [0.99, 4.5, 5.001, 1e-5]\n for x in non_integer_samples:\n\n with self.assertRaisesOpError(\"cannot contain fractional components\"):\n self.evaluate(zipf.log_prob(x))\n\n with self.assertRaisesOpError(\"cannot contain fractional components\"):\n self.evaluate(zipf.prob(x))\n\n # Negative samples are rejected if validate_args is True.\n zipf = tfd.Zipf(power=power, validate_args=True)\n negative_samples = [-3, -2, -1]\n for x in negative_samples:\n with self.assertRaisesOpError(\"must be non-negative\"):\n self.evaluate(zipf.log_prob(x))\n\n with self.assertRaisesOpError(\"must be non-negative\"):\n self.evaluate(zipf.prob(x))\n\n def testZipfLogPmf_IntegerArgs(self):\n batch_size = 9\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((batch_size,), log_pmf.shape)\n self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((batch_size,), pmf.shape)\n self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))\n\n def testZipfLogPmf_NonIntegerArgs(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((batch_size,), log_pmf.shape)\n\n # Check that log_pmf(x) of tfd.Zipf is between the values of\n # stats.zipf.logpmf for ceil(x) and floor(x).\n log_pmf_values = self.evaluate(log_pmf)\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),\n stats.zipf.logpmf(floor_x, power_v))\n\n # Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for\n # ceil(x) and floor(x).\n pmf = zipf.prob(x)\n self.assertEqual((batch_size,), pmf.shape)\n\n pmf_values = self.evaluate(pmf)\n self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),\n stats.zipf.pmf(floor_x, power_v))\n\n def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]\n\n zipf = tfd.Zipf(\n power=power, interpolate_nondiscrete=False, validate_args=False)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((batch_size,), log_pmf.shape)\n\n log_pmf_values = self.evaluate(log_pmf)\n self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((batch_size,), pmf.shape)\n\n pmf_values = self.evaluate(pmf)\n self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))\n\n def testZipfLogPmfMultidimensional_IntegerArgs(self):\n batch_size = 6\n power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T\n\n zipf = tfd.Zipf(power=power, validate_args=True)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((6, 3), log_pmf.shape)\n self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((6, 3), pmf.shape)\n self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))\n\n def testZipfLogPmfMultidimensional_NonIntegerArgs(self):\n batch_size = 6\n power = 
tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = tfd.Zipf(power=power, validate_args=True)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((6, 3), log_pmf.shape)\n self.assertAllBetween(\n self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),\n stats.zipf.logpmf(floor_x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((6, 3), pmf.shape)\n self.assertAllBetween(\n self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),\n stats.zipf.pmf(floor_x, power_v))\n\n def testZipfCdf_IntegerArgs(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))\n\n def testZipfCdf_NonIntegerArgsNoInterpolation(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]\n\n zipf = tfd.Zipf(\n power=power, interpolate_nondiscrete=False, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))\n\n def testZipfCdf_NonIntegerArgsInterpolated(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllBetween(\n self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),\n stats.zipf.logcdf(ceil_x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllBetween(\n self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),\n stats.zipf.cdf(ceil_x, power_v))\n\n def testZipfCdf_NonIntegerArgs(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllBetween(\n self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),\n stats.zipf.logcdf(ceil_x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllBetween(\n self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),\n stats.zipf.cdf(ceil_x, power_v))\n\n def testZipfCdfMultidimensional_IntegerArgs(self):\n batch_size = 6\n power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T\n\n zipf = tfd.Zipf(power=power, validate_args=True)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((6, 3), log_cdf.shape)\n self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((6, 3), cdf.shape)\n 
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))\n\n def testZipfCdfMultidimensional_NonIntegerArgs(self):\n batch_size = 6\n power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = tfd.Zipf(power=power, validate_args=True)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((6, 3), log_cdf.shape)\n self.assertAllBetween(\n self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),\n stats.zipf.logcdf(ceil_x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((6, 3), cdf.shape)\n self.assertAllBetween(\n self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),\n stats.zipf.cdf(ceil_x, power_v))\n\n def testZipfMean(self):\n power_v = [2.0, 3.0, 2.5]\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n self.assertEqual((3,), zipf.mean().shape)\n self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))\n\n def testZipfVariance(self):\n power_v = [4.0, 3.0, 5.5] # var is undefined for power <= 3\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n self.assertEqual((3,), zipf.variance().shape)\n stat_vars = np.vectorize(stats.zipf.var)(power_v)\n self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)\n\n def testZipfStd(self):\n power_v = [4.0, 3.5, 4.5]\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n self.assertEqual((3,), zipf.stddev().shape)\n stat_stddevs = np.vectorize(stats.zipf.std)(power_v)\n self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)\n\n def testZipfMode(self):\n power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]\n zipf = tfd.Zipf(power=power_v, validate_args=False)\n self.assertEqual((6,), zipf.mode().shape)\n self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))\n\n def testZipfSample(self):\n power_v = 5.\n n = int(500e4)\n\n for power_dtype in [tf.float32, tf.float64]:\n power = tf.constant(power_v, dtype=power_dtype)\n for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:\n zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)\n samples = zipf.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n,), samples.shape)\n self.assertEqual((n,), sample_values.shape)\n self.assertAllClose(\n sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)\n self.assertAllClose(\n sample_values.std(), stats.zipf.std(power_v), rtol=.03)\n\n def testZipfSample_ValidateArgs(self):\n power_v = 3.\n n = int(100e3)\n\n for power_dtype in [tf.float32, tf.float64]:\n power = tf.constant(power_v, dtype=power_dtype)\n\n for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:\n zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)\n samples = zipf.sample(n, seed=test_util.test_seed())\n self.evaluate(samples)\n\n def testZipfSampleMultidimensionalMean(self):\n power_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n n = int(100e3)\n samples = zipf.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n, 1, 10,), samples.shape)\n self.assertEqual((n, 1, 10,), sample_values.shape)\n\n # stats.zipf wants float64 params.\n stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))\n self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)\n\n def testZipfSampleMultidimensionalStd(self):\n power_v = np.array([np.arange(5, 10, dtype=np.float32)]) # 1 x 5\n zipf = tfd.Zipf(power=power_v, 
validate_args=True)\n n = int(100e4)\n samples = zipf.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n, 1, 5), samples.shape)\n self.assertEqual((n, 1, 5), sample_values.shape)\n\n # stats.zipf wants float64 params.\n stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))\n self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)\n\n # Test that sampling with the same seed twice gives the same results.\n def testZipfSampleMultipleTimes(self):\n n = 1000\n seed = test_util.test_seed()\n power = 1.5\n\n zipf1 = tfd.Zipf(power=power, name=\"zipf1\", validate_args=True)\n tf.random.set_seed(seed)\n samples1 = self.evaluate(zipf1.sample(n, seed=seed))\n\n zipf2 = tfd.Zipf(power=power, name=\"zipf2\", validate_args=True)\n tf.random.set_seed(seed)\n samples2 = self.evaluate(zipf2.sample(n, seed=seed))\n\n self.assertAllEqual(samples1, samples2)\n\n def testZipfSample_AvoidsInfiniteLoop(self):\n zipf = tfd.Zipf(power=1., validate_args=False)\n n = 1000\n self.evaluate(zipf.sample(n, seed=test_util.test_seed()))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"scipy.stats.zipf.logcdf",
"numpy.array",
"numpy.ndenumerate",
"numpy.ceil",
"numpy.vectorize",
"numpy.ones_like",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.test.main",
"scipy.stats.zipf.std",
"scipy.stats.zipf.pmf",
"scipy.stats.zipf.logpmf",
"tensorflow.compat.v2.random.set_seed",
"numpy.arange",
"scipy.stats.zipf.mean",
"scipy.stats.zipf.cdf",
"tensorflow.compat.v2.constant",
"numpy.floor"
]
] |
rochamatcomp/python-rocha
|
[
"bbf8b559f8052f8c081be29ef21d3e1f697477c3"
] |
[
"tests/test_plots.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n:mod:`plots` -- Tests data plots\n================================\n\n.. module:: plots\n :platform: Unix, Windows\n :synopsis: Tests of the raster plots and processed data plots.\n.. moduleauthor:: Andre Rocha <rocha.matcomp@gmail.com>\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.testing.decorators import image_comparison\n\nfrom src.rocha import plots\n\n@image_comparison(baseline_images=['test_plot'],\n extensions=['png'])\ndef test_plot():\n \"\"\"\n Test the rasters plot as multiples subplots.\n \"\"\"\n rasters = ['data/relatives/forest_111.tif',\n 'data/relatives/forest_112.tif',\n 'data/relatives/forest_113.tif',\n 'data/relatives/forest_121.tif',\n 'data/relatives/forest_122.tif',\n 'data/relatives/forest_123.tif',\n 'data/relatives/forest_211.tif',\n 'data/relatives/forest_212.tif',\n 'data/relatives/forest_213.tif',\n 'data/relatives/forest_221.tif',\n 'data/relatives/forest_222.tif',\n 'data/relatives/forest_223.tif']\n\n title = 'Mean precipitation (mm/day)'\n subtitles = ['HadGEM2 RCP4.5', 'HadGEM2 RCP8.5', 'MIROC5 RCP4.5', 'MIROC5 RCP8.5']\n labels = ['2011-2040', '2041-2070', '2071-2100']\n\n color = 'RdYlBu_r'\n\n rows = 3\n cols = 4\n\n plots.maps(rasters, rows, cols, color, title, subtitles, labels)"
] |
[
[
"matplotlib.testing.decorators.image_comparison"
]
] |
harshita1000/crest
|
[
"64918b85d31e7939fce874431b6059c0c9cca7b7"
] |
[
"third_party/augment_ops.py"
] |
[
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Various ops for augmentation.\"\"\"\n\nimport math\n\nimport tensorflow as tf\nfrom tensorflow_addons import image as tfa_image\n\n# Default replace value\nREPLACE_VALUE = 128\n\n\ndef blend(image1, image2, factor):\n \"\"\"Blend image1 and image2 using 'factor'.\n\n A value of factor 0.0 means only image1 is used.\n A value of 1.0 means only image2 is used. A value between 0.0 and\n 1.0 means we linearly interpolate the pixel values between the two\n images. A value greater than 1.0 \"extrapolates\" the difference\n between the two pixel values, and we clip the results to values\n between 0 and 255.\n\n Args:\n image1: An image Tensor.\n image2: An image Tensor.\n factor: A floating point value above 0.0.\n\n Returns:\n A blended image Tensor.\n \"\"\"\n image1 = tf.cast(image1, tf.float32)\n image2 = tf.cast(image2, tf.float32)\n return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8)\n\n\ndef wrap(image):\n \"\"\"Returns 'image' with an extra channel set to all 1s.\"\"\"\n shape = tf.shape(image)\n extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)\n extended = tf.concat([image, extended_channel], 2)\n return extended\n\n\ndef unwrap(image):\n \"\"\"Unwraps an image produced by wrap.\n\n Where there is a 0 in the last channel for every spatial position,\n the rest of the three channels in that spatial dimension are grayed\n (set to 128). Operations like translate and shear on a wrapped\n Tensor will leave 0s in empty locations. 
Some transformations look\n at the intensity of values to do preprocessing, and we want these\n empty pixels to assume the 'average' value, rather than pure black.\n\n\n Args:\n image: A 3D Image Tensor with 4 channels.\n\n Returns:\n image: A 3D image Tensor with 3 channels.\n \"\"\"\n image_shape = tf.shape(image)\n # Flatten the spatial dimensions.\n flattened_image = tf.reshape(image, [-1, image_shape[2]])\n\n # Find all pixels where the last channel is zero.\n alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1)\n\n replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1],\n image.dtype)\n\n # Where they are zero, fill them in with 'replace'.\n flattened_image = tf.where(\n tf.equal(alpha_channel, 0),\n tf.ones_like(flattened_image, dtype=image.dtype) * replace,\n flattened_image)\n\n image = tf.reshape(flattened_image, image_shape)\n image = tf.slice(image, [0, 0, 0],\n [image_shape[0], image_shape[1], image_shape[2] - 1])\n return image\n\n\ndef solarize(image, threshold=128):\n # For each pixel in the image, select the pixel\n # if the value is less than the threshold.\n # Otherwise, subtract 255 from the pixel.\n threshold = tf.saturate_cast(threshold, image.dtype)\n return tf.where(image < threshold, image, 255 - image)\n\n\ndef solarize_add(image, addition=0, threshold=128):\n # For each pixel in the image less than threshold\n # we add 'addition' amount to it and then clip the\n # pixel value to be between 0 and 255. The value\n # of 'addition' is between -128 and 128\n threshold = tf.saturate_cast(threshold, image.dtype)\n added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32)\n added_im = tf.saturate_cast(added_im, tf.uint8)\n return tf.where(image < threshold, added_im, image)\n\n\ndef invert(image):\n \"\"\"Inverts the image pixels.\"\"\"\n return 255 - tf.convert_to_tensor(image)\n\n\ndef invert_blend(image, factor):\n \"\"\"Implements blend of invert with original image.\"\"\"\n return blend(invert(image), image, factor)\n\n\ndef color(image, factor):\n \"\"\"Equivalent of PIL Color.\"\"\"\n degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))\n return blend(degenerate, image, factor)\n\n\ndef contrast(image, factor):\n \"\"\"Equivalent of PIL Contrast.\"\"\"\n grayscale_im = tf.image.rgb_to_grayscale(image)\n mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))\n mean = tf.saturate_cast(mean + 0.5, tf.uint8)\n\n degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean\n degenerate = tf.image.grayscale_to_rgb(degenerate)\n\n return blend(degenerate, image, factor)\n\n\ndef brightness(image, factor):\n \"\"\"Equivalent of PIL Brightness.\"\"\"\n degenerate = tf.zeros_like(image)\n return blend(degenerate, image, factor)\n\n\ndef posterize(image, bits):\n \"\"\"Equivalent of PIL Posterize.\"\"\"\n shift = tf.cast(8 - bits, image.dtype)\n return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)\n\n\ndef rotate(image, degrees):\n \"\"\"Equivalent of PIL Rotation.\"\"\"\n # Convert from degrees to radians\n degrees_to_radians = math.pi / 180.0\n radians = degrees * degrees_to_radians\n\n # In practice, we should randomize the rotation degrees by flipping\n # it negatively half the time, but that's done on 'degrees' outside\n # of the function.\n image = tfa_image.transform_ops.rotate(wrap(image), radians)\n return unwrap(image)\n\n\ndef translate_x(image, pixels):\n \"\"\"Equivalent of PIL Translate in X dimension.\"\"\"\n image = tfa_image.translate_ops.translate(wrap(image), 
[-pixels, 0])\n return unwrap(image)\n\n\ndef translate_y(image, pixels):\n \"\"\"Equivalent of PIL Translate in Y dimension.\"\"\"\n image = tfa_image.translate_ops.translate(wrap(image), [0, -pixels])\n return unwrap(image)\n\n\ndef shear_x(image, level):\n \"\"\"Equivalent of PIL Shearing in X dimension.\"\"\"\n # Shear parallel to x axis is a projective transform\n # with a matrix form of:\n # [1 level\n # 0 1]\n image = tfa_image.transform_ops.transform(\n wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])\n return unwrap(image)\n\n\ndef shear_y(image, level):\n \"\"\"Equivalent of PIL Shearing in Y dimension.\"\"\"\n # Shear parallel to y axis is a projective transform\n # with a matrix form of:\n # [1 0\n # level 1]\n image = tfa_image.transform_ops.transform(\n wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])\n return unwrap(image)\n\n\ndef autocontrast(image):\n \"\"\"Implements Autocontrast function from PIL using TF ops.\"\"\"\n\n def scale_channel(channel):\n \"\"\"Scale the 2D image using the autocontrast rule.\"\"\"\n # A possibly cheaper version can be done using cumsum/unique_with_counts\n # over the histogram values, rather than iterating over the entire image.\n # to compute mins and maxes.\n lo = tf.cast(tf.reduce_min(channel), tf.float32)\n hi = tf.cast(tf.reduce_max(channel), tf.float32)\n\n # Scale the image, making the lowest value 0 and the highest value 255.\n def scale_values(im):\n scale = 255.0 / (hi - lo)\n offset = -lo * scale\n im = tf.cast(im, tf.float32) * scale + offset\n return tf.saturate_cast(im, tf.uint8)\n\n result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)\n return result\n\n # Assumes RGB for now. Scales each channel independently\n # and then stacks the result.\n s1 = scale_channel(image[:, :, 0])\n s2 = scale_channel(image[:, :, 1])\n s3 = scale_channel(image[:, :, 2])\n image = tf.stack([s1, s2, s3], 2)\n return image\n\n\ndef autocontrast_blend(image, factor):\n \"\"\"Implements blend of autocontrast with original image.\"\"\"\n return blend(autocontrast(image), image, factor)\n\n\ndef sharpness(image, factor):\n \"\"\"Implements Sharpness function from PIL using TF ops.\"\"\"\n orig_im = image\n image = tf.cast(image, tf.float32)\n # Make image 4D for conv operation\n image = tf.expand_dims(image, 0)\n # SMOOTH PIL Kernel\n kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],\n dtype=tf.float32,\n shape=[3, 3, 1, 1]) / 13.\n # Tile across channel dimension\n kernel = tf.tile(kernel, [1, 1, 3, 1])\n strides = [1, 1, 1, 1]\n degenerate = tf.nn.depthwise_conv2d(\n image, kernel, strides, padding='VALID', dilations=[1, 1])\n degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])\n\n # For the borders of the resulting image, fill in the values of the\n # original image.\n mask = tf.ones_like(degenerate)\n padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])\n padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])\n result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)\n\n # Blend the final result\n return blend(result, orig_im, factor)\n\n\ndef equalize(image):\n \"\"\"Implements Equalize function from PIL using TF ops.\"\"\"\n\n def scale_channel(im, c):\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n im = tf.cast(im[:, :, c], tf.int32)\n # Compute the histogram of the image channel.\n histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)\n\n # For the purposes of computing the step, filter out the nonzeros.\n nonzero = tf.where(tf.not_equal(histo, 0))\n 
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])\n step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255\n\n def build_lut(histo, step):\n # Compute the cumulative sum, shifting by step // 2\n # and then normalization by step.\n lut = (tf.cumsum(histo) + (step // 2)) // step\n # Shift lut, prepending with 0.\n lut = tf.concat([[0], lut[:-1]], 0)\n # Clip the counts to be in range. This is done\n # in the C code for image.point.\n return tf.clip_by_value(lut, 0, 255)\n\n # If step is zero, return the original image. Otherwise, build\n # lut from the full histogram and step and then index from it.\n result = tf.cond(\n tf.equal(step, 0), lambda: im,\n lambda: tf.gather(build_lut(histo, step), im))\n\n return tf.cast(result, tf.uint8)\n\n # Assumes RGB for now. Scales each channel independently\n # and then stacks the result.\n s1 = scale_channel(image, 0)\n s2 = scale_channel(image, 1)\n s3 = scale_channel(image, 2)\n image = tf.stack([s1, s2, s3], 2)\n return image\n\n\ndef equalize_blend(image, factor):\n \"\"\"Implements blend of equalize with original image.\"\"\"\n return blend(equalize(image), image, factor)\n\n\ndef _convolve_image_with_kernel(image, kernel):\n num_channels = tf.shape(image)[-1]\n kernel = tf.tile(kernel, [1, 1, num_channels, 1])\n image = tf.expand_dims(image, axis=0)\n convolved_im = tf.nn.depthwise_conv2d(\n tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME')\n # adding 0.5 for future rounding, same as in PIL:\n # https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long\n convolved_im = convolved_im + 0.5\n return tf.squeeze(convolved_im, axis=0)\n\n\ndef blur(image, factor):\n \"\"\"Blur with the same kernel as ImageFilter.BLUR.\"\"\"\n # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long\n # class BLUR(BuiltinFilter):\n # name = \"Blur\"\n # # fmt: off\n # filterargs = (5, 5), 16, 0, (\n # 1, 1, 1, 1, 1,\n # 1, 0, 0, 0, 1,\n # 1, 0, 0, 0, 1,\n # 1, 0, 0, 0, 1,\n # 1, 1, 1, 1, 1,\n # )\n # # fmt: on\n #\n # filterargs are following:\n # (kernel_size_x, kernel_size_y), divisor, offset, kernel\n #\n blur_kernel = tf.constant(\n [[1., 1., 1., 1., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.],\n [1., 0., 0., 0., 1.], [1., 1., 1., 1., 1.]],\n dtype=tf.float32,\n shape=[5, 5, 1, 1]) / 16.0\n blurred_im = _convolve_image_with_kernel(image, blur_kernel)\n return blend(image, blurred_im, factor)\n\n\ndef smooth(image, factor):\n \"\"\"Smooth with the same kernel as ImageFilter.SMOOTH.\"\"\"\n # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long\n # class SMOOTH(BuiltinFilter):\n # name = \"Smooth\"\n # # fmt: off\n # filterargs = (3, 3), 13, 0, (\n # 1, 1, 1,\n # 1, 5, 1,\n # 1, 1, 1,\n # )\n # # fmt: on\n #\n # filterargs are following:\n # (kernel_size_x, kernel_size_y), divisor, offset, kernel\n #\n smooth_kernel = tf.constant([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]],\n dtype=tf.float32,\n shape=[3, 3, 1, 1]) / 13.0\n smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)\n return blend(image, smoothed_im, factor)\n\n\ndef rescale(image, level):\n \"\"\"Rescales image and enlarged cornet.\"\"\"\n # See tf.image.ResizeMethod for full list\n size = image.shape[:2]\n scale = level * 0.25\n scale_height = tf.cast(scale * size[0], tf.int32)\n scale_width = tf.cast(scale * size[1], tf.int32)\n cropped_image = 
tf.image.crop_to_bounding_box(\n image,\n offset_height=scale_height,\n offset_width=scale_width,\n target_height=size[0] - scale_height,\n target_width=size[1] - scale_width)\n rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)\n return tf.saturate_cast(rescaled, tf.uint8)\n\n\nNAME_TO_FUNC = {\n 'Identity': tf.identity,\n 'AutoContrast': autocontrast,\n 'AutoContrastBlend': autocontrast_blend,\n 'Equalize': equalize,\n 'EqualizeBlend': equalize_blend,\n 'Invert': invert,\n 'InvertBlend': invert_blend,\n 'Rotate': rotate,\n 'Posterize': posterize,\n 'Solarize': solarize,\n 'SolarizeAdd': solarize_add,\n 'Color': color,\n 'Contrast': contrast,\n 'Brightness': brightness,\n 'Sharpness': sharpness,\n 'ShearX': shear_x,\n 'ShearY': shear_y,\n 'TranslateX': translate_x,\n 'TranslateY': translate_y,\n 'Blur': blur,\n 'Smooth': smooth,\n 'Rescale': rescale,\n}\n"
] |
[
[
"tensorflow.reduce_min",
"tensorflow.cumsum",
"tensorflow.ones",
"tensorflow.ones_like",
"tensorflow.reshape",
"tensorflow.saturate_cast",
"tensorflow.zeros_like",
"tensorflow.clip_by_value",
"tensorflow.stack",
"tensorflow.tile",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.cast",
"tensorflow.image.resize",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.squeeze",
"tensorflow.pad",
"tensorflow.histogram_fixed_width",
"tensorflow.expand_dims",
"tensorflow.where",
"tensorflow.bitwise.right_shift",
"tensorflow.reduce_sum",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.convert_to_tensor",
"tensorflow.not_equal",
"tensorflow.equal",
"tensorflow.reduce_max",
"tensorflow.gather",
"tensorflow.slice",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.image.grayscale_to_rgb"
]
] |
satya323/spark
|
[
"4f825aad65f2650343e7cfbef39465ebb4e403b6"
] |
[
"python/pyspark/pandas/data_type_ops/base.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numbers\nfrom abc import ABCMeta\nfrom itertools import chain\nfrom typing import Any, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype\n\nfrom pyspark.sql import functions as F, Column\nfrom pyspark.sql.types import (\n ArrayType,\n BinaryType,\n BooleanType,\n DataType,\n DateType,\n DecimalType,\n FractionalType,\n IntegralType,\n MapType,\n NullType,\n NumericType,\n StringType,\n StructType,\n TimestampType,\n TimestampNTZType,\n UserDefinedType,\n)\nfrom pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex\nfrom pyspark.pandas.spark import functions as SF\nfrom pyspark.pandas.typedef import extension_dtypes\nfrom pyspark.pandas.typedef.typehints import (\n extension_dtypes_available,\n extension_float_dtypes_available,\n extension_object_dtypes_available,\n spark_type_to_pandas_dtype,\n)\n\nif extension_dtypes_available:\n from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype\n\nif extension_float_dtypes_available:\n from pandas import Float32Dtype, Float64Dtype\n\nif extension_object_dtypes_available:\n from pandas import BooleanDtype, StringDtype\n\n\ndef is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:\n \"\"\"Check whether the `operand` is valid for arithmetic operations against numerics.\"\"\"\n from pyspark.pandas.base import IndexOpsMixin\n\n if isinstance(operand, numbers.Number):\n return not isinstance(operand, bool) or allow_bool\n elif isinstance(operand, IndexOpsMixin):\n if isinstance(operand.dtype, CategoricalDtype):\n return False\n else:\n return isinstance(operand.spark.data_type, NumericType) or (\n allow_bool and isinstance(operand.spark.data_type, BooleanType)\n )\n else:\n return False\n\n\ndef transform_boolean_operand_to_numeric(\n operand: Any, *, spark_type: Optional[DataType] = None\n) -> Any:\n \"\"\"Transform boolean operand to numeric.\n\n If the `operand` is:\n - a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.\n - a boolean literal, transform to the int value.\n Otherwise, return the operand as it is.\n \"\"\"\n from pyspark.pandas.base import IndexOpsMixin\n\n if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):\n assert spark_type, \"spark_type must be provided if the operand is a boolean IndexOpsMixin\"\n assert isinstance(spark_type, NumericType), \"spark_type must be NumericType\"\n dtype = spark_type_to_pandas_dtype(\n spark_type, use_extension_dtypes=operand._internal.data_fields[0].is_extension_dtype\n )\n return operand._with_new_scol(\n operand.spark.column.cast(spark_type),\n field=operand._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type),\n )\n elif 
isinstance(operand, bool):\n return int(operand)\n else:\n return operand\n\n\ndef _as_categorical_type(\n index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType\n) -> IndexOpsLike:\n \"\"\"Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`.\"\"\"\n assert isinstance(dtype, CategoricalDtype)\n if dtype.categories is None:\n codes, uniques = index_ops.factorize()\n return codes._with_new_scol(\n codes.spark.column,\n field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),\n )\n else:\n categories = dtype.categories\n if len(categories) == 0:\n scol = SF.lit(-1)\n else:\n kvs = chain(\n *[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]\n )\n map_scol = F.create_map(*kvs)\n\n scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit(-1))\n return index_ops._with_new_scol(\n scol.cast(spark_type),\n field=index_ops._internal.data_fields[0].copy(\n dtype=dtype, spark_type=spark_type, nullable=False\n ),\n )\n\n\ndef _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:\n \"\"\"Cast `index_ops` to BooleanType Spark type, given `dtype`.\"\"\"\n spark_type = BooleanType()\n if isinstance(dtype, extension_dtypes):\n scol = index_ops.spark.column.cast(spark_type)\n else:\n scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(\n index_ops.spark.column.cast(spark_type)\n )\n return index_ops._with_new_scol(\n scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)\n )\n\n\ndef _as_string_type(\n index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)\n) -> IndexOpsLike:\n \"\"\"Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,\n representing null Spark column. 
Note that `null_str` is for non-extension dtypes only.\n \"\"\"\n spark_type = StringType()\n if isinstance(dtype, extension_dtypes):\n scol = index_ops.spark.column.cast(spark_type)\n else:\n casted = index_ops.spark.column.cast(spark_type)\n scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)\n return index_ops._with_new_scol(\n scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)\n )\n\n\ndef _as_other_type(\n index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType\n) -> IndexOpsLike:\n \"\"\"Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.\n\n Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.\n \"\"\"\n from pyspark.pandas.internal import InternalField\n\n need_pre_process = (\n isinstance(dtype, CategoricalDtype)\n or isinstance(spark_type, BooleanType)\n or isinstance(spark_type, StringType)\n )\n assert not need_pre_process, \"Pre-processing is needed before the type casting.\"\n\n scol = index_ops.spark.column.cast(spark_type)\n return index_ops._with_new_scol(scol, field=InternalField(dtype=dtype))\n\n\ndef _sanitize_list_like(operand: Any) -> None:\n \"\"\"Raise TypeError if operand is list-like.\"\"\"\n if isinstance(operand, (list, tuple, dict, set)):\n raise TypeError(\"The operation can not be applied to %s.\" % type(operand).__name__)\n\n\ndef _is_valid_for_logical_operator(right: Any) -> bool:\n from pyspark.pandas.base import IndexOpsMixin\n\n return isinstance(right, (int, bool)) or (\n isinstance(right, IndexOpsMixin)\n and (\n isinstance(right.spark.data_type, BooleanType)\n or isinstance(right.spark.data_type, IntegralType)\n )\n )\n\n\ndef _is_boolean_type(right: Any) -> bool:\n from pyspark.pandas.base import IndexOpsMixin\n\n return isinstance(right, bool) or (\n isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType)\n )\n\n\nclass DataTypeOps(object, metaclass=ABCMeta):\n \"\"\"The base class for binary operations of pandas-on-Spark objects (of different data types).\"\"\"\n\n def __new__(cls, dtype: Dtype, spark_type: DataType) -> \"DataTypeOps\":\n from pyspark.pandas.data_type_ops.binary_ops import BinaryOps\n from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps\n from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps\n from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps\n from pyspark.pandas.data_type_ops.date_ops import DateOps\n from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps, DatetimeNTZOps\n from pyspark.pandas.data_type_ops.null_ops import NullOps\n from pyspark.pandas.data_type_ops.num_ops import (\n DecimalOps,\n FractionalExtensionOps,\n FractionalOps,\n IntegralExtensionOps,\n IntegralOps,\n )\n from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps\n from pyspark.pandas.data_type_ops.udt_ops import UDTOps\n\n if isinstance(dtype, CategoricalDtype):\n return object.__new__(CategoricalOps)\n elif isinstance(spark_type, DecimalType):\n return object.__new__(DecimalOps)\n elif isinstance(spark_type, FractionalType):\n if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:\n return object.__new__(FractionalExtensionOps)\n else:\n return object.__new__(FractionalOps)\n elif isinstance(spark_type, IntegralType):\n if extension_dtypes_available and type(dtype) in [\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n ]:\n return 
object.__new__(IntegralExtensionOps)\n else:\n return object.__new__(IntegralOps)\n elif isinstance(spark_type, StringType):\n if extension_object_dtypes_available and isinstance(dtype, StringDtype):\n return object.__new__(StringExtensionOps)\n else:\n return object.__new__(StringOps)\n elif isinstance(spark_type, BooleanType):\n if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):\n return object.__new__(BooleanExtensionOps)\n else:\n return object.__new__(BooleanOps)\n elif isinstance(spark_type, TimestampType):\n return object.__new__(DatetimeOps)\n elif isinstance(spark_type, TimestampNTZType):\n return object.__new__(DatetimeNTZOps)\n elif isinstance(spark_type, DateType):\n return object.__new__(DateOps)\n elif isinstance(spark_type, BinaryType):\n return object.__new__(BinaryOps)\n elif isinstance(spark_type, ArrayType):\n return object.__new__(ArrayOps)\n elif isinstance(spark_type, MapType):\n return object.__new__(MapOps)\n elif isinstance(spark_type, StructType):\n return object.__new__(StructOps)\n elif isinstance(spark_type, NullType):\n return object.__new__(NullOps)\n elif isinstance(spark_type, UserDefinedType):\n return object.__new__(UDTOps)\n else:\n raise TypeError(\"Type %s was not understood.\" % dtype)\n\n def __init__(self, dtype: Dtype, spark_type: DataType):\n self.dtype = dtype\n self.spark_type = spark_type\n\n @property\n def pretty_name(self) -> str:\n raise NotImplementedError()\n\n def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Addition can not be applied to %s.\" % self.pretty_name)\n\n def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Subtraction can not be applied to %s.\" % self.pretty_name)\n\n def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Multiplication can not be applied to %s.\" % self.pretty_name)\n\n def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"True division can not be applied to %s.\" % self.pretty_name)\n\n def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Floor division can not be applied to %s.\" % self.pretty_name)\n\n def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Modulo can not be applied to %s.\" % self.pretty_name)\n\n def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Exponentiation can not be applied to %s.\" % self.pretty_name)\n\n def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Addition can not be applied to %s.\" % self.pretty_name)\n\n def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Subtraction can not be applied to %s.\" % self.pretty_name)\n\n def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Multiplication can not be applied to %s.\" % self.pretty_name)\n\n def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"True division can not be applied to %s.\" % self.pretty_name)\n\n def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Floor division can not be applied to %s.\" % self.pretty_name)\n\n def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Modulo can not be applied to %s.\" % self.pretty_name)\n\n def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Exponentiation can not be applied to %s.\" % 
self.pretty_name)\n\n def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Bitwise and can not be applied to %s.\" % self.pretty_name)\n\n def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Bitwise xor can not be applied to %s.\" % self.pretty_name)\n\n def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"Bitwise or can not be applied to %s.\" % self.pretty_name)\n\n def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n _sanitize_list_like(right)\n return left.__and__(right)\n\n def rxor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n _sanitize_list_like(right)\n return left ^ right\n\n def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n _sanitize_list_like(right)\n return left.__or__(right)\n\n def neg(self, operand: IndexOpsLike) -> IndexOpsLike:\n raise TypeError(\"Unary - can not be applied to %s.\" % self.pretty_name)\n\n def abs(self, operand: IndexOpsLike) -> IndexOpsLike:\n raise TypeError(\"abs() can not be applied to %s.\" % self.pretty_name)\n\n def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"< can not be applied to %s.\" % self.pretty_name)\n\n def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"<= can not be applied to %s.\" % self.pretty_name)\n\n def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\"> can not be applied to %s.\" % self.pretty_name)\n\n def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n raise TypeError(\">= can not be applied to %s.\" % self.pretty_name)\n\n def eq(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n from pyspark.pandas.base import column_op\n\n _sanitize_list_like(right)\n\n return column_op(Column.__eq__)(left, right)\n\n def ne(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:\n from pyspark.pandas.base import column_op\n\n _sanitize_list_like(right)\n\n return column_op(Column.__ne__)(left, right)\n\n def invert(self, operand: IndexOpsLike) -> IndexOpsLike:\n raise TypeError(\"Unary ~ can not be applied to %s.\" % self.pretty_name)\n\n def restore(self, col: pd.Series) -> pd.Series:\n \"\"\"Restore column when to_pandas.\"\"\"\n return col\n\n def prepare(self, col: pd.Series) -> pd.Series:\n \"\"\"Prepare column when from_pandas.\"\"\"\n return col.replace({np.nan: None})\n\n def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:\n return index_ops._with_new_scol(\n index_ops.spark.column.isNull(),\n field=index_ops._internal.data_fields[0].copy(\n dtype=np.dtype(\"bool\"), spark_type=BooleanType(), nullable=False\n ),\n )\n\n def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:\n return index_ops.copy()\n\n def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:\n raise TypeError(\"astype can not be applied to %s.\" % self.pretty_name)\n"
] |
[
[
"pandas.api.types.CategoricalDtype",
"numpy.dtype"
]
] |
RobbinBouwmeester/LIT
|
[
"0516a69fbf1b8e9976524e0c243f82de041df544"
] |
[
"src/lpb.py"
] |
[
"\"\"\"\n Copyright (c) 2017 Robbin Bouwmeester\n Permission is hereby granted, free of charge, to any person\n obtaining a copy of this software and associated documentation\n files (the \"Software\"), to deal in the Software without\n restriction, including without limitation the rights to use,\n copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the\n Software is furnished to do so, subject to the following\n conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n OTHER DEALINGS IN THE SOFTWARE.\"\"\"\n\n__author__ = \"Robbin Bouwmeester\"\n__copyright__ = \"Copyright 2017\"\n__credits__ = [\"Robbin Bouwmeester\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Robbin Bouwmeester\"\n__email__ = \"Robbin.bouwmeester@ugent.be\"\n__status__ = \"nightly funzies\"\n\nimport pandas as pd\nfrom itertools import groupby\nimport logging\n\nclass LipidBLAST_entry():\n\tdef __init__(self,\n\t\t\t\t name=\"\",\n\t\t\t\t ion=\"\",\n\t\t\t\t mw=0.0,\n\t\t\t\t chem_form=\"\",\n\t\t\t\t num_ms2_peaks=0,\n\t\t\t\t f_acyl_lengths=[],\n\t\t\t\t unsats=[],\n\t\t\t\t ms2=[]):\n\n\t\tself.name = name\n\t\tself.ion = ion\n\t\tself.mw = mw\n\t\tself.chem_form = chem_form\n\t\tself.num_ms2_peaks = num_ms2_peaks\n\t\tself.ms2 = ms2\n\t\tself.f_acyl_lengths = f_acyl_lengths\n\t\tself.unsats = unsats\n\n\tdef __str__(self):\n\t\tret_string = []\n\t\tret_string.append(\"================\")\n\t\tret_string.append(\"\")\n\t\tret_string.append(\"Lipid: %s\" % (self.name))\n\t\tret_string.append(\"MW: %s\" % (self.mw))\n\t\tret_string.append(\"Formula: %s\" % (self.chem_form))\n\t\tret_string.append (\"\")\n\t\tfor f in self.ms2:\n\t\t\tret_string.append(\"%s\\t%s\\t%s\" % (f[0],f[1],f[2]))\n\t\tret_string.append(\"\")\n\t\tret_string.append(\"================\")\n\n\t\treturn(\"\\n\".join(ret_string))\n\nclass LipidBLAST():\n\tdef __init__(self,\n\t\t\t\t f_names=[\"LipidBlast-pos.msp\",\"LipidBlast-neg.msp\"],\n\t\t\t\t min_acyl_length=10,\n\t\t\t\t exclude_lyso=False,\n\t\t\t\t include_ions=[\"[M-H]-\"], #,\"[M+]\",\"[M+H]+\",\"[M+NH4]+\",\"[M-H]-\",\"[M-2H](2-)\",\"[M-Ac-H]-\",\"[M+Na2-H]+\",\"[M+]\",\"[M+NH4]+\",\"[M+Na]+\",\"[M-2H](2-)\",\"[M-Ac-H]-\" \"[M+]\",\"[M+H]+\",\"[M+NH4]+\",\"[M-H]-\",\"[M-2H](2-)\",\"[M-Ac-H]-\",\"[M+Na2-H]+\",\"[M+]\",\"[M+NH4]+\",\"[M+Na]+\",\"[M-2H](2-)\",\"[M-Ac-H]-\"\n\t\t\t\t include_class=[\"PE\",\"GPSer\",\"GPCho\",\"PC\",\"GPA\",\"PE\",\"GPIns\",\"GPEtn\",\"GPGro\"], #,\"SM\",\"TG\",\"CL\", #,\"SM\",\"TG\",\"CL\",\"GPSer\",\"GPCho\",\"PC\",\"GPA\",\"PE\",\"GPIns\",\"GPEtn\",\"GPGro\n\t\t\t\t aggregate_acyls=False,\n\t\t\t\t use_simplified_names=True,\n\t\t\t\t dalt_diff_lookup_bin=1):\n\n\t\tself.f_names = f_names\n\t\tself.min_acyl_length = min_acyl_length\n\t\tself.exclude_lyso = exclude_lyso\n\t\tself.include_ions = include_ions\n\t\tself.include_class = include_class\n\t\tself.use_simplified_names = use_simplified_names\n\t\tself.dalt_diff_lookup_bin = 
dalt_diff_lookup_bin\n\t\tself.aggregate_acyls = aggregate_acyls\n\t\t\n\t\tself.lpb_dict = {}\n\t\tself.ms1_dict = {}\n\t\tself.ms1_dict_lookup = {}\n\n\t\tself.tot_entr_read = 0\n\n\t\tif len(self.f_names) > 0:\n\t\t\tfor f_name in f_names:\n\t\t\t\tself.read_lpb(f_name)\n\n\tdef __str__(self):\n\t\tret_string = []\n\n\t\tret_string.append(\"Filenames: %s\" % (self.f_names))\n\t\tret_string.append(\"Min acyl length: %s\" % (self.min_acyl_length))\n\t\tret_string.append(\"Exclude lyso: %s\" % (self.exclude_lyso))\n\t\tret_string.append(\"Include ions: %s\" % (self.include_ions))\n\t\tret_string.append(\"Include lipid classes: %s\" % (self.include_class))\n\t\tret_string.append(\"Use simplified names: %s\" % (self.use_simplified_names))\n\t\tret_string.append(\"Lookup diff: %s Da\" % (self.dalt_diff_lookup_bin))\n\t\tret_string.append(\"Total entries read: %s\" % (self.tot_entr_read))\n\t\t\n\t\treturn(\"\\n\".join(ret_string))\n\n\tdef read_lpb(self,f_name):\n\t\tdef _get_general_info(name):\n\t\t\t# Currently limited to max 9 unsats\n\t\t\tunsats = [n[0] for n in name.split(\":\")[1:]]\n\t\t\tclass_name = name.split(\"(\")[0]\n\t\t\tif \"-\" in class_name:\n\t\t\t\tname_split = name.split(\"(\")\n\t\t\t\tname_split[0] = name.split(\"(\")[0].replace(\"-\",\"\")\n\t\t\t\tname = \"(\".join(name_split)\n\n\t\t\tacyl_lengths = name.split(\":\")\n\t\t\tacyl_lengths.pop()\n\t\t\tf_acyl_lengths = []\n\t\t\tfor acl in acyl_lengths:\n\t\t\t\ttry:\n\t\t\t\t\tif \"/\" in acl:\n\t\t\t\t\t\tf_acyl_lengths.append(acl.split(\"/\")[1].replace(\"d\",\"\").replace(\"methyl-\",\"\"))\n\t\t\t\t\telif \"-\" in acl:\n\t\t\t\t\t\tf_acyl_lengths.append(acl.split(\"-\")[1].replace(\"d\",\"\").replace(\"methyl-\",\"\"))\n\t\t\t\t\telse:\n\t\t\t\t\t\tf_acyl_lengths.append(acl.split(\"(\")[1].replace(\"d\",\"\").replace(\"methyl-\",\"\"))\n\t\t\t\texcept:\n\t\t\t\t\tlogging.warning(\"Could not format to get acyl lengths: %s\" % (name))\n\t\t\t\t\treturn([0],[0],\"\")\t\t\t\n\t\t\ttry:\n\t\t\t\tf_acyl_lengths = list(map(int,f_acyl_lengths))\n\t\t\t\tunsats = list(map(int,unsats))\n\t\t\texcept:\n\t\t\t\tlogging.warning(\"Could not format to get acyl lengths: %s\" % (name))\n\t\t\t\treturn([0],[0],\"\")\n\t\t\t\t\n\t\t\treturn(f_acyl_lengths,unsats,class_name)\n\n\t\tdef _simplify_name(class_name,acyls,unsats):\n\t\t\tsimplified_name = \"\"\n\t\t\tsimplified_name += class_name\n\t\t\tsimplified_name += \"(\"\n\t\t\tif not self.aggregate_acyls:\n\t\t\t\tfor f,u in zip(f_acyl_lengths,unsats):\n\t\t\t\t\tsimplified_name += str(f)\n\t\t\t\t\tsimplified_name += \":\"\n\t\t\t\t\tsimplified_name += str(u)\n\t\t\t\t\tsimplified_name += \"/\"\n\t\t\t\tsimplified_name = simplified_name[:-1] \n\t\t\telse:\n\t\t\t\tsimplified_name += str(sum(f_acyl_lengths))\n\t\t\t\tsimplified_name += \":\"\n\t\t\t\tsimplified_name += str(sum(unsats))\n\t\t\t\n\t\t\tsimplified_name += \")\"\n\t\t\treturn(simplified_name)\n\n\t\tdef _get_chem_form(chem_form_native,ion):\n\t\t\tchem_form_ion = \"\"\n\t\t\tfor i,c in enumerate(chem_form_native):\n\t\t\t\tif i+1 >= len(chem_form_native):\n\t\t\t\t\tif c.isdigit(): chem_form_ion += c\n\t\t\t\t\telse: \n\t\t\t\t\t\tchem_form_ion += c\n\t\t\t\t\t\tchem_form_ion += \"1\"\n\t\t\t\telif c.isdigit(): chem_form_ion += c\n\t\t\t\telif c.isupper() and chem_form_native[i+1].isdigit(): chem_form_ion += c\n\t\t\t\telif c.isupper() and chem_form_native[i+1].isupper(): \n\t\t\t\t\tchem_form_ion += c\n\t\t\t\t\tchem_form_ion += \"1\"\n\t\t\t\telif chem_form_native[i+1].isdigit(): chem_form_ion += 
c\n\t\t\tlist_chem= [''.join(g) for _, g in groupby(chem_form_ion, str.isalpha)]\n\t\t\tchem_form_ion = dict(zip(list_chem[::2],map(int,list_chem[1::2])))\n\n\t\t\tif \"+\" not in ion:\n\t\t\t\tif \"[M-H]-\" in ion:\n\t\t\t\t\ttry: chem_form_ion[\"H\"] -= 1\n\t\t\t\t\texcept KeyError: logging.critical(\"ERROR: could not subtract atom when getting the ionized form from the molecule\")\n\t\t\t\tif \"[M-2H](2-)\" in ion:\n\t\t\t\t\ttry: chem_form_ion[\"H\"] -= 2\n\t\t\t\t\texcept KeyError: logging.critical(\"ERROR: could not subtract atom when getting the ionized form from the molecule\")\n\t\t\t\tif \"[M-Ac-H]-\" in ion:\n\t\t\t\t\ttry: \n\t\t\t\t\t\tchem_form_ion[\"C\"] += 2\n\t\t\t\t\t\tchem_form_ion[\"H\"] += 3\n\t\t\t\t\t\tchem_form_ion[\"O\"] += 2\n\t\t\t\t\texcept KeyError: logging.critical(\"ERROR: could not subtract atom when getting the ionized form from the molecule\")\n\t\t\telse:\n\t\t\t\tif \"[M+H]+\" in ion:\n\t\t\t\t\ttry: chem_form_ion[\"H\"] += 1\n\t\t\t\t\texcept KeyError: logging.critical(\"ERROR: could not add atom when getting the ionized form from the molecule\")\n\t\t\t\tif \"[M+NH4]+\" in ion:\n\t\t\t\t\ttry: \n\t\t\t\t\t\tif chem_form_ion.has_key(\"N\"): chem_form_ion[\"N\"] += 1\n\t\t\t\t\t\telse: chem_form_ion[\"N\"] = 1\n\t\t\t\t\t\tchem_form_ion[\"H\"] += 4\n\t\t\t\t\texcept KeyError: logging.critical(\"ERROR: could not add atom when getting the ionized form from the molecule\")\n\t\t\t\tif \"[M+Na]+\" in ion:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif chem_form_ion.has_key(\"Na\"): chem_form_ion[\"Na\"] += 1\n\t\t\t\t\t\telse: chem_form_ion[\"Na\"] = 1\n\t\t\t\t\texcept KeyError: logging.critical(\"ERROR: could not add atom when getting the ionized form from the molecule\")\n\t\t\t\tif \"[M+Na2-H]+\" in ion:\n\t\t\t\t\ttry: \n\t\t\t\t\t\tif chem_form_ion.has_key(\"Na\"): chem_form_ion[\"Na\"] += 2\n\t\t\t\t\t\telse: chem_form_ion[\"Na\"] = 2\n\t\t\t\t\t\tchem_form_ion[\"H\"] -= 1\n\t\t\t\t\texcept KeyError: logging.critical(\"ERROR: could not add atom when getting the ionized form from the molecule\")\n\n\t\t\treturn(\"\".join([atom+str(num_atom) for atom,num_atom in sorted(chem_form_ion.items())]))\n\n\t\twith open(f_name) as infile:\n\t\t\tfragments = []\n\t\t\tpre_c_mass = 0.0\n\t\t\tname = \"\"\n\t\t\tion = \"\"\n\t\t\tfor line in infile:\n\t\t\t\tline = line.strip()\n\t\t\t\t#print(line)\n\t\t\t\tif len(line) == 0:\t\t\n\t\t\t\t\tf_acyl_lengths,unsats,class_name = _get_general_info(name)\t\t\t\n\t\t\t\t\tf_acyl_lengths_error = [a for a in f_acyl_lengths if a < self.min_acyl_length and a != 0]\n\t\t\t\t\t\n\t\t\t\t\tif (len(class_name) == 0) or \\\n\t\t\t\t\t\t(ion_type not in self.include_ions) or \\\n\t\t\t\t\t\t(len([c for c in self.include_class if c in name]) == 0) or \\\n\t\t\t\t\t\t(self.exclude_lyso and \"/0:0\" in name) or \\\n\t\t\t\t\t\t(len(f_acyl_lengths_error) > 0):\n\n\t\t\t\t\t\tfragments = []\n\t\t\t\t\t\tpre_c_mass = 0.0\n\t\t\t\t\t\tname = \"\"\n\t\t\t\t\t\tion_type = \"\"\n\t\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\t\tsimplified_name = _simplify_name(class_name,f_acyl_lengths,unsats)\n\n\t\t\t\t\tnew_entry = LipidBLAST_entry(name=name,\n\t\t\t\t\t\t\t\t\t\t\t\t ion=ion_type,\n\t\t\t\t\t\t\t\t\t\t\t\t mw=pre_c_mass,\n\t\t\t\t\t\t\t\t\t\t\t\t chem_form=chem_form_ion,\n\t\t\t\t\t\t\t\t\t\t\t\t num_ms2_peaks=num_peaks,\n\t\t\t\t\t\t\t\t\t\t\t\t ms2=fragments,\n\t\t\t\t\t\t\t\t\t\t\t\t f_acyl_lengths=f_acyl_lengths,\n\t\t\t\t\t\t\t\t\t\t\t\t unsats=unsats)\n\t\t\t\t\t\n\t\t\t\t\tself.lpb_dict[\"%s|%s\" % (simplified_name,ion_type)] = 
new_entry\n\n\t\t\t\t\tloc_dict = int(pre_c_mass) - int(pre_c_mass) % self.dalt_diff_lookup_bin\n\n\t\t\t\t\tif loc_dict in self.ms1_dict_lookup.keys():\n\t\t\t\t\t\tself.ms1_dict_lookup[loc_dict][\"%s|%s\" % (simplified_name,ion_type)] = new_entry\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.ms1_dict_lookup[loc_dict] = {}\n\t\t\t\t\t\tself.ms1_dict_lookup[loc_dict][\"%s|%s\" % (simplified_name,ion_type)] = new_entry\n\n\t\t\t\t\tself.tot_entr_read += 1\n\n\t\t\t\t\tfragments = []\n\t\t\t\t\tpre_c_mass = 0.0\n\t\t\t\t\tname = \"\"\n\t\t\t\t\tion_type = \"\"\n\n\t\t\t\telif \":\" in line:\n\t\t\t\t\tif line.startswith(\"PRECURSORMZ\"):\n\t\t\t\t\t\tpre_c_mass = float(line.split(\": \")[1])\n\t\t\t\t\tif line.startswith(\"Name: \"):\n\t\t\t\t\t\tname = line.split(\"; \")[-1]\n\t\t\t\t\t\tion_type = line.split(\"; \")[1]\n\t\t\t\t\tif line.startswith(\"Comment: \"):\n\t\t\t\t\t\t# Some of the chemical formulas contain a \";\" at the end; remove\n\t\t\t\t\t\tchem_form_native = line.split(\"; \")[-1].replace(\";\",\"\")\n\t\t\t\t\t\t#print(chem_form_native)\n\t\t\t\t\t\tchem_form_ion = _get_chem_form(chem_form_native,ion_type)\n\t\t\t\t\tif line.startswith(\"Num Peaks:\"):\n\t\t\t\t\t\tnum_peaks = int(line.split(\": \")[-1])\n\t\t\t\telse:\n\t\t\t\t\tif line==\"\\x1a\": #EOF\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tfragments.append([float(line.split(\" \")[0]),float(line.split(\" \")[1]),line.split(\" \")[2].replace(\"\\\"\",\"\")])\n\nclass PrecursorFilter():\n\tdef __init__(self,db,ppm=10):\n\t\tself.db = db\n\t\tself.ppm = ppm\n\n\tdef retrieve_entry_pre_c_mass(self,pre_c_mass):\n\t\tmass_error_threshold = (pre_c_mass*self.ppm)/1000000\n\n\t\tret_entries = []\n\n\t\tloc_dict = int(pre_c_mass) - int(pre_c_mass) % self.db.dalt_diff_lookup_bin\n\t\tloc_dict_lower = (int(pre_c_mass-mass_error_threshold)) - (int(pre_c_mass-mass_error_threshold)) % self.db.dalt_diff_lookup_bin\n\t\tloc_dict_upper = (int(pre_c_mass+mass_error_threshold)) - (int(pre_c_mass+mass_error_threshold)) % self.db.dalt_diff_lookup_bin\n\n\t\t# TODO set does not have to be list\n\t\tlocs_to_search = list(set([loc_dict,loc_dict_lower,loc_dict_upper]))\n\t\tfor loc in locs_to_search:\n\t\t\ttry:\n\t\t\t\tfor name,entr in self.db.ms1_dict_lookup[loc].items():\n\t\t\t\t\tmass_error = abs(entr.mw-pre_c_mass)\n\t\t\t\t\tif mass_error < mass_error_threshold:\n\t\t\t\t\t\tret_entries.append([name,mass_error,entr])\n\t\t\texcept KeyError:\n\t\t\t\tlogging.warning(\"Could not find an entry in the database for prec mass: %s\" % (pre_c_mass))\n\t\t\t\tcontinue\n\t\treturn(ret_entries)\n\nif __name__ == \"__main__\":\n\tlogging.basicConfig(filename=\"prec_filter.log\",\n\t\t\t\t\t\tlevel=logging.DEBUG,\n\t\t\t\t\t\tfilemode=\"w\",\n\t\t\t\t\t\tformat=\"%(levelname)s:%(created)f:%(asctime)s:%(message)s\")\n\n\tlogging.info(\"Reading the LPB database ...\")\n\tlpb = LipidBLAST()\n\tlogging.info(\"Done reading the LPB database ...\")\n\tlogging.info(lpb)\n\t\n\tstep_three_df = pd.read_csv(\"stepone_new.csv\")\n\tprecf = Precursor_filter(lpb)\n\t\n\tprec_filt_result = []\n\tfor index,row in step_three_df.iterrows():\n\t\tif (index % 10000==0):\n\t\t\tlogging.info(\"Analyzing row number and m/z: %s - %s\" % (index,row[\"mz\"]))\n\t\tprec_hits = precf.retrieve_entry_pre_c_mass(row[\"mz\"])\n\t\tfor hit in prec_hits:\n\t\t\tprec_filt_result.append([row[\"mz\"],hit[2].mw,hit[1],hit[0].split(\"|\")[0],hit[2].chem_form,hit[0].split(\"|\")[1]])\n\t\n\tprec_filt_result = pd.DataFrame(prec_filt_result)\n\tprec_filt_result.columns = [\"Input Mass\",\"Matched 
Mass\",\"Delta\",\"Abbreviation\",\"Formula\",\"Ion\"]\n\tprec_filt_result.to_excel(\"batch_results.xlsx\",index=False)\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
MathieuTuli/transformers
|
[
"da3db8ba7a18deed492808b0d6c5d29669241fa0"
] |
[
"src/transformers/adas.py"
] |
[
"\"\"\"\n\"\"\"\nfrom __future__ import division\nfrom torch.optim.optimizer import Optimizer, required\n\nimport numpy as np\nimport torch\n\nfrom typing import NamedTuple, List\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Union, Tuple\n# from scipy.sparse.linalg import svds\nfrom scipy.optimize import minimize_scalar\n\n\nclass LayerType(Enum):\n CONV = 1\n FC = 2\n NON_CONV = 3\n\n\n@dataclass\nclass LayerMetrics:\n rank: float\n KG: float\n condition: float\n\n\n@dataclass\nclass ConvLayerMetrics:\n input_channel: LayerMetrics\n output_channel: LayerMetrics\n\n\nclass LRMetrics(NamedTuple):\n rank_velocity: List[float]\n r_conv: List[float]\n\n\ndef EVBMF(Y, sigma2=None, H=None):\n \"\"\"Implementation of the analytical solution to Empirical Variational\n Bayes Matrix Factorization.\n\n This function can be used to calculate the analytical solution to\n empirical VBMF.\n This is based on the paper and MatLab code by Nakajima et al.:\n \"Global analytic solution of fully-observed variational Bayesian matrix\n factorization.\"\n\n Notes\n -----\n If sigma2 is unspecified, it is estimated by minimizing the free\n energy.\n If H is unspecified, it is set to the smallest of the sides of the\n input Y.\n\n Attributes\n ----------\n Y : numpy-array\n Input matrix that is to be factorized. Y has shape (L,M), where L<=M.\n\n sigma2 : int or None (default=None)\n Variance of the noise on Y.\n\n H : int or None (default = None)\n Maximum rank of the factorized matrices.\n\n Returns\n -------\n U : numpy-array\n Left-singular vectors.\n\n S : numpy-array\n Diagonal matrix of singular values.\n\n V : numpy-array\n Right-singular vectors.\n\n post : dictionary\n Dictionary containing the computed posterior values.\n\n\n References\n ----------\n .. [1] Nakajima, Shinichi, et al. \"Global analytic solution of\n fully-observed variational Bayesian matrix factorization.\" Journal of\n Machine Learning Research 14.Jan (2013): 1-37.\n\n .. [2] Nakajima, Shinichi, et al. \"Perfect dimensionality recovery by\n variational Bayesian PCA.\" Advances in Neural Information Processing\n Systems. 2012.\n \"\"\"\n L, M = Y.shape # has to be L<=M\n\n if H is None:\n H = L\n\n alpha = L / M\n tauubar = 2.5129 * np.sqrt(alpha)\n\n # SVD of the input matrix, max rank of H\n # U, s, V = np.linalg.svd(Y)\n U, s, V = torch.svd(Y)\n U = U[:, :H]\n s = s[:H]\n V = V[:H].T\n\n # Calculate residual\n residual = 0.\n if H < L:\n # residual = np.sum(np.sum(Y**2)-np.sum(s**2))\n residual = torch.sum(np.sum(Y**2) - np.sum(s**2))\n\n # Estimation of the variance when sigma2 is unspecified\n if sigma2 is None:\n xubar = (1 + tauubar) * (1 + alpha / tauubar)\n eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1\n # upper_bound = (np.sum(s**2)+residual)/(L*M)\n # lower_bound = np.max(\n # [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])\n upper_bound = (torch.sum(s**2) + residual) / (L * M)\n lower_bound = torch.max(torch.stack(\n [s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0))\n\n scale = 1. 
# /lower_bound\n s = s * np.sqrt(scale)\n residual = residual * scale\n lower_bound = lower_bound * scale\n upper_bound = upper_bound * scale\n\n sigma2_opt = minimize_scalar(\n EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar),\n bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],\n method='Bounded')\n sigma2 = sigma2_opt.x\n\n # Threshold gamma term\n threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))\n # pos = np.sum(s > threshold)\n pos = torch.sum(s > threshold)\n\n # Formula (15) from [2]\n # d = torch.multiply(s[:pos]/2,\n # 1-torch.divide(\n # torch.tensor((L+M)*sigma2, device=s.device),\n # s[:pos]**2) + torch.sqrt((1-torch.divide(\n # torch.tensor(\n # (L+M)*sigma2, device=s.device),\n # s[:pos]**2))**2 -\n # 4*L*M*sigma2**2/s[:pos]**4))\n # d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt(\n # (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4))\n d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2\n + torch.sqrt((1 -\n (L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4))\n\n # Computation of the posterior\n # post = {}\n # post['ma'] = np.zeros(H)\n # post['mb'] = np.zeros(H)\n # post['sa2'] = np.zeros(H)\n # post['sb2'] = np.zeros(H)\n # post['cacb'] = np.zeros(H)\n\n # tau = np.multiply(d, s[:pos])/(M*sigma2)\n # delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)\n\n # post['ma'][:pos] = np.sqrt(np.multiply(d, delta))\n # post['mb'][:pos] = np.sqrt(np.divide(d, delta))\n # post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])\n # post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))\n # post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))\n # post['sigma2'] = sigma2\n # post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) +\n # (residual+np.sum(s**2))/sigma2 + np.sum(\n # M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau))\n\n return U[:, :pos], torch.diag(d), V[:, :pos] # , post\n\n\ndef EVBsigma2(sigma2, L, M, s, residual, xubar):\n H = len(s)\n\n alpha = L / M\n x = s**2 / (M * sigma2)\n\n z1 = x[x > xubar]\n z2 = x[x <= xubar]\n tau_z1 = tau(z1, alpha)\n\n term1 = np.sum(z2 - np.log(z2))\n term2 = np.sum(z1 - tau_z1)\n term3 = np.sum(np.log(np.divide(tau_z1 + 1, z1)))\n term4 = alpha * np.sum(np.log(tau_z1 / alpha + 1))\n\n obj = term1 + term2 + term3 + term4 + residual / (M * sigma2) + (L - H) * np.log(sigma2)\n\n return obj\n\n\ndef phi0(x):\n return x - np.log(x)\n\n\ndef phi1(x, alpha):\n return np.log(tau(x, alpha) + 1) + alpha * np.log(tau(x, alpha) / alpha + 1\n ) - tau(x, alpha)\n\n\ndef tau(x, alpha):\n return 0.5 * (x - (1 + alpha) + np.sqrt((x - (1 + alpha))**2 - 4 * alpha))\n\n\nclass Metrics:\n def __init__(self, params, linear: bool = False) -> None:\n '''\n parameters: list of torch.nn.Module.parameters()\n '''\n self.params = params\n self.history = list()\n mask = list()\n for param_idx, param in enumerate(params):\n param_shape = param.shape\n if not linear:\n if len(param_shape) != 4:\n mask.append(param_idx)\n else:\n if len(param_shape) != 4 and len(param_shape) != 2:\n mask.append(param_idx)\n self.mask = set(mask)\n\n def compute_low_rank(self,\n tensor: torch.Tensor,\n normalizer: float) -> torch.Tensor:\n if tensor.requires_grad:\n tensor = tensor.detach()\n try:\n tensor_size = tensor.shape\n if tensor_size[0] > tensor_size[1]:\n tensor = tensor.T\n U_approx, S_approx, V_approx = EVBMF(tensor)\n except RuntimeError:\n return None, None, None\n rank = S_approx.shape[0] / tensor_size[0] # normalizer\n 
low_rank_eigen = torch.diag(S_approx).data.cpu().numpy()\n if len(low_rank_eigen) != 0:\n condition = low_rank_eigen[0] / low_rank_eigen[-1]\n sum_low_rank_eigen = low_rank_eigen / \\\n max(low_rank_eigen)\n sum_low_rank_eigen = np.sum(sum_low_rank_eigen)\n else:\n condition = 0\n sum_low_rank_eigen = 0\n KG = sum_low_rank_eigen / tensor_size[0] # normalizer\n return rank, KG, condition\n\n def KG(self, epoch: int) -> np.ndarray:\n KG_list = list()\n for i, (index, metric) in enumerate(self.history[epoch]):\n if isinstance(metric, ConvLayerMetrics):\n KG_list.append((metric.input_channel.KG\n + metric.output_channel.KG) / 2)\n elif isinstance(metric, LayerMetrics):\n KG_list.append(metric.KG)\n return np.array(KG_list)\n\n def __call__(self) -> List[Tuple[int, Union[LayerMetrics,\n ConvLayerMetrics]]]:\n '''\n Computes the knowledge gain (S) and mapping condition (condition)\n '''\n metrics: List[Tuple[int, Union[LayerMetrics,\n ConvLayerMetrics]]] = list()\n for layer_index, layer in enumerate(self.params):\n if layer_index in self.mask:\n metrics.append((layer_index, None))\n continue\n # if np.less(np.prod(layer.shape), 10_000):\n # metrics.append((layer_index, None))\n if len(layer.shape) == 4:\n layer_tensor = layer.data\n tensor_size = layer_tensor.shape\n mode_3_unfold = layer_tensor.permute(1, 0, 2, 3)\n mode_3_unfold = torch.reshape(\n mode_3_unfold, [tensor_size[1], tensor_size[0]\n * tensor_size[2] * tensor_size[3]])\n mode_4_unfold = layer_tensor\n mode_4_unfold = torch.reshape(\n mode_4_unfold, [tensor_size[0], tensor_size[1]\n * tensor_size[2] * tensor_size[3]])\n in_rank, in_KG, in_condition = self.compute_low_rank(\n mode_3_unfold, tensor_size[1])\n if in_rank is None and in_KG is None and in_condition is None:\n if len(self.history) > 0:\n in_rank = self.history[-1][\n layer_index][1].input_channel.rank\n in_KG = self.history[-1][\n layer_index][1].input_channel.KG\n in_condition = self.history[-1][\n layer_index][1].input_channel.condition\n else:\n in_rank = in_KG = in_condition = 0.\n out_rank, out_KG, out_condition = self.compute_low_rank(\n mode_4_unfold, tensor_size[0])\n if out_rank is None and out_KG is None and out_condition is None:\n if len(self.history) > 0:\n out_rank = self.history[-1][\n layer_index][1].output_channel.rank\n out_KG = self.history[-1][\n layer_index][1].output_channel.KG\n out_condition = self.history[-1][\n layer_index][1].output_channel.condition\n else:\n out_rank = out_KG = out_condition = 0.\n metrics.append((layer_index, ConvLayerMetrics(\n input_channel=LayerMetrics(\n rank=in_rank,\n KG=in_KG,\n condition=in_condition),\n output_channel=LayerMetrics(\n rank=out_rank,\n KG=out_KG,\n condition=out_condition))))\n elif len(layer.shape) == 2:\n rank, KG, condition = self.compute_low_rank(\n layer, layer.shape[0])\n if rank is None and KG is None and condition is None:\n if len(self.history) > 0:\n rank = self.history[-1][layer_index][1].rank\n KG = self.history[-1][layer_index][1].KG\n condition = self.history[-1][layer_index][1].condition\n else:\n rank = KG = condition = 0.\n metrics.append((layer_index, LayerMetrics(\n rank=rank,\n KG=KG,\n condition=condition)))\n else:\n metrics.append((layer_index, None))\n self.history.append(metrics)\n return metrics\n\n\nclass Adas(Optimizer):\n \"\"\"\n Vectorized SGD from torch.optim.SGD\n \"\"\"\n\n def __init__(self,\n params,\n lr: float = required,\n beta: float = 0.8,\n step_size: int = None,\n linear: bool = True,\n gamma: float = 1,\n momentum: float = 0,\n dampening: float = 0,\n 
weight_decay: float = 0,\n nesterov: bool = False):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\n \"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\n \"Nesterov momentum requires a momentum and zero dampening\")\n super(Adas, self).__init__(params[:2], defaults)\n\n # Adas Specific stuff (not SGD)\n if np.less(beta, 0) or np.greater_equal(beta, 1):\n raise ValueError(f'Invalid beta: {beta}')\n if np.less(gamma, 0):\n raise ValueError(f'Invalid gamma: {gamma}')\n if step_size is not None:\n if np.less_equal(step_size, 0):\n raise ValueError(f'Invalid step_size: {step_size}')\n self.step_size = step_size\n self.gamma = gamma\n self.beta = beta\n self.metrics = metrics = Metrics(params=params[2][\"all_params\"], linear=linear)\n self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))\n self.velocity = np.zeros(\n len(self.metrics.params) - len(self.metrics.mask))\n self.not_ready = list(range(len(self.velocity)))\n self.init_lr = lr\n self.zeta = 1.\n self.KG = 0.\n\n def __setstate__(self, state):\n super(Adas, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def epoch_step(self, epoch: int) -> None:\n self.metrics()\n if epoch == 0:\n velocity = self.init_lr * np.ones(len(self.velocity))\n self.KG = self.metrics.KG(epoch)\n else:\n KG = self.metrics.KG(epoch)\n velocity = KG - self.KG\n self.KG = KG\n for idx in self.not_ready:\n if np.isclose(KG[idx], 0.):\n velocity[idx] = self.init_lr - \\\n self.beta * self.velocity[idx]\n else:\n self.not_ready.remove(idx)\n\n if self.step_size is not None:\n if epoch % self.step_size == 0 and epoch > 0:\n self.lr_vector *= self.gamma\n self.zeta *= self.gamma\n\n self.velocity = np.maximum(\n self.beta * self.velocity + self.zeta * velocity, 0.)\n count = 0\n for i in range(len(self.metrics.params)):\n if i in self.metrics.mask:\n self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)]\n else:\n self.lr_vector[i] = self.velocity[count]\n count += 1\n\n def step(self, closure: callable = None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n iteration_group = 0\n for group in self.param_groups:\n iteration_group += 1\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p_index, p in enumerate(group['params']):\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(p.data, alpha=weight_decay)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(\n d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n # p.data.add_(-group['lr'], d_p)\n p.data.add_(d_p, alpha=-self.lr_vector[p_index])\n\n return loss\n"
] |
[
[
"numpy.divide",
"torch.reshape",
"numpy.array",
"numpy.less",
"torch.sqrt",
"numpy.isclose",
"numpy.log",
"numpy.ceil",
"numpy.sum",
"torch.svd",
"numpy.greater_equal",
"numpy.less_equal",
"numpy.sqrt",
"numpy.maximum",
"torch.diag",
"torch.clone",
"torch.mean",
"torch.sum"
]
] |
mmachenry/pie-pie-chart
|
[
"d5706c85381b58a3990a20021f6c35c28ee51e0b"
] |
[
"pie_pie_chart.py"
] |
[
"import RPi.GPIO as GPIO\nimport hx711\nimport matplotlib.pyplot as plt\n \n# Read initial calibration and tare weight data then display the plot.\ndef main():\n GPIO.setmode(GPIO.BCM)\n hx = hx711.HX711(dout_pin=5, pd_sck_pin=6)\n zero_the_scale(hx)\n calibrate_scale(hx)\n (tare_weight, total_weight) = get_tare_and_full_weight(hx)\n plot_reading(hx, tare_weight, total_weight - tare_weight)\n\n# Set scale position to zero. The scale should be empty when this is run.\ndef zero_the_scale(hx):\n err = hx.zero()\n if err:\n raise ValueError('Tare is unsuccessful.')\n\n zero_reading = hx.get_raw_data_mean()\n if zero_reading:\n print('Data subtracted by offset: ', zero_reading)\n else:\n raise ValueError('Invalide zero reading')\n\n# Calibrate the scale with prompts to the user.\ndef calibrate_scale (hx):\n input('Put known weight on the scale and then press Enter')\n reading = hx.get_data_mean()\n if reading:\n print('Mean value from HX711 subtracted by offset:', reading)\n user_input = input('Write how many grams it was and press Enter: ')\n try:\n weight = float(user_input)\n print(weight, 'grams')\n except ValueError:\n print('Expected integer or float and I have got:', user_input)\n\n ratio = reading / weight\n hx.set_scale_ratio(ratio)\n print('Ratio is set.')\n else:\n raise ValueError('Cannot calculate mean value.')\n\n# Prompt user and get readings for the tare weight and full pie.\ndef get_tare_and_full_weight (hx):\n input('Put the pie tin on the scale for tare weight and press enter.')\n tare_weight = hx.get_weight_mean(20)\n print (\"Tare weight is \", tare_weight, \"g\")\n\n input('Put the pie on the scale for a full weight and press enter.')\n total_weight = hx.get_weight_mean(20)\n print (\"Full weight is \", total_weight, \"g\")\n\n return (tare_weight, total_weight)\n\n# Continually read data from the sensor, update the pie chart, and display.\ndef plot_reading (hx, tare_weight, full_weight):\n while True:\n current_weight = hx.get_weight_mean(20)\n remaining_weight = max(0,current_weight - tare_weight)\n #print (\"Current weight is \", current_weight, \"g\")\n\n labels = ['Remaining', 'Eaten']\n sizes = [remaining_weight, max(0,full_weight - remaining_weight)]\n colors = ['sandybrown', 'lightgrey']\n explode = (0, 0.1)\n \n title_font = { 'color': 'blue', 'weight': 'bold', 'size': 30 }\n label_font = { 'color': 'black', 'weight': 'normal', 'size': 20 }\n\n h = plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=180,\n textprops=label_font)\n\n plt.title(\"Pi Day Pie Pie Chart\", title_font)\n\n plt.plot()\n plt.draw()\n plt.pause(1)\n plt.clf()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except (KeyboardInterrupt, SystemExit):\n print('Happy Pi Day!')\n\n finally:\n GPIO.cleanup()\n"
] |
[
[
"matplotlib.pyplot.pie",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.clf"
]
] |
jcchan23/SAIL
|
[
"878c59e9f1b4e6df3e2424c8213c1df25459e950",
"878c59e9f1b4e6df3e2424c8213c1df25459e950",
"878c59e9f1b4e6df3e2424c8213c1df25459e950"
] |
[
"Repeat/CoMPT/utils_node.py",
"Repeat/MTDSite/utils.py",
"Repeat/GraphSite/dataset.py"
] |
[
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : utils_node.py\n@Time : 2022/03/08 14:35:13\n@Author : Jianwen Chen\n@Version : 1.0\n@Contact : chenjw48@mail2.sysu.edu.cn\n@License : (C)Copyright 2021-2022, SAIL-Lab\n'''\n######################################## import area ########################################\n\n# common library\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn import metrics\n\nfrom torch.optim.lr_scheduler import _LRScheduler\n\n######################################## function area ########################################\n\ndef seed_everything(seed=2021):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\ndef initialize_weights(model):\n \"\"\"\n Initializes the weights of a model in place.\n\n :param model: An nn.Module.\n \"\"\"\n for param in model.parameters():\n if param.dim() > 1:\n nn.init.xavier_normal_(param)\n\n\ndef loop(data_loader, model, optimizer, scheduler, device):\n \n batch_size = data_loader.batch_size\n data_loader = tqdm(data_loader) if optimizer is not None else data_loader\n\n loss_sum, y_true, y_pred = 0.0, list(), list()\n \n for batch in data_loader:\n \n smiles, mols, batch_node_features, batch_edge_features, batch_distance_matrix, labels = batch\n # add mask\n batch_masks = torch.sum(torch.abs(batch_node_features), dim=-1) != 0\n \n # (batch, max_length, node_dim)\n batch_node_features = batch_node_features.to(device)\n # (batch, max_length, max_length, edge_dim)\n batch_edge_features = batch_edge_features.to(device)\n # (batch, max_length, max_length)\n batch_distance_matrix = batch_distance_matrix.to(device)\n # (batch, max_length)\n batch_masks = batch_masks.to(device)\n # (batch, max_length, 1)\n labels = labels.to(device)\n \n # (batch, max_length, 1)\n outputs = model(batch_node_features, batch_edge_features, batch_distance_matrix, batch_masks, device)\n \n # loss calculation\n loss = cal_loss(y_true=labels, y_pred=outputs, device=device)\n loss_sum += loss.item()\n \n if optimizer is not None:\n # clear gradients for this training step\n optimizer.zero_grad()\n # back propagation, compute gradients\n loss.backward()\n # apply gradients\n optimizer.step()\n \n # NormLR need step every batch\n if scheduler is not None:\n scheduler.step()\n \n # collect result\n labels = labels.detach().cpu().numpy()\n outputs = outputs.detach().cpu().numpy()\n \n y_true.append([])\n y_pred.append([])\n \n for label, output in zip(labels, outputs):\n label, output = label.flatten(), output.flatten()\n for l, o in zip(label, output):\n if l != 0.0:\n y_true[-1].append(l)\n y_pred[-1].append(o)\n \n # clear cuda cache\n torch.cuda.empty_cache()\n \n # metric calculation\n results = cal_metric(y_true=y_true, y_pred=y_pred)\n results['loss'] = loss_sum / (len(data_loader) * batch_size)\n \n return results\n\n\ndef cal_loss(y_true, y_pred, device):\n y_true, y_pred = y_true.flatten(), y_pred.flatten()\n y_mask = torch.where(y_true != 0.0, torch.full_like(y_true, 1), torch.full_like(y_true, 0))\n loss = torch.sum(torch.abs(y_true - y_pred) * y_mask) / torch.sum(y_mask)\n return loss\n\n\ndef cal_metric(y_true, y_pred):\n concatenate_true, concatenate_pred = np.concatenate(y_true, axis=-1), np.concatenate(y_pred, axis=-1)\n mae = metrics.mean_absolute_error(concatenate_true, 
concatenate_pred)\n r2 = metrics.r2_score(concatenate_true, concatenate_pred)\n return {'mae':mae, 'r2':r2}\n\n\nclass NoamLR(_LRScheduler):\n \"\"\"\n Noam learning rate scheduler with piecewise linear increase and exponential decay.\n\n The learning rate increases linearly from init_lr to max_lr over the course of\n the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).\n Then the learning rate decreases exponentially from max_lr to final_lr over the\n course of the remaining total_steps - warmup_steps (where total_steps =\n total_epochs * steps_per_epoch). This is roughly based on the learning rate\n schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).\n \"\"\"\n def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr):\n \"\"\"\n Initializes the learning rate scheduler.\n\n :param optimizer: A PyTorch optimizer.\n :param warmup_epochs: The number of epochs during which to linearly increase the learning rate.\n :param total_epochs: The total number of epochs.\n :param steps_per_epoch: The number of steps (batches) per epoch.\n :param init_lr: The initial learning rate.\n :param max_lr: The maximum learning rate (achieved after warmup_epochs).\n :param final_lr: The final learning rate (achieved after total_epochs).\n \"\"\"\n assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr)\n\n self.num_lrs = len(optimizer.param_groups)\n\n self.optimizer = optimizer\n self.warmup_epochs = np.array(warmup_epochs)\n self.total_epochs = np.array(total_epochs)\n self.steps_per_epoch = steps_per_epoch\n self.init_lr = np.array(init_lr)\n self.max_lr = np.array(max_lr)\n self.final_lr = np.array(final_lr)\n\n self.current_step = 0\n self.lr = init_lr\n self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)\n self.total_steps = self.total_epochs * self.steps_per_epoch\n self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps\n\n self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))\n\n super(NoamLR, self).__init__(optimizer)\n\n def get_lr(self):\n \"\"\"Gets a list of the current learning rates.\"\"\"\n return list(self.lr)\n\n def step(self, current_step: int = None):\n \"\"\"\n Updates the learning rate by taking a step.\n\n :param current_step: Optionally specify what step to set the learning rate to.\n If None, current_step = self.current_step + 1.\n \"\"\"\n if current_step is not None:\n self.current_step = current_step\n else:\n self.current_step += 1\n\n for i in range(self.num_lrs):\n if self.current_step <= self.warmup_steps[i]:\n self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]\n elif self.current_step <= self.total_steps[i]:\n self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))\n else: # theoretically this case should never be reached since training should stop at total_steps\n self.lr[i] = self.final_lr[i]\n\n self.optimizer.param_groups[i]['lr'] = self.lr[i]\n\n",
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : utils.py\n@Time : 2022/01/24 11:12:56\n@Author : Jianwen Chen\n@Version : 1.0\n@Contact : chenjw48@mail2.sysu.edu.cn\n@License : (C)Copyright 2021-2022, SAIL-Lab\n'''\n######################################## import area ########################################\n\n# common library\nimport os\nimport random\nimport pickle\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn import metrics\nfrom torch.optim.lr_scheduler import _LRScheduler\n\n######################################## function area ########################################\n\ndef seed_everything(seed=2021):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\ndef initialize_weights(model):\n \"\"\"\n Initializes the weights of a model in place.\n :param model: An nn.Module.\n \"\"\"\n for param in model.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param)\n\n\ndef loop(data_loader, model, optimizer, scheduler, device):\n batch_size = data_loader.batch_size\n data_loader = tqdm(data_loader) if optimizer is not None else data_loader\n \n loss_sum, y_true, y_pred = 0.0, list(), list()\n \n predictions = dict()\n \n for batch in data_loader:\n \n names, sequences, graphs, labels, masks = batch\n \n graphs = graphs.to(device)\n labels = labels.to(device)\n outputs = model(graphs, masks, device)\n \n # loss calculation\n # pad_sequence need cpu in model forward and need gpu in loss calculation\n masks = masks.to(device)\n loss = cal_loss(labels, outputs, masks)\n loss_sum += loss.data\n \n if optimizer is not None:\n # clear gradients for this training step\n optimizer.zero_grad()\n # back propagation, compute gradients\n loss.backward()\n # apply gradients\n optimizer.step()\n \n # NormLR needs step every batch\n if scheduler is not None:\n scheduler.step()\n \n # collect result\n labels = labels.detach().cpu().numpy()\n scores = torch.softmax(outputs, dim=1)\n scores = scores.detach().cpu().numpy()\n scores = scores[:, 1]\n for name, (idx, length) in zip(names, masks):\n y_true.append(labels[idx:idx+length].tolist())\n y_pred.append(scores[idx:idx+length].tolist())\n predictions[name] = scores[idx:idx+length].tolist()\n \n # clear cuda cache\n torch.cuda.empty_cache()\n\n # train with threshold = 0.5, test without using threshold\n if optimizer is not None:\n results = cal_metric(y_true, y_pred, best_threshold=0.5)\n results['loss'] = loss_sum / (len(data_loader) * batch_size)\n else:\n results = cal_metric(y_true, y_pred, best_threshold=None)\n\n return results, predictions\n\n\ndef cal_loss(y_true, y_pred, y_mask):\n # y_true.shape = [batch_num_nodes], y_pred.shape = [batch_num_nodes, 2], total_loss.shape = [batch_num_nodes]\n total_loss = nn.CrossEntropyLoss(reduction='none')(y_pred, y_true)\n loss = 0.0\n for idx, length in y_mask:\n loss = loss + torch.mean(total_loss[idx:idx+length])\n return loss\n\n\ndef cal_metric(y_true, y_pred, best_threshold=None):\n concatenate_true, concatenate_pred = np.concatenate(y_true, axis=-1), np.concatenate(y_pred, axis=-1)\n \n if best_threshold is None:\n best_f1, best_threshold = 0, 0\n for threshold in range(100):\n threshold /= 100\n binary_true = concatenate_true\n binary_pred = [1 if pred >= threshold else 0 for pred in concatenate_pred]\n f1 = metrics.f1_score(binary_true, binary_pred)\n if 
f1 > best_f1:\n best_f1, best_threshold = f1, threshold\n \n binary_true = concatenate_true\n binary_pred = [1 if pred >= best_threshold else 0 for pred in concatenate_pred]\n \n accuracy = metrics.accuracy_score(binary_true, binary_pred)\n auroc = metrics.roc_auc_score(binary_true, concatenate_pred)\n mcc = metrics.matthews_corrcoef(binary_true, binary_pred)\n \n TN, FP, FN, TP = metrics.confusion_matrix(binary_true, binary_pred).ravel()\n sensitive = TP / (TP + FN)\n specificity = TN / (FP + TN)\n precision = TP / (TP + FP)\n \n return {'accuracy': accuracy, 'auroc': auroc, 'mcc': mcc, 'sensitive': sensitive, 'specificity': specificity, 'precision': precision,'threshold': best_threshold}\n\n\nclass NoamLR(_LRScheduler):\n \"\"\"\n Noam learning rate scheduler with piecewise linear increase and exponential decay.\n The learning rate increases linearly from init_lr to max_lr over the course of\n the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).\n Then the learning rate decreases exponentially from max_lr to final_lr over the\n course of the remaining total_steps - warmup_steps (where total_steps =\n total_epochs * steps_per_epoch). This is roughly based on the learning rate\n schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).\n \"\"\"\n def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr):\n \"\"\"\n Initializes the learning rate scheduler.\n :param optimizer: A PyTorch optimizer.\n :param warmup_epochs: The number of epochs during which to linearly increase the learning rate.\n :param total_epochs: The total number of epochs.\n :param steps_per_epoch: The number of steps (batches) per epoch.\n :param init_lr: The initial learning rate.\n :param max_lr: The maximum learning rate (achieved after warmup_epochs).\n :param final_lr: The final learning rate (achieved after total_epochs).\n \"\"\"\n assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr)\n\n self.num_lrs = len(optimizer.param_groups)\n\n self.optimizer = optimizer\n self.warmup_epochs = np.array(warmup_epochs)\n self.total_epochs = np.array(total_epochs)\n self.steps_per_epoch = steps_per_epoch\n self.init_lr = np.array(init_lr)\n self.max_lr = np.array(max_lr)\n self.final_lr = np.array(final_lr)\n\n self.current_step = 0\n self.lr = init_lr\n self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)\n self.total_steps = self.total_epochs * self.steps_per_epoch\n self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps\n\n self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))\n\n super(NoamLR, self).__init__(optimizer)\n\n def get_lr(self):\n \"\"\"Gets a list of the current learning rates.\"\"\"\n return list(self.lr)\n\n def step(self, current_step: int = None):\n \"\"\"\n Updates the learning rate by taking a step.\n :param current_step: Optionally specify what step to set the learning rate to.\n If None, current_step = self.current_step + 1.\n \"\"\"\n if current_step is not None:\n self.current_step = current_step\n else:\n self.current_step += 1\n\n for i in range(self.num_lrs):\n if self.current_step <= self.warmup_steps[i]:\n self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]\n elif self.current_step <= self.total_steps[i]:\n self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))\n 
else: # theoretically this case should never be reached since training should stop at total_steps\n self.lr[i] = self.final_lr[i]\n\n self.optimizer.param_groups[i]['lr'] = self.lr[i]",
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : dataset.py\n@Time : 2022/02/24 10:29:05\n@Author : Jianwen Chen\n@Version : 1.0\n@Contact : chenjw48@mail2.sysu.edu.cn\n@License : (C)Copyright 2021-2022, SAIL-Lab\n'''\n######################################## import area ########################################\n\n# common library\nimport torch\nimport pickle\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom tqdm import tqdm\n\n######################################## function area ########################################\n\ndef get_loader(names_list, sequences_dict, graphs_dict, labels_dict, batch_size, shuffle, num_workers):\n dataset = ProteinDataset(names_list=names_list, sequences_dict=sequences_dict, graphs_dict=graphs_dict, labels_dict=labels_dict)\n return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn), dataset.get_features_dim()\n\n\ndef collate_fn(samples):\n names, sequences, graphs, labels = map(list, zip(*samples))\n \n # padding features\n batch_node_features, batch_edge_features = list(), list()\n max_length = max([len(sequence) for sequence in sequences])\n for (node_features, edge_features) in graphs:\n batch_node_features.append(pad_array(node_features, (max_length, node_features.shape[-1])))\n batch_edge_features.append(pad_array(edge_features, (max_length, max_length)))\n \n # mask labels\n idx, masks = 0, list()\n for label in labels:\n masks.append([idx, len(label)])\n idx += len(label)\n \n return names, sequences, \\\n torch.from_numpy(np.array(batch_node_features)).float(),\\\n torch.from_numpy(np.array(batch_edge_features)).float(),\\\n torch.from_numpy(np.concatenate(labels, axis=-1)).long(),\\\n torch.from_numpy(np.array(masks)).long()\n \n\ndef pad_array(array, shape):\n padded_array = np.zeros(shape, dtype=np.float64)\n if len(shape) == 2:\n padded_array[:array.shape[0], :array.shape[1]] = array\n elif len(shape) == 3:\n padded_array[:array.shape[0], :array.shape[1], :] = array\n return padded_array\n\n\ndef normalize_dis(mx):\n mx = 2 / (1 + np.maximum(mx/4, 1))\n mx[np.isinf(mx)] = 0\n mx[np.isnan(mx)] = 0\n return mx\n\n\ndef normalize_adj(mx):\n rowsum = np.array(mx.sum(1))\n r_inv = (rowsum ** -0.5).flatten()\n r_inv[np.isinf(r_inv)] = 0\n r_mat_inv = np.diag(r_inv)\n result = r_mat_inv @ mx @ r_mat_inv\n return result\n\n\nclass ProteinDataset(Dataset):\n def __init__(self, names_list, sequences_dict, graphs_dict, labels_dict):\n super(ProteinDataset, self).__init__()\n self.names = names_list\n self.sequences = [sequences_dict[name] for name in names_list]\n self.graphs = [graphs_dict[name] for name in names_list]\n self.labels = [labels_dict[name] for name in names_list]\n \n def __len__(self):\n return len(self.names)\n \n def __getitem__(self, idx):\n return self.names[idx], self.sequences[idx], self.graphs[idx], self.labels[idx]\n \n def get_features_dim(self):\n return max([node_features.shape[1] for (node_features, _) in self.graphs]), None\n \n\ndef load_dataset(path):\n with open(path, 'r') as f:\n lines = f.readlines()\n \n names_list, sequences_dict, labels_dict = list(), dict(), dict()\n temp_name = \"\"\n for idx, line in enumerate(lines):\n line = line.strip()\n if line == \"\":\n continue\n elif idx % 3 == 0:\n temp_name = line[1:]\n names_list.append(line[1:])\n elif idx % 3 == 1:\n sequences_dict[temp_name] = line\n else:\n labels_dict[temp_name] = [int(num) for num in line]\n temp_name = \"\"\n return names_list, sequences_dict, 
labels_dict\n\n\n######################################## main area ########################################\n\nif __name__ == '__main__':\n # build the dgl graph cache\n data_path = './data/source'\n result_path = './data/preprocess'\n \n for dataset_name in ['train_569', 'test_129', 'test_181']:\n print(f'build {dataset_name} dgl graph')\n names_list, sequences_dict, labels_dict = load_dataset(f'{data_path}/{dataset_name}.fasta')\n\n graphs_dict = dict()\n \n for name in tqdm(names_list):\n sequence, label = sequences_dict[name], labels_dict[name]\n af2_features = np.load(f'{result_path}/features/af2_node_features/{name}.npy')\n pssm_features = np.load(f'{result_path}/features/pssm/{name}.npy')\n hmm_features = np.load(f'{result_path}/features/hmm/{name}.npy')\n dssp_features = np.load(f'{result_path}/features/dssp/{name}.npy')\n \n # [L, 384 + 20 + 20 + 14 = 438]\n node_features = np.concatenate([af2_features, pssm_features, hmm_features, dssp_features], axis=-1)\n\n # [L, L]\n distance_map = np.load(f'{result_path}/features/af2_edge_features/{name}.npy')\n # mask the -1's rows and columns\n distance_map = np.where(distance_map >= 0, distance_map, float('inf'))\n distance_weight = normalize_dis(distance_map)\n edge_features = distance_weight / (np.sum(distance_weight, axis=-1, keepdims=True) + 1e-5)\n \n if not len(sequence) == len(label) == node_features.shape[0]:\n print(f\"{dataset_name} {name} sequence, label, node features error!\")\n assert False\n if not len(sequence) == edge_features.shape[0] == edge_features.shape[1]:\n print(f\"{dataset_name} {name} edge features error!\")\n assert False\n \n graphs_dict[name] = (node_features, edge_features)\n \n # save graphs\n with open(f'{result_path}/{dataset_name}.pickle', 'wb') as fw:\n pickle.dump([names_list, sequences_dict, graphs_dict, labels_dict], fw)\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.full_like",
"torch.manual_seed",
"torch.abs",
"sklearn.metrics.mean_absolute_error",
"torch.cuda.empty_cache",
"sklearn.metrics.r2_score",
"torch.nn.init.xavier_normal_",
"torch.sum"
],
[
"numpy.concatenate",
"numpy.array",
"torch.cuda.manual_seed",
"sklearn.metrics.confusion_matrix",
"numpy.random.seed",
"sklearn.metrics.matthews_corrcoef",
"torch.nn.CrossEntropyLoss",
"torch.nn.init.xavier_uniform_",
"torch.softmax",
"sklearn.metrics.accuracy_score",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.mean",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score"
],
[
"numpy.isinf",
"numpy.concatenate",
"numpy.array",
"numpy.isnan",
"numpy.zeros",
"numpy.sum",
"numpy.load",
"torch.utils.data.DataLoader",
"numpy.diag",
"numpy.maximum"
]
] |
aasir22/tools_classification
|
[
"f5a2606f5fa07c1ebc161c467d17f4e7a04c5ebb"
] |
[
"training.py"
] |
[
"from tensorflow.keras.layers import Dense, Flatten\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom datetime import datetime\nimport numpy as np\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom Logger.app_logger import App_logger\nfrom sklearn.metrics import accuracy_score,classification_report,confusion_matrix\n\n\n\n\nclass Training:\n\n def __init__(self,train_path,test_path,val_path):\n self.train_path = train_path\n self.test_path = test_path\n self.val_path = val_path\n self.file_object = open(\"Training_Logs/ModelTrainingLog.txt\", 'a+')\n self.log_object = App_logger()\n\n def train(self):\n self.log_object.log(self.file_object,\"Entered in to train method in Training class.Training started\")\n try:\n x_train = []\n\n for folder in os.listdir(self.train_path):\n\n sub_path = self.train_path + \"/\" + folder\n\n for img in os.listdir(sub_path):\n image_path = sub_path + \"/\" + img\n img_arr = cv2.imread(image_path)\n if img_arr is None:\n os.remove(image_path)\n continue\n elif img_arr.shape[0] < 224:\n os.remove(image_path)\n continue\n else:\n img_arr = cv2.resize(img_arr, (224, 224))\n x_train.append(img_arr)\n\n x_test = []\n\n for folder in os.listdir(self.test_path):\n\n sub_path = self.test_path + \"/\" + folder\n\n for img in os.listdir(sub_path):\n image_path = sub_path + \"/\" + img\n\n img_arr = cv2.imread(image_path)\n if img_arr is None:\n os.remove(image_path)\n continue\n elif img_arr.shape[0] < 224:\n os.remove(image_path)\n continue\n else:\n\n img_arr = cv2.resize(img_arr, (224, 224))\n\n x_test.append(img_arr)\n\n\n x_val = []\n\n for folder in os.listdir(self.val_path):\n\n sub_path = self.val_path + \"/\" + folder\n\n for img in os.listdir(sub_path):\n image_path = sub_path + \"/\" + img\n img_arr = cv2.imread(image_path)\n if img_arr is None:\n os.remove(image_path)\n continue\n elif img_arr.shape[0] < 224:\n os.remove(image_path)\n continue\n else:\n img_arr = cv2.resize(img_arr, (224, 224))\n x_val.append(img_arr)\n self.log_object.log(self.file_object, \"Entered in to train method in Training class.train,test,val split successfull\")\n\n train_x = np.array(x_train) / 255.0\n test_x = np.array(x_test) / 255.0\n val_x = np.array(x_val) / 255.0\n\n train_datagen = ImageDataGenerator(rescale=1. / 255)\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n val_datagen = ImageDataGenerator(rescale=1. / 255)\n\n training_set = train_datagen.flow_from_directory(self.train_path,\n target_size=(224, 224),\n batch_size=32,\n class_mode='sparse')\n test_set = test_datagen.flow_from_directory(self.test_path,\n target_size=(224, 224),\n batch_size=32,\n class_mode='sparse')\n val_set = val_datagen.flow_from_directory(self.val_path,\n target_size=(224, 224),\n batch_size=32,\n class_mode='sparse')\n\n train_y = training_set.classes\n test_y = test_set.classes\n val_y = val_set.classes\n\n IMAGE_SIZE = [224, 224]\n\n vgg = VGG19(input_shape= IMAGE_SIZE + [3],weights='imagenet',include_top=False)\n self.log_object.log(self.file_object, \"Entered in to train method in Training class. 
Model successfully initialized\")\n\n for layer in vgg.layers:\n layer.trainable = False\n\n x = Flatten() (vgg.output)\n\n prediction = Dense(5 ,activation='softmax') (x)\n model = Model(inputs=vgg.input,outputs = prediction)\n model.summary()\n\n model.compile(loss = 'sparse_categorical_crossentropy',\n optimizer='adam',metrics=['accuracy'])\n self.log_object.log(self.file_object, \"Entered in to train method in Training class.Model compile successfull\")\n file_path = 'vgg19_model/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'\n self.log_object.log(self.file_object,\"check point directory created\")\n check_point = ModelCheckpoint(file_path,monitor='val_accuracy', verbose=1,save_best_only=True, mode='max')\n start = datetime.now()\n self.log_object.log(self.file_object, f\"Entered in to train method in Training class.Training start time {start}\")\n history = model.fit(train_x,train_y,\n validation_data= (val_x,val_y),\n epochs=20,\n callbacks = [check_point],\n batch_size=64, shuffle=True)\n\n duration = datetime.now() - start\n self.log_object.log(self.file_object, f\"Entered in to train method in Training class.Total time taken is {duration}\")\n\n model.save('mech_tools_model.h5')\n self.log_object.log(self.file_object, f\"Entered in to train method in Training class.model saved successfully\")\n\n\n\n # accuracies\n plt.plot(history.history['accuracy'], label='train acc')\n plt.plot(history.history['val_accuracy'], label='val acc')\n plt.legend()\n plt.savefig('vgg-acc-rps-1.png')\n\n # loss\n plt.plot(history.history['loss'], label='train loss')\n plt.plot(history.history['val_loss'], label='val loss')\n plt.legend()\n plt.savefig('vgg-loss-rps-1.png')\n\n self.log_object.log(self.file_object, \"Entered in to train method in Training class.model evaluation started\")\n model.evaluate(test_x, test_y, batch_size=32)\n\n # predict\n y_pred = model.predict(test_x)\n y_pred = np.argmax(y_pred, axis=1)\n self.log_object.log(self.file_object, f\"Entered in to train method in Training class.classification report {classification_report(y_pred, test_y)}\")\n self.log_object.log(self.file_object, f\"Entered in to train method in Training class.confusion matrix is{confusion_matrix(y_pred, test_y)}\")\n except Exception as e:\n # logging the unsuccessful Training\n self.log_object.log(self.file_object, 'Unsuccessful End of Training')\n self.log_object.log(self.file_object,f\"exception occured.exception is {e}\")\n raise Exception\n self.file_object.close()\n\nif __name__ == \"__main__\":\n train_path = \"final_dataset/train\"\n test_path = \"final_dataset/test\"\n val_path = \"final_dataset/val\"\n train_model = Training(train_path, test_path, val_path)\n train_model.train()"
] |
[
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"sklearn.metrics.confusion_matrix",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"tensorflow.keras.models.Model",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Dense",
"sklearn.metrics.classification_report",
"numpy.argmax",
"tensorflow.keras.applications.vgg19.VGG19"
]
] |
Tenvence/polar-inst
|
[
"95b2ef2fbc666469b031367e6aeb471d0465c272"
] |
[
"model/polar_inst.py"
] |
[
"import torch\nimport torch.nn as nn\n\nfrom model.modules.stage_backbone import StageBackbone\nfrom model.modules.feature_pyramid_net import FeaturePyramidNet\nfrom model.modules.polar_head import PolarHead\n\n\nclass PolarInst(nn.Module):\n def __init__(self, num_polars, num_channels, num_classes):\n super(PolarInst, self).__init__()\n\n self.num_classes = num_classes\n\n self.backbone = StageBackbone()\n self.fpn = FeaturePyramidNet(num_channels)\n self.polar_head = PolarHead(num_polars, num_channels, num_classes)\n\n self.distance_scales = [nn.Parameter(torch.tensor(1., dtype=torch.float)) for _ in range(5)]\n\n def forward(self, x):\n batch_size = x.size(0)\n\n backbone_outs = self.backbone(x)\n fpn_outs = self.fpn(backbone_outs['c3'], backbone_outs['c4'], backbone_outs['c5'])\n\n class_pred, distance_pred, centerness_pred = [], [], []\n for idx, (distance_scale, fpn_out) in enumerate(zip(self.distance_scales, fpn_outs.values())):\n head_out = self.polar_head(fpn_out)\n\n head_out['distance'] *= distance_scale\n head_out['distance'] = head_out['distance'].exp()\n\n class_pred.append(head_out['cls'].permute(0, 2, 3, 1).reshape(batch_size, -1, self.num_classes))\n distance_pred.append(head_out['distance'].permute(0, 2, 3, 1).reshape(batch_size, -1, 4))\n centerness_pred.append(head_out['centerness'].permute(0, 2, 3, 1).reshape(batch_size, -1))\n\n class_pred = torch.cat(class_pred, dim=1)\n distance_pred = torch.cat(distance_pred, dim=1)\n centerness_pred = torch.cat(centerness_pred, dim=1)\n\n return class_pred, distance_pred, centerness_pred\n"
] |
[
[
"torch.cat",
"torch.tensor"
]
] |