repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
zjjliujs/caffe
|
[
"9d17cb456274f8e579dfcb0b1e0097d604ae37c6"
] |
[
"python/caffe_double/test/test_solver.py"
] |
[
"import unittest\nimport tempfile\nimport os\nimport numpy as np\nimport six\n\nimport caffe_double\nfrom test_net import simple_net_file\n\n\nclass TestSolver(unittest.TestCase):\n def setUp(self):\n self.num_output = 13\n net_f = simple_net_file(self.num_output)\n f = tempfile.NamedTemporaryFile(mode='w+', delete=False)\n f.write(\"\"\"net: '\"\"\" + net_f + \"\"\"'\n test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9\n weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75\n display: 100 max_iter: 100 snapshot_after_train: false\n snapshot_prefix: \"model\" \"\"\")\n f.close()\n self.solver = caffe_double.SGDSolver(f.name)\n # also make sure get_solver runs\n caffe_double.get_solver(f.name)\n caffe_double.set_mode_cpu()\n # fill in valid labels\n self.solver.net.blobs['label'].data[...] = \\\n np.random.randint(self.num_output,\n size=self.solver.net.blobs['label'].data.shape)\n self.solver.test_nets[0].blobs['label'].data[...] = \\\n np.random.randint(self.num_output,\n size=self.solver.test_nets[0].blobs['label'].data.shape)\n os.remove(f.name)\n os.remove(net_f)\n\n def test_solve(self):\n self.assertEqual(self.solver.iter, 0)\n self.solver.solve()\n self.assertEqual(self.solver.iter, 100)\n\n def test_apply_update(self):\n net = self.solver.net\n data = net.layers[1].blobs[0].data[...]\n # Reset the weights of that layer to 0\n data[...] = 0\n net.layers[1].blobs[0].diff[...] = 1\n # Apply the update, the initial learning rate should be 0.01\n self.solver.apply_update()\n # Check that the new weights are -0.01, with a precision of 1e-7\n self.assertTrue((data - -0.01 * np.ones(data.shape)).max() < 1e-7)\n\n def test_net_memory(self):\n \"\"\"Check that nets survive after the solver is destroyed.\"\"\"\n\n nets = [self.solver.net] + list(self.solver.test_nets)\n self.assertEqual(len(nets), 2)\n del self.solver\n\n total = 0\n for net in nets:\n for ps in six.itervalues(net.params):\n for p in ps:\n total += p.data.sum() + p.diff.sum()\n for bl in six.itervalues(net.blobs):\n total += bl.data.sum() + bl.diff.sum()\n\n def test_snapshot(self):\n self.solver.snapshot()\n # Check that these files exist and then remove them\n files = ['model_iter_0.caffemodel', 'model_iter_0.solverstate']\n for fn in files:\n assert os.path.isfile(fn)\n os.remove(fn)\n"
] |
[
[
"numpy.ones",
"numpy.random.randint"
]
] |
stepinski/machinelearning
|
[
"1f84883a25616da4cd76bb4655267efd3421e561"
] |
[
"mit-ml/mnist/part2-twodigit/mlp.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom train_utils import batchify_data, run_epoch, train_model, Flatten\nimport utils_multiMNIST as U\npath_to_data_dir = '../Datasets/'\nuse_mini_dataset = True\n\nbatch_size = 64\nnb_classes = 10\nnb_epoch = 30\nnum_classes = 10\nimg_rows, img_cols = 42, 28 # input image dimensions\n\nclass MLP(nn.Module):\n\n def __init__(self, input_dimension):\n super(MLP, self).__init__()\n print(input_dimension)\n self.flatten = Flatten()\n self.linear = nn.Linear(input_dimension,64)\n self.output1 = lambda x : nn.Linear(64,x) # for top digit\n self.output2 = nn.Linear(64,64) \n \n\n def forward(self, x):\n xf = self.flatten(x)\n # use model layers to predict the two digits\n input= self.linear(xf)\n out1=self.output1(input,out_features=x.shape[0])\n out2=self.output2(input,out_features=x.shape[0])\n \n # print(\"test\")\n # print(xl[:, :1])\n # print(xl[:, 1:2])\n return out1,out2\n\ndef main():\n X_train, y_train, X_test, y_test = U.get_data(path_to_data_dir, use_mini_dataset)\n\n # Split into train and dev\n dev_split_index = int(9 * len(X_train) / 10)\n X_dev = X_train[dev_split_index:]\n y_dev = [y_train[0][dev_split_index:], y_train[1][dev_split_index:]]\n X_train = X_train[:dev_split_index]\n y_train = [y_train[0][:dev_split_index], y_train[1][:dev_split_index]]\n\n permutation = np.array([i for i in range(len(X_train))])\n np.random.shuffle(permutation)\n X_train = [X_train[i] for i in permutation]\n y_train = [[y_train[0][i] for i in permutation], [y_train[1][i] for i in permutation]]\n\n # Split dataset into batches\n train_batches = batchify_data(X_train, y_train, batch_size)\n dev_batches = batchify_data(X_dev, y_dev, batch_size)\n test_batches = batchify_data(X_test, y_test, batch_size)\n\n # Load model\n input_dimension = img_rows * img_cols\n model = MLP(input_dimension) # TODO add proper layers to MLP class above\n\n # Train\n train_model(train_batches, dev_batches, model)\n\n ## Evaluate the model on test data\n loss, acc = run_epoch(test_batches, model.eval(), None)\n print('Test loss1: {:.6f} accuracy1: {:.6f} loss2: {:.6f} accuracy2: {:.6f}'.format(loss[0], acc[0], loss[1], acc[1]))\n\nif __name__ == '__main__':\n # Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx\n np.random.seed(12321) # for reproducibility\n torch.manual_seed(12321) # for reproducibility\n main()\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.Linear",
"numpy.random.shuffle"
]
] |
Meteodan/arpsEnKFtools
|
[
"848c4c0eb8921d17690bf35a24f6d0714c4bc37f",
"848c4c0eb8921d17690bf35a24f6d0714c4bc37f"
] |
[
"template/1km243x243_033116_newse/master_config.py",
"template/3km153x153_051913_SSEF_idealized_radarda/master_config.py"
] |
[
"\"\"\"\nmaster_config.py -- Contains parameters to configure an end-to-end ARPS-EnKF run\n\"\"\"\nimport os\nfrom datetime import datetime\nimport numpy as np\n\n# Define needed directories and experiment names/tags\n# Base project names and directories\nscratch_base_dir = '/scratch/rice/d/dawson29'\ndepot_base_dir = '/depot/dawson29'\narpsenkftools_base_dir = os.path.join(depot_base_dir, 'apps/Projects/arpsEnKFtools')\nproject_dir = 'Projects/VORTEXSE/simulations/ARPS'\nproject_scr_dir = os.path.join(scratch_base_dir, project_dir)\nproject_depot_dir = os.path.join(depot_base_dir, 'data', project_dir)\nIOP_name = '2016_IOP3'\nIOP_scr_dir = os.path.join(project_scr_dir, IOP_name, 'EnKF')\nIOP_depot_dir = os.path.join(project_depot_dir, IOP_name, 'EnKF')\next_model_data_dir = os.path.join(depot_base_dir,\n 'data/Projects/VORTEXSE/model_data/newse_data')\nsfc_obs_dir = os.path.join(depot_base_dir, 'data/Projects/VORTEXSE/obsdata/2016/sao', IOP_name)\nradar_obs_dir = os.path.join(depot_base_dir, 'data/Projects/VORTEXSE/obsdata/2016/NEXRAD/IOP_3/level2/')\nua_obs_dir = os.path.join(depot_base_dir, 'data/Projects/VORTEXSE/obsdata/2016/raob', IOP_name)\n# TODO: add other obs type directories here\n\n# Experiment name and directories\nexp_name_base = '1km243x243_033116'\nexp_name_tag = '_newse'\nexp_name = exp_name_base + exp_name_tag\nexp_scr_dir = os.path.join(IOP_scr_dir, exp_name)\nprep_work_dir = os.path.join(exp_scr_dir, '{}_prep_work'.format(exp_name))\nexp_depot_dir = os.path.join(IOP_depot_dir, exp_name)\ntemplate_base_dir = os.path.join(arpsenkftools_base_dir, 'template')\ntemplate_exp_dir = os.path.join(template_base_dir, exp_name)\nexternal_icbc_dir = os.path.join(IOP_depot_dir, exp_name+'_icbc')\nsfcdata_dir = os.path.join(project_depot_dir, 'sfcdata')\nsfcdata_file = '{}.sfcdata'.format(exp_name)\nsfcdata_path = os.path.join(sfcdata_dir, sfcdata_file)\ntrndata_dir = os.path.join(project_depot_dir, 'trndata')\ntrndata_file = '{}.trndata'.format(exp_name)\ntrndata_path = os.path.join(trndata_dir, trndata_file)\nradflag_file = '2016_IOP3_5min.radflag'\nradflag_path = os.path.join(template_exp_dir, radflag_file)\nradarinfo_file = 'radarinfo.dat'\nradarinfo_path = os.path.join(template_base_dir, radarinfo_file)\nblacklist_file = 'blacklist.sfc'\nblacklist_file_path = os.path.join(template_exp_dir, blacklist_file)\nremapped_radar_dir = os.path.join(project_depot_dir, '{}/remapped_radar/{}'.format(IOP_name, exp_name))\n\n# Executable file names and directories\narps_base_dir = '/home/dawson29/arps5.4_main'\narps_bin_dir = os.path.join(arps_base_dir, 'bin')\narpstrn_exe_path = os.path.join(arps_bin_dir, 'arpstrn')\narpssfc_exe_path = os.path.join(arps_bin_dir, 'arpssfc')\next2arps_exe_path = os.path.join(arps_bin_dir, 'ext2arps')\narps_exe_path = os.path.join(arps_bin_dir, 'arps_mpi')\narpsenkf_exe_path = os.path.join(arps_bin_dir, 'arpsenkf_mpi')\narpsenkfic_exe_path = os.path.join(arps_bin_dir, 'arpsenkfic')\nwrf2arps_exe_path = os.path.join(arps_bin_dir, 'wrf2arps_mpi')\narpsintrp_exe_path = os.path.join(arps_bin_dir, 'arpsintrp_mpi')\nradremap_exe_path = os.path.join(arps_bin_dir, '88d2arps')\nmpi_exe = 'mpiexec'\nmpi_nproc_flag = '-n'\n\n# Experiment parameters (many of these are namelist parameters that will be inserted in the\n# appropriate namelist input files for the various ARPS programs used in an experiment). 
See the\n# documentation in the various namelist input files for details on their meanings.\n\n# Basic experiment parameters\nnum_ensemble_members = 36\n# Initial time of entire experiment. Note, for nested ARPS runs this must be consistent with the\n# initial time of the original parent experiment!\ninitial_time = '201603311800'\ninitial_datetime = datetime.strptime(initial_time, '%Y%m%d%H%M')\n# Initial time in seconds from model start corresponding to initial_time (can be different from 0\n# if ext2arps/wrf2arps/arpsintrp is run to produce IC's for several different times)\ninitial_time_sec = 0\nperturb_ic = False\nif perturb_ic:\n external_inifile = '{}.hdf{:06d}'.format(exp_name, initial_time_sec)\n external_inigbf = '{}.hdfgrdbas'.format(exp_name)\nelse:\n external_inifile = 'ena001.hdf{:06d}'.format(initial_time_sec)\n external_inigbf = 'ena001.hdfgrdbas'\nexternal_inifile_path = os.path.join(external_icbc_dir, external_inifile)\nexternal_inigbf_path = os.path.join(external_icbc_dir, external_inigbf)\n\n# ARPS comment_lines namelist parameters\nnocmnt = 2\ncomments = ['ARPS 5.4', 'March 31st, 2016 VSE IOP3']\n\n# Grid and map projection parameters\ngrid_param = {\n 'nx': 243,\n 'ny': 243,\n 'nz': 53,\n 'nproc_x': 6,\n 'nproc_y': 5,\n 'dx': 1000.0,\n 'dy': 1000.0,\n 'dz': 400.0,\n 'strhopt': 1,\n 'dzmin': 20.0,\n 'zrefsfc': 0.0,\n 'dlayer1': 0.0,\n 'dlayer2': 1.0e5,\n 'strhtune': 0.2,\n 'zflat': 1.0e5,\n 'ctrlat': 34.799999,\n 'ctrlon': -87.680000,\n 'mapproj': 2,\n 'trulat1': 33.0,\n 'trulat2': 36.0,\n 'trulon': -87.680000,\n}\n\n# ARPSTRN parameters (note that this is set to use the 30-s terrain data. Will add hooks\n# for the other terrain data source options later)\narpstrn_param = {\n 'trndataopt': 3,\n 'dir_trndata': os.path.join(depot_base_dir, 'data/arpstopo30.data'),\n 'nsmth': 2,\n 'lat_sample': 30,\n 'lon_sample': 30,\n 'trnanxopt': 2,\n 'dirname': trndata_dir,\n 'terndmp': 3\n}\n\n# ARPSSFC parameters\narpssfc_param = {\n 'nstyp': 3,\n 'sfcdmp': 3,\n 'schmopt': 3,\n 'sdatopt': 1,\n 'fstypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/soil_1km.data'),\n 'bstypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/whsoil_1deg.data'),\n 'vdatopt': 1,\n 'fvtypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/naoge1_01l_1km.img'),\n 'bvtypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/owe14d_10min.data'),\n 'ndatopt': 1,\n 'fndvifl': os.path.join(depot_base_dir, 'data/arpssfc.data/naapr92ndl_1km.img'),\n 'bndvifl': os.path.join(depot_base_dir, 'data/arpssfc.data/ndvi9004_10min.data'),\n 'vfrcopt': 1,\n 'vfrcdr': os.path.join(depot_base_dir, 'data/arpssfc.data/'),\n 'nsmthsl': 3,\n 'stypout': 1,\n 'vtypout': 1,\n 'laiout': 1,\n 'rfnsout': 1,\n 'vegout': 1,\n 'ndviout': 1,\n 'dirname': sfcdata_dir\n}\n\n# WRF2ARPS parameters\nwrf2arps_param = {\n 'run_mpi': False,\n 'nproc_x': 5,\n 'nproc_y': 2,\n 'history_interval_sec': 900,\n 'history_interval': '00_00:15:00',\n 'init_timestamp': initial_time,\n 'end_timestamp': '201604010245',\n 'subdir_template': None,\n 'hdmpfmt': 3,\n 'exbcdmp': 3,\n 'dmp_out_joined': 1111111,\n 'wrfexttrnopt': 3,\n 'terndta': trndata_path,\n 'ternfmt': 3,\n 'extntmrg': 7,\n 'dirname': external_icbc_dir\n}\n\n# ARPSINTRP parameters\narpsintrp_param = {\n}\n\n# Radar remapper parameters\nradremap_param = {\n 'radar_list': ['KBMX', 'KGWX', 'KHPX', 'KHTX', 'KNQA', 'KOHX', 'KPAH'],\n 'start_timestamp': '20160331180000',\n 'end_timestamp': '20160401030000',\n 'interval_seconds': 300,\n 'tolerance': 900,\n 'closest_before': True,\n 'nthreads': 
10\n}\n\n# EXT2ARPS parameters\next2arps_param = {\n}\n\n# ARPS parameters\n# Note that these include the comment, grid and map projection parameters already defined above\n# Also many of the parameters are shared with EXT2ARPS. So these are ones that are specific\n# to just the ARPS forward model component of the workflow. Parameters that aren't likely\n# to be changed very often but that are present in the namelist aren't included here, but can be\n# added as needed.\n\narps_param = {\n # Inifile and inigbf are only needed here for the arpsenkfic step. They are changed on the fly\n # during the actual ensemble integration to the appropriate ensemble member names\n 'nocmnt': nocmnt,\n 'cmnt(1)': comments[0],\n 'cmnt(2)': comments[1],\n 'runname': exp_name,\n 'initime': initial_datetime.strftime('%Y-%m-%d.%H:%M:00'),\n 'inifile': './{}'.format(external_inifile),\n 'inigbf': './{}'.format(external_inigbf),\n 'dtbig': 2.0,\n 'tstart': float(initial_time_sec),\n 'tstop': float(initial_time_sec),\n 'dtsml': 1.0,\n 'tintegopt': 1,\n 'tintvebd': 900, # DTD: for some reason this has to be an integer now or ARPS flips out...\n 'ngbrz': 10,\n 'brlxhw': 4,\n 'cbcdmp': 0.05,\n 'exbcfmt': 3,\n 'tmixopt': 4,\n 'trbisotp': 0,\n 'tkeopt': 3,\n 'trbvimp': 1,\n 'cfcm4h': 5.0e-4,\n 'cfcm4v': 5.0e-4,\n 'cmix_opt': 1,\n 'mphyopt': 15,\n 'sfcdtfl': sfcdata_file,\n 'sfcfmt': 3,\n 'dtsfc': 2.0,\n 'hdmpfmt': 103,\n 'thisdmp': 300.0,\n 'rfopt': 3,\n 'sv_lkup_tble': 1\n}\n\n# ARPSENKFIC parameters\narpsenkfic_param = {\n}\n\n# ARPSENKF parameters.\narpsenkf_param = {\n 'nrdrused': 1,\n 'radarname': ['KBMX', 'KGWX', 'KHPX', 'KHTX', 'KNQA', 'KOHX', 'KPAH'],\n 'ntwtype': [1, 1, 1, 1, 1, 1, 1],\n 'vcpmode': [11, 11, 11, 11, 11, 11, 11],\n 'rdrlocopt': [1, 1, 1, 1, 1, 1, 1]\n}\n\n# Parameters to generate an appropriate radflag file. Used by \"gen_radflag.py\"\nradflag_param = {\n # Add appropriate \"radar groups\" (i.e. all radars, only WSR-88Ds, only mobile, etc.)\n # And the time range for each to assimilate. Note that the gen_radflag.py script assumes\n # that there is no overlap between the times for each radar group.\n 'radar_groups': {\n 'all_radars': (arpsenkf_param['radarname'], np.arange(0., 31500. + 300., 300.))\n },\n}\n\n",
"\"\"\"\nmaster_config.py -- Contains parameters to configure an end-to-end ARPS-EnKF run\n\"\"\"\nimport os\nimport numpy as np\nfrom datetime import datetime\n\n# Define needed directories and experiment names/tags\n# Base project names and directories\nscratch_base_dir = '/scratch/rice/s/sharm261'\ndepot_base_dir = '/depot/rtanama/users/sharm261'\narpsenkftools_base_dir = '/home/sharm261/arpsEnKFtools'\nproject_dir = 'Projects/051913_OK_idealized/ARPS' # Note, removed redundant \"simulations\" subdirectory here\nproject_scr_dir = os.path.join(scratch_base_dir, project_dir)\nproject_depot_dir = os.path.join(depot_base_dir, 'data', project_dir)\nIOP_name = '' # Not needed for this experiment\nIOP_scr_dir = os.path.join(project_scr_dir, IOP_name, 'EnKF')\nIOP_depot_dir = os.path.join(project_depot_dir, IOP_name, 'EnKF')\nprep_work_dir = os.path.join(IOP_scr_dir, 'prep_work')\nsfc_obs_dir = os.path.join(depot_base_dir, 'data/Projects/051913_OK_idealized/obsdata/sao')\nradar_obs_dir = os.path.join(depot_base_dir, 'data/Projects/051913_OK_idealized/obsdata/nexrad/level2/')\n# TODO: add other obs type directories here\n\n# Experiment name and directories\nexp_name_base = '3km153x153_051913'\nexp_name_tag = '_SSEF_idealized_radarda'\nexp_name = exp_name_base + exp_name_tag\nexp_scr_dir = os.path.join(IOP_scr_dir, exp_name)\nexp_depot_dir = os.path.join(IOP_depot_dir, exp_name)\ntemplate_base_dir = os.path.join(arpsenkftools_base_dir, 'template')\ntemplate_exp_dir = os.path.join(template_base_dir, exp_name)\nexternal_icbc_dir = os.path.join(IOP_depot_dir, exp_name+'_icbc')\nsfcdata_dir = os.path.join(project_depot_dir, 'sfcdata')\nsfcdata_file = '{}.sfcdata'.format(exp_name)\nsfcdata_path = os.path.join(sfcdata_dir, sfcdata_file)\ntrndata_dir = os.path.join(project_depot_dir, 'trndata')\ntrndata_file = '{}.trndata'.format(exp_name)\ntrndata_path = os.path.join(trndata_dir, trndata_file)\nradflag_file = '2013_0519_idealized.radflag'\nradflag_path = os.path.join(template_exp_dir, radflag_file)\nradarinfo_file = 'radarinfo.dat'\nradarinfo_path = os.path.join(template_base_dir, radarinfo_file)\nblacklist_file = 'blacklist.sfc'\nblacklist_file_path = os.path.join(template_exp_dir, blacklist_file)\nremapped_radar_dir = os.path.join(project_depot_dir, 'remapped_radar/{}'.format(exp_name))\n\n# Executable file names and directories\narps_base_dir = '/home/sharm261/arps5.4'\narps_bin_dir = os.path.join(arps_base_dir, 'bin')\narpstrn_exe_path = os.path.join(arps_bin_dir, 'arpstrn')\narpssfc_exe_path = os.path.join(arps_bin_dir, 'arpssfc')\next2arps_exe_path = os.path.join(arps_bin_dir, 'ext2arps')\narps_exe_path = os.path.join(arps_bin_dir, 'arps_mpi')\narpsenkf_exe_path = os.path.join(arps_bin_dir, 'arpsenkf_mpi')\narpsenkfic_exe_path = os.path.join(arps_bin_dir, 'arpsenkfic')\nwrf2arps_exe_path = os.path.join(arps_bin_dir, 'wrf2arps_mpi')\narpsintrp_exe_path = os.path.join(arps_bin_dir, 'arpsintrp_mpi')\nradremap_exe_path = os.path.join(arps_bin_dir, '88d2arps')\nmpi_exe = 'mpiexec'\nmpi_nproc_flag = '-n'\n\n# Experiment parameters (many of these are namelist parameters that will be inserted in the\n# appropriate namelist input files for the various ARPS programs used in an experiment). See the\n# documentation in the various namelist input files for details on their meanings.\n\n# Basic experiment parameters\nnum_ensemble_members = 40\n# Initial time of entire experiment. 
Note, for nested ARPS runs this must be consistent with the\n# initial time of the original parent experiment!\ninitial_time = '201305191800'\ninitial_datetime = datetime.strptime(initial_time, '%Y%m%d%H%M')\n# Initial time in seconds from model start corresponding to initial_time (can be different from 0\n# if ext2arps/wrf2arps/arpsintrp is run to produce IC's for several different times)\ninitial_time_sec = 0\nperturb_ic = True\nif perturb_ic:\n external_inifile = '{}.hdf{:06d}'.format(exp_name, initial_time_sec)\n external_inigbf = '{}.hdfgrdbas'.format(exp_name)\nelse:\n external_inifile = 'ena001.hdf{:06d}'.format(initial_time_sec)\n external_inigbf = 'ena001.hdfgrdbas'\nexternal_inifile_path = os.path.join(external_icbc_dir, external_inifile)\nexternal_inigbf_path = os.path.join(external_icbc_dir, external_inigbf)\n\n# ARPS comment_lines namelist parameters\nnocmnt = 2\ncomments = ['ARPS 5.4', 'May 19th, 2013 OK outbreak']\n\n# Grid and map projection parameters\ngrid_param = {\n 'nx': 153,\n 'ny': 153,\n 'nz': 35,\n 'nproc_x': 3,\n 'nproc_y': 5,\n 'dx': 3000.0,\n 'dy': 3000.0,\n 'ctrlat': 35.3331,\n 'ctrlon': -97.2775,\n 'trulat1': 33.0,\n 'trulat2': 36.0,\n 'trulon': -97.2775,\n}\n\n# ARPSTRN parameters (note that this is set to use the 30-s terrain data. Will add hooks\n# for the other terrain data source options later)\narpstrn_param = {\n}\n\n# ARPSSFC parameters\narpssfc_param = {\n}\n\n# ARPSINTRP parameters\narpsintrp_param = {\n}\n\n# Radar remapper parameters\nradremap_param = {\n 'radar_list': ['KTLX'],\n 'start_timestamp': '20130519200000',\n 'end_timestamp': '20130519230000',\n 'interval_seconds': 300,\n 'tolerance': 300,\n 'closest_before': True,\n 'nthreads': 10}\n\n# EXT2ARPS parameters\next2arps_param = {\n}\n\n# ARPS parameters\n# Note that these include the comment, grid and map projection parameters already defined above\n# Also many of the parameters are shared with EXT2ARPS. So these are ones that are specific\n# to just the ARPS forward model component of the workflow. Parameters that aren't likely\n# to be changed very often but that are present in the namelist aren't included here, but can be\n# added as needed.\n\narps_param = {\n # Inifile and inigbf are only needed here for the arpsenkfic step. 
They are changed on the fly\n # during the actual ensemble integration to the appropriate ensemble member names\n 'nocmnt': nocmnt,\n 'cmnt(1)': comments[0],\n 'cmnt(2)': comments[1],\n 'runname': exp_name,\n 'initime': initial_datetime.strftime('%Y-%m-%d.%H:%M:00'),\n 'inifile': './{}'.format(external_inifile),\n 'inigbf': './{}'.format(external_inigbf),\n 'dtbig': 4.0,\n 'tstart': float(initial_time_sec),\n 'tstop': float(initial_time_sec),\n 'mphyopt': 15\n}\n\n# ARPSENKFIC parameters\narpsenkfic_param = {\n 'iniprtopt': 2,\n 'iniprt_ptprt': 2,\n 'iniprt_qv': 2,\n 'smoothopt': 2,\n 'lhor': 36000.0,\n 'lver': 7200.0,\n 'prtibgn': 3,\n 'prtiend': grid_param['nx'] - 2,\n 'prtjbgn': 3,\n 'prtjend': grid_param['ny'] - 2,\n 'prtkbgn': 3,\n 'prtkend': grid_param['nz'] - 2,\n 'prtibgnu': 3,\n 'prtiendu': grid_param['nx'] - 2,\n 'prtjbgnv': 3,\n 'prtjendv': grid_param['ny'] - 2,\n 'prtkbgnw': 3,\n 'prtkendw': grid_param['nz'] - 2,\n 'r0h_uv': 6000.0,\n 'r0v_uv': 3000.0,\n 'r0h_w': 6000.0,\n 'r0v_w': 3000.0,\n 'r0h_ptprt': 6000.0,\n 'r0v_ptprt': 3000.0,\n 'r0h_pprt': 6000.0,\n 'r0v_pprt': 3000.0,\n 'r0h_qv': 6000.0,\n 'r0v_qv': 3000.0,\n 'r0h_qli': 6000.0,\n 'r0v_qli': 3000.0,\n 'stdu': 2.0,\n 'stdv': 2.0,\n 'stdw': 0.0,\n 'stdptprt': 1.0,\n 'stdpprt': 0.0,\n 'stdqv': 0.0006,\n 'stdqrelative': 0.1,\n}\n\n# ARPSENKF parameters.\narpsenkf_param = {\n 'nrdrused': 1,\n 'radarname': ['KTLX'],\n 'ntwtype': [1,1,1,1],\n 'vcpmode': [11,11,11,11],\n 'rdrlocopt': [1,1,1,1]\n}\n\n# Parameters to generate an appropriate radflag file. Used by \"gen_radflag.py\"\nradflag_param = {\n # Add appropriate \"radar groups\" (i.e. all radars, only WSR-88Ds, only mobile, etc.)\n # And the time range for each to assimilate. Note that the gen_radflag.py script assumes\n # that there is no overlap between the times for each radar group.\n 'radar_groups': {\n 'all_radars': (arpsenkf_param['radarname'], np.arange(7200, 18000. + 300., 300.))\n },\n}\n\n"
] |
[
[
"numpy.arange"
],
[
"numpy.arange"
]
] |
githubcstahlhut/EDoHa
|
[
"56283eac605b2b50988cc2f7ee696242eec1f34e",
"56283eac605b2b50988cc2f7ee696242eec1f34e"
] |
[
"src/main/python/TFELTrainer.py",
"src/main/python/TFELAttentionTrainer.py"
] |
[
"\nimport argparse\nimport glob\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\n\nfrom keras.preprocessing import sequence\nfrom tensorflow.contrib import rnn\n\nfrom sklearn.metrics import classification_report, precision_recall_fscore_support\n\nfrom vectorizer import TokenizingEmbeddingVectorizer\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"Trains a simple BiLSTM to detect sentential arguments across multiple topics.\")\n\n parser.add_argument(\"--embeddings\", type=str, help=\"The path to the embedding folder.\")\n parser.add_argument(\"--train\", type=str, help=\"The training data file.\")\n parser.add_argument(\"--test\", type=str, help=\"The testing data file.\")\n parser.add_argument(\"--epochs\", type=int)\n parser.add_argument(\"--seed\", type=int)\n\n return parser.parse_args()\n\n\ndef read_data(data_path):\n data = pd.read_csv(data_path, sep=\",\", quotechar=\"'\", header=0)\n # data = pd.read_csv(data_path)\n return data\n\n\ndef create_model(embeddings, max_length, seed):\n\n tf.set_random_seed(seed)\n\n # Based on https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/bidirectional_rnn.py\n num_classes = 2\n dims = 100\n learning_rate = 0.001\n X_t = tf.placeholder(tf.int32, [None, max_length], name=\"topic_input\")\n X_s = tf.placeholder(tf.int32, [None, max_length], name=\"sentence_input\")\n L_t = tf.placeholder(tf.int32, [None, ], name=\"topic_length\")\n L_s = tf.placeholder(tf.int32, [None, ], name=\"sentence_length\")\n Y = tf.placeholder(tf.float32, [None, num_classes], name=\"target\")\n\n def BiRNN(x, layer):\n\n with tf.variable_scope('encoder_{}'.format(layer),reuse=False):\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (batch_size, timesteps, n_input)\n # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)\n\n # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)\n x = tf.unstack(x, max_length, 1)\n\n # Define lstm cells with tensorflow\n # Forward direction cell\n lstm_fw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n # Backward direction cell\n lstm_bw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n\n # Get lstm cell output\n outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)\n\n print(\"BiLSTM lengths: \", len(outputs))\n # Linear activation, using rnn inner loop last output\n return outputs[-1]\n\n\n topic_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"topic_embeddings\")\n topic_embedded_word_id = tf.nn.embedding_lookup(topic_word_embeddings, X_t)\n\n sentence_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"sentence_embeddings\")\n sentence_embedded_word_id = tf.nn.embedding_lookup(sentence_word_embeddings, X_s)\n\n topic_bilstm_out = BiRNN(topic_embedded_word_id, \"topic\")\n sentence_bilstm_out = BiRNN(sentence_embedded_word_id, \"sentence\") \n output = tf.concat((topic_bilstm_out, sentence_bilstm_out), axis=1)\n logits = tf.layers.dense(output, 2)\n prediction = tf.nn.softmax(logits, name=\"output\")\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op, name=\"train\")\n\n # Initialize the variables (i.e. 
assign their default value)\n init = tf.global_variables_initializer()\n return X_t, X_s, L_t, L_s, Y, prediction, train_op\n\n\nif \"__main__\"==__name__:\n\n args = parse_arguments()\n train_data = read_data(args.train)# .sample(frac=1, random_state=args.seed)[::1]\n\n train_links = train_data[[\"topic\", \"candidate\"]].values\n train_labels = train_data[\"label\"].values == \"link\"\n two_d_train_labels = np.zeros((train_labels.shape[0], 2))\n two_d_train_labels[np.where(train_labels==0), 0] = 1\n two_d_train_labels[np.where(train_labels==1), 1] = 1\n\n hypotheses = train_links[:, 0]\n sentences = train_links[:, 1]\n\n vectorizer = TokenizingEmbeddingVectorizer(args.embeddings)\n\n tokenized_hypotheses = vectorizer.tokenize_sentences(hypotheses)\n tokenized_sentences = vectorizer.tokenize_sentences(sentences)\n\n hypothesis_max_length = max(map(lambda s: len(s.split(\" \")), hypotheses))\n sentence_max_length = max(map(lambda s: len(s.split(\" \")), sentences))\n\n vectorized_hypotheses = vectorizer.sentences_to_padded_indices(hypotheses, sentence_max_length)\n vectorized_sentences = vectorizer.sentences_to_padded_indices(sentences, sentence_max_length)\n\n print(\"hypotheses.shape: \", vectorized_hypotheses.shape)\n print(\"hypotheses: \", vectorized_hypotheses)\n print(\"sentences.shape: \", vectorized_sentences.shape)\n print(\"sentences: \", vectorized_sentences)\n\n # Train model\n X_t, X_s, L_t, L_s, Y, output, train_op = create_model(vectorizer.embeddings, sentence_max_length, args.seed)\n # initialise graph\n init = tf.global_variables_initializer()\n\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n sess.run(init)\n\n batch_size = 32\n\n for i in range(args.epochs):\n # Run batches, because...\n num_batches = (len(train_data) + batch_size -1) // batch_size # Add one to get the remaining samples\n print(\"Training epoch {0}/{1} with {2} batches for {3} samples.\".format(i+1, args.epochs, num_batches, len(train_data)))\n for batch in range(num_batches):\n begin_idx = batch * batch_size\n end_idx = min((batch+1)*batch_size, len(train_data)) # make sure to not go beyond number of samples\n print(\"\\tRunning batch {0} of {1} with indices [{2}:{3}]\".format(batch, num_batches, begin_idx, end_idx))\n feed_dict = {X_t: vectorized_hypotheses[begin_idx:end_idx], X_s: vectorized_sentences[begin_idx:end_idx], Y:two_d_train_labels[begin_idx:end_idx]}\n sess.run(train_op, feed_dict=feed_dict)\n\n test_data = read_data(args.test)\n test_links = test_data[[\"topic\", \"candidate\"]].values\n test_labels = test_data[\"label\"].values == \"link\"\n\n test_hypotheses = test_links[:, 0]\n test_sentences = test_links[:, 1]\n\n test_vectorized_hypotheses = vectorizer.sentences_to_padded_indices(test_hypotheses, sentence_max_length)\n test_vectorized_sentences = vectorizer.sentences_to_padded_indices(test_sentences, sentence_max_length)\n\n\n test_feed_dict = {X_t: test_vectorized_hypotheses, X_s: test_vectorized_sentences}\n raw_preds = sess.run(output, test_feed_dict)\n preds = np.argmax(raw_preds, axis=1)\n print(preds)\n print(classification_report(test_labels, preds, target_names=[\"no-link\", \"link\"]))\n print(\"Macro: \", precision_recall_fscore_support(test_labels, preds, average=\"macro\"))\n print(\"Evidence: \", precision_recall_fscore_support(test_labels, preds, labels=[1]))\n\n\n builder = tf.saved_model.builder.SavedModelBuilder(\"hypothesisevidencelinking\")\n 
builder.add_meta_graph_and_variables(\n sess,\n [tf.saved_model.tag_constants.SERVING],\n signature_def_map= {\n \"magic_model\": tf.saved_model.signature_def_utils.predict_signature_def({\"topic_input\": X_t, \"sentence_input\": X_s, \"target\": Y}, {\"output\": output})\n })\n builder.save()\n",
"import argparse\nimport glob\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\n\nfrom keras.preprocessing import sequence\nfrom tensorflow.contrib import rnn\nfrom tensorflow.contrib import seq2seq\n\nfrom sklearn.metrics import classification_report, precision_recall_fscore_support\n\nfrom evidencedetection.vectorizer import TokenizingEmbeddingVectorizer\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"Trains a simple BiLSTM to detect sentential arguments across multiple topics.\")\n\n parser.add_argument(\"--embeddings\", type=str, help=\"The path to the embedding folder.\")\n parser.add_argument(\"--train\", type=str, help=\"The training data file.\")\n parser.add_argument(\"--test\", type=str, help=\"The testing data file.\")\n parser.add_argument(\"--epochs\", type=int)\n parser.add_argument(\"--seed\", type=int)\n\n return parser.parse_args()\n\n\ndef read_data(data_path):\n data = pd.read_csv(data_path, sep=\",\", quotechar=\"'\", header=0)\n # data = pd.read_csv(data_path)\n return data\n\n\ndef _mask_3d(inputs, sentence_lengths, mask_value, dimension=2):\n\n if dimension == 1:\n inputs = tf.transpose(inputs, [0, 2, 1])\n\n time_steps1 = tf.shape(inputs)[1]\n time_steps2 = tf.shape(inputs)[2]\n\n pad_values = mask_value * tf.ones_like(inputs, dtype=tf.float32)\n mask = tf.sequence_mask(sentence_lengths, time_steps2)\n\n mask_3d = tf.tile(tf.expand_dims(mask, 1), (1, time_steps1, 1))\n masked = tf.where(mask_3d, inputs, pad_values)\n\n if dimension == 1:\n masked = tf.transpose(masked, [0, 2, 1])\n return masked\n\n\ndef _atten_softmax3d(inputs):\n\n shape = tf.shape(inputs)\n num_units = shape[2]\n inputs = tf.reshape(inputs, tf.stack([-1, num_units]))\n soft_max = tf.nn.softmax(inputs)\n soft_max = tf.reshape(soft_max, shape)\n return soft_max\n\n\ndef _inter_atten(claim, sent, claim_lengths, sent_lengths):\n\n with tf.variable_scope('inter-attention') as scope:\n sent_T = tf.transpose(sent, [0, 2, 1])\n attention = tf.matmul(claim, sent_T)\n\n masked = _mask_3d(attention, sent_lengths, -np.inf)\n att_sent1 = _atten_softmax3d(masked)\n\n att_transpose = tf.transpose(attention, [0, 2, 1])\n masked = _mask_3d(att_transpose, claim_lengths, -np.inf)\n att_sent2 = _atten_softmax3d(masked)\n\n alpha = tf.matmul(att_sent2, claim, name=\"alpha\")\n # self.alpha = alpha\n beta = tf.matmul(att_sent1, sent, name=\"beta\")\n\n return alpha, beta\n # return att_sent1, att_sent2\n\n\n\ndef create_model(embeddings, hypothesis_max_length, sentence_max_length, seed):\n\n tf.set_random_seed(seed)\n\n # Based on https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/bidirectional_rnn.py\n num_classes = 2\n dims = 100\n learning_rate = 0.001\n X_t = tf.placeholder(tf.int32, [None, hypothesis_max_length], name=\"topic_input\")\n X_s = tf.placeholder(tf.int32, [None, sentence_max_length], name=\"sentence_input\")\n L_t = tf.placeholder(tf.int32, [None, ], name=\"topic_length\")\n L_s = tf.placeholder(tf.int32, [None, ], name=\"sentence_length\")\n Y = tf.placeholder(tf.float32, [None, num_classes], name=\"target\")\n\n def BiRNN(x, layer):\n\n with tf.variable_scope('encoder_{}'.format(layer),reuse=False):\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (batch_size, timesteps, n_input)\n # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)\n\n # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)\n # x = tf.unstack(x, max_length, 1)\n\n # 
Define lstm cells with tensorflow\n # Forward direction cell\n lstm_fw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n # Backward direction cell\n lstm_bw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n\n\n ((fw_outputs, bw_outputs), (fw_states, bw_states)) = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,\n lstm_bw_cell,\n x,\n dtype=tf.float32)\n outputs = tf.concat([fw_outputs, bw_outputs], axis=2)\n\n\n # print(\"BiLSTM lengths: \", len(outputs))\n # Linear activation, using rnn inner loop last output\n return outputs\n\n def BiRNNAtt(x, attention, layer) :\n with tf.variable_scope('encoder_sentence_{}'.format(layer),reuse=False):\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (batch_size, timesteps, n_input)\n # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)\n\n # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)\n # x = tf.unstack(x, max_length, 1)\n\n # Define lstm cells with tensorflow\n # Forward direction cell\n lstm_fw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n lstm_fw_att = seq2seq.AttentionWrapper(lstm_fw_cell, attention)\n # Backward direction cell\n lstm_bw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n lstm_bw_att = seq2seq.AttentionWrapper(lstm_bw_cell, attention)\n\n\n ((fw_outputs, bw_outputs), (fw_states, bw_states)) = tf.nn.bidirectional_dynamic_rnn(lstm_fw_att,\n lstm_bw_att,\n x,\n dtype=tf.float32)\n outputs = tf.concat([fw_outputs, bw_outputs], axis=2)\n\n\n # print(\"BiLSTM lengths: \", len(outputs))\n # Linear activation, using rnn inner loop last output\n return outputs\n\n\n topic_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"topic_embeddings\")\n topic_embedded_word_id = tf.nn.embedding_lookup(topic_word_embeddings, X_t)\n\n sentence_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"sentence_embeddings\")\n sentence_embedded_word_id = tf.nn.embedding_lookup(sentence_word_embeddings, X_s)\n\n topic_bilstm_out = BiRNN(topic_embedded_word_id, \"topic\")\n\n attention_mechanism = seq2seq.LuongAttention(100, topic_bilstm_out, L_t)\n # sentence_bilstm_out = BiRNNAtt(sentence_embedded_word_id, attention_mechanism, \"sentence\")\n sentence_bilstm_out = BiRNN(sentence_embedded_word_id, \"sentence\")\n # output = tf.concat((topic_bilstm_out[:, -1], sentence_bilstm_out[:, -1]), axis=1)\n sentence_attention, topic_attention = _inter_atten(topic_bilstm_out, sentence_bilstm_out, L_t, L_s) # TODO CST 2019-06-28: Add sentence lengths as input\n # wheigh by attention\n topic_att_wheighted = tf.multiply(topic_bilstm_out, tf.multiply(topic_bilstm_out, topic_attention))\n sentence_att_wheighted = tf.multiply(sentence_bilstm_out, tf.multiply(sentence_bilstm_out, sentence_attention))\n # attention diff\n topic_att_diff = tf.subtract(topic_bilstm_out, topic_attention)\n sentence_att_diff = tf.subtract(sentence_bilstm_out, sentence_attention)\n # attention_output = tf.reduce_sum(tf.concat((topic_attention, sentence_attention), axis=1), axis=1)\n # attention_output = tf.reduce_sum(tf.concat((topic_attention, sentence_attention, topic_att_wheighted, sentence_att_wheighted, topic_att_diff, sentence_att_diff), axis=1), axis=1)\n attention_output = tf.reduce_sum(tf.concat((topic_att_wheighted, sentence_att_wheighted), axis=1), axis=1)\n output = tf.concat((topic_bilstm_out[:, -1], sentence_bilstm_out[:, -1], attention_output), axis=1)\n # output = attention_output\n # output = tf.concat((topic_bilstm_out[:, -1], sentence_bilstm_out[:, -1]), 
axis=1)\n\n logits = tf.layers.dense(output, 2)\n prediction = tf.nn.softmax(logits, name=\"output\")\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op, name=\"train\")\n\n # Initialize the variables (i.e. assign their default value)\n init = tf.global_variables_initializer()\n return X_t, X_s, L_t, L_s, Y, prediction, train_op\n\n\nif \"__main__\"==__name__:\n\n args = parse_arguments()\n train_data = read_data(args.train).sample(frac=1)[::1]\n\n train_links = train_data[[\"topic\", \"candidate\"]].values\n train_labels = train_data[\"label\"].values == \"link\"\n # train_labels = train_data[\"label\"].values\n two_d_train_labels = np.zeros((train_labels.shape[0], 2))\n two_d_train_labels[np.where(train_labels==0), 0] = 1\n two_d_train_labels[np.where(train_labels==1), 1] = 1\n\n hypotheses = train_links[:, 0]\n sentences = train_links[:, 1]\n\n vectorizer = TokenizingEmbeddingVectorizer(args.embeddings, [\"[REF\", \"[REF]\"])\n\n tokenized_hypotheses = vectorizer.tokenize_sentences(hypotheses)\n tokenized_sentences = vectorizer.tokenize_sentences(sentences)\n\n hypothesis_lengths = list(map(lambda s: len(s.split(\" \")), hypotheses))\n hypothesis_max_length = max(hypothesis_lengths)\n sentence_lengths = list(map(lambda s: len(s.split(\" \")), sentences))\n sentence_max_length = max(sentence_lengths)\n\n vectorized_hypotheses = vectorizer.sentences_to_padded_indices(hypotheses, hypothesis_max_length, padding=\"pre\")\n vectorized_sentences = vectorizer.sentences_to_padded_indices(sentences, sentence_max_length, padding=\"pre\")\n\n print(\"hypotheses.shape: \", vectorized_hypotheses.shape)\n print(\"hypotheses: \", vectorized_hypotheses)\n print(\"sentences.shape: \", vectorized_sentences.shape)\n print(\"sentences: \", vectorized_sentences)\n\n\n # Train model\n X_t, X_s, L_t, L_s, Y, output, train_op = create_model(vectorizer.embeddings, hypothesis_max_length, sentence_max_length, args.seed)\n # initialise graph\n init = tf.global_variables_initializer()\n\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n # sess = tf.Session(graph=tf.get_default_graph())\n sess.run(init)\n\n batch_size = 32\n\n for i in range(args.epochs):\n # Run batches, because...\n num_batches = (len(train_data) + batch_size -1) // batch_size # Add one to get the remaining samples\n print(\"Training epoch {0}/{1} with {2} batches for {3} samples.\".format(i+1, args.epochs, num_batches, len(train_data)))\n for batch in range(num_batches):\n begin_idx = batch * batch_size\n end_idx = min((batch+1)*batch_size, len(train_data)) # make sure to not go beyond number of samples\n print(\"\\tRunning batch {0} of {1} with indices [{2}:{3}]\".format(batch, num_batches, begin_idx, end_idx))\n feed_dict = {X_t: vectorized_hypotheses[begin_idx:end_idx],\n X_s: vectorized_sentences[begin_idx:end_idx],\n L_t: hypothesis_lengths[begin_idx:end_idx],\n L_s: sentence_lengths[begin_idx:end_idx],\n Y:two_d_train_labels[begin_idx:end_idx]}\n sess.run(train_op, feed_dict=feed_dict)\n\n test_data = read_data(args.test)\n test_links = test_data[[\"topic\", \"candidate\"]].values\n test_labels = test_data[\"label\"].values == \"link\"\n # test_labels = test_data[\"label\"].values\n\n test_hypotheses = test_links[:, 0]\n test_sentences = 
test_links[:, 1]\n\n test_hypothesis_lengths = list(map(lambda s: len(s.split(\" \")), test_hypotheses))\n test_sentence_lengths = list(map(lambda s: len(s.split(\" \")), test_sentences))\n\n test_vectorized_hypotheses = vectorizer.sentences_to_padded_indices(test_hypotheses, hypothesis_max_length, padding=\"pre\")\n test_vectorized_sentences = vectorizer.sentences_to_padded_indices(test_sentences, sentence_max_length, padding=\"pre\")\n\n\n test_feed_dict = {X_t: test_vectorized_hypotheses, X_s: test_vectorized_sentences, L_t: test_hypothesis_lengths, L_s: test_sentence_lengths}\n raw_preds = sess.run(output, test_feed_dict)\n preds = np.argmax(raw_preds, axis=1)\n print(preds)\n print(classification_report(test_labels, preds, target_names=[\"no-link\", \"link\"]))\n print(\"Macro: \", precision_recall_fscore_support(test_labels, preds, average=\"macro\"))\n\n\n builder = tf.saved_model.builder.SavedModelBuilder(\"evidencelinking-en-trainable\")\n builder.add_meta_graph_and_variables(\n sess,\n [tf.saved_model.tag_constants.SERVING],\n signature_def_map= {\n \"magic_model\": tf.saved_model.signature_def_utils.predict_signature_def({\"topic_input\": X_t, \"sentence_input\": X_s, \"target\": Y}, {\"output\": output})\n })\n builder.save()\n"
] |
[
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.contrib.rnn.BasicLSTMCell",
"numpy.where",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"pandas.read_csv",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"tensorflow.concat",
"tensorflow.get_default_graph",
"tensorflow.Variable",
"tensorflow.ConfigProto",
"numpy.argmax",
"tensorflow.layers.dense",
"tensorflow.contrib.rnn.static_bidirectional_rnn",
"tensorflow.train.AdamOptimizer",
"numpy.zeros",
"sklearn.metrics.precision_recall_fscore_support",
"tensorflow.placeholder",
"tensorflow.unstack",
"sklearn.metrics.classification_report",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.saved_model.signature_def_utils.predict_signature_def"
],
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.ones_like",
"tensorflow.matmul",
"tensorflow.reshape",
"numpy.where",
"tensorflow.nn.embedding_lookup",
"tensorflow.stack",
"tensorflow.nn.softmax",
"pandas.read_csv",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.get_default_graph",
"tensorflow.subtract",
"tensorflow.Variable",
"tensorflow.transpose",
"tensorflow.ConfigProto",
"tensorflow.variable_scope",
"numpy.argmax",
"tensorflow.layers.dense",
"tensorflow.train.AdamOptimizer",
"numpy.zeros",
"tensorflow.where",
"tensorflow.expand_dims",
"sklearn.metrics.precision_recall_fscore_support",
"tensorflow.placeholder",
"tensorflow.sequence_mask",
"tensorflow.contrib.seq2seq.LuongAttention",
"tensorflow.multiply",
"tensorflow.saved_model.signature_def_utils.predict_signature_def",
"sklearn.metrics.classification_report",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.contrib.seq2seq.AttentionWrapper"
]
] |
TortoiseHam/fastestimator
|
[
"6061a4fbbeb62a2194ef82ba8017f651710d0c65",
"6061a4fbbeb62a2194ef82ba8017f651710d0c65",
"6061a4fbbeb62a2194ef82ba8017f651710d0c65",
"6061a4fbbeb62a2194ef82ba8017f651710d0c65"
] |
[
"fastestimator/trace/metric/recall.py",
"fastestimator/trace/xai/grad_cam.py",
"apphub/anomaly_detection/alocc/alocc_torch.py",
"fastestimator/op/numpyop/univariate/rua.py"
] |
[
"# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Any, Dict, Union, Iterable\n\nimport numpy as np\nfrom sklearn.metrics import recall_score\n\nfrom fastestimator.trace.meta.per_ds import per_ds\nfrom fastestimator.trace.trace import Trace\nfrom fastestimator.util.data import Data\nfrom fastestimator.util.traceability_util import traceable\nfrom fastestimator.util.util import to_number\n\n\n@per_ds\n@traceable()\nclass Recall(Trace):\n \"\"\"Compute recall for a classification task and report it back to the logger.\n\n Args:\n true_key: Name of the key that corresponds to ground truth in the batch dictionary.\n pred_key: Name of the key that corresponds to predicted score in the batch dictionary.\n mode: What mode(s) to execute this Trace in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Trace in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n output_name: Name of the key to store to the state.\n per_ds: Whether to automatically compute this metric individually for every ds_id it runs on, in addition to\n computing an aggregate across all ds_ids on which it runs. 
This is automatically False if `output_name`\n contains a \"|\" character.\n **kwargs: Additional keyword arguments that pass to sklearn.metrics.recall_score()\n\n Raises:\n ValueError: One of [\"y_true\", \"y_pred\", \"average\"] argument exists in `kwargs`.\n \"\"\"\n def __init__(self,\n true_key: str,\n pred_key: str,\n mode: Union[None, str, Iterable[str]] = (\"eval\", \"test\"),\n ds_id: Union[None, str, Iterable[str]] = None,\n output_name: str = \"recall\",\n per_ds: bool = True,\n **kwargs) -> None:\n Recall.check_kwargs(kwargs)\n super().__init__(inputs=(true_key, pred_key), outputs=output_name, mode=mode, ds_id=ds_id)\n self.binary_classification = None\n self.y_true = []\n self.y_pred = []\n self.kwargs = kwargs\n self.per_ds = per_ds\n\n @property\n def true_key(self) -> str:\n return self.inputs[0]\n\n @property\n def pred_key(self) -> str:\n return self.inputs[1]\n\n def on_epoch_begin(self, data: Data) -> None:\n self.y_true = []\n self.y_pred = []\n\n def on_batch_end(self, data: Data) -> None:\n y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])\n self.binary_classification = y_pred.shape[-1] == 1\n if y_true.shape[-1] > 1 and y_true.ndim > 1:\n y_true = np.argmax(y_true, axis=-1)\n if y_pred.shape[-1] > 1:\n y_pred = np.argmax(y_pred, axis=-1)\n else:\n y_pred = np.round(y_pred)\n assert y_pred.size == y_true.size\n self.y_pred.extend(y_pred.ravel())\n self.y_true.extend(y_true.ravel())\n\n def on_epoch_end(self, data: Data) -> None:\n if self.binary_classification:\n score = recall_score(self.y_true, self.y_pred, average='binary', **self.kwargs)\n else:\n score = recall_score(self.y_true, self.y_pred, average=None, **self.kwargs)\n data.write_with_log(self.outputs[0], score)\n\n @staticmethod\n def check_kwargs(kwargs: Dict[str, Any]) -> None:\n \"\"\"Check if `kwargs` has any blacklist argument and raise an error if it does.\n\n Args:\n kwargs: Keywork arguments to be examined.\n\n Raises:\n ValueError: One of [\"y_true\", \"y_pred\", \"average\"] argument exists in `kwargs`.\n \"\"\"\n blacklist = [\"y_true\", \"y_pred\", \"average\"]\n illegal_kwarg = [x for x in blacklist if x in kwargs]\n if illegal_kwarg:\n raise ValueError(\n f\"Arguments {illegal_kwarg} cannot exist in kwargs, since FastEstimator will later directly use them in\"\n \" sklearn.metrics.recall_score()\")\n",
"# Copyright 2021 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Any, Dict, Iterable, Optional, TypeVar, Union\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport torch\n\nfrom fastestimator.backend.argmax import argmax\nfrom fastestimator.backend.concat import concat\nfrom fastestimator.backend.get_image_dims import get_image_dims\nfrom fastestimator.backend.reduce_max import reduce_max\nfrom fastestimator.backend.squeeze import squeeze\nfrom fastestimator.trace.trace import Trace\nfrom fastestimator.util.data import Data\nfrom fastestimator.util.img_data import ImgData\nfrom fastestimator.util.traceability_util import traceable\nfrom fastestimator.util.util import to_number\n\nTensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)\n\n\n@traceable()\nclass GradCAM(Trace):\n \"\"\"A trace which draws GradCAM heatmaps on top of images.\n\n These are useful for visualizing supports for a model's classification. See https://arxiv.org/pdf/1610.02391.pdf\n for more details.\n\n Args:\n images: The key corresponding to images onto which to draw the CAM outputs.\n grads: The key corresponding to gradients of the model output with respect to a convolution layer of the model.\n You can easily extract these from any model by using the 'intermediate_layers' variable in a ModelOp, along\n with the GradientOp. Make sure to select a particular component of y_pred when computing gradients rather\n than using the entire vector. See our GradCAM XAI tutorial for an example.\n n_components: How many principal components to visualize.\n n_samples: How many images in total to display every epoch (or None to display all available images).\n labels: The key corresponding to the true labels of the images to be visualized.\n preds: The key corresponding to the model prediction for each image.\n label_mapping: {class_string: model_output_value}.\n outputs: The key into which to write the eigencam images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Trace in. To execute regardless of ds_id, pass None. 
To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n \"\"\"\n def __init__(self,\n images: str,\n grads: str,\n n_components: int = 3,\n n_samples: Optional[int] = 5,\n labels: Optional[str] = None,\n preds: Optional[str] = None,\n label_mapping: Optional[Dict[str, Any]] = None,\n outputs: str = \"gradcam\",\n mode: Union[None, str, Iterable[str]] = \"!train\",\n ds_id: Union[None, str, Iterable[str]] = None):\n self.image_key = images\n self.grad_key = grads\n self.true_label_key = labels\n self.pred_label_key = preds\n inputs = [x for x in (images, grads, labels, preds) if x is not None]\n self.n_components = n_components\n self.n_samples = n_samples\n # TODO - handle non-hashable labels\n self.label_mapping = {val: key for key, val in label_mapping.items()} if label_mapping else None\n super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id)\n self.images = []\n self.grads = []\n self.labels = []\n self.preds = []\n self.n_found = 0\n\n def _reset(self) -> None:\n \"\"\"Clear memory for next epoch.\n \"\"\"\n self.images = []\n self.grads = []\n self.labels = []\n self.preds = []\n self.n_found = 0\n\n def on_batch_end(self, data: Data) -> None:\n if self.n_samples is None or self.n_found < self.n_samples:\n self.images.append(data[self.image_key])\n self.grads.append(data[self.grad_key])\n if self.true_label_key:\n self.labels.append(data[self.true_label_key])\n if self.pred_label_key:\n self.preds.append(data[self.pred_label_key])\n self.n_found += len(data[self.image_key])\n\n def on_epoch_end(self, data: Data) -> None:\n # Keep only the user-specified number of samples\n images = concat(self.images)[:self.n_samples or self.n_found]\n _, height, width = get_image_dims(images)\n grads = to_number(concat(self.grads)[:self.n_samples or self.n_found])\n if tf.is_tensor(images):\n grads = np.moveaxis(grads, source=-1, destination=1) # grads should be channel first\n args = {}\n labels = None if not self.labels else concat(self.labels)[:self.n_samples or self.n_found]\n if labels is not None:\n if len(labels.shape) > 1:\n labels = argmax(labels, axis=-1)\n if self.label_mapping:\n labels = np.array([self.label_mapping[clazz] for clazz in to_number(squeeze(labels))])\n args[self.true_label_key] = labels\n preds = None if not self.preds else concat(self.preds)[:self.n_samples or self.n_found]\n if preds is not None:\n if len(preds.shape) > 1:\n preds = argmax(preds, axis=-1)\n if self.label_mapping:\n preds = np.array([self.label_mapping[clazz] for clazz in to_number(squeeze(preds))])\n args[self.pred_label_key] = preds\n args[self.image_key] = images\n # Clear memory\n self._reset()\n # Make the image\n # TODO: In future maybe allow multiple different grads to have side-by-side comparisons of classes\n components = [np.mean(grads, axis=1)]\n components = [np.maximum(component, 0) for component in components]\n masks = []\n for component_batch in components:\n img_batch = []\n for img in component_batch:\n img = cv2.resize(img, (height, width))\n img = img - np.min(img)\n img = img / np.max(img)\n img = cv2.cvtColor(cv2.applyColorMap(np.uint8(255 * img), cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)\n img = np.float32(img) / 255\n img_batch.append(img)\n img_batch = np.array(img_batch, dtype=np.float32)\n # Switch to channel first for pytorch\n if isinstance(images, torch.Tensor):\n img_batch = np.moveaxis(img_batch, source=-1, destination=1)\n masks.append(img_batch)\n\n components = [images + mask for mask in masks] # This seems to work 
even if the image is 1 channel instead of 3\n components = [image / reduce_max(image) for image in components]\n\n for elem in components:\n args[self.grad_key] = elem\n\n result = ImgData(**args)\n data.write_without_log(self.outputs[0], result)\n",
"# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tempfile\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\nfrom sklearn.metrics import auc, f1_score, roc_curve\nfrom torch.nn.init import normal_\n\nimport fastestimator as fe\nfrom fastestimator.backend import binary_crossentropy\nfrom fastestimator.op.numpyop import LambdaOp\nfrom fastestimator.op.numpyop.univariate import ChannelTranspose, ExpandDims, Normalize\nfrom fastestimator.op.tensorop import TensorOp\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.trace import Trace\nfrom fastestimator.trace.io import BestModelSaver\nfrom fastestimator.util import to_number\n\n\nclass reconstructor(nn.Module):\n def __init__(self):\n super().__init__()\n self.encoder = nn.Sequential(\n nn.Conv2d(1, 32, 5, stride=2, padding=2), # (self, in_channels, out_channels, kernel_size, stride=1,\n nn.BatchNorm2d(32),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(32, 64, 5, stride=2, padding=2),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(64, 128, 5, stride=2, padding=2),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.BatchNorm2d(128),\n )\n self.decoder = nn.Sequential(nn.ConvTranspose2d(128, 32, 5, stride=2, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 16, 5, stride=2, padding=2, output_padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(True),\n nn.ConvTranspose2d(16, 1, 5, stride=2, padding=2, output_padding=1),\n nn.Tanh())\n\n for layer in self.encoder:\n if isinstance(layer, nn.Conv2d):\n normal_(layer.weight.data, mean=0, std=0.02)\n\n for layer in self.decoder:\n if isinstance(layer, nn.ConvTranspose2d):\n normal_(layer.weight.data, mean=0, std=0.02)\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass discriminator(nn.Module):\n def __init__(self):\n super().__init__()\n self.layers = nn.Sequential(nn.Conv2d(1, 16, 5, stride=2, padding=2),\n nn.BatchNorm2d(16),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(16, 32, 5, stride=2, padding=2),\n nn.BatchNorm2d(32),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(32, 64, 5, stride=2, padding=2),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(64, 128, 5, stride=2, padding=2),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n Flatten(),\n nn.Linear(512, 1),\n nn.Sigmoid())\n\n for layer in self.layers:\n if isinstance(layer, nn.Conv2d):\n normal_(layer.weight.data, mean=0, std=0.02)\n\n def forward(self, x):\n x = self.layers(x)\n return x\n\n\nclass RLoss(TensorOp):\n def __init__(self, alpha=0.2, inputs=None, outputs=None, mode=None):\n super().__init__(inputs, outputs, mode)\n self.alpha = alpha\n\n def 
forward(self, data, state):\n fake_score, x_fake, x = data\n recon_loss = binary_crossentropy(y_true=x, y_pred=x_fake, from_logits=True)\n adv_loss = binary_crossentropy(y_pred=fake_score, y_true=torch.ones_like(fake_score), from_logits=True)\n return adv_loss + self.alpha * recon_loss\n\n\nclass DLoss(TensorOp):\n def forward(self, data, state):\n true_score, fake_score = data\n real_loss = binary_crossentropy(y_pred=true_score, y_true=torch.ones_like(true_score), from_logits=True)\n fake_loss = binary_crossentropy(y_pred=fake_score, y_true=torch.zeros_like(fake_score), from_logits=True)\n total_loss = real_loss + fake_loss\n return total_loss\n\n\nclass F1AUCScores(Trace):\n \"\"\"Computes F1-Score and AUC Score for a classification task and reports it back to the logger.\n \"\"\"\n def __init__(self, true_key, pred_key, mode=(\"eval\", \"test\"), output_name=(\"auc_score\", \"f1_score\")):\n super().__init__(inputs=(true_key, pred_key), outputs=output_name, mode=mode)\n self.y_true = []\n self.y_pred = []\n\n @property\n def true_key(self):\n return self.inputs[0]\n\n @property\n def pred_key(self):\n return self.inputs[1]\n\n def on_epoch_begin(self, data):\n self.y_true = []\n self.y_pred = []\n\n def on_batch_end(self, data):\n y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])\n assert y_pred.size == y_true.size\n self.y_pred.extend(y_pred.ravel())\n self.y_true.extend(y_true.ravel())\n\n def on_epoch_end(self, data):\n fpr, tpr, thresholds = roc_curve(self.y_true, self.y_pred, pos_label=1) # (y, score, positive_label)\n roc_auc = auc(fpr, tpr)\n eer_threshold = thresholds[np.nanargmin(np.absolute((1 - tpr - fpr)))]\n y_pred_class = np.copy(self.y_pred)\n y_pred_class[y_pred_class >= eer_threshold] = 1\n y_pred_class[y_pred_class < eer_threshold] = 0\n f_score = f1_score(self.y_true, y_pred_class, pos_label=0)\n\n data.write_with_log(self.outputs[0], roc_auc)\n data.write_with_log(self.outputs[1], f_score)\n\n\ndef get_estimator(epochs=20, batch_size=128, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):\n # Dataset Creation\n (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()\n x_eval0, y_eval0 = x_eval[np.where((y_eval == 1))], np.ones(y_eval[np.where((y_eval == 1))].shape)\n x_eval1, y_eval1 = x_eval[np.where((y_eval != 1))], y_eval[np.where((y_eval != 1))]\n\n # Ensuring outliers comprise 50% of the dataset\n index = np.random.choice(x_eval1.shape[0], int(x_eval0.shape[0]), replace=False)\n x_eval1, y_eval1 = x_eval1[index], np.zeros(y_eval1[index].shape)\n\n x_train, y_train = x_train[np.where((y_train == 1))], np.zeros(y_train[np.where((y_train == 1))].shape)\n train_data = fe.dataset.NumpyDataset({\"x\": x_train, \"y\": y_train})\n\n x_eval, y_eval = np.concatenate([x_eval0, x_eval1]), np.concatenate([y_eval0, y_eval1])\n eval_data = fe.dataset.NumpyDataset({\"x\": x_eval, \"y\": y_eval})\n\n pipeline = fe.Pipeline(\n train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=[\n ExpandDims(inputs=\"x\", outputs=\"x\"),\n Normalize(inputs=\"x\", outputs=\"x\", mean=1.0, std=1.0, max_pixel_value=127.5),\n LambdaOp(fn=lambda x: x + np.random.normal(loc=0.0, scale=0.155, size=(28, 28, 1)).astype(np.float32),\n inputs=\"x\",\n outputs=\"x_w_noise\",\n mode=\"train\"),\n ChannelTranspose(inputs=\"x\", outputs=\"x\"),\n ChannelTranspose(inputs=\"x_w_noise\", outputs=\"x_w_noise\", mode=\"train\")\n ])\n\n recon_model = fe.build(model_fn=reconstructor,\n optimizer_fn=lambda x: torch.optim.RMSprop(x, 
lr=2e-4),\n model_name=\"reconstructor\")\n disc_model = fe.build(model_fn=discriminator,\n optimizer_fn=lambda x: torch.optim.RMSprop(x, lr=1e-4),\n model_name=\"discriminator\")\n\n network = fe.Network(ops=[\n ModelOp(model=recon_model, inputs=\"x_w_noise\", outputs=\"x_fake\", mode=\"train\"),\n ModelOp(model=recon_model, inputs=\"x\", outputs=\"x_fake\", mode=\"eval\"),\n ModelOp(model=disc_model, inputs=\"x_fake\", outputs=\"fake_score\"),\n ModelOp(model=disc_model, inputs=\"x\", outputs=\"true_score\"),\n RLoss(inputs=(\"fake_score\", \"x_fake\", \"x\"), outputs=\"rloss\"),\n UpdateOp(model=recon_model, loss_name=\"rloss\"),\n DLoss(inputs=(\"true_score\", \"fake_score\"), outputs=\"dloss\"),\n UpdateOp(model=disc_model, loss_name=\"dloss\"),\n ])\n\n traces = [\n F1AUCScores(true_key=\"y\", pred_key=\"fake_score\", mode=\"eval\", output_name=[\"auc_score\", \"f1_score\"]),\n BestModelSaver(model=recon_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),\n BestModelSaver(model=disc_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),\n ]\n\n estimator = fe.Estimator(pipeline=pipeline,\n network=network,\n epochs=epochs,\n traces=traces,\n max_train_steps_per_epoch=max_train_steps_per_epoch,\n log_steps=50)\n\n return estimator\n\n\nif __name__ == \"__main__\":\n est = get_estimator()\n est.fit()\n",
"# Copyright 2021 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport inspect\nimport random\nfrom typing import Any, Dict, Iterable, List, Tuple, Union\n\nimport numpy as np\nfrom PIL import Image, ImageOps\n\nfrom fastestimator.op.numpyop.meta.one_of import OneOf\nfrom fastestimator.op.numpyop.meta.sometimes import Sometimes\nfrom fastestimator.op.numpyop.numpyop import NumpyOp, forward_numpyop\nfrom fastestimator.op.numpyop.univariate.autocontrast import AutoContrast\nfrom fastestimator.op.numpyop.univariate.brightness import Brightness\nfrom fastestimator.op.numpyop.univariate.color import Color\nfrom fastestimator.op.numpyop.univariate.contrast import Contrast\nfrom fastestimator.op.numpyop.univariate.posterize import Posterize as PosterizeAug\nfrom fastestimator.op.numpyop.univariate.sharpness import Sharpness\nfrom fastestimator.op.numpyop.univariate.shear_x import ShearX\nfrom fastestimator.op.numpyop.univariate.shear_y import ShearY\nfrom fastestimator.op.numpyop.univariate.translate_x import TranslateX\nfrom fastestimator.op.numpyop.univariate.translate_y import TranslateY\nfrom fastestimator.util.traceability_util import traceable\nfrom fastestimator.util.util import param_to_range, to_list, to_set\n\n\n@traceable()\nclass Rotate(NumpyOp):\n \"\"\"Rotate the input by an angle selected randomly.\n\n This is a wrapper for functionality provided by the PIL library:\n https://github.com/python-pillow/Pillow/tree/master/src/PIL.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n limit: Range from which the angle can be picked. 
If limit is a single int the range is considered from\n (0, limit).\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n limit: Union[int, Tuple[int, int]] = 30):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n self.limit = param_to_range(limit)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"Set the augmentation intensity based on the magnitude_coef.\n\n This method is specifically designed to be invoked by the RUA Op.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n param_mid = (self.limit[1] + self.limit[0]) / 2\n param_extent = magnitude_coef * ((self.limit[1] - self.limit[0]) / 2)\n self.limit = (param_mid - param_extent, param_mid + param_extent)\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n degree = random.uniform(self.limit[0], self.limit[1])\n return [Rotate._apply_rotate(elem, degree) for elem in data]\n\n @staticmethod\n def _apply_rotate(data: np.ndarray, degree: float) -> np.ndarray:\n \"\"\"Rotate the image.\n\n Args:\n data: The image to be modified.\n degree: Angle for image rotation.\n\n Returns:\n The image after applying rotation.\n \"\"\"\n im = Image.fromarray(data)\n im = im.rotate(degree)\n return np.array(im)\n\n\n@traceable()\nclass Identity(NumpyOp):\n \"\"\"Pass the input as-is.\n\n Args:\n inputs: Key(s) of images.\n outputs: Key(s) into which to write the images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"A method which will be invoked by the RUA Op to adjust the augmentation intensity.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n\n\n@traceable()\nclass Equalize(NumpyOp):\n \"\"\"Equalize the image histogram.\n\n This is a wrapper for functionality provided by the PIL library:\n https://github.com/python-pillow/Pillow/tree/master/src/PIL.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. 
To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"A method which will be invoked by the RUA Op to adjust the augmentation intensity.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n return [Equalize._apply_equalize(elem) for elem in data]\n\n @staticmethod\n def _apply_equalize(data: np.ndarray) -> np.ndarray:\n \"\"\"Equalize the image histogram.\n\n Args:\n data: The image to be modified.\n\n Returns:\n The image after applying equalize.\n \"\"\"\n im = Image.fromarray(data)\n im = ImageOps.equalize(im)\n return np.array(im)\n\n\n@traceable()\nclass Posterize(PosterizeAug):\n \"\"\"Reduce the number of bits for the image.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n num_bits: Number of high bits. If num_bits is a single value, the range will be [num_bits, num_bits]. A triplet\n of ints will be interpreted as [r, g, b], and a triplet of pairs as [[r1, r1], [g1, g2], [b1, b2]]. 
Must be\n in the range [0, 8].\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n num_bits: Union[int,\n Tuple[int, int],\n Tuple[int, int, int],\n Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]]] = 7):\n self.num_bits = num_bits\n super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id, num_bits=num_bits)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"Set the augmentation intensity based on the magnitude_coef.\n\n This method is specifically designed to be invoked by the RUA Op.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n if isinstance(self.num_bits, tuple) and len(self.num_bits) == 3:\n num_bits = []\n for i in self.num_bits:\n num_bits.append(Posterize._range_tuple(num_bits=i, magnitude_coef=magnitude_coef))\n self.num_bits = tuple(num_bits)\n else:\n self.num_bits = Posterize._range_tuple(num_bits=self.num_bits, magnitude_coef=magnitude_coef)\n super().__init__(inputs=self.inputs,\n outputs=self.outputs,\n mode=self.mode,\n ds_id=self.ds_id,\n num_bits=self.num_bits)\n\n @staticmethod\n def _range_tuple(num_bits: Union[int, Tuple[int, int]], magnitude_coef: float) -> Tuple[int, int]:\n \"\"\"Process num_bits for posterization based on augmentation intensity.\n\n Args:\n num_bits: Number of high bits.\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n\n Returns:\n The range of high bits after adjusting augmentation intensity.\n \"\"\"\n if isinstance(num_bits, tuple):\n param_mid = (num_bits[0] + num_bits[1])/2\n param_extent = magnitude_coef * ((num_bits[1] - num_bits[0])/2)\n bits_range = (round(param_mid - param_extent), round(param_mid + param_extent))\n else:\n bits_range = (round(8-(magnitude_coef*num_bits)), 8)\n return bits_range\n\n\n@traceable()\nclass Solarize(NumpyOp):\n \"\"\"Invert all pixel values above a threshold.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n threshold: Range for the solarizing threshold. 
If threshold is a single value 't', the range will be [0, t].\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n threshold: Union[int, Tuple[int, int], float, Tuple[float, float]] = 256):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n self.threshold = threshold\n\n def set_rua_level(self, magnitude_coef: Union[int, float]) -> None:\n \"\"\"Set the augmentation intensity based on the magnitude_coef.\n\n This method is specifically designed to be invoked by the RUA Op.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n if isinstance(self.threshold, tuple):\n self.threshold = magnitude_coef * (self.threshold[1] - self.threshold[0]) + self.threshold[0]\n else:\n self.threshold = magnitude_coef * self.threshold\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n if isinstance(self.threshold, tuple):\n threshold = 256 - round(random.uniform(self.threshold[0], self.threshold[1]))\n else:\n threshold = 256 - round(random.uniform(0, self.threshold))\n return [Solarize._apply_solarize(elem, threshold) for elem in data]\n\n @staticmethod\n def _apply_solarize(data: np.ndarray, threshold: int) -> np.ndarray:\n \"\"\"Invert all pixel values of the image above a threshold.\n\n Args:\n data: The image to be modified.\n threshold: Solarizing threshold.\n\n Returns:\n The image after applying solarize.\n \"\"\"\n data = np.where(data < threshold, data, 255 - data)\n return data\n\n\n@traceable()\nclass OneOfMultiVar(OneOf):\n \"\"\"Perform one of several possible NumpyOps.\n\n Note that OneOfMultiVar accepts both univariate and multivariate ops and allows the list of passed NumpyOps to have\n different input and output keys. OneOfMultiVar should not be used to wrap an op whose output key(s) do not already\n exist in the data dictionary. This would result in a problem when future ops / traces attempt to reference the\n output key, but OneOfMultiVar declined to generate it. 
If you want to create a default value for a new key, simply\n use a LambdaOp before invoking the OneOfMultiVar.\n\n Args:\n *numpy_ops: A list of ops to choose between with uniform probability.\n \"\"\"\n def __init__(self, *numpy_ops: NumpyOp) -> None:\n inputs = to_set(numpy_ops[0].inputs)\n outputs = to_set(numpy_ops[0].outputs)\n mode = numpy_ops[0].mode\n ds_id = numpy_ops[0].ds_id\n self.in_list = numpy_ops[0].in_list\n self.out_list = numpy_ops[0].out_list\n for op in numpy_ops[1:]:\n assert self.in_list == op.in_list, \"All ops within OneOf must share the same input configuration\"\n assert self.out_list == op.out_list, \"All ops within OneOf must share the same output configuration\"\n assert mode == op.mode, \"All ops within a OneOf must share the same mode\"\n\n for inp in op.inputs:\n inputs.add(inp)\n\n for out in op.outputs:\n outputs.add(out)\n\n # Bypassing OneOf Op's restriction of same input and output key(s) on the list of passed NumpyOps.\n super(OneOf, self).__init__(inputs=inputs.union(outputs), outputs=outputs, mode=mode, ds_id=ds_id)\n self.ops = numpy_ops\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n data = {key: elem for key, elem in zip(self.inputs, data)}\n forward_numpyop([random.choice(self.ops)], data, state)\n return [data[key] for key in self.outputs]\n\n\n@traceable()\nclass RUA(NumpyOp):\n \"\"\"Apply RUA augmentation strategy.\n\n Note that all augmentation ops passed to RUA should have a set_rua_level method to modify their strength based on\n the level. Custom NumpyOps can be passed to the `choices` argument along with names of augmentations to add. Passing\n 'defaults' adds the default list of augmentations along with any custom NumpyOps specified by the user.\n The default augmentations are: 'Rotate', 'Identity', 'AutoContrast', 'Equalize', 'Posterize', 'Solarize',\n 'Sharpness', 'Contrast', 'Color', 'Brightness', 'ShearX', 'ShearY', 'TranslateX' and 'TranslateY'.\n To add specific augmentations from the default list, their names can be passed. Ex: 'Rotate'.\n To remove specific augmentations from the list, you can negate their names. Ex: '!Rotate' will load all the\n augmentations except 'Rotate'.\n\n Example combinations which are not allowed:\n choices = ['defaults', 'Rotate'] # augmentations from the default list are redundant with 'defaults'.\n choices = ['defaults', '!Rotate'] # negated augmentations automatically load the default list.\n choices = ['!Solarize', 'Rotate'] # Cannot mix negated and normal augmentations.\n\n RUA should not have augmentation ops whose output key(s) do not already exist in the data dictionary. This would\n result in a problem when future ops / traces attempt to reference the output key, but RUA declined to generate it.\n If you want to create a default value for a new key, simply use a LambdaOp before invoking RUA.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n choices: List of augmentations to apply.\n level: Factor to set the range for magnitude of augmentation. 
Must be in the range [0, 30].\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n choices: Union[str, NumpyOp, List[Union[str, NumpyOp]]] = \"defaults\",\n level: Union[int, float] = 18):\n self.default_aug_dict = {\n \"Rotate\": Rotate(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=90),\n \"Identity\": Identity(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id),\n \"AutoContrast\": AutoContrast(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id),\n \"Equalize\": Equalize(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id),\n \"Posterize\": Posterize(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,num_bits=7),\n \"Solarize\": Solarize(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,threshold=256),\n \"Sharpness\": Sharpness(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"Contrast\": Contrast(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"Color\": Color(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"Brightness\": Brightness(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"ShearX\": ShearX(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shear_coef=0.5),\n \"ShearY\": ShearY(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shear_coef=0.5),\n \"TranslateX\": TranslateX(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shift_limit=0.33),\n \"TranslateY\": TranslateY(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shift_limit=0.33)\n }\n aug_options = self._parse_aug_choices(magnitude_coef=(level / 30.), choices=to_list(choices))\n\n inputs, outputs = to_set(inputs), to_set(outputs)\n for op in aug_options:\n for inp in op.inputs:\n inputs.add(inp)\n\n for out in op.outputs:\n outputs.add(out)\n super().__init__(inputs=inputs.union(outputs), outputs=outputs, mode=mode, ds_id=ds_id)\n\n # Calculating number of augmentation to apply at each training iteration\n N_min = 1\n N_max = min(len(aug_options), 5)\n N = level * (N_max - N_min) / 30 + N_min\n N_guarantee, N_p = int(N), N % 1\n\n self.ops = [OneOfMultiVar(*aug_options) for _ in range(N_guarantee)]\n if N_p > 0:\n self.ops.append(Sometimes(OneOfMultiVar(*aug_options), prob=N_p))\n\n def _parse_aug_choices(self, magnitude_coef: float, choices: List[Union[str, NumpyOp]]) -> List[NumpyOp]:\n \"\"\"Parse the augmentation choices to determine the final list of augmentations to apply.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n choices: List of augmentations to apply.\n\n Returns:\n List of augmentations to apply.\n\n Raises:\n AssertionError: If augmentations to add and remove are mixed.\n AttributeError: If augmentation choices don't have a 'set_rua_level' method.\n ValueError: If 'defaults' is provided with augmentation strings to add or remove, or wrong names are\n provided.\n \"\"\"\n custom_ops = [op for op in choices if not isinstance(op, str)]\n remove_ops = [op for op in choices if isinstance(op, str) and op.startswith(\"!\")]\n add_ops = [op for op in choices if isinstance(op, str) and not (op.startswith(\"!\") or (op == \"defaults\"))]\n aug_names = list(self.default_aug_dict.keys())\n\n assert len(remove_ops)==0 or len(add_ops)==0, \\\n \"RUA supports either add or remove ops, but not both. 
Found {} and {}\".format(add_ops, remove_ops)\n\n if len(remove_ops) > 0:\n if \"defaults\" in choices:\n raise ValueError(\"Can't provide 'defaults' value with ops to remove, found: {}\".format(remove_ops))\n remove_ops = [op[1:] for op in remove_ops]\n\n for op in remove_ops:\n if op not in aug_names:\n raise ValueError(\"Unable to remove {}, list of augmentations available: {}\".format(op, aug_names))\n\n aug_list = [aug for aug_name, aug in self.default_aug_dict.items() if aug_name not in remove_ops]\n else:\n if \"defaults\" in choices:\n if len(add_ops) > 0:\n raise ValueError(\"Can't pass 'defaults' value with default list's ops, found: {}\".format(add_ops))\n aug_list = list(self.default_aug_dict.values())\n elif len(add_ops) > 0:\n for op in add_ops:\n if op not in aug_names:\n raise ValueError(\"Unable to add {}, list of augmentations available: {}\".format(op, aug_names))\n\n aug_list = [self.default_aug_dict[aug_name] for aug_name in add_ops]\n else:\n aug_list = []\n aug_list = aug_list + custom_ops\n\n for op in aug_list:\n if hasattr(op, \"set_rua_level\") and inspect.ismethod(getattr(op, \"set_rua_level\")):\n op.set_rua_level(magnitude_coef=magnitude_coef)\n else:\n raise AttributeError(\n \"RUA Augmentations should have a 'set_rua_level' method but it's not present in Op: {}\".format(\n op.__class__.__name__))\n\n return aug_list\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n data = {key: elem for key, elem in zip(self.inputs, data)}\n forward_numpyop(self.ops, data, state)\n return [data[key] for key in self.outputs]\n"
] |
[
[
"numpy.round",
"numpy.argmax",
"sklearn.metrics.recall_score"
],
[
"numpy.max",
"numpy.array",
"numpy.uint8",
"numpy.min",
"numpy.mean",
"numpy.float32",
"numpy.moveaxis",
"tensorflow.is_tensor",
"numpy.maximum"
],
[
"torch.nn.Linear",
"numpy.copy",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"numpy.where",
"sklearn.metrics.f1_score",
"numpy.concatenate",
"numpy.random.normal",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.zeros_like",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.zeros",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"numpy.absolute",
"torch.optim.RMSprop",
"torch.nn.Sigmoid",
"torch.ones_like",
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve"
],
[
"numpy.where",
"numpy.array"
]
] |
als11044/trimesh
|
[
"a29735c47cf6a473ba77fdf8be0d3f6fd104c9fc",
"a29735c47cf6a473ba77fdf8be0d3f6fd104c9fc"
] |
[
"trimesh/geometry.py",
"trimesh/path/simplify.py"
] |
[
"import numpy as np\n\nfrom .transformations import rotation_matrix\nfrom .constants import tol, log\n\nfrom . import util\n\ntry:\n from scipy.sparse import coo_matrix\nexcept ImportError:\n log.warning('scipy.sparse.coo_matrix unavailable')\n\n\ndef plane_transform(origin, normal):\n '''\n Given the origin and normal of a plane, find the transform that will move\n that plane to be coplanar with the XY plane\n\n Parameters\n ----------\n origin: (3,) float, point in space\n normal: (3,) float, plane normal vector\n\n Returns\n ---------\n transform: (4,4) float, transformation matrix\n '''\n transform = align_vectors(normal, [0, 0, 1])\n transform[0:3, 3] = -np.dot(transform, np.append(origin, 1))[0:3]\n return transform\n\n\ndef transform_around(matrix, point):\n '''\n Given a transformation matrix, apply its rotation component around a\n point in space.\n\n Parameters\n ----------\n matrix: (4,4) float, transformation matrix\n point: (3,) float, point in space\n\n Returns\n ---------\n result: (4,4) transformation matrix\n '''\n point = np.array(point)\n translate = np.eye(4)\n translate[0:3, 3] = -point\n result = np.dot(matrix, translate)\n translate[0:3, 3] = point\n result = np.dot(translate, result)\n return result\n\n\ndef align_vectors(vector_start, vector_end, return_angle=False):\n '''\n Returns the 4x4 transformation matrix which will rotate from\n vector_start to vector_end, eg:\n\n vector_end == np.dot(T, np.append(vector_start, 1))[0:3]\n\n\n Parameters\n -----------\n vector_start: (3,) float, vector in space\n vector_end: (3,) float, vector in space\n return_angle: bool, return angle between vectors or not\n\n Returns\n -----------\n transform: (4,4) float, transformation matrix\n angle: float, angle in radians (only returned if flag set)\n\n '''\n start = np.asanyarray(vector_start, dtype=np.float64)\n start /= np.linalg.norm(start)\n end = np.asanyarray(vector_end, dtype=np.float64)\n end /= np.linalg.norm(end)\n\n cross = np.cross(start, end)\n # we clip the norm to 1, as otherwise floating point bs\n # can cause the arcsin to error\n norm = np.linalg.norm(cross)\n norm = np.clip(norm, -1.0, 1.0)\n direction = np.sign(np.dot(start, end))\n\n if norm < tol.zero:\n # if the norm is zero, the vectors are the same\n # and no rotation is needed\n T = np.eye(4)\n T[0:3] *= direction\n else:\n angle = np.arcsin(norm)\n if direction < 0:\n angle = np.pi - angle\n T = rotation_matrix(angle, cross)\n\n check = np.abs(np.dot(T[:3, :3], start) - end)\n if not (check < 1e-5).all():\n raise ValueError('aligning vectors failed!')\n\n if return_angle:\n return T, angle\n return T\n\n\ndef faces_to_edges(faces, return_index=False):\n '''\n Given a list of faces (n,3), return a list of edges (n*3,2)\n\n Parameters\n -----------\n faces: (n,3) int, vertex indices representing faces\n\n Returns\n -----------\n edges: (n*3, 2) int, vertex indices representing edges\n '''\n faces = np.asanyarray(faces)\n edges = np.column_stack((faces[:, (0, 1)],\n faces[:, (1, 2)],\n faces[:, (2, 0)])).reshape(-1, 2)\n if return_index:\n face_index = np.tile(np.arange(len(faces)), (3, 1)).T.reshape(-1)\n return edges, face_index\n return edges\n\n\ndef vector_angle(pairs):\n '''\n Find the angles between vector pairs\n\n Parameters\n ----------\n pairs: (n,2,3) set of vector pairs\n\n Returns\n ----------\n angles: (n,) float, angles between vectors\n\n Examples\n ----------\n angles = mesh.face_normals[mesh.face_adjacency]\n '''\n pairs = np.asanyarray(pairs)\n if not util.is_shape(pairs, (-1, 2, 3)):\n 
raise ValueError('pairs must be (n,2,3)!')\n dots = util.diagonal_dot(pairs[:, 0], pairs[:, 1])\n # clip for floating point error\n dots = np.clip(dots, -1.0, 1.0)\n angles = np.abs(np.arccos(dots))\n return angles\n\n\ndef triangulate_quads(quads):\n '''\n Given a set of quad faces, return them as triangle faces.\n\n Parameters\n -----------\n quads: (n,4) int, vertex indices of quad faces\n\n Returns\n -----------\n faces: (m,3) int, vertex indices of triangular faces\n '''\n if len(quads) == 0:\n return quads\n quads = np.asanyarray(quads)\n faces = np.vstack((quads[:, [0, 1, 2]],\n quads[:, [2, 3, 0]]))\n return faces\n\n\ndef mean_vertex_normals(vertex_count, faces, face_normals, **kwargs):\n '''\n Find vertex normals from the mean of the faces that contain that vertex.\n\n Parameters\n -----------\n vertex_count: int, the number of vertices faces refer to\n faces: (n,3) int, list of vertex indices\n face_normals: (n,3) float, normal vector for each face\n\n Returns\n -----------\n vertex_normals: (vertex_count, 3) float normals for every vertex\n Uncontained vertices will be zero.\n '''\n def summed_sparse():\n # use a sparse matrix of which face contains each vertex to\n # figure out the summed normal at each vertex\n # allow cached sparse matrix to be passed\n if 'sparse' in kwargs:\n sparse = kwargs['sparse']\n else:\n sparse = index_sparse(vertex_count, faces)\n summed = sparse.dot(face_normals)\n log.debug('Generated vertex normals using sparse matrix')\n return summed\n\n def summed_loop():\n # loop through every face, in tests was ~50x slower than\n # doing this with a sparse matrix\n summed = np.zeros((vertex_count, 3))\n for face, normal in zip(faces, face_normals):\n summed[face] += normal\n return summed\n\n try:\n summed = summed_sparse()\n except BaseException:\n log.warning('Unable to generate sparse matrix! 
Falling back!',\n exc_info=True)\n summed = summed_loop()\n unit_normals, valid = util.unitize(summed, check_valid=True)\n vertex_normals = np.zeros((vertex_count, 3), dtype=np.float64)\n vertex_normals[valid] = unit_normals\n\n return vertex_normals\n\n\ndef index_sparse(column_count, indices):\n '''\n Return a sparse matrix for which vertices are contained in which faces.\n\n Returns\n ---------\n sparse: scipy.sparse.coo_matrix of shape (column_count, len(faces))\n dtype is boolean\n\n Examples\n ----------\n In [1]: sparse = faces_sparse(len(mesh.vertices), mesh.faces)\n\n In [2]: sparse.shape\n Out[2]: (12, 20)\n\n In [3]: mesh.faces.shape\n Out[3]: (20, 3)\n\n In [4]: mesh.vertices.shape\n Out[4]: (12, 3)\n\n In [5]: dense = sparse.toarray().astype(int)\n\n In [6]: dense\n Out[6]:\n array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1],\n [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]])\n\n In [7]: dense.sum(axis=0)\n Out[7]: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3])\n '''\n indices = np.asanyarray(indices)\n column_count = int(column_count)\n\n row = indices.reshape(-1)\n col = np.tile(np.arange(len(indices)).reshape(\n (-1, 1)), (1, indices.shape[1])).reshape(-1)\n\n shape = (column_count, len(indices))\n data = np.ones(len(col), dtype=np.bool)\n sparse = coo_matrix((data, (row, col)),\n shape=shape,\n dtype=np.bool)\n return sparse\n\n\ndef medial_axis(samples, contains):\n '''\n Given a set of samples on a boundary, find the approximate medial axis based\n on a voronoi diagram and a containment function which can assess whether\n a point is inside or outside of the closed geometry.\n\n Parameters\n ----------\n samples: (n,d) set of points on the boundary of the geometry\n contains: function which takes (m,d) points and returns an (m) bool array\n\n Returns\n ----------\n lines: (n,2,2) set of line segments\n '''\n\n from scipy.spatial import Voronoi\n from .path.io.load import load_path\n\n # create the voronoi diagram, after vertically stacking the points\n # deque from a sequnce into a clean (m,2) array\n voronoi = Voronoi(samples)\n # which voronoi vertices are contained inside the original polygon\n contained = contains(voronoi.vertices)\n # ridge vertices of -1 are outside, make sure they are False\n contained = np.append(contained, False)\n inside = [i for i in voronoi.ridge_vertices if contained[i].all()]\n line_indices = np.vstack([util.stack_lines(i)\n for i in inside if len(i) >= 2])\n lines = voronoi.vertices[line_indices]\n return load_path(lines)\n",
"import numpy as np\n\nimport copy\nimport collections\n\nfrom . import arc\nfrom . import entities\n\nfrom ..nsphere import fit_nsphere\nfrom ..util import unitize, diagonal_dot\nfrom ..constants import log\nfrom ..constants import tol_path as tol\n\n\ndef fit_circle_check(points, scale, prior=None, final=False, verbose=False):\n '''\n Fit a circle, and reject the fit if:\n * the radius is larger than tol.radius_min*scale or tol.radius_max*scale\n * any segment spans more than tol.seg_angle\n * any segment is longer than tol.seg_frac*scale\n * the fit deviates by more than tol.radius_frac*radius\n * the segments on the ends deviate from tangent by more than tol.tangent\n\n Parameters\n ---------\n points: (n, d) set of points which represent a path\n prior: (center, radius) tuple for best guess, or None if unknown\n scale: float, what is the overall scale of the set of points\n verbose: boolean, if True output log.debug messages for the reasons\n for fit rejection. Potentially generates hundreds of thousands of\n messages so only suggested in manual debugging.\n\n Returns\n ---------\n if fit is acceptable:\n (center, radius) tuple\n else:\n None\n '''\n # an arc needs at least three points\n if len(points) < 3:\n return None\n\n # do a least squares fit on the points\n C, R, r_deviation = fit_nsphere(points, prior=prior)\n\n # check to make sure radius is between min and max allowed\n if not tol.radius_min < (R / scale) < tol.radius_max:\n if verbose:\n log.debug('circle fit error: R %f', R / scale)\n return None\n\n # check point radius error\n r_error = r_deviation / R\n if r_error > tol.radius_frac:\n if verbose:\n log.debug('circle fit error: fit %s', str(r_error))\n return None\n\n vectors = np.diff(points, axis=0)\n segment = np.linalg.norm(vectors, axis=1)\n\n # approximate angle in radians, segments are linear length\n # not arc length but this is close and avoids a cosine\n angle = segment / R\n\n if (angle > tol.seg_angle).any():\n if verbose:\n log.debug('circle fit error: angle %s', str(angle))\n return None\n\n if final and (angle > tol.seg_angle_min).sum() < 3:\n log.debug('final: angle %s', str(angle))\n return None\n\n # check segment length as a fraction of drawing scale\n scaled = segment / scale\n\n if (scaled > tol.seg_frac).any():\n if verbose:\n log.debug('circle fit error: segment %s', str(scaled))\n return None\n\n # check to make sure the line segments on the ends are actually\n # tangent with the candidate circle fit\n mid_pt = points[[0, -2]] + (vectors[[0, -1]] * .5)\n radial = unitize(mid_pt - C)\n ends = unitize(vectors[[0, -1]])\n tangent = np.abs(np.arccos(diagonal_dot(radial, ends)))\n tangent = np.abs(tangent - np.pi / 2).max()\n if tangent > tol.tangent:\n if verbose:\n log.debug('circle fit error: tangent %f',\n np.degrees(tangent))\n return None\n\n return (C, R)\n\n\ndef is_circle(points, scale, verbose=False):\n '''\n Given a set of points, quickly determine if they represent\n a circle or not.\n '''\n\n # make sure input is a numpy array\n points = np.asanyarray(points)\n scale = float(scale)\n\n # can only be a circle if the first and last point are the\n # same (AKA is a closed path)\n if np.linalg.norm(points[0] - points[-1]) > tol.merge:\n return None\n\n box = points.ptp(axis=0)\n # the bounding box size of the points\n # check aspect ratio as an early exit if the path is not a circle\n aspect = np.divide(*box)\n if np.abs(aspect - 1.0) > tol.aspect_frac:\n return None\n\n # fit a circle with tolerance checks\n CR = fit_circle_check(points, 
scale=scale)\n if CR is None:\n return None\n\n # return the circle as three control points\n control = arc.angles_to_threepoint([0, np.pi * .5], *CR)\n return control\n\n\ndef merge_colinear(points, scale=None):\n '''\n Given a set of points representing a path in space,\n merge points which are colinear.\n\n Parameters\n ----------\n points: (n, d) set of points (where d is dimension)\n scale: float, scale of drawing\n\n Returns\n ----------\n merged: (j, d) set of points with colinear and duplicate\n points merged, where (j < n)\n '''\n points = np.array(points)\n if scale is None:\n scale = np.ptp(points, axis=0).max()\n\n # the vector from one point to the next\n direction = points[1:] - points[:-1]\n # the length of the direction vector\n direction_norm = np.linalg.norm(direction, axis=1)\n # make sure points don't have zero length\n direction_ok = direction_norm > tol.merge\n\n # remove duplicate points\n points = np.vstack((points[0], points[1:][direction_ok]))\n direction = direction[direction_ok]\n direction_norm = direction_norm[direction_ok]\n\n # create a vector between every other point, then turn it perpendicular\n # if we have points A B C D\n # and direction vectors A-B, B-C, etc\n # these will be perpendicular to the vectors A-C, B-D, etc\n perpendicular = (points[2:] - points[:-2]).T[::-1].T\n perpendicular /= np.linalg.norm(perpendicular, axis=1).reshape((-1, 1))\n\n # find the projection of each direction vector\n # onto the perpendicular vector\n projection = np.abs(diagonal_dot(perpendicular, direction[:-1]))\n\n projection_ratio = np.max((projection / direction_norm[1:],\n projection / direction_norm[:-1]), axis=0)\n\n mask = np.ones(len(points), dtype=np.bool)\n # since we took diff, we need to offset by one\n mask[1:-1][projection_ratio < 1e-4 * scale] = False\n\n merged = points[mask]\n return merged\n\n\ndef resample_spline(points, smooth=.001, count=None, degree=3):\n '''\n Resample a path in space, smoothing along a b-spline.\n\n Parameters\n -----------\n points: (n, dimension) float, points in space\n smooth: float, smoothing amount\n count: number of samples in output\n degree: int, degree of spline polynomial\n\n Returns\n ---------\n resampled: (count, dimension) float, points in space\n '''\n from scipy.interpolate import splprep, splev\n if count is None:\n count = len(points)\n points = np.asanyarray(points)\n closed = np.linalg.norm(points[0] - points[-1]) < tol.merge\n\n tpl = splprep(points.T, s=smooth, k=degree)[0]\n i = np.linspace(0.0, 1.0, count)\n resampled = np.column_stack(splev(i, tpl))\n\n if closed:\n shared = resampled[[0, -1]].mean(axis=0)\n resampled[0] = shared\n resampled[-1] = shared\n\n return resampled\n\n\ndef points_to_spline_entity(points, smooth=.0005, count=None):\n '''\n Create a spline entity from a curve in space\n\n Parameters\n -----------\n points: (n, dimension) float, points in space\n smooth: float, smoothing amount\n count: int, number of samples in result\n\n Returns\n ---------\n entity: entities.BSpline object with points indexed at zero\n control: (m, dimension) float, new vertices for entity\n '''\n\n from scipy.interpolate import splprep\n if count is None:\n count = len(points)\n points = np.asanyarray(points)\n closed = np.linalg.norm(points[0] - points[-1]) < tol.merge\n\n knots, control, degree = splprep(points.T, s=smooth)[0]\n control = np.transpose(control)\n index = np.arange(len(control))\n\n if closed:\n control[0] = control[[0, -1]].mean(axis=0)\n control = control[:-1]\n index[-1] = index[0]\n\n 
entity = entities.BSpline(points=index,\n knots=knots,\n closed=closed)\n\n return entity, control\n\n\ndef three_point(indices):\n '''\n Given a long list of ordered indices,\n return the first, middle and last.\n\n Parameters\n -----------\n indices: (n,) array\n\n Returns\n ----------\n three: (3,) array\n '''\n three = [indices[0],\n indices[int(len(indices) / 2)],\n indices[-1]]\n return np.array(three)\n\n\ndef simplify_basic(drawing):\n '''\n Merge colinear segments and fit circles.\n\n Parameters\n -----------\n drawing: Path2D object\n\n Returns\n -----------\n simplified: Path2D with circles.\n '''\n\n if any(i.__class__.__name__ != 'Line' for i in drawing.entities):\n log.debug('Path contains non- linear entities, skipping')\n return drawing\n\n # we are going to do a bookkeeping to avoid having\n # to recompute literally everything when simplification is ran\n cache = copy.deepcopy(drawing._cache)\n\n # store new values\n vertices_new = collections.deque()\n entities_new = collections.deque()\n\n for polygon in drawing.polygons_closed:\n\n # clean up things like self intersections\n buffered = polygon.buffer(0.0)\n # get the exterior as an (n,2) array\n # since we generated these from the closed\n points = merge_colinear(np.array(buffered.exterior.coords),\n scale=drawing.scale)\n # check to see if the closed entity represents a circle\n circle = is_circle(points,\n scale=drawing.scale)\n\n if circle is not None:\n # the points are circular enough for our high standards\n # so replace them with a closed Arc entity\n entities_new.append(entities.Arc(points=np.arange(3) +\n len(vertices_new),\n closed=True))\n vertices_new.extend(circle)\n else:\n # save this path as a closed Line entity\n # we cleaned up colinear points so it will still\n # be simpler than the source data\n indexes = np.arange(len(points)) + len(vertices_new)\n entities_new.append(entities.Line(points=indexes))\n vertices_new.extend(points)\n\n # create the new drawing object\n simplified = type(drawing)(entities=entities_new,\n vertices=vertices_new)\n\n # we have changed every path to a single closed entity\n # either a closed arc, or a closed line\n # therefore all closed paths are now represented by a single entity\n cache.cache.update({'paths': np.arange(len(entities_new)).reshape((-1, 1)),\n 'path_valid': np.ones(len(entities_new), dtype=np.bool),\n 'dangling': np.array([])})\n simplified._cache = cache\n # set the cache ID so it won't dump when a value is requested\n simplified._cache.id_set()\n\n return simplified\n"
] |
[
[
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm",
"scipy.spatial.Voronoi",
"numpy.zeros",
"numpy.append",
"numpy.arcsin",
"numpy.arccos",
"numpy.eye",
"numpy.clip",
"numpy.column_stack",
"numpy.cross",
"numpy.asanyarray",
"numpy.vstack"
],
[
"numpy.divide",
"numpy.max",
"numpy.array",
"numpy.linalg.norm",
"numpy.diff",
"numpy.degrees",
"numpy.transpose",
"numpy.arange",
"numpy.abs",
"scipy.interpolate.splprep",
"numpy.ptp",
"scipy.interpolate.splev",
"numpy.linspace",
"numpy.asanyarray",
"numpy.vstack"
]
] |
Bondify/gtfs_functions
|
[
"4cd237fe5d326219428018ff0cd58152bceadf73"
] |
[
"build/lib/gtfs_functions/gtfs_funtions.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 10 15:20:33 2020\r\n@author: santi\r\n\"\"\"\r\n\r\ndef save_gdf(data, file_name, geojson=False, shapefile=True):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import zipfile\r\n import os\r\n \r\n geojson_path = file_name + '.geojson'\r\n shape_path = file_name + '.shp'\r\n zip_path = file_name + '.zip'\r\n\r\n # -------------------------------------------------------\r\n # ----------- Save geojson (it's lighter) ---------------\r\n # -------------------------------------------------------\r\n if geojson:\r\n data.to_file(\r\n filename = geojson_path, \r\n driver=\"GeoJSON\"\r\n )\r\n\r\n # -------------------------------------------------------\r\n # ----------------- Save shapefile ----------------------\r\n # -------------------------------------------------------\r\n if shapefile:\r\n data.to_file(\r\n driver = 'ESRI Shapefile',\r\n filename = shape_path,\r\n )\r\n # create the .prj file\r\n prj_name = file_name + '.prj'\r\n prj = open(prj_name, \"w\")\r\n \r\n prj_write = 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.017453292519943295]]'\r\n # call the function and supply the epsg code\r\n prj.write(prj_write)\r\n prj.close()\r\n \r\n if shapefile:\r\n extensions = ['.cpg', '.dbf','.prj', '.shp', '.shx']\r\n \r\n zipObj = zipfile.ZipFile(zip_path, 'w')\r\n \r\n for ex in extensions:\r\n zipObj.write(file_name + ex) \r\n os.remove(file_name + ex) # in case I want to remove the files out of the shapefile\r\n \r\n zipObj.close()\r\n \r\n \r\ndef import_gtfs(gtfs_path, busiest_date = True):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import os\r\n import pandas as pd\r\n import zipfile\r\n\r\n try:\r\n import partridge as ptg \r\n except ImportError as e:\r\n os.system('pip install partridge')\r\n import partridge as ptg\r\n # Partridge to read the feed\r\n # service_ids = pd.read_csv(gtfs_path + '/trips.txt')['service_id'].unique()\r\n # service_ids = frozenset(tuple(service_ids))\r\n \r\n if busiest_date:\r\n service_ids = ptg.read_busiest_date(gtfs_path)[1]\r\n else:\r\n with zipfile.ZipFile(gtfs_path) as myzip:\r\n myzip.extract(\"trips.txt\")\r\n service_ids = pd.read_csv('trips.txt')['service_id'].unique()\r\n service_ids = frozenset(tuple(service_ids))\r\n os.remove('trips.txt')\r\n \r\n view = {'trips.txt': {'service_id': service_ids}}\r\n \r\n feed = ptg.load_geo_feed(gtfs_path, view)\r\n \r\n routes = feed.routes\r\n trips = feed.trips\r\n stop_times = feed.stop_times\r\n stops = feed.stops\r\n shapes = feed.shapes\r\n \r\n # Get routes info in trips\r\n trips = pd.merge(trips, routes, how='left').loc[:, ['trip_id', 'route_id',\r\n 'service_id', 'direction_id','shape_id']]\r\n \r\n # Get trips, routes and stops info in stop_times\r\n stop_times = pd.merge(stop_times, trips, how='left') \r\n stop_times = pd.merge(stop_times, stops, how='left')\r\n \r\n return routes, stops, stop_times, trips, shapes\r\n\r\ndef cut_gtfs(stop_times, stops, shapes):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import os\r\n import pandas as pd\r\n#--------------------------------------------------------\r\n os.system('apt install libspatialindex-dev')\r\n os.system('pip install rtree')\r\n#----------------------------------------------------------\r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n try:\r\n import 
utm\r\n except ImportError as e:\r\n os.system('pip install utm')\r\n import utm\r\n\r\n from shapely.ops import nearest_points\r\n from shapely.geometry import Point, LineString, MultiLineString, MultiPoint\r\n from shapely.ops import split\r\n from shapely import geometry, ops\r\n\r\n # Get the right epsg code for later conversations\r\n shapes.crs = {'init':'epsg:4326'}\r\n\r\n lat = shapes.geometry.iloc[0].coords[0][1]\r\n lon = shapes.geometry.iloc[0].coords[0][0]\r\n\r\n zone = utm.from_latlon(lat, lon)\r\n\r\n def code(zone):\r\n #The EPSG code is 32600+zone for positive latitudes and 32700+zone for negatives.\r\n if lat <0:\r\n epsg_code = 32700 + zone[2]\r\n else:\r\n epsg_code = 32600 + zone[2]\r\n return epsg_code\r\n\r\n epsg = code(zone)\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # --------------------- FIND THE CLOSEST POINT TO EACH LINE --------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------ \r\n\r\n # Data frame with stop sequence for route and direction\r\n sseq = stop_times.drop_duplicates(subset=['stop_id','stop_name', 'stop_sequence', 'shape_id'])[['route_id','direction_id','stop_id','stop_name', 'stop_sequence', 'shape_id']]\r\n\r\n # Data frames with the number of stops for each route and direction and shape_id\r\n route_shapes = sseq.pivot_table('stop_id',\r\n index = ['route_id', 'direction_id', 'shape_id'],\r\n aggfunc='count').reset_index()\r\n route_shapes.columns = ['route_id','direction_id', 'shape_id', 'stops_count']\r\n\r\n # List of shape_ids\r\n shape_id_list = shapes.shape_id.unique()\r\n\r\n # Create a DataFrame with the pair (stop, nearest_point) for each shape_id\r\n def find_shape_closest_points(shape_id):\r\n #shape_id = row.shape_id\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n # Look for the shape\r\n shape = shapes.loc[shapes.shape_id == shape_id,'geometry'].values[0]\r\n\r\n\r\n # Look for the stop_ids of this shape\r\n route_stop_ids = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n &(sseq['shape_id'] == shape_id)]\r\n\r\n # Look for the geometry of these stops\r\n # merged = pd.merge(route_stop_ids, stops, how='left')\r\n # route_stop_geom = merged.geometry\r\n route_stop_geom = pd.merge(route_stop_ids, stops, how='left').geometry\r\n\r\n # Look for the nearest points of these stops that are in the shape\r\n points_in_shape = route_stop_geom.apply(lambda x: nearest_points(x, shape))\r\n\r\n d = dict(shape_id=shape_id, points=list(points_in_shape))\r\n\r\n return d\r\n\r\n shape_closest_points = [find_shape_closest_points(s) for s in shape_id_list]\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # --------------------- CREATE LINES THAT CUT THE SHAPE ------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # 
------------------------------------------------------------------------------\r\n\r\n shape_trans_lines = pd.DataFrame()\r\n # First we define a function that will help us create the line to intersect the shape\r\n\r\n # ---------------- THIS IS THE VALUE YOU SHOULD CHANGE IF THE CUTTING GEOMETRY AND ---\r\n # ---------------- THE LINE INTERSECT -------------------------------------------------\r\n offset = 0.0001\r\n\r\n def create_line(row):\r\n # Formula to make the line longer\r\n # a = (y1-b)/x1\r\n # b = (y2-x2/x1*y1)/(1-x2/x1)\r\n if row[0] == row[1]:\r\n x1 = row[0].x - offset\r\n y1 = row[0].y - offset\r\n\r\n x2 = row[0].x \r\n y2 = row[0].y\r\n\r\n x3 = row[0].x + offset\r\n y3 = row[0].y + offset\r\n\r\n else: \r\n x1 = row[0].x\r\n y1 = row[0].y\r\n\r\n x2 = row[1].x\r\n y2 = row[1].y\r\n\r\n # If x2==x1 it will give the error \"ZeroDivisionError\"\r\n if float(x2) != float(x1):\r\n b = (y2-x2/x1*y1)/(1-x2/x1)\r\n a = (y1-b)/x1\r\n\r\n if x2 - x1 < 0: # We should create an \"if\" to check if we need to do -1 or +1 depending on x2-x1\r\n x3 = x2 - 3*(x1 - x2)#offset\r\n else:\r\n x3 = x2 + 3*(x2 - x1)#offset\r\n\r\n y3 = a*x3 + b\r\n\r\n else:\r\n x3 = x2\r\n b = 0\r\n a = 0\r\n\r\n if y2-y1 < 0:\r\n #y3 = y2 - offset/5\r\n y3 = y2 - 3*(y1-y2) #offset/10000000\r\n else: \r\n #y3 = y2 + offset/5\r\n y3 = y2 + 3*(y2-y1) #offset/10000000\r\n\r\n trans = LineString([Point(x1,y1), Point(x2,y2), Point(x3, y3)])\r\n return trans\r\n\r\n # For each shape we need to create transversal lines and separete the shape in segments \r\n def find_shape_trans_lines(shape_closest_points):\r\n # Choose the shape\r\n shape_id = shape_closest_points['shape_id']\r\n\r\n # Choose the pair (stop, nearest point to shape) to create the line\r\n scp = shape_closest_points['points']\r\n\r\n lines = [create_line(p) for p in scp]\r\n # scp.apply(create_line)\r\n\r\n d = dict(shape_id=shape_id, trans_lines=lines)\r\n\r\n return d\r\n\r\n shape_trans_lines = [find_shape_trans_lines(shape_closest_points[i]) for i in range(0, len(shape_closest_points))]\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------ CUT THE SHAPES --------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # Set the tolerance of the cuts\r\n tolerance = 0.0001\r\n\r\n loops_route_id = []\r\n loops_direction_id = []\r\n loops_shape_id = []\r\n\r\n def cut_shapes_(shape_trans_lines, shape_closest_points):\r\n shape_id = shape_trans_lines['shape_id']\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n # Check if the line is simple (ie, doesn't intersect itself)\r\n line = shapes.loc[shapes.shape_id == shape_id, 'geometry'].values[0]\r\n if line.is_simple:\r\n # Split the shape in different segments\r\n trans_lines = shape_trans_lines['trans_lines']\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n\r\n #df['segment'] = ''\r\n\r\n 
d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n if len(trans_lines) == 2:\r\n # In case there is a line with only two stops\r\n d['segment'] = [line]\r\n return d\r\n\r\n else:\r\n # trans_lines_all = MultiLineString(list(trans_lines.values))\r\n # trans_lines_cut = MultiLineString(list(trans_lines.values)[1:-1])\r\n\r\n # # Split the shape in different segments, cut by the linestrings created before\r\n # # The result is a geometry collection with the segments of the route\r\n # result = split(line, trans_lines_cut)\r\n try:\r\n trans_lines_all = MultiLineString(trans_lines)\r\n trans_lines_cut = MultiLineString(trans_lines[1:-1])\r\n\r\n # Split the shape in different segments, cut by the linestrings created before\r\n # The result is a geometry collection with the segments of the route\r\n result = split(line, trans_lines_cut)\r\n except ValueError:\r\n # If the cut points are on the line then try to cut with the points instead of lines\r\n test = shape_closest_points['points']\r\n cut_points = [test[i][1] for i in range(len(test))]\r\n cut_points = MultiPoint(cut_points[1:-1])\r\n result = split(line, cut_points)\r\n\r\n if len(result)==len(trans_lines_all)-1:\r\n d['segment'] = [s for s in result]\r\n\r\n return d\r\n else:\r\n loops_route_id.append(route_id)\r\n loops_direction_id.append(direction_id)\r\n loops_shape_id.append(shape_id) \r\n else:\r\n loops_route_id.append(route_id)\r\n loops_direction_id.append(direction_id)\r\n loops_shape_id.append(shape_id)\r\n\r\n segments = [cut_shapes_(shape_trans_lines[i], shape_closest_points[i]) for i in range(0, len(shape_trans_lines))]\r\n\r\n # Remove None values\r\n segments = [i for i in segments if i] \r\n\r\n loops = pd.DataFrame()\r\n loops['route_id'] = loops_route_id\r\n loops['direction_id'] = loops_direction_id\r\n loops['shape_id'] = loops_shape_id\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------- CUT THE SHAPES WITH LOOPS --------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n\r\n # Manage the lines with loops\r\n shapes_loop = shapes.loc[shapes.shape_id.isin(loops_shape_id)]\r\n\r\n aux = pd.DataFrame.from_dict(shape_trans_lines)\r\n trans_loop = aux.loc[aux.shape_id.isin(loops_shape_id)]\r\n\r\n aux = pd.DataFrame.from_dict(shape_closest_points)\r\n cut_points_loop = aux.loc[aux.shape_id.isin(loops_shape_id)]\r\n\r\n # Separate the shapes according to possible exceptions\r\n trans_loop['n_segments'] = trans_loop['trans_lines'].map(len)\r\n run_shapes_no_middle = False\r\n run_shapes_one_seg = False\r\n\r\n # Exception 1: Only three stops --> one cut point, two segments\r\n # If there's only one cut_point this will make the\r\n # script skip the \"Middle segments\" part\r\n # (with only one cut point there are only two segments)\r\n\r\n shapes_no_middle = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] ==3, 'shape_id'].unique())].reset_index()\r\n\r\n if len(shapes_no_middle) > 0:\r\n run_shapes_no_middle = True\r\n\r\n # Exception 
2: Only two stops --> no cut points, one segments\r\n shapes_one_seg = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] ==2, 'shape_id'].unique())].reset_index()\r\n\r\n if len(shapes_one_seg) > 0 :\r\n run_shapes_one_seg = True\r\n\r\n # The rest of the shapes\r\n shapes_ok = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] >3, 'shape_id'].unique())].reset_index()\r\n\r\n def add_points(row, add_p, cut_points_gdf):\r\n # Calculate the min distance between the stops that intersect this segment\r\n index_track_ = row.name\r\n p = cut_points_gdf.loc[cut_points_gdf.index.isin(add_p.loc[add_p.index_track_==index_track_, 'index_cut'])]\r\n p.crs={'init':'epsg:4326'}\r\n\r\n seg = [LineString([p.geometry.values[i], p.geometry.values[i+1]]) for i in range(0,len(p)-1)]\r\n seg = gpd.GeoSeries(seg)\r\n seg.crs={'init':'epsg:4326'}\r\n dist = seg.to_crs(epsg).length.min() - 5\r\n\r\n\r\n gse = gpd.GeoSeries(row.geometry, index=[row.distance_m])\r\n gse.crs = {'init':'epsg:4326'}\r\n gse = gse.to_crs(epsg)\r\n\r\n length = gse.index[0]\r\n start = gse.values[0].coords[0]\r\n end = gse.values[0].coords[-1]\r\n\r\n num_vert = int(length/dist)\r\n\r\n new_points = [start] + [gse.values[0].interpolate(dist*n) for n in list(range(1, num_vert+1))] + [end]\r\n new_points = [Point(p) for p in new_points]\r\n new_line = LineString(new_points)\r\n\r\n check = gpd.GeoSeries([new_line])\r\n check.crs = {'init':'epsg:{}'.format(epsg)}\r\n check = check.to_crs(epsg=4326)\r\n return check[0]\r\n\r\n # Loop lines with more than three stops\r\n def cut_loops_shapes_ok(shape_id):\r\n # Set the ids\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n #d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id), stop_sequence=list(df.stop_sequence))\r\n\r\n # All the necessary information to split the line\r\n # 1- line to be cut\r\n # 2- transversal lines to cut\r\n # 3- closest point on the line\r\n\r\n line = shapes_ok.loc[shapes_ok.shape_id == shape_id, 'geometry'].values[0] \r\n cut_lines = trans_loop.loc[trans_loop.shape_id==shape_id,'trans_lines'].values[0][1:-1] \r\n cut_points = [x[1] for x in cut_points_loop.loc[cut_points_loop.shape_id==shape_id,'points'].values[0][1:-1]]\r\n\r\n cut_gdf = gpd.GeoDataFrame(data=list(range(len(cut_lines))), geometry=cut_lines)\r\n cut_points_gdf = gpd.GeoDataFrame(data=list(range(len(cut_points))), geometry=cut_points)\r\n\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # Make sure the shapes has a point every 100m\r\n # Create a GeoDataFrame with two point segments of the shape and its distance in meters\r\n shape = line.coords\r\n # Create two point segments for the shape\r\n track_l = gpd.GeoSeries([LineString([shape[i], shape[i+1]]) for i in range(0, 
len(shape)-1)])\r\n track_l.crs={'init':'epsg:4326'}\r\n #Calculate the length of each two point segment in meters\r\n track_dist = track_l.to_crs(epsg=epsg).length\r\n # Create the dataframe\r\n track_l_gdf = gpd.GeoDataFrame(data=dict(distance_m = track_dist), geometry = track_l)\r\n\r\n # Check where stops are closer than points of the track\r\n # To do that we intersect each segment between two segments of the track with our cut lines\r\n how_many = gpd.sjoin(track_l_gdf, cut_gdf, how='left', op='intersects', lsuffix='left', rsuffix='right').reset_index()\r\n how_many.rename(columns=dict(index='index_track_', index_right = 'index_cut'), inplace=True)\r\n\r\n # The filter those that were intersected by more than one cut line\r\n how_manyp = how_many.pivot_table('geometry', index='index_track_', aggfunc='count').reset_index()\r\n how_manyp = how_manyp.loc[how_manyp.geometry>1]\r\n\r\n add_p = how_many.loc[how_many.index_track_.isin(how_manyp.index_track_.unique())]\r\n\r\n # Add intermediate points for segments with length > 100m\r\n track_l_gdf.loc[track_l_gdf.index.isin(how_manyp.index_track_.unique()), 'geometry'] = track_l_gdf.loc[track_l_gdf.index.isin(how_manyp.index_track_.unique())] .apply(lambda x: add_points(x, add_p, cut_points_gdf), axis=1)\r\n\r\n #track_l_gdf.loc[track_l_gdf.distance_m>dist, 'geometry'] = track_l_gdf.loc[track_l_gdf.distance_m>dist].apply(lambda x: add_points(x, dist), axis=1)\r\n\r\n # Take the points and create the LineString again\r\n t = [list(g.coords)[:-1] for g in track_l_gdf.geometry]\r\n flat_list = [item for sublist in t for item in sublist] + [track_l_gdf.geometry.tail(1).values[0].coords[-1]]\r\n\r\n line = LineString(flat_list) \r\n\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # First segment\r\n # We will use i to identify were the next segment should start\r\n for i in range(2, len(line.coords)):\r\n segment = LineString(line.coords[0:i])\r\n if segment.intersects(cut_lines[0]):\r\n points_to_stop = line.coords[0:i-1] + list(cut_points[0].coords)\r\n segment = LineString(points_to_stop)\r\n\r\n # Save the position of the point that makes it to the intersection\r\n #last_point = i\r\n last_point = i-1\r\n d['segment'] = [segment]\r\n #df.loc[0, 'segment'] = segment # assign the linestring to that segment\r\n\r\n break\r\n\r\n # Middle segments\r\n for l in range(1, len(cut_lines)):\r\n nearest_point = list(cut_points[l-1].coords) # segments always start in the one of the cut points\r\n start_iterator = last_point + 1 # start from the last point found in the previous segment\r\n\r\n for i in range(start_iterator, len(line.coords)+1):\r\n points_to_stop = nearest_point + line.coords[last_point:i] # keep adding points to extend the line\r\n segment = LineString(points_to_stop)\r\n\r\n if segment.intersects(cut_lines[l]): \r\n # if the line intersects with the cut line, define the segment\r\n # the segment goes from one cut point to the next one\r\n points_to_stop = nearest_point + line.coords[last_point:i-1] + list(cut_points[l].coords)\r\n segment = LineString(points_to_stop)\r\n\r\n # Save the position of the point that makes it to the intersection\r\n last_point = i-1\r\n d['segment'] = d['segment'] + [segment]\r\n break \r\n\r\n if 
i==(len(line.coords)):\r\n points_to_stop = nearest_point + list(cut_points[l].coords)\r\n segment = LineString(points_to_stop)\r\n d['segment'] = d['segment'] + [segment]\r\n\r\n # Last segment\r\n # We start at the last cut point and go all the way to the end\r\n nearest_point = list(cut_points[l].coords)\r\n points_to_stop = nearest_point + line.coords[last_point:len(line.coords)]\r\n segment = LineString(points_to_stop)\r\n\r\n d['segment'] = d['segment'] + [segment] \r\n\r\n return d\r\n\r\n segments1 = [cut_loops_shapes_ok(s) for s in shapes_ok.shape_id.unique()]\r\n # Remove None values\r\n segments1 = [i for i in segments1 if i] \r\n segments.extend(segments1)\r\n\r\n # Exception 1: Only three stops --> one cut point, two segments\r\n # If there's only one cut_point this will make the\r\n # script skip the \"Middle segments\" part\r\n # (with only one cut point there are only two segments)\r\n\r\n if run_shapes_no_middle:\r\n #for index, row in shapes_no_middle.iterrows():\r\n def cut_shapes_no_middle(shape_id):\r\n # Set the ids\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n d = dict(shape_id = shape_id, route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n # All the necessary information to split the line\r\n # 1- line to be cut\r\n # 2- transversal lines to cut\r\n # 3- closest point on the line\r\n\r\n line = shapes_no_middle.loc[shapes_no_middle.shape_id == shape_id, 'geometry'].values[0] \r\n cut_lines = trans_loop.loc[trans_loop.shape_id==shape_id,'trans_lines'].values[0][1:-1] \r\n cut_points = [x[1] for x in cut_points_loop.loc[cut_points_loop.shape_id==shape_id,'points'].values[0][1:-1]]\r\n\r\n # First segment\r\n # We will use i to identify were the next segment should start\r\n for i in range(2, len(line.coords)):\r\n segment = LineString(line.coords[0:i])\r\n\r\n if segment.intersects(cut_lines[0]):\r\n points_to_stop = line.coords[0:i-1] + list(cut_points[0].coords)\r\n segment = LineString(points_to_stop)\r\n\r\n # Save the position of the point that makes it to the intersection\r\n last_point = i\r\n d['segment'] = [segment]\r\n #df.loc[0, 'segment'] = segment # assign the linestring to that segment\r\n\r\n break\r\n\r\n # Last segment\r\n # We start at the last cut point and go all the way to the end\r\n nearest_point = list(cut_points[0].coords)\r\n points_to_stop = nearest_point + line.coords[last_point-1:len(line.coords)]\r\n segment = LineString(points_to_stop)\r\n\r\n d['segment'] = d['segment'] + [segment]\r\n\r\n return d\r\n\r\n # Apply the function\r\n segments2 = [cut_shapes_no_middle(s) for s in shapes_no_middle.shape_id.unique()]\r\n # Remove None values\r\n segments2 = [i for i in segments2 if i] \r\n segments.extend(segments2)\r\n\r\n # Exception 2: Only two stops --> no cut points, one segments\r\n if run_shapes_one_seg:\r\n #for index, row in shapes_one_seg.iterrows():\r\n def cut_shapes_one_seg(shape_id):\r\n # Set the ids\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & 
(sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n #df['segment'] = ''\r\n d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n line = shapes_one_seg.loc[shapes_one_seg.shape_id == shape_id, 'geometry'].values[0] \r\n d['segment'] = [line]\r\n return d\r\n\r\n # Apply function\r\n segments3 = [cut_shapes_one_seg(s) for s in shapes_one_seg.shape_id.unique()]\r\n # Remove None values\r\n segments3 = [i for i in segments3 if i] \r\n segments.extend(segments3)\r\n\r\n\r\n def format_shapes(s, last_id):\r\n df = pd.DataFrame()\r\n df['stop_sequence'] = s['stop_sequence']\r\n df['start_stop_id'] = s['stop_id']\r\n df['end_stop_id'] = s['stop_id'][1:] + [last_id]\r\n df['shape_id'] = s['shape_id']\r\n df['route_id'] = s['route_id']\r\n df['direction_id'] = s['direction_id']\r\n\r\n df['geometry'] = s['segment']\r\n\r\n return df\r\n\r\n df = pd.concat([format_shapes(s, sseq.loc[sseq.shape_id==s['shape_id']].tail(1).stop_id.values[0]) for s in segments])\r\n\r\n df = pd.merge(df, stops[['stop_id', 'stop_name']], left_on='start_stop_id', right_on='stop_id', how='left').drop('stop_id', axis=1)\r\n df.rename(columns=dict(stop_name='start_stop_name'), inplace=True)\r\n df = pd.merge(df, stops[['stop_id', 'stop_name']], left_on='end_stop_id', right_on='stop_id', how='left').drop('stop_id', axis=1)\r\n df.rename(columns=dict(stop_name='end_stop_name'), inplace=True)\r\n df['segment_id'] = df.start_stop_id + '-' + df.end_stop_id\r\n\r\n segments_gdf = gpd.GeoDataFrame(data = df.loc[:,['route_id','direction_id','stop_sequence','start_stop_name', 'end_stop_name', 'start_stop_id', 'end_stop_id','segment_id','shape_id']], geometry = df.geometry)\r\n\r\n segments_gdf.crs = {'init':'epsg:4326'}\r\n segments_gdf['distance_m'] = segments_gdf.geometry.to_crs(epsg=epsg).length\r\n\r\n return segments_gdf\r\n \r\ndef speeds_from_gtfs(routes, stop_times, segments_gdf, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import pandas as pd\r\n import math\r\n import os\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n routes = routes\r\n stop_times = stop_times\r\n \r\n # Get the runtime between stops\r\n stop_times.sort_values(by = ['trip_id', 'stop_sequence'], ascending = True, inplace=True)\r\n \r\n first_try = stop_times.loc[:,['trip_id', 'arrival_time']]\r\n first_try['trip_id_next'] = first_try['trip_id'].shift(-1)\r\n first_try['arrival_time_next'] = first_try['arrival_time'].shift(-1)\r\n \r\n def runtime(row):\r\n if row.trip_id == row.trip_id_next:\r\n runtime = (row.arrival_time_next - row.arrival_time)/3600\r\n else:\r\n runtime = 0\r\n \r\n return runtime\r\n \r\n first_try['runtime_h'] = first_try.apply(runtime, axis=1)\r\n \r\n if len(first_try) == len(stop_times):\r\n stop_times['runtime_h'] = first_try['runtime_h']\r\n \r\n stop_times.head(2)\r\n # Merge stop_times with segments_gdf to get the distance\r\n segments_gdf['direction_id'] = segments_gdf['direction_id'].map(int)\r\n segments_gdf['stop_sequence'] = segments_gdf['stop_sequence'].map(int)\r\n \r\n speeds = pd.merge(stop_times, segments_gdf[['route_id', 'direction_id', 'start_stop_id', 'stop_sequence', 'segment_id','shape_id', 'distance_m']], \r\n left_on = ['route_id', 'direction_id', 'stop_id', 'stop_sequence', 'shape_id'], \r\n right_on = 
['route_id', 'direction_id', 'start_stop_id', 'stop_sequence', 'shape_id'],\r\n how = 'left').drop('start_stop_id', axis=1)\r\n \r\n speeds = speeds.loc[~speeds.distance_m.isnull(),\r\n ['trip_id', 'route_id', 'direction_id', 'shape_id', 'segment_id',\r\n 'arrival_time', 'departure_time', 'stop_id','stop_name',\r\n 'stop_sequence', 'runtime_h', 'distance_m','geometry']\r\n ]\r\n \r\n # Assign a time window to each row\r\n if max(cutoffs)<=24: \r\n speeds_ok = speeds.loc[speeds.departure_time < 24*3600]\r\n speeds_fix = speeds.loc[speeds.departure_time >= 24*3600]\r\n speeds_fix['departure_time'] = [d - 24*3600 for d in speeds_fix.departure_time]\r\n \r\n speeds = speeds_ok.append(speeds_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n \r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n \r\n speeds['departure_time'] = speeds['departure_time']/3600\r\n \r\n # Put each trips in the right window\r\n speeds['window'] = pd.cut(speeds['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n speeds = speeds.loc[~speeds.window.isnull()]\r\n speeds['window'] = speeds['window'].astype(str)\r\n \r\n # Calculate the speed\r\n speeds.loc[speeds.runtime_h == 0.0, 'runtime_h'] = speeds.loc[speeds.runtime_h != 0.0, 'runtime_h'].mean()\r\n speeds['speed'] = round(speeds['distance_m']/1000/speeds['runtime_h'])\r\n speeds = speeds.loc[~speeds.speed.isnull()]\r\n \r\n # Calculate average speed to modify outliers\r\n avg_speed_route = speeds.pivot_table('speed',\r\n index=['route_id', 'direction_id','window'],\r\n aggfunc='mean').reset_index()\r\n avg_speed_route.rename(columns={'speed':'avg_speed_route'}, inplace=True)\r\n # Assign average speed to outliers\r\n speeds = pd.merge(speeds, avg_speed_route, how='left')\r\n speeds.loc[speeds.speed>120,'speed'] = speeds.loc[speeds.speed>120,'avg_speed_route']\r\n \r\n # Calculate max speed per segment to have a free_flow reference\r\n max_speed_segment = speeds.pivot_table('speed',\r\n index = ['stop_id', 'direction_id'],\r\n aggfunc='max')\r\n max_speed_segment.rename(columns={'speed':'max_kmh'}, inplace=True)\r\n \r\n \r\n # Get the average per route, direction, segment and time of day\r\n speeds_agg = speeds.pivot_table(['speed', 'runtime_h', 'avg_speed_route'],\r\n index=['route_id', 'direction_id', 'segment_id', 'window'],\r\n aggfunc = 'mean'\r\n ).reset_index()\r\n speeds_agg['route_id'] = speeds_agg['route_id'].map(str)\r\n speeds_agg['direction_id'] = speeds_agg['direction_id'].map(int)\r\n \r\n data = pd.merge(speeds_agg, segments_gdf, \r\n left_on=['route_id', 'direction_id', 'segment_id'],\r\n right_on = ['route_id', 'direction_id', 'segment_id'],\r\n how='left').reset_index().sort_values(by = ['route_id', 'direction_id','window','stop_sequence',], ascending=True)\r\n \r\n data.drop(['index'], axis=1, inplace=True)\r\n \r\n # Route name\r\n routes['route_name'] = ''\r\n if routes.route_short_name.isnull().unique()[0]:\r\n routes['route_name'] = 
routes.route_long_name\r\n elif routes.route_long_name.isnull().unique()[0]: \r\n routes['route_name'] = routes.route_short_name\r\n else:\r\n routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n data = pd.merge(data, routes[['route_id', 'route_name']], left_on='route_id', right_on='route_id', how='left')\r\n \r\n # Get the average per segment and time of day\r\n # Then add it to the rest of the data\r\n \r\n all_lines = speeds.pivot_table(['speed', 'runtime_h', 'avg_speed_route'],\r\n index=['segment_id', 'window'],\r\n aggfunc = 'mean'\r\n ).reset_index()\r\n \r\n data_all_lines = pd.merge(\r\n all_lines, \r\n segments_gdf.drop_duplicates(subset=['segment_id']), \r\n left_on=['segment_id'],\r\n right_on = ['segment_id'],\r\n how='left').reset_index().sort_values(by = ['direction_id','window','stop_sequence'], ascending=True)\r\n \r\n data_all_lines.drop(['index'], axis=1, inplace=True)\r\n data_all_lines['route_id'] = 'ALL_LINES'\r\n data_all_lines['route_name'] = 'All lines'\r\n data_all_lines['direction_id'] = 'NA'\r\n data_complete = data.append(data_all_lines)\r\n \r\n data_complete1 = data_complete.loc[~data_complete.route_name.isnull(), :].reset_index()\r\n \r\n \r\n # Get the columns in the right format\r\n int_columns = ['speed']\r\n \r\n for c in int_columns:\r\n data_complete1[c] = data_complete1[c].apply(lambda x: round(x,1))\r\n \r\n \r\n data_complete1 = data_complete1.loc[:,['route_id', 'route_name','direction_id','segment_id', 'window',\r\n 'speed', \r\n 'start_stop_id', 'start_stop_name', 'end_stop_id','end_stop_name', \r\n 'distance_m','stop_sequence', 'shape_id', 'runtime_h','geometry', ]] \r\n \r\n data_complete1.columns = ['route_id', 'route_name','dir_id', 'segment_id','window', \r\n 'speed',\r\n 's_st_id', 's_st_name', 'e_st_id','e_st_name',\r\n 'distance_m', 'stop_seq', 'shape_id','runtime_h', 'geometry']\r\n \r\n # Assign max speeds to each segment\r\n data_complete1 = pd.merge(data_complete1, max_speed_segment,\r\n left_on=['s_st_id', 'dir_id'], right_on = ['stop_id', 'direction_id'],\r\n how='left')\r\n \r\n gdf = gpd.GeoDataFrame(data = data_complete1.drop('geometry', axis=1), geometry=data_complete1.geometry)\r\n \r\n gdf.loc[gdf.dir_id==0,'dir_id'] = 'Inbound'\r\n gdf.loc[gdf.dir_id==1,'dir_id'] = 'Outbound'\r\n \r\n gdf.rename(columns={'speed': 'speed_kmh'}, inplace=True)\r\n gdf['speed_mph'] = gdf['speed_kmh']*0.621371\r\n gdf['max_mph'] = gdf['max_kmh']*0.621371\r\n \r\n gdf = gdf.drop(['shape_id'], axis=1).drop_duplicates()\r\n \r\n return gdf\r\n \r\ndef create_json(gdf, variable, filename,\r\n variable_label,\r\n filter_variables = [],\r\n filter_labels = [],\r\n colors = [],\r\n sizes = ['medium', 'medium', 'medium','medium','large','large'],\r\n breaks = [],\r\n default_values = [],\r\n symbol_layer = False,\r\n categories = ['Healthcare', 'Education', 'Food', 'Financial', 'Entertainment', 'Transportation', 'Others'], \r\n symbols = ['Hospital', 'School','Default', 'Official', 'Special', 'BusStop', 'Default'], \r\n ):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n \r\n import os\r\n import json\r\n import pandas as pd\r\n\r\n try:\r\n import utm\r\n except ImportError as e:\r\n os.system('pip install utm')\r\n import utm\r\n\r\n try:\r\n import jenkspy\r\n except ImportError as e:\r\n os.system('pip install jenkspy')\r\n import jenkspy\r\n if symbol_layer:\r\n # All categorical variable layer thing\r\n # We start with Remix Lightrail colors and then add default colors from Plotly\r\n # qualitative_palette 
= [blue, red, green, yellow, purple, aqua, pink, peach, melon]\r\n if colors == []:\r\n import plotly.express as px\r\n colors = ['#0066a1', '#a92023', '#066a40', '#e89b01', '#613fa6', '#024b50', '#a72051', '#a72f00', '#476800'] + px.colors.qualitative.Light24\r\n fill_color = pd.DataFrame(dict(variable=gdf[variable].unique(), fill_color = colors[0:len(gdf[variable].unique())]))\r\n gdf = pd.merge(gdf, fill_color, left_on=variable, right_on='variable', how='left')\r\n\r\n d = dict(\r\n category = categories,\r\n symbol = symbols\r\n )\r\n\r\n category_symbols = pd.DataFrame(d)\r\n\r\n gdf = pd.merge(gdf, category_symbols, how='left')\r\n\r\n var_symbol_color = gdf.pivot_table('id', index=[variable ,'symbol', 'fill_color'], aggfunc='count').reset_index()\r\n var_symbol_color['symbol_color'] = var_symbol_color.apply(lambda x: '{}{}'.format(x.symbol, x.fill_color), axis=1)\r\n\r\n symbols = []\r\n\r\n for v in gdf.variable.unique():\r\n aux = dict(\r\n input = v,\r\n value = var_symbol_color.loc[var_symbol_color[variable]==v,'symbol_color'].values[0]\r\n )\r\n symbols = symbols + [aux]\r\n\r\n icon = dict(\r\n type = 'categorical',\r\n values = symbols, # list of dict with values\r\n dataCol = variable, # could be amenity, group or catefory for example\r\n defaultValue = \"Default#000\"\r\n )\r\n\r\n label = dict(\r\n type = 'data-column',\r\n dataCol = 'name'\r\n )\r\n\r\n t = dict(\r\n type = 'symbol',\r\n icon = icon,\r\n label = label,\r\n configVersion = 1\r\n )\r\n else:\r\n # All line and circle numerical variable layers thing\r\n if colors == []:\r\n colors = [\"#D83D25\",\"#EF6933\",\"#F89041\",\"#fee090\",\"#91bfdb\",\"#4575b4\"],\r\n\r\n gdf[variable] = gdf[variable].map(int)\r\n \r\n if 'window' in list(gdf.columns):\r\n sort_windows=pd.DataFrame()\r\n sort_windows['window'] = gdf.window.unique()\r\n sort_windows['sort'] = [i.split(':')[0] for i in gdf.window.unique()]\r\n sort_windows['sort'] = sort_windows['sort'].astype(int)\r\n sort_windows.sort_values(by='sort', ascending=True, inplace=True)\r\n sort_windows.reset_index(inplace=True)\r\n \r\n # Calculate breaks the variable\r\n if breaks ==[]:\r\n breaks = jenkspy.jenks_breaks(gdf[variable], nb_class=len(colors))\r\n breaks = [int(b) for b in breaks]\r\n max_value = int(gdf[variable].max())\r\n bl = [int(b) for b in breaks]\r\n \r\n # Colors \r\n stops_color = []\r\n for i in range(len(colors)):\r\n aux = dict(input = bl[i], output = colors[i])\r\n stops_color = stops_color + [aux]\r\n \r\n color = dict(\r\n type='range',\r\n stops = stops_color,\r\n dataCol = variable,\r\n maxInput = max_value\r\n )\r\n \r\n # Sizes\r\n stops_size = []\r\n for i in range(len(colors)):\r\n aux = dict(input = bl[i], output = sizes[i])\r\n stops_size = stops_size + [aux]\r\n \r\n if gdf.geom_type[0] == 'Point':\r\n radius = dict(\r\n type='range',\r\n stops = stops_size,\r\n dataCol = variable,\r\n maxInput = max_value\r\n )\r\n gtype = 'circle'\r\n elif gdf.geom_type[0] == 'LineString':\r\n width = dict(\r\n type='range',\r\n stops = stops_size,\r\n dataCol = variable,\r\n maxInput = max_value\r\n )\r\n gtype = 'line'\r\n else:\r\n print(\"Check the geometry, it is not recognized as a LineString nor a Point\")\r\n \r\n # Legend labels\r\n filter_variables1 = [variable] + filter_variables\r\n filter_labels1 = [variable_label] + filter_labels\r\n \r\n legendLabels = dict(\r\n dataColLabels = {filter_variables1[i]: filter_labels1[i] for i in range(len(filter_variables1))}\r\n )\r\n \r\n # Filterable columns\r\n filterableColumns = []\r\n for f in 
filter_variables:\r\n if (f == 'route_name') & ('All lines' in list(gdf[f].unique())):\r\n aux = dict(\r\n values= ['All lines'] + list(gdf.loc[gdf.route_id!='ALL_LINES'].route_name.sort_values(ascending=True).unique()),\r\n dataCol = 'route_name',\r\n defaultValue = 'All lines'\r\n )\r\n elif (f != 'window')&(f != 'day_type'):\r\n if default_values[filter_variables.index(f)] == True:\r\n aux = dict(\r\n values = [str(x) for x in gdf[f].sort_values(ascending=True).unique()],\r\n dataCol = f,\r\n defaultValue = str(list(gdf[f].sort_values(ascending=True).unique())[0])\r\n )\r\n else:\r\n aux = dict(\r\n values = [str(x) for x in gdf[f].sort_values(ascending=True).unique()],\r\n dataCol = f\r\n )\r\n elif f == 'window':\r\n if len(sort_windows.window.unique())> 1:\r\n default_val = list(sort_windows.window.unique())[1]\r\n else:\r\n default_val = list(sort_windows.window.unique())[0]\r\n aux = dict(\r\n values = list(sort_windows.window.unique()),\r\n dataCol = 'window',\r\n defaultValue = default_val\r\n )\r\n elif f == 'day_type':\r\n aux = dict(\r\n values = ['Weekday', 'Saturday', 'Sunday'],\r\n dataCol = 'day_type',\r\n defaultValue = 'Weekday'\r\n )\r\n filterableColumns = filterableColumns + [aux]\r\n \r\n # Save the json file\r\n if gtype == 'circle':\r\n t = dict(\r\n type=gtype,\r\n color=color,\r\n radius=radius,\r\n legendLabels=legendLabels,\r\n configVersion= 1,\r\n filterableColumns=filterableColumns\r\n )\r\n elif gtype == 'line':\r\n t = dict(\r\n type=gtype,\r\n color=color,\r\n width=width,\r\n legendLabels=legendLabels,\r\n configVersion= 1,\r\n filterableColumns=filterableColumns\r\n )\r\n json_name = 'json_' + filename + '.json'\r\n with open(json_name, 'w') as outfile:\r\n json.dump(t, outfile)\r\n\r\ndef stops_freq(stop_times, stops, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import math\r\n import pandas as pd\r\n import os\r\n import re\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n hours = list(range(25))\r\n hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n \r\n if max(cutoffs)<=24: \r\n stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n \r\n stop_times = stop_times_ok.append(stop_times_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n \r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n \r\n stop_times['departure_time'] = stop_times['departure_time']/3600\r\n \r\n # Put each trips in the right window\r\n stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n stop_times = stop_times.loc[~stop_times.window.isnull()]\r\n stop_times['window'] = stop_times['window'].astype(str)\r\n 
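# pd.cut with right=False puts each departure into a [start, end) window;\r\n # the same binning against whole hours (below) yields the peak trips per hour.\r\n 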
stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n stop_times['hour'] = stop_times['hour'].astype(str)\r\n \r\n trips_per_window = stop_times.pivot_table('trip_id', index=['stop_id', 'direction_id','window'], aggfunc='count').reset_index()\r\n trips_per_hour = stop_times.pivot_table('trip_id', index=['stop_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n \r\n trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n \r\n max_trips = trips_per_hour.pivot_table('max_trips', index=['stop_id', 'direction_id'], aggfunc='max').reset_index()\r\n max_freq = trips_per_hour.pivot_table('max_frequency', index=['stop_id', 'direction_id'], aggfunc='min').reset_index()\r\n \r\n trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0]))\r\n end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)))\r\n \r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n stop_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n stop_frequencies = pd.merge(stop_frequencies, max_freq, how = 'left')\r\n stop_frequencies = pd.merge(stop_frequencies, stops.loc[:, ['stop_id', 'stop_name', 'geometry']], how='left')\r\n stop_frequencies = gpd.GeoDataFrame(data=stop_frequencies.drop('geometry', axis=1), geometry=stop_frequencies.geometry)\r\n \r\n stop_frequencies.loc[stop_frequencies.direction_id == 0, 'direction_id'] = 'Inbound'\r\n stop_frequencies.loc[stop_frequencies.direction_id == 1, 'direction_id'] = 'Outbound'\r\n \r\n stop_frequencies.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq'\r\n }, inplace=True)\r\n stop_frequencies.sort_values(by='frequency', ascending=False, inplace=True)\r\n \r\n return stop_frequencies\r\n \r\ndef map_gdf(gdf, variable,\r\n colors = [\"#d13870\", \"#e895b3\" ,'#55d992', '#3ab071', '#0e8955','#066a40'],\r\n tooltip_var = [],\r\n tooltip_labels = [],\r\n breaks = []):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import branca\r\n import pandas as pd\r\n import os\r\n import plotly.express as px\r\n try:\r\n import jenkspy\r\n except ImportError as e:\r\n os.system('pip install jenkspy')\r\n import jenkspy\r\n \r\n try:\r\n import folium\r\n except ImportError as e:\r\n os.system('pip install folium')\r\n import folium\r\n\r\n # Look for the center of the map\r\n minx, miny, maxx, maxy = gdf.geometry.total_bounds\r\n \r\n centroid_lat = miny + (maxy - miny)/2\r\n centroid_lon = minx + (maxx - minx)/2 \r\n \r\n if isinstance(gdf[variable].values[0], str):\r\n categorical = True\r\n else: \r\n categorical = False\r\n \r\n # Calculate the breaks if they were not specified\r\n if (breaks == []) & (not categorical):\r\n breaks = jenkspy.jenks_breaks(gdf[variable], nb_class=len(colors))\r\n breaks = [int(b) for b in breaks]\r\n \r\n m = folium.Map(location=[centroid_lat, centroid_lon], \r\n tiles='cartodbpositron', zoom_start=12\r\n )\r\n # If the variable is categorical\r\n if categorical:\r\n gdf['radius'] = 5\r\n # qualitative_palette = [blue, red, green, yellow, purple, aqua, pink, peach,melon]\r\n # We start with Remix Lightrail colors and then add default colors from Plotly\r\n qualitative_palette = ['#0066a1', '#a92023', '#066a40', '#e89b01', '#613fa6', '#024b50', '#a72051', '#a72f00', 
'#476800']\r\n color_palette = qualitative_palette + px.colors.qualitative.Pastel + px.colors.qualitative.Prism + px.colors.qualitative.Vivid + px.colors.qualitative.Light24\r\n fill_color = pd.DataFrame(dict(variable=gdf[variable].unique(), fill_color = color_palette[0:len(gdf[variable].unique())])) \r\n gdf=pd.merge(gdf, fill_color, left_on=variable, right_on='variable', how='left')\r\n # If the variable is numerical\r\n else:\r\n gdf['radius'] = gdf[variable]\r\n index = [int(b) for b in breaks]\r\n colorscale = branca.colormap.StepColormap(colors, index = index, caption=variable)\r\n gdf['fill_color'] = gdf[variable].apply(lambda x: colorscale(x)) \r\n \r\n if gdf.geom_type.values[0] == 'Point':\r\n # my code for circles\r\n # Create the circles\r\n for i in range(int(len(gdf))):\r\n folium.CircleMarker(\r\n location=[gdf.loc[i, 'geometry'].y, gdf.loc[i, 'geometry'].x], \r\n radius = float(gdf.loc[i, 'radius']),\r\n #popup=geo_data.loc[i, 'stop_name'], \r\n tooltip = tooltip_labels[0] + str(gdf.loc[i, tooltip_var[0]]), \r\n color='#ffffff00',\r\n fill = True,\r\n fill_opacity = .7,\r\n fill_color = str(gdf.loc[i, 'fill_color'])\r\n ).add_to(m)\r\n else:\r\n # Styling function for LineStrings \r\n def style_function(feature):\r\n return {\r\n 'fillOpacity': 0.5,\r\n 'weight': 3,#math.log2(feature['properties']['speed'])*2,\r\n 'color': feature['properties']['fill_color']\r\n }\r\n # my code for lines\r\n geo_data = gdf.__geo_interface__\r\n folium.GeoJson(\r\n geo_data, \r\n style_function = style_function,\r\n tooltip = folium.features.GeoJsonTooltip(fields=tooltip_var,\r\n aliases = tooltip_labels,\r\n labels=True,\r\n sticky=False)\r\n ).add_to(m)\r\n \r\n return m\r\n\r\ndef lines_freq(stop_times, trips, shapes, routes, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import math\r\n import pandas as pd\r\n import os\r\n import re\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n # Generate the hours of the day\r\n hours = list(range(25))\r\n hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n \r\n # Generate the time windows and cutoffs\r\n if max(cutoffs)<=24: \r\n stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n \r\n stop_times = stop_times_ok.append(stop_times_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n \r\n # Generate the labels\r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n \r\n stop_times['departure_time'] = stop_times['departure_time']/3600\r\n \r\n # Put each trips in the right window\r\n stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n stop_times = 
stop_times.loc[~stop_times.window.isnull()]\r\n stop_times['window'] = stop_times['window'].astype(str)\r\n stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n stop_times['hour'] = stop_times['hour'].astype(str)\r\n \r\n stop_times_first = stop_times.loc[stop_times.stop_sequence==1,:]\r\n \r\n # Count number of trips per windows and hour\r\n trips_per_window = stop_times_first.pivot_table('trip_id', index=['route_id','direction_id','window'], aggfunc='count').reset_index()\r\n trips_per_hour = stop_times_first.pivot_table('trip_id', index=['route_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n \r\n # Calculate the hourly frequency\r\n trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n \r\n # Get max number of trips and highest frequency\r\n max_trips = trips_per_hour.pivot_table('max_trips', index=['route_id', 'direction_id'], aggfunc='max').reset_index()\r\n max_freq = trips_per_hour.pivot_table('max_frequency', index=['route_id', 'direction_id'], aggfunc='min').reset_index()\r\n \r\n # Calculate frequency per window for each route\r\n trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0]))\r\n end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)))\r\n \r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n line_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n line_frequencies = pd.merge(line_frequencies, max_freq, how = 'left')\r\n \r\n aux = trips.loc[trips.service_id=='1',['route_id', 'direction_id', 'shape_id']].drop_duplicates()\r\n aux = pd.merge(line_frequencies, aux, how='left')\r\n line_frequencies_gdf = pd.merge(aux, shapes, how='left')\r\n # Route name\r\n routes['route_name'] = ''\r\n if routes.route_short_name.isnull().unique()[0]:\r\n routes['route_name'] = routes.route_long_name\r\n elif routes.route_long_name.isnull().unique()[0]: \r\n routes['route_name'] = routes.route_short_name\r\n else:\r\n routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n\r\n line_frequencies_gdf = pd.merge(line_frequencies_gdf, routes[['route_id', 'route_name']])\r\n \r\n gdf = gpd.GeoDataFrame(data=line_frequencies_gdf.drop('geometry', axis=1), geometry=line_frequencies_gdf.geometry)\r\n \r\n gdf.loc[gdf.direction_id == 0, 'direction_id'] = 'Inbound'\r\n gdf.loc[gdf.direction_id == 1, 'direction_id'] = 'Outbound'\r\n \r\n \r\n gdf.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq',\r\n }, inplace=True)\r\n \r\n gdf = gdf.loc[:,['route_id', 'route_name', 'dir_id', 'window',\r\n 'frequency', 'ntrips',\r\n 'max_freq', 'max_trips', 'geometry']]\r\n gdf = gdf.loc[~gdf.geometry.isnull()]\r\n gdf.sort_values(by='frequency', ascending=False, inplace=True)\r\n \r\n return gdf\r\n \r\ndef segments_freq(segments_gdf, stop_times, routes, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import math\r\n import pandas as pd\r\n import os\r\n import re\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n # Generate the hours of the day\r\n hours = list(range(25))\r\n hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n\r\n 
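# hours_labels has 24 entries ('0:00' ... '23:00'), one per one-hour bin.\r\n 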
# Generate the time windows and cutoffs\r\n if max(cutoffs)<=24: \r\n stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n\r\n stop_times = stop_times_ok.append(stop_times_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n\r\n # Generate the labels\r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n\r\n stop_times['departure_time'] = stop_times['departure_time']/3600\r\n\r\n # Put each trips in the right window\r\n stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n stop_times = stop_times.loc[~stop_times['window'].isnull()]\r\n stop_times['window'] = stop_times['window'].astype(str)\r\n\r\n stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n stop_times['hour'] = stop_times['hour'].astype(str)\r\n\r\n # Count number of trips per windows and hour\r\n\r\n trips_per_window = stop_times.pivot_table('trip_id', index=['route_id','stop_id', 'direction_id','window'], aggfunc='count').reset_index()\r\n trips_per_hour = stop_times.pivot_table('trip_id', index=['route_id','stop_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n\r\n # Calculate the hourly frequency\r\n trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n\r\n # Get max number of trips and highest frequency\r\n max_trips = trips_per_hour.pivot_table('max_trips', index=['route_id','stop_id', 'direction_id'], aggfunc='max').reset_index()\r\n max_freq = trips_per_hour.pivot_table('max_frequency', index=['route_id','stop_id', 'direction_id'], aggfunc='min').reset_index()\r\n\r\n\r\n # Calculate frequency per window for each route\r\n trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0])+(int(x.split(':')[1][:2])/60))\r\n end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)) + (int(x.split(':')[2])/60))\r\n\r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n\r\n line_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n line_frequencies = pd.merge(line_frequencies, max_freq, how = 'left')\r\n line_frequencies = pd.merge(line_frequencies, \r\n segments_gdf.loc[:, ['route_id', 'segment_id', 'start_stop_id', 'start_stop_name', 'end_stop_name','direction_id', 'geometry']],\r\n left_on=['route_id','stop_id', 'direction_id'],\r\n right_on=['route_id','start_stop_id', 'direction_id'], \r\n how='left')\r\n\r\n line_frequencies.drop_duplicates(subset=['route_id', 'stop_id', 'direction_id', 'window', 'ntrips', 'frequency',\r\n 'max_trips', 'max_frequency', 'segment_id', 
'start_stop_id',\r\n 'start_stop_name', 'end_stop_name'], inplace=True)\r\n\r\n # Route name\r\n routes['route_name'] = ''\r\n if routes.route_short_name.isnull().unique()[0]:\r\n routes['route_name'] = routes.route_long_name\r\n elif routes.route_long_name.isnull().unique()[0]: \r\n routes['route_name'] = routes.route_short_name\r\n else:\r\n routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n \r\n line_frequencies = pd.merge(line_frequencies, routes.loc[:,['route_id','route_name']],how='left')\r\n\r\n # Calculate sum of trips per segment with all lines\r\n all_lines = line_frequencies.pivot_table(['ntrips'],\r\n index=['segment_id', 'window'],\r\n aggfunc = 'sum'\r\n ).reset_index()\r\n\r\n # Calculate frequency per window for all routes\r\n start_time = all_lines['window'].apply(lambda x: int(x.split(':')[0])+(int(x.split(':')[1][:2])/60))\r\n end_time = all_lines['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)) + (int(x.split(':')[2])/60))\r\n\r\n all_lines['frequency'] = ((end_time - start_time)*60 / all_lines.ntrips).astype(int)\r\n\r\n # Get max number of trips and highest frequency per segment for all routes\r\n max_trips_all_lines = all_lines.pivot_table('ntrips', index=['segment_id'], aggfunc='max').reset_index()\r\n max_freq_all_lines = all_lines.pivot_table('frequency', index=['segment_id'], aggfunc='min').reset_index()\r\n\r\n max_trips_all_lines.rename(columns=dict(ntrips='max_trips'), inplace=True)\r\n max_freq_all_lines.rename(columns=dict(frequency='max_frequency'), inplace=True)\r\n\r\n all_lines = pd.merge(all_lines, max_trips_all_lines, how = 'left')\r\n all_lines = pd.merge(all_lines, max_freq_all_lines, how = 'left')\r\n\r\n data_all_lines = pd.merge(\r\n all_lines, \r\n segments_gdf.drop_duplicates(subset=['segment_id']), \r\n left_on=['segment_id'],\r\n right_on = ['segment_id'],\r\n how='left').reset_index().sort_values(by = ['direction_id','window','stop_sequence'], ascending=True)\r\n\r\n data_all_lines.drop(['index'], axis=1, inplace=True)\r\n data_all_lines['route_id'] = 'ALL_LINES'\r\n data_all_lines['route_name'] = 'All lines'\r\n data_all_lines['direction_id'] = 'NA'\r\n data_complete = line_frequencies.append(data_all_lines).reset_index()\r\n\r\n gdf = gpd.GeoDataFrame(data=data_complete.drop('geometry', axis=1), geometry=data_complete.geometry)\r\n\r\n gdf.loc[gdf.direction_id == 0, 'direction_id'] = 'Inbound'\r\n gdf.loc[gdf.direction_id == 1, 'direction_id'] = 'Outbound'\r\n\r\n\r\n gdf.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq',\r\n 'start_stop_name': 's_st_name',\r\n 'end_stop_name': 'e_st_name',\r\n 'start_stop_id':'s_st_id'\r\n }, inplace=True)\r\n\r\n gdf = gdf.loc[:,['route_id', 'route_name', 'dir_id', 'segment_id', 'window',\r\n 'frequency', 'ntrips', 's_st_id', 's_st_name', 'e_st_name',\r\n 'max_freq', 'max_trips', 'geometry']]\r\n gdf = gdf.loc[~gdf.geometry.isnull()]\r\n gdf.sort_values(by='frequency', ascending=False, inplace=True)\r\n\r\n return gdf\r\n \r\ndef download_osm(gdf):\r\n # Define the bounding box to query\r\n bounds = gdf.geometry.total_bounds\r\n\r\n # Build the query for overspass-api\r\n overpass_url = \"http://overpass-api.de/api/interpreter\"\r\n# overpass_query = \"\"\"\r\n# [out:json];\r\n# (way[\"highway\"~\"motorway|trunk|primary|secondary|tertiary|unclassified|residential|service|living_street\"]\r\n# [\"access\"!~\"private|no\"]\r\n# ({0}, {1}, {2}, {3}););\r\n# out geom;\r\n# \"\"\".format(bounds[1], bounds[0], bounds[3], 
bounds[2])\r\n\r\n overpass_query = \"\"\"\r\n [out:json];\r\n (way[\"highway\"~\"motorway|trunk|primary|secondary|tertiary|unclassified|residential|service|living_street\"]\r\n ({0}, {1}, {2}, {3}););\r\n out geom;\r\n \"\"\".format(bounds[1], bounds[0], bounds[3], bounds[2])\r\n\r\n # Query overpass-api\r\n response = requests.get(overpass_url, \r\n params={'data': overpass_query})\r\n\r\n # Put the response in a DataFrame\r\n data = response.json()\r\n ways_df = pd.DataFrame(data['elements'])\r\n\r\n # Parse the content in lists\r\n node_ids = []\r\n lat_lon = []\r\n way_ids = []\r\n oneway = []\r\n segment_seq = []\r\n\r\n n_nodes = [len(n) for n in list(ways_df.nodes)]\r\n\r\n [node_ids.extend(n) for n in list(ways_df.nodes)]\r\n [lat_lon.extend(g) for g in list(ways_df.geometry)]\r\n [way_ids.extend([ways_df.loc[i, 'id']]*n_nodes[i]) for i in range(0, len(ways_df))] \r\n [oneway.extend([ways_df.loc[i, 'tags'].get('oneway', '0')]*n_nodes[i]) for i in range(0, len(ways_df))]\r\n [segment_seq.extend(list(range(1, n_nodes[i]+1))) for i in range(0, len(ways_df))] # segment sequence for that way_id\r\n\r\n # Convert to int to save memory\r\n oneway = [1 if s=='yes' else s for s in oneway] \r\n oneway = [0 if s in ['no', '0', 'reversible', '-1'] else s for s in oneway] \r\n oneway = list(map(int, oneway))\r\n\r\n # ------------------------------------------------------------------------------------\r\n # ------------------------------ NODES -----------------------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Parse the json into a dataframe\r\n nodes = pd.DataFrame()\r\n nodes['way_id'] = way_ids\r\n nodes['node_id'] = node_ids\r\n nodes['oneway'] = oneway\r\n nodes['segment_seq'] = segment_seq\r\n\r\n # Get lat,lon values right\r\n lat = [p['lat'] for p in lat_lon]\r\n lon = [p['lon'] for p in lat_lon]\r\n\r\n # Create points\r\n points = [Point(lon[i], lat[i]) for i in range(0, len(lat))]\r\n\r\n # Create GeoDataFrame\r\n nodes_gdf = gpd.GeoDataFrame(data=nodes, geometry = points)\r\n\r\n # ------------------------------------------------------------------------------------\r\n # --------------------------- SEGMENTS -----------------------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Define our lists\r\n # Does the node has the same way_id as the next node?\r\n bool_list = nodes['way_id'] == nodes['way_id'].shift(-1)\r\n # Nodes of the segment\r\n segment_nodes = ['{0} - {1}'.format(str(node_ids[i]), str(node_ids[i+1])) for i in range(0,len(node_ids)-1)]\r\n segment_ids = list(range(1, len(segment_nodes)+1))\r\n points_next = points[1:] + [None]\r\n\r\n # Remove the last node of the segment (it is already in the last segment)\r\n segment_nodes = list(compress(segment_nodes, bool_list)) \r\n segment_ids = list(compress(segment_ids, bool_list)) \r\n points = list(compress(points, bool_list)) \r\n points_next = list(compress(points_next, bool_list)) \r\n geometry = [LineString([points[i], points_next[i]]) for i in range(0,len(segment_nodes))]\r\n\r\n # Keep the segments and create the geo data frame\r\n segments = nodes.loc[bool_list, ['way_id', 'oneway', 'segment_seq']]\r\n segments['segment_nodes'] = segment_nodes\r\n segments['osm_segment_id'] = segment_ids\r\n segments_gdf = gpd.GeoDataFrame(data=segments, geometry = geometry)\r\n\r\n # ------------------------------------------------------------------------------------\r\n # 
--------------------------- ADD OPPOSITE SEGMENTS ----------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Create the opposite segments for two way streets\r\n opposite = segments_gdf.loc[segments_gdf.oneway == 0].reset_index()\r\n\r\n opp_nodes = ['{0} - {1}'.format(opposite.loc[i,'segment_nodes'].split(' - ')[1], opposite.loc[i,'segment_nodes'].split(' - ')[0]) for i in range(0,len(opposite))]\r\n opp_way_id = list(opposite.loc[:,'way_id'])\r\n opp_osm_segment_id = list(range(segments_gdf.osm_segment_id.max()+1, segments_gdf.osm_segment_id.max() + len(opposite) + 1))\r\n\r\n opp_geom = opposite.geometry.apply(lambda x: LineString([x.coords[1], x.coords[0]]))\r\n\r\n opp_df = pd.DataFrame()\r\n opp_df['way_id'] = opp_way_id\r\n opp_df['segment_nodes'] = opp_nodes\r\n opp_df['oneway'] = 0\r\n opp_df['osm_segment_id'] = opp_osm_segment_id\r\n opp_df['segment_seq'] = 0\r\n\r\n opp_gdf = gpd.GeoDataFrame(data=opp_df, geometry=opp_geom)\r\n\r\n segments_gdf = segments_gdf.append(opp_gdf)\r\n\r\n # Add \"from\" and \"to\" columns to make the graph generation easier\r\n segments_gdf['from'] = [int(s.split(' - ')[0]) for s in segments_gdf['segment_nodes']]\r\n segments_gdf['to'] = [int(s.split(' - ')[1]) for s in segments_gdf['segment_nodes']]\r\n \r\n return nodes_gdf, segments_gdf\r\n"
] |
[
[
"pandas.cut",
"pandas.merge",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"pandas.read_csv"
]
] |
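A minimal, self-contained sketch of the shape-splitting idea used in the code above (the geometries and the 1e-8 snap tolerance are made-up illustration values, not taken from the source): project each stop onto the route shape, then split the LineString at the projected points.

    from shapely.geometry import LineString, Point, MultiPoint
    from shapely.ops import snap, split

    shape = LineString([(0, 0), (1, 0), (2, 0)])   # hypothetical route shape
    stops = [Point(0.5, 0.1), Point(1.5, -0.1)]    # hypothetical stop locations

    # Nearest point on the shape for each stop
    cut_points = MultiPoint([shape.interpolate(shape.project(p)) for p in stops])

    # snap() forces the cut points onto the line so split() recognizes them
    segments = split(snap(shape, cut_points, 1e-8), cut_points)
    print([round(seg.length, 2) for seg in segments.geoms])  # three stop-to-stop pieces

The source instead builds short transversal cut lines through each projected point and falls back to point cuts when the line cuts fail, which copes better with looping shapes.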
merepbj/web-scraping-challenge
|
[
"cf4c401cad78b68af9fe508225ceb48bba99ba83"
] |
[
"Missions_to-Mars/scrape_mars.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nfrom splinter import Browser\nimport time\nbrowser = Browser('chrome','chromedriver')\n\ndef scrape(): \n title, paragraph = mars_news(browser)\n \n data = {\n \"news_title\": title, \n \"news_paragraph\": paragraph,\n \"news_image\": mars_images(browser),\n \"news_facts\": mars_facts(),\n \"news_hemisphere\": mars_hemisphere(browser)\n }\n return data\n\n\n# In[2]:\n\n\n# ### NASA Mars News\n# \n# * Scrape the [NASA Mars News Site](https://mars.nasa.gov/news/) and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.\n\n# In[3]:\n\ndef mars_news(browser): \n\n browser.visit('https://mars.nasa.gov/news/')\n title = browser.find_by_css('div.content_title a').text\n paragraph = browser.find_by_css('div.article_teaser_body').text\n return title, paragraph\n \n\n# ### JPL Mars Space Images - Featured Image\n# \n# * Visit the url for JPL Featured Space Image [here](https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html).\n# \n# * Use splinter to navigate the site and find the image url for the current Featured Mars Image and assign the url string to a variable called `featured_image_url`.\n# \n# * Make sure to find the image url to the full size `.jpg` image.\n# \n# * Make sure to save a complete url string for this image.\n\n# In[4]:\n\ndef mars_images(browser):\n\n browser.visit('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html')\n browser.links.find_by_partial_text('FULL IMAGE').click()\n image = browser.find_by_css('img.fancybox-image')['src']\n return image\n\n# ### Mars Facts\n# \n# * Visit the Mars Facts webpage [here](https://space-facts.com/mars/) and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.\n# \n# * Use Pandas to convert the data to a HTML table string.\n\n# In[5]:\n\ndef mars_facts():\n return pd.read_html('https://space-facts.com/mars/')[0].to_html(classes='table table-stripped')\n\n\n# ### Mars Hemispheres\n# \n# * Visit the USGS Astrogeology site [here](https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars) to obtain high resolution images for each of Mar's hemispheres.\n# \n# * You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image.\n# \n# * Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys `img_url` and `title`.\n# \n# * Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere.\n\n# In[6]:\n\ndef mars_hemisphere(browser):\n browser.visit('https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars')\n\n\n # In[7]:\n\n\n links = browser.find_by_css('a.itemLink h3')\n\n\n # In[8]:\n\n\n hemispheres = []\n for i in range(len(links)):\n hemisphere = {}\n \n hemisphere['title'] = browser.find_by_css('a.itemLink h3')[i].text\n browser.find_by_css('a.itemLink h3')[i].click()\n hemisphere['url'] = browser.find_by_text('Sample')['href']\n hemispheres.append(hemisphere)\n browser.back()\n browser.quit()\n return hemispheres\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"pandas.read_html"
]
] |
xssstory/cogdl
|
[
"ae8de495c365993f19f04774f083960fd282c2a3",
"ae8de495c365993f19f04774f083960fd282c2a3"
] |
[
"cogdl/tasks/node_classification.py",
"examples/custom_dataset.py"
] |
[
"import argparse\nimport copy\nfrom typing import Optional\nimport scipy.sparse as sp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom cogdl.models.supervised_model import SupervisedHomogeneousNodeClassificationModel\nfrom cogdl.trainers.supervised_trainer import (\n SupervisedHomogeneousNodeClassificationTrainer,\n)\nfrom cogdl.trainers.sampled_trainer import SAINTTrainer\n\nfrom . import BaseTask, register_task\n\n\ndef normalize_adj_row(adj):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(adj.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(adj)\n return mx\n\n\ndef to_torch_sparse(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\n\ndef row_l1_normalize(X):\n norm = 1e-6 + X.sum(dim=1, keepdim=True)\n return X/norm\n\n\ndef preprocess_data_sgcpn(data, normalize_feature=True, missing_rate=0):\n data.train_mask = data.train_mask.type(torch.bool)\n data.val_mask = data.val_mask.type(torch.bool)\n # expand test_mask to all rest nodes\n data.test_mask = ~(data.train_mask + data.val_mask)\n # get adjacency matrix\n n = len(data.x)\n adj = sp.csr_matrix((np.ones(data.edge_index.shape[1]), data.edge_index), shape=(n, n))\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) + sp.eye(adj.shape[0])\n adj = normalize_adj_row(adj)\n data.adj = to_torch_sparse(adj).to_dense()\n if normalize_feature:\n data.x = row_l1_normalize(data.x)\n erasing_pool = torch.arange(n)[~data.train_mask]\n size = int(len(erasing_pool) * (missing_rate / 100))\n idx_erased = np.random.choice(erasing_pool, size=size, replace=False)\n if missing_rate > 0:\n data.x[idx_erased] = 0\n return data\n\n\n@register_task(\"node_classification\")\nclass NodeClassification(BaseTask):\n \"\"\"Node classification task.\"\"\"\n\n @staticmethod\n def add_args(parser: argparse.ArgumentParser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument(\"--missing-rate\", type=int, default=-1)\n # fmt: on\n\n def __init__(\n self,\n args,\n dataset=None,\n model: Optional[SupervisedHomogeneousNodeClassificationModel] = None,\n ):\n super(NodeClassification, self).__init__(args)\n\n self.args = args\n self.model_name = args.model\n self.device = args.device_id[0] if not args.cpu else \"cpu\"\n dataset = build_dataset(args) if dataset is None else dataset\n if args.missing_rate >= 0:\n if args.model == 'sgcpn':\n assert args.dataset in ['cora', 'citeseer', 'pubmed']\n dataset.data = preprocess_data_sgcpn(dataset.data, normalize_feature=True, missing_rate=0)\n adj_slice = torch.tensor(dataset.data.adj.size())\n adj_slice[0] = 0\n dataset.slices['adj'] = adj_slice\n\n self.dataset = dataset\n self.data = dataset[0]\n args.num_features = dataset.num_features\n args.num_classes = dataset.num_classes\n args.num_nodes = dataset.data.x.shape[0]\n\n self.model: SupervisedHomogeneousNodeClassificationModel = build_model(args) if model is None else model\n self.model.set_device(self.device)\n\n self.trainer: Optional[\n SupervisedHomogeneousNodeClassificationTrainer\n 
] = self.model.get_trainer(NodeClassification, self.args)(\n self.args\n ) if self.model.get_trainer(\n NodeClassification, self.args\n ) else None\n\n if not self.trainer:\n self.optimizer = torch.optim.Adam(\n self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay\n ) if not hasattr(self.model, \"get_optimizer\") else self.model.get_optimizer(args)\n self.data.apply(lambda x: x.to(self.device))\n self.model: SupervisedHomogeneousNodeClassificationModel = self.model.to(\n self.device\n )\n self.patience = args.patience\n self.max_epoch = args.max_epoch\n\n def train(self):\n if self.trainer:\n if isinstance(self.trainer, SAINTTrainer):\n self.model = self.trainer.fit(self.model, self.dataset)\n self.data.apply(lambda x: x.to(self.device))\n else:\n result = self.trainer.fit(self.model, self.dataset)\n if issubclass(type(result), torch.nn.Module):\n self.model = result\n else:\n return result\n else:\n epoch_iter = tqdm(range(self.max_epoch))\n patience = 0\n best_score = 0\n best_loss = np.inf\n max_score = 0\n min_loss = np.inf\n best_model = copy.deepcopy(self.model)\n for epoch in epoch_iter:\n self._train_step()\n train_acc, _ = self._test_step(split=\"train\")\n val_acc, val_loss = self._test_step(split=\"val\")\n epoch_iter.set_description(\n f\"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}\"\n )\n if val_loss <= min_loss or val_acc >= max_score:\n if val_loss <= best_loss: # and val_acc >= best_score:\n best_loss = val_loss\n best_score = val_acc\n best_model = copy.deepcopy(self.model)\n min_loss = np.min((min_loss, val_loss))\n max_score = np.max((max_score, val_acc))\n patience = 0\n else:\n patience += 1\n if patience == self.patience:\n epoch_iter.close()\n break\n print(f\"Valid accuracy = {best_score}\")\n self.model = best_model\n test_acc, _ = self._test_step(split=\"test\")\n val_acc, _ = self._test_step(split=\"val\")\n print(f\"Test accuracy = {test_acc}\")\n return dict(Acc=test_acc, ValAcc=val_acc)\n\n def _train_step(self):\n self.model.train()\n self.optimizer.zero_grad()\n self.model.loss(self.data).backward()\n self.optimizer.step()\n\n def _test_step(self, split=\"val\", logits=None):\n self.model.eval()\n logits = logits if logits is not None else self.model.predict(self.data) # truth-testing a tensor is ambiguous\n if split == \"train\":\n mask = self.data.train_mask\n elif split == \"val\":\n mask = self.data.val_mask\n else:\n mask = self.data.test_mask\n loss = F.nll_loss(logits[mask], self.data.y[mask]).item()\n\n pred = logits[mask].max(1)[1]\n acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()\n return acc, loss\n",
"from cogdl.data.data import Data\nimport torch\n\nfrom cogdl.tasks import build_task\nfrom cogdl.models import build_model\nfrom cogdl.utils import build_args_from_dict\nfrom cogdl.data import Dataset\n\n\n\"\"\"Define your data\"\"\"\nclass MyData(object):\n def __init__(self):\n num_nodes = 100\n num_edges = 300\n feat_dim = 30\n # load or generate data\n self.edge_index = torch.randint(0, num_nodes, (2, num_edges))\n self.x = torch.randn(num_nodes, feat_dim)\n self.y = torch.randint(0, 2, (num_nodes,))\n\n # set train/val/test mask in node_classification task\n self.train_mask = torch.zeros(num_nodes).bool()\n self.train_mask[0:int(0.3*num_nodes)] = True\n self.val_mask = torch.zeros(num_nodes).bool()\n self.val_mask[int(0.3*num_nodes):int(0.7*num_nodes)] = True\n self.test_mask = torch.zeros(num_nodes).bool()\n self.test_mask[int(0.7*num_nodes):] = True\n\n def apply(self, func):\n for name, value in vars(self).items():\n setattr(self, name, func(value))\n\n @property\n def num_features(self):\n return self.x.shape[1]\n \n @property\n def num_classes(self):\n return int(torch.max(self.y)) + 1\n\n\n\"\"\"Define your dataset\"\"\"\nclass MyDataset(object):\n def __init__(self, datalist):\n self.datalist = datalist\n self.data = self.datalist[0]\n self.num_features = self.datalist[0].num_features\n self.num_classes = self.datalist[0].num_classes\n\n def __getitem__(self, index):\n assert index == 0\n return self.datalist[index]\n\n\n\ndef get_default_args():\n cuda_available = torch.cuda.is_available()\n default_dict = {'hidden_size': 16,\n 'dropout': 0.5,\n 'patience': 100,\n 'max_epoch': 500,\n 'cpu': not cuda_available,\n 'lr': 0.01,\n 'device_id': [0],\n 'weight_decay': 5e-4}\n return build_args_from_dict(default_dict)\n\n\ndef main_dataset():\n args = get_default_args()\n args.task = \"node_classification\"\n args.model = \"gcn\"\n # use customized dataset\n mydata = MyData()\n dataset = MyDataset([mydata])\n args.num_features = dataset.num_features\n args.num_classes = dataset.num_classes\n # use model in cogdl\n model = build_model(args)\n task = build_task(args, dataset, model)\n result = task.train()\n print(result)\n\n\nif __name__ == \"__main__\":\n main_dataset()"
] |
[
[
"torch.Size",
"numpy.isinf",
"numpy.max",
"numpy.random.choice",
"scipy.sparse.diags",
"torch.arange",
"numpy.ones",
"numpy.min",
"torch.sparse.FloatTensor",
"torch.from_numpy",
"scipy.sparse.eye",
"numpy.power",
"torch.nn.functional.nll_loss",
"numpy.vstack"
],
[
"torch.zeros",
"torch.max",
"torch.randint",
"torch.cuda.is_available",
"torch.randn"
]
] |
samuelsmal/drosophVAE
|
[
"4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7",
"4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7"
] |
[
"drosoph_vae/settings/data.py",
"drosoph_vae/settings/skeleton.py"
] |
[
"from enum import Enum\nfrom collections import namedtuple\nimport json\nimport pickle\nfrom datetime import datetime\nimport pathlib\nimport numpy as np\n\nclass Behavior(Enum):\n WALK_FORW = 0\n WALK_BACKW = 1\n PUSH_BALL = 2\n REST = 3\n GROOM_FLEG = 4\n GROOM_ANT = 5\n NONE = 6\n\nExperiment = namedtuple('Experiment', 'study_id, fly_id, experiment_id')\nLabelledSequence = namedtuple('LabelledSequence', ('sequence', 'label') + Experiment._fields)\n\n\ndef experiment_key(study_id=None, experiment_id=None, fly_id=None, obj=None):\n \"\"\"Exhibit A why duck typing is just shit sometimes\"\"\"\n\n if obj:\n return f\"{obj.study_id}-{obj.experiment_id}-{obj.fly_id}\"\n else:\n return f\"{study_id}-{experiment_id}-{fly_id}\"\n\n\n# They the ranges are half-open: [0, 14) in \"mathy\" writing\n_LABELLED_DATA_RAW_ = [\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n ((140, 460), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n ((600, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n ((750, 900), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n ((630, 800), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n ((790, 900), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '003_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '003_SG1'),\n ((570, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '003_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '004_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '004_SG1'),\n ((600, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '004_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '005_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '005_SG1'),\n ((600, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '005_SG1'),\n\n (( 0, 150), Behavior.GROOM_FLEG, '180921_aDN_CsCh', 'Fly6', '003_SG1'),\n ((170, 350), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly6', '003_SG1'),\n ((450, 600), Behavior.REST, '180921_aDN_CsCh', 'Fly6', '003_SG1'),\n\n (( 0, 150), Behavior.REST, '180921_aDN_CsCh', 'Fly6', '001_SG1'),\n ((180, 350), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly6', '001_SG1'),\n ((400, 580), Behavior.REST, '180921_aDN_CsCh', 'Fly6', '001_SG1'),\n\n ((250, 600), Behavior.WALK_BACKW, '180918_MDN_CsCh', 'Fly2', '004_SG1'),\n\n ((190, 300), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly4', '003_SG1'),\n\n ((400, 900), Behavior.WALK_FORW, '180918_MDN_PR', 'Fly1', '003_SG1'),\n\n (( 0, 500), Behavior.REST, '180918_MDN_PR', 'Fly1', '004_SG1'),\n ((650, 900), Behavior.WALK_FORW, '180918_MDN_PR', 'Fly1', '004_SG1'),\n\n (( 0, 500), Behavior.REST, '180918_MDN_PR', 'Fly1', '005_SG1'),\n ((500, 900), Behavior.WALK_FORW, '180918_MDN_PR', 'Fly1', '005_SG1'),\n\n (( 0, 100), Behavior.PUSH_BALL, '180918_MDN_PR', 'Fly2', '001_SG1'),\n ((350, 500), Behavior.GROOM_FLEG, '180918_MDN_PR', 'Fly2', '002_SG1'),\n ((400, 530), Behavior.GROOM_FLEG, '180918_MDN_PR', 'Fly2', '003_SG1'),\n\n ((150, 230), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly3', '001_SG1'),\n\n #((170, 210), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly4', '005_SG1'),\n #((210, 600), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly4', '005_SG1'),\n #((600, 700), Behavior.PUSH_BALL, '180919_MDN_CsCh', 'Fly4', '005_SG1'),\n\n #((600, 700), Behavior.PUSH_BALL, 
'180919_MDN_CsCh', 'Fly4', '005_SG1'),\n\n (( 0, 145), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((145, 225), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((225, 671), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((671, 683), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((683, 761), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((761, 778), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((778, 809), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((809, 813), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((813, 820), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((820, 861), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((861, 868), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((868, 879), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((879, 900), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n\n (( 0, 143), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n ((143, 254), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n ((254, 822), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n ((822, 900), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n\n (( 0, 145), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((145, 247), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((247, 653), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((653, 785), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((785, 803), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((803, 820), Behavior.NONE, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((820, 859), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((859, 900), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n\n (( 0, 147), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((147, 235), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((235, 657), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((657, 816), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((816, 820), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((820, 900), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n\n (( 0, 144), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((144, 226), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((226, 239), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((239, 253), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((253, 267), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((267, 278), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((278, 656), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((656, 659), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((659, 665), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((665, 757), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((757, 768), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((768, 799), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((799, 900), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n]\n\ndef dummy_data_complex_sine_like(length):\n DummyBehaviour = namedtuple('DummyBehaviour', 'type amplitude fraction frequency')\n # make sure that the fractions add up to 1.\n # cluster id, behaviour\n _dummy_behaviours_ = [\n (0, ('sinoid', 1.0, 0.1, 2)),\n (1, ('flat', 0.0, 0.2, 0)),\n (2, ('sinoid', 1.0, 0.2, 3)),\n (3, ('sinoid', 1.0, 
0.1, 5)),\n (4, ('flat', 1.0, 0.2, 0)),\n (2, ('sinoid', .5, .2, 3)),\n ]\n\n\n cur_idx = 0\n nb_frames = length\n\n _new_frames_ = np.zeros(nb_frames)\n _cluster_assignments_ = np.zeros(nb_frames)\n\n for l, db in _dummy_behaviours_:\n db = DummyBehaviour(*db)\n cur_idx_end = np.int(nb_frames * db.fraction + cur_idx)\n idx = np.s_[cur_idx:cur_idx_end]\n if db.type == 'sinoid':\n _new_frames_[idx] = db.amplitude * np.sin(np.pi * np.linspace(0, 2, cur_idx_end - cur_idx) * db.frequency)\n elif db.type == 'flat':\n _new_frames_[idx] = db.amplitude\n\n _cluster_assignments_[idx] = l\n\n cur_idx = cur_idx_end\n\n return _new_frames_, _cluster_assignments_\n\n\nLABELLED_SEQUENCES = [LabelledSequence._make(i) for i in _LABELLED_DATA_RAW_]\nEXPERIMENTS = list(set(Experiment(study_id=l.study_id, fly_id=l.fly_id,\n experiment_id=l.experiment_id) \\\n for l in LABELLED_SEQUENCES))\n",
"from enum import Enum\n\nimport numpy as np\n\nnum_cameras = 7\n\n\nclass Tracked(Enum):\n BODY_COXA = 0\n COXA_FEMUR = 1\n FEMUR_TIBIA = 2\n TIBIA_TARSUS = 3\n TARSUS_TIP = 4\n ANTENNA = 5\n STRIPE = 6\n\n\ntracked_points = [Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.ANTENNA,\n Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.ANTENNA,\n Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE]\n\nlimb_id = [0, 0, 0, 0, 0,\n 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2,\n 3,\n 4, 4, 4,\n 5, 5, 5, 5, 5,\n 6, 6, 6, 6, 6,\n 7, 7, 7, 7, 7,\n 8,\n 9, 9, 9]\n\n__limb_visible_left = [True, True, True, True, True,\n False, False, False, False, False]\n\n__limb_visible_right = [False, False, False, False, False,\n True, True, True, True, True]\n\n__limb_visible_mid = [True, True, False, True, False,\n True, True, False, True, False]\n\nbones = [[0, 1], [1, 2], [2, 3], [3, 4],\n [5, 6], [6, 7], [7, 8], [8, 9],\n [10, 11], [11, 12], [12, 13], [13, 14],\n [16, 17], [17, 18],\n [19, 20], [20, 21], [21, 22], [22, 23],\n [24, 25], [25, 26], [26, 27], [27, 28],\n [29, 30], [30, 31], [31, 32], [32, 33],\n [35, 36], [36, 37]]\n\n# bones3d = [[15, 34], [15, 16], [34, 16]]\nbones3d = [[15, 34]]\n\ncolors = [(255, 0, 0),\n (0, 0, 255),\n (0, 255, 0),\n (150, 200, 200),\n (255, 165, 0),\n (255, 255, 0),\n (255, 0, 255),\n (0, 255, 255),\n (150, 200, 200),\n (255, 165, 0)]\n\nnum_joints = len(tracked_points)\nnum_limbs = len(set(limb_id))\n\n\ndef is_body_coxa(joint_id):\n return tracked_points[joint_id] == Tracked.BODY_COXA\n\n\ndef is_coxa_femur(joint_id):\n return tracked_points[joint_id] == Tracked.COXA_FEMUR\n\n\ndef is_femur_tibia(joint_id):\n return tracked_points[joint_id] == Tracked.FEMUR_TIBIA\n\n\ndef is_tibia_tarsus(joint_id):\n return tracked_points[joint_id] == Tracked.TIBIA_TARSUS\n\n\ndef is_antenna(joint_id):\n return tracked_points[joint_id] == Tracked.ANTENNA\n\n\ndef is_stripe(joint_id):\n return tracked_points[joint_id] == Tracked.STRIPE\n\n\ndef is_tarsus_tip(joint_id):\n return tracked_points[joint_id] == Tracked.TARSUS_TIP\n\n\ndef get_limb_id(joint_id):\n return limb_id[joint_id]\n\n\ndef is_joint_visible_left(joint_id):\n return __limb_visible_left[get_limb_id(joint_id)]\n\n\ndef is_joint_visible_right(joint_id):\n return __limb_visible_right[get_limb_id(joint_id)]\n\n\ndef is_limb_visible_left(limb_id):\n return __limb_visible_left[limb_id]\n\n\ndef is_limb_visible_right(limb_id):\n return __limb_visible_right[limb_id]\n\ndef is_limb_visible_mid(limb_id):\n return __limb_visible_mid[limb_id]\n\ndef camera_see_limb(camera_id, limb_id):\n if camera_id < 3:\n return is_limb_visible_left(limb_id)\n elif camera_id==3:\n return is_limb_visible_mid(limb_id)\n elif camera_id > 3:\n return is_limb_visible_right(limb_id)\n else:\n raise NotImplementedError\n\ndef camera_see_joint(camera_id, joint_id):\n if camera_id in [2, 4]: # they cannot see the stripes\n return camera_see_limb(camera_id, limb_id[joint_id]) and not 
(tracked_points[joint_id]==Tracked.STRIPE and not (limb_id[joint_id] not in [2, 6]))\n elif camera_id == 3:\n return camera_see_limb(camera_id, limb_id[joint_id]) and tracked_points[joint_id] != Tracked.BODY_COXA\n else:\n return camera_see_limb(camera_id, limb_id[joint_id])\n\n\n\nbone_param = np.ones((num_joints, 2), dtype=float)\nbone_param[:, 0] = 0.85\nbone_param[:, 1] = 0.2\nfor joint_id in range(num_joints):\n if is_body_coxa(joint_id) or is_stripe(joint_id) or is_antenna(joint_id):\n bone_param[joint_id, 1] = 10000 # no bone\n\nignore_joint_id = [joint_id for joint_id in\n range(num_joints) if\n is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]\n\nignore_joint_id_wo_stripe = [joint_id for joint_id in\n range(num_joints) if\n is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]\n"
] |
[
[
"numpy.int",
"numpy.linspace",
"numpy.zeros"
],
[
"numpy.ones"
]
] |
sebastian-lapuschkin/Quantus
|
[
"c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f"
] |
[
"quantus/metrics/randomisation_metrics.py"
] |
[
"\"\"\"This module contains the collection of randomisation metrics to evaluate attribution-based explanations of neural network models.\"\"\"\nimport random\nimport warnings\nfrom typing import Callable, Dict, List, Union\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom .base import Metric\nfrom ..helpers import asserts\nfrom ..helpers import utils\nfrom ..helpers import warn_func\nfrom ..helpers.asserts import attributes_check\nfrom ..helpers.model_interface import ModelInterface\nfrom ..helpers.normalise_func import normalise_by_negative\nfrom ..helpers.similar_func import correlation_spearman, ssim\n\n\nclass ModelParameterRandomisation(Metric):\n \"\"\"\n Implementation of the Model Parameter Randomization Method by Adebayo et. al., 2018.\n\n The Model Parameter Randomization measures the distance between the original attribution and a newly computed\n attribution throughout the process of cascadingly/independently randomizing the model parameters of one layer\n at a time.\n\n References:\n 1) Adebayo, J., Gilmer, J., Muelly, M., Goodfellow, I., Hardt, M., and Kim, B. \"Sanity Checks for Saliency Maps.\"\n arXiv preprint, arXiv:1810.073292v3 (2018)\n\n Assumptions:\n In the original paper multiple distance measures are taken: Spearman rank correlation (with and without abs),\n HOG and SSIM. We have set Spearman as the default value.\n \"\"\"\n\n @attributes_check\n def __init__(self, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n abs (boolean): Indicates whether absolute operation is applied on the attribution, default=True.\n normalise (boolean): Indicates whether normalise operation is applied on the attribution, default=True.\n normalise_func (callable): Attribution normalisation function applied in case normalise=True,\n default=normalise_by_negative.\n default_plot_func (callable): Callable that plots the metrics result.\n disable_warnings (boolean): Indicates whether the warnings are printed, default=False.\n display_progressbar (boolean): Indicates whether a tqdm-progress-bar is printed, default=False.\n similarity_func (callable): Similarity function applied to compare input and perturbed input,\n default=correlation_spearman.\n layer_order (string): Indicated whether the model is randomized cascadingly or independently.\n Set order=top_down for cascading randomization, set order=independent for independent randomization,\n default=\"independent\".\n \"\"\"\n super().__init__()\n\n self.args = args\n self.kwargs = kwargs\n self.abs = self.kwargs.get(\"abs\", True)\n self.normalise = self.kwargs.get(\"normalise\", True)\n self.normalise_func = self.kwargs.get(\"normalise_func\", normalise_by_negative)\n self.default_plot_func = Callable\n self.disable_warnings = self.kwargs.get(\"disable_warnings\", False)\n self.display_progressbar = self.kwargs.get(\"display_progressbar\", False)\n self.similarity_func = self.kwargs.get(\"similarity_func\", correlation_spearman)\n self.layer_order = kwargs.get(\"layer_order\", \"independent\")\n self.seed = self.kwargs.get(\"seed\", 42)\n self.last_results = {}\n self.all_results = []\n\n # Asserts and warnings.\n asserts.assert_layer_order(layer_order=self.layer_order)\n if not self.disable_warnings:\n warn_func.warn_parameterisation(\n metric_name=self.__class__.__name__,\n sensitive_params=(\n \"similarity metric 'similarity_func' and the order of \"\n \"the layer randomisation 'layer_order'\"\n ),\n citation=(\n \"Adebayo, J., Gilmer, J., Muelly, M., 
Goodfellow, I., Hardt, M., and Kim, B. \"\n \"'Sanity Checks for Saliency Maps.' arXiv preprint,\"\n \" arXiv:1810.073292v3 (2018)\"\n ),\n )\n warn_func.warn_attributions(normalise=self.normalise, abs=self.abs)\n\n def __call__(\n self,\n model: ModelInterface,\n x_batch: np.array,\n y_batch: np.array,\n a_batch: Union[np.array, None],\n *args,\n **kwargs\n ) -> List[float]:\n \"\"\"\n This implementation represents the main logic of the metric and makes the class object callable.\n It completes batch-wise evaluation of some explanations (a_batch) with respect to some input data\n (x_batch), some output labels (y_batch) and a torch model (model).\n\n Parameters\n model: a torch model e.g., torchvision.models that is subject to explanation\n x_batch: a np.ndarray which contains the input data that are explained\n y_batch: a np.ndarray which contains the output labels that are explained\n a_batch: a Union[np.ndarray, None] which contains pre-computed attributions i.e., explanations\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n channel_first (boolean): Indicates if the image dimensions are channel first, or channel last.\n Inferred from the input shape by default.\n explain_func (callable): Callable generating attributions, default=Callable.\n\n Returns\n last_results: a list of float(s) with the evaluation outcome of the concerned batch\n\n Examples\n # Enable GPU.\n >> device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Load a pre-trained LeNet classification model (architecture at quantus/helpers/models).\n >> model = LeNet()\n >> model.load_state_dict(torch.load(\"tutorials/assets/mnist\"))\n\n # Load MNIST datasets and make loaders.\n >> test_set = torchvision.datasets.MNIST(root='./sample_data', download=True)\n >> test_loader = torch.utils.data.DataLoader(test_set, batch_size=24)\n\n # Load a batch of inputs and outputs to use for XAI evaluation.\n >> x_batch, y_batch = iter(test_loader).next()\n >> x_batch, y_batch = x_batch.cpu().numpy(), y_batch.cpu().numpy()\n\n # Generate Saliency attributions for the test set batch.\n >> a_batch_saliency = Saliency(model).attribute(inputs=x_batch, target=y_batch, abs=True).sum(axis=1)\n >> a_batch_saliency = a_batch_saliency.cpu().numpy()\n\n # Initialise the metric and evaluate explanations by calling the metric instance.\n >> metric = ModelParameterRandomisation(abs=True, normalise=False)\n >> scores = metric(model=model, x_batch=x_batch, y_batch=y_batch, a_batch=a_batch_saliency, **{})\n \"\"\"\n # Reshape input batch to channel first order:\n if \"channel_first\" in kwargs and isinstance(kwargs[\"channel_first\"], bool):\n channel_first = kwargs.pop(\"channel_first\")\n else:\n channel_first = utils.infer_channel_first(x_batch)\n x_batch_s = utils.make_channel_first(x_batch, channel_first)\n\n # Wrap the model into an interface\n if model:\n model = utils.get_wrapped_model(model, channel_first)\n\n # Update kwargs.\n self.kwargs = {\n **kwargs,\n **{k: v for k, v in self.__dict__.items() if k not in [\"args\", \"kwargs\"]},\n }\n if \"img_size\" in kwargs:\n warnings.warn(\n \"argument 'img_size' is deprecated and will be removed in future versions.\"\n )\n if \"nr_channels\" in kwargs:\n warnings.warn(\n \"argument 'nr_channels' is deprecated and will be removed in future versions.\"\n )\n\n self.last_results = {}\n\n # Get explanation function and make asserts.\n explain_func = self.kwargs.get(\"explain_func\", Callable)\n 
asserts.assert_explain_func(explain_func=explain_func)\n\n if a_batch is None:\n\n # Generate explanations.\n a_batch = explain_func(\n model=model.get_model(),\n inputs=x_batch,\n targets=y_batch,\n **self.kwargs,\n )\n a_batch = utils.expand_attribution_channel(a_batch, x_batch_s)\n\n # Asserts.\n asserts.assert_attributions(x_batch=x_batch_s, a_batch=a_batch)\n\n # Create progress bar if desired.\n # Due to the nested for-loops and the requirement of a single progressbar,\n # manual updating will be performed at the end of each inner iteration.\n if self.display_progressbar:\n n_layers = len(\n list(model.get_random_layer_generator(order=self.layer_order))\n )\n n_iterations = n_layers * len(a_batch)\n pbar = tqdm(total=n_iterations)\n\n for layer_name, random_layer_model in model.get_random_layer_generator(\n order=self.layer_order, seed=self.seed\n ):\n\n similarity_scores = []\n\n # Generate an explanation with perturbed model.\n a_perturbed = explain_func(\n model=random_layer_model, inputs=x_batch, targets=y_batch, **self.kwargs\n )\n\n for ix, (a, a_per) in enumerate(zip(a_batch, a_perturbed)):\n\n if self.abs:\n a = np.abs(a)\n a_per = np.abs(a_per)\n\n if self.normalise:\n a = self.normalise_func(a)\n a_per = self.normalise_func(a_per)\n\n # Compute distance measure.\n similarity = self.similarity_func(a_per.flatten(), a.flatten())\n\n similarity_scores.append(similarity)\n\n # Update progress bar if desired.\n if self.display_progressbar:\n pbar.update(1)\n\n # Save similarity scores in a dictionary.\n self.last_results[layer_name] = similarity_scores\n\n # Close progress bar if desired.\n if self.display_progressbar:\n pbar.close()\n\n self.all_results.append(self.last_results)\n\n return self.last_results\n\n\nclass RandomLogit(Metric):\n \"\"\"\n Implementation of the Random Logit Metric by Sixt et al., 2020.\n\n The Random Logit Metric computes the distance between the original explanation and a reference explanation of\n a randomly chosen non-target class.\n\n References:\n 1) Sixt, Leon, Granz, Maximilian, and Landgraf, Tim. 
\"When Explanations Lie: Why Many Modified BP\n Attributions Fail.\"arXiv preprint, arXiv:1912.09818v6 (2020)\n \"\"\"\n\n @attributes_check\n def __init__(self, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n abs (boolean): Indicates whether absolute operation is applied on the attribution, default=False.\n normalise (boolean): Indicates whether normalise operation is applied on the attribution, default=True.\n normalise_func (callable): Attribution normalisation function applied in case normalise=True,\n default=normalise_by_negative.\n default_plot_func (callable): Callable that plots the metrics result.\n disable_warnings (boolean): Indicates whether the warnings are printed, default=False.\n display_progressbar (boolean): Indicates whether a tqdm-progress-bar is printed, default=False.\n similarity_func (callable): Similarity function applied to compare input and perturbed input,\n default=ssim.\n num_classes (integer): Number of prediction classes in the input, default=1000.\n \"\"\"\n super().__init__()\n\n self.args = args\n self.kwargs = kwargs\n self.abs = self.kwargs.get(\"abs\", False)\n self.normalise = self.kwargs.get(\"normalise\", True)\n self.default_plot_func = Callable\n self.disable_warnings = self.kwargs.get(\"disable_warnings\", False)\n self.display_progressbar = self.kwargs.get(\"display_progressbar\", False)\n self.normalise_func = self.kwargs.get(\"normalise_func\", normalise_by_negative)\n self.similarity_func = self.kwargs.get(\"similarity_func\", ssim)\n self.num_classes = self.kwargs.get(\"num_classes\", 1000)\n self.seed = self.kwargs.get(\"seed\", 42)\n self.last_results = []\n self.all_results = []\n\n # Asserts and warnings.\n if not self.disable_warnings:\n warn_func.warn_parameterisation(\n metric_name=self.__class__.__name__,\n sensitive_params=(\"similarity metric 'similarity_func'\"),\n citation=(\n \"Sixt, Leon, Granz, Maximilian, and Landgraf, Tim. 'When Explanations Lie: \"\n \"Why Many Modified BP Attributions Fail.' 
arXiv preprint, \"\n \"arXiv:1912.09818v6 (2020)\"\n ),\n )\n warn_func.warn_attributions(normalise=self.normalise, abs=self.abs)\n\n def __call__(\n self,\n model: ModelInterface,\n x_batch: np.array,\n y_batch: np.array,\n a_batch: Union[np.array, None],\n *args,\n **kwargs\n ) -> List[float]:\n \"\"\"\n This implementation represents the main logic of the metric and makes the class object callable.\n It completes batch-wise evaluation of some explanations (a_batch) with respect to some input data\n (x_batch), some output labels (y_batch) and a torch model (model).\n\n Parameters\n model: a torch model e.g., torchvision.models that is subject to explanation\n x_batch: a np.ndarray which contains the input data that are explained\n y_batch: a np.ndarray which contains the output labels that are explained\n a_batch: a Union[np.ndarray, None] which contains pre-computed attributions i.e., explanations\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n channel_first (boolean): Indicates if the image dimensions are channel first, or channel last.\n Inferred from the input shape by default.\n explain_func (callable): Callable generating attributions, default=Callable.\n\n Returns\n last_results: a list of float(s) with the evaluation outcome of the concerned batch\n\n Examples\n # Enable GPU.\n >> device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Load a pre-trained LeNet classification model (architecture at quantus/helpers/models).\n >> model = LeNet()\n >> model.load_state_dict(torch.load(\"tutorials/assets/mnist\"))\n\n # Load MNIST datasets and make loaders.\n >> test_set = torchvision.datasets.MNIST(root='./sample_data', download=True)\n >> test_loader = torch.utils.data.DataLoader(test_set, batch_size=24)\n\n # Load a batch of inputs and outputs to use for XAI evaluation.\n >> x_batch, y_batch = iter(test_loader).next()\n >> x_batch, y_batch = x_batch.cpu().numpy(), y_batch.cpu().numpy()\n\n # Generate Saliency attributions for the test set batch.\n >> a_batch_saliency = Saliency(model).attribute(inputs=x_batch, target=y_batch, abs=True).sum(axis=1)\n >> a_batch_saliency = a_batch_saliency.cpu().numpy()\n\n # Initialise the metric and evaluate explanations by calling the metric instance.\n >> metric = RandomLogit(abs=True, normalise=False)\n >> scores = metric(model=model, x_batch=x_batch, y_batch=y_batch, a_batch=a_batch_saliency, **{})\n \"\"\"\n # Reshape input batch to channel first order:\n if \"channel_first\" in kwargs and isinstance(kwargs[\"channel_first\"], bool):\n channel_first = kwargs.pop(\"channel_first\")\n else:\n channel_first = utils.infer_channel_first(x_batch)\n x_batch_s = utils.make_channel_first(x_batch, channel_first)\n\n # Wrap the model into an interface\n if model:\n model = utils.get_wrapped_model(model, channel_first)\n\n # Update kwargs.\n self.kwargs = {\n **kwargs,\n **{k: v for k, v in self.__dict__.items() if k not in [\"args\", \"kwargs\"]},\n }\n if \"img_size\" in kwargs:\n warnings.warn(\n \"argument 'img_size' is deprecated and will be removed in future versions.\"\n )\n if \"nr_channels\" in kwargs:\n warnings.warn(\n \"argument 'nr_channels' is deprecated and will be removed in future versions.\"\n )\n\n self.last_results = []\n\n # Get explanation function and make asserts.\n explain_func = self.kwargs.get(\"explain_func\", Callable)\n asserts.assert_explain_func(explain_func=explain_func)\n\n if a_batch is None:\n # Generate explanations.\n a_batch = explain_func(\n 
model=model.get_model(),\n inputs=x_batch,\n targets=y_batch,\n **self.kwargs,\n )\n a_batch = utils.expand_attribution_channel(a_batch, x_batch_s)\n\n # Asserts.\n asserts.assert_attributions(x_batch=x_batch, a_batch=a_batch)\n\n # use tqdm progressbar if not disabled\n if not self.display_progressbar:\n iterator = enumerate(zip(x_batch_s, y_batch, a_batch))\n else:\n iterator = tqdm(\n enumerate(zip(x_batch_s, y_batch, a_batch)), total=len(x_batch_s)\n )\n\n for ix, (x, y, a) in iterator:\n\n if self.abs:\n a = np.abs(a)\n\n if self.normalise:\n a = self.normalise_func(a)\n\n # Randomly select off-class labels.\n random.seed(a=self.seed)\n y_off = np.array(\n [\n random.choice(\n [y_ for y_ in list(np.arange(0, self.num_classes)) if y_ != y]\n )\n ]\n )\n\n # Explain against a random class.\n a_perturbed = explain_func(\n model=model.get_model(),\n inputs=np.expand_dims(x, axis=0),\n targets=y_off,\n **self.kwargs,\n )\n\n if self.abs:\n a_perturbed = np.abs(a_perturbed)\n\n if self.normalise:\n a_perturbed = self.normalise_func(a_perturbed)\n\n self.last_results.append(\n self.similarity_func(a.flatten(), a_perturbed.flatten())\n )\n\n self.all_results.append(self.last_results)\n\n return self.last_results\n"
] |
[
[
"numpy.expand_dims",
"numpy.arange",
"numpy.abs"
]
] |
ddddwee1/SULT
|
[
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784"
] |
[
"example/FaceResNet/evaluation.py",
"SUL1/sample/feature_fusion/iterative/conv3d_model.py",
"SUL1/sample/conditional_gan/condgan.py",
"example/RepNet/train.py",
"example/FaceVGG/datareader.py"
] |
[
"import tensorflow as tf \nimport model3 as M \nimport numpy as np \nimport resnet\nimport cv2\n\nclass FaceResNet(M.Model):\n\tdef initialize(self):\n\t\tself.resnet = resnet.ResNet([64,64,128,256,512], [3, 4, 14, 3], 512)\n\n\tdef forward(self, x):\n\t\tfeat = self.resnet(x)\n\t\treturn feat\n\ntf.keras.backend.set_learning_phase(True)\n\nmodel = FaceResNet()\noptimizer = tf.keras.optimizers.Adam(0.0001)\nsaver = M.Saver(model, optimizer)\nsaver.restore('./model/')\n\t\ndef extract_feature(imgname):\n\timg = cv2.imread(imgname)\n\timg = np.float32(img)[None,...]\n\tfeat = model(img).numpy()[0]\n\tfeat = feat.reshape([-1])\n\tfeat = feat / np.linalg.norm(feat)\n\treturn feat \n\nfeat = extract_feature('1.jpg')\n",
"import tensorflow as tf \nimport numpy as np \nimport model as M \n\nclass video_conv3d():\n\tdef __init__(self,classnum, accum=1,isTraining=True, model_path='./model/'):\n\t\tself.accumulation = accum\n\t\tself.classnum = classnum\n\t\tself.model_path = model_path\n\t\tself.global_step = 0\n\t\t# create input placeholder and label placeholder\n\t\tself.input_holder = tf.placeholder(tf.float32,[None,None,16,112,112,3])\n\t\tself.lab_holder = tf.placeholder(tf.float32,[None,classnum])\n\t\tself.dropout = tf.placeholder(tf.float32)\n\n\t\t# build model and classifier and optimizer\n\t\tfeat = tf.map_fn(self.model,self.input_holder)\n\t\tself.feat = self.feat_fusion(feat)\n\n\t\tself.build_classifier()\n\n\t\t# create session and saver\n\t\tself.sess = tf.Session()\n\t\tself.saver = tf.train.Saver()\n\t\tM.loadSess(self.model_path,self.sess,init=True)\n\n\tdef model(self,inp):\n\t\twith tf.variable_scope('conv3d_incep',reuse=tf.AUTO_REUSE):\n\t\t\tmod = M.Model(inp)\n\t\t\tself.blk_num = 0\n\t\t\tmod.conv3dLayer(3,64,activation=M.PARAM_LRELU) \n\t\t\tmod.maxpool3dLayer([1,2,2],stride=[1,2,2]) # 56\n\t\t\tmod.conv3dLayer(3,128,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 28\n\t\t\tmod.conv3dLayer(3,256,activation=M.PARAM_LRELU)\n\t\t\tmod.conv3dLayer(3,256,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 14\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 7\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 4\n\t\t\tprint(mod.get_current_layer())\n\t\t\tmod.flatten()\n\t\t\tmod.fcLayer(2048,activation=M.PARAM_LRELU)\n\t\t\tmod.dropout(self.dropout)\n\t\t\tmod.fcLayer(1024,activation=M.PARAM_LRELU)\n\t\t\t# mod.dropout(self.dropout)\n\n\t\treturn mod.get_current_layer()\n\n\tdef feat_fusion(self,feats):\n\t\twith tf.variable_scope('fusion'):\n\t\t\tmod = M.Model(feats)\n\t\t\tmod.dyn_route(3)\n\t\treturn mod.get_current_layer()\n\n\tdef build_classifier(self):\n\t\twith tf.variable_scope('classifier'):\n\t\t\tlogit_layer, eval_layer = M.enforcedClassifier(self.feat, self.lab_holder, dropout=1., multi=None)\n\t\t\t# logit_layer = eval_layer = self.feat\n\t\t\tself.accuracy = M.accuracy(eval_layer, tf.argmax(self.lab_holder,-1))\n\t\tself.eval_layer = eval_layer\n\t\twith tf.variable_scope('optimizer'):\n\t\t\tself.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit_layer, labels=self.lab_holder))\n\t\t\twith tf.control_dependencies(M.get_update_ops()):\n\t\t\t\ttrainer = M.Trainer(0.0001, self.loss)\n\t\t\t\tself.train_step = trainer.train()\n\t\t\t\tself.accum_step = trainer.accumulate()\n\n\tdef train(self, inp, lab, normalize=True):\n\t\tself.global_step += 1\n\t\tinp = np.float32(inp)\n\t\tlab = np.float32(lab)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\ttrain_step = self.train_step if self.global_step%self.accumulation==0 else self.accum_step\n\t\tls,acc, _ = self.sess.run([self.loss, self.accuracy, train_step], feed_dict = {self.input_holder:inp, self.lab_holder:lab,\\\n\t\t\t\t\t\t\t\t\tself.dropout:0.5})\n\t\treturn ls,acc \n\n\tdef eval(self, inp, lab, normalize=True):\n\t\tinp = np.float32(inp)\n\t\tlab = np.float32(lab)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\tls,acc = self.sess.run([self.loss, self.accuracy], feed_dict = {self.input_holder:inp, self.lab_holder:lab, self.dropout:1.0})\n\t\treturn ls,acc \n\n\tdef get_score(self, inp, 
normalize=True):\n\t\tinp = np.float32(inp)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\tscr = self.sess.run(self.eval_layer, feed_dict = {self.input_holder:inp, self.dropout:1.0})\n\t\treturn scr \n\n\tdef get_feature(self, inp, normalize=True):\n\t\tinp = np.float32(inp)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\tfeat = self.sess.run(self.feat, feed_dict = {self.input_holder:inp, self.dropout:1.0})\n\t\treturn feat \n\n\tdef save(self, name):\n\t\tprint('Saving model to',self.model_path+name,'...')\n\t\tself.saver.save(self.sess, self.model_path+name)\n",
"import tensorflow as tf \nimport model as M \nimport numpy as np \nimport cv2\nfrom datetime import datetime\nimport random\n\nZDIM = 64\nIMGPIX = 128\nwith tf.name_scope('vecinp'):\n\tz = tf.placeholder(tf.float32,[None,ZDIM])\nwith tf.name_scope('img'):\n\timgholder = tf.placeholder(tf.float32,[None,128,128,1])\nwith tf.name_scope('classInp'):\n\tclassholder = tf.placeholder(tf.int64,[None])\n\nVARS = {}\nBSIZE = 32\nLR = 0.0002\nBETA=0.4\nCLASS = 1000\n\ndef gen(inp,shape,reuse=False):\n\twith tf.variable_scope('Generator',reuse=reuse):\n\t\tmod = M.Model(inp,shape)\n\t\tmod.fcLayer(4*4*512)\n\t\tmod.construct([4,4,512])\n\t\tmod.deconvLayer(4,256,stride=2,activation=M.PARAM_RELU,batch_norm=True)#8\n\t\tmod.deconvLayer(4,128,stride=2,activation=M.PARAM_RELU,batch_norm=True)#16\n\t\tmod.deconvLayer(4,64,stride=2,activation=M.PARAM_RELU,batch_norm=True)#32\n\t\tmod.deconvLayer(4,32,stride=2,activation=M.PARAM_RELU,batch_norm=True)#64\n\t\tmod.deconvLayer(4,16,stride=2,activation=M.PARAM_RELU,batch_norm=True)#128\n\t\tmod.deconvLayer(4,1,stride=1,activation=M.PARAM_TANH,batch_norm=True)\n\t\tVARS['g'] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Generator')\n\t\tprint(len(VARS['g']))\n\t\treturn mod.get_current_layer()\n\ndef dis(inp,shape,reuse=False):\n\twith tf.variable_scope('Discriminator',reuse=reuse):\n\t\tmod = M.Model(inp,shape)\n\t\tmod.convLayer(5,16,stride=2,activation=M.PARAM_ELU,batch_norm=True)#64\n\t\tmod.convLayer(4,32,stride=2,activation=M.PARAM_ELU,batch_norm=True)#32\n\t\tmod.convLayer(4,64,stride=2,activation=M.PARAM_ELU,batch_norm=True)#16\n\t\tmod.convLayer(4,128,stride=2,activation=M.PARAM_ELU,batch_norm=True)#8\n\t\tmod.convLayer(4,256,stride=2,activation=M.PARAM_ELU,batch_norm=True)#4\n\t\tmod.flatten()\n\t\tmod.fcLayer(2)\n\t\tVARS['d'] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Discriminator')\n\t\tprint(len(VARS['d']))\n\t\treturn mod.get_current_layer()\n\ndef classifier(inp,shape,reuse=False):\n\twith tf.variable_scope('Classifier',reuse=reuse):\n\t\tmod = M.Model(inp,shape)\n\t\tmod.convLayer(5,32,stride=2,activation=M.PARAM_ELU,batch_norm=True)#64\n\t\tmod.convLayer(4,64,stride=2,activation=M.PARAM_ELU,batch_norm=True)#32\n\t\tmod.convLayer(4,128,stride=2,activation=M.PARAM_ELU,batch_norm=True)#16\n\t\tmod.convLayer(4,256,stride=2,activation=M.PARAM_ELU,batch_norm=True)#8\n\t\tmod.convLayer(4,512,stride=2,activation=M.PARAM_ELU,batch_norm=True)#4\n\t\tmod.flatten()\n\t\tmod.fcLayer(ZDIM)\n\t\ta = mod.l2norm()\n\t\tmod.fcLayer(CLASS)\n\t\tVARS['c'] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Classifier')\n\t\tprint(len(VARS['c']))\n\t\treturn mod.get_current_layer(),a[0]\n\ngenerated = gen(z,[None,ZDIM])\ndisfalse = dis(generated,[None,128,128,1])\ndistrue = dis(imgholder,[None,128,128,1],reuse=True)\nclassed,_ = classifier(imgholder,[None,128,128,1])\n_,fv = classifier(generated,[None,128,128,1],reuse=True)\n\n\nwith tf.name_scope('lossG'):\n\tlossG1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.ones([BSIZE],dtype=tf.int64),logits=disfalse))\n\tlossG2 = tf.reduce_mean(tf.reduce_sum(tf.square(z-fv)))\n\tlossG3 = tf.reduce_mean((tf.square(imgholder - generated)))\n\ttf.summary.scalar('lossG1',lossG1)\n\ttf.summary.scalar('lossG2',lossG2)\n\ttf.summary.scalar('lossG3',lossG3)\n\tlossG = lossG1 + lossG2 + lossG3\n\t# lossG = lossG1\n\ttf.summary.scalar('lossG',lossG)\nwith tf.name_scope('lossD'):\n\tlossD1 = 
tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.ones([BSIZE],dtype=tf.int64),logits=distrue))\n\tlossD2 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.zeros([BSIZE],dtype=tf.int64),logits=disfalse))\n\tlossD = 0.5*(lossD1+lossD2)\n\ttf.summary.scalar('lossD',lossD)\nwith tf.name_scope('lossC'):\n\tlossC = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=classholder,logits=classed))\n\ttf.summary.scalar('lossC',lossC)\n\nwith tf.name_scope('opti'):\n\twith tf.name_scope('optiG'):\n\t\ttrainG = tf.train.RMSPropOptimizer(LR).minimize(lossG,var_list=VARS['g'])\n\twith tf.name_scope('optiD'):\n\t\ttrainD = tf.train.RMSPropOptimizer(LR).minimize(lossD,var_list=VARS['d'])\n\twith tf.name_scope('iptiC'):\n\t\ttrainC = tf.train.RMSPropOptimizer(LR).minimize(lossC,var_list=VARS['c'])\n\n# with tf.name_scope('opti'):\n# \twith tf.name_scope('optiG'):\n# \t\ttrainG = tf.train.AdamOptimizer(learning_rate=LR,beta1=BETA).minimize(lossG,var_list=VARS['g'])\n# \twith tf.name_scope('optiD'):\n# \t\ttrainD = tf.train.AdamOptimizer(learning_rate=LR,beta1=BETA).minimize(lossD,var_list=VARS['d'])\n# \twith tf.name_scope('iptiC'):\n# \t\ttrainC = tf.train.AdamOptimizer(learning_rate=LR,beta1=BETA).minimize(lossC,var_list=VARS['c'])\n\n# Use this block when generating imgs\n# noise = tf.placeholder(tf.float32,[None,ZDIM])\n# _,fv = classifier(imgholder,[None,128,128,1])\n# generated = gen(fv+noise,[None,ZDIM])\n\ndef getGeneratedImg(sess,it):\n\ta = np.random.uniform(size=[4,ZDIM],low=-1.0,high=1.0)\n\ta = a/np.linalg.norm(a,axis=1,keepdims=True)\n\timg = sess.run(generated,feed_dict={z:a})\n\timg = img+1\n\timg = img*127\n\timg = img.astype(np.uint8)\n\tfor i in range(4):\n\t\tcv2.imwrite('res/iter'+str(it)+'img'+str(i)+'.jpg',cv2.resize(img[i],(128,128)))\n\ndef getData():\n\tf = open('avclb2.txt')\n\tdt = []\n\tcounter = 0\n\tfor line in f:\n\t\tcounter+=1\n\t\tif (counter+1)%1000==0:\n\t\t\tprint(counter+1)\n\t\t\t# break\n\t\tl = line.replace('\\n','').split(' ')\n\t\timg = np.float32(cv2.resize(cv2.imread(l[0],0),(IMGPIX,IMGPIX))).reshape([128,128,1])\n\t\timg = img / 127.5\n\t\timg = img -1\n\t\tlb = int(l[1])\n\t\tdt.append((img,lb))\n\treturn dt\n\ndef training():\n\tmerged = tf.summary.merge_all()\n\tdata = getData()\n\tsaver = tf.train.Saver()\n\twith tf.Session() as sess:\n\t\twriter = tf.summary.FileWriter('./logs/',sess.graph)\n\t\tM.loadSess('./model/',sess=sess)\n\t\tcounter = M.counter\n\t\tfor i in range(1000000):\n\t\t\tcounter+=1\n\t\t\tsample = random.sample(data,BSIZE)\n\t\t\tx_train = [i[0] for i in sample]\n\t\t\ty_train = [i[1] for i in sample]\n\t\t\ta = np.random.uniform(size=[BSIZE,ZDIM],low=-1.0,high=1.0)\n\t\t\ta = a/np.linalg.norm(a,axis=1,keepdims=True)\n\t\t\t# ge = sess.run(generated,feed_dict={z:a})\n\t\t\tfor _ in range(5):\n\t\t\t\tsess.run(trainG,feed_dict={z:a,imgholder:x_train})\n\t\t\t_,_,mg,lsd,lsg,lsc = sess.run([trainC,trainD,merged,lossD,lossG,lossC],feed_dict={z:a,imgholder:x_train,classholder:y_train})\n\t\t\tif (i)%5 == 0:\n\t\t\t\twriter.add_summary(mg,counter)\n\t\t\t\tprint('iter:',i)\n\t\t\t\tprint('lsd:',lsd)\n\t\t\t\tprint('lsg:',lsg)\n\t\t\t\tprint('lsc:',lsc)\n\t\t\tif (i+1)%100==0:\n\t\t\t\tgetGeneratedImg(sess,i+1)\t\n\t\t\tif (i+1)%1000==0:\n\t\t\t\tsaver.save(sess,'./model/ModelCounter'+str(counter)+'.ckpt')\n\ndef getSample():\n\twith tf.Session() as sess:\n\t\tdata = getData()\n\t\tM.loadSess('./model/',sess=sess)\n\t\tfor i in range(20):\n\t\t\tx_train = 
random.sample(data,1)\n\t\t\t# print(x_train[0].shape)\n\t\t\tx_train = np.float32(x_train[0][0]).reshape([-1,128,128,1])\n\t\t\tfor j in range(8):\n\t\t\t\t# a = np.random.uniform(size=[1,ZDIM],low=-0.2,high=0.2)\n\t\t\t\ta = np.zeros([1,ZDIM],dtype=np.float32)\n\t\t\t\tgenimg = sess.run(generated,feed_dict={imgholder:x_train,noise:a})\n\t\t\t\tgenimg = (genimg+1)*127\n\t\t\t\tgenimg = genimg.astype(np.uint8)\n\t\t\t\tcv2.imwrite('./sampleimg/'+str(i)+'gen'+str(j)+'.jpg',cv2.resize(genimg[0],(128,128)))\n\t\t\t\tcv2.imwrite('./sampleimg/'+str(i)+'org.jpg',cv2.resize(((x_train[0]+1)*127).astype(np.uint8),(128,128)))\n\n# getSample()\ntraining()",
"import tensorflow as tf \nimport model3 as M \nimport datareader \nimport numpy as np \nimport tqdm \nimport network\n\ndef grad_loss(x, model):\n\tx2d, x3d = x\n\twith tf.GradientTape() as tape:\n\t\tpred, K, reprojected, crit_fake = model(x2d)\n\t\tcrit_real = model.crit(x3d)\n\n\t\tcrit_dis = tf.reduce_mean(tf.square(crit_real - tf.ones_like(crit_real))) + tf.reduce_mean(tf.square(crit_fake - tf.zeros_like(crit_fake)))\n\t\tcrit_gen = tf.reduce_mean(tf.square(crit_fake - tf.ones_like(crit_fake)))\n\n\t\trep_loss = tf.reduce_mean(tf.square(pred - x2d))\n\n\t\tKK = tf.matmul(K, K, transpose_b=True)\n\t\tK_trace = tf.expand_dims(tf.expand_dims(tf.trace(KK), -1), -1)\n\t\tK_loss = tf.reduce_mean(tf.abs(KK / K_trace - tf.eye(2))) \n\n\t\tloss_total_gen = crit_gen + rep_loss + K_loss\n\n\tgen_var = model.get_gen_vars()\n\tdis_var = model.dis.trainable_variables\n\tgrads = tape.gradient([loss_total_gen, crit_dis], [gen_var, dis_var])\n\treturn grads, [crit_dis, crit_gen, rep_loss, K_loss]\n\nreader = datareader.DataReader(16)\nmodel = network.RepNet()\noptim = tf.optimizers.Adam(0.0001, 0.5)\nsaver = M.Saver(model)\nsaver.restore('./model/')\n\nMAXITER = 10000\n\nbar = tqdm(range(MAXITER+1))\nfor i in bar:\n\tbatch = reader.get_next()\n\tgrads, lss = grad_loss(batch, model)\n\n\tgen_var = model.get_gen_vars()\n\tdis_var = model.dis.trainable_variables\n\toptim.apply_gradients(zip(grads[0], gen_var))\n\toptim.apply_gradients(zip(grads[1], dis_var))\n\n\tbar.set_description('CDis:%.4f CGen:%.4f Rep:%.4f K:%.4f'%(lss[0], lss[1], lss[2], lss[3]))\n\n\tif i%1000==0 and i>0:\n\t\tsaver.save('./model/repnet.ckpt')\n",
"import numpy as np \nimport cv2 \nimport random\nfrom multiprocessing.pool import ThreadPool\nimport time \n\ndef adjust_img(img):\n\t# a = np.random.randint(2)\n\t# if a==1:\n\t# \timg = np.flip(img, axis=1)\n\tif random.random()>0.5:\n\t\timg = np.flip(img, axis=1)\n\treturn img \n\n# def process(batch, eye):\n# \t# add more process here\n# \timgs, labels = list(zip(*batch))\n# \t# imgs = [cv2.resize(cv2.imread(i), (128,128)) for i in imgs]\n# \tt = time.time()\n# \timgs = [cv2.imread(i) for i in imgs]\n# \tt2 = time.time()\n# \tprint('DATA TIME', t2-t)\n# \timgs = [adjust_img(i) for i in imgs]\n# \tt3 = time.time()\n# \tprint('FLIP TIME', t3-t2)\n# \tlabels = eye[np.array(labels)]\n# \tbatch = [np.float32(imgs), np.float32(labels)]\n# \tt4 = time.time()\n# \tprint('CVT TIME', t4-t3)\n# \treturn batch\n\ndef process(sample):\n\tbatch, eye = sample\n\t# add more process here\n\timg, label = batch\n\t# imgs = [cv2.resize(cv2.imread(i), (128,128)) for i in imgs]\n\t# t = time.time()\n\timg = cv2.resize(cv2.imread(img), (128,128))\n\t# t2 = time.time()\n\t# print('DATA TIME', t2-t)\n\timg = adjust_img(img)\n\t# t3 = time.time()\n\t# print('FLIP TIME', t3-t2)\n\tlabel = eye[label]\n\t# t4 = time.time()\n\t# print('CVT TIME', t4-t3)\n\treturn img, label\n\nclass DataReader():\n\tdef __init__(self, listfile, bsize):\n\t\tf = open(listfile, 'r')\n\t\tself.data = []\n\t\tprint('Reading text file...')\n\t\tmax_label = 0\n\t\tfor line in f:\n\t\t\tline = line.strip().split('\\t')\n\t\t\timg = line[0]\n\t\t\tlabel = int(line[1])\n\t\t\tif label>max_label:\n\t\t\t\tmax_label = label\n\t\t\tself.data.append([img, label])\n\t\trandom.shuffle(self.data)\n\t\tprint(self.data[0])\n\t\tprint('Finished')\n\t\tself.pos = 0\n\t\tself.epoch = 0\n\t\tself.bsize = bsize\n\t\tself.max_label = label\n\t\tself.iter_per_epoch = len(self.data)//self.bsize\n\t\tself.pool = ThreadPool(processes=32)\n\t\tself.eye = np.eye(self.max_label+1)\n\t\tself.prefetch()\n\t\t\n\t\tprint('max_label:',max_label)\n\n\tdef prefetch(self):\n\t\tif self.pos + self.bsize > len(self.data):\n\t\t\tself.pos = 0\n\t\t\tself.epoch += 1\n\t\t\tprint(self.data[0])\n\t\t\trandom.shuffle(self.data)\n\n\t\tbatch = self.data[self.pos: self.pos+self.bsize]\n\t\targs = (batch, [self.eye]*len(batch))\n\t\targs = list(zip(*args))\n\t\tself.p = self.pool.map_async(process, args)\n\t\tself.pos += self.bsize\n\n\n\tdef get_next(self):\n\t\tbatch = self.p.get()\n\t\tbatch = list(zip(*batch))\n\t\tbatch = [np.float32(_) for _ in batch]\n\t\tself.prefetch()\n\t\treturn batch\n\nif __name__=='__main__':\n\tdata_reader = DataReader('imglist_iccv_clean.txt', 256*4)\n\tfor i in range(100):\n\t\tt1 = time.time()\n\t\tbatch = data_reader.get_next()\n\t\tt2 = time.time()\n\t\tprint(t2-t1)\n\tprint(batch[0].shape)\n\tprint(batch[1].shape)\n"
] |
[
[
"numpy.float32",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.keras.optimizers.Adam",
"numpy.linalg.norm"
],
[
"tensorflow.argmax",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.map_fn",
"numpy.float32",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2"
],
[
"numpy.linalg.norm",
"tensorflow.zeros",
"numpy.zeros",
"tensorflow.summary.scalar",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.ones",
"tensorflow.variable_scope",
"numpy.random.uniform",
"tensorflow.placeholder",
"tensorflow.name_scope",
"numpy.float32",
"tensorflow.summary.merge_all",
"tensorflow.summary.FileWriter",
"tensorflow.square",
"tensorflow.get_collection",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
],
[
"tensorflow.trace",
"tensorflow.GradientTape",
"tensorflow.eye",
"tensorflow.optimizers.Adam",
"tensorflow.matmul",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"tensorflow.square"
],
[
"numpy.float32",
"numpy.flip",
"numpy.eye"
]
] |
seqsense/CenterNet
|
[
"5cd5f3c1f42d8cfb5fc3157f8c1945b6787f11eb"
] |
[
"src/lib/datasets/sample/bbox_sample.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.utils.data as data\nimport numpy as np\nimport torch\nimport json\nimport cv2\nimport os\nfrom centernet_utils.image import flip, color_aug\nfrom centernet_utils.image import get_affine_transform, affine_transform\nfrom centernet_utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian\nfrom centernet_utils.image import draw_dense_reg\nimport math\n\nclass BBoxTaskDataset(data.Dataset):\n\n def _get_border(self, border, size):\n i = 1\n while size - border // i <= border // i:\n i *= 2\n return border // i\n\n def __getitem__(self, index):\n data_dict = self.image_datas[index]\n file_name = data_dict['file_name']\n objects = data_dict['objects']\n img_path = os.path.join(self.image_dir, file_name)\n num_objs = min(len(objects), self.max_objs)\n\n img = cv2.imread(img_path)\n\n height, width = img.shape[0], img.shape[1]\n c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)\n if self.opt.keep_res:\n input_h = (height | self.opt.pad) + 1\n input_w = (width | self.opt.pad) + 1\n s = np.array([input_w, input_h], dtype=np.float32)\n else:\n s = max(img.shape[0], img.shape[1]) * 1.0\n input_h, input_w = self.opt.input_h, self.opt.input_w\n \n flipped = False\n # if self.split == 'train':\n if not self.opt.not_rand_crop:\n s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))\n w_border = self._get_border(128, img.shape[1])\n h_border = self._get_border(128, img.shape[0])\n c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)\n c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)\n else:\n sf = self.opt.scale\n cf = self.opt.shift\n c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\n c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\n s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)\n \n if np.random.random() < self.opt.flip:\n flipped = True\n img = img[:, ::-1, :]\n c[0] = width - c[0] - 1\n \n\n trans_input = get_affine_transform(\n c, s, 0, [input_w, input_h])\n inp = cv2.warpAffine(img, trans_input, \n (input_w, input_h),\n flags=cv2.INTER_LINEAR)\n inp = (inp.astype(np.float32) / 255.)\n if not self.opt.no_color_aug:\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\n inp = (inp - self.mean) / self.std\n inp = inp.transpose(2, 0, 1)\n\n output_h = input_h // self.opt.down_ratio\n output_w = input_w // self.opt.down_ratio\n num_classes = self.num_classes\n trans_output = get_affine_transform(c, s, 0, [output_w, output_h])\n\n hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)\n wh = np.zeros((self.max_objs, 2), dtype=np.float32)\n dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)\n reg = np.zeros((self.max_objs, 2), dtype=np.float32)\n ind = np.zeros((self.max_objs), dtype=np.int64)\n reg_mask = np.zeros((self.max_objs), dtype=np.uint8)\n cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)\n cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)\n \n draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \\\n draw_umich_gaussian\n\n gt_det = []\n for k in range(num_objs):\n obj = objects[k]\n bbox = obj['bbox']\n cls_id = obj['class_id']\n if flipped:\n bbox[[0, 2]] = width - bbox[[2, 0]] - 1\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 
0, output_h - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if h > 0 and w > 0:\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n radius = self.opt.hm_gauss if self.opt.mse_loss else radius\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n draw_gaussian(hm[cls_id], ct_int, radius)\n wh[k] = 1. * w, 1. * h\n ind[k] = ct_int[1] * output_w + ct_int[0]\n reg[k] = ct - ct_int\n reg_mask[k] = 1\n cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]\n cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1\n if self.opt.dense_wh:\n draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)\n gt_det.append([ct[0] - w / 2, ct[1] - h / 2, \n ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])\n \n ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh}\n if self.opt.dense_wh:\n hm_a = hm.max(axis=0, keepdims=True)\n dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)\n ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})\n del ret['wh']\n elif self.opt.cat_spec_wh:\n ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})\n del ret['wh']\n if self.opt.reg_offset:\n ret.update({'reg': reg})\n if self.opt.debug > 0:\n gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \\\n np.zeros((1, 6), dtype=np.float32)\n meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}\n ret['meta'] = meta\n return ret"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.random.randn",
"numpy.random.randint",
"numpy.arange",
"numpy.clip",
"numpy.random.random"
]
] |
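Note: the BBoxTaskDataset row above builds CenterNet-style training targets, where each ground-truth box contributes a clipped Gaussian peak on its class heatmap plus size/offset regression targets at the center index. A minimal NumPy sketch of the Gaussian splatting step (an illustrative reimplementation, not the centernet_utils API; the sigma = diameter/6 choice and all shapes are assumptions):

import numpy as np

def draw_gaussian(heatmap, center, radius):
    # (2r+1)x(2r+1) kernel, with sigma tied to the radius
    diameter = 2 * radius + 1
    sigma = diameter / 6.0
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    gaussian = np.exp(-(x * x + y * y) / (2 * sigma * sigma))

    cx, cy = int(center[0]), int(center[1])
    h, w = heatmap.shape
    # clip the kernel at the heatmap borders
    left, right = min(cx, radius), min(w - cx, radius + 1)
    top, bottom = min(cy, radius), min(h - cy, radius + 1)
    roi = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    ker = gaussian[radius - top:radius + bottom, radius - left:radius + right]
    # element-wise max so overlapping objects do not erase each other
    np.maximum(roi, ker, out=roi)
    return heatmap

hm = np.zeros((128, 128), dtype=np.float32)
draw_gaussian(hm, center=(40, 60), radius=5)
print(hm.max())  # 1.0 at the object center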
jozhang97/Side-tuning
|
[
"dea345691fb7ee0230150fe56ddd644efdffa6ac",
"dea345691fb7ee0230150fe56ddd644efdffa6ac",
"dea345691fb7ee0230150fe56ddd644efdffa6ac",
"dea345691fb7ee0230150fe56ddd644efdffa6ac"
] |
[
"evkit/models/forward_inverse.py",
"evkit/utils/parallel.py",
"evkit/models/srl_architectures.py",
"tlkit/data/datasets/taskonomy_dataset.py"
] |
[
"from gym import spaces\nimport multiprocessing.dummy as mp\nimport multiprocessing\nimport numpy as np\nimport os\nimport torch\nimport torch\nimport torch.nn as nn\nfrom torch.nn import Parameter, ModuleList\nimport torch.nn.functional as F\n\nfrom evkit.rl.utils import init, init_normc_\nfrom evkit.utils.misc import is_cuda\nfrom evkit.preprocess import transforms\n\nimport pickle as pkl\n\ninit_ = lambda m: init(m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n nn.init.calculate_gain('relu'))\n\n################################\n# Inverse Models\n# Predict s_{t+1} | s_t, a_t\n################################\nclass ForwardModel(nn.Module):\n \n def __init__(self, state_shape, action_shape, hidden_size):\n super().__init__()\n self.fc1 = init_(nn.Linear(state_shape + action_shape[1], hidden_size))\n self.fc2 = init_(nn.Linear(hidden_size, state_shape))\n \n def forward(self, state, action):\n x = torch.cat([state, action], 1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n################################\n# Inverse Models\n# Predict a_t | s_t, s_{t+1}\n################################\nclass InverseModel(nn.Module):\n \n def __init__(self, input_size, hidden_size, output_size):\n super().__init__()\n self.fc1 = init_(nn.Linear(input_size * 2, hidden_size))\n # Note to stoip gradient\n self.fc2 = init_(nn.Linear(hidden_size, output_size))\n\n def forward(self, phi_t, phi_t_plus_1):\n x = torch.cat([phi_t, phi_t_plus_1], 1)\n x = F.relu(self.fc1(x))\n logits = self.fc2(x)\n return logits\n# ainvprobs = nn.softmax(logits, dim=-1)",
"import torch\nimport torch.nn as nn\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nclass _CustomDataParallel(nn.Module):\n def __init__(self, model, device_ids):\n super(_CustomDataParallel, self).__init__()\n self.model = nn.DataParallel(model, device_ids=device_ids)\n self.model.to(device)\n num_devices = torch.cuda.device_count() if device_ids is None else len(device_ids)\n print(f\"{type(model)} using {num_devices} GPUs!\")\n\n def forward(self, *input, **kwargs):\n return self.model(*input, **kwargs)\n\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model.module, name)",
"import torch.nn as nn\nfrom torch.nn import Parameter, ModuleList\nimport torch.nn.functional as F\nimport torch\nimport multiprocessing\nimport numpy as np\nimport os\nfrom gym import spaces\nfrom torchvision.models import resnet18\nfrom evkit.rl.utils import init, init_normc_\nfrom evkit.preprocess import transforms\nimport torchvision as vision\nfrom evkit.models.architectures import FrameStacked, Flatten, atari_conv\n\ninit_ = lambda m: init(m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n nn.init.calculate_gain('relu'))\n\nN_CHANNELS = 3\ndef getNChannels():\n return N_CHANNELS\n\n########################\n# SRL\n########################\n\n\nclass BaseModelSRL(nn.Module):\n \"\"\"\n Base Class for a SRL network\n It implements a getState method to retrieve a state from observations\n \"\"\"\n\n def __init__(self):\n super(BaseModelSRL, self).__init__()\n \n def getStates(self, observations):\n \"\"\"\n :param observations: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return self.forward(observations)\n\n def forward(self, x):\n raise NotImplementedError\n\n \n\n\n\nclass BaseModelAutoEncoder(BaseModelSRL):\n \"\"\"\n Base Class for a SRL network (autoencoder family)\n It implements a getState method to retrieve a state from observations\n \"\"\"\n def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512): \n super(BaseModelAutoEncoder, self).__init__()\n self.output_size = output_size\n self.n_frames = 4\n self.n_frames = n_frames\n self.output_size = output_size\n self.n_map_channels = n_map_channels\n self.use_target = use_target\n self.use_map = n_map_channels > 0\n\n if self.use_map:\n self.map_tower = nn.Sequential(\n atari_conv(self.n_frames * self.n_map_channels),\n nn.Conv2d(32, 64, kernel_size=4, stride=1), #, padding=3, bias=False),\n nn.ReLU(inplace=True),\n )\n\n if self.use_target:\n self.target_channels = 3\n else:\n self.target_channels = 0\n # Inspired by ResNet:\n # conv3x3 followed by BatchNorm2d\n self.encoder_conv = nn.Sequential(\n # 224x224xN_CHANNELS -> 112x112x64\n nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64\n\n conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64\n\n conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64\n )\n\n self.decoder_conv = nn.Sequential(\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 13x13x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 27x27x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 55x55x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 111x111x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, getNChannels(), kernel_size=4, stride=2), # 224x224xN_CHANNELS\n )\n self.encoder = FrameStacked(self.encoder_conv, self.n_frames)\n self.conv1 = nn.Conv2d(self.n_frames * (64 + self.target_channels), 64, 3, stride=1) # c4 s 4\n self.flatten = Flatten()\n self.fc1 = init_(nn.Linear(64 * 4 * 4 * (self.use_map) + 64 * 4 * 4 * (1), 1024))\n self.fc2 = init_(nn.Linear(1024, self.output_size))\n\n def getStates(self, observations):\n \"\"\"\n :param observations: 
(th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return self.encode(observations)\n\n def encode(self, x):\n \"\"\"\n :param x: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return self.encoder_conv(x)\n\n def decode(self, x):\n \"\"\"\n :param x: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return self.decoder_conv(x)\n\n def forward(self, x):\n \"\"\"\n :param x: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n x_taskonomy = x['taskonomy']\n if self.use_target:\n x_taskonomy = torch.cat([x_taskonomy, x[\"target\"]], dim=1)\n x_taskonomy = F.relu(self.conv1(x_taskonomy))\n if self.use_map:\n x_map = x['map']\n x_map = self.map_tower(x_map)\n x_taskonomy = torch.cat([x_map, x_taskonomy], dim=1)\n x = self.flatten(x_taskonomy)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return x\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"\n From PyTorch Resnet implementation\n 3x3 convolution with padding\n :param in_planes: (int)\n :param out_planes: (int)\n :param stride: (int)\n \"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\n\ndef srl_features_transform(task_path, dtype=np.float32):\n ''' rescale_centercrop_resize\n \n Args:\n output_size: A tuple CxWxH\n dtype: of the output (must be np, not torch)\n \n Returns:\n a function which takes 'env' and returns transform, output_size, dtype\n '''\n _rescale_thunk = transforms.rescale_centercrop_resize((3, 224, 224))\n if task_path != 'pixels_as_state':\n# net = TaskonomyEncoder().cuda()\n net = nn.Sequential(\n # 224x224xN_CHANNELS -> 112x112x64\n nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64\n\n conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64\n\n conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64\n ).cuda()\n\n net.eval()\n if task_path != 'None':\n checkpoint = torch.load(task_path)\n# checkpoint = {k.replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k}\n checkpoint = {k.replace('model.conv_layers.', '').replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k or 'conv_layers' in k}\n net.load_state_dict(checkpoint)\n\n def encode(x):\n if task_path == 'pixels_as_state':\n return x\n with torch.no_grad():\n return net(x)\n \n def _features_transform_thunk(obs_space):\n rescale, _ = _rescale_thunk(obs_space)\n def pipeline(x):\n# x = rescale(x).view(1, 3, 224, 224)\n x = torch.Tensor(x).cuda()\n x = encode(x)\n return x.cpu()\n if task_path == 'pixels_as_state':\n raise NotImplementedError\n# return pixels_as_state_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)\n else:\n return pipeline, spaces.Box(-1, 1, (64, 6, 6), dtype)\n \n return _features_transform_thunk\n\n",
"from collections import namedtuple, Counter, defaultdict\nfrom tlkit.data.sequential_tasks_dataloaders import ConcatenatedDataLoader, CyclingDataLoader, ErrorPassingConcatenatedDataLoader, ErrorPassingCyclingDataLoader\nfrom tlkit.utils import SINGLE_IMAGE_TASKS, TASKS_TO_CHANNELS\nimport torch\nimport torch.utils.data as utils\nimport torchvision.transforms as transforms\nimport torchvision.datasets as ds\nimport torch.utils.data as data\nfrom tqdm import tqdm\nfrom PIL import Image, ImageFile\nimport numpy as np\nimport os\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader\nimport warnings\n\nfrom tlkit.data.img_transforms import default_loader, get_transform\nfrom tlkit.data.splits import SPLIT_TO_NUM_IMAGES, taskonomy_no_midlevel as split_taskonomy_no_midlevel\n\nTRAIN_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['train']\nVAL_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['val']\nTEST_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['test']\n\n\nImageFile.LOAD_TRUNCATED_IMAGES = True # TODO Test this\n\n\nclass TaskonomyData(data.Dataset):\n '''\n Loads data for the Taskonomy dataset.\n This expects that the data is structured\n \n /path/to/data/\n rgb/\n modelk/\n point_i_view_j.png\n ... \n depth_euclidean/\n ... (other tasks)\n \n If one would like to use pretrained representations, then they can be added into the directory as:\n /path/to/data/\n rgb_encoding/\n modelk/\n point_i_view_j.npy\n ...\n \n Basically, any other folder name will work as long as it is named the same way.\n '''\n def __init__(self, data_path,\n tasks,\n buildings,\n transform=None,\n load_to_mem=False,\n zip_file_name=False,\n max_images=None):\n '''\n data_path: Path to data\n tasks: Which tasks to load. Any subfolder will work as long as data is named accordingly\n buildings: Which models to include. See `splits.taskonomy`\n transform: one transform per task.\n \n Note: This assumes that all images are present in all (used) subfolders\n '''\n self.return_tuple = True\n if isinstance(tasks, str):\n tasks = [tasks]\n transform = [transform] \n self.return_tuple = False\n \n self.buildings = buildings\n self.cached_data = {}\n self.data_path = data_path\n self.load_to_mem = load_to_mem\n self.tasks = tasks\n self.zip_file_name = zip_file_name\n\n self.urls = {task: make_dataset(os.path.join(data_path, task), buildings, max_images)\n for task in tasks}\n\n # Validate number of images\n n_images_task = [(len(obs), task) for task, obs in self.urls.items()]\n print(\"\\t\" + \" | \".join([\"{}: {}\".format(k, task) for task, k in n_images_task]))\n if max(n_images_task)[0] != min(n_images_task)[0]:\n print(\"Each task must have the same number of images. However, the max != min ({} != {}). 
Number of images per task is: \\n\\t{}\".format(\n max(n_images_task)[0], min(n_images_task)[0], \"\\n\\t\".join([str(t) for t in n_images_task])))\n\n # count number of frames per building per task\n all_buildings = defaultdict(dict)\n for task, obs in self.urls.items():\n c = Counter([url.split(\"/\")[-2] for url in obs])\n for building in c:\n all_buildings[building][task] = c[building]\n\n # find where the number of distinct counts is more than 1\n print('Removing data from the following buildings')\n buildings_to_remove = []\n for b, count in all_buildings.items():\n if len(set(list(count.values()))) > 1:\n print(f\"\\t{b}:\", count)\n buildings_to_remove.append(b)\n # [(len(obs), task) for task, obs in self.urls.items()]\n\n # redo the loading with fewer buildings\n buildings_redo = [b for b in buildings if b not in buildings_to_remove]\n self.urls = {task: make_dataset(os.path.join(data_path, task), buildings_redo)\n for task in tasks}\n n_images_task = [(len(obs), task) for task, obs in self.urls.items()]\n print(\"\\t\" + \" | \".join([\"{}: {}\".format(k, task) for task, k in n_images_task]))\n assert max(n_images_task)[0] == min(n_images_task)[0], \\\n \"Each task must have the same number of images. However, the max != min ({} != {}). Number of images per task is: \\n\\t{}\".format(\n max(n_images_task)[0], min(n_images_task)[0], \"\\n\\t\".join([str(t) for t in n_images_task]))\n self.size = max(n_images_task)[0]\n\n # Perhaps load some things into main memory\n if load_to_mem:\n print('Writing activations to memory')\n for t, task in zip(transform, tasks):\n self.cached_data[task] = [None] * len(self)\n for i, url in enumerate(self.urls[task]):\n self.cached_data[task][i] = t(default_loader(url))\n self.cached_data[task] = torch.stack(self.cached_data[task])\n# self.cached_data = torch.stack(self.cached_data)\n print('Finished writing some activations to memory')\n \n self.transform = transform\n\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, index):\n fpaths = [self.urls[task][index] for task in self.tasks]\n \n if self.load_to_mem:\n result = tuple([self.cached_data[task][index] for task in self.tasks])\n else:\n result = [default_loader(path) for path in fpaths]\n if self.transform is not None:\n # result = [transform(tensor) for transform, tensor in zip(self.transform, result)]\n result_post = []\n for i, (transform, tensor) in enumerate(zip(self.transform, result)):\n try:\n result_post.append(transform(tensor))\n except Exception as e:\n print(self.tasks[i], transform, tensor)\n raise e\n result = result_post\n\n # handle 2 channel outputs\n for i in range(len(self.tasks)):\n task = self.tasks[i]\n base_task = [t for t in SINGLE_IMAGE_TASKS if t in task]\n if len(base_task) == 0:\n continue\n else:\n base_task = base_task[0]\n num_channels = TASKS_TO_CHANNELS[base_task]\n if 'decoding' in task and result[i].shape[0] != num_channels:\n assert torch.sum(result[i][num_channels:,:,:]) < 1e-5, 'unused channels should be 0.'\n result[i] = result[i][:num_channels,:,:]\n\n if self.zip_file_name:\n result = tuple(zip(fpaths, result))\n\n if self.return_tuple:\n return result\n else:\n return result[0]\n\n \n\ndef make_dataset(dir, folders=None, max_images=None):\n # folders are building names. 
If None, get all the images (from both building folders and dir)\n has_reached_capacity = lambda images, max_images: not max_images is None and len(images) >= max_images\n images = []\n dir = os.path.expanduser(dir)\n if not os.path.isdir(dir):\n assert \"bad directory\"\n\n for subfolder in sorted(os.listdir(dir)):\n subfolder_path = os.path.join(dir, subfolder)\n if os.path.isdir(subfolder_path) and (folders is None or subfolder in folders):\n for fname in sorted(os.listdir(subfolder_path)):\n path = os.path.join(subfolder_path, fname)\n if not has_reached_capacity(images, max_images):\n images.append(path)\n\n # If folders/buildings are not specified, use images in dir\n if folders is None and os.path.isfile(subfolder_path) and not has_reached_capacity(images, max_images):\n images.append(subfolder_path)\n\n return images\n\n\ndef get_dataloaders(data_path,\n tasks,\n batch_size=64,\n batch_size_val=4,\n zip_file_name=False,\n train_folders=TRAIN_BUILDINGS,\n val_folders=VAL_BUILDINGS,\n test_folders=TEST_BUILDINGS,\n transform=None,\n num_workers=0,\n load_to_mem=False,\n pin_memory=False,\n max_images=None):\n \"\"\"\n :param data_path: directory that data is stored at\n :param tasks: names of subdirectories to return observations from\n :param batch_size:\n :param zip_file_name: when returning an observation, this will zip the fpath to it. E.g. (/path/to/img.png, OBS)\n :param train_folders: in a big data dir, which subfolders contain our training data\n :param val_folders: in a big data dir, which subfolders contain our val data\n :param max_images: maximum number of images in any dataset\n :return: dictionary of dataloaders\n \"\"\"\n\n if transform is None:\n if isinstance(tasks, str):\n transform = get_transform(tasks)\n else:\n transform = [get_transform(task) if len(task.split(' ')) == 1 else get_transform(*task.split(' ')) for task in tasks]\n tasks = [t.split(' ')[0] for t in tasks] # handle special data operations\n\n if isinstance(train_folders, str):\n train_folders = split_taskonomy_no_midlevel[train_folders]['train']\n if isinstance(val_folders, str):\n val_folders = split_taskonomy_no_midlevel[val_folders]['val']\n if isinstance(test_folders, str):\n test_folders = split_taskonomy_no_midlevel[test_folders]['test']\n\n\n dataloaders = {}\n print(f\"Taskonomy dataset TRAIN folders: {train_folders}\")\n dataset = TaskonomyData(data_path, tasks, buildings=train_folders,\n transform=transform, zip_file_name=zip_file_name,\n load_to_mem=load_to_mem, max_images=max_images)\n if len(dataset) == 0:\n print(f'\\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n dataloaders['train'] = dataloader\n\n print(f\"Taskonomy dataset VAL folders: {val_folders}\")\n dataset = TaskonomyData(data_path, tasks, buildings=val_folders,\n transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)\n\n if len(dataset) == 0:\n print(f'\\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')\n dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)\n dataloaders['val'] = dataloader\n\n print(f\"Taskonomy dataset TEST folders: {test_folders}\")\n dataset = TaskonomyData(data_path, tasks, buildings=test_folders,\n transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)\n if len(dataset) == 0:\n print(f'\\tNO IMAGES FOUND for 
tasks {tasks} at path {data_path}')\n dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)\n dataloaders['test'] = dataloader\n return dataloaders\n\n\ndef get_lifelong_dataloaders(data_path,\n sources,\n targets,\n masks,\n epochs_per_task=5,\n epochs_until_cycle=0,\n split='fullplus',\n batch_size=64,\n batch_size_val=4,\n transform=None,\n num_workers=0,\n load_to_mem=False,\n pin_memory=False,\n speedup_no_rigidity=False,\n max_images_per_task=None):\n\n phases = ['train', 'val', 'test']\n dataloaders = {phase: [] for phase in phases}\n\n if isinstance(masks, bool):\n masks = [masks] * len(sources)\n\n masks = [['mask_valid'] if mask else [] for mask in masks]\n\n for i, (source, target, mask) in enumerate(zip(sources, targets, masks)):\n print(f'# Task {i} dataloader: {source} -> {target}')\n tasks = source + target + mask\n dl = get_dataloaders(\n data_path,\n tasks,\n batch_size=batch_size,\n batch_size_val=batch_size_val,\n train_folders=split,\n val_folders=split,\n test_folders=split,\n transform=transform,\n num_workers=num_workers,\n load_to_mem=load_to_mem,\n pin_memory=pin_memory,\n max_images=max_images_per_task,\n )\n for phase in phases:\n dataloaders[phase].append(dl[phase])\n\n if speedup_no_rigidity:\n # For methods that do not forget (no intransigence) by construction.\n # In validation, we only compute task performance for just-trained task and next-to-be-trained task\n epoch_lengths = [len(dl.dataset) for dl in dataloaders['val']]\n epoch_length = min(epoch_lengths) if min(epoch_lengths) == max(epoch_lengths) else None\n\n dl_just_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=1, start_dl=0,\n epoch_length_per_dl=epoch_length)\n dl_next_to_be_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=0, start_dl=0,\n epoch_length_per_dl=epoch_length)\n dataloaders['val'] = ErrorPassingConcatenatedDataLoader([dl_just_trained, dl_next_to_be_trained], zip_idx=False)\n else:\n dataloaders['val'] = ErrorPassingConcatenatedDataLoader(dataloaders['val'])\n\n train_epoch_length = SPLIT_TO_NUM_IMAGES[split] if split is not None else min([len(dl.dataset) for dl in dataloaders['train']])\n dataloaders['train'] = ErrorPassingCyclingDataLoader(dataloaders['train'], epoch_length_per_dl=epochs_per_task * train_epoch_length, epochs_until_cycle=epochs_until_cycle)\n dataloaders['test'] = ErrorPassingConcatenatedDataLoader(dataloaders['test'])\n return dataloaders\n\n\n\n\n"
] |
[
[
"torch.nn.init.calculate_gain",
"torch.cat",
"torch.nn.Linear",
"torch.nn.init.constant_"
],
[
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.cuda.device_count"
],
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load",
"torch.nn.init.calculate_gain",
"torch.Tensor"
],
[
"torch.stack",
"torch.utils.data.DataLoader",
"torch.sum"
]
] |
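Note: the forward_inverse.py row above defines a ForwardModel that regresses s_{t+1} from (s_t, a_t). A self-contained sketch of one training step under that setup (dimensions, optimizer, and the dummy data are assumptions, not taken from the repo):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ForwardModel(nn.Module):
    def __init__(self, state_dim, action_dim, hidden_size):
        super().__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, state_dim)

    def forward(self, state, action):
        x = torch.cat([state, action], dim=1)
        return self.fc2(F.relu(self.fc1(x)))

model = ForwardModel(state_dim=8, action_dim=2, hidden_size=64)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

s_t = torch.randn(32, 8)     # batch of current states
a_t = torch.randn(32, 2)     # batch of actions
s_next = torch.randn(32, 8)  # observed next states (dummy targets)

pred = model(s_t, a_t)
loss = F.mse_loss(pred, s_next)  # regress onto the next state
opt.zero_grad()
loss.backward()
opt.step()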
Bullldoger/NLA--project
|
[
"05e7a39ca43b6eea7e74ad62ea7de445414e1a2b"
] |
[
"notebooks/models.py"
] |
[
"import numpy as np\nimport scipy as scp\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.linalg import svds\nfrom collections import Counter\nfrom nltk.corpus import stopwords\n\nclass Word2Vec(object):\n \n def __init__(self, sentences):\n \"\"\"\n sentences -- preprocessed sentences of reviews\n vocab -- vocabulary of a corpus words; {words: index}\n D -- word-context co-occurence matrix\n W -- matrix of words embeddings\n C -- matrix of contexts embeddings\n d -- dimension of words and reviews embeddings\n \"\"\"\n\n self.sentences = sentences\n self.vocab = None\n self.D = None\n self.W = None\n self.C = None\n self.d = 200\n self.Wt = None\n self.Ct = None\n \n ###################### DATA PROCESSING ######################\n \n ###### Create vocabulary from given sentences ######\n \n def create_vocabulary(self, r=200):\n \"\"\"\n r -- word occurence treshold\n \"\"\"\n \n self.vocab = dict()\n word_count = dict()\n idx = 0\n \n print('Creating vocabulary')\n for sentence in self.sentences:\n for word in sentence:\n if word not in word_count:\n word_count[word] = 1\n else:\n word_count[word] += 1\n\n for word, count in word_count.items():\n if word_count[word] >= r:\n self.vocab[word] = idx\n idx += 1\n \n \n \n ###### Create word-context co-occurence matrix ######\n \n def create_corpus_matrix(self, L=2):\n \"\"\"\n L -- size of the sliding window\n \"\"\"\n \n print('Creating corpus matrix')\n # initialization\n words_counts = Counter()\n for sentence_index, sentence in enumerate(self.sentences):\n for word_index, word in enumerate(sentence):\n if word in self.vocab:\n around_indexes = [i for i in range(max(word_index - L, 0), \n min(word_index + L + 1, len(sentence))) \n if i != word_index]\n for occur_word_index in around_indexes:\n occur_word = sentence[occur_word_index]\n if occur_word in self.vocab:\n skipgram = (word, occur_word)\n if skipgram in words_counts:\n words_counts[skipgram] += 1\n else:\n words_counts[skipgram] = 1\n rows = list()\n cols = list()\n values = list()\n\n\n for (word_1, word_2), sharp in words_counts.items(): \n rows.append(self.vocab[word_1])\n cols.append(self.vocab[word_2])\n values.append(sharp)\n\n self.D = scp.sparse.csr_matrix((values, (rows, cols)))\n \n \n ###################### AUXILARY FUNCTIONS ######################\n \n ###### Sigmoid ######\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n \n ##### Loss function #####\n def loss(self, k):\n wc_ = self.D.sum()\n w_ = np.array(self.D.sum(axis=1))\n c_ = np.array(self.D.sum(axis=0))\n loss = self.D.toarray() * np.log(self.sigmoid(self.X)) + (k * w_ * c_ / wc_) * np.log(self.sigmoid(-self.X))\n return np.sum(loss)\n \n ###### Gradient of the objective function ######\n def grad(self, k):\n wc_ = self.D.sum()\n w_ = np.array(self.D.sum(axis=1))\n c_ = np.array(self.D.sum(axis=0))\n gr = self.D.toarray() * self.sigmoid(-self.X) - (k * w_ * c_ / wc_) * self.sigmoid(self.X)\n return gr\n\n ###################### DIFFERENT METHODS FOR WORD EMBEDDINGS COMPUTATION ######################\n \n ###### Create matrix of words embeddings by IMF ###### \n def compute_embedds_IMF(self, k, alpha=.5):\n \"\"\"\n k -- negative sampling hyperparameter\n alpha -- hyperparameter for matrix representation\n \"\"\"\n print('Computing of words embeddings')\n all_observations = self.D.sum()\n\n rows = []\n cols = []\n sppmi_values = []\n\n sum_over_words = np.array(self.D.sum(axis=0)).flatten()\n sum_over_contexts = np.array(self.D.sum(axis=1)).flatten()\n\n for word_index_1, word_index_2 in 
zip(self.D.nonzero()[0], \n self.D.nonzero()[1]):\n sg_count = self.D[word_index_1, word_index_2]\n\n pwc = sg_count\n pw = sum_over_contexts[word_index_1]\n pc = sum_over_words[word_index_2]\n\n spmi_value = np.log2(pwc * all_observations / (pw * pc * k))\n sppmi_value = max(spmi_value, 0)\n\n rows.append(word_index_1)\n cols.append(word_index_2)\n sppmi_values.append(sppmi_value)\n\n sppmi_mat = scp.sparse.csr_matrix((sppmi_values, (rows, cols)))\n U, S, V = scp.sparse.linalg.svds(sppmi_mat, self.d)\n self.W = U @ np.diag(np.power(S, alpha))\n self.C = np.diag(np.power(S, alpha)) @ V\n self.X = self.W @ self.C\n \n # SGNS objective\n print(\"Value of the SGNS's objective: \", self.loss(k))\n \n ###### Create matrix of words embeddings by Riemannian optimization ######\n def compute_embedds_riem(self, k, step=5e-5, max_iter=20, alpha=.5):\n self.X = self.W @ self.C\n U, S, Vt = np.linalg.svd(self.X, full_matrices=False)\n U, S, Vt = U[:, :self.d], S[:self.d], Vt[:self.d, :]\n \n for i in range(max_iter):\n print(f\"Value of the SGNS's objective on the {i} iteration: \\n {self.loss(k)}\")\n grad_step = self.X + step * self.grad(k)\n U, S = np.linalg.qr(grad_step @ Vt.T)\n V, St = np.linalg.qr(grad_step.T @ U)\n self.X = U @ S @ V.T\n \n U_, S_, Vt_ = np.linalg.svd(self.X)\n U_, S_, Vt_ = U_[:, :self.d], S_[:self.d], Vt_[:self.d, :]\n self.W = U_ @ np.power(np.diag(S_), alpha)\n \n ###### Create matrix of words embeddings by EMF (AMEMF) ###### \n def compute_embedds_EMF(self, k, step=1e-3, max_iter=50, eps=1e-3, iters=20):\n \"\"\"\n k -- negative sampling hyperparameter\n \"\"\"\n # initialization\n m = len(self.vocab)\n self.Wt = np.random.rand(self.d, m)\n self.Ct = np.random.rand(self.d, m)\n Wt_prvs = np.zeros(self.Wt.shape)\n Ct_prvs = np.zeros(self.Ct.shape)\n \n wc_ = self.D.sum()\n w_ = np.array(self.D.sum(axis=1))\n c_ = np.array(self.D.sum(axis=0))\n Q = self.D.toarray() + k * w_ * c_ / wc_\n \n error = lambda M, M_prvs: np.linalg.norm(M - M_prvs)\n \n for i in range(max_iter):\n print(f'{i} iteration')\n # minimize over C\n \n for j in range(iters):\n Wt_prvs = self.Wt\n E = Q * self.sigmoid(self.Ct.T @ self.Wt)\n self.Wt = self.Wt - step * self.Ct @ (E - self.D.toarray())\n print(error(self.Wt, Wt_prvs))\n \n \"\"\"if error(self.Wt, Wt_prvs) <= eps:\n break\"\"\"\n print('First loop finished')\n \n # minimize over W\n \n for j in range(iters):\n Ct_prvs = self.Ct\n E = Q * self.sigmoid(self.Ct.T @ self.Wt)\n self.Ct = self.Ct - (step * (E - self.D.toarray()) @ self.Wt.T).T\n \n \"\"\"if error(self.Ct, Ct_prvs) > eps:\n break\"\"\"\n print('Second loop finished')\n \n ###### Get vector embedding for a given word ######\n def get_word_embedding(self, word):\n if word in self.vocab:\n idx = self.vocab[word]\n return self.W[idx, :]\n else:\n print('This word is not in the vocabulary')\n \n ###### Get vector embedding for a given word ######\n def get_word_embedding2(self, word):\n if word in self.vocab:\n idx = self.vocab[word]\n return self.Wt.T[idx, :]\n else:\n print('This word is not in the vocabulary')\n \n ###################### REVIEW EMBEDDINGS ######################\n \n ##### Compute review embeddings #####\n def get_review_embedding(self, review):\n \"\"\"\n review -- current review to be embedded\n \"\"\"\n\n review_vec = np.zeros(self.d)\n words_count = 0\n stops = set(stopwords.words(\"english\"))\n\n for word in review:\n if (word in self.vocab) and not (word in stops):\n review_vec += self.get_word_embedding(word)\n words_count += 1\n review_vec /= words_count\n 
return review_vec\n \n ##### Compute review embeddings #####\n def get_review_embedding2(self, review):\n \"\"\"\n review -- current review to be embedded\n \"\"\"\n\n review_vec = np.zeros(self.d)\n words_count = 0\n stops = set(stopwords.words(\"english\"))\n\n for word in review:\n if (word in self.vocab) and not (word in stops):\n review_vec += self.get_word_embedding2(word)\n words_count += 1\n review_vec /= words_count\n return review_vec\n \n ##### Create matrix 'embeddings-reviews' #####\n def get_features_matrix(self, reviews):\n \"\"\"\n reviews -- the whole collection of reviews\n \"\"\"\n X = np.zeros((len(reviews), self.d))\n for idx, review in enumerate(reviews):\n X[idx, :] = self.get_review_embedding(review)\n return X \n \n def get_features_matrix2(self, reviews):\n \"\"\"\n reviews -- the whole collection of reviews\n \"\"\"\n X = np.zeros((len(reviews), self.d))\n for idx, review in enumerate(reviews):\n X[idx, :] = self.get_review_embedding2(review)\n return X "
] |
[
[
"scipy.sparse.linalg.svds",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.zeros",
"numpy.sum",
"numpy.exp",
"numpy.linalg.qr",
"numpy.power",
"numpy.linalg.svd",
"scipy.sparse.csr_matrix",
"numpy.diag",
"numpy.log2"
]
] |
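Note: the Word2Vec class above factorizes the SGNS objective three ways; its IMF path builds a shifted positive PMI (SPPMI) matrix from co-occurrence counts and takes a truncated SVD. A compact sketch of that pipeline on a synthetic co-occurrence matrix (sizes, density, and k are arbitrary assumptions):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds

D = sp.random(50, 50, density=0.2, random_state=0, format="csr")
D = D + D.T                      # symmetric co-occurrence counts

k, d, alpha = 5.0, 10, 0.5       # negative samples, embedding dim, spectrum power
total = D.sum()
w = np.asarray(D.sum(axis=1)).ravel()
c = np.asarray(D.sum(axis=0)).ravel()

rows, cols = D.nonzero()
vals = np.asarray(D[rows, cols]).ravel()
# SPPMI: max(log(#(w,c) * |D| / (#w * #c * k)), 0)
sppmi = np.maximum(np.log(vals * total / (w[rows] * c[cols] * k)), 0.0)
M = sp.csr_matrix((sppmi, (rows, cols)), shape=D.shape)

U, S, Vt = svds(M, k=d)
W = U @ np.diag(S ** alpha)      # word embeddings, as in compute_embedds_IMF
print(W.shape)                   # (50, 10)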
pazeshun/jsk_apc
|
[
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde",
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde",
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde"
] |
[
"demos/instance_occlsegm/instance_occlsegm_lib/datasets/apc/apc2016/mit.py",
"demos/selective_dualarm_stowing/node_scripts/alex_proba_estimation.py",
"demos/instance_occlsegm/examples/instance_occlsegm/instance_to_semantic/sample_roi_unpooling_2d.py"
] |
[
"import itertools\nimport os\nimport os.path as osp\n\nimport chainer\nimport numpy as np\nimport skimage.io\ntry:\n from sklearn.model_selection import train_test_split\nexcept ImportError:\n from sklearn.cross_validation import train_test_split\n\nfrom .base import class_names_apc2016\nimport instance_occlsegm_lib.data\nimport instance_occlsegm_lib.image\n\n\ndef ids_from_scene_dir(scene_dir, empty_scene_dir):\n for i_frame in itertools.count():\n empty_file = osp.join(\n empty_scene_dir, 'frame-{:06}.color.png'.format(i_frame))\n rgb_file = osp.join(\n scene_dir, 'frame-{:06}.color.png'.format(i_frame))\n segm_file = osp.join(\n scene_dir, 'segm/frame-{:06}.segm.png'.format(i_frame))\n if not (osp.exists(rgb_file) and osp.exists(segm_file)):\n break\n data_id = (empty_file, rgb_file, segm_file)\n yield data_id\n\n\ndef bin_id_from_scene_dir(scene_dir):\n caminfo = open(osp.join(scene_dir, 'cam.info.txt')).read()\n loc = caminfo.splitlines()[0].split(': ')[-1]\n if loc == 'shelf':\n bin_id = caminfo.splitlines()[1][-1]\n else:\n bin_id = 'tote'\n return bin_id\n\n\nclass MitAPC2016Dataset(chainer.dataset.DatasetMixin):\n\n class_names = class_names_apc2016\n datasets_dir = osp.expanduser('~/data/datasets/APC2016')\n\n def __init__(self, split, locations=('shelf', 'tote')):\n assert split in ['all', 'train', 'valid']\n self.split = split\n assert all(loc in ['shelf', 'tote'] for loc in locations)\n self._locations = locations\n self.dataset_dir = osp.join(self.datasets_dir, 'benchmark')\n if not osp.exists(self.dataset_dir):\n self.download()\n self._init_ids()\n\n def __len__(self):\n return len(self._ids[self.split])\n\n def _init_ids(self):\n data_ids = []\n # office\n contain_dir = osp.join(self.dataset_dir, 'office/test')\n for loc in self._locations:\n loc_dir = osp.join(contain_dir, loc)\n data_ids += self._get_ids_from_loc_dir('office', loc_dir)\n # warehouse\n contain_dir = osp.join(self.dataset_dir, 'warehouse')\n for sub in ['practice', 'competition']:\n sub_contain_dir = osp.join(contain_dir, sub)\n for loc in self._locations:\n loc_dir = osp.join(sub_contain_dir, loc)\n data_ids += self._get_ids_from_loc_dir('warehouse', loc_dir)\n ids_train, ids_valid = train_test_split(\n data_ids, test_size=0.25, random_state=5)\n self._ids = {'all': data_ids, 'train': ids_train, 'valid': ids_valid}\n\n def _get_ids_from_loc_dir(self, env, loc_dir):\n assert env in ('office', 'warehouse')\n loc = osp.basename(loc_dir)\n data_ids = []\n for scene_dir in os.listdir(loc_dir):\n scene_dir = osp.join(loc_dir, scene_dir)\n bin_id = bin_id_from_scene_dir(scene_dir)\n empty_dir = osp.join(\n self.dataset_dir, env, 'empty', loc, 'scene-{}'.format(bin_id))\n data_ids += list(ids_from_scene_dir(scene_dir, empty_dir))\n return data_ids\n\n def _load_from_id(self, data_id):\n empty_file, rgb_file, segm_file = data_id\n img = skimage.io.imread(rgb_file)\n img_empty = skimage.io.imread(empty_file)\n # Label value is multiplied by 9:\n # ex) 0: 0/6=0 (background), 54: 54/6=9 (dasani_bottle_water)\n lbl = skimage.io.imread(segm_file, as_gray=True) / 6\n lbl = lbl.astype(np.int32)\n # infer bin mask\n mask_fg = lbl > 0\n if mask_fg.sum() == 0:\n lbl[...] 
= -1\n else:\n y1, x1, y2, x2 = instance_occlsegm_lib.image.masks_to_bboxes(\n [mask_fg])[0]\n mask_bin = np.zeros_like(mask_fg)\n mask_bin[y1:y2, x1:x2] = True\n lbl[~mask_bin] = -1\n # copy object region in rgb image\n img_empty[mask_fg] = img[mask_fg]\n return img_empty, lbl\n\n def __getitem__(self, i):\n data_id = self._ids[self.split][i]\n img, lbl = self._load_from_id(data_id)\n return img, lbl\n\n def download(self):\n # XXX: this is optional\n # path = osp.join(self.datasets_dir, 'APC2016mit_training.zip')\n # fcn.data.cached_download(\n # url='https://drive.google.com/uc?id=0B4mCa-2YGnp7ZEMwcW5rcVBpeG8', # NOQA\n # path=path,\n # )\n instance_occlsegm_lib.data.download(\n url='https://drive.google.com/uc?id=0B9P1L--7Wd2vZHlSQjJSV0x4eXc',\n path=osp.join(self.datasets_dir, 'APC2016mit_benchmark.zip'),\n md5='bdb56b152a7cec0e652898338e519e79',\n postprocess=instance_occlsegm_lib.data.extractall,\n )\n",
"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport chainer.serializers as S\nfrom chainer import Variable\nimport numpy as np\n\nimport cv_bridge\nfrom jsk_recognition_msgs.msg import ClassificationResult\nfrom jsk_topic_tools import ConnectionBasedTransport\nfrom jsk_topic_tools.log_utils import logerr_throttle\nimport message_filters\nimport rospy\nfrom sensor_msgs.msg import Image\n\n\nclass AlexBatchNormalization(chainer.Chain):\n def __init__(self, n_class=1000):\n super(AlexBatchNormalization, self).__init__(\n conv1=L.Convolution2D(3, 96, 11, stride=4, pad=4),\n bn1=L.BatchNormalization(96),\n conv2=L.Convolution2D(96, 256, 5, stride=1, pad=1),\n bn2=L.BatchNormalization(256),\n conv3=L.Convolution2D(256, 384, 3, stride=1, pad=1),\n conv4=L.Convolution2D(384, 384, 3, stride=1, pad=1),\n conv5=L.Convolution2D(384, 256, 3, stride=1, pad=1),\n bn5=L.BatchNormalization(256),\n fc6=L.Linear(33280, 4096),\n fc7=L.Linear(4096, 4096),\n fc8=L.Linear(4096, n_class),\n )\n self.train = False\n\n def __call__(self, x, t=None):\n h = F.relu(self.bn1(self.conv1(x), test=not self.train))\n h = F.max_pooling_2d(h, 3, stride=2)\n h = F.relu(self.bn2(self.conv2(h), test=not self.train))\n h = F.max_pooling_2d(h, 3, stride=2)\n h = F.relu(self.conv3(h))\n h = F.relu(self.conv4(h))\n h = F.relu(self.bn5(self.conv5(h)))\n h = F.max_pooling_2d(h, 3, stride=3)\n h = F.dropout(F.relu(self.fc6(h)), train=self.train)\n h = F.dropout(F.relu(self.fc7(h)), train=self.train)\n h = self.fc8(h)\n self.proba = F.sigmoid(h)\n\n if t is None:\n assert not self.train\n return\n\n self.loss = F.softmax_cross_entropy(h, t)\n self.acc = F.accuracy(self.pred, t)\n if self.train:\n return self.loss\n\n\nclass AlexProbaEstimation(ConnectionBasedTransport):\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self.gpu = rospy.get_param('~gpu', -1)\n model_h5 = rospy.get_param('~model_h5')\n self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n self.target_names = rospy.get_param('~target_names')\n self.model = AlexBatchNormalization(n_class=len(self.target_names))\n S.load_hdf5(model_h5, self.model)\n if self.gpu != -1:\n self.model.to_gpu(self.gpu)\n self.pub = self.advertise('~output', ClassificationResult,\n queue_size=1)\n self.pub_input = self.advertise(\n '~debug/net_input', Image, queue_size=1)\n\n def subscribe(self):\n # larger buff_size is necessary for taking time callback\n # http://stackoverflow.com/questions/26415699/ros-subscriber-not-up-to-date/29160379#29160379 # NOQA\n sub = message_filters.Subscriber(\n '~input', Image, queue_size=1, buff_size=2**24)\n sub_mask = message_filters.Subscriber(\n '~input/mask', Image, queue_size=1, buff_size=2**24)\n self.subs = [sub, sub_mask]\n queue_size = rospy.get_param('~queue_size', 10)\n if rospy.get_param('~approximate_sync', False):\n slop = rospy.get_param('~slop', 0.1)\n sync = message_filters.ApproximateTimeSynchronizer(\n self.subs, queue_size=queue_size, slop=slop)\n else:\n sync = message_filters.TimeSynchronizer(\n self.subs, queue_size=queue_size)\n sync.registerCallback(self._recognize)\n\n def unsubscribe(self):\n for sub in self.subs:\n sub.unregister()\n\n def _recognize(self, imgmsg, mask_msg=None):\n bridge = cv_bridge.CvBridge()\n bgr = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='bgr8')\n if mask_msg is not None:\n mask = 
bridge.imgmsg_to_cv2(mask_msg)\n if mask.shape != bgr.shape[:2]:\n logerr_throttle(10,\n 'Size of input image and mask is different')\n return\n elif mask.size == 0:\n logerr_throttle(10, 'Size of input mask is 0')\n return\n bgr[mask < 128] = self.mean_bgr\n input_msg = bridge.cv2_to_imgmsg(bgr.astype(np.uint8), encoding='bgr8')\n input_msg.header = imgmsg.header\n self.pub_input.publish(input_msg)\n\n blob = (bgr - self.mean_bgr).transpose((2, 0, 1))\n x_data = np.array([blob], dtype=np.float32)\n if self.gpu != -1:\n x_data = chainer.cuda.to_gpu(x_data, device=self.gpu)\n x = Variable(x_data, volatile=True)\n\n self.model.train = False\n self.model(x)\n\n proba = chainer.cuda.to_cpu(self.model.proba.data)[0]\n cls_msg = ClassificationResult(\n header=imgmsg.header,\n labels=None,\n label_names=None,\n label_proba=None,\n probabilities=proba,\n target_names=self.target_names)\n self.pub.publish(cls_msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('alex_proba_estimation')\n app = AlexProbaEstimation()\n rospy.spin()\n",
"#!/usr/bin/env python\n\nimport os\nimport os.path as osp\n\nimport numpy as np\n\nimport instance_occlsegm_lib\nfrom instance_occlsegm_lib.contrib import instance_occlsegm\n\n\ndef get_data():\n dataset = instance_occlsegm_lib.datasets.apc.\\\n ARC2017InstanceSegmentationDataset(split='train')\n\n img, bboxes, labels, masks = dataset[0]\n fg_class_names = dataset.class_names\n class_names = tuple(['__background__'] + list(fg_class_names))\n n_fg_class = len(fg_class_names)\n\n n_instance = len(bboxes)\n mask_n_classes = []\n for i in range(n_instance):\n bbox = bboxes[i]\n label = labels[i]\n mask = masks[i]\n\n y1, x1, y2, x2 = bbox.astype(int)\n\n mask = mask[y1:y2, x1:x2]\n fg = mask.astype(bool)\n mask = mask.astype(np.float32)\n mask[fg] = np.random.uniform(0.75, 0.95, size=fg.sum())\n mask[~fg] = np.random.uniform(0.05, 0.25, size=(~fg).sum())\n mask = instance_occlsegm_lib.image.resize(mask, height=14, width=14)\n\n mask_n_class = np.zeros((n_fg_class, 14, 14))\n mask_n_class = mask_n_class.astype(np.float32)\n mask_n_class[label] = mask\n mask_n_classes.append(mask_n_class)\n mask_n_classes = np.asarray(mask_n_classes)\n\n return img, bboxes, labels, mask_n_classes, class_names\n\n\ndef main():\n out_dir = 'logs/sample_roi_unpooling_2d'\n try:\n os.makedirs(out_dir)\n except OSError:\n pass\n\n img, bboxes, labels, masks, class_names = get_data()\n\n x = masks\n outh, outw = img.shape[:2]\n rois = bboxes.astype(np.float32)\n roi_indices = np.zeros((len(rois), 1), dtype=np.float32)\n indices_and_rois = np.hstack((roi_indices, rois))\n\n y = instance_occlsegm.functions.roi_unpooling_2d(\n x,\n indices_and_rois,\n outb=1,\n outh=outh,\n outw=outw,\n spatial_scale=1,\n axes='yx',\n )\n y = y[0].array\n\n imgs = []\n for yi in y:\n # print(yi.min(), yi.max())\n imgs.append(instance_occlsegm_lib.image.colorize_heatmap(yi))\n viz = instance_occlsegm_lib.image.tile(imgs, boundary=True)\n instance_occlsegm_lib.io.imsave(osp.join(out_dir, '001.jpg'), viz)\n\n proba = y\n max_proba = proba.max(axis=0)\n viz = instance_occlsegm_lib.image.colorize_depth(max_proba)\n instance_occlsegm_lib.io.imsave(osp.join(out_dir, '002.jpg'), viz)\n bg = max_proba < 0.5\n lbl = np.argmax(proba, axis=0) + 1\n lbl[bg] = 0\n\n viz = instance_occlsegm_lib.image.label2rgb(\n lbl, img=img, label_names=class_names)\n instance_occlsegm_lib.io.imsave(osp.join(out_dir, '003.jpg'), viz)\n\n print('Write to:', out_dir)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.zeros_like",
"sklearn.cross_validation.train_test_split"
],
[
"numpy.array"
],
[
"numpy.hstack",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros"
]
] |
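Note: mit.py above splits its (empty, rgb, segm) frame triples into train/valid sets with a fixed random seed. A tiny sketch of just that splitting pattern (the file names are placeholders):

from sklearn.model_selection import train_test_split

data_ids = [('empty_%06d.png' % i, 'rgb_%06d.png' % i, 'segm_%06d.png' % i)
            for i in range(40)]
ids_train, ids_valid = train_test_split(data_ids, test_size=0.25, random_state=5)
ids = {'all': data_ids, 'train': ids_train, 'valid': ids_valid}
print(len(ids['train']), len(ids['valid']))  # 30 10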
uchikun2493/nn_modules
|
[
"ad3486b842fc543561d39227de5daaa475d3513a"
] |
[
"samples/make_dataset.py"
] |
[
"import numpy as np\n\n# irisデータセットの読み込み\n# num_train: 学習データ数(残りはテストデータ)\n# random: ランダムに抽出するか\ndef load_iris(num_train=100, random=True):\n\n from sklearn.datasets import load_iris\n iris = load_iris()\n data = iris.data.astype(np.float32)\n label = iris.target.astype(np.int64)\n\n if random:\n perm = np.random.permutation(data.shape[0])\n a = perm[0:num_train]\n b = perm[num_train:]\n else:\n number = [i for i in range(len(data))]\n a = number[0:num_train]\n b = number[num_train:]\n\n train_data = data[a, :]\n train_teach = label[a]\n test_data = data[b, :]\n test_teach = label[b]\n\n return train_data, train_teach, test_data, test_teach\n\n"
] |
[
[
"numpy.random.permutation",
"sklearn.datasets.load_iris"
]
] |
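Note: a usage sketch for the load_iris helper above (the import path mirrors samples/make_dataset.py and is hypothetical; it assumes the repo root is on sys.path and scikit-learn is installed):

from samples.make_dataset import load_iris

x_train, t_train, x_test, t_test = load_iris(num_train=100, random=True)
print(x_train.shape, t_train.shape)  # (100, 4) (100,)
print(x_test.shape, t_test.shape)    # (50, 4) (50,)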
ishine/neurst
|
[
"2ba322393fcfed4261b33f4a657e12bbe321baaa",
"2ba322393fcfed4261b33f4a657e12bbe321baaa",
"2ba322393fcfed4261b33f4a657e12bbe321baaa"
] |
[
"examples/prune_tune/src/partial_trainer.py",
"tests/neurst/layers/decoders/transformer_decoder_test.py",
"examples/prune_tune/src/mask_sequence_generator.py"
] |
[
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport pickle\nfrom distutils.version import LooseVersion\n\nimport tensorflow as tf\nfrom absl import logging\n\nfrom examples.prune_tune.src.partial_tuning_optimizer import create_partial_tuning_optimizer\nfrom neurst.data.dataset_utils import map_data_for_keras\nfrom neurst.data.datasets.multiple_dataset import MultipleDataset\nfrom neurst.exps import register_exp\nfrom neurst.exps.trainer import Trainer\nfrom neurst.models.model_utils import summary_model_variables\nfrom neurst.optimizers.schedules import build_lr_schedule\nfrom neurst.sparsity.pruning_optimizer import create_pruning_optimizer\nfrom neurst.training import CustomCheckpointCallback, LearningRateScheduler, MetricReductionCallback, training_utils\nfrom neurst.training.gradaccum_keras_model import GradAccumKerasModel\nfrom neurst.utils import compat\nfrom neurst.utils.flags_core import Flag\n\n\n@register_exp(\"prune_tune_train\")\nclass PruneTuneTrainer(Trainer):\n \"\"\" Trainer for all tasks. \"\"\"\n\n def __init__(self, args, **kwargs):\n \"\"\" Initializes a util class for training neural models. \"\"\"\n super(PruneTuneTrainer, self).__init__(args, **kwargs)\n if args[\"mask_pkl\"]:\n with tf.io.gfile.GFile(args[\"mask_pkl\"], 'rb') as f:\n self.load_mask = pickle.load(f)\n else:\n self.mask_dir = os.path.join(self.model_dir, \"mask.pkl\")\n self.load_mask = None\n self._partial_tuning = args[\"partial_tuning\"]\n\n @staticmethod\n def class_or_method_args():\n this_args = super(PruneTuneTrainer, PruneTuneTrainer).class_or_method_args()\n this_args.extend(\n [Flag(\"partial_tuning\", dtype=Flag.TYPE.BOOLEAN, default=False,\n help=\"Train partial weights according to mask\"),\n Flag(\"mask_pkl\", dtype=Flag.TYPE.STRING, default=None,\n help=\"The file to the masks\")])\n return this_args\n\n def run(self):\n \"\"\" Training a neural model.\n\n Step 1: Create training model\n Step 2: Restore checkpoint/pretrain model/global_step if exists.\n Step 3: Fetch training data.\n Step 5: Fetch training training.\n Step 6: TRAIN!!!\n \"\"\"\n if self._hvd_backend == \"horovod\":\n import horovod.tensorflow.keras as hvd\n elif self._hvd_backend == \"byteps\":\n import byteps.tensorflow.keras as hvd\n\n tfds = training_utils.build_datasets(compat.ModeKeys.TRAIN, self.strategy,\n self.custom_dataset, self.task)\n if isinstance(self.custom_dataset, MultipleDataset):\n _tfds = None\n for _, ds in tfds.items():\n if _tfds is None:\n _tfds = ds\n else:\n _tfds = _tfds.concatenate(ds)\n tfds = _tfds\n tfds = tfds.prefetch(tf.data.experimental.AUTOTUNE)\n # Step 1: create a model\n with training_utils.get_strategy_scope(self.strategy):\n inps = self.task.create_inputs(compat.ModeKeys.TRAIN)\n formatted_inps = self.task.example_to_input(inps, compat.ModeKeys.TRAIN)\n model_out = self.model(formatted_inps, is_training=True)\n for metric_layer in self.task.build_metric_layer():\n model_out = metric_layer([formatted_inps, model_out])\n if 
(LooseVersion(tf.__version__) < LooseVersion(\"2.3\")\n or LooseVersion(tf.__version__) >= LooseVersion(\"2.5\")):\n logging.info(f\"Warning: Need further check on AccumgradKerasModel when TF version={tf.__version__}. \"\n f\"Here we ignore update_cycle={self._update_cycle}, \"\n f\"clip_value={self._clip_value}, clip_norm={self._clip_norm}.\")\n keras_model = tf.keras.Model(inps, model_out)\n elif compat.IS_PREV_TF_2_4_0:\n from neurst.training.gradaccum_keras_model import TF23GradAccumKerasModel\n keras_model = TF23GradAccumKerasModel(inps, model_out,\n update_cycle=self._update_cycle,\n clip_value=self._clip_value,\n clip_norm=self._clip_norm,\n freeze_variables=self._freeze_variables)\n else:\n keras_model = GradAccumKerasModel(inps, model_out,\n update_cycle=self._update_cycle,\n clip_value=self._clip_value,\n clip_norm=self._clip_norm,\n freeze_variables=self._freeze_variables)\n\n loss = self._criterion.reduce_loss(formatted_inps, model_out)\n if compat.is_tf_tensor(loss) or isinstance(loss, (list, tuple)):\n keras_model.add_loss(loss)\n elif isinstance(loss, dict):\n for _name, _loss in loss.items():\n keras_model.add_loss(_loss)\n keras_model.add_metric(_loss, name=_name + \"_mean\", aggregation=\"mean\")\n else:\n raise ValueError(\"criterion.reduce_loss returns \"\n \"unsupported value of type: {}\".format(type(loss)))\n self._restore_ckpt_or_pretrain()\n self._lr_schedule = build_lr_schedule(self._lr_schedule_args)\n if self._pruning_schedule is not None:\n self._optimizer = create_pruning_optimizer(self._optimizer, self.model, self._pruning_schedule,\n pruning_variable_pattern=self._pruning_variable_pattern,\n nopruning_variable_pattern=self._nopruning_variable_pattern,\n keep_prune_property=True)\n if self._partial_tuning is True:\n self._optimizer = create_partial_tuning_optimizer(self._optimizer, self.model, self.load_mask)\n self._optimizer = training_utils.handle_fp16_and_distributed_optimizer(\n self._optimizer, self._lr_schedule, self._hvd_backend)\n if self._hvd_backend is None:\n keras_model.compile(self._optimizer)\n else:\n # NOTE: we already add Horovod DistributedOptimizer in `_handle_fp16_and_distributed_optimizer`.\n # Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow\n # uses hvd.DistributedOptimizer() to compute gradients.\n keras_model.compile(self._optimizer, experimental_run_tf_function=False)\n keras_model.summary()\n summary_model_variables(self.model, self._freeze_variables)\n # initialize the checkpoint manager\n _ = compat.get_saver_or_default(self.model, self.model_dir, max_to_keep=self._checkpoints_max_to_keep)\n # build training training\n if not self._tb_log_dir:\n self._tb_log_dir = os.path.join(self.model_dir, \"train\")\n\n training_callbacks = [MetricReductionCallback(self.strategy, self._summary_steps, self._tb_log_dir,\n device=\"GPU:0\", lr_schedule=self._lr_schedule)]\n if self._hvd_backend is None or hvd.rank() == 0:\n training_callbacks.append(\n CustomCheckpointCallback(self.task.model_configs(self.model),\n save_checkpoint_steps=self._save_checkpoint_steps))\n if self._validator is not None:\n training_callbacks.append(self._validator.build(self.strategy, self.task, self.model))\n if self._hvd_backend is not None:\n # Horovod: average metrics among workers at the end of every epoch.\n #\n # Note: This callback must be in the list before the ReduceLROnPlateau,\n # TensorBoard or other metrics-based training.\n # NOTE!!! 
HERE we already integrate the metric averaging behaviour into the MetricReductionCallback.\n # training_callbacks.insert(0, hvd.callbacks.MetricAverageCallback(device=\"GPU:0\"))\n\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n training_callbacks.insert(0, hvd.callbacks.BroadcastGlobalVariablesCallback(0, device=\"GPU:0\"))\n if self._lr_schedule is not None:\n training_callbacks.append(LearningRateScheduler(self._lr_schedule))\n\n if self._experimental_count_batch_num:\n logging.info(\"Scanning the dataset......\")\n iterator = iter(training_utils.maybe_distribution_dataset(self.strategy, tfds))\n cnt = 0\n for _ in iterator:\n cnt += 1\n logging.info(f\"Total {cnt} batches per EPOCH.\")\n\n history = keras_model.fit(\n map_data_for_keras(tfds.repeat()),\n initial_epoch=0,\n epochs=1,\n steps_per_epoch=self._train_steps, # * args[\"update_cycle\"],\n verbose=2,\n callbacks=training_callbacks)\n logging.info(history.history)\n\n if self._pruning_schedule is not None:\n mask = [\n tf.Variable(\n (tf.cast(tf.math.not_equal(weight, 0.), weight.dtype.base_dtype)),\n dtype=weight.dtype.base_dtype,\n trainable=False,\n synchronization=tf.VariableSynchronization.ON_READ,\n aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) for weight in keras_model.trainable_weights\n ]\n # np.save(self.mask_dir, mask)\n with open(self.mask_dir, 'wb') as f:\n pickle.dump(mask, f)\n\n if self._partial_tuning is True:\n mask = self.load_mask\n # np.save(self.mask_dir, mask)\n saved_mask_dir = os.path.join(self.model_dir, \"mask.pkl\")\n with open(saved_mask_dir, 'wb') as f:\n pickle.dump(mask, f)\n",
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy\nimport tensorflow as tf\n\nfrom neurst.layers.decoders.transformer_decoder import TransformerDecoder\n\n\ndef test_transformer_decoder():\n dmodel = 4\n batch_size = 2\n num_layers = 1\n num_self_attention_heads = 2\n hidden_size = dmodel\n filter_size = 16\n self_attention_dropout_rate = 0.1\n ffn_dropout_rate = 0.1\n layer_postprocess_dropout_rate = 0.1\n # max_len = 4\n # max_decoder_len = 3\n\n decoder = TransformerDecoder(\n num_layers=num_layers,\n num_attention_heads=num_self_attention_heads,\n hidden_size=hidden_size,\n filter_size=filter_size,\n attention_dropout_rate=self_attention_dropout_rate,\n ffn_dropout_rate=ffn_dropout_rate,\n layer_postprocess_dropout_rate=layer_postprocess_dropout_rate)\n encoder_outputs = tf.convert_to_tensor(\n [[[-0.37282175, 0.62301564, -2.0221813, -0.00875833],\n [0.31516594, -1.117763, -1.0697726, 0.80373234],\n [-0.717022, 0.3300997, -0.44306225, 1.550383],\n [-1.5516962, 0.6025011, 1.8262954, 0.42469704]],\n\n [[-0.98617625, 2.2856202, -1.3063533, 0.4174998],\n [1.5724765, 1.2201295, 1.1479746, 0.7810888],\n [0.8343642, -1.073388, 1.2718492, -0.7290778],\n [-1.4126722, 1.8000795, -2.118672, -0.1366007]]], dtype=tf.float32)\n encoder_inputs_padding = tf.convert_to_tensor(\n [[0, 0, 0, 0], [0, 0, 1., 1.]], dtype=tf.float32)\n decoder_inputs = tf.convert_to_tensor(\n [[[8.6675537e-01, 2.2135425e-01, 1.4054185e+00, -4.2268831e-01],\n [1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],\n [-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]],\n\n [[-1.1870812e+00, -5.4499257e-01, -8.6622888e-01, -7.4098641e-01],\n [2.2233427e-01, 5.3582352e-01, 3.0567116e-01, 1.0201423e-01],\n [-1.8053315e+00, 7.2125041e-01, 1.0072237e+00, -2.0333264e+00]]], dtype=tf.float32)\n # test for training\n cache = decoder.create_decoding_internal_cache(\n encoder_outputs, encoder_inputs_padding, is_inference=False)\n _ = decoder(decoder_inputs, cache, is_training=False)\n for w in decoder.trainable_weights:\n if \"layer_0/self_attention_prepost_wrapper/self_attention/output_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.39332086, -0.3676856, -0.50203305, 0.6782059],\n [-0.41239128, -0.15406412, 0.3964849, -0.79016757],\n [0.6749844, -0.09548753, 0.16253561, -0.0560202],\n [-0.4699119, 0.82842, 0.35657936, -0.45770356]],\n dtype=tf.float32))\n elif \"layer_0/self_attention_prepost_wrapper/self_attention/qkv_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.03949255, 0.32946128, 0.38817757, 0.47047406, 0.07609951,\n 0.03131855, 0.15958023, 0.3292094, 0.42809182, 0.27969742,\n 0.39156157, -0.604576],\n [0.4869359, -0.590637, 0.3092571, 0.10321742, 0.45608515,\n 0.27015948, 0.2959339, 0.32079375, 0.480197, -0.35878542,\n 0.04467481, 0.467416],\n [-0.40064478, -0.05089319, -0.0999378, -0.6048573, 0.4379304,\n 0.3692366, 0.39103013, 0.24920046, -0.37060317, -0.03119427,\n 0.25101495, 
-0.21076846],\n [0.42842942, 0.48276085, -0.2498649, -0.0978691, -0.01024461,\n -0.04072392, -0.43499938, -0.09718102, 0.18174142, 0.07100755,\n -0.6075252, -0.3018506]],\n dtype=tf.float32))\n elif \"layer_0/encdec_attention_prepost_wrapper/encdec_attention/output_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[-0.31871676, 0.46451026, -0.32600254, -0.42110354],\n [0.45953768, -0.52176374, -0.47615638, -0.7818449],\n [0.7724063, -0.25975162, -0.49630436, 0.4681155],\n [0.7189149, 0.25591546, 0.2100411, -0.3439259]],\n dtype=tf.float32))\n elif \"layer_0/encdec_attention_prepost_wrapper/encdec_attention/q_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.27346164, -0.12056953, 0.4617111, 0.3126462],\n [-0.65311253, 0.24505383, 0.56249744, -0.5582411],\n [-0.47464705, -0.60553044, 0.3019113, 0.33609575],\n [-0.24644238, -0.16026068, -0.0945828, -0.05111927]],\n dtype=tf.float32))\n elif \"layer_0/encdec_attention_prepost_wrapper/encdec_attention/kv_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[-0.4204824, -0.23150605, 0.12045383, -0.6538836, 0.29070246,\n -0.38376695, 0.65055054, -0.51375425],\n [0.67025226, 0.0928542, -0.56662744, 0.12781924, -0.6193744,\n -0.61801594, 0.07964879, 0.16530299],\n [-0.06940353, -0.08732289, 0.24984497, 0.18489975, 0.5354368,\n -0.07608587, -0.5801205, -0.17658263],\n [0.54784423, -0.39817223, -0.11673075, 0.14106786, -0.1637184,\n 0.00750518, -0.44365695, -0.38458544]],\n dtype=tf.float32))\n elif \"layer_0/ffn_prepost_wrapper/ffn/dense1/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[-2.9522404e-01, -1.1858380e-01, 1.3743329e-01, -3.3782017e-01,\n -3.8876867e-01, 4.8396683e-01, 1.5062505e-01, -3.7749952e-01,\n -2.9512924e-01, -1.6212821e-02, -1.8608570e-04, -4.1960135e-01,\n 5.3800035e-01, 2.7734953e-01, 5.5179596e-03, -3.4055352e-02],\n [2.1051055e-01, 3.6151302e-01, 3.1045640e-01, -1.1510965e-01,\n 4.6738219e-01, 1.2504590e-01, -1.9454169e-01, 4.1786206e-01,\n -3.7045652e-01, 3.3854598e-01, -5.0978750e-01, 5.2220762e-01,\n 1.6077441e-01, -3.9631999e-01, 2.1259248e-01, 2.3286474e-01],\n [-1.0005751e-01, -5.0858349e-01, 3.6911082e-01, -5.1783592e-02,\n 7.1038425e-02, -1.1148521e-01, -5.3392905e-01, 3.6009926e-01,\n 7.9382658e-02, 1.0371411e-01, -5.0254786e-01, 1.7596281e-01,\n -9.2926025e-03, -6.4194202e-04, -1.4125884e-02, 4.7321141e-01],\n [2.8647327e-01, 2.6127762e-01, 4.5843053e-01, 4.9775457e-01,\n 3.8056010e-01, -4.0995055e-01, 3.6980593e-01, 3.3520699e-02,\n -1.8056035e-03, 1.6578972e-02, 1.6026449e-01, -2.4952739e-01,\n -3.1434530e-01, -1.3158950e-01, 7.9998970e-03, 1.1293548e-01]],\n dtype=tf.float32))\n elif \"layer_0/ffn_prepost_wrapper/ffn/dense2/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.2794218, 0.29263318, 0.42604703, -0.24461824],\n [0.32469118, -0.2654639, 0.17872995, 0.06222689],\n [-0.07604656, -0.29360557, -0.462821, 0.3731665],\n [0.27989155, 0.53663385, -0.12042063, 0.34913152],\n [-0.50028926, 0.08958912, 0.50753117, -0.03860039],\n [0.12980306, -0.47548878, 0.5443562, -0.41777247],\n [0.16824102, -0.5271052, -0.18454444, 0.2987221],\n [0.22610295, -0.3761598, 0.4983195, 0.31664205],\n [-0.36606842, -0.3778124, 0.01393354, 0.23516071],\n [0.26510388, -0.47218412, 0.42749757, 0.22174352],\n [0.4139307, 0.09682184, -0.1447433, -0.07231569],\n [0.01711905, -0.18132755, 0.03224993, 0.2071482],\n [0.12195373, -0.52764714, 0.48840046, -0.21843264],\n [0.12467605, -0.45452338, 0.05892056, 
-0.2852741],\n [-0.5464495, -0.4856094, -0.29271287, 0.10828984],\n [0.37080926, 0.01543814, 0.10875225, -0.2678996]],\n dtype=tf.float32))\n\n assert numpy.sum((decoder(decoder_inputs, cache, is_training=False).numpy()\n - numpy.array([[[0.4727962, -0.6863654, 1.387909, -1.1743398],\n [1.4770155, -1.2802002, 0.18456227, -0.38137752],\n [0.6776164, -0.4934968, 1.1886327, -1.3727522]],\n [[-1.6973993, 0.26954588, 0.59817475, 0.82967865],\n [-1.6315649, -0.0030859, 0.7861572, 0.8484935],\n [-1.4942819, 0.42606276, 1.246516, -0.17829692]]])) ** 2) < 1e-9\n\n # for inference\n cache = decoder.create_decoding_internal_cache(\n encoder_outputs, encoder_inputs_padding, is_inference=True)\n decoder_inputs = tf.convert_to_tensor(\n [[1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],\n [-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]], dtype=tf.float32)\n assert numpy.sum(\n (decoder(decoder_inputs, cache, is_training=False).numpy()\n - numpy.array([[1.4581295, -1.3640043, -0.1138487, 0.01972346],\n [-0.06228875, -1.0514979, 1.6223053, -0.5085185]])) ** 2) < 1e-9\n assert numpy.sum(\n (cache[\"decoding_states\"][\"layer_0\"][\"self_attention\"][\"keys\"].numpy()\n - numpy.array(numpy.reshape([[[-0.63596207, -0.49432975, -0.36614707, 0.03477353]],\n [[0.6539597, 0.4846998, 1.2206339, 0.67560077]]],\n [batch_size, 1, num_self_attention_heads,\n hidden_size // num_self_attention_heads]))) ** 2) < 1e-9\n assert numpy.sum(\n (cache[\"decoding_states\"][\"layer_0\"][\"self_attention\"][\"values\"].numpy()\n - numpy.array(numpy.reshape([[[0.6045396, 0.78576076, 0.3205938, -1.2158906]],\n [[0.14660448, -0.38737938, 1.2869109, 0.6795136]]],\n [batch_size, 1, num_self_attention_heads,\n hidden_size // num_self_attention_heads]))) ** 2) < 1e-9\n\n\nif __name__ == \"__main__\":\n test_transformer_decoder()\n",
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pickle\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom absl import logging\n\nfrom neurst.exps import register_exp\nfrom neurst.exps.sequence_generator import SequenceGenerator\nfrom neurst.utils import compat\nfrom neurst.utils.flags_core import Flag\n\n\n@register_exp([\"mask_predict\", \"mask_generation\"])\nclass MaskSequenceGenerator(SequenceGenerator):\n \"\"\" Entry for sequence generation. \"\"\"\n\n def __init__(self, args, **kwargs):\n \"\"\" Initializes a util class for sequence generation. \"\"\"\n self._loaded_mask = None\n if args[\"mask_pkl\"]:\n logging.info(f\"Loading mask from {args['mask_pkl']}\")\n with tf.io.gfile.GFile(args[\"mask_pkl\"], 'rb') as f:\n self._loaded_mask = pickle.load(f)\n super(MaskSequenceGenerator, self).__init__(args, **kwargs)\n\n @staticmethod\n def class_or_method_args():\n this_flags = super(MaskSequenceGenerator, MaskSequenceGenerator).class_or_method_args()\n this_flags.append(Flag(\"mask_pkl\", dtype=Flag.TYPE.STRING, default=None,\n help=\"The path to the mask pkl file.\"), )\n return this_flags\n\n @staticmethod\n def build_generation_model(task, model, search_layer, output_sequence_only=True):\n \"\"\" Build keras model for generation.\n\n Args:\n task: The task object.\n model: An instance of neurst.models.model.BaseModel\n search_layer: A sequence search object.\n output_sequence_only: Only generated sequences will output if True.\n\n Returns: the generation model.\n \"\"\"\n if search_layer is None:\n raise ValueError(\n \"The parameters for generation method must be provided: \"\n \"search_method, search_method.params, ...\")\n inps = task.create_inputs(compat.ModeKeys.INFER)\n formatted_inps = task.example_to_input(inps, compat.ModeKeys.INFER)\n search_layer.set_model(model)\n generation_ops = search_layer(formatted_inps)\n if output_sequence_only:\n generation_ops = generation_ops[0]\n keras_model = tf.keras.Model(inps, generation_ops)\n return keras_model\n\n def apply_mask(self, model, masks):\n tuples = []\n for (weight, mask) in list(zip(model.trainable_weights, masks)):\n masked_weight = weight * tf.cast(mask, weight.dtype.base_dtype)\n tuples.append((weight, masked_weight))\n\n K.batch_set_value(tuples)\n\n def _build_and_restore_model(self):\n \"\"\" Build a single model or ensemble model. \"\"\"\n model = super(MaskSequenceGenerator, self)._build_and_restore_model()\n if self._loaded_mask is not None:\n self.apply_mask(model, self._loaded_mask)\n return model\n"
] |
[
[
"tensorflow.math.not_equal",
"tensorflow.io.gfile.GFile",
"tensorflow.keras.Model"
],
[
"numpy.array",
"tensorflow.convert_to_tensor",
"numpy.reshape"
],
[
"tensorflow.keras.backend.batch_set_value",
"tensorflow.keras.Model",
"tensorflow.cast",
"tensorflow.io.gfile.GFile"
]
] |
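A minimal sketch of the pickle round-trip that MaskSequenceGenerator above expects; the toy Keras model and the mask.pkl path are illustrative assumptions, not from the repo:

import pickle
import tensorflow as tf
import tensorflow.keras.backend as K

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
# A mask is one 0/1 tensor per trainable weight, marking surviving entries.
masks = [tf.cast(tf.math.not_equal(w, 0.), w.dtype.base_dtype)
         for w in model.trainable_weights]
with open("mask.pkl", "wb") as f:  # hypothetical path
    pickle.dump([m.numpy() for m in masks], f)
with open("mask.pkl", "rb") as f:
    loaded = pickle.load(f)
# apply_mask in spirit: zero out each weight wherever its mask is zero.
K.batch_set_value([(w, (w * tf.cast(m, w.dtype.base_dtype)).numpy())
                   for w, m in zip(model.trainable_weights, loaded)])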
mrotke/pyStock
|
[
"76aad7c8bdd112d3a53ed013cbe9ff660a90d5bf"
] |
[
"lib/moneyflowindex.py"
] |
[
"# Add import from parent directory possible\nimport sys\nimport pandas as pd\nimport numpy\nimport matplotlib.pyplot as plt\nfrom lib.DataOperations import *\nfrom lib.ReportSignals import *\nfrom lib.Stock import *\nfrom lib.indicator import indicator\n\n# Creates MoneyFlowIndex object\n\n\ndef CreateMoneyFlowIndex(high, low, close, volume, info, n=14):\n return MoneyFlowIndex(high, low, close, volume, info, n)\n\n\n# MoneyFlowIndex object which creates MoneyFlowIndex data\nclass MoneyFlowIndex(indicator):\n\n def __init__(self, high, low, close, volume, info, n=14):\n indicator.__init__(self, 'MFI%u' % n, 'momentum')\n self.n = n\n self.info = info\n self.typicalPrice = (high + low + close) / 3\n self.moneyFlow, self.posFlow, self.negFlow, self.mfi = self.InitMoneyFlow(\n self.typicalPrice, volume, n)\n # money on the market plot\n self.moneyMarket = self.moneyFlow.cumsum()\n\n # Signals\n fromBottom, fromTop = FindIntersections(self.mfi, 20)\n self.buy = fromBottom\n fromBottom, fromTop = FindIntersections(self.mfi, 80)\n self.sell = fromTop\n # TrenToFall / TrendToRise\n fromBottom, fromTop = FindIntersections(self.mfi, 10)\n self.buyStrong = fromBottom\n fromBottom, fromTop = FindIntersections(self.mfi, 90)\n self.sellStrong = fromTop\n\n # returns AverageTrueRange\n def GetMoneyFlow(self):\n return self.MoneyFlow\n\n # Set MoneyFlowIndex indicator\n def InitMoneyFlow(self, tp, volume, n):\n moneyFlow = tp * volume\n posFlow = pd.Series()\n negFlow = pd.Series()\n\n for i in range(1, len(moneyFlow)):\n if (moneyFlow[i] >= 0):\n posFlow = posFlow.append(\n pd.Series(moneyFlow.values[i], index=[moneyFlow.index[i]]))\n negFlow = negFlow.append(\n pd.Series(0, index=[moneyFlow.index[i]]))\n else:\n posFlow = posFlow.append(\n pd.Series(0, index=[moneyFlow.index[i]]))\n negFlow = negFlow.append(\n pd.Series(abs(moneyFlow.values[i]), index=[moneyFlow.index[i]]))\n\n posFlowAvg = CreateMovingAverage(posFlow, n)\n negFlowAvg = CreateMovingAverage(negFlow, n)\n moneyRatio = posFlowAvg / negFlowAvg\n moneyFlowIndex = (100 * posFlowAvg) / (posFlowAvg + negFlowAvg)\n return moneyFlow, posFlow, negFlow, moneyFlowIndex\n\n # Export indicator signals to report\n def ExportSignals(self, reportSignals):\n reportSignals.AddDataframeSignals(self.buy, 'MFI', 'buy')\n reportSignals.AddDataframeSignals(self.sell, 'MFI', 'sell')\n reportSignals.AddDataframeSignals(self.buyStrong, 'MFI', 'buyStrong')\n reportSignals.AddDataframeSignals(self.sellStrong, 'MFI', 'sellStrong')\n\n # retunrs -100...100 value\n def GetUnifiedValue(self):\n return (self.mfi[-1] - 50)*2\n\n # Plot method\n def PlotPosNegFlow(self):\n plt.bar(self.negFlow.index, self.negFlow, color='red', label='')\n plt.bar(self.posFlow.index, self.posFlow, color='green', label='')\n # MoneyFlowIndex\n# plt.plot(self.posFlow.index, self.posFlow, label='PosFlow' + str(self.n), linewidth=1.0, color = 'green')\n# plt.plot(self.negFlow.index, self.negFlow, label='NegFlow' + str(self.n), linewidth=1.0, color = 'red')\n\n # Plot method\n\n def Plot(self):\n # MoneyFlowIndex\n plt.plot(self.mfi.index, self.mfi, label='MFI'\n + str(self.n), linewidth=1.0, color='#000000')\n x_axis = self.mfi.index.get_level_values(0)\n\n # OverBought\n overBought = CreateHorizontalLine(self.mfi.index, 80, 80, True)\n plt.plot(overBought.index, overBought, '--',\n label='Overbought', linewidth=1.0, color='#940006')\n# plt.fill_between(x_axis, self.mfi, overBought['value'],\n# where=self.mfi>overBought.values,color='#ffb3b3')\n # OverBought - Gene Quong and Avrum Soudack\n 
overBought = CreateHorizontalLine(self.mfi.index, 90, 90)\n plt.plot(overBought.index, overBought, '--',\n linewidth=0.6, color='#940006')\n\n # OverSold\n overSold = CreateHorizontalLine(self.mfi.index, 20, 20, True)\n plt.plot(overSold.index, overSold, '--',\n label='Oversold', linewidth=1.0, color='#169400')\n# plt.fill_between(x_axis, self.mfi, overSold['value'],\n# where=self.mfi<overSold.values,color='#b3ffb3')\n # OverSold - Gene Quong and Avrum Soudack\n overSold = CreateHorizontalLine(self.mfi.index, 10, 10)\n plt.plot(overSold.index, overSold, '--',\n linewidth=0.6, color='#169400')\n\n# # Signals plottting\n if (self.buy is not None and self.buy.size):\n plt.plot(self.buy.index, self.buy, 'o', color='#000000', ms=8)\n plt.plot(self.buy.index, self.buy, 'o',\n label='Buy', color='#00FF00')\n if (self.buyStrong is not None and self.buyStrong.size):\n plt.plot(self.buyStrong.index, self.buyStrong,\n 's', color='#000000', ms=8)\n plt.plot(self.buyStrong.index, self.buyStrong,\n 's', label='BuyStrong', color='#00FF00')\n if (self.sell is not None and self.sell.size):\n plt.plot(self.sell.index, self.sell, 'o', color='#000000', ms=8)\n plt.plot(self.sell.index, self.sell, 'o',\n label='Sell', color='#FF0000')\n if (self.sellStrong is not None and self.sellStrong.size):\n plt.plot(self.sellStrong.index, self.sellStrong,\n 's', color='#000000', ms=8)\n plt.plot(self.sellStrong.index, self.sellStrong,\n 's', label='SellStrong', color='#FF0000')\n\n # Limits of plot\n plt.ylim(top=100, bottom=0)\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.bar",
"pandas.Series"
]
] |
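For comparison, the same MFI arithmetic in vectorized pandas. This sketch follows the textbook variant, which splits flow on the change in typical price, whereas InitMoneyFlow above splits on the sign of the flow itself; the plain rolling mean stands in for the repo's CreateMovingAverage helper:

import pandas as pd

def mfi(high, low, close, volume, n=14):
    tp = (high + low + close) / 3  # typical price
    flow = tp * volume             # raw money flow
    delta = tp.diff()
    pos_avg = flow.where(delta > 0, 0.0).rolling(n).mean()  # positive flow average
    neg_avg = flow.where(delta < 0, 0.0).rolling(n).mean()  # negative flow average
    return 100 * pos_avg / (pos_avg + neg_avg)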
busySZl/pyfiberamp
|
[
"e6ddb34413e145cd662f7f0f23290bd872871978"
] |
[
"pyfiberamp/spectroscopies/spectroscopy.py"
] |
[
"from pyfiberamp.helper_funcs import *\n\nfrom scipy.interpolate import UnivariateSpline\nimport matplotlib.pyplot as plt\n\n\nclass Spectroscopy:\n @classmethod\n def from_files(cls, absorption_cross_section_file, emission_cross_section_file, upper_state_lifetime):\n absorption_spectrum = load_spectrum(absorption_cross_section_file)\n gain_spectrum = load_spectrum(emission_cross_section_file)\n return cls(absorption_spectrum, gain_spectrum, upper_state_lifetime)\n\n def __init__(self, absorption_cross_sections, emission_cross_sections, upper_state_lifetime):\n self.absorption_cs_spectrum = absorption_cross_sections\n self.emission_cs_spectrum = emission_cross_sections\n self.absorption_cs_interp = self._make_cross_section_interpolate(absorption_cross_sections)\n self.gain_cs_interp = self._make_cross_section_interpolate(emission_cross_sections)\n self.upper_state_lifetime = upper_state_lifetime\n\n @staticmethod\n def _make_cross_section_interpolate(spectrum):\n \"\"\"Creates a cubic spline interpolate from the imported cross section data. Cross section is assumed to be\n zero outside the imported data range.\"\"\"\n frequency = wl_to_freq(spectrum[::-1, 0])\n cross_section = spectrum[::-1, 1]\n spline = UnivariateSpline(frequency, cross_section, s=CROSS_SECTION_SMOOTHING_FACTOR, ext='zeros')\n\n def interp(freq):\n cross_sec = spline(freq)\n cross_sec[cross_sec < 0] = 0\n return cross_sec\n\n return interp\n\n def plot_gain_and_absorption_spectrum(self):\n \"\"\"Convenience plotting function to draw the imported cross section data and the calculated interpolates to\n check that they match.\"\"\"\n fig, ax = plt.subplots()\n gain = self.emission_cs_spectrum\n absorption = self.absorption_cs_spectrum\n gain_wls = np.linspace(gain[0, 0], gain[-1, 0], SPECTRUM_PLOT_NPOINTS)\n gain_vs = wl_to_freq(gain_wls)\n absorption_wls = np.linspace(absorption[0, 0], absorption[-1, 0], SPECTRUM_PLOT_NPOINTS)\n absorption_vs = wl_to_freq(absorption_wls)\n ax.plot(gain[:, 0] * 1e9, gain[:, 1], label='Gain')\n ax.plot(absorption[:, 0] * 1e9, absorption[:, 1], label='Absorption')\n ax.plot(absorption_wls * 1e9, self.absorption_cs_interp(absorption_vs), label='Absorption spline')\n ax.plot(gain_wls * 1e9, self.gain_cs_interp(gain_vs), label='Gain spline')\n ax.legend()\n ax.set_xlabel('Wavelength (nm)', fontsize=18)\n ax.set_ylabel('Gain/Absorption cross sections', fontsize=18)\n plt.show()\n\n\nYbGermanoSilicate= Spectroscopy.from_files(YB_ABSORPTION_CS_FILE, YB_EMISSION_CS_FILE, YB_UPPER_STATE_LIFETIME)\n"
] |
[
[
"scipy.interpolate.UnivariateSpline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
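The interpolation trick above (a smoothing spline that is zero outside the data range, with negative undershoot clamped) is easy to check on synthetic data; the Gaussian sample and smoothing factor below are illustrative assumptions:

import numpy as np
from scipy.interpolate import UnivariateSpline

freq = np.linspace(1.0, 2.0, 50)
cross_section = np.exp(-(freq - 1.5) ** 2 / 0.01)
spline = UnivariateSpline(freq, cross_section, s=1e-6, ext='zeros')

def interp(f):
    cs = spline(f)
    cs[cs < 0] = 0  # cubic splines can dip below zero between knots
    return cs

print(interp(np.array([0.5, 1.5, 2.5])))  # ~[0, 1, 0]: zero outside [1.0, 2.0]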
aws-samples/amazon-sagemaker-local-mode
|
[
"f470d7b543f7895094816c3f58b9981e044764d8",
"f470d7b543f7895094816c3f58b9981e044764d8"
] |
[
"scikit_learn_script_mode_local_serving_no_model_artifact/code/inference.py",
"lightgbm_bring_your_own_container_local_training_and_serving/lightgbm_bring_your_own_container_local_training_and_serving.py"
] |
[
"import logging\nimport sys\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\n\n# Perform prediction on the deserialized object, with the loaded model\ndef predict_fn(input_object, model):\n logger.info(\"predict_fn\")\n logger.info(f\"input_object: {input_object}\")\n\n response = np.average(input_object)\n logger.info(f\"returning response: {response}\")\n\n return response\n\n# Dummy model_fn function\ndef model_fn(model_dir):\n dummy_model = {}\n return dummy_model",
"# This is a sample Python program that trains a simple LightGBM Regression model, and then performs inference.\n# This implementation will work on your local computer.\n#\n# Prerequisites:\n# 1. Install required Python packages:\n# pip install boto3 sagemaker pandas scikit-learn\n# pip install 'sagemaker[local]'\n# 2. Docker Desktop has to be installed on your computer, and running.\n# 3. Open terminal and run the following commands:\n# docker build -t sagemaker-lightgbm-regression-local container/.\n########################################################################################################################\n\nimport pandas as pd\nfrom sagemaker.estimator import Estimator\nfrom sagemaker.local import LocalSession\nfrom sagemaker.predictor import csv_serializer\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\n\nsagemaker_session = LocalSession()\nsagemaker_session.config = {'local': {'local_code': True}}\n\n# For local training a dummy role will be sufficient\nrole = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'\n\ndata = load_boston()\n\nX_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.25, random_state=45)\nX_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=45)\n\ntrainX = pd.DataFrame(X_train, columns=data.feature_names)\ntrainX['target'] = y_train\n\nvalX = pd.DataFrame(X_test, columns=data.feature_names)\nvalX['target'] = y_test\n\ntestX = pd.DataFrame(X_test, columns=data.feature_names)\n\nlocal_train = './data/train/boston_train.csv'\nlocal_validation = './data/validation/boston_validation.csv'\nlocal_test = './data/test/boston_test.csv'\n\ntrainX.to_csv(local_train, header=None, index=False)\nvalX.to_csv(local_validation, header=None, index=False)\ntestX.to_csv(local_test, header=None, index=False)\n\nimage = 'sagemaker-lightgbm-regression-local'\n\nlocal_lightgbm = Estimator(\n image,\n role,\n instance_count=1,\n instance_type=\"local\",\n hyperparameters={'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'num_leaves': 31,\n 'learning_rate': 0.05,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0})\n\ntrain_location = 'file://'+local_train\nvalidation_location = 'file://'+local_validation\nlocal_lightgbm.fit({'train':train_location, 'validation': validation_location}, logs=True)\n\npredictor = local_lightgbm.deploy(1, 'local', serializer=csv_serializer)\n\nwith open(local_test, 'r') as f:\n payload = f.read().strip()\n\npredicted = predictor.predict(payload).decode('utf-8')\nprint(predicted)\n\npredictor.delete_endpoint(predictor.endpoint)\n"
] |
[
[
"numpy.average"
],
[
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.datasets.load_boston"
]
] |
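For reference, the chained train_test_split calls above carve 75/12.5/12.5 train/validation/test folds; a stand-alone sketch with stand-in data:

import numpy as np
from sklearn.model_selection import train_test_split

X, y = np.arange(80).reshape(40, 2), np.arange(40)
X_train, X_rest, y_train, y_rest = train_test_split(X, y, test_size=0.25, random_state=45)
X_val, X_test, y_val, y_test = train_test_split(X_rest, y_rest, test_size=0.5, random_state=45)
print(len(X_train), len(X_val), len(X_test))  # 30 5 5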
tomstark99/play-fair
|
[
"5b4ad20ebb96d1162f3bd696aba0a6b57006ab0a"
] |
[
"src/models/components/consensus.py"
] |
[
"import torch.nn\nfrom torch import nn\n\n\nclass SegmentConsensus(torch.nn.Module):\n def __init__(self, consensus_type, dim=1):\n super().__init__()\n self.consensus_type = consensus_type\n self.dim = dim\n\n def forward(self, input_tensor):\n if self.consensus_type == \"avg\":\n output = input_tensor.mean(dim=self.dim, keepdim=True)\n elif self.consensus_type == \"identity\":\n output = input_tensor\n else:\n raise NotImplementedError(\"Only avg and identity consensus implemented\")\n return output\n\n\nclass AverageConsensus(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n x: tensor of shape :math:`(N, T, C)`\n\n Returns:\n Input tensor averaged over the time dimension of shape :math:`(N, C)`\n \"\"\"\n assert x.dim() == 3\n return x.mean(dim=1)\n\n\nclass ClassifierConsensus(nn.Module):\n def __init__(self, input_dim: int, output_dim: int, input_relu: bool = True,\n dropout: float = 0):\n super().__init__()\n self.classifier = nn.Linear(input_dim, output_dim)\n self.relu = nn.ReLU() if input_relu else None\n self.dropout = nn.Dropout(dropout)\n self.consensus = AverageConsensus()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.relu is not None:\n x = self.relu(x)\n x = self.dropout(x)\n x = self.classifier(x)\n return self.consensus(x)\n\n\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.ReLU"
]
] |
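Usage sketch for the consensus modules above (shapes only, random values; the import path mirrors the repo layout and is an assumption):

import torch
from models.components.consensus import AverageConsensus, SegmentConsensus

x = torch.randn(8, 5, 400)               # (N, T, C): batch, segments, class scores
print(AverageConsensus()(x).shape)       # torch.Size([8, 400])
print(SegmentConsensus("avg")(x).shape)  # torch.Size([8, 1, 400]), keepdim=True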
sebamenabar/oc-fewshot-public
|
[
"eb12bd5b426518fd8353304f0760f5c24f1b3c12",
"eb12bd5b426518fd8353304f0760f5c24f1b3c12",
"eb12bd5b426518fd8353304f0760f5c24f1b3c12",
"2dad8c9f24cb1bfe72d8b13b33d28f6788d86ca8"
] |
[
"fewshot/experiments/metrics.py",
"fewshot/models/modules/online_imp_memory.py",
"fewshot/data/iterators/semisupervised_episode_iterator_tests.py",
"fewshot/models/modules/gru.py"
] |
[
"\"\"\"Metrics.\n\nAuthor: Mengye Ren (mren@cs.toronto.edu)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nimport sklearn.metrics\n\n\ndef label_equal(pred, label, axis=-1):\n return pred == label.astype(pred.dtype)\n\n\ndef top1_correct(pred, label, axis=-1):\n \"\"\"Calculates top 1 correctness.\"\"\"\n assert pred.shape[0] == label.shape[0], '{} != {}'.format(\n pred.shape[0], label.shape[0])\n pred_idx = np.argmax(pred, axis=axis)\n return pred_idx == label.astype(pred_idx.dtype)\n\n\ndef top1_acc(pred, label, axis=-1):\n \"\"\"Calculates top 1 accuracy.\"\"\"\n return top1_correct(pred, label, axis=axis).mean()\n\n\ndef topk_acc(pred, label, k, axis=-1):\n \"\"\"Calculates top 5 accuracy.\"\"\"\n assert pred.shape[0] == label.shape[0], '{} != {}'.format(\n pred.shape[0], label.shape[0])\n topk_choices = np.argsort(pred, axis=axis)\n if len(topk_choices.shape) == 2:\n topk_choices = topk_choices[:, ::-1][:, :k]\n else:\n raise NotImplementedError()\n return np.sum(topk_choices == np.expand_dims(label, axis), axis=axis).mean()\n\n\ndef stderr(array, axis=0):\n \"\"\"Calculates standard error.\"\"\"\n if len(array) > 0:\n return array.std(axis=axis) / np.sqrt(float(array.shape[0]))\n else:\n return 0.0\n\n\ndef mean(array, axis=0):\n \"\"\"Calculates standard error.\"\"\"\n return array.mean(axis=axis) if len(array) > 0 else 0.0\n\n\ndef calc_ap(results_list, verbose=True):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n y_gt_list = []\n y_full_list = []\n pred_list = []\n score_list = []\n cat_list = []\n\n for r in results_list:\n flag = r['flag'].astype(np.bool)\n flag_ = np.expand_dims(flag, -1)\n gt_ = r['y_gt'][flag]\n pred_ = np.argmax(r['pred'][:, :, :-1], axis=-1)[flag]\n score_ = r['pred'][:, :, -1][flag] # Unknown score.\n\n y_gt_list.append(gt_)\n pred_list.append(pred_)\n score_list.append(score_) # Category agnostic\n\n if verbose:\n print('y_gt', y_gt_list[-1], y_gt_list[-1].shape)\n print('pred', pred_list[-1], pred_list[-1].shape)\n y_gt = np.concatenate(y_gt_list)\n score = np.concatenate(score_list)\n y_pred = np.concatenate(pred_list)\n\n N = len(y_gt)\n sortidx = np.argsort(score)\n score = score[sortidx]\n y_gt = y_gt[sortidx]\n y_pred = y_pred[sortidx]\n\n tp = (y_gt == y_pred).astype(np.float64)\n pos = (y_gt < unk_id).astype(np.float64)\n npos = pos.sum()\n\n if verbose:\n print('score sorted', score)\n print('y_gt', y_gt)\n print('y_pred', y_pred)\n print('tp', tp)\n print('unk id', unk_id)\n\n recall = np.zeros([N], dtype=np.float64)\n tp_cumsum = np.cumsum(tp)\n if verbose:\n print('npos', npos)\n print('tp cumsum', tp_cumsum)\n precision = tp_cumsum / np.arange(1, N + 1).astype(np.float64)\n recall = tp_cumsum / npos\n precision = np.concatenate([[1.0], precision])\n recall = np.concatenate([[0.0], recall])\n ap = sklearn.metrics.auc(recall, precision)\n if verbose:\n print('precision', precision)\n print('recall', recall)\n print('ap', ap)\n return ap\n\n\ndef calc_interval(y_gt, y=None):\n if y is None:\n y = y_gt\n B = y_gt.shape[0]\n # Last time we have seen a class.\n last_seen = np.zeros([B, y_gt.max() + 1]) - 1\n ninterval = np.zeros(y.shape, dtype=np.int64)\n for i in range(y.shape[1]):\n last_seen_ = last_seen[np.arange(B), y_gt[:, i]]\n ninterval[:, i] = i - last_seen_\n last_seen[np.arange(B), y_gt[:, i]] = i\n return ninterval\n\n\ndef calc_nshot(y_gt, y=None):\n if y is None:\n y = y_gt\n nway = np.max(y_gt)\n B, T = y_gt.shape\n waylist = np.arange(nway + 1)\n onehot_bool 
= np.expand_dims(y_gt, -1) == waylist\n onehot_bool_y = np.expand_dims(y, -1) == waylist\n onehot = (onehot_bool).astype(np.int64) # [B, T, K]\n onehot_cumsum = np.cumsum(onehot, axis=1)\n nshot = onehot_cumsum[onehot_bool_y].reshape([B, T]) - (y_gt == y).astype(\n np.int64)\n return nshot\n\n\ndef calc_nshot_ap(results_list, nshot_max):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n nshot_list = [calc_nshot(r['y_full']) for r in results_list]\n ap_list = [0.0] * nshot_max\n\n for n in range(1, nshot_max + 1):\n sel_list = [s == n for s in nshot_list]\n y_gt_list = [r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [r['flag'][s][None, :] for s, r in zip(sel_list, results_list)]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n ap_list[n - 1] = calc_ap(subresults, verbose=False)\n return np.array(ap_list)\n\n\ndef calc_acc(results_list):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n y_gt_list = []\n pred_list = []\n acc_list = []\n\n for r in results_list:\n flag = r['flag'].astype(np.bool)\n y_gt_list.append(r['y_gt'][flag])\n flag_ = np.expand_dims(flag, -1)\n pred_list.append(np.argmax(r['pred'][:, :, :-1], axis=-1)[flag])\n if len(y_gt_list[-1]) > 0:\n acc_list.append(\n np.mean((y_gt_list[-1] == pred_list[-1]).astype(np.float64)))\n y_gt = np.concatenate(y_gt_list)\n y_pred = np.concatenate(pred_list)\n correct = (y_pred == y_gt).astype(np.float64)\n return correct.mean(), stderr(np.array(acc_list))\n\n\ndef calc_nshot_acc(results_list, nshot_max, labeled=False):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n if labeled:\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n else:\n nshot_list = [calc_nshot(r['y_full']) for r in results_list]\n acc_list = [0.0] * nshot_max\n stderr_list = [0.0] * nshot_max\n for n in range(1, nshot_max + 1):\n sel_list = [s == n for s in nshot_list]\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [r['flag'][s][None, :] for s, r in zip(sel_list, results_list)]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1], stderr_list[n - 1] = calc_acc(subresults)\n return np.array(acc_list), np.array(stderr_list)\n\n\ndef calc_nshot_acc_2d(results_list, nappear_max, nshot_max):\n \"\"\"Combining labeled and unlabeled. 
X-axis number of appearances, Y-axis\n number of labels.\"\"\"\n N = nappear_max\n M = nshot_max\n unk_id = results_list[0]['pred'].shape[-1] - 1\n acc_list = np.zeros([N, M])\n stderr_list = np.zeros([N, M])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n for n in range(1, N + 1):\n for m in range(1, M + 1):\n sel_list = [\n np.logical_and(nappear_ == n, nshot_ == m)\n for nappear_, nshot_ in zip(nappear_list, nshot_list)\n ]\n if m > n:\n assert all([np.logical_not(s).all() for s in sel_list])\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1, m - 1], stderr_list[n - 1, m - 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_nshot_acc_3d(results_list, nappear_max, nshot_max, ninterval_split):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n N = nappear_max\n M = nshot_max\n K = len(ninterval_split) - 1\n acc_list = np.zeros([N, M, K])\n stderr_list = np.zeros([N, M, K])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n ninterval_list = [calc_interval(r['y_full']) for r in results_list]\n for n in range(1, N + 1):\n for m in range(1, M + 1):\n for k in range(1, K + 1):\n sel_list = [\n np.logical_and(\n np.logical_and(nappear_ == n, nshot_ == m),\n np.logical_and(ninterval_ >= ninterval_split[k - 1],\n ninterval_ < ninterval_split[k])) for nappear_,\n nshot_, ninterval_ in zip(nappear_list, nshot_list, ninterval_list)\n ]\n if m > n:\n assert all([np.logical_not(s).all() for s in sel_list])\n\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1, m - 1, k - 1], stderr_list[n - 1, m - 1, k -\n 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_nshot_acc_2dv2(results_list, nappear_max, ninterval_split):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n N = nappear_max\n K = len(ninterval_split) - 1\n acc_list = np.zeros([N, K])\n stderr_list = np.zeros([N, K])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n ninterval_list = [calc_interval(r['y_full']) for r in results_list]\n # print(ninterval_list[0])\n for n in range(1, N + 1):\n for k in range(1, K + 1):\n sel_list = [\n np.logical_and(\n nappear_ == n,\n np.logical_and(ninterval_ >= ninterval_split[k - 1],\n ninterval_ < ninterval_split[k]))\n for nappear_, ninterval_ in zip(nappear_list, ninterval_list)\n ]\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k 
in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1, k - 1], stderr_list[n - 1, k - 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_nshot_acc_2dv3(results_list, nshot_max, ninterval_split):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n M = nshot_max\n K = len(ninterval_split) - 1\n acc_list = np.zeros([M, K])\n stderr_list = np.zeros([M, K])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n ninterval_list = [calc_interval(r['y_full']) for r in results_list]\n for m in range(1, M + 1):\n for k in range(1, K + 1):\n sel_list = [\n np.logical_and(\n nshot_ == m,\n np.logical_and(ninterval_ >= ninterval_split[k - 1],\n ninterval_ < ninterval_split[k]))\n for nshot_, ninterval_ in zip(nshot_list, ninterval_list)\n ]\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[m - 1, k - 1], stderr_list[m - 1, k - 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_acc_time(results_list, tmax):\n acc_time = [] # [T, N] T=number of timestep; N=number of episodes\n for t in range(tmax):\n acc_time.append([])\n for i, r in enumerate(results_list):\n # Support set metrics, accumulate per time step.\n correct = label_equal(r['pred_id'], r['y_gt']) # [B, T]\n for t in range(tmax):\n if r['flag'][:, t].sum() > 0:\n acc_time[t].append(correct[:, t].sum() / r['flag'][:, t].sum())\n acc_time = [np.array(l) for l in acc_time]\n return np.array([mean(l) for l in acc_time]), np.array(\n [stderr(l) for l in acc_time])\n\n\ndef calc_acc_time_label(results_list, tmax):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n acc_time = [] # [T, N] T=number of timestep; N=number of episodes\n for t in range(tmax):\n acc_time.append([])\n for i, r in enumerate(results_list):\n # Support set metrics, accumulate per time step.\n correct = label_equal(np.argmax(r['pred'][:, :, :-1], axis=-1),\n r['y_gt']) # [B, T]\n T = r['y_gt'].shape[1]\n flag = r['flag']\n is_unk = (r['y_gt'] == unk_id).astype(np.float32) # [B, T]\n flag = flag * (1.0 - is_unk)\n for t in range(tmax):\n if flag[:, t].sum() > 0:\n acc_time[t].append(correct[:, t].sum())\n acc_time = [np.array(l) for l in acc_time]\n return np.array([mean(l) for l in acc_time]), np.array(\n [stderr(l) for l in acc_time])\n\n\nif __name__ == '__main__':\n y_s = np.array([[1, 10, 3, 2, 10, 2, 3, 1, 2, 2, 2, 2]])\n y_full = np.array([[1, 2, 3, 2, 2, 2, 3, 1, 2, 2, 2, 2]])\n y_gt = np.array([[10, 10, 10, 10, 2, 2, 3, 1, 2, 2, 2, 2]])\n pred = np.array([[10, 10, 10, 10, 2, 2, 3, 1, 2, 3, 2, 2]])\n flag = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\n pred2 = np.zeros([1, y_s.shape[1], 11])\n pred2[np.zeros([y_s.shape[1]], 
dtype=y_s.dtype),\n np.arange(y_s.shape[1]), pred[0]] = 1.0\n print(pred2)\n results_list = [{\n 'y_s': y_s,\n 'y_gt': y_gt,\n 'y_full': y_full,\n 'pred': pred2,\n 'flag': flag\n }]\n print(calc_nshot_acc(results_list, nshot_max=5, labeled=True))\n print(calc_nshot_acc(results_list, nshot_max=5))\n print(calc_ap(results_list, verbose=True))\n print(calc_nshot_acc_2d(results_list, nappear_max=5, nshot_max=5))\n print('interval', calc_interval(y_full))\n acc, se = calc_acc_time_label(results_list, tmax=12)\n print(acc)\n",
"\"\"\"Online mixture memory that performs online clustering.\n\nAuthor: Mengye Ren (mren@cs.toronto.edu)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport tensorflow as tf\n\nfrom fewshot.models.modules.example_memory import ExampleMemory\nfrom fewshot.models.registry import RegisterModule\n\nINF = 1e6\n\n\n@RegisterModule(\"online_imp_memory\")\n@RegisterModule(\"online_mixture_memory\") # Legacy name\nclass OnlineIMPMemory(ExampleMemory):\n\n def forward_one(self,\n x,\n y,\n t,\n cmean,\n clabel,\n cusage,\n is_training=tf.constant(True)):\n y_ = self.retrieve(x, t, cmean, clabel, cusage, is_training=is_training)\n # klogits, remain, new = self._infer_one(x, cmean, clabel, cusage, y=y)\n klogits, new = self._infer_one(x, cmean, clabel, cusage, y=y)\n new_id = tf.reduce_sum(tf.cast(tf.greater(cusage, 0), tf.int64),\n [1]) # [B]\n kidx = tf.where(tf.less(new, 0.0), tf.argmax(klogits, axis=1), new_id)\n cmean, clabel, cusage = self.store(x, kidx, y, t, cmean, clabel, cusage)\n return y_, (cmean, clabel, cusage)\n\n def retrieve(self,\n x,\n t,\n cmean,\n clabel,\n cusage,\n is_training=tf.constant(True)):\n # clogits, remain, new = self.infer(x, t, cmean, clabel, cusage)\n clogits, new = self.infer(x, t, cmean, clabel, cusage)\n new_ = tf.reshape(new, [-1, 1])\n pad = tf.zeros_like(clogits)[:, :-1] - INF\n # TODO use scatter_nd to assign unknown ID.\n logits_unk = tf.concat([pad, new_], axis=1)\n logits = tf.maximum(clogits, logits_unk)\n return logits\n\n def infer(self, x, t, cmean, clabel, cusage):\n \"\"\"Infer cluster ID. Either goes into one of the existing cluster\n or become a new cluster. This procedure is for prediction purpose.\n\n Args:\n x: Input. [B, D]\n cmean: Cluster centers. [B, K, D]\n clabel: Cluster labels. [B, K]\n cusage: Usage binary vector for the cluster. [B, K]\n\n Returns:\n logits: Cluster logits. [B, M]\n new_prob: New cluster probability. [B]\n \"\"\"\n # logits, remain, new = self._infer_one(x, cmean, clabel, cusage)\n logits, new = self._infer_one(x, cmean, clabel, cusage)\n kprob = tf.nn.softmax(logits) # [B, K]\n clabel_onehot = tf.one_hot(clabel, self.unknown_id + 1) # [B, K', C]\n # [B, K, 1] * [B, K, C] = [B, C]\n cprob = tf.reduce_sum(tf.expand_dims(kprob, -1) * clabel_onehot, [1])\n cprob = tf.maximum(cprob, 1e-6) # Delta.\n return tf.math.log(cprob), new\n\n def get_initial_state(self, bsize):\n \"\"\"Initial state for the RNN.\"\"\"\n M = self.max_items\n dim = self.dim\n\n # Cluster storage.\n cmean = tf.zeros([bsize, M, dim], dtype=self.dtype)\n clabel = tf.zeros([bsize, M], dtype=tf.int64)\n\n # Number of examples per cluster.\n cusage = tf.zeros([bsize, M], dtype=self.dtype)\n return cmean, clabel, cusage\n\n def _infer_one(self, x, cmean, clabel, cusage, y=None, verbose=False):\n \"\"\"Infers one example.\n\n Args:\n x: Input. [B, D]\n cmean: Cluster centers. [B, K, D]\n clabel: Cluster labels. [B, K]\n cusage: Usage binary vector for the cluster. [B, K]\n\n Returns:\n logits: Cluster logits. [B, M]\n remain: Old cluster logit. 
[B]\n \"\"\"\n # verbose = y is not None\n # verbose = False\n # Whether a cluster is used.\n cusage_flag = tf.greater(cusage, 0) # [B, K]\n\n # Returns cluster ID and label.\n x_ = tf.expand_dims(x, 1) # [B, 1, D]\n pdist = tf.squeeze(self.compute_euclidean_dist_sq(x_, cmean), 1) # [B, K]\n pdist += tf.where(cusage_flag, 0.0, INF)\n\n if y is not None:\n y_ = tf.expand_dims(tf.cast(y, clabel.dtype), -1) # [B]\n rel_flag = tf.logical_or(\n tf.equal(clabel, y_), tf.equal(clabel, self.unknown_id))\n pdist += tf.where(rel_flag, 0.0, INF)\n\n # Variance parameter.\n labeled_cluster = clabel < self.unknown_id\n sigma = tf.where(labeled_cluster, self.sigma_l, self.sigma_u)\n\n # Need to consider labeled case here.\n min_dist = tf.reduce_min(pdist, [-1]) # [B]\n # remain = (self._beta - min_dist) / self._gamma # [B]\n new = (min_dist - self._beta) / self._gamma # [B]\n pdist = pdist / (2.0 * sigma**2)\n return -pdist, new\n\n def store(self, x, kidx, y, t, cmean, clabel, cusage):\n \"\"\"Stores a new example.\n\n Args:\n x: Input. [B, ...].\n kidx: Cluster Idx. [B]\n y: Label. [B]\n t: Int. Timestep.\n cmean: [B, M, D].\n clabel: [B, M].\n cusage: [B, M].\n \"\"\"\n # Push into the example storage.\n bidx = tf.range(x.shape[0], dtype=tf.int64) # [B]\n bkidx = tf.stack([bidx, kidx], axis=-1) # [B, 2]\n # cusage_ = tf.cast(tf.expand_dims(cusage, -1), self.dtype) # [B, M, 1]\n\n cmean_cur = tf.gather_nd(cmean, bkidx) # [B, D]\n count = tf.gather_nd(cusage, bkidx) # [B]\n count_ = tf.expand_dims(count, -1) # [B]\n cmean_update = cmean_cur * count_ / (count_ + 1.0) + x / (count_ + 1.0)\n cmean_new = tf.tensor_scatter_nd_update(cmean, bkidx, cmean_update)\n\n cusage_update = count + 1\n cusage_new = tf.tensor_scatter_nd_update(cusage, bkidx, cusage_update)\n\n clabel_cur = tf.gather_nd(clabel, bkidx) # [B]\n clabel_cur = tf.where(tf.greater(count, 0), clabel_cur, self.unknown_id)\n # Prefer labeled vs. unlabeled.\n clabel_upd = tf.minimum(clabel_cur, tf.cast(y, clabel_cur.dtype))\n clabel_new = tf.tensor_scatter_nd_update(clabel, bkidx, clabel_upd)\n return cmean_new, clabel_new, cusage_new\n",
"\"\"\"Unit tests for semi-supervised episode iterator.\n\nAuthor: Mengye Ren (mren@cs.toronto.edu)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport tensorflow as tf\nimport unittest\n\nfrom fewshot.data.datasets.omniglot import OmniglotDataset\nfrom fewshot.data.iterators.semisupervised_episode_iterator import SemiSupervisedEpisodeIterator # NOQA\nfrom fewshot.data.preprocessors import NormalizationPreprocessor\nfrom fewshot.data.samplers.crp_sampler import CRPSampler\nfrom fewshot.data.samplers.semisupervised_episode_sampler import SemiSupervisedEpisodeSampler # NOQA\n\n\nclass SemiSupervisedEpisodeIteratorTests(unittest.TestCase):\n\n def test_basic(self):\n folder = '/mnt/local/data/omniglot'\n omniglot = OmniglotDataset(folder, 'train')\n preprocessor = NormalizationPreprocessor()\n for bsize in [1, 2]:\n sampler = CRPSampler(0)\n sampler2 = SemiSupervisedEpisodeSampler(sampler, 0)\n it = SemiSupervisedEpisodeIterator(\n omniglot,\n sampler2,\n batch_size=bsize,\n nclasses=10,\n nquery=5,\n preprocessor=preprocessor,\n expand=True,\n fix_unknown=True,\n label_ratio=0.5,\n nd=5,\n sd=1,\n md=2,\n alpha=0.5,\n theta=1.0)\n for x in range(2):\n b = it.next()\n print(b)\n print('support', tf.reduce_max(b.train_images),\n tf.reduce_min(b.train_images), tf.shape(b.train_images))\n print('support label', b.train_labels, tf.shape(b.train_labels))\n print('support gt', b.train_groundtruth, tf.shape(b.train_groundtruth))\n print('query', tf.reduce_max(b.test_images),\n tf.reduce_min(b.test_images), tf.shape(b.test_images))\n print('query label', b.test_labels, tf.shape(b.test_labels))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport tensorflow as tf\n\nfrom fewshot.models.registry import RegisterModule\nfrom fewshot.models.modules.container_module import ContainerModule\nfrom fewshot.models.modules.nnlib import Linear\nfrom fewshot.models.modules.layer_norm import LayerNorm\nfrom fewshot.models.variable_context import variable_scope\n\n\n@RegisterModule('gru')\nclass GRU(ContainerModule):\n \"\"\"Gated recurrent unit\"\"\"\n\n def __init__(self, name, nin, nout, dtype=tf.float32):\n super(GRU, self).__init__(dtype=dtype)\n self._nin = nin\n self._nout = nout\n\n with variable_scope(name):\n self._gates = Linear(\"gates_linear\", nin + nout, 2 * nout)\n self._linear = Linear(\"linear\", nin + nout, nout)\n\n def forward(self, x, h_last):\n \"\"\"Forward one timestep.\n\n Args:\n x: [B, D]. Input.\n h_last: [B, D]. Hidden states of the previous timestep.\n\n Returns:\n h\n \"\"\"\n D = self.nout\n x_comb = tf.concat([x, h_last], axis=-1)\n gates = self._gates(x_comb)\n r_gate = tf.math.sigmoid(gates[:, :D])\n z_gate = tf.math.sigmoid(gates[:, D:])\n h_hat = tf.math.tanh(self._linear(tf.concat([x, h_last * r_gate])))\n h = (1.0 - z_gate) * h_hat + z_gate * h_hat\n return h\n\n def end_iteration(self, h_last):\n return h_last\n\n def get_initial_state(self, bsize):\n return tf.zeros([bsize, self.nout], dtype=self.dtype)\n\n @property\n def nin(self):\n return self._nin\n\n @property\n def nout(self):\n return self._nout\n\n @property\n def in_dim(self):\n return self._nin\n\n @property\n def memory_dim(self):\n return self._nout\n\n\n@RegisterModule('gru1dmod')\nclass GRU1DMod(GRU):\n \"\"\"GRU with 1-d gates and without activation\"\"\"\n\n def __init__(self, name, nin, nout, layernorm=False, bias_init=-2.0, dtype=tf.float32):\n super(GRU, self).__init__(dtype=dtype)\n self._nin = nin\n self._nout = nout\n self._layernorm = layernorm\n self._gates = Linear(\n \"gates_linear\", nin + nout, 1, b_init=lambda: tf.ones(1) * bias_init)\n # self._gates = Linear(\n # \"gates_linear\", nin + nout, 1, b_init=lambda: tf.ones(1) * 2.0)\n # self._gates = Linear(\n # \"gates_linear\", nin + nout, 1, b_init=lambda: tf.zeros(1))\n if layernorm:\n self._ln = LayerNorm(\"layernorm\", nin + nout, dtype=dtype)\n # assert False\n\n def forward(self, x, h_last):\n \"\"\"Forward one timestep.\n\n Args:\n x: [B, D]. Input.\n h_last: [B, D]. 
Hidden states of the previous timestep.\n\n Returns:\n h\n \"\"\"\n x_comb = tf.concat([x, h_last], axis=-1)\n if self._layernorm:\n x_comb = self._ln(x_comb)\n gates = self._gates(x_comb)\n f_gate = tf.math.sigmoid(gates)\n # tf.print('f gate', f_gate)\n h = (1.0 - f_gate) * h_last + f_gate * x\n return h, h\n\n def end_iteration(self, h_last):\n return h_last\n\n def get_initial_state(self, bsize):\n return tf.zeros([bsize, self.nout], dtype=self.dtype)\n\n @property\n def nin(self):\n return self._nin\n\n @property\n def nout(self):\n return self._nout\n\n @property\n def in_dim(self):\n return self._nin\n\n @property\n def memory_dim(self):\n return self._nout\n\n\n@RegisterModule('lstm1dmod')\nclass LSTM1DMod(ContainerModule):\n \"\"\"A standard LSTM module.\"\"\"\n\n def __init__(self, name, nin, nout, dtype=tf.float32):\n super(LSTM1DMod, self).__init__(dtype=dtype)\n self._nin = nin\n self._nout = nout\n\n with variable_scope(name):\n self._gates = Linear(\"gates_linear\", nin + nout, nout + 2)\n # self._gates2 = Linear(\"gates_linear\", nout, nout)\n\n def forward(self, x, c_last, h_last):\n \"\"\"Forward one timestep.\n\n Args:\n x: [B, D]. Input.\n c_last: [B, D]. Cell states of the previous time step.\n h_last: [B, D]. Hidden states of the previous time step.\n\n Returns:\n A tuple of output and the hidden states.\n \"\"\"\n x_comb = tf.concat([x, h_last], axis=-1)\n gates = self._gates(x_comb)\n D = self.nout\n f_gate = tf.sigmoid(gates[:, :1])\n i_gate = tf.sigmoid(gates[:, 1:2])\n # o_gate = tf.sigmoid(gates[:, 2:2 + D])\n o_gate = tf.sigmoid(gates[:, 2:3])\n # c = c_last * f_gate + x * i_gate\n c = c_last * f_gate + x * (1 - f_gate)\n h = o_gate * tf.tanh(c)\n # h = tf.tanh(c2)\n return h, (c, h)\n\n def end_iteration(self, h_last):\n \"\"\"End recurrent iterations.\"\"\"\n return h_last\n\n def get_initial_state(self, bsize):\n return (tf.zeros([bsize, self.nout], dtype=self.dtype),\n tf.zeros([bsize, self.nout], dtype=self.dtype))\n\n @property\n def nin(self):\n return self._nin\n\n @property\n def nout(self):\n return self._nout\n\n @property\n def in_dim(self):\n return self._nin\n\n @property\n def memory_dim(self):\n return self._nout\n"
] |
[
[
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.logical_not",
"numpy.zeros",
"numpy.logical_and",
"numpy.argmax",
"numpy.arange",
"numpy.argsort",
"numpy.cumsum",
"numpy.expand_dims"
],
[
"tensorflow.reduce_min",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.greater",
"tensorflow.stack",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.argmax",
"tensorflow.math.log",
"tensorflow.constant",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.expand_dims",
"tensorflow.where",
"tensorflow.gather_nd",
"tensorflow.equal",
"tensorflow.tensor_scatter_nd_update",
"tensorflow.maximum"
],
[
"tensorflow.reduce_max",
"tensorflow.shape",
"tensorflow.reduce_min"
],
[
"tensorflow.zeros",
"tensorflow.concat",
"tensorflow.sigmoid",
"tensorflow.math.sigmoid",
"tensorflow.ones",
"tensorflow.tanh"
]
] |
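The store() method above leans on a batched scatter; an isolated sketch of that pattern (toy shapes, not repo code):

import tensorflow as tf

B, M, D = 2, 4, 3
cmean = tf.zeros([B, M, D])                 # [batch, clusters, dim]
kidx = tf.constant([1, 3], dtype=tf.int64)  # chosen cluster per batch row
bidx = tf.range(B, dtype=tf.int64)
bkidx = tf.stack([bidx, kidx], axis=-1)     # [B, 2] (batch, cluster) index pairs
print(tf.tensor_scatter_nd_update(cmean, bkidx, tf.ones([B, D])))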
exmee/HSSD
|
[
"cf1d26c32b1a5a95c6c17460dda445c408d7b5dc"
] |
[
"resnet_v1.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the original form of Residual Networks.\n\nThe 'v1' residual networks (ResNets) implemented in this module were proposed\nby:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nOther variants were introduced in:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe networks defined in this module utilize the bottleneck building block of\n[1] with projection shortcuts only for increasing depths. They employ batch\nnormalization *after* every weight layer. This is the architecture used by\nMSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and\nResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'\narchitecture and the alternative 'v2' architecture of [2] which uses batch\nnormalization *before* every weight layer in the so-called full pre-activation\nunits.\n\nTypical use:\n\n from tensorflow.contrib.slim.nets import resnet_v1\n\nResNet-101 for image classification into 1000 classes:\n\n # inputs has shape [batch, 224, 224, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training)):\n net, end_points = resnet_v1.resnet_v1_101(inputs, 1000)\n\nResNet-101 for semantic segmentation into 21 classes:\n\n # inputs has shape [batch, 513, 513, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training)):\n net, end_points = resnet_v1.resnet_v1_101(inputs,\n 21,\n global_pool=False,\n output_stride=16)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n#import Conv2DWN\n\nimport resnet_utils\nfrom blocks import attention_block\nfrom config import args\n\nresnet_arg_scope = resnet_utils.resnet_arg_scope\nslim = tf.contrib.slim\n\n'''分割部分和跳跃连接层中使用attention,检测为0.789'''\n@slim.add_arg_scope\ndef tail_att(inputs, skip, depth, depth_bottleneck, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': #双线性插值\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': #最近邻插值\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #skip = attention_block(skip)\n res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method) #res_inpt为input经过插值处理\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') #第一次resskip时,2048 != 512,要把shortcut的深度变为512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n '''********************attention****************************'''\n #output = attention_block(output)\n '''********************attention****************************'''\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n@slim.add_arg_scope #attention_layer\ndef attention_layer(inputs, skip, depth, depth_bottleneck, scale=2, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': # bilinear interpolation\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': # nearest-neighbor interpolation\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n res_inpt = slim.conv2d(inputs, depth * scale * scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n\n res_inpt = tf.depth_to_space(res_inpt, scale)\n res_inpt = tf.image.resize_images(res_inpt, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') # on the first resskip 2048 != 512, so project the shortcut depth down to 512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n output = attention_block(output)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n@slim.add_arg_scope #attention\ndef attention(inputs, skip, depth, depth_bottleneck, scale=2, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': # bilinear interpolation\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': # nearest-neighbor interpolation\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n skip = attention_block(skip)\n res_inpt = slim.conv2d(inputs, depth * scale * scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n\n res_inpt = tf.depth_to_space(res_inpt, scale)\n res_inpt = tf.image.resize_images(res_inpt, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') # on the first resskip 2048 != 512, so project the shortcut depth down to 512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n# defines pixel shuffle (sub-pixel) upsampling\n@slim.add_arg_scope\ndef sub_pixel(inputs, skip, depth, outputs_collections=None, scale=2,scope=None):\n if args.resize == 'bilinear':\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest':\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'sub_pixel', [inputs]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n if depth != depth_in:\n inputs = slim.conv2d(inputs, depth, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope='shortcut')\n\n inputs = slim.conv2d(inputs, depth*scale*scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n output = tf.depth_to_space(inputs, scale)\n output = tf.image.resize_images(output, tf.shape(skip)[1:3], method=resize_method)\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n@slim.add_arg_scope #shuffle pixel\ndef sub_pixel_skip(inputs, skip, depth, depth_bottleneck, scale=2, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': # bilinear interpolation\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': # nearest-neighbor interpolation\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n res_inpt = slim.conv2d(inputs, depth * scale * scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n\n res_inpt = tf.depth_to_space(res_inpt, scale)\n res_inpt = tf.image.resize_images(res_inpt, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') # on the first resskip 2048 != 512, so project the shortcut depth down to 512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n@slim.add_arg_scope #不加concat层\ndef noconcat(inputs, skip, depth, depth_bottleneck, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear':\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest':\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut')\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n #concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(res_inpt, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n@slim.add_arg_scope\ndef bottleneck_skip(inputs, skip, depth, depth_bottleneck, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': #双线性插值\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': #最近邻插值\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #skip = attention_block(skip)\n res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method) #res_inpt为input经过插值处理\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') #第一次resskip时,2048 != 512,要把shortcut的深度变为512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n\n@slim.add_arg_scope\ndef bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,\n outputs_collections=None, scope=None):\n\n \"\"\"Bottleneck residual unit variant with BN after convolutions.\n\n This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for\n its definition. Note that we use here the bottleneck variant which has an\n extra bottleneck layer.\n\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. Determines the amount of downsampling of\n the units output compared to its input.\n rate: An integer, rate for atrous convolution.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n\n Returns:\n The ResNet unit's output.\n \"\"\"\n with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n if depth == depth_in:\n shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')\n else:\n shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut')\n\n # residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,\n # scope='conv1')\n # residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,\n # rate=rate, scope='conv2')\n\n residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n\ndef resnet_v1(inputs,\n blocks,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n include_root_block=True,\n reuse=None,\n scope=None):\n \"\"\"Generator for v1 ResNet models.\n\n This function generates a family of ResNet v1 models. See the resnet_v1_*()\n methods for specific model instantiations, obtained by selecting different\n block instantiations that produce ResNets of various depths.\n\n Training for image classification on Imagenet is usually done with [224, 224]\n inputs, resulting in [7, 7] feature maps at the output of the last ResNet\n block for the ResNets defined in [1] that have nominal stride equal to 32.\n However, for dense prediction tasks we advise that one uses inputs with\n spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. 
In\n this case the feature maps at the ResNet output will have spatial shape\n [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]\n and corners exactly aligned with the input image corners, which greatly\n facilitates alignment of the features to the image. Using as input [225, 225]\n images results in [8, 8] feature maps at the output of the last ResNet block.\n\n For dense prediction tasks, the ResNet needs to run in fully-convolutional\n (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all\n have nominal stride equal to 32 and a good choice in FCN mode is to use\n output_stride=16 in order to increase the density of the computed features at\n small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n blocks: A list of length equal to the number of ResNet blocks. Each element\n is a resnet_utils.Block object describing the units in the block.\n num_classes: Number of predicted classes for classification tasks. If None\n we return the features before the logit layer.\n global_pool: If True, we perform global average pooling before computing the\n logits. Set to True for image classification, False for dense prediction.\n output_stride: If None, then the output will be computed at the nominal\n network stride. If output_stride is not None, it specifies the requested\n ratio of input to output spatial resolution.\n include_root_block: If True, include the initial convolution followed by\n max-pooling, if False excludes it.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n If global_pool is False, then height_out and width_out are reduced by a\n factor of output_stride compared to the respective height_in and width_in,\n else both height_out and width_out equal one. If num_classes is None, then\n net is the output of the last ResNet block, potentially after global\n average pooling. 
If num_classes is not None, net contains the pre-softmax\n activations.\n end_points: A dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: If the target output_stride is not valid.\n \"\"\"\n with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:\n end_points_collection = sc.name + '_end_points'\n with slim.arg_scope([slim.conv2d, bottleneck,\n resnet_utils.stack_blocks_dense],\n outputs_collections=end_points_collection):\n net = inputs\n if include_root_block:\n if output_stride is not None:\n if output_stride % 4 != 0:\n raise ValueError('The output_stride needs to be a multiple of 4.')\n output_stride /= 4\n net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')\n net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')\n net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)\n if global_pool:\n # Global average pooling.\n net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)\n pooled_features = net\n if num_classes is not None:\n net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='logits')\n # Convert end_points_collection into a dictionary of end_points.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if global_pool:\n end_points['pooled'] = slim.flatten(pooled_features, scope='pool_output')\n if num_classes is not None:\n end_points['predictions'] = slim.softmax(net, scope='predictions')\n return net, end_points\nresnet_v1.default_image_size = 224\n\n\ndef resnet_v1_50(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_50'):\n \"\"\"ResNet-50 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)\n ]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_101(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_101'):\n \"\"\"ResNet-101 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)\n ]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_152(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_152'):\n \"\"\"ResNet-152 model of [1]. 
See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_200(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_200'):\n \"\"\"ResNet-200 model of [2]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n"
] |
[
[
"tensorflow.shape",
"tensorflow.nn.relu",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.depth_to_space",
"tensorflow.reduce_mean"
]
] |
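The `sub_pixel` and `sub_pixel_skip` blocks above upsample by widening the channel axis with a 1x1 conv and then calling `tf.depth_to_space`. A minimal NumPy sketch of that pixel-shuffle rearrangement, for intuition only — the function name and toy shapes here are illustrative, not part of the repo:

```python
import numpy as np

def depth_to_space(x, scale):
    """Rearrange [N, H, W, C*scale**2] into [N, H*scale, W*scale, C].

    Mirrors the NHWC channel ordering used by tf.depth_to_space.
    """
    n, h, w, c = x.shape
    assert c % (scale * scale) == 0, "channels must be divisible by scale^2"
    c_out = c // (scale * scale)
    x = x.reshape(n, h, w, scale, scale, c_out)   # split channels into blocks
    x = x.transpose(0, 1, 3, 2, 4, 5)             # interleave blocks spatially
    return x.reshape(n, h * scale, w * scale, c_out)

x = np.arange(2 * 2 * 8, dtype=np.float32).reshape(1, 2, 2, 8)
print(depth_to_space(x, 2).shape)  # (1, 4, 4, 2)
```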
collector-m/LiDAR-MOS
|
[
"7ccbb63b4ee7c40195b35dd0dddd71473fae25b1"
] |
[
"utils/auxiliary/filelist2files.py"
] |
[
"#!/usr/bin/python3\n\nimport os\nimport sys\nimport shutil\nimport numpy as np\nimport scipy.io as sio\n\nfrom tqdm import tqdm\n\ndef pack(array):\n \"\"\" convert a boolean array into a bitwise array. \"\"\"\n array = array.reshape((-1))\n\n #compressing bit flags.\n # yapf: disable\n compressed = array[::8] << 7 | array[1::8] << 6 | array[2::8] << 5 | array[3::8] << 4 | array[4::8] << 3 | array[5::8] << 2 | array[6::8] << 1 | array[7::8]\n # yapf: enable\n\n return np.array(compressed, dtype=np.uint8)\n\nif __name__ == \"__main__\":\n \"\"\"\n Convert a given directory of mat files and the given filelist into a separate directory\n containing the files in the file list.\n \"\"\"\n\n if len(sys.argv) < 2:\n print(\"./filelist2files.py <input-root-directory> <output-root-directory> [<filelist>]\")\n exit(1)\n\n src_dir = sys.argv[1]\n dst_dir = sys.argv[2]\n\n files = None\n\n if len(sys.argv) > 3:\n files = [line.strip().split(\"_\") for line in open(sys.argv[3])]\n else:\n\n seq_dirs = [d for d in os.listdir(src_dir) if os.path.isdir(os.path.join(src_dir, d))]\n files = []\n for d in seq_dirs:\n files.extend([(d, os.path.splitext(f)[0]) for f in os.listdir(os.path.join(src_dir, d, \"input\"))])\n\n print(\"Processing {} files.\".format(len(files)))\n\n for seq_dir, filename in tqdm(files):\n\n if os.path.exists(os.path.join(src_dir, seq_dir, \"input\", filename + \".mat\")):\n data = sio.loadmat(os.path.join(src_dir, seq_dir, \"input\", filename + \".mat\"))\n\n out_dir = os.path.join(dst_dir, seq_dir, \"voxels\")\n os.makedirs(out_dir, exist_ok=True)\n\n compressed = pack(data[\"voxels\"])\n compressed.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".bin\"))\n\n if os.path.exists(os.path.join(src_dir, seq_dir, \"target_gt\", filename + \".mat\")):\n data = sio.loadmat(os.path.join(src_dir, seq_dir, \"target_gt\", filename + \".mat\"))\n\n out_dir = os.path.join(dst_dir, seq_dir, \"voxels\")\n os.makedirs(out_dir, exist_ok=True)\n\n labels = data[\"voxels\"].astype(np.uint16)\n labels.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".label\"))\n\n occlusions = pack(data[\"occluded\"])\n occlusions.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".occluded\"))\n\n invalid = pack(data[\"invalid\"])\n invalid.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".invalid\"))\n"
] |
[
[
"numpy.array"
]
] |
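`pack()` above stores eight boolean voxel flags per byte, with element 0 in the most significant bit. A hedged round-trip check — `unpack` is a hypothetical helper, not part of the repo — relying on the fact that `np.unpackbits` is MSB-first by default and therefore matches `pack()`'s shift pattern:

```python
import numpy as np

def unpack(compressed, size):
    # MSB-first expansion undoes pack(): bit 7 of each byte is element 0.
    return np.unpackbits(compressed)[:size]

flags = np.random.randint(0, 2, size=32).astype(np.uint8)
packed = (flags[::8] << 7 | flags[1::8] << 6 | flags[2::8] << 5 |
          flags[3::8] << 4 | flags[4::8] << 3 | flags[5::8] << 2 |
          flags[6::8] << 1 | flags[7::8]).astype(np.uint8)  # same as pack()
assert (unpack(packed, flags.size) == flags).all()
```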
artursbm/fuzzy-logic
|
[
"79a4879deb7b09b4738b0c82234506b8ab1b0392"
] |
[
"fuzzy_c_means/main_fcm_validation.py"
] |
[
"# Artur Mello\n# Fuzzy C Means - Algorithm validation and performance analysis\n# TP 1 - Sistemas Nebulosos\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import io\nfrom fuzzy_c_means import fuzzy_c_means\n\n\ndef main():\n k = 4\n samples = np.asarray(io.loadmat(\"fcm_dataset.mat\")[\"x\"])\n avg_iterations = 0\n reps = 100\n\n for i in range(reps):\n\n samples, centroids, data_clusters, iterations = fuzzy_c_means(samples, k)\n avg_iterations += iterations\n\n plt.scatter(samples[:,0], samples[:, 1], c=data_clusters[:, 0])\n plt.scatter(centroids[:,0], centroids[:, 1], c='red')\n plt.title('Amostras Categorizadas')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.savefig('teste_fcm.png')\n plt.show()\n print(\"Convergência alcançada, em média, em {} iterações\".format(avg_iterations/reps))\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"scipy.io.loadmat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter"
]
] |
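The validation script treats `fuzzy_c_means` as a black box. For reference, one iteration of the classic algorithm it names — membership update followed by centroid update — can be sketched in a few lines of NumPy; the imported implementation may differ in initialization and stopping criterion, so this is illustrative only:

```python
import numpy as np

def fcm_step(x, centroids, m=2.0, eps=1e-9):
    """One fuzzy c-means iteration with fuzzifier m."""
    # Distance of every sample to every centroid: shape (n_samples, k).
    d = np.linalg.norm(x[:, None, :] - centroids[None, :, :], axis=2) + eps
    # Memberships u_ij proportional to d_ij ** (-2 / (m - 1)); rows sum to 1.
    u = d ** (-2.0 / (m - 1.0))
    u /= u.sum(axis=1, keepdims=True)
    # New centroids are the u**m-weighted means of the samples.
    w = u ** m
    centroids = (w.T @ x) / w.sum(axis=0)[:, None]
    return u, centroids

rng = np.random.default_rng(0)
pts = rng.normal(size=(200, 2))
cents = pts[rng.choice(len(pts), size=4, replace=False)]
for _ in range(30):
    u, cents = fcm_step(pts, cents)
```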
jancervenka/pandas
|
[
"b2ebd5ae14580dde793e40097c6a283d82c69ad9"
] |
[
"pandas/conftest.py"
] |
[
"from collections import abc\nfrom datetime import date, time, timedelta, timezone\nfrom decimal import Decimal\nimport operator\nimport os\n\nfrom dateutil.tz import tzlocal, tzutc\nimport hypothesis\nfrom hypothesis import strategies as st\nimport numpy as np\nimport pytest\nfrom pytz import FixedOffset, utc\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport pandas._testing as tm\nfrom pandas.core import ops\nfrom pandas.core.indexes.api import Index, MultiIndex\n\nhypothesis.settings.register_profile(\n \"ci\",\n # Hypothesis timing checks are tuned for scalars by default, so we bump\n # them from 200ms to 500ms per test case as the global default. If this\n # is too short for a specific test, (a) try to make it faster, and (b)\n # if it really is slow add `@settings(deadline=...)` with a working value,\n # or `deadline=None` to entirely disable timeouts for that test.\n deadline=500,\n suppress_health_check=(hypothesis.HealthCheck.too_slow,),\n)\nhypothesis.settings.load_profile(\"ci\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--skip-slow\", action=\"store_true\", help=\"skip slow tests\")\n parser.addoption(\"--skip-network\", action=\"store_true\", help=\"skip network tests\")\n parser.addoption(\"--skip-db\", action=\"store_true\", help=\"skip db tests\")\n parser.addoption(\n \"--run-high-memory\", action=\"store_true\", help=\"run high memory tests\"\n )\n parser.addoption(\"--only-slow\", action=\"store_true\", help=\"run only slow tests\")\n parser.addoption(\n \"--strict-data-files\",\n action=\"store_true\",\n help=\"Fail if a test is skipped for missing data file.\",\n )\n\n\ndef pytest_runtest_setup(item):\n if \"slow\" in item.keywords and item.config.getoption(\"--skip-slow\"):\n pytest.skip(\"skipping due to --skip-slow\")\n\n if \"slow\" not in item.keywords and item.config.getoption(\"--only-slow\"):\n pytest.skip(\"skipping due to --only-slow\")\n\n if \"network\" in item.keywords and item.config.getoption(\"--skip-network\"):\n pytest.skip(\"skipping due to --skip-network\")\n\n if \"db\" in item.keywords and item.config.getoption(\"--skip-db\"):\n pytest.skip(\"skipping due to --skip-db\")\n\n if \"high_memory\" in item.keywords and not item.config.getoption(\n \"--run-high-memory\"\n ):\n pytest.skip(\"skipping high memory test since --run-high-memory was not set\")\n\n\n@pytest.fixture(autouse=True)\ndef configure_tests():\n \"\"\"\n Configure settings for all tests and test modules.\n \"\"\"\n pd.set_option(\"chained_assignment\", \"raise\")\n\n\n@pytest.fixture(autouse=True)\ndef add_imports(doctest_namespace):\n \"\"\"\n Make `np` and `pd` names available for doctests.\n \"\"\"\n doctest_namespace[\"np\"] = np\n doctest_namespace[\"pd\"] = pd\n\n\n@pytest.fixture(params=[\"bsr\", \"coo\", \"csc\", \"csr\", \"dia\", \"dok\", \"lil\"])\ndef spmatrix(request):\n \"\"\"\n Yields scipy sparse matrix classes.\n \"\"\"\n from scipy import sparse\n\n return getattr(sparse, request.param + \"_matrix\")\n\n\n@pytest.fixture(params=[0, 1, \"index\", \"columns\"], ids=lambda x: f\"axis {repr(x)}\")\ndef axis(request):\n \"\"\"\n Fixture for returning the axis numbers of a DataFrame.\n \"\"\"\n return request.param\n\n\naxis_frame = axis\n\n\n@pytest.fixture(params=[0, \"index\"], ids=lambda x: f\"axis {repr(x)}\")\ndef axis_series(request):\n \"\"\"\n Fixture for returning the axis numbers of a Series.\n \"\"\"\n return request.param\n\n\n@pytest.fixture\ndef ip():\n \"\"\"\n Get an instance of 
IPython.InteractiveShell.\n\n Will raise a skip if IPython is not installed.\n \"\"\"\n pytest.importorskip(\"IPython\", minversion=\"6.0.0\")\n from IPython.core.interactiveshell import InteractiveShell\n\n return InteractiveShell()\n\n\n@pytest.fixture(params=[True, False, None])\ndef observed(request):\n \"\"\"\n Pass in the observed keyword to groupby for [True, False]\n This indicates whether categoricals should return values for\n values which are not in the grouper [False / None], or only values which\n appear in the grouper [True]. [None] is supported for future compatibility\n if we decide to change the default (and would need to warn if this\n parameter is not passed).\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[True, False, None])\ndef ordered_fixture(request):\n \"\"\"\n Boolean 'ordered' parameter for Categorical.\n \"\"\"\n return request.param\n\n\n_all_arithmetic_operators = [\n \"__add__\",\n \"__radd__\",\n \"__sub__\",\n \"__rsub__\",\n \"__mul__\",\n \"__rmul__\",\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__truediv__\",\n \"__rtruediv__\",\n \"__pow__\",\n \"__rpow__\",\n \"__mod__\",\n \"__rmod__\",\n]\n\n\n@pytest.fixture(params=_all_arithmetic_operators)\ndef all_arithmetic_operators(request):\n \"\"\"\n Fixture for dunder names for common arithmetic operations.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(\n params=[\n operator.add,\n ops.radd,\n operator.sub,\n ops.rsub,\n operator.mul,\n ops.rmul,\n operator.truediv,\n ops.rtruediv,\n operator.floordiv,\n ops.rfloordiv,\n operator.mod,\n ops.rmod,\n operator.pow,\n ops.rpow,\n ]\n)\ndef all_arithmetic_functions(request):\n \"\"\"\n Fixture for operator and roperator arithmetic functions.\n\n Notes\n -----\n This includes divmod and rdivmod, whereas all_arithmetic_operators\n does not.\n \"\"\"\n return request.param\n\n\n_all_numeric_reductions = [\n \"sum\",\n \"max\",\n \"min\",\n \"mean\",\n \"prod\",\n \"std\",\n \"var\",\n \"median\",\n \"kurt\",\n \"skew\",\n]\n\n\n@pytest.fixture(params=_all_numeric_reductions)\ndef all_numeric_reductions(request):\n \"\"\"\n Fixture for numeric reduction names.\n \"\"\"\n return request.param\n\n\n_all_boolean_reductions = [\"all\", \"any\"]\n\n\n@pytest.fixture(params=_all_boolean_reductions)\ndef all_boolean_reductions(request):\n \"\"\"\n Fixture for boolean reduction names.\n \"\"\"\n return request.param\n\n\n_cython_table = pd.core.base.SelectionMixin._cython_table.items()\n\n\n@pytest.fixture(params=list(_cython_table))\ndef cython_table_items(request):\n \"\"\"\n Yields a tuple of a function and its corresponding name. 
Correspond to\n the list of aggregator \"Cython functions\" used on selected table items.\n \"\"\"\n return request.param\n\n\ndef _get_cython_table_params(ndframe, func_names_and_expected):\n \"\"\"\n Combine frame, functions from SelectionMixin._cython_table\n keys and expected result.\n\n Parameters\n ----------\n ndframe : DataFrame or Series\n func_names_and_expected : Sequence of two items\n The first item is a name of a NDFrame method ('sum', 'prod') etc.\n The second item is the expected return value.\n\n Returns\n -------\n list\n List of three items (DataFrame, function, expected result)\n \"\"\"\n results = []\n for func_name, expected in func_names_and_expected:\n results.append((ndframe, func_name, expected))\n results += [\n (ndframe, func, expected)\n for func, name in _cython_table\n if name == func_name\n ]\n return results\n\n\n@pytest.fixture(params=[\"__eq__\", \"__ne__\", \"__le__\", \"__lt__\", \"__ge__\", \"__gt__\"])\ndef all_compare_operators(request):\n \"\"\"\n Fixture for dunder names for common compare operations\n\n * >=\n * >\n * ==\n * !=\n * <\n * <=\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"__le__\", \"__lt__\", \"__ge__\", \"__gt__\"])\ndef compare_operators_no_eq_ne(request):\n \"\"\"\n Fixture for dunder names for compare operations except == and !=\n\n * >=\n * >\n * <\n * <=\n \"\"\"\n return request.param\n\n\n@pytest.fixture(\n params=[\"__and__\", \"__rand__\", \"__or__\", \"__ror__\", \"__xor__\", \"__rxor__\"]\n)\ndef all_logical_operators(request):\n \"\"\"\n Fixture for dunder names for common logical operations\n\n * |\n * &\n * ^\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[None, \"gzip\", \"bz2\", \"zip\", \"xz\"])\ndef compression(request):\n \"\"\"\n Fixture for trying common compression types in compression tests.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"gzip\", \"bz2\", \"zip\", \"xz\"])\ndef compression_only(request):\n \"\"\"\n Fixture for trying common compression types in compression tests excluding\n uncompressed case.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef writable(request):\n \"\"\"\n Fixture that an array is writable.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(scope=\"module\")\ndef datetime_tz_utc():\n \"\"\"\n Yields the UTC timezone object from the datetime module.\n \"\"\"\n return timezone.utc\n\n\n@pytest.fixture(params=[\"utc\", \"dateutil/UTC\", utc, tzutc(), timezone.utc])\ndef utc_fixture(request):\n \"\"\"\n Fixture to provide variants of UTC timezone strings and tzinfo objects.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"inner\", \"outer\", \"left\", \"right\"])\ndef join_type(request):\n \"\"\"\n Fixture for trying all types of join operations.\n \"\"\"\n return request.param\n\n\n@pytest.fixture\ndef strict_data_files(pytestconfig):\n \"\"\"\n Returns the configuration for the test setting `--strict-data-files`.\n \"\"\"\n return pytestconfig.getoption(\"--strict-data-files\")\n\n\n@pytest.fixture\ndef datapath(strict_data_files):\n \"\"\"\n Get the path to a data file.\n\n Parameters\n ----------\n path : str\n Path to the file, relative to ``pandas/tests/``\n\n Returns\n -------\n path including ``pandas/tests``.\n\n Raises\n ------\n ValueError\n If the path doesn't exist and the --strict-data-files option is set.\n \"\"\"\n BASE_PATH = os.path.join(os.path.dirname(__file__), \"tests\")\n\n def deco(*args):\n path = os.path.join(BASE_PATH, *args)\n if not os.path.exists(path):\n if 
strict_data_files:\n raise ValueError(\n f\"Could not find file {path} and --strict-data-files is set.\"\n )\n else:\n pytest.skip(f\"Could not find {path}.\")\n return path\n\n return deco\n\n\n@pytest.fixture\ndef iris(datapath):\n \"\"\"\n The iris dataset as a DataFrame.\n \"\"\"\n return pd.read_csv(datapath(\"data\", \"iris.csv\"))\n\n\n@pytest.fixture(params=[\"nlargest\", \"nsmallest\"])\ndef nselect_method(request):\n \"\"\"\n Fixture for trying all nselect methods.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"left\", \"right\", \"both\", \"neither\"])\ndef closed(request):\n \"\"\"\n Fixture for trying all interval closed parameters.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"left\", \"right\", \"both\", \"neither\"])\ndef other_closed(request):\n \"\"\"\n Secondary closed fixture to allow parametrizing over all pairs of closed.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[None, np.nan, pd.NaT, float(\"nan\"), np.float(\"NaN\"), pd.NA])\ndef nulls_fixture(request):\n \"\"\"\n Fixture for each null type in pandas.\n \"\"\"\n return request.param\n\n\nnulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture\n\n\n@pytest.fixture(params=[None, np.nan, pd.NaT])\ndef unique_nulls_fixture(request):\n \"\"\"\n Fixture for each null type in pandas, each null type exactly once.\n \"\"\"\n return request.param\n\n\n# Generate cartesian product of unique_nulls_fixture:\nunique_nulls_fixture2 = unique_nulls_fixture\n\n\nTIMEZONES = [\n None,\n \"UTC\",\n \"US/Eastern\",\n \"Asia/Tokyo\",\n \"dateutil/US/Pacific\",\n \"dateutil/Asia/Singapore\",\n tzutc(),\n tzlocal(),\n FixedOffset(300),\n FixedOffset(0),\n FixedOffset(-300),\n timezone.utc,\n timezone(timedelta(hours=1)),\n timezone(timedelta(hours=-1), name=\"foo\"),\n]\nTIMEZONE_IDS = [repr(i) for i in TIMEZONES]\n\n\n@td.parametrize_fixture_doc(str(TIMEZONE_IDS))\n@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)\ndef tz_naive_fixture(request):\n \"\"\"\n Fixture for trying timezones including default (None): {0}\n \"\"\"\n return request.param\n\n\n@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))\n@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])\ndef tz_aware_fixture(request):\n \"\"\"\n Fixture for trying explicit timezones: {0}\n \"\"\"\n return request.param\n\n\n# Generate cartesian product of tz_aware_fixture:\ntz_aware_fixture2 = tz_aware_fixture\n\n\n# ----------------------------------------------------------------\n# Dtypes\n# ----------------------------------------------------------------\n\nUNSIGNED_INT_DTYPES = [\"uint8\", \"uint16\", \"uint32\", \"uint64\"]\nUNSIGNED_EA_INT_DTYPES = [\"UInt8\", \"UInt16\", \"UInt32\", \"UInt64\"]\nSIGNED_INT_DTYPES = [int, \"int8\", \"int16\", \"int32\", \"int64\"]\nSIGNED_EA_INT_DTYPES = [\"Int8\", \"Int16\", \"Int32\", \"Int64\"]\nALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES\nALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES\n\nFLOAT_DTYPES = [float, \"float32\", \"float64\"]\nCOMPLEX_DTYPES = [complex, \"complex64\", \"complex128\"]\nSTRING_DTYPES = [str, \"str\", \"U\"]\n\nDATETIME64_DTYPES = [\"datetime64[ns]\", \"M8[ns]\"]\nTIMEDELTA64_DTYPES = [\"timedelta64[ns]\", \"m8[ns]\"]\n\nBOOL_DTYPES = [bool, \"bool\"]\nBYTES_DTYPES = [bytes, \"bytes\"]\nOBJECT_DTYPES = [object, \"object\"]\n\nALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES\nALL_NUMPY_DTYPES = (\n ALL_REAL_DTYPES\n + COMPLEX_DTYPES\n + STRING_DTYPES\n + DATETIME64_DTYPES\n + TIMEDELTA64_DTYPES\n + BOOL_DTYPES\n 
+ OBJECT_DTYPES\n + BYTES_DTYPES\n)\n\n\n@pytest.fixture(params=STRING_DTYPES)\ndef string_dtype(request):\n \"\"\"\n Parametrized fixture for string dtypes.\n\n * str\n * 'str'\n * 'U'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=BYTES_DTYPES)\ndef bytes_dtype(request):\n \"\"\"\n Parametrized fixture for bytes dtypes.\n\n * bytes\n * 'bytes'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=OBJECT_DTYPES)\ndef object_dtype(request):\n \"\"\"\n Parametrized fixture for object dtypes.\n\n * object\n * 'object'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=DATETIME64_DTYPES)\ndef datetime64_dtype(request):\n \"\"\"\n Parametrized fixture for datetime64 dtypes.\n\n * 'datetime64[ns]'\n * 'M8[ns]'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=TIMEDELTA64_DTYPES)\ndef timedelta64_dtype(request):\n \"\"\"\n Parametrized fixture for timedelta64 dtypes.\n\n * 'timedelta64[ns]'\n * 'm8[ns]'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=FLOAT_DTYPES)\ndef float_dtype(request):\n \"\"\"\n Parameterized fixture for float dtypes.\n\n * float\n * 'float32'\n * 'float64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=COMPLEX_DTYPES)\ndef complex_dtype(request):\n \"\"\"\n Parameterized fixture for complex dtypes.\n\n * complex\n * 'complex64'\n * 'complex128'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=SIGNED_INT_DTYPES)\ndef sint_dtype(request):\n \"\"\"\n Parameterized fixture for signed integer dtypes.\n\n * int\n * 'int8'\n * 'int16'\n * 'int32'\n * 'int64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=UNSIGNED_INT_DTYPES)\ndef uint_dtype(request):\n \"\"\"\n Parameterized fixture for unsigned integer dtypes.\n\n * 'uint8'\n * 'uint16'\n * 'uint32'\n * 'uint64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=ALL_INT_DTYPES)\ndef any_int_dtype(request):\n \"\"\"\n Parameterized fixture for any integer dtype.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=ALL_EA_INT_DTYPES)\ndef any_nullable_int_dtype(request):\n \"\"\"\n Parameterized fixture for any nullable integer dtype.\n\n * 'UInt8'\n * 'Int8'\n * 'UInt16'\n * 'Int16'\n * 'UInt32'\n * 'Int32'\n * 'UInt64'\n * 'Int64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=ALL_REAL_DTYPES)\ndef any_real_dtype(request):\n \"\"\"\n Parameterized fixture for any (purely) real numeric dtype.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=ALL_NUMPY_DTYPES)\ndef any_numpy_dtype(request):\n \"\"\"\n Parameterized fixture for all numpy dtypes.\n\n * bool\n * 'bool'\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n * complex\n * 'complex64'\n * 'complex128'\n * str\n * 'str'\n * 'U'\n * bytes\n * 'bytes'\n * 'datetime64[ns]'\n * 'M8[ns]'\n * 'timedelta64[ns]'\n * 'm8[ns]'\n * object\n * 'object'\n \"\"\"\n return request.param\n\n\n# categoricals are handled separately\n_any_skipna_inferred_dtype = [\n (\"string\", [\"a\", np.nan, \"c\"]),\n (\"string\", [\"a\", pd.NA, \"c\"]),\n (\"bytes\", [b\"a\", np.nan, b\"c\"]),\n (\"empty\", [np.nan, np.nan, np.nan]),\n (\"empty\", []),\n (\"mixed-integer\", [\"a\", np.nan, 2]),\n (\"mixed\", [\"a\", np.nan, 2.0]),\n 
(\"floating\", [1.0, np.nan, 2.0]),\n (\"integer\", [1, np.nan, 2]),\n (\"mixed-integer-float\", [1, np.nan, 2.0]),\n (\"decimal\", [Decimal(1), np.nan, Decimal(2)]),\n (\"boolean\", [True, np.nan, False]),\n (\"boolean\", [True, pd.NA, False]),\n (\"datetime64\", [np.datetime64(\"2013-01-01\"), np.nan, np.datetime64(\"2018-01-01\")]),\n (\"datetime\", [pd.Timestamp(\"20130101\"), np.nan, pd.Timestamp(\"20180101\")]),\n (\"date\", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),\n # The following two dtypes are commented out due to GH 23554\n # ('complex', [1 + 1j, np.nan, 2 + 2j]),\n # ('timedelta64', [np.timedelta64(1, 'D'),\n # np.nan, np.timedelta64(2, 'D')]),\n (\"timedelta\", [timedelta(1), np.nan, timedelta(2)]),\n (\"time\", [time(1), np.nan, time(2)]),\n (\"period\", [pd.Period(2013), pd.NaT, pd.Period(2018)]),\n (\"interval\", [pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]),\n]\nids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id\n\n\n@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)\ndef any_skipna_inferred_dtype(request):\n \"\"\"\n Fixture for all inferred dtypes from _libs.lib.infer_dtype\n\n The covered (inferred) types are:\n * 'string'\n * 'empty'\n * 'bytes'\n * 'mixed'\n * 'mixed-integer'\n * 'mixed-integer-float'\n * 'floating'\n * 'integer'\n * 'decimal'\n * 'boolean'\n * 'datetime64'\n * 'datetime'\n * 'date'\n * 'timedelta'\n * 'time'\n * 'period'\n * 'interval'\n\n Returns\n -------\n inferred_dtype : str\n The string for the inferred dtype from _libs.lib.infer_dtype\n values : np.ndarray\n An array of object dtype that will be inferred to have\n `inferred_dtype`\n\n Examples\n --------\n >>> import pandas._libs.lib as lib\n >>>\n >>> def test_something(any_skipna_inferred_dtype):\n ... inferred_dtype, values = any_skipna_inferred_dtype\n ... # will pass\n ... 
assert lib.infer_dtype(values, skipna=True) == inferred_dtype\n \"\"\"\n inferred_dtype, values = request.param\n values = np.array(values, dtype=object) # object dtype to avoid casting\n\n # correctness of inference tested in tests/dtypes/test_inference.py\n return inferred_dtype, values\n\n\n@pytest.fixture(\n params=[\n getattr(pd.offsets, o)\n for o in pd.offsets.__all__\n if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)\n ]\n)\ndef tick_classes(request):\n \"\"\"\n Fixture for Tick based datetime offsets available for a time series.\n \"\"\"\n return request.param\n\n\n# ----------------------------------------------------------------\n# Global setup for tests using Hypothesis\n\n\n# Registering these strategies makes them globally available via st.from_type,\n# which is use for offsets in tests/tseries/offsets/test_offsets_properties.py\nfor name in \"MonthBegin MonthEnd BMonthBegin BMonthEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())\n )\n\nfor name in \"YearBegin YearEnd BYearBegin BYearEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls,\n st.builds(\n cls,\n n=st.integers(-5, 5),\n normalize=st.booleans(),\n month=st.integers(min_value=1, max_value=12),\n ),\n )\n\nfor name in \"QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls,\n st.builds(\n cls,\n n=st.integers(-24, 24),\n normalize=st.booleans(),\n startingMonth=st.integers(min_value=1, max_value=12),\n ),\n )\n\n\n@pytest.fixture\ndef datetime_series():\n \"\"\"\n Fixture for Series of floats with DatetimeIndex\n \"\"\"\n s = tm.makeTimeSeries()\n s.name = \"ts\"\n return s\n\n\n@pytest.fixture\ndef float_frame():\n \"\"\"\n Fixture for DataFrame of floats with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D'].\n\n A B C D\n P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465\n qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901\n tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433\n wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651\n M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938\n QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053\n r78Jwns6dn -0.653707 0.883127 0.682199 0.206159\n ... ... ... ... 
...\n IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316\n lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999\n qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121\n yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962\n 65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987\n eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871\n xSucinXxuV -1.263557 0.252799 -0.552247 0.400426\n\n [30 rows x 4 columns]\n \"\"\"\n return DataFrame(tm.getSeriesData())\n\n\n@pytest.fixture(params=[pd.Index, pd.Series], ids=[\"index\", \"series\"])\ndef index_or_series(request):\n \"\"\"\n Fixture to parametrize over Index and Series, made necessary by a mypy\n bug, giving an error:\n\n List item 0 has incompatible type \"Type[Series]\"; expected \"Type[PandasObject]\"\n\n See GH#29725\n \"\"\"\n return request.param\n\n\n@pytest.fixture\ndef dict_subclass():\n \"\"\"\n Fixture for a dictionary subclass.\n \"\"\"\n\n class TestSubDict(dict):\n def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)\n\n return TestSubDict\n\n\n@pytest.fixture\ndef non_mapping_dict_subclass():\n \"\"\"\n Fixture for a non-mapping dictionary subclass.\n \"\"\"\n\n class TestNonDictMapping(abc.Mapping):\n def __init__(self, underlying_dict):\n self._data = underlying_dict\n\n def __getitem__(self, key):\n return self._data.__getitem__(key)\n\n def __iter__(self):\n return self._data.__iter__()\n\n def __len__(self):\n return self._data.__len__()\n\n return TestNonDictMapping\n\n\ndef _gen_mi():\n # a MultiIndex used to test the general functionality of this object\n\n # See Also: tests.multi.conftest.idx\n major_axis = Index([\"foo\", \"bar\", \"baz\", \"qux\"])\n minor_axis = Index([\"one\", \"two\"])\n\n major_codes = np.array([0, 0, 1, 2, 3, 3])\n minor_codes = np.array([0, 1, 0, 1, 0, 1])\n index_names = [\"first\", \"second\"]\n mi = MultiIndex(\n levels=[major_axis, minor_axis],\n codes=[major_codes, minor_codes],\n names=index_names,\n verify_integrity=False,\n )\n return mi\n\n\nindices_dict = {\n \"unicode\": tm.makeUnicodeIndex(100),\n \"string\": tm.makeStringIndex(100),\n \"datetime\": tm.makeDateIndex(100),\n \"datetime-tz\": tm.makeDateIndex(100, tz=\"US/Pacific\"),\n \"period\": tm.makePeriodIndex(100),\n \"timedelta\": tm.makeTimedeltaIndex(100),\n \"int\": tm.makeIntIndex(100),\n \"uint\": tm.makeUIntIndex(100),\n \"range\": tm.makeRangeIndex(100),\n \"float\": tm.makeFloatIndex(100),\n \"bool\": tm.makeBoolIndex(2),\n \"categorical\": tm.makeCategoricalIndex(100),\n \"interval\": tm.makeIntervalIndex(100),\n \"empty\": Index([]),\n \"tuples\": MultiIndex.from_tuples(zip([\"foo\", \"bar\", \"baz\"], [1, 2, 3])),\n \"multi\": _gen_mi(),\n \"repeats\": Index([0, 0, 1, 1, 2, 2]),\n}\n\n\n@pytest.fixture(params=indices_dict.keys())\ndef indices(request):\n # copy to avoid mutation, e.g. 
setting .name\n return indices_dict[request.param].copy()\n\n\ndef _create_series(index):\n \"\"\" Helper for the _series dict \"\"\"\n size = len(index)\n data = np.random.randn(size)\n return pd.Series(data, index=index, name=\"a\")\n\n\n_series = {\n f\"series-with-{index_id}-index\": _create_series(index)\n for index_id, index in indices_dict.items()\n}\n\n\n_narrow_dtypes = [\n np.float16,\n np.float32,\n np.int8,\n np.int16,\n np.int32,\n np.uint8,\n np.uint16,\n np.uint32,\n]\n_narrow_series = {\n f\"{dtype.__name__}-series\": tm.makeFloatSeries(name=\"a\").astype(dtype)\n for dtype in _narrow_dtypes\n}\n\n_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}\n\n\n@pytest.fixture(params=_index_or_series_objs.keys())\ndef index_or_series_obj(request):\n \"\"\"\n Fixture for tests on indexes, series and series with a narrow dtype\n copy to avoid mutation, e.g. setting .name\n \"\"\"\n return _index_or_series_objs[request.param].copy(deep=True)\n"
] |
[
[
"pandas._testing.makeTimedeltaIndex",
"pandas.Timestamp",
"pandas._testing.makeUnicodeIndex",
"pandas._testing.makeDateIndex",
"pandas.set_option",
"pandas.Interval",
"pandas.core.base.SelectionMixin._cython_table.items",
"pandas.core.indexes.api.Index",
"pandas._testing.makeFloatSeries",
"pandas._testing.makeUIntIndex",
"pandas._testing.makeBoolIndex",
"pandas.Period",
"pandas._testing.makeFloatIndex",
"numpy.array",
"pandas._testing.makePeriodIndex",
"numpy.float",
"numpy.random.randn",
"pandas._testing.makeStringIndex",
"pandas._testing.makeCategoricalIndex",
"pandas._testing.makeIntervalIndex",
"numpy.datetime64",
"pandas._testing.makeTimeSeries",
"pandas._testing.getSeriesData",
"pandas.core.indexes.api.MultiIndex",
"pandas.Series",
"pandas._testing.makeRangeIndex",
"pandas._testing.makeIntIndex"
]
] |
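One detail worth calling out in this conftest: fixture aliases such as `nulls_fixture2 = nulls_fixture` are how cartesian products are generated — a test requesting both names runs once per pair of null values. A hypothetical test module (it would need to be collected under pandas' test tree for these fixtures to resolve):

```python
# test_nulls_demo.py -- hypothetical file placed under pandas/tests/
import pandas as pd

def test_null_pairs(nulls_fixture, nulls_fixture2):
    # Six null flavours x six = 36 generated test cases.
    assert pd.isna(nulls_fixture)
    assert pd.isna(nulls_fixture2)
```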
liragabriel/DS
|
[
"d75402d5c11dc9c6832260e49b591128fbc1b9ca"
] |
[
"netstats/lista_dataframe.py"
] |
[
"import pandas as pd\nfrom netstats.fsan import Fsan\n\n\nclass ListaDataframe:\n\n def __init__(self, operacao):\n self.operacao = operacao\n\n\n def dataframe(self):\n\n \"\"\"\n Retorna uma lista de dataframes por FSAN, cada dataframe contém as operações realizadas\n\n com a FSAN.\n\n Returns\n -------\n list\n \"\"\"\n\n fsan = Fsan(self.operacao).lista_de_fsans()\n\n sequencia = []\n for i in fsan:\n lista = []\n for j in self.operacao.operacao:\n if i in j or i+':' in j:\n lista.append(j)\n sequencia.append(lista)\n\n lista_data = []\n for i in sequencia:\n lista_data.append(pd.DataFrame(i))\n pd.set_option('display.max_colwidth', -1)\n\n for i in range(len(lista_data)):\n lista_data[i].columns = [fsan[i]]\n\n return lista_data\n"
] |
[
[
"pandas.DataFrame",
"pandas.set_option"
]
] |
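`ListaDataframe.dataframe()` buckets the raw operation lines by FSAN code and returns one single-column DataFrame per FSAN (note that the `i + ':' in j` test is already implied by `i in j`). A toy sketch of the same grouping with made-up data, replacing the `Fsan` helper with a hard-coded list:

```python
import pandas as pd

operacao = ["FSAN1: link up", "FSAN2 provisioned", "FSAN1 rebooted"]
fsans = ["FSAN1", "FSAN2"]  # stand-in for Fsan(operacao).lista_de_fsans()

# One DataFrame per FSAN, column named after the FSAN code.
lista_data = [pd.DataFrame({f: [l for l in operacao if f in l]}) for f in fsans]
print(lista_data[0])  # the two FSAN1 lines in a single 'FSAN1' column
```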
haddocking/disvis
|
[
"a922bd079b41ad5ef3ac33f4e68968f8978626d2"
] |
[
"disvis/IO/mmcif.py"
] |
[
"from __future__ import print_function\nimport sys\nfrom collections import OrderedDict\nimport numpy as np\n\ndef parse_cif(infile):\n if isinstance(infile, file):\n pass\n elif isinstance(infile, str):\n infile = open(infile)\n else:\n raise TypeError(\"Input should either be a file or string.\")\n\n atom_site = OrderedDict()\n with infile as f:\n for line in f:\n \n if line.startswith('_atom_site.'):\n words = line.split('.')\n atom_site[words[1].strip()] = [] \n\n if line.startswith('ATOM'):\n words = line.split()\n for key, word in zip(atom_site, words):\n atom_site[key].append(word)\n\n natoms = len(atom_site['id'])\n dtype = [('atom_id', np.int64), ('name', np.str_, 4), \n ('resn', np.str_, 4), ('chain', np.str_, 2), \n ('resi', np.int64), ('x', np.float64),\n ('y', np.float64), ('z', np.float64), \n ('occupancy', np.float64), ('bfactor', np.float64),\n ('element', np.str_, 2), ('charge', np.str_, 2),\n ('model', np.int64),\n ]\n\n cifdata = np.zeros(natoms, dtype=dtype)\n cifdata['atom_id'] = np.asarray(atom_site['id'], dtype=np.int64)\n cifdata['name'] = atom_site['label_atom_id']\n cifdata['resn'] = atom_site['label_comp_id']\n cifdata['chain'] = atom_site['label_asym_id']\n cifdata['resi'] = atom_site['label_seq_id']\n cifdata['x'] = atom_site['Cartn_x']\n cifdata['y'] = atom_site['Cartn_y']\n cifdata['z'] = atom_site['Cartn_z']\n cifdata['occupancy'] = atom_site['occupancy']\n cifdata['bfactor'] = atom_site['B_iso_or_equiv']\n cifdata['element'] = atom_site['type_symbol'].title()\n cifdata['charge'] = atom_site['pdbx_formal_charge']\n cifdata['model'] = atom_site['pdbx_PDB_model_num']\n\n return cifdata\n\nif __name__=='__main__': \n import sys\n infile = sys.argv[1]\n data = parse_cif(infile)\n\n"
] |
[
[
"numpy.asarray",
"numpy.zeros"
]
] |
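`parse_cif()` relies on NumPy structured arrays: a single ndarray with named, typed fields, one per `_atom_site` column. A minimal self-contained illustration of that pattern (toy fields and values, not real mmCIF data):

```python
import numpy as np

dtype = [("atom_id", np.int64), ("name", np.str_, 4), ("x", np.float64)]
atoms = np.zeros(2, dtype=dtype)
atoms["atom_id"] = [1, 2]
atoms["name"] = ["CA", "CB"]
atoms["x"] = [12.3, 4.56]
print(atoms[atoms["name"] == "CA"])  # field-based boolean selection
```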
kgrozdanic/lumen-data-science-2022
|
[
"115e14d8502210c662a68913365dc9c1179c3998"
] |
[
"src/data/outlier_detection.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport cv2\nfrom imutils import paths\nimport argparse\nimport pickle\nimport vptree\nimport matplotlib.pyplot as plt\nimport time\nfrom tqdm import tqdm\nimport os\nfrom skimage.io import imread, imshow\nimport seaborn as sns\nfrom src.helpers import *\n\nMAIN_PATH = \"../../data/full_75x75/train\"\nPATH_224 = \"../../data/full_170x170/train\"\n\nPATH = \"../../data/full_170x170/\"\n\nrgb = [\"R\", \"G\", \"B\"]\nsuffix = [\"mean\", \"relative\", \"std\"]\nfeature_cols = [r + '_' + s for s in suffix for r in rgb] + [\"mean\", \"std\"]\n\nposition = [\"\", \"_NE\", \"_NW\", \"_SW\", \"_SE\", \"_center\"]\ncols = [col + pos for pos in position for col in feature_cols]\n\n\ndef show_imgs(uuids):\n fig, ax = plt.subplots(len(uuids), 4, figsize=(16, 4 * len(uuids)))\n\n for i, uuid in enumerate(uuids):\n img_path = PATH_224 + \"/\" + uuid\n for j, angle in enumerate([0, 90, 180, 270]):\n path = f\"{img_path}/{angle}.jpg\"\n img = imread(path)\n if len(uuids) == 1:\n ax[j].imshow(img, cmap=plt.cm.gray)\n else:\n ax[i][j].imshow(img, cmap=plt.cm.gray)\n plt.tight_layout()\n\n\ndef generate_features() -> pd.DataFrame:\n df = pd.DataFrame(columns=[\"uuid\", \"angle\", *cols])\n imagePaths = list(paths.list_images(MAIN_PATH))\n\n # Loading and hashing the images\n for img_path in tqdm(imagePaths):\n original_image = imread(img_path)\n features = []\n\n img_path = os.path.normpath(img_path).split(\"\\\\\")\n uuid = img_path[-2]\n angle = img_path[-1].split('.')[0]\n\n for pos in position:\n if pos == \"_NW\":\n image = original_image[:40, :40]\n elif pos == \"_NE\":\n image = original_image[:40, 40:]\n elif pos == \"_SE\":\n image = original_image[40:, 40:]\n elif pos == \"_SW\":\n image = original_image[40:, :40]\n elif pos == \"_center\":\n image = original_image[20:60, 20:60]\n else:\n image = original_image\n\n f_mean = image.sum((0, 1)) / (image.shape[0] * image.shape[1])\n f_relative = image.sum((0, 1)) / (image.sum((0, 1)).sum() - image.sum((0, 1)))\n f_std = image.std((0, 1))\n\n M = image.mean()\n S = image.std()\n\n features += [*f_mean, *f_relative, *f_std, M, S]\n\n df.loc[len(df.index)] = [uuid, angle, *features]\n return df\n\n\ndef detect_possible_outliers(df: pd.DataFrame) -> pd.DataFrame:\n print('Detection possible outliers')\n N_L = 90\n N_s = 90\n\n figures_path = '../../reports/outliers/'\n possible_outliers = set()\n\n for pos in tqdm(position):\n for col in feature_cols:\n indices = list(set(df[col].nlargest(n=N_L).index).difference(possible_outliers))\n uuids = df.loc[indices, \"uuid\"].tolist()\n possible_outliers.update(uuids)\n\n indices = list(set(df[col].nsmallest(n=N_s).index).difference(possible_outliers))\n uuids = df.loc[indices, \"uuid\"].tolist()\n possible_outliers.update(uuids)\n\n N_L += 7\n N_s += 7\n\n possible_outliers = list(possible_outliers)\n old = list(pd.read_csv('../../reports/outlier_detection/possible_outliers.csv')['uuid'].values)\n possible_outliers = list(set(possible_outliers).difference(old))\n\n old = list(pd.read_csv('../../reports/outlier_detection/possible_outliers_full.csv')['uuid'].values)\n possible_outliers = list(set(possible_outliers).difference(old))\n\n print(f\"Found {len(possible_outliers)} possible outliers\")\n return pd.DataFrame({\"uuid\": possible_outliers})\n\n\n\n\n\ndef read_manual_outliers():\n # df_possible_outliers = set(pd.read_csv('../../reports/outlier_detection/possible_outliers.csv')[\"uuid\"].values.tolist())\n\n not_outliers = 
list(paths.list_images('../../reports/outlier_detection/sigurno_outlieri'))\n not_outliers = list(set([os.path.normpath(outlier).split(\"\\\\\")[-1].split('.')[-2] for outlier in not_outliers]))\n\n # outliers = list(df_possible_outliers.difference(not_outliers))\n\n df_true_outliers = pd.DataFrame({'uuid': not_outliers})\n df_true_outliers.to_csv('../../reports/outlier_detection/true_outliers.csv', index=False)\n\n\n print(f'Done: {len(df_true_outliers)} true outliers')\n\n\ndef main():\n command = input(\"Generate features (Y) or detect true outliers (n)?\")\n\n if command == \"Y\":\n # df = generate_features()\n # df.to_csv(\"../../reports/outlier_detection/outlier_features_full.csv\", index=False)\n\n # df = pd.read_csv(\"../../reports/outlier_detection/outlier_features_full.csv\")\n #\n # df_possible_outliers = detect_possible_outliers(df)\n # df_possible_outliers.to_csv('../../reports/outlier_detection/possible_outliers_full2.csv', index=False)\n\n figures_path = '../../reports/outlier_detection/template_matching_images/'\n generate_images_from_csv('../../reports/outlier_detection/template_matching_outlier_detection.csv', figures_path, PATH)\n print(\"Run this script again with 'n' argument.\")\n\n elif command == \"n\":\n read_manual_outliers()\n\n else:\n print(\"krivo xd\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout"
]
] |
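`detect_possible_outliers()` flags any image whose colour statistics land in the top `N_L` or bottom `N_s` of some feature column. The core move — collecting uuids from `nlargest`/`nsmallest` indices — in a toy form with a made-up feature:

```python
import pandas as pd

df = pd.DataFrame({"uuid": list("abcdefgh"),
                   "R_mean": [5, 7, 6, 99, 5, 6, 7, -40]})

suspects = set()
for col in ["R_mean"]:                      # feature_cols in the real script
    suspects.update(df.loc[df[col].nlargest(1).index, "uuid"])
    suspects.update(df.loc[df[col].nsmallest(1).index, "uuid"])

print(sorted(suspects))  # ['d', 'h']
```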
Keirua/blog.keiruaprod.fr
|
[
"76e6623ff3d625690e1dad02efa5e12073be5381"
] |
[
"charts/cats.py"
] |
[
"import cutecharts.charts as ctc\nimport pandas as pd\nimport numpy as np\n\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\ndf=pd.read_csv('catsshouldnt.csv', sep=',')\n\n# https://github.com/cutecharts/cutecharts.py#-usage\nchart = ctc.Bar('Follower count for @catsshouldnt',width='500px',height='400px')\nchart.set_options(\n labels=list(df[\"date\"]),\n x_label=\"Date\",\n y_label=\"Follower count\" ,\n colors=['#FFF1C9','#F7B7A3','#EA5F89','#9B3192','#57167E','#47B39C','#00529B']\n)\n\nchart.add_series(\"Follower count\",list(df[\"Follower count\"]))\nchart.render()"
] |
[
[
"pandas.read_csv"
]
] |
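The script assumes `catsshouldnt.csv` provides at least the `date` and `Follower count` columns it reads. A minimal stand-in file, if you want to run the chart locally (values are invented):

```python
import pandas as pd

pd.DataFrame({
    "date": ["2021-01", "2021-02", "2021-03"],
    "Follower count": [100, 150, 230],
}).to_csv("catsshouldnt.csv", index=False)
```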
holli/probability
|
[
"7a0ce5e5beff91051028258dfbc7bc6cf0c4998d",
"3e84aa840b624f4184819f1e6ce9180c7997aad9"
] |
[
"tensorflow_probability/python/bijectors/sinh_arcsinh_test.py",
"tensorflow_probability/python/bijectors/invert.py"
] |
[
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for SinhArcsinh Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass SinhArcsinhTest(test_util.TestCase):\n \"\"\"Tests correctness of the power transformation.\"\"\"\n\n def testBijectorVersusNumpyRewriteOfBasicFunctions(self):\n skewness = 0.2\n tailweight = 2.0\n multiplier = 2.0 / np.sinh(np.arcsinh(2.0) * tailweight)\n bijector = tfb.SinhArcsinh(\n skewness=skewness, tailweight=tailweight, validate_args=True)\n self.assertStartsWith(bijector.name, \"sinh_arcsinh\")\n x = np.array([[[-2.01], [2.], [1e-4]]]).astype(np.float32)\n y = np.sinh((np.arcsinh(x) + skewness) * tailweight) * multiplier\n self.assertAllClose(y, self.evaluate(bijector.forward(x)))\n self.assertAllClose(x, self.evaluate(bijector.inverse(y)))\n self.assertAllClose(\n np.sum(\n np.log(np.cosh(\n np.arcsinh(y / multiplier) / tailweight - skewness)) -\n np.log(tailweight) - np.log(np.sqrt((y / multiplier)**2 + 1))\n - np.log(multiplier),\n axis=-1),\n self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=1)),\n rtol=2e-6)\n self.assertAllClose(\n self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=1)),\n self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=1)),\n rtol=1e-4,\n atol=0.)\n\n def testSkew(self):\n # Will broadcast together to shape [3, 2].\n x = [-1., 1.]\n skewness = [[-1.], [0.], [1.]]\n bijector = tfb.SinhArcsinh(skewness=skewness, validate_args=True)\n y = self.evaluate(bijector.forward(x))\n\n # For skew < 0, |forward(-1)| > |forward(1)|\n self.assertGreater(np.abs(y[0, 0]), np.abs(y[0, 1]))\n\n # For skew = 0, |forward(-1)| = |forward(1)|\n self.assertAllClose(np.abs(y[1, 0]), np.abs(y[1, 1]))\n\n # For skew > 0, |forward(-1)| < |forward(1)|\n self.assertLess(np.abs(y[2, 0]), np.abs(y[2, 1]))\n\n def testKurtosis(self):\n x = np.logspace(-2, 2, 1000).astype(np.float32)\n tailweight = [[0.5], [1.0], [2.0]]\n bijector = tfb.SinhArcsinh(tailweight=tailweight, validate_args=True)\n y = self.evaluate(bijector.forward(x))\n mean = np.mean(x, axis=-1)\n stddev = np.std(x, axis=-1, ddof=0)\n kurtosis = np.mean((y - mean) ** 4, axis=-1) / (stddev ** 4)\n self.assertAllClose(kurtosis, np.sort(kurtosis))\n\n def testScalarCongruencySkewness1Tailweight0p5(self):\n bijector = tfb.SinhArcsinh(\n skewness=1.0, tailweight=0.5, validate_args=True)\n bijector_test_util.assert_scalar_congruency(\n bijector, lower_x=-2., upper_x=2.0, eval_func=self.evaluate, rtol=0.05)\n\n def 
testScalarCongruencySkewnessNeg1Tailweight1p5(self):\n bijector = tfb.SinhArcsinh(\n skewness=-1.0, tailweight=1.5, validate_args=True)\n bijector_test_util.assert_scalar_congruency(\n bijector, lower_x=-2., upper_x=2.0, eval_func=self.evaluate, rtol=0.05)\n\n def testBijectiveAndFiniteSkewnessNeg1Tailweight0p5(self):\n bijector = tfb.SinhArcsinh(\n skewness=-1., tailweight=0.5, validate_args=True)\n x = np.concatenate((-np.logspace(-2, 10, 1000), [0], np.logspace(\n -2, 10, 1000))).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n bijector, x, x, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)\n\n def testBijectiveAndFiniteSkewness1Tailweight3(self):\n bijector = tfb.SinhArcsinh(skewness=1., tailweight=3., validate_args=True)\n x = np.concatenate((-np.logspace(-2, 5, 1000), [0], np.logspace(\n -2, 5, 1000))).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n bijector, x, x, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)\n\n def testBijectorEndpoints(self):\n for dtype in (np.float32, np.float64):\n bijector = tfb.SinhArcsinh(\n skewness=dtype(0.), tailweight=dtype(1.), validate_args=True)\n bounds = np.array(\n [np.finfo(dtype).min, np.finfo(dtype).max], dtype=dtype)\n # Note that the above bijector is the identity bijector. Hence, the\n # log_det_jacobian will be 0. Because of this we use atol.\n bijector_test_util.assert_bijective_and_finite(\n bijector, bounds, bounds, eval_func=self.evaluate, event_ndims=0,\n atol=2e-6)\n\n def testBijectorOverRange(self):\n for dtype in (np.float32, np.float64):\n skewness = np.array([1.2, 5.], dtype=dtype)\n tailweight = np.array([2., 10.], dtype=dtype)\n # The inverse will be defined up to where sinh is valid, which is\n # arcsinh(np.finfo(dtype).max).\n log_boundary = np.log(\n np.sinh(np.arcsinh(np.finfo(dtype).max) / tailweight - skewness))\n x = np.array([\n np.logspace(-2, log_boundary[0], base=np.e, num=1000),\n np.logspace(-2, log_boundary[1], base=np.e, num=1000)\n ], dtype=dtype)\n # Ensure broadcasting works.\n x = np.swapaxes(x, 0, 1)\n multiplier = 2. / np.sinh(np.arcsinh(2.) * tailweight)\n y = np.sinh((np.arcsinh(x) + skewness) * tailweight) * multiplier\n bijector = tfb.SinhArcsinh(\n skewness=skewness, tailweight=tailweight, validate_args=True)\n\n self.assertAllClose(\n y, self.evaluate(bijector.forward(x)), rtol=1e-4, atol=0.)\n self.assertAllClose(\n x, self.evaluate(bijector.inverse(y)), rtol=1e-4, atol=0.)\n\n # On IBM PPC systems, longdouble (np.float128) is same as double except\n # that it can have more precision. Type double being of 8 bytes, can't\n # hold square of max of float64 (which is also 8 bytes).\n # Below test fails due to overflow error giving inf. 
This check avoids\n # that error by skipping square calculation and corresponding assert.\n\n if (np.amax(y) <= np.sqrt(np.finfo(np.float128).max) and\n np.fabs(np.amin(y)) <= np.sqrt(np.fabs(np.finfo(np.float128).min))):\n\n # Do the numpy calculation in float128 to avoid inf/nan.\n y_float128 = np.float128(y)\n self.assertAllClose(\n np.log(np.cosh(\n np.arcsinh(y_float128 / multiplier)\n / tailweight - skewness) / np.sqrt(\n (y_float128 / multiplier)**2 + 1))\n - np.log(tailweight) - np.log(multiplier),\n self.evaluate(\n bijector.inverse_log_det_jacobian(y, event_ndims=0)),\n rtol=1e-4,\n atol=0.)\n self.assertAllClose(\n self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=0)),\n self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)),\n rtol=1e-4,\n atol=0.)\n\n def testZeroTailweightRaises(self):\n with self.assertRaisesOpError(\"Argument `tailweight` must be positive\"):\n self.evaluate(\n tfb.SinhArcsinh(tailweight=0., validate_args=True).forward(1.0))\n\n def testDefaultDtypeIsFloat32(self):\n bijector = tfb.SinhArcsinh()\n self.assertEqual(bijector.tailweight.dtype, np.float32)\n self.assertEqual(bijector.skewness.dtype, np.float32)\n\n def testVariableTailweight(self):\n x = tf.Variable(1.)\n b = tfb.SinhArcsinh(tailweight=x, validate_args=True)\n self.evaluate(x.initializer)\n self.assertIs(x, b.tailweight)\n self.assertEqual((), self.evaluate(b.forward(0.5)).shape)\n with self.assertRaisesOpError(\"Argument `tailweight` must be positive.\"):\n with tf.control_dependencies([x.assign(-1.)]):\n self.assertEqual((), self.evaluate(b.forward(0.5)).shape)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Invert bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector as bijector_lib\n\n__all__ = [\n \"Invert\",\n]\n\n\nclass Invert(bijector_lib.Bijector):\n \"\"\"Bijector which inverts another Bijector.\n\n Example Use: [ExpGammaDistribution (see Background & Context)](\n https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)\n models `Y=log(X)` where `X ~ Gamma`.\n\n ```python\n exp_gamma_distribution = TransformedDistribution(\n distribution=Gamma(concentration=1., rate=2.),\n bijector=bijector.Invert(bijector.Exp())\n ```\n\n \"\"\"\n\n def __init__(self, bijector, validate_args=False, name=None):\n \"\"\"Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.\n\n Note: An inverted bijector's `inverse_log_det_jacobian` is often more\n efficient if the base bijector implements `_forward_log_det_jacobian`. If\n `_forward_log_det_jacobian` is not implemented then the following code is\n used:\n\n ```python\n y = self.inverse(x, **kwargs)\n return -self.inverse_log_det_jacobian(y, **kwargs)\n ```\n\n Args:\n bijector: Bijector instance.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str`, name given to ops managed by this object.\n \"\"\"\n\n if not bijector._is_injective: # pylint: disable=protected-access\n raise NotImplementedError(\n \"Invert is not implemented for non-injective bijectors.\")\n\n name = name or \"_\".join([\"invert\", bijector.name])\n with tf.name_scope(name) as name:\n self._bijector = bijector\n super(Invert, self).__init__(\n forward_min_event_ndims=bijector.inverse_min_event_ndims,\n inverse_min_event_ndims=bijector.forward_min_event_ndims,\n is_constant_jacobian=bijector.is_constant_jacobian,\n validate_args=validate_args,\n dtype=bijector.dtype,\n name=name)\n\n def forward_event_shape(self, input_shape):\n return self.bijector.inverse_event_shape(input_shape)\n\n def forward_event_shape_tensor(self, input_shape):\n return self.bijector.inverse_event_shape_tensor(input_shape)\n\n def inverse_event_shape(self, output_shape):\n return self.bijector.forward_event_shape(output_shape)\n\n def inverse_event_shape_tensor(self, output_shape):\n return self.bijector.forward_event_shape_tensor(output_shape)\n\n @property\n def bijector(self):\n return self._bijector\n\n def _internal_is_increasing(self, **kwargs):\n return self.bijector._internal_is_increasing(**kwargs) # pylint: disable=protected-access\n\n def forward(self, x, **kwargs):\n return self.bijector.inverse(x, **kwargs)\n\n def inverse(self, y, **kwargs):\n return self.bijector.forward(y, **kwargs)\n\n def inverse_log_det_jacobian(self, y, event_ndims, 
**kwargs):\n return self.bijector.forward_log_det_jacobian(y, event_ndims, **kwargs)\n\n def forward_log_det_jacobian(self, x, event_ndims, **kwargs):\n return self.bijector.inverse_log_det_jacobian(x, event_ndims, **kwargs)\n"
] |
[
[
"numpy.array",
"numpy.log",
"numpy.mean",
"numpy.std",
"numpy.finfo",
"numpy.swapaxes",
"tensorflow.compat.v2.Variable",
"numpy.sort",
"numpy.abs",
"numpy.float128",
"numpy.arcsinh",
"numpy.amax",
"numpy.amin",
"numpy.sqrt",
"numpy.logspace",
"tensorflow.compat.v2.test.main"
],
[
"tensorflow.compat.v2.name_scope"
]
] |
bluetyson/discretize
|
[
"a4ead91d6a1f84658ab20946da5fa86dc9ccc831",
"a4ead91d6a1f84658ab20946da5fa86dc9ccc831"
] |
[
"tutorials/inner_products/2_physical_properties.py",
"discretize/mixins/omfModule.py"
] |
[
"\"\"\"\nConstitutive Relations\n======================\n\nWhen solving PDEs using the finite volume approach, inner products may\ncontain constitutive relations; examples include Ohm's law and Hooke's law.\nFor this class of inner products, you will learn how to:\n\n - Construct the inner-product matrix in the case of isotropic and anisotropic constitutive relations\n - Construct the inverse of the inner-product matrix\n - Work with constitutive relations defined by the reciprocal of a parameter\n\nLet :math:`\\\\vec{J}` and :math:`\\\\vec{E}` be two physically related\nquantities. If their relationship is isotropic (defined by a constant\n:math:`\\\\sigma`), then the constitutive relation is given by:\n\n.. math::\n \\\\vec{J} = \\\\sigma \\\\vec{E}\n\nThe inner product between a vector :math:`\\\\vec{v}` and the right-hand side\nof this expression is given by:\n\n.. math::\n (\\\\vec{v}, \\\\sigma \\\\vec{E} ) = \\\\int_\\\\Omega \\\\vec{v} \\\\cdot \\\\sigma \\\\vec{E} \\\\, dv\n\nJust like in the previous tutorial, we would like to approximate the inner\nproduct numerically using an *inner-product matrix* such that:\n\n.. math::\n (\\\\vec{v}, \\\\sigma \\\\vec{E} ) \\\\approx \\\\mathbf{v^T M_\\\\sigma e}\n\nwhere the inner product matrix :math:`\\\\mathbf{M_\\\\sigma}` now depends on:\n\n 1. the dimensions and discretization of the mesh\n 2. where :math:`\\\\mathbf{v}` and :math:`\\\\mathbf{e}` live\n 3. the spatial distribution of the property :math:`\\\\sigma`\n\nIn the case of anisotropy, the constitutive relations are defined by a tensor\n(:math:`\\\\Sigma`). Here, the constitutive relation is of the form:\n\n.. math::\n \\\\vec{J} = \\\\Sigma \\\\vec{E}\n\nwhere\n\n.. math::\n \\\\Sigma = \\\\begin{bmatrix} \\\\sigma_{1} & \\\\sigma_{4} & \\\\sigma_{5} \\n\n \\\\sigma_{4} & \\\\sigma_{2} & \\\\sigma_{6} \\n\n \\\\sigma_{5} & \\\\sigma_{6} & \\\\sigma_{3} \\\\end{bmatrix}\n\nIs symmetric and defined by 6 independent parameters. The inner product between\na vector :math:`\\\\vec{v}` and the right-hand side of this expression is given\nby:\n\n.. math::\n (\\\\vec{v}, \\\\Sigma \\\\vec{E} ) = \\\\int_\\\\Omega \\\\vec{v} \\\\cdot \\\\Sigma \\\\vec{E} \\\\, dv\n\nOnce again we would like to approximate the inner product numerically using an\n*inner-product matrix* :math:`\\\\mathbf{M_\\\\Sigma}` such that:\n\n.. math::\n (\\\\vec{v}, \\\\Sigma \\\\vec{E} ) \\\\approx \\\\mathbf{v^T M_\\\\Sigma e}\n \n\n\n\"\"\"\n\n####################################################\n#\n# Import Packages\n# ---------------\n#\n# Here we import the packages required for this tutorial\n#\n\nfrom discretize import TensorMesh\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# sphinx_gallery_thumbnail_number = 1\n\n#####################################################\n# Inner Product for a Single Cell\n# -------------------------------\n#\n# Here we compare the inner product matricies for a single cell when the\n# constitutive relationship is:\n# \n# - **isotropic:** :math:`\\sigma_1 = \\sigma_2 = \\sigma_3 = \\sigma` and :math:`\\sigma_4 = \\sigma_5 = \\sigma_6 = 0`; e.g. 
:math:`\\vec{J} = \\sigma \\vec{E}`\n# - **diagonal anisotropic:** independent parameters :math:`\\sigma_1, \\sigma_2, \\sigma_3` and :math:`\\sigma_4 = \\sigma_5 = \\sigma_6 = 0`\n# - **fully anisotropic:** independent parameters :math:`\\sigma_1, \\sigma_2, \\sigma_3, \\sigma_4, \\sigma_5, \\sigma_6`\n# \n# When approximating the inner product according to the finite volume approach,\n# the constitutive parameters are defined at cell centers; even if the\n# fields/fluxes live at cell edges/faces. As we will see, inner-product\n# matricies are generally diagonal; except for in the fully anisotropic case\n# where the inner product matrix contains a significant number of non-diagonal\n# entries.\n# \n\n# Create a single 3D cell\nh = np.ones(1)\nmesh = TensorMesh([h, h, h])\n\n# Define 6 constitutive parameters for the cell\nsig1, sig2, sig3, sig4, sig5, sig6 = 6, 5, 4, 3, 2, 1\n\n# Isotropic case\nsig = sig1*np.ones((1, 1))\nsig_tensor_1 = np.diag(sig1*np.ones(3))\nMe1 = mesh.getEdgeInnerProduct(sig) # Edges inner product matrix\nMf1 = mesh.getFaceInnerProduct(sig) # Faces inner product matrix\n\n# Diagonal anisotropic\nsig = np.c_[sig1, sig2, sig3]\nsig_tensor_2 = np.diag(np.array([sig1, sig2, sig3]))\nMe2 = mesh.getEdgeInnerProduct(sig)\nMf2 = mesh.getFaceInnerProduct(sig)\n\n# Full anisotropic\nsig = np.c_[sig1, sig2, sig3, sig4, sig5, sig6]\nsig_tensor_3 = np.diag(np.array([sig1, sig2, sig3]))\nsig_tensor_3[(0, 1), (1, 0)] = sig4\nsig_tensor_3[(0, 2), (2, 0)] = sig5\nsig_tensor_3[(1, 2), (2, 1)] = sig6\nMe3 = mesh.getEdgeInnerProduct(sig)\nMf3 = mesh.getFaceInnerProduct(sig)\n\n# Plotting matrix entries\nfig = plt.figure(figsize=(12, 12))\n\nax1 = fig.add_subplot(331)\nax1.imshow(sig_tensor_1)\nax1.set_title('Property Tensor (isotropic)')\n\nax2 = fig.add_subplot(332)\nax2.imshow(sig_tensor_2)\nax2.set_title('Property Tensor (diagonal anisotropic)')\n\nax3 = fig.add_subplot(333)\nax3.imshow(sig_tensor_3)\nax3.set_title('Property Tensor (full anisotropic)')\n\nax4 = fig.add_subplot(334)\nax4.imshow(Mf1.todense())\nax4.set_title('M-faces Matrix (isotropic)')\n\nax5 = fig.add_subplot(335)\nax5.imshow(Mf2.todense())\nax5.set_title('M-faces Matrix (diagonal anisotropic)')\n\nax6 = fig.add_subplot(336)\nax6.imshow(Mf3.todense())\nax6.set_title('M-faces Matrix (full anisotropic)')\n\nax7 = fig.add_subplot(337)\nax7.imshow(Me1.todense())\nax7.set_title('M-edges Matrix (isotropic)')\n\nax8 = fig.add_subplot(338)\nax8.imshow(Me2.todense())\nax8.set_title('M-edges Matrix (diagonal anisotropic)')\n\nax9 = fig.add_subplot(339)\nax9.imshow(Me3.todense())\nax9.set_title('M-edges Matrix (full anisotropic)')\n\n\n#############################################################\n# Spatially Variant Parameters\n# ----------------------------\n#\n# In practice, the parameter :math:`\\sigma` or tensor :math:`\\Sigma` will\n# vary spatially. In this case, we define the parameter\n# :math:`\\sigma` (or parameters :math:`\\Sigma`) for each cell. When\n# creating the inner product matrix, we enter these parameters as\n# a numpy array. This is demonstrated below. 
Properties of the resulting\n# inner product matricies are discussed.\n#\n\n# Create a small 3D mesh\nh = np.ones(5)\nmesh = TensorMesh([h, h, h])\n\n# Isotropic case: (nC, ) numpy array\nsig = np.random.rand(mesh.nC) # sig for each cell\nMe1 = mesh.getEdgeInnerProduct(sig) # Edges inner product matrix\nMf1 = mesh.getFaceInnerProduct(sig) # Faces inner product matrix\n\n# Linear case: (nC, dim) numpy array\nsig = np.random.rand(mesh.nC, mesh.dim)\nMe2 = mesh.getEdgeInnerProduct(sig)\nMf2 = mesh.getFaceInnerProduct(sig)\n\n# Anisotropic case: (nC, 3) for 2D and (nC, 6) for 3D\nsig = np.random.rand(mesh.nC, 6)\nMe3 = mesh.getEdgeInnerProduct(sig)\nMf3 = mesh.getFaceInnerProduct(sig)\n\n# Properties of inner product matricies\nprint('\\n FACE INNER PRODUCT MATRIX')\nprint('- Number of faces :', mesh.nF)\nprint('- Dimensions of operator :', str(mesh.nF), 'x', str(mesh.nF))\nprint('- Number non-zero (isotropic) :', str(Mf1.nnz))\nprint('- Number non-zero (linear) :', str(Mf2.nnz))\nprint('- Number non-zero (anisotropic):', str(Mf3.nnz), '\\n')\n\nprint('\\n EDGE INNER PRODUCT MATRIX')\nprint('- Number of faces :', mesh.nE)\nprint('- Dimensions of operator :', str(mesh.nE), 'x', str(mesh.nE))\nprint('- Number non-zero (isotropic) :', str(Me1.nnz))\nprint('- Number non-zero (linear) :', str(Me2.nnz))\nprint('- Number non-zero (anisotropic):', str(Me3.nnz), '\\n')\n\n\n#############################################################\n# Inverse\n# -------\n#\n# The final discretized system using the finite volume method may contain\n# the inverse of the inner-product matrix. Here we show how to call this\n# using the *invMat* keyword argument.\n#\n# For the isotropic and diagonally anisotropic cases, the inner product matrix\n# is diagonal. As a result, its inverse can be easily formed. For the full\n# anisotropic case however, we cannot expicitly form the inverse because the\n# inner product matrix contains a significant number of off-diagonal elements.\n#\n# For the isotropic and diagonal anisotropic cases we can form\n# :math:`\\mathbf{M}^{-1}` then apply it to a vector using the :math:`*`\n# operator. For the full anisotropic case, we must form the inner product\n# matrix and do a numerical solve.\n#\n\n# Create a small 3D mesh\nh = np.ones(5)\nmesh = TensorMesh([h, h, h])\n\n# Isotropic case: (nC, ) numpy array\nsig = np.random.rand(mesh.nC)\nMe1_inv = mesh.getEdgeInnerProduct(sig, invMat=True)\nMf1_inv = mesh.getFaceInnerProduct(sig, invMat=True)\n\n# Diagonal anisotropic: (nC, dim) numpy array\nsig = np.random.rand(mesh.nC, mesh.dim)\nMe2_inv = mesh.getEdgeInnerProduct(sig, invMat=True)\nMf2_inv = mesh.getFaceInnerProduct(sig, invMat=True)\n\n# Full anisotropic: (nC, 3) for 2D and (nC, 6) for 3D\nsig = np.random.rand(mesh.nC, 6)\nMe3 = mesh.getEdgeInnerProduct(sig)\nMf3 = mesh.getFaceInnerProduct(sig)\n\n\n###########################################################################\n# Reciprocal Properties\n# ---------------------\n#\n# At times, the constitutive relation may be defined by the reciprocal of\n# a parameter (:math:`\\rho`). Here we demonstrate how inner product matricies\n# can be formed using the keyword argument *invProp*. We will do this for a\n# single cell and plot the matrix elements. We can easily extend this to\n# a mesh comprised of many cells.\n#\n# In this case, the constitutive relation is given by:\n#\n# .. 
math::\n# \\vec{J} = \\frac{1}{\\rho} \\vec{E}\n#\n# The inner product between a vector :math:`\\\\vec{v}` and the right-hand side\n# of the expression is given by:\n#\n# .. math::\n# (\\vec{v}, \\rho^{-1} \\vec{E} ) = \\int_\\Omega \\vec{v} \\cdot \\rho^{-1} \\vec{E} \\, dv\n#\n# where the inner product is approximated using an inner product matrix\n# :math:`\\mathbf{M_{\\rho^{-1}}}` as follows:\n#\n# .. math::\n# (\\vec{v}, \\rho^{-1} \\vec{E} ) \\approx \\mathbf{v^T M_{\\rho^{-1}} e}\n#\n# In the case that the constitutive relation is defined by a\n# tensor :math:`P`, e.g.:\n#\n# .. math::\n# \\vec{J} = P \\vec{E}\n#\n# where\n#\n# .. math::\n# P = \\begin{bmatrix} \\rho_{1}^{-1} & \\rho_{4}^{-1} & \\rho_{5}^{-1} \\\\\n# \\rho_{4}^{-1} & \\rho_{2}^{-1} & \\rho_{6}^{-1} \\\\\n# \\rho_{5}^{-1} & \\rho_{6}^{-1} & \\rho_{3}^{-1} \\end{bmatrix}\n#\n# The inner product between a vector :math:`\\vec{v}` and the right-hand side of\n# this expression is given by:\n#\n# .. math::\n# (\\vec{v}, P \\vec{E} ) = \\int_\\Omega \\vec{v} \\cdot P \\vec{E} \\, dv\n#\n# Once again we would like to approximate the inner product numerically using an\n# *inner-product matrix* :math:`\\mathbf{M_P}` such that:\n#\n# .. math::\n# (\\vec{v}, P \\vec{E} ) \\approx \\mathbf{v^T M_P e}\n#\n# Here we demonstrate how to form the inner-product matricies\n# :math:`\\mathbf{M_{\\rho^{-1}}}` and :math:`\\mathbf{M_P}`.\n#\n\n# Create a small 3D mesh\nh = np.ones(1)\nmesh = TensorMesh([h, h, h])\n\n# Define 6 constitutive parameters for the cell\nrho1, rho2, rho3, rho4, rho5, rho6 = 1./6., 1./5., 1./4., 1./3., 1./2., 1\n\n# Isotropic case\nrho = rho1*np.ones((1, 1))\nMe1 = mesh.getEdgeInnerProduct(rho, invProp=True) # Edges inner product matrix\nMf1 = mesh.getFaceInnerProduct(rho, invProp=True) # Faces inner product matrix\n\n# Diagonal anisotropic case\nrho = np.c_[rho1, rho2, rho3]\nMe2 = mesh.getEdgeInnerProduct(rho, invProp=True)\nMf2 = mesh.getFaceInnerProduct(rho, invProp=True)\n\n# Full anisotropic case\nrho = np.c_[rho1, rho2, rho3, rho4, rho5, rho6]\nMe3 = mesh.getEdgeInnerProduct(rho, invProp=True)\nMf3 = mesh.getFaceInnerProduct(rho, invProp=True)\n\n# Plotting matrix entries\nfig = plt.figure(figsize=(14, 9))\n\nax1 = fig.add_subplot(231)\nax1.imshow(Mf1.todense())\nax1.set_title('Isotropic (Faces)')\n\nax2 = fig.add_subplot(232)\nax2.imshow(Mf2.todense())\nax2.set_title('Diagonal Anisotropic (Faces)')\n\nax3 = fig.add_subplot(233)\nax3.imshow(Mf3.todense())\nax3.set_title('Full Anisotropic (Faces)')\n\nax4 = fig.add_subplot(234)\nax4.imshow(Me1.todense())\nax4.set_title('Isotropic (Edges)')\n\nax5 = fig.add_subplot(235)\nax5.imshow(Me2.todense())\nax5.set_title('Diagonal Anisotropic (Edges)')\n\nax6 = fig.add_subplot(236)\nax6.imshow(Me3.todense())\nax6.set_title('Full Anisotropic (Edges)')\n",
"\"\"\"\nA class for converting ``discretize`` meshes to OMF objects\n\"\"\"\n\nimport omf\nimport numpy as np\n\n\nimport discretize\n\n\ndef ravel_data_array(arr, nx, ny, nz):\n \"\"\"Ravel's a numpy array into proper order for passing to the OMF\n specification from ``discretize``/UBC formats\n \"\"\"\n dim = (nz, ny, nx)\n return np.reshape(arr, dim, order='C').ravel(order='F')\n\n\ndef unravel_data_array(arr, nx, ny, nz):\n \"\"\"Unravel's a numpy array from the OMF specification to\n ``discretize``/UBC formats - the is the inverse of ``ravel_data_array``\n \"\"\"\n dim = (nz, ny, nx)\n return np.reshape(arr, dim, order='F').ravel(order='C')\n\n\nclass InterfaceOMF(object):\n\n\n def _tensor_mesh_to_omf(mesh, models=None):\n \"\"\"\n Constructs an :class:`omf.VolumeElement` object of this tensor mesh and\n the given models as cell data of that grid.\n\n Parameters\n ----------\n\n mesh : discretize.TensorMesh\n The tensor mesh to convert to a :class:`omf.VolumeElement`\n\n models : dict(numpy.ndarray)\n Name('s) and array('s). Match number of cells\n\n \"\"\"\n if models is None:\n models = {}\n # Make the geometry\n geometry = omf.VolumeGridGeometry()\n # Set tensors\n tensors = mesh.h\n if len(tensors) < 1:\n raise RuntimeError(\"Your mesh is empty... fill it out before converting to OMF\")\n elif len(tensors) == 1:\n geometry.tensor_u = tensors[0]\n geometry.tensor_v = np.array([0.0,])\n geometry.tensor_w = np.array([0.0,])\n elif len(tensors) == 2:\n geometry.tensor_u = tensors[0]\n geometry.tensor_v = tensors[1]\n geometry.tensor_w = np.array([0.0,])\n elif len(tensors) == 3:\n geometry.tensor_u = tensors[0]\n geometry.tensor_v = tensors[1]\n geometry.tensor_w = tensors[2]\n else:\n raise RuntimeError(\"This mesh is too high-dimensional for OMF\")\n # Set rotation axes\n geometry.axis_u = mesh.axis_u\n geometry.axis_v = mesh.axis_v\n geometry.axis_w = mesh.axis_w\n # Set the origin\n geometry.origin = mesh.x0\n # Make sure the geometry is built correctly\n geometry.validate()\n # Make the volume elemet (the OMF object)\n omfmesh = omf.VolumeElement(\n geometry=geometry,\n )\n # Add model data arrays onto the cells of the mesh\n omfmesh.data = []\n for name, arr in models.items():\n data = omf.ScalarData(name=name,\n array=ravel_data_array(arr, mesh.nCx, mesh.nCy, mesh.nCz),\n location='cells')\n omfmesh.data.append(data)\n # Validate to make sure a proper OMF object is returned to the user\n omfmesh.validate()\n return omfmesh\n\n\n def _tree_mesh_to_omf(mesh, models=None):\n raise NotImplementedError('Not possible until OMF v2 is released.')\n\n\n def _curvilinear_mesh_to_omf(mesh, models=None):\n raise NotImplementedError('Not currently possible.')\n\n\n def _cyl_mesh_to_omf(mesh, models=None):\n raise NotImplementedError('Not currently possible.')\n\n\n def to_omf(mesh, models=None):\n \"\"\"Convert this mesh object to it's proper ``omf`` data object with\n the given model dictionary as the cell data of that dataset.\n\n Parameters\n ----------\n\n models : dict(numpy.ndarray)\n Name('s) and array('s). 
Match number of cells\n\n \"\"\"\n # TODO: mesh.validate()\n converters = {\n # TODO: 'tree' : InterfaceOMF._tree_mesh_to_omf,\n 'tensor' : InterfaceOMF._tensor_mesh_to_omf,\n # TODO: 'curv' : InterfaceOMF._curvilinear_mesh_to_omf,\n # TODO: 'CylMesh' : InterfaceOMF._cyl_mesh_to_omf,\n }\n key = mesh._meshType.lower()\n try:\n convert = converters[key]\n except KeyError:\n raise RuntimeError('Mesh type `{}` is not currently supported for OMF conversion.'.format(key))\n # Convert the data object\n return convert(mesh, models=models)\n\n\n @staticmethod\n def _omf_volume_to_tensor(element):\n \"\"\"Convert an :class:`omf.VolumeElement` to :class:`discretize.TensorMesh`\n \"\"\"\n geometry = element.geometry\n h = [geometry.tensor_u, geometry.tensor_v, geometry.tensor_w]\n mesh = discretize.TensorMesh(h)\n mesh.axis_u = geometry.axis_u\n mesh.axis_v = geometry.axis_v\n mesh.axis_w = geometry.axis_w\n mesh.x0 = geometry.origin\n\n data_dict = {}\n for data in element.data:\n # NOTE: this is agnostic about data location - i.e. nodes vs cells\n data_dict[data.name] = unravel_data_array(np.array(data.array), mesh.nCx, mesh.nCy, mesh.nCz)\n\n # Return TensorMesh and data dictionary\n return mesh, data_dict\n\n\n @staticmethod\n def from_omf(element):\n \"\"\"Convert an OMF element to its proper ``discretize`` type.\n Automatically determines the output type. Returns both the mesh and a\n dictionary of model arrays.\n \"\"\"\n element.validate()\n converters = {\n omf.VolumeElement.__name__ : InterfaceOMF._omf_volume_to_tensor,\n }\n key = element.__class__.__name__\n try:\n convert = converters[key]\n except KeyError:\n raise RuntimeError('OMF type `{}` is not currently supported for conversion.'.format(key))\n # Convert the data object\n return convert(element)\n"
] |
[
[
"numpy.array",
"numpy.ones",
"numpy.random.rand",
"matplotlib.pyplot.figure"
],
[
"numpy.array",
"numpy.reshape"
]
] |
kalnun/pandas-ta
|
[
"60b6cc42f6c53bfdc18fe77e9d70a00712ce3149"
] |
[
"pandas_ta/volatility/true_range.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom pandas import DataFrame\nfrom ..utils import get_drift, get_offset, non_zero_range, verify_series\n\ndef true_range(high, low, close, drift=None, offset=None, **kwargs):\n \"\"\"Indicator: True Range\"\"\"\n # Validate arguments\n high = verify_series(high)\n low = verify_series(low)\n close = verify_series(close)\n high_low_range = non_zero_range(high, low)\n drift = get_drift(drift)\n offset = get_offset(offset)\n\n # Calculate Result\n prev_close = close.shift(drift)\n ranges = [high_low_range, high - prev_close, prev_close - low]\n true_range = DataFrame(ranges).T\n true_range = true_range.abs().max(axis=1)\n\n # Offset\n if offset != 0:\n true_range = true_range.shift(offset)\n\n # Handle fills\n if \"fillna\" in kwargs:\n true_range.fillna(kwargs[\"fillna\"], inplace=True)\n if \"fill_method\" in kwargs:\n true_range.fillna(method=kwargs[\"fill_method\"], inplace=True)\n\n # Name and Categorize it\n true_range.name = f\"TRUERANGE_{drift}\"\n true_range.category = \"volatility\"\n\n return true_range\n\n\n\ntrue_range.__doc__ = \\\n\"\"\"True Range\n\nAn method to expand a classical range (high minus low) to include\npossible gap scenarios.\n\nSources:\n https://www.macroption.com/true-range/\n\nCalculation:\n Default Inputs:\n drift=1\n ABS = Absolute Value\n prev_close = close.shift(drift)\n TRUE_RANGE = ABS([high - low, high - prev_close, low - prev_close]) \n\nArgs:\n high (pd.Series): Series of 'high's\n low (pd.Series): Series of 'low's\n close (pd.Series): Series of 'close's\n drift (int): The shift period. Default: 1\n offset (int): How many periods to offset the result. Default: 0\n\nKwargs:\n fillna (value, optional): pd.DataFrame.fillna(value)\n fill_method (value, optional): Type of fill method\n\nReturns:\n pd.Series: New feature\n\"\"\""
] |
[
[
"pandas.DataFrame"
]
] |
CRingrose94/geomeTRIC
|
[
"5d8eada8c0fafc4aa354adae4a2d84b5b8d943b2"
] |
[
"geometric/tests/test_batch_opt.py"
] |
[
"\"\"\"\nA set of tests for using the QCEngine project\n\"\"\"\n\nimport copy\nimport numpy as np\nimport tempfile\nimport logging\nimport math\nfrom geometric.molecule import bohr2ang\n\nlogger = logging.getLogger(__name__)\n\nfrom . import addons\nimport geometric.optimize as gt \nfrom geometric.internal import CartesianCoordinates,\\\n PrimitiveInternalCoordinates, DelocalizedInternalCoordinates\nfrom geometric.nifty import ang2bohr\n\nlocalizer = addons.in_folder\ntest_logger = addons.test_logger\n\n_base_schema = {\n \"schema_version\": 1,\n \"molecule\": {\n \"geometry\": [\n 0.0, 0.0, -0.1294769411935893,\n 0.0, -1.494187339479985, 1.0274465079245698,\n 0.0, 1.494187339479985, 1.0274465079245698\n ],\n \"symbols\": [\"O\", \"H\", \"H\"],\n \"connectivity\": [[0, 1, 1], [0, 2, 1]]\n },\n \"driver\": \"gradient\",\n \"model\": {\n \"method\": \"UFF\",\n \"basis\": None\n },\n \"keywords\": {},\n \"program\": \"rdkit\"\n } # yapf: disable\n\n_geo2 = [0.0139, -0.4830, 0.2848,\n 0.0628, -0.2860, 0.7675,\n 0.0953, -1.0031, 0.4339]\n\n@addons.using_qcengine\n@addons.using_rdkit\n\n\n\n\nclass BatchOptimizer(object):\n \"\"\" Demo BatchOptmizer for runnig pytest test \"\"\"\n \n def __init__(self, **kwargs):\n self.kwargs = kwargs\n self.params = gt.OptParams(**kwargs)\n \n \n def _initOptimizer(self, schemas):\n \"\"\" initilize all OptObjects for the schmas passed.\n \n Arguements\n ----------\n schemas: list of schemas for qcengine\n \n return\n ------\n list of OptOject's for each schema\n \"\"\"\n \n #=========================================#\n #| Set up the internal coordinate system |#\n #=========================================#\n # First item in tuple: The class to be initialized\n # Second item in tuple: Whether to connect non-bonded fragments\n # Third item in tuple: Whether to throw in all Cartesian (no effect if second item is True)\n CoordSysDict = {'cart':(CartesianCoordinates, False, False),\n 'prim':(PrimitiveInternalCoordinates, True, False),\n 'dlc':(DelocalizedInternalCoordinates, True, False),\n 'hdlc':(DelocalizedInternalCoordinates, False, True),\n 'tric':(DelocalizedInternalCoordinates, False, False)}\n coordsys = self.kwargs.get('coordsys', 'tric')\n CoordClass, connect, addcart = CoordSysDict[coordsys.lower()]\n\n optimizers = []\n for schema in schemas:\n M, engine = gt.get_molecule_engine(engine='qcengine', qcschema=schema, **self.kwargs)\n coords = M.xyzs[0].flatten() * ang2bohr\n \n # Read in the constraints\n constraints = self.kwargs.get('constraints', None) #Constraint input file (optional)\n if constraints is not None:\n Cons, CVals = gt.ParseConstraints(M, open(constraints).read())\n else:\n Cons = None\n CVals = None\n \n IC = CoordClass(M, build=True, connect=connect, addcart=addcart, constraints=Cons, \n cvals=CVals[0] if CVals is not None else None)\n tmpDir = tempfile.mkdtemp(\".tmp\", \"batchOpt\")\n\n optimizer = gt.Optimizer(coords, M, IC, engine, tmpDir, self.params)\n optimizer.calcEnergyForce()\n optimizer.prepareFirstStep()\n logger.debug(\"[AU]: e=%.5f bl=%.5f,%.5f g=%.4f\" % (\n optimizer.E, optimizer.X[0],optimizer.X[3], optimizer.gradx[0]))\n optimizers.append(optimizer)\n \n return optimizers\n \n\n def _batchComputeEnergyAndForces(self, optimizers):\n \"\"\" This just an mockup. 
If this was NNP, this would work in one batch\n on the GPU.\n \"\"\"\n for optimizer in optimizers:\n if optimizer.state == gt.OPT_STATE.NEEDS_EVALUATION:\n optimizer.calcEnergyForce()\n logger.debug(\"[AU]: e=%.5f bl=%.5f,%.5f g=%.4f\" % (\n optimizer.E, optimizer.X[0],optimizer.X[3], optimizer.gradx[0]))\n \n def optimizeMols(self, schemas):\n \"\"\" Optimize all molecules as represented by the schemas.\n \n return\n ------\n list of optimized Molecules\n \"\"\"\n optimizers = self._initOptimizer(schemas)\n res = []\n \n # Optimization Loop, while not all have completed optimization\n while len(optimizers) > 0:\n nextOptObjs = []\n \n # take one step, energy and gradient must have been stored in optObj\n for optimizer in optimizers: \n optimizer.step()\n\n self._batchComputeEnergyAndForces(optimizers)\n\n # evaluate step\n for optimizer in optimizers: \n if optimizer.state == gt.OPT_STATE.NEEDS_EVALUATION:\n \n optimizer.evaluateStep() \n if optimizer.state in [gt.OPT_STATE.CONVERGED, gt.OPT_STATE.FAILED]:\n logger.info(\"Optimization converged!\")\n res.append(optimizer.progress)\n continue\n nextOptObjs.append(optimizer)\n if len(nextOptObjs) == 0: break ######## All Done\n \n # step and evaluation completed, next step for remaining conformations\n optimizers = nextOptObjs\n \n return res\n \n\n@addons.using_qcengine\n@addons.using_rdkit\ndef test_rdkit_simple(test_logger):\n\n schema1 = copy.deepcopy(_base_schema)\n schema2 = copy.deepcopy(_base_schema)\n schema2['molecule']['geometry']= [c / bohr2ang for c in _geo2]\n \n opts = {\"qcengine\": True, \"input\": \"tmp_data\", \"qce_program\": \"rdkit\"}\n\n bOptimizer = BatchOptimizer(**opts)\n ret = bOptimizer.optimizeMols([schema1, schema2])\n\n # Currently in angstrom\n ref = np.array([0., 0., -0.0644928042, 0., -0.7830365196, 0.5416895554, 0., 0.7830365196, 0.5416895554])\n assert np.allclose(ref, ret[0].xyzs[-1].ravel(), atol=1.e-5)\n \n # check that distances in ref are the same as in ret[1]\n refAt = ref.reshape(-1,3)\n retAt = ret[1].xyzs[-1]\n for atRef,atRet in zip(refAt,retAt):\n for atRef2,atRet2 in zip(refAt,retAt):\n d2Ref = np.power(atRef[0]-atRef2[0],2) + np.power(atRef[1]-atRef2[1],2) + np.power(atRef[2]-atRef2[2],2)\n d2Ret = np.power(atRet[0]-atRet2[0],2) + np.power(atRet[1]-atRet2[1],2) + np.power(atRet[2]-atRet2[2],2)\n \n assert math.isclose(d2Ref, d2Ret, abs_tol=1e-3)\n \n \n \n\n_N2_schema = {\n \"schema_version\": 1,\n \"molecule\": {\n \"geometry\": [\n 0.0, 0., 0.,\n 1.9, 0., 0.\n ],\n \"symbols\": [\"N\", \"N\"],\n \"connectivity\": [[0, 1, 3]]\n },\n \"driver\": \"gradient\",\n \"model\": {\n \"method\": \"UFF\",\n \"basis\": None\n },\n \"keywords\": {},\n \"program\": \"rdkit\"\n } # yapf: disable\n\n_N2_geo2 = [0.0, 0., 0.,\n 0.6, 0., 0.,]\n\n\n\n@addons.using_qcengine\n@addons.using_rdkit\ndef test_rdkit_N2(test_logger):\n\n schema1 = copy.deepcopy(_N2_schema)\n schema2 = copy.deepcopy(_N2_schema)\n schema2['molecule']['geometry']= [c / bohr2ang for c in _N2_geo2]\n \n opts = {\"qcengine\": True, \"input\": \"tmp_data\", \"qce_program\": \"rdkit\"}\n\n bOptimizer = BatchOptimizer(**opts)\n ret = bOptimizer.optimizeMols([schema1, schema2])\n\n # Currently in angstrom\n ref = np.array([-0.05729, 0., 0., 1.06272, 0., 0.])\n assert np.allclose(ref, ret[0].xyzs[-1].ravel(), atol=1.e-3)\n \n # check that distances in ref are the same as in ret[1]\n refAt = ref.reshape(-1,3)\n retAt = ret[1].xyzs[-1]\n for atRef,atRet in zip(refAt,retAt):\n for atRef2,atRet2 in zip(refAt,retAt):\n d2Ref = np.power(atRef[0]-atRef2[0],2) + 
np.power(atRef[1]-atRef2[1],2) + np.power(atRef[2]-atRef2[2],2)\n d2Ret = np.power(atRet[0]-atRet2[0],2) + np.power(atRet[1]-atRet2[1],2) + np.power(atRet[2]-atRet2[2],2)\n \n assert math.isclose(d2Ref, d2Ret, abs_tol=1e-3)\n"
] |
[
[
"numpy.array",
"numpy.power"
]
] |
kapteyn-astro/kapteyn
|
[
"f12332cfd567c7c0da40628dcfc7b297971ee636",
"f12332cfd567c7c0da40628dcfc7b297971ee636"
] |
[
"doc/source/EXAMPLES/mu_ticklabeldemo.py",
"kapteyn/interpolation.py"
] |
[
"from kapteyn import maputils\nfrom matplotlib import pylab as plt\n\nheader = {'NAXIS': 2 ,'NAXIS1':100 , 'NAXIS2': 100 ,\n'CDELT1': -7.165998823000E-03, 'CRPIX1': 5.100000000000E+01 ,\n'CRVAL1': -5.128208479590E+01, 'CTYPE1': 'RA---NCP', 'CUNIT1': 'DEGREE ',\n'CDELT2': 7.165998823000E-03, 'CRPIX2': 5.100000000000E+01,\n'CRVAL2': 6.015388802060E+01, 'CTYPE2': 'DEC--NCP ', 'CUNIT2': 'DEGREE'\n}\n\nfig = plt.figure()\nframe = fig.add_axes([0.20,0.15,0.75,0.8])\nf = maputils.FITSimage(externalheader=header)\nannim = f.Annotatedimage(frame)\ngrat = annim.Graticule()\ngrat2 = annim.Graticule(skyout='Galactic')\ngrat.setp_ticklabel(plotaxis=\"bottom\", position=\"20h34m\", fmt=\"%g\",\n color='r', rotation=30)\ngrat.setp_ticklabel(plotaxis='left', color='b', rotation=20,\n fontsize=14, fontweight='bold', style='italic')\ngrat.setp_ticklabel(plotaxis='left', color='m', position=\"60d0m0s\", \n fmt=\"DMS\", tex=False) \ngrat.setp_axislabel(plotaxis='left', xpos=-0.25, ypos=0.5)\n# Rotation is inherited from previous setting \ngrat2.setp_gratline(color='g')\ngrat2.setp_ticklabel(visible=False)\ngrat2.setp_axislabel(visible=False)\n\nannim.plot()\nplt.show()\n",
"# Copyright (C) 2003-2005 Peter J. Verveer\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of the author may not be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS\n# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport math\nimport numpy\nfrom . import _ni_support\nfrom kapteyn import _nd_image\n\ndef _extend_mode_to_code(mode):\n mode = _ni_support._extend_mode_to_code(mode)\n return mode\n\ndef spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,\n output_type = None):\n \"\"\"Calculates a one-dimensional spline filter along the given axis.\n\n The lines of the array along the given axis are filtered by a\n spline filter. The order of the spline must be >= 2 and <= 5.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n output, return_value = _ni_support._get_output(output, input,\n output_type)\n if order in [0, 1]:\n output[...] = numpy.array(input)\n else:\n axis = _ni_support._check_axis(axis, input.ndim)\n _nd_image.spline_filter1d(input, order, axis, output)\n return return_value\n\n\ndef spline_filter(input, order = 3, output = numpy.float64,\n output_type = None):\n \"\"\"Multi-dimensional spline filter.\n\n Note: The multi-dimensional filter is implemented as a sequence of\n one-dimensional spline filters. The intermediate arrays are stored\n in the same data type as the output. Therefore, for output types\n with a limited precision, the results may be imprecise because\n intermediate results may be stored with insufficient precision.\n \"\"\"\n if order < 2 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n output, return_value = _ni_support._get_output(output, input,\n output_type)\n if order not in [0, 1] and input.ndim > 0:\n for axis in range(input.ndim):\n spline_filter1d(input, order, axis, output = output)\n input = output\n else:\n output[...] 
= input[...]\n return return_value\n\ndef geometric_transform(input, mapping, output_shape = None,\n output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True,\n extra_arguments = (), extra_keywords = {}):\n \"\"\"Apply an arbritrary geometric transform.\n\n The given mapping function is used to find, for each point in the\n output, the corresponding coordinates in the input. The value of the\n input at those coordinates is determined by spline interpolation of\n the requested order.\n\n mapping must be a callable object that accepts a tuple of length\n equal to the output array rank and returns the corresponding input\n coordinates as a tuple of length equal to the input array\n rank. Points outside the boundaries of the input are filled\n according to the given mode ('constant', 'nearest', 'reflect' or\n 'wrap'). The output shape can optionally be given. If not given,\n it is equal to the input shape. The parameter prefilter determines\n if the input is pre-filtered before interpolation (necessary for\n spline interpolation of order > 1). If False it is assumed that\n the input is already filtered. The extra_arguments and\n extra_keywords arguments can be used to provide extra arguments\n and keywords that are passed to the mapping function at each call.\n\n Example\n -------\n >>> a = arange(12.).reshape((4,3))\n >>> def shift_func(output_coordinates):\n ... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)\n ...\n >>> print geometric_transform(a,shift_func)\n array([[ 0. , 0. , 0. ],\n [ 0. , 1.3625, 2.7375],\n [ 0. , 4.8125, 6.1875],\n [ 0. , 8.2625, 9.6375]])\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if output_shape is None:\n output_shape = input.shape\n if input.ndim < 1 or len(output_shape) < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n _nd_image.geometric_transform(filtered, mapping, None, None, None,\n output, order, mode, cval, extra_arguments, extra_keywords)\n return return_value\n\n\ndef map_coordinates(input, coordinates, output_type = None, output = None,\n order = 3, mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"\n Map the input array to new coordinates by interpolation.\n\n The array of coordinates is used to find, for each point in the output,\n the corresponding coordinates in the input. The value of the input at\n those coordinates is determined by spline interpolation of the\n requested order.\n\n The shape of the output is derived from that of the coordinate\n array by dropping the first axis. 
The values of the array along\n the first axis are the coordinates in the input array at which the\n output value is found.\n\n Parameters\n ----------\n input : ndarray\n The input array\n coordinates : array_like\n The coordinates at which `input` is evaluated.\n output_type : deprecated\n Use `output` instead.\n output : dtype, optional\n If the output has to have a certain type, specify the dtype.\n The default behavior is for the output to have the same type\n as `input`.\n order : int, optional\n The order of the spline interpolation, default is 3.\n The order has to be in the range 0-5.\n mode : str, optional\n Points outside the boundaries of the input are filled according\n to the given mode ('constant', 'nearest', 'reflect' or 'wrap').\n Default is 'constant'.\n cval : scalar, optional\n Value used for points outside the boundaries of the input if\n `mode='constant`. Default is 0.0\n prefilter : bool, optional\n The parameter prefilter determines if the input is\n pre-filtered with `spline_filter`_ before interpolation\n (necessary for spline interpolation of order > 1).\n If False, it is assumed that the input is already filtered.\n\n Returns\n -------\n return_value : ndarray\n The result of transforming the input. The shape of the\n output is derived from that of `coordinates` by dropping\n the first axis.\n\n\n See Also\n --------\n spline_filter, geometric_transform, scipy.interpolate\n\n Examples\n --------\n >>> import scipy.ndimage\n >>> a = np.arange(12.).reshape((4,3))\n >>> print a\n array([[ 0., 1., 2.],\n [ 3., 4., 5.],\n [ 6., 7., 8.],\n [ 9., 10., 11.]])\n >>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)\n [ 2. 7.]\n\n Above, the interpolated value of a[0.5, 0.5] gives output[0], while\n a[2, 1] is output[1].\n\n >>> inds = np.array([[0.5, 2], [0.5, 4]])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)\n array([ 2. , -33.3])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')\n array([ 2., 8.])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)\n array([ True, False], dtype=bool\n\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n coordinates = numpy.asarray(coordinates)\n if numpy.iscomplexobj(coordinates):\n raise TypeError('Complex type not supported')\n output_shape = coordinates.shape[1:]\n if input.ndim < 1 or len(output_shape) < 1:\n raise RuntimeError('input and output rank must be > 0')\n if coordinates.shape[0] != input.ndim:\n raise RuntimeError('invalid shape for coordinate array')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n _nd_image.geometric_transform(filtered, None, coordinates, None, None,\n output, order, mode, cval, None, None)\n return return_value\n\n\ndef affine_transform(input, matrix, offset = 0.0, output_shape = None,\n output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Apply an affine transformation.\n\n The given matrix and offset are used to find for each point in the\n output the corresponding coordinates in the input by an affine\n transformation. The value of the input at those coordinates is\n determined by spline interpolation of the requested order. 
Points\n outside the boundaries of the input are filled according to the given\n mode. The output shape can optionally be given. If not given it is\n equal to the input shape. The parameter prefilter determines if the\n input is pre-filtered before interpolation, if False it is assumed\n that the input is already filtered.\n\n The matrix must be two-dimensional or can also be given as a\n one-dimensional sequence or array. In the latter case, it is\n assumed that the matrix is diagonal. A more efficient algorithms\n is then applied that exploits the separability of the problem.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if output_shape is None:\n output_shape = input.shape\n if input.ndim < 1 or len(output_shape) < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n matrix = numpy.asarray(matrix, dtype = numpy.float64)\n if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:\n raise RuntimeError('no proper affine matrix provided')\n if matrix.shape[0] != input.ndim:\n raise RuntimeError('affine matrix has wrong number of rows')\n if matrix.ndim == 2 and matrix.shape[1] != output.ndim:\n raise RuntimeError('affine matrix has wrong number of columns')\n if not matrix.flags.contiguous:\n matrix = matrix.copy()\n offset = _ni_support._normalize_sequence(offset, input.ndim)\n offset = numpy.asarray(offset, dtype = numpy.float64)\n if offset.ndim != 1 or offset.shape[0] < 1:\n raise RuntimeError('no proper offset provided')\n if not offset.flags.contiguous:\n offset = offset.copy()\n if matrix.ndim == 1:\n _nd_image.zoom_shift(filtered, matrix, offset, output, order,\n mode, cval)\n else:\n _nd_image.geometric_transform(filtered, None, None, matrix, offset,\n output, order, mode, cval, None, None)\n return return_value\n\n\ndef shift(input, shift, output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Shift an array.\n\n The array is shifted using spline interpolation of the requested\n order. Points outside the boundaries of the input are filled according\n to the given mode. 
The parameter prefilter determines if the input is\n pre-filtered before interpolation, if False it is assumed that the\n input is already filtered.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if input.ndim < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type)\n shift = _ni_support._normalize_sequence(shift, input.ndim)\n shift = [-ii for ii in shift]\n shift = numpy.asarray(shift, dtype = numpy.float64)\n if not shift.flags.contiguous:\n shift = shift.copy()\n _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)\n return return_value\n\n\ndef zoom(input, zoom, output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Zoom an array.\n\n The array is zoomed using spline interpolation of the requested order.\n Points outside the boundaries of the input are filled according to the\n given mode. The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if input.ndim < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n zoom = _ni_support._normalize_sequence(zoom, input.ndim)\n output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])\n zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n zoom = numpy.asarray(zoom, dtype = numpy.float64)\n zoom = numpy.ascontiguousarray(zoom)\n _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)\n return return_value\n\ndef _minmax(coor, minc, maxc):\n if coor[0] < minc[0]:\n minc[0] = coor[0]\n if coor[0] > maxc[0]:\n maxc[0] = coor[0]\n if coor[1] < minc[1]:\n minc[1] = coor[1]\n if coor[1] > maxc[1]:\n maxc[1] = coor[1]\n return minc, maxc\n\ndef rotate(input, angle, axes = (1, 0), reshape = True,\n output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Rotate an array.\n\n The array is rotated in the plane defined by the two axes given by the\n axes parameter using spline interpolation of the requested order. The\n angle is given in degrees. Points outside the boundaries of the input\n are filled according to the given mode. If reshape is true, the output\n shape is adapted so that the input array is contained completely in\n the output. 
The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n \"\"\"\n input = numpy.asarray(input)\n axes = list(axes)\n rank = input.ndim\n if axes[0] < 0:\n axes[0] += rank\n if axes[1] < 0:\n axes[1] += rank\n if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:\n raise RuntimeError('invalid rotation plane specified')\n if axes[0] > axes[1]:\n axes = axes[1], axes[0]\n angle = numpy.pi / 180 * angle\n m11 = math.cos(angle)\n m12 = math.sin(angle)\n m21 = -math.sin(angle)\n m22 = math.cos(angle)\n matrix = numpy.array([[m11, m12],\n [m21, m22]], dtype = numpy.float64)\n iy = input.shape[axes[0]]\n ix = input.shape[axes[1]]\n if reshape:\n mtrx = numpy.array([[ m11, -m21],\n [-m12, m22]], dtype = numpy.float64)\n minc = [0, 0]\n maxc = [0, 0]\n coor = numpy.dot(mtrx, [0, ix])\n minc, maxc = _minmax(coor, minc, maxc)\n coor = numpy.dot(mtrx, [iy, 0])\n minc, maxc = _minmax(coor, minc, maxc)\n coor = numpy.dot(mtrx, [iy, ix])\n minc, maxc = _minmax(coor, minc, maxc)\n oy = int(maxc[0] - minc[0] + 0.5)\n ox = int(maxc[1] - minc[1] + 0.5)\n else:\n oy = input.shape[axes[0]]\n ox = input.shape[axes[1]]\n offset = numpy.zeros((2,), dtype = numpy.float64)\n offset[0] = float(oy) / 2.0 - 0.5\n offset[1] = float(ox) / 2.0 - 0.5\n offset = numpy.dot(matrix, offset)\n tmp = numpy.zeros((2,), dtype = numpy.float64)\n tmp[0] = float(iy) / 2.0 - 0.5\n tmp[1] = float(ix) / 2.0 - 0.5\n offset = tmp - offset\n output_shape = list(input.shape)\n output_shape[axes[0]] = oy\n output_shape[axes[1]] = ox\n output_shape = tuple(output_shape)\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n if input.ndim <= 2:\n affine_transform(input, matrix, offset, output_shape, None, output,\n order, mode, cval, prefilter)\n else:\n coordinates = []\n size = numpy.product(input.shape,axis=0)\n size /= input.shape[axes[0]]\n size /= input.shape[axes[1]]\n for ii in range(input.ndim):\n if ii not in axes:\n coordinates.append(0)\n else:\n coordinates.append(slice(None, None, None))\n iter_axes = list(range(input.ndim))\n iter_axes.reverse()\n iter_axes.remove(axes[0])\n iter_axes.remove(axes[1])\n os = (output_shape[axes[0]], output_shape[axes[1]])\n for ii in range(size):\n ia = input[tuple(coordinates)]\n oa = output[tuple(coordinates)]\n affine_transform(ia, matrix, offset, os, None, oa, order, mode,\n cval, prefilter)\n for jj in iter_axes:\n if coordinates[jj] < input.shape[jj] - 1:\n coordinates[jj] += 1\n break\n else:\n coordinates[jj] = 0\n return return_value\n"
] |
[
[
"matplotlib.pylab.figure",
"matplotlib.pylab.show"
],
[
"numpy.product",
"numpy.array",
"numpy.dot",
"numpy.asarray",
"numpy.zeros",
"numpy.ascontiguousarray",
"numpy.iscomplexobj"
]
] |
forsubmissionanonymity/nips_2021_2271
|
[
"81a9eccb222738ccab1c540a87b701b0b9783ba3"
] |
[
"utils/utils_squad_evaluate.py"
] |
[
"\"\"\" Official evaluation script for SQuAD version 2.0.\r\n Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0\r\nIn addition to basic functionality, we also compute additional statistics and\r\nplot precision-recall curves if an additional na_prob.json file is provided.\r\nThis file is expected to map question ID's to the model's predicted probability\r\nthat a question is unanswerable.\r\n\"\"\"\r\nimport argparse\r\nimport collections\r\nimport json\r\nimport numpy as np\r\nimport os\r\nimport re\r\nimport string\r\nimport sys\r\n\r\nclass EVAL_OPTS():\r\n def __init__(self, data_file, pred_file, out_file=\"\",\r\n na_prob_file=\"na_prob.json\", na_prob_thresh=1.0,\r\n out_image_dir=None, verbose=False):\r\n self.data_file = data_file\r\n self.pred_file = pred_file\r\n self.out_file = out_file\r\n self.na_prob_file = na_prob_file\r\n self.na_prob_thresh = na_prob_thresh\r\n self.out_image_dir = out_image_dir\r\n self.verbose = verbose\r\n\r\nOPTS = None\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')\r\n parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')\r\n parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')\r\n parser.add_argument('--out-file', '-o', metavar='eval.json',\r\n help='Write accuracy metrics to file (default is stdout).')\r\n parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',\r\n help='Model estimates of probability of no answer.')\r\n parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,\r\n help='Predict \"\" if no-answer probability exceeds this (default = 1.0).')\r\n parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,\r\n help='Save precision-recall curves to directory.')\r\n parser.add_argument('--verbose', '-v', action='store_true')\r\n if len(sys.argv) == 1:\r\n parser.print_help()\r\n sys.exit(1)\r\n return parser.parse_args()\r\n\r\ndef make_qid_to_has_ans(dataset):\r\n qid_to_has_ans = {}\r\n for article in dataset:\r\n for p in article['paragraphs']:\r\n for qa in p['qas']:\r\n qid_to_has_ans[qa['id']] = bool(qa['answers'])\r\n return qid_to_has_ans\r\n\r\ndef normalize_answer(s):\r\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\r\n def remove_articles(text):\r\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\r\n return re.sub(regex, ' ', text)\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in exclude)\r\n def lower(text):\r\n return text.lower()\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))\r\n\r\ndef get_tokens(s):\r\n if not s: return []\r\n return normalize_answer(s).split()\r\n\r\ndef compute_exact(a_gold, a_pred):\r\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\r\n\r\ndef compute_f1(a_gold, a_pred):\r\n gold_toks = get_tokens(a_gold)\r\n pred_toks = get_tokens(a_pred)\r\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)\r\n num_same = sum(common.values())\r\n if len(gold_toks) == 0 or len(pred_toks) == 0:\r\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\r\n return int(gold_toks == pred_toks)\r\n if num_same == 0:\r\n return 0\r\n precision = 1.0 * num_same / len(pred_toks)\r\n recall = 1.0 * num_same / len(gold_toks)\r\n f1 = (2 * precision * recall) / (precision + recall)\r\n return 
f1\r\n\r\ndef get_raw_scores(dataset, preds):\r\n exact_scores = {}\r\n f1_scores = {}\r\n for article in dataset:\r\n for p in article['paragraphs']:\r\n for qa in p['qas']:\r\n qid = qa['id']\r\n gold_answers = [a['text'] for a in qa['answers']\r\n if normalize_answer(a['text'])]\r\n if not gold_answers:\r\n # For unanswerable questions, only correct answer is empty string\r\n gold_answers = ['']\r\n if qid not in preds:\r\n print('Missing prediction for %s' % qid)\r\n continue\r\n a_pred = preds[qid]\r\n # Take max over all gold answers\r\n exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)\r\n f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)\r\n return exact_scores, f1_scores\r\n\r\ndef apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):\r\n new_scores = {}\r\n for qid, s in scores.items():\r\n pred_na = na_probs[qid] > na_prob_thresh\r\n if pred_na:\r\n new_scores[qid] = float(not qid_to_has_ans[qid])\r\n else:\r\n new_scores[qid] = s\r\n return new_scores\r\n\r\ndef make_eval_dict(exact_scores, f1_scores, qid_list=None):\r\n if not qid_list:\r\n total = len(exact_scores)\r\n return collections.OrderedDict([\r\n ('exact', 100.0 * sum(exact_scores.values()) / total),\r\n ('f1', 100.0 * sum(f1_scores.values()) / total),\r\n ('total', total),\r\n ])\r\n else:\r\n total = len(qid_list)\r\n return collections.OrderedDict([\r\n ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),\r\n ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),\r\n ('total', total),\r\n ])\r\n\r\ndef merge_eval(main_eval, new_eval, prefix):\r\n for k in new_eval:\r\n main_eval['%s_%s' % (prefix, k)] = new_eval[k]\r\n\r\ndef plot_pr_curve(precisions, recalls, out_image, title):\r\n plt.step(recalls, precisions, color='b', alpha=0.2, where='post')\r\n plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')\r\n plt.xlabel('Recall')\r\n plt.ylabel('Precision')\r\n plt.xlim([0.0, 1.05])\r\n plt.ylim([0.0, 1.05])\r\n plt.title(title)\r\n plt.savefig(out_image)\r\n plt.clf()\r\n\r\ndef make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=None, title=None):\r\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\r\n true_pos = 0.0\r\n cur_p = 1.0\r\n cur_r = 0.0\r\n precisions = [1.0]\r\n recalls = [0.0]\r\n avg_prec = 0.0\r\n for i, qid in enumerate(qid_list):\r\n if qid_to_has_ans[qid]:\r\n true_pos += scores[qid]\r\n cur_p = true_pos / float(i+1)\r\n cur_r = true_pos / float(num_true_pos)\r\n if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:\r\n # i.e., if we can put a threshold after this point\r\n avg_prec += cur_p * (cur_r - recalls[-1])\r\n precisions.append(cur_p)\r\n recalls.append(cur_r)\r\n if out_image:\r\n plot_pr_curve(precisions, recalls, out_image, title)\r\n return {'ap': 100.0 * avg_prec}\r\n\r\ndef run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, \r\n qid_to_has_ans, out_image_dir):\r\n if out_image_dir and not os.path.exists(out_image_dir):\r\n os.makedirs(out_image_dir)\r\n num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)\r\n if num_true_pos == 0:\r\n return\r\n pr_exact = make_precision_recall_eval(\r\n exact_raw, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=os.path.join(out_image_dir, 'pr_exact.png'),\r\n title='Precision-Recall curve for Exact Match score')\r\n pr_f1 = make_precision_recall_eval(\r\n f1_raw, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=os.path.join(out_image_dir, 
'pr_f1.png'),\r\n title='Precision-Recall curve for F1 score')\r\n oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}\r\n pr_oracle = make_precision_recall_eval(\r\n oracle_scores, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=os.path.join(out_image_dir, 'pr_oracle.png'),\r\n title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')\r\n merge_eval(main_eval, pr_exact, 'pr_exact')\r\n merge_eval(main_eval, pr_f1, 'pr_f1')\r\n merge_eval(main_eval, pr_oracle, 'pr_oracle')\r\n\r\ndef histogram_na_prob(na_probs, qid_list, image_dir, name):\r\n if not qid_list:\r\n return\r\n x = [na_probs[k] for k in qid_list]\r\n weights = np.ones_like(x) / float(len(x))\r\n plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))\r\n plt.xlabel('Model probability of no-answer')\r\n plt.ylabel('Proportion of dataset')\r\n plt.title('Histogram of no-answer probability: %s' % name)\r\n plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))\r\n plt.clf()\r\n\r\ndef find_best_thresh(preds, scores, na_probs, qid_to_has_ans):\r\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\r\n cur_score = num_no_ans\r\n best_score = cur_score\r\n best_thresh = 0.0\r\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\r\n for i, qid in enumerate(qid_list):\r\n if qid not in scores: continue\r\n if qid_to_has_ans[qid]:\r\n diff = scores[qid]\r\n else:\r\n if preds[qid]:\r\n diff = -1\r\n else:\r\n diff = 0\r\n cur_score += diff\r\n if cur_score > best_score:\r\n best_score = cur_score\r\n best_thresh = na_probs[qid]\r\n return 100.0 * best_score / len(scores), best_thresh\r\n\r\ndef find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):\r\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\r\n cur_score = num_no_ans\r\n best_score = cur_score\r\n best_thresh = 0.0\r\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\r\n for i, qid in enumerate(qid_list):\r\n if qid not in scores: continue\r\n if qid_to_has_ans[qid]:\r\n diff = scores[qid]\r\n else:\r\n if preds[qid]:\r\n diff = -1\r\n else:\r\n diff = 0\r\n cur_score += diff\r\n if cur_score > best_score:\r\n best_score = cur_score\r\n best_thresh = na_probs[qid]\r\n\r\n has_ans_score, has_ans_cnt = 0, 0\r\n for qid in qid_list:\r\n if not qid_to_has_ans[qid]: continue\r\n has_ans_cnt += 1\r\n\r\n if qid not in scores: continue\r\n has_ans_score += scores[qid]\r\n\r\n return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt\r\n\r\ndef find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\r\n best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)\r\n best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)\r\n main_eval['best_exact'] = best_exact\r\n main_eval['best_exact_thresh'] = exact_thresh\r\n main_eval['best_f1'] = best_f1\r\n main_eval['best_f1_thresh'] = f1_thresh\r\n\r\ndef find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\r\n best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)\r\n best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)\r\n main_eval['best_exact'] = best_exact\r\n main_eval['best_exact_thresh'] = exact_thresh\r\n main_eval['best_f1'] = best_f1\r\n main_eval['best_f1_thresh'] = f1_thresh\r\n main_eval['has_ans_exact'] = has_ans_exact\r\n main_eval['has_ans_f1'] = has_ans_f1\r\n\r\ndef main(OPTS):\r\n with 
open(OPTS.data_file) as f:\r\n dataset_json = json.load(f)\r\n dataset = dataset_json['data']\r\n with open(OPTS.pred_file) as f:\r\n preds = json.load(f)\r\n if OPTS.na_prob_file:\r\n with open(OPTS.na_prob_file) as f:\r\n na_probs = json.load(f)\r\n else:\r\n na_probs = {k: 0.0 for k in preds}\r\n qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False\r\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\r\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\r\n exact_raw, f1_raw = get_raw_scores(dataset, preds)\r\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\r\n OPTS.na_prob_thresh)\r\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\r\n OPTS.na_prob_thresh)\r\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\r\n if has_ans_qids:\r\n has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)\r\n merge_eval(out_eval, has_ans_eval, 'HasAns')\r\n if no_ans_qids:\r\n no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)\r\n merge_eval(out_eval, no_ans_eval, 'NoAns')\r\n if OPTS.na_prob_file:\r\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)\r\n if OPTS.na_prob_file and OPTS.out_image_dir:\r\n run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, \r\n qid_to_has_ans, OPTS.out_image_dir)\r\n histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')\r\n histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')\r\n if OPTS.out_file:\r\n with open(OPTS.out_file, 'w') as f:\r\n json.dump(out_eval, f)\r\n else:\r\n print(json.dumps(out_eval, indent=2))\r\n return out_eval\r\n \r\nif __name__ == '__main__':\r\n OPTS = parse_args()\r\n if OPTS.out_image_dir:\r\n import matplotlib\r\n matplotlib.use('Agg')\r\n import matplotlib.pyplot as plt \r\n main(OPTS)"
] |
[
[
"matplotlib.use",
"numpy.ones_like",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.step",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf"
]
] |
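The entry above is the official SQuAD v2.0 evaluation script as modified by the XLNet authors. Because it exposes `EVAL_OPTS` and `main()`, it can be driven programmatically instead of through `parse_args()`. A minimal sketch follows; the module name `evaluate_squad` and all three file names are placeholders, not part of the original entry.

```python
# Hedged usage sketch for the evaluation script above; module and file
# names are assumptions.
from evaluate_squad import EVAL_OPTS, main

opts = EVAL_OPTS(
    data_file="dev-v2.0.json",       # official SQuAD v2.0 dev set
    pred_file="predictions.json",    # maps qid -> predicted answer string
    na_prob_file="na_prob.json",     # maps qid -> P(question is unanswerable)
    na_prob_thresh=1.0,
)
results = main(opts)                 # returns an OrderedDict of metrics
print(results["exact"], results["f1"])
# Present because na_prob_file was given (populated by find_all_best_thresh):
print(results["best_f1"], results["best_f1_thresh"])
```

Since `out_file` is left empty, `main` also pretty-prints the full metrics dict to stdout; passing `out_image_dir` additionally requires matplotlib, which the script only imports in its `__main__` block.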
leyiweb/Deep-SAD-PyTorch
|
[
"305667c84b92167792816794f84b41273a7b41c0"
] |
[
"src/networks/layers/standard.py"
] |
[
"import torch\n\nfrom torch.nn import Module\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\n\n\n# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch\nclass Standardize(Module):\n \"\"\"\n Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes\n (x - μ) / σ where '/' is applied element-wise.\n\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n bias: If set to False, the layer will not learn a translation parameter μ.\n Default: ``True``\n\n Attributes:\n mu: the learnable translation parameter μ.\n std: the learnable scale parameter σ.\n \"\"\"\n __constants__ = ['mu']\n\n def __init__(self, in_features, bias=True, eps=1e-6):\n super(Standardize, self).__init__()\n self.in_features = in_features\n self.out_features = in_features\n self.eps = eps\n self.std = Parameter(torch.Tensor(in_features))\n if bias:\n self.mu = Parameter(torch.Tensor(in_features))\n else:\n self.register_parameter('mu', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n init.constant_(self.std, 1)\n if self.mu is not None:\n init.constant_(self.mu, 0)\n\n def forward(self, x):\n if self.mu is not None:\n x -= self.mu\n x = torch.div(x, self.std + self.eps)\n return x\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.mu is not None\n )\n"
] |
[
[
"torch.div",
"torch.Tensor",
"torch.nn.init.constant_"
]
] |
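The `Standardize` layer above learns a per-feature translation μ and scale σ. A minimal smoke test is sketched below; the import path is a guess from the file path `src/networks/layers/standard.py`.

```python
# Minimal sketch exercising the Standardize layer above; the import path
# is an assumption.
import torch
from networks.layers.standard import Standardize

layer = Standardize(in_features=4)
x = torch.randn(8, 4)   # batch of 8 feature vectors
y = layer(x)            # computes (x - mu) / (std + eps) element-wise
print(y.shape)          # torch.Size([8, 4])
print(layer)            # extra_repr: in_features=4, out_features=4, bias=True
```

One caveat worth noting: `forward` subtracts `mu` in place (`x -= self.mu`), so callers should not pass a leaf tensor that itself requires grad, or PyTorch will raise a runtime error.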
BlueAmulet/BasicSR
|
[
"7040913d8659a05af4c2428feb71c260efbf1e9c"
] |
[
"codes/models/modules/loss.py"
] |
[
"import torch\nimport torch.nn as nn\nimport math\nimport numbers\nfrom torch.nn import functional as F\nimport numpy as np\n\ndef LoG(imgHF): #Laplacian of Gaussian\n # The LoG operator calculates the second spatial derivative of an image. \n # This means that in areas where the image has a constant intensity (i.e. \n # where the intensity gradient is zero), the LoG response will be zero. \n # In the vicinity of a change in intensity, however, the LoG response \n # will be positive on the darker side, and negative on the lighter side. \n # This means that at a reasonably sharp edge between two regions of \n # uniform but different intensities, the LoG response will be:\n # - zero at a long distance from the edge,\n # - positive just to one side of the edge,\n # - negative just to the other side of the edge,\n # - zero at some point in between, on the edge itself.\n # The enhancement sharpens the edges but also increases noise. If the \n # original image is filtered with a simple Laplacian (a LoG filter \n # with a very narrow Gaussian), the resulting output is rather noisy.\n # Combining this output with the original will give a noisy result. \n # On the other hand, using a larger σ for the Gaussian will reduce \n # the noise, but the sharpening effect will be reduced. \n\n # The 2-D LoG can be approximated by a 5 by 5 convolution kernel such as:\n weight = [\n [0, 0, 1, 0, 0],\n [0, 1, 2, 1, 0],\n [1, 2, -16, 2, 1],\n [0, 1, 2, 1, 0],\n [0, 0, 1, 0, 0]\n ]\n weight = np.array(weight)\n weight_np = np.zeros((1, 1, 5, 5))\n \n \"\"\"\n # 3x3 Laplacian kernels (without Gaussian smoothing)\n # These kernels are approximating a second derivative measurement on \n # the image, they are very sensitive to noise. To counter this, the \n # image is often Gaussian smoothed before applying the Laplacian filter.\n # Note that the output can contain negative and non-integer values, \n # so for display purposes the image has been normalized.\n ## 3x3 v1:\n weight = [\n [0, -1, 0],\n [-1, 4, -1],\n [0, -1, 0]\n ]\n \n ## 3x3 v2:\n # weight = [\n # [-1, -1, -1],\n # [-1, 8, -1],\n # [-1, -1, -1]\n # ]\n\n weight = np.array(weight)\n weight_np = np.zeros((1, 1, 3, 3))\n \"\"\"\n \n weight_np[0, 0, :, :] = weight\n weight_np = np.repeat(weight_np, imgHF.shape[1], axis=1)\n weight_np = np.repeat(weight_np, imgHF.shape[0], axis=0)\n\n weight = torch.from_numpy(weight_np).type(torch.FloatTensor).to('cuda:0')\n \n return nn.functional.conv2d(imgHF, weight, padding=1)\n\nclass GaussianSmoothing(nn.Module):\n def __init__(self, channels, kernel_size=15, sigma=3, dim=2):\n super(GaussianSmoothing, self).__init__()\n if isinstance(kernel_size, numbers.Number):\n kernel_size = [kernel_size] * dim\n if isinstance(sigma, numbers.Number):\n sigma = [sigma] * dim\n\n kernel = 1\n meshgrids = torch.meshgrid(\n [\n torch.arange(size, dtype=torch.float32)\n for size in kernel_size\n ]\n )\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \\\n torch.exp(-((mgrid - mean) / std) ** 2 / 2)\n\n kernel = kernel / torch.sum(kernel)\n kernel = kernel.view(1, 1, *kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n\n self.register_buffer('weight', kernel)\n self.groups = channels\n\n if dim == 1:\n self.conv = F.conv1d\n elif dim == 2:\n self.conv = F.conv2d\n elif dim == 3:\n self.conv = F.conv3d\n else:\n raise RuntimeError(\n 'Only 1, 2 and 3 dimensions are supported. 
Received {}.'.format(dim)\n )\n\n def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)\n\nclass CharbonnierLoss(nn.Module):\n \"\"\"Charbonnier Loss (L1)\"\"\"\n\n def __init__(self, eps=1e-6):\n super(CharbonnierLoss, self).__init__()\n self.eps = eps\n\n def forward(self, x, y):\n b, c, h, w = y.size()\n diff = x - y\n loss = torch.sum(torch.sqrt(diff * diff + self.eps))\n #loss = torch.sum(torch.sqrt((x - y).pow(2) + self.eps **2)) / x.shape[0]\n return loss/(c*b*h*w)\n \n# Define GAN loss: [vanilla | lsgan | wgan-gp]\n# https://tuatini.me/creating-and-shipping-deep-learning-models-into-production/\nclass GANLoss(nn.Module):\n def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n super(GANLoss, self).__init__()\n self.gan_type = gan_type.lower()\n self.real_label_val = real_label_val\n self.fake_label_val = fake_label_val\n\n if self.gan_type == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif self.gan_type == 'lsgan':\n self.loss = nn.MSELoss()\n elif self.gan_type == 'srpgan':\n self.loss = nn.BCELoss() #0.001 * F.binary_cross_entropy(d_sr_out, torch.ones_like(d_sr_out))\n elif self.gan_type == 'wgan-gp':\n\n def wgan_loss(input, target):\n # target is boolean\n return -1 * input.mean() if target else input.mean()\n\n self.loss = wgan_loss\n else:\n raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))\n\n def get_target_label(self, input, target_is_real):\n if self.gan_type == 'wgan-gp':\n return target_is_real\n if target_is_real:\n return torch.empty_like(input).fill_(self.real_label_val) #torch.ones_like(d_sr_out)\n else:\n return torch.empty_like(input).fill_(self.fake_label_val) #torch.zeros_like(d_sr_out)\n\n def forward(self, input, target_is_real):\n target_label = self.get_target_label(input, target_is_real)\n loss = self.loss(input, target_label)\n return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n def __init__(self, device=torch.device('cpu')):\n super(GradientPenaltyLoss, self).__init__()\n self.register_buffer('grad_outputs', torch.Tensor())\n self.grad_outputs = self.grad_outputs.to(device)\n\n def get_grad_outputs(self, input):\n if self.grad_outputs.size() != input.size():\n self.grad_outputs.resize_(input.size()).fill_(1.0)\n return self.grad_outputs\n\n def forward(self, interp, interp_crit):\n grad_outputs = self.get_grad_outputs(interp_crit)\n grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \\\n grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]\n grad_interp = grad_interp.view(grad_interp.size(0), -1)\n grad_interp_norm = grad_interp.norm(2, dim=1)\n\n loss = ((grad_interp_norm - 1)**2).mean()\n return loss\n\n\nclass HFENLoss(nn.Module): # Edge loss with pre_smooth\n # In order to further penalize the diferences in fine details, such as edges, \n # a gradient-domain L1 loss can be used, where each gradient ∇(·) is computed \n # using a High Frequency Error Norm (HFEN). The metric uses a Laplacian of\n # Gaussian kernel for edge-detection. The Laplacian works to detect\n # edges, but is sensitive to noise, so the image can be pre-smoothed with a\n # Gaussian filter first to make edge-detection work better. 
The recommended \n # parameter of σ = 1.5 for Gaussian kernel size can be used.\n def __init__(self, loss_f='L1', device='cuda:0', pre_smooth=True, relative=False):\n super(HFENLoss, self).__init__()\n self.device = device\n self.loss_f = loss_f #loss function\n self.pre_smooth = pre_smooth\n self.relative = relative\n self.laplacian = False\n\n if loss_f=='l2':\n self.criterion = nn.MSELoss(reduction='sum').to(device)\n elif loss_f=='elastic':\n self.criterion = ElasticLoss(reduction='sum').to(device)\n elif loss_f=='cb':\n self.criterion = CharbonnierLoss().to(device)\n else: #if loss_f=='l1':\n self.criterion = nn.L1Loss(reduction='sum').to(device)\n\n def forward(self, input, target, eps=0.01):\n c = input.shape[1]\n\n # Note that, since the range of color values can be significantly\n # large, we apply a logarithmic function to the ground truth image to\n # compress its range before computing the loss, i.e., c = log(1 + c˜),\n # where ˜c is the ground truth image in the linear domain.\n # Note: This may not hold true if image range is already [0,1] or [-1,1]\n # input = torch.log(1 + input) #(eps=1e-7)\n \n if self.pre_smooth:\n # As Laplace operator may detect edges as well as noise (isolated, out-of-range), \n # it may be desirable to smooth the image first by a convolution with a Gaussian \n # kernel of width sigma. This will add an additional Gaussian smoothing before LoG\n # to reduce noise and only focus on Edge loss.\n # Configure Gaussian kernel\n smoothing = GaussianSmoothing(c, 11, 1.5) #default: (c, 15, 1.5) | paper: (3, 11, 1.5) | simpler: (c, 5, 1)\n smoothing = smoothing.to(self.device) #.to('cuda:0')\n # Pad input and target\n input_smooth = nn.functional.pad(input, (2, 2, 2, 2), mode='reflect')\n target_smooth = nn.functional.pad(target, (2, 2, 2, 2), mode='reflect')\n # Apply Gaussian kernel \n input_smooth = smoothing(input_smooth)\n target_smooth = smoothing(target_smooth)\n else:\n if self.relative: \n if self.laplacian:\n input_smooth = input\n target_smooth = target\n else:\n input_smooth = nn.functional.pad(input, (1, 1, 1, 1), mode='reflect')\n target_smooth = nn.functional.pad(target, (1, 1, 1, 1), mode='reflect')\n else:\n input_smooth = input\n target_smooth = target\n \n # If using Gaussian+laplacian instead of LoG\n # Needs more testing, look at SSIM that also uses gaussian convolution\n if self.laplacian:\n #Gaussian, needs to be applied for \"Laplacian of Gauss\" (LoG)\n if self.pre_smooth:\n pad_size = 11 #5,7,9,11\n LoG_kernel = 17 #5,9,13,17\n else: \n pad_size = 7 #>= 2\n LoG_kernel = (2*pad_size)+1 #LoG-> pad: 5 -> 2, 15 -> 7, etc\n gaussian = GaussianSmoothing(c, LoG_kernel, 1.5).to(self.device) #default: (c, 15, 1.5) | paper: (3, 11, 1.5) | simpler: (c, 5, 1)\n input_smooth = nn.functional.pad(input_smooth, (pad_size,pad_size,pad_size,pad_size), mode='reflect')\n target_smooth = nn.functional.pad(target_smooth, (pad_size,pad_size,pad_size,pad_size), mode='reflect')\n # Apply Gaussian kernel \n input_smooth = gaussian(input_smooth)\n target_smooth = gaussian(target_smooth)\n \n \"\"\"\n if self.loss_f == 'L2':\n x = torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))\n elif self.loss_f == 'elastic':\n x = torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))\n else: #loss_f == 'L1':\n x = torch.abs(LoG(input_smooth-target_smooth)).sum()\n \"\"\"\n \n if self.relative:\n # Comparing to the original HFEN, introducing the division by |c|+epsilon \n # better models the human vision system’s sensitivity to variations\n # in the dark areas. 
(where epsilon = 0.01, to prevent values of 0 in the\n # denominator)\n # x = self.criterion(LoG(input_smooth)/(target+eps),LoG(target_smooth)/(target+eps))\n x = self.criterion(LoG(input_smooth)/(target+eps).norm(),LoG(target_smooth)/(target+eps).norm())\n # x = self.criterion(lap.Laplacian(LoG_kernel)(input_smooth)/(target+eps),lap.Laplacian(LoG_kernel)(target_smooth)/(target+eps))\n \n else:\n # To calculate the HFEN, a 5x5 rotationally symmetric Laplacian of Gaussian \n # (LoG) filter is used to capture the edges in the absolute reconstruction error \n # image and the HFEN is calculated as the Frobenius norm of the error edge image.\n # x = self.criterion(LoG(input_smooth),LoG(target_smooth)) # No normalization (HFEN needs normalization, can use a case later)\n x = self.criterion(LoG(input_smooth),LoG(target_smooth))/torch.sum(torch.pow(LoG(target_smooth), 2))\n # x = self.criterion(lap.Laplacian(LoG_kernel)(input_smooth),lap.Laplacian(LoG_kernel)(target_smooth))/torch.sum(torch.pow(lap.Laplacian(LoG_kernel)(target_smooth), 2))\n \n # if self.normalized:\n # if self.loss_f == 'l2':\n # x = x / torch.sum(torch.pow(LoG(target), 2))\n ## x = x / target.norm()\n # else: #elif self.loss_f == 'l1':\n # x = x / torch.sum(torch.abs(LoG(target)))\n \n return x\n\nclass TVLoss(nn.Module):\n def __init__(self, tvloss_weight=1, p=1):\n super(TVLoss, self).__init__()\n self.tvloss_weight = tvloss_weight\n assert p in [1, 2]\n self.p = p\n\n def forward(self, x): \n batch_size = x.size()[0]\n img_shape = x.shape\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self.tensor_size(x[:, :, 1:, :])\n count_w = self.tensor_size(x[:, :, :, 1:])\n \n if len(img_shape) == 3 or len(img_shape) == 4:\n if self.p == 1:\n # loss = torch.sum(torch.abs(x[:,:,:-1,:] - x[:,:,1:,:])) + torch.sum(torch.abs(x[:,:,:,:-1] - x[:,:,:,1:]))\n # return self.tvloss_weight * 2 * loss/((count_h/2+count_w/2)*batch_size) #/ x.size(0) / (x.size(2)-1) / (x.size(3)-1)\n \n # Alternative calculation, same results:\n #h_tv = torch.abs((x[:, :, 1:, :] - x[:, :, :h_x - 1, :])).sum()\n #w_tv = torch.abs((x[:, :, :, 1:] - x[:, :, :, :w_x - 1])).sum()\n #return self.tvloss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size # For use with the alternative calculation\n \n # Alternative calculation 2: https://kornia.readthedocs.io/en/latest/_modules/kornia/losses/total_variation.html#total_variation\n pixel_dif1 = x[..., 1:, :] - x[..., :-1, :]\n pixel_dif2 = x[..., :, 1:] - x[..., :, :-1]\n reduce_axes = (-3, -2, -1)\n loss = self.tvloss_weight*(pixel_dif1.abs().sum(dim=reduce_axes) + pixel_dif2.abs().sum(dim=reduce_axes)) # Calculates the TV loss for each image in the batch\n loss = loss.sum() / batch_size # averages the TV loss all the images in the batch \n return loss\n \n # loss = self.tvloss_weight*((x[:,:,1:,:] - x[:,:,:-1,:]).abs().sum(dim=(-3, -2, -1)) + (x[:,:,:,1:] - x[:,:,:,:-1]).abs().sum(dim=(-3, -2, -1)))\n # loss = loss.sum() / batch_size # averages the TV loss all the images in the batch \n # return loss\n \n else:\n #loss = torch.sum(torch.sqrt((x[:,:,:-1,:] - x[:,:,1:,:])**2)) + torch.sum(torch.sqrt((x[:,:,:,:-1] - x[:,:,:,1:])**2)) # Doesn't work, magnitude is too large\n #return self.tvloss_weight * 2 * loss/((count_h/2+count_w/2)*batch_size) #/ x.size(0) / (x.size(2)-1) / (x.size(3)-1) #For use with the alternative calculation that doesn't work yet\n\n # Alternative calculation: # This one works\n # h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\n # w_tv = torch.pow((x[:, :, :, 1:] - x[:, 
:, :, :w_x - 1]), 2).sum()\n # return self.tvloss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size\n \n # Alternative calculation 2: https://kornia.readthedocs.io/en/latest/_modules/kornia/losses/total_variation.html#total_variation\n pixel_dif1 = x[..., 1:, :] - x[..., :-1, :]\n pixel_dif2 = x[..., :, 1:] - x[..., :, :-1]\n reduce_axes = (-3, -2, -1)\n loss = self.tvloss_weight*(torch.pow(pixel_dif1,2).sum(dim=reduce_axes) + torch.pow(pixel_dif2,2).sum(dim=reduce_axes)) # Calculates the TV loss for each image in the batch\n loss = loss.sum() / batch_size # averages the TV loss all the images in the batch \n return loss\n \n else:\n raise ValueError(\"Expected input tensor to be of ndim 3 or 4, but got \" + str(len(img_shape)))\n\n #return self.tvloss_weight * 2 *loss\n \n @staticmethod\n def tensor_size(t):\n return t.size()[1] * t.size()[2] * t.size()[3]\n \nclass ElasticLoss(nn.Module):\n def __init__(self, a=0.2, reduction='mean'): #a=0.5 default\n super(ElasticLoss, self).__init__()\n self.alpha = torch.FloatTensor([a, 1 - a]).to('cuda:0')\n self.reduction = reduction\n\n def forward(self, input, target):\n if not isinstance(input, tuple):\n input = (input,)\n\n for i in range(len(input)):\n l2 = nn.functional.mse_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[0], reduction=self.reduction)\n l1 = nn.functional.l1_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[1], reduction=self.reduction)\n loss = l1 + l2\n\n return loss\n\nclass RelativeL1(nn.Module):\n # Comparing to the regular L1, introducing the division by |c|+epsilon \n # better models the human vision system’s sensitivity to variations\n # in the dark areas. (where epsilon = 0.01, to prevent values of 0 in the\n # denominator)\n def __init__(self):\n super().__init__()\n self.criterion = torch.nn.L1Loss()\n\n def forward(self, input, target):\n base = target +.01\n\n return self.criterion(input/base, target/base)\n\n\n# https://github.com/dmarnerides/hdr-expandnet/blob/master/train.py\n# Can be used to replace L1 pixel loss, but includes a cosine similarity term \n# to ensure color correctness of the RGB vectors of each pixel.\n# lambda is a constant factor that adjusts the contribution of the cosine similarity term\n# It provides improved color stability, especially for low luminance values, which\n# are frequent in HDR images, since slight variations in any of theRGB components of these \n# low values do not contribute much totheL1loss, but they may however cause noticeable \n# color shifts. 
More in the paper: https://arxiv.org/pdf/1803.02266.pdf\nclass L1CosineSim(nn.Module):\n def __init__(self, loss_lambda=5):\n super(L1CosineSim, self).__init__()\n self.similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-20)\n self.l1_loss = nn.L1Loss()\n self.loss_lambda = loss_lambda\n\n def forward(self, x, y):\n cosine_term = (1 - self.similarity(x, y)).mean()\n return self.l1_loss(x, y) + self.loss_lambda * cosine_term\n\n\n\"\"\" \nclass LossCombo(nn.Module):\n def __init__(self, monitor_writer, *losses):\n super().__init__()\n self.monitor_writer = monitor_writer\n pass\n\n self.losses = []\n self.losses_names = []\n self.factors = []\n\n for name, loss, factor in losses:\n self.losses.append(loss)\n self.losses_names.append(name)\n self.factors.append(factor)\n\n self.add_module(name, loss)\n\n def multi_gpu(self):\n pass\n #self.losses = [nn.DataParallel(x) for x in self.losses]\n\n def forward(self, input, target, additional_losses):\n loss_results = []\n for idx, loss in enumerate(self.losses):\n loss_results.append(loss(input, target))\n\n for name, loss_result, factor in zip(self.losses_names, loss_results, self.factors):\n #print(loss_result)\n self.monitor_writer.add_scalar(name, loss_result*factor)\n\n for name, loss_result, factor in additional_losses:\n loss_result = loss_result.mean()\n #print(loss_result)\n self.monitor_writer.add_scalar(name, loss_result*factor)\n\n\n total_loss = sum([factor*loss_result for factor, loss_result in zip(self.factors, loss_results)]) + sum([factor*loss_result.mean() for name, loss_result, factor in additional_losses])\n self.monitor_writer.add_scalar(\"total_loss\", total_loss)\n\n return total_loss\n\"\"\""
] |
[
[
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.pad",
"torch.exp",
"torch.sum",
"torch.sqrt",
"torch.FloatTensor",
"torch.autograd.grad",
"torch.nn.BCELoss",
"torch.Tensor",
"torch.nn.functional.conv2d",
"torch.device",
"numpy.array",
"numpy.zeros",
"torch.pow",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.L1Loss",
"torch.from_numpy",
"numpy.repeat",
"torch.nn.CosineSimilarity",
"torch.empty_like"
]
] |
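Several of the losses above assume CUDA at construction time (`ElasticLoss`, `HFENLoss`, and `LoG` hard-code `'cuda:0'`), and `ElasticLoss.forward` as written passes `reduction=` to `Tensor.mul`, which would raise a `TypeError` when called. The sketch below therefore sticks to two losses that run as-is on CPU; the import path is inferred from the file path `codes/models/modules/loss.py`.

```python
# Hedged sketch of two self-contained losses from the entry above;
# the module path is an assumption.
import torch
from models.modules.loss import CharbonnierLoss, L1CosineSim

sr = torch.rand(2, 3, 32, 32)    # fake super-resolved batch
hr = torch.rand(2, 3, 32, 32)    # fake ground-truth batch

charb = CharbonnierLoss(eps=1e-6)
l1cos = L1CosineSim(loss_lambda=5)
print(charb(sr, hr).item())      # Charbonnier (smooth L1) pixel loss
print(l1cos(sr, hr).item())      # L1 plus cosine color-consistency term
```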
zhafen/galaxy-dive
|
[
"e1127da25d10f699b3ada01b1b4635255f4f3917",
"e1127da25d10f699b3ada01b1b4635255f4f3917"
] |
[
"galaxy_dive/trends/data_products.py",
"galaxy_dive/tests/test_read_data/test_metafile.py"
] |
[
"#!/usr/bin/env python\n'''Compilation of functions for interfacing with miscellanious data products.\n\n@author: Zach Hafen\n@contact: zachary.h.hafen@gmail.com\n@status: Development\n'''\n\nimport copy\nimport numpy as np\nimport os\nimport pandas as pd\n\n########################################################################\n\ndef tidal_tensor_data_grudic(\n snum,\n ids = None,\n data_dir = '/work/03532/mgrudic/tidal_tensor/tidal_tensor_data',\n):\n '''Load data Mike Grudic processed that contains Tidal Tensor, velocity\n dispersion, and items used for calculating the aforementioned quantities.\n\n Args:\n snum (int): Snapshot to retrieve the data for.\n\n ids (array-like): IDs to retrieve. Defaults to all.\n\n data_dir (str): Path to directory containing data.\n\n Returns:\n pandas.DataFrame\n DataFrame containing quantities. When given an ID not in the data\n returns NaN values for that ID.\n '''\n\n def invalid_data_result():\n '''Results when the data is invalid in some form.'''\n base_arr = np.full( len( ids ), np.nan )\n standin_data = {}\n data_keys = [\n 'ID',\n 'Txx',\n 'Tyy',\n 'Tzz',\n 'Txy',\n 'Tyz',\n 'Tzx',\n 'sigma_v',\n 'r_search',\n 'cond_num',\n ]\n for key in data_keys:\n standin_data[key] = copy.deepcopy( base_arr )\n standin_data['ID'] = ids\n df = pd.DataFrame( standin_data )\n df = df.set_index( 'ID' )\n\n return df\n\n # Load the data\n filename = 'tidal_tensor_{}.npy'.format( snum )\n file_path = os.path.join( data_dir, filename )\n try:\n full_arr = np.load( file_path )\n except FileNotFoundError:\n return invalid_data_result()\n \n # Convert to a pandas data frame to get the selected IDs out.\n data = {\n 'ID': full_arr[:,0].astype( int ),\n 'Txx': full_arr[:,1],\n 'Tyy': full_arr[:,2],\n 'Tzz': full_arr[:,3],\n 'Txy': full_arr[:,4],\n 'Tyz': full_arr[:,5],\n 'Tzx': full_arr[:,6],\n 'sigma_v': full_arr[:,7],\n 'r_search': full_arr[:,8],\n 'cond_num': full_arr[:,9],\n }\n df = pd.DataFrame( data, )\n df = df.set_index( 'ID' )\n\n # Select on IDs\n if ids is not None:\n try:\n df = df.loc[ids]\n except KeyError:\n return invalid_data_result()\n\n return df\n",
"#!/usr/bin/env python\n'''Testing for read_metafile.py\n\n@author: Zach Hafen\n@contact: zachary.h.hafen@gmail.com\n@status: Development\n'''\n\nimport numpy as np\nimport numpy.testing as npt\nimport unittest\n\nimport galaxy_dive.read_data.metafile as read_metafile\n\nsdir = './tests/data/sdir'\nsdir2 = './tests/data/sdir2'\n\n########################################################################\n\nclass TestMetafileReader( unittest.TestCase ):\n\n def setUp( self ):\n\n self.metafile_reader = read_metafile.MetafileReader( sdir )\n\n ########################################################################\n\n def test_get_snapshot_times( self ):\n\n expected = 13.55350759 # Time in Gyr for snapshot 580\n\n self.metafile_reader.get_snapshot_times()\n\n actual = self.metafile_reader.snapshot_times['time[Gyr]'][580]\n\n npt.assert_allclose( expected, actual )\n\n ########################################################################\n\n def test_get_snapshot_times_old_filename( self ):\n\n expected = 0.0049998745585804194\n\n # Give it the right directory\n self.metafile_reader.sdir = sdir2\n\n self.metafile_reader.get_snapshot_times()\n\n actual = self.metafile_reader.snapshot_times['redshift'][439]\n\n npt.assert_allclose( expected, actual )\n\n ########################################################################\n\n def test_get_used_parameters( self ):\n\n OmegaBaryon_expected = 0.0455\n\n self.metafile_reader.get_used_parameters()\n\n OmegaBaryon_actual = float( self.metafile_reader.used_parameters['OmegaBaryon'] )\n\n npt.assert_allclose( OmegaBaryon_expected, OmegaBaryon_actual )\n"
] |
[
[
"pandas.DataFrame",
"numpy.load"
],
[
"numpy.testing.assert_allclose"
]
] |
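`tidal_tensor_data_grudic` above degrades gracefully: a missing snapshot file or unmatched IDs produce an all-NaN frame keyed by ID rather than raising. A sketch of the intended call pattern follows; the snapshot number, IDs, and data directory are placeholders.

```python
# Illustrative call to tidal_tensor_data_grudic from the entry above;
# all concrete values here are placeholders.
import numpy as np
from galaxy_dive.trends.data_products import tidal_tensor_data_grudic

df = tidal_tensor_data_grudic(
    snum=580,
    ids=np.array([101, 102, 103]),
    data_dir="/path/to/tidal_tensor_data",
)
# Missing files or unknown IDs come back as NaN rows indexed by ID.
print(df[["Txx", "sigma_v", "cond_num"]])
```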
kthyng/octant
|
[
"65591d87797fa74e0c092d5f50fb0cd703eb412e"
] |
[
"octant/python-gsw/gsw/gibbs/practical_salinity.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom library import Hill_ratio_at_SP2\nfrom gsw.utilities import match_args_return\n\n__all__ = [\n 'SP_from_C',\n 'C_from_SP',\n 'SP_from_R',\n 'R_from_SP',\n 'SP_salinometer',\n 'SP_from_SK',\n 'SK_from_SP'\n ]\n\n# Constants:\na = (0.0080, -0.1692, 25.3851, 14.0941, -7.0261, 2.7081)\n\nb = (0.0005, -0.0056, -0.0066, -0.0375, 0.0636, -0.0144)\n\nc = (0.6766097, 2.00564e-2, 1.104259e-4, -6.9698e-7, 1.0031e-9)\n\nd = (3.426e-2, 4.464e-4, 4.215e-1, -3.107e-3)\n\ne = (2.070e-5, -6.370e-10, 3.989e-15)\n\nP = (4.577801212923119e-3, 1.924049429136640e-1, 2.183871685127932e-5,\n -7.292156330457999e-3, 1.568129536470258e-4, -1.478995271680869e-6,\n 9.086442524716395e-4, -1.949560839540487e-5, -3.223058111118377e-6,\n 1.175871639741131e-7, -7.522895856600089e-5, -2.254458513439107e-6,\n 6.179992190192848e-7, 1.005054226996868e-8, -1.923745566122602e-9,\n 2.259550611212616e-6, 1.631749165091437e-7, -5.931857989915256e-9,\n -4.693392029005252e-9, 2.571854839274148e-10, 4.198786822861038e-12)\n\nq = (5.540896868127855e-5, 2.015419291097848e-1, -1.445310045430192e-5,\n -1.567047628411722e-2, 2.464756294660119e-4, -2.575458304732166e-7,\n 5.071449842454419e-3, -9.081985795339206e-5, -3.635420818812898e-6,\n 2.249490528450555e-8, -1.143810377431888e-3, 2.066112484281530e-5,\n 7.482907137737503e-7, 4.019321577844724e-8, -5.755568141370501e-10,\n 1.120748754429459e-4, -2.420274029674485e-6, -4.774829347564670e-8,\n -4.279037686797859e-9, -2.045829202713288e-10, 5.025109163112005e-12)\n\nr = (3.432285006604888e-3, 1.672940491817403e-1, 2.640304401023995e-5,\n 1.082267090441036e-1, -6.296778883666940e-5, -4.542775152303671e-7,\n -1.859711038699727e-1, 7.659006320303959e-4, -4.794661268817618e-7,\n 8.093368602891911e-9, 1.001140606840692e-1, -1.038712945546608e-3,\n -6.227915160991074e-6, 2.798564479737090e-8, -1.343623657549961e-10,\n 1.024345179842964e-2, 4.981135430579384e-4, 4.466087528793912e-6,\n 1.960872795577774e-8, -2.723159418888634e-10, 1.122200786423241e-12)\n\nu = (5.180529787390576e-3, 1.052097167201052e-3, 3.666193708310848e-5,\n 7.112223828976632, -3.631366777096209e-4, -7.336295318742821e-7,\n -1.576886793288888e+2, -1.840239113483083e-3, 8.624279120240952e-6,\n 1.233529799729501e-8, 1.826482800939545e+3, 1.633903983457674e-1,\n -9.201096427222349e-5, -9.187900959754842e-8, -1.442010369809705e-10,\n -8.542357182595853e+3, -1.408635241899082, 1.660164829963661e-4,\n 6.797409608973845e-7, 3.345074990451475e-10, 8.285687652694768e-13)\n\nk = 0.0162\n\na, b, c, d, e, P, q, r, u, k = map(np.asarray, (a, b, c, d, e, P, q, r, u, k))\n\n\n@match_args_return\ndef SP_from_C(C, t, p):\n r\"\"\"Calculates Practical Salinity, SP, from conductivity, C, primarily\n using the PSS-78 algorithm. Note that the PSS-78 algorithm for Practical\n Salinity is only valid in the range 2 < SP < 42. If the PSS-78 algorithm\n produces a Practical Salinity that is less than 2 then the Practical\n Salinity is recalculated with a modified form of the Hill et al. (1986)\n formula. The modification of the Hill et al. (1986) expression is to ensure\n that it is exactly consistent with PSS-78 at SP = 2. Note that the input\n values of conductivity need to be in units of mS/cm (not S/m).\n\n Parameters\n ----------\n C : array\n conductivity [mS cm :sup:`-1`]\n t : array\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. 
absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] Culkin and Smith, 1980: Determination of the Concentration of\n Potassium Chloride Solution Having the Same Electrical Conductivity, at\n 15C and Infinite Frequency, as Standard Seawater of Salinity 35.0000\n (Chlorinity 19.37394), IEEE J. Oceanic Eng, 5, 22-23.\n\n .. [2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng., 11,\n 109 - 112.\n\n .. [3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. Appendix E.\n\n .. [4] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n\n Modifications:\n 2011-04-01. Paul Barker, Trevor McDougall and Rich Pawlowicz.\n \"\"\"\n\n C, t, p = np.broadcast_arrays(C, t, p)\n\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n # The dimensionless conductivity ratio, R, is the conductivity input, C,\n # divided by the present estimate of C(SP=35, t_68=15, p=0) which is\n # 42.9140 mS/cm (=4.29140 S/m), (Culkin and Smith, 1980).\n\n R = 0.023302418791070513 * C # 0.023302418791070513 = 1./42.9140\n\n # rt_lc corresponds to rt as defined in the UNESCO 44 (1983) routines.\n rt_lc = c[0] + (c[1] + (c[2] + (c[3] + c[4] * t68) * t68) * t68) * t68\n Rp = (1 + (p * (e[0] + e[1] * p + e[2] * p ** 2)) /\n (1 + d[0] * t68 + d[1] * t68 ** 2 + (d[2] + d[3] * t68) * R))\n Rt = R / (Rp * rt_lc)\n\n Rt[Rt < 0] = np.nan\n Rtx = np.sqrt(Rt)\n\n SP = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) * Rtx) *\n Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] + (b[4] + b[5] *\n Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n # The following section of the code is designed for SP < 2 based on the\n # Hill et al. (1986) algorithm. This algorithm is adjusted so that it is\n # exactly equal to the PSS-78 algorithm at SP = 2.\n\n I2, = np.nonzero(np.ravel(SP) < 2)\n if len(I2) > 0:\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n x = 400 * Rt[I2]\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n SP[I2] = Hill_ratio * SP_Hill_raw\n\n SP = np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n return SP\n\n\ndef C_from_SP(SP, t, p):\n r\"\"\"Calculates conductivity, C, from (SP, t, p) using PSS-78 in the range\n 2 < SP < 42. If the input Practical Salinity is less than 2 then a modified\n form of the Hill et al. (1986) fomula is used for Practical Salinity. The\n modification of the Hill et al. (1986) expression is to ensure that it is\n exactly consistent with PSS-78 at SP = 2.\n\n The conductivity ratio returned by this function is consistent with the\n input value of Practical Salinity, SP, to 2x10^-14 psu over the full range\n of input parameters (from pure fresh water up to SP = 42 psu). This error\n of 2x10^-14 psu is machine precision at typical seawater salinities. 
This\n accuracy is achieved by having four different polynomials for the starting\n value of Rtx (the square root of Rt) in four different ranges of SP, and by\n using one and a half iterations of a computationally efficient modified\n Newton-Raphson technique to find the root of the equation.\n\n Parameters\n ----------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n t : array\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n C : array\n conductivity [mS cm :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n Note that strictly speaking PSS-78 (Unesco, 1983) defines Practical\n Salinity in terms of the conductivity ratio, R, without actually\n specifying the value of C(35,15,0) (which we currently take to be\n 42.9140 mS/cm).\n\n Examples\n --------\n TODO\n\n References\n ----------\n .. [1] Hill, K.D., T.M. Dauphinee and D.J. Woods, 1986: The extension of\n the Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng.,\n OE-11, 1, 109 - 112.\n\n .. [2] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See appendix E.\n\n .. [3] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n \"\"\"\n\n C = 42.9140 * R_from_SP(SP, t, p)\n\n return C\n\n\n@match_args_return\ndef SP_from_R(R, t, p):\n r\"\"\"Calculates Practical Salinity, SP, from the conductivity ratio, R,\n primarily using the PSS-78 algorithm. Note that the PSS-78 algorithm for\n Practical Salinity is only valid in the range 2 < SP < 42. If the PSS-78\n algorithm produces a Practical Salinity that is less than 2 then the\n Practical Salinity is recalculated with a modified form of the Hill et al.\n (1986) formula. The modification of the Hill et al. (1986) expression are\n to ensure that it is exactly consistent with PSS-78 at SP = 2.\n\n Parameters\n ----------\n R : array_like\n conductivity ratio [unitless]\n t : array_like\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] Culkin and Smith, 1980: Determination of the Concentration of\n Potassium Chloride Solution Having the Same Electrical Conductivity, at\n 15C and Infinite Frequency, as Standard Seawater of Salinity 35.0000\n (Chlorinity 19.37394), IEEE J. Oceanic Eng, 5, 22-23.\n\n .. [2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng.,\n 11, 109 - 112.\n\n .. [3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. Appendix E.\n\n .. [4] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n\n Modifications:\n 2011-04-01. 
Paul Barker, Trevor McDougall and Rich Pawlowicz.\n \"\"\"\n\n R, t, p = np.broadcast_arrays(R, t, p)\n\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n # rt_lc corresponds to rt as defined in the UNESCO 44 (1983) routines.\n rt_lc = c[0] + (c[1] + (c[2] + (c[3] + c[4] * t68) * t68) * t68) * t68\n Rp = (1 + (p * (e[0] + e[1] * p + e[2] * p ** 2)) /\n (1 + d[0] * t68 + d[1] * t68 ** 2 + (d[2] + d[3] * t68) * R))\n Rt = R / (Rp * rt_lc)\n\n Rt[Rt < 0] = np.nan\n Rtx = np.sqrt(Rt)\n\n SP = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) * Rtx) *\n Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] + (b[4] + b[5] *\n Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n # The following section of the code is designed for SP < 2 based on the\n # Hill et al. (1986) algorithm. This algorithm is adjusted so that it is\n # exactly equal to the PSS-78 algorithm at SP = 2.\n\n I2 = SP < 2\n if I2.any():\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n x = 400 * Rt[I2]\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n SP[I2] = Hill_ratio * SP_Hill_raw\n\n SP = np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n return SP\n\n\n@match_args_return\ndef R_from_SP(SP, t, p):\n r\"\"\"Calculates conductivity ratio from (SP, t, p) using PSS-78 in the range\n 2 < SP < 42. If the input Practical Salinity is less than 2 then a\n modified form of the Hill et al. (1986) formula is used for Practical\n Salinity. The modification of the Hill et al. (1986) expression is to\n ensure that it is exactly consistent with PSS-78 at SP = 2.\n\n The conductivity ratio returned by this function is consistent with the\n input value of Practical Salinity, SP, to 2x10^-14 psu over the full range\n of input parameters (from pure fresh water up to SP = 42 psu). This error\n of 2x10^-14 psu is machine precision at typical seawater salinities. This\n accuracy is achieved by having four different polynomials for the starting\n value of Rtx (the square root of Rt) in four different ranges of SP, and by\n using one and a half iterations of a computationally efficient modified\n Newton-Raphson technique to find the root of the equation.\n\n Parameters\n ----------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n t : array_like\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n R : array_like\n conductivity ratio [unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n Strictly speaking PSS-78 (Unesco, 1983) defines Practical Salinity in terms\n of the conductivity ratio, R, without actually specifying the value of\n C(35, 15, 0) (which we currently take to be 42.9140 mS cm^-1.\n Culkin and Smith, 1980).\n\n References\n ----------\n .. [1] Culkin and Smith, 1980: Determination of the Concentration of\n Potassium Chloride Solution Having the Same Electrical Conductivity, at\n 15C and Infinite Frequency, as Standard Seawater of Salinity 35.0000\n (Chlorinity 19.37394), IEEE J. Oceanic Eng, 5, 22-23.\n\n .. [2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng.,\n 11, 109 - 112.\n\n .. 
[3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. Appendix E.\n\n .. [4] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n\n Modifications:\n 2011-04-06. Paul Barker, Trevor McDougall and Rich Pawlowicz.\n \"\"\"\n\n # These few lines ensure that SP is non-negative.\n if (SP < 0).any():\n raise ValueError('R_from_SP: SP must be non-negative!')\n\n SP, t, p = np.broadcast_arrays(SP, t, p)\n\n # Setting up the constants\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n x = np.sqrt(SP)\n Rtx = np.zeros_like(SP) * np.nan\n\n # Finding the starting value of Rtx, the square root of Rt, using four\n # different polynomials of SP and t68.\n # TODO: Test case that cover all those \"ifs\"\n I = SP >= 9\n if I.any():\n Rtx[I] = P[0] + x[I] * (P[1] + P[4] * t68[I] + x[I] * (P[3] + P[7] *\n t68[I] + x[I] * (P[6] + P[11] * t68[I] + x[I] * (P[10] + P[16] *\n t68[I] + x[I] * P[15])))) + t68[I] * (P[2] + t68[I] * (P[5] + x[I] *\n x[I] * (P[12] + x[I] * P[17]) + P[8] * x[I] + t68[I] * (P[9] + x[I] *\n (P[13] + x[I] * P[18]) + t68[I] * (P[14] + P[19] * x[I] + P[20] *\n t68[I]))))\n\n I = np.logical_and(SP >= 0.25, SP < 9)\n if I.any():\n Rtx[I] = q[0] + x[I] * (q[1] + q[4] * t68[I] + x[I] * (q[3] + q[7] *\n t68[I] + x[I] * (q[6] + q[11] * t68[I] + x[I] * (q[10] + q[16] *\n t68[I] + x[I] * q[15])))) + t68[I] * (q[2] + t68[I] * (q[5] + x[I] *\n x[I] * (q[12] + x[I] * q[17]) + q[8] * x[I] + t68[I] * (q[9] + x[I] *\n (q[13] + x[I] * q[18]) + t68[I] * (q[14] + q[19] * x[I] + q[20] *\n t68[I]))))\n\n I = np.logical_and(SP >= 0.003, SP < 0.25)\n if I.any():\n Rtx[I] = r[0] + x[I] * (r[1] + r[4] * t68[I] + x[I] * (r[3] + r[7] *\n t68[I] + x[I] * (r[6] + r[11] * t68[I] + x[I] * (r[10] + r[16] *\n t68[I] + x[I] * r[15])))) + t68[I] * (r[2] + t68[I] * (r[5] + x[I] *\n x[I] * (r[12] + x[I] * r[17]) + r[8] * x[I] + t68[I] * (r[9] + x[I] *\n (r[13] + x[I] * r[18]) + t68[I] * (r[14] + r[19] * x[I] + r[20] *\n t68[I]))))\n\n I = SP < 0.003\n if I.any():\n Rtx[I] = u[0] + x[I] * (u[1] + u[4] * t68[I] + x[I] * (u[3] + u[7] *\n t68[I] + x[I] * (u[6] + u[11] * t68[I] + x[I] * (u[10] + u[16] *\n t68[I] + x[I] * u[15])))) + t68[I] * (u[2] + t68[I] * (u[5] + x[I] *\n x[I] * (u[12] + x[I] * u[17]) + u[8] * x[I] + t68[I] * (u[9] + x[I] *\n (u[13] + x[I] * u[18]) + t68[I] * (u[14] + u[19] * x[I] + u[20] *\n t68[I]))))\n\n # Finding the starting value of dSP_dRtx, the derivative of SP with\n # respect to Rtx.\n dSP_dRtx = a[1] + (2 * a[2] + (3 * a[3] + (4 * a[4] + 5 * a[5] * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[1] + (2 * b[2] + (3 * b[3] +\n (4 * b[4] + 5 * b[5] * Rtx) * Rtx) * Rtx) * Rtx)\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n dSP_dRtx[I2] = (dSP_dRtx[I2] + a[0] * 800 * Rtx[I2] * (1.5 + 2 * x) /\n (part1 ** 2) + b[0] * ft68[I2] * (10 + sqrty * (20 +\n 30 * sqrty)) / (part2 ** 2))\n\n dSP_dRtx[I2] = Hill_ratio * dSP_dRtx[I2]\n\n \"\"\"One iteration through the modified Newton-Raphson method achieves an\n error in Practical Salinity of about 10^-12 for all combinations of the\n inputs. 
One and a half iterations of the modified Newton-Raphson method\n achieves a maximum error in terms of Practical Salinity of better than\n 2x10^-14 everywhere.\n\n We recommend one and a half iterations of the modified Newton-Raphson\n method.\"\"\"\n\n # Begin the modified Newton-Raphson method.\n SP_est = (a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] +\n (b[4] + b[5] * Rtx) * Rtx) * Rtx) * Rtx) * Rtx))\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP_est[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n SP_est[I2] = Hill_ratio * SP_Hill_raw\n\n Rtx_old = Rtx\n Rtx = Rtx_old - (SP_est - SP) / dSP_dRtx\n\n # This mean value of Rtx, Rtxm, is the value of Rtx at which the\n # derivative dSP_dRtx is evaluated.\n Rtxm = 0.5 * (Rtx + Rtx_old)\n\n dSP_dRtx = a[1] + (2 * a[2] + (3 * a[3] + (4 * a[4] + 5 * a[5] *\n Rtxm) * Rtxm) * Rtxm) * Rtxm + ft68 * (b[1] + (2 * b[2] +\n (3 * b[3] + (4 * b[4] + 5 * b[5] * Rtxm) * Rtxm) * Rtxm) * Rtxm)\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtxm[I2] ** 2)\n sqrty = 10 * Rtxm[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n dSP_dRtx[I2] = (dSP_dRtx[I2] + a[0] * 800 * Rtxm[I2] * (1.5 + 2 *\n x) / (part1 ** 2) + b[0] * ft68[I2] * (10 + sqrty *\n (20 + 30 * sqrty)) / (part2 ** 2))\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n dSP_dRtx[I2] = Hill_ratio * dSP_dRtx[I2]\n\n # End of the one full iteration of the modified Newton-Raphson technique.\n Rtx = Rtx_old - (SP_est - SP) / dSP_dRtx # Updated Rtx\n\n # Now we do another half iteration of the modified Newton-Raphson\n # technique, making a total of one and a half modified N-R iterations.\n\n SP_est = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] +\n (b[4] + b[5] * Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP_est[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n SP_est[I2] = Hill_ratio * SP_Hill_raw\n\n Rtx = Rtx - (SP_est - SP) / dSP_dRtx\n\n \"\"\" TODO: add this as a kw.\n Return the error, SP_error, in Rtx (in terms of psu).\n\n SP_est = (a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] +\n (b[4] + b[5] * Rtx) * Rtx) * Rtx) * Rtx) * Rtx))\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP_est[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n SP_est[I2] = Hill_ratio * SP_Hill_raw\n\n SP_error = np.abs(SP - SP_est)\n\n This is the end of the error testing\n \"\"\"\n\n # Now go from Rtx to Rt and then to the conductivity ratio R at pressure p.\n Rt = Rtx ** 2\n A = d[2] + d[3] * t68\n B = 1 + d[0] * t68 + d[1] * t68 ** 2\n C = p * (e[0] + e[1] * p + e[2] * p ** 2)\n # rt_lc (i.e. 
rt_lower_case) corresponds to rt as defined in the\n # UNESCO 44 (1983) routines.\n rt_lc = c[0] + (c[1] + (c[2] + (c[3] + c[4] * t68) * t68) * t68) * t68\n\n D = B - A * rt_lc * Rt\n E = rt_lc * Rt * A * (B + C)\n Ra = np.sqrt(D ** 2 + 4 * E) - D\n\n return 0.5 * Ra / A\n\n\n@match_args_return\ndef SP_salinometer(Rt, t):\n r\"\"\"Calculates Practical Salinity SP from a salinometer, primarily using\n the PSS-78 algorithm. Note that the PSS-78 algorithm for Practical\n Salinity is only valid in the range 2 < SP < 42. If the PSS-78 algorithm\n produces a Practical Salinity that is less than 2 then the Practical\n Salinity is recalculated with a modified form of the Hill et al. (1986)\n formula. The modification of the Hill et al. (1986) expression is to\n ensure that it is exactly consistent with PSS-78 at SP = 2.\n\n A laboratory salinometer has the ratio of conductivities, Rt, as an output,\n and the present function uses this conductivity ratio and the temperature t\n of the salinometer bath as the two input variables.\n\n Parameters\n ----------\n Rt : array\n C(SP,t_68,0)/C(SP=35,t_68,0) [unitless]\n conductivity ratio\n :math:`R = \\frac{C(S, t_68, 0)}{C(35, 15(IPTS-68),0)} [unitless]\n\n t : array\n Temperature of the bath of the salinometer [:math:`^\\circ` C (ITS-90)]\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n See Also\n --------\n TODO: sw.sals\n\n Notes\n -----\n TODO\n\n Examples\n --------\n TODO\n\n References\n -----------\n ..[1] Fofonoff, P. and R.C. Millard Jr. 1983: Algorithms for computation of\n fundamental properties of seawater. Unesco Tech. Pap. in Mar. Sci., 44,\n 53 pp.\n\n ..[2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng., 11,\n 109 - 112.\n\n .. [3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See appendix E of this TEOS-10 Manual, and in\n particular, Eqns. (E.2.1) and (E.2.6).\n\n Modifications:\n 2011-04-30. Paul Barker, Trevor McDougall and Rich Pawlowicz. Version 3.0\n \"\"\"\n\n Rt, t = np.broadcast_arrays(Rt, t)\n\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n Rt[Rt < 0] = np.NaN\n Rtx = np.sqrt(Rt)\n\n SP = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) * Rtx) *\n Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] + (b[4] + b[5] *\n Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n \"\"\"The following section of the code is designed for SP < 2 based on the\n Hill et al. (1986) algorithm. 
This algorithm is adjusted so that it is\n exactly equal to the PSS-78 algorithm at SP = 2.\"\"\"\n\n I2 = SP < 2\n if I2.any():\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n x = 400 * Rt[I2]\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n SP[I2] = Hill_ratio * SP_Hill_raw\n # Ensure that SP is non-negative.\n SP = np.maximum(SP, 0)\n return SP\n\n\n@match_args_return\ndef SP_from_SK(SK):\n r\"\"\"Calculates Practical Salinity from Knudsen Salinity.\n\n Parameters\n ----------\n SK : array_like\n Knudsen Salinity [parts per thousand, ppt]\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Appendix A.3.\n\n Modifications:\n 2011-11-16. Trevor McDougall and Paul Barker.\n \"\"\"\n\n SP = (SK - 0.03) * (1.80655 / 1.805)\n return np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n\n@match_args_return\ndef SK_from_SP(SP):\n r\"\"\"Calculates Knudsen Salinity from Practical Salinity.\n\n Parameters\n ----------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Returns\n -------\n SK : array_like\n Knudsen Salinity [parts per thousand, ppt]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Appendix A.3.\n\n Modifications:\n 2011-11-16. Trevor McDougall and Paul Barker.\n \"\"\"\n SP = np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n return 0.03 + SP * (1.805 / 1.80655)\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n"
] |
[
[
"numpy.zeros_like",
"numpy.broadcast_arrays",
"numpy.logical_and",
"numpy.ravel",
"numpy.sqrt",
"numpy.maximum"
]
] |
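`SP_from_C` and `C_from_SP` above are designed as mutual inverses, with the conductivity ratio documented as consistent to about 2e-14 psu across the valid range. A round-trip sketch follows, assuming the python-gsw package is importable; the sample values are illustrative, not the official check values.

```python
# Round-trip sketch for the PSS-78 routines above; package importability
# and the sample values are assumptions.
import numpy as np
from gsw.gibbs.practical_salinity import SP_from_C, C_from_SP

C = np.array([34.5487, 42.9140])   # conductivity, mS/cm
t = np.array([28.7856, 15.0])      # in-situ temperature, deg C (ITS-90)
p = np.array([10.0, 0.0])          # sea pressure, dbar

SP = SP_from_C(C, t, p)            # Practical Salinity, PSS-78
C_back = C_from_SP(SP, t, p)       # invert back to conductivity
print(SP, np.max(np.abs(C_back - C)))  # residual should be near machine precision
```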
coryell/TensorNetwork
|
[
"9225390dc75c4a5f1d3f963608249a0c3aca826c"
] |
[
"tensornetwork/backends/shell/shell_backend.py"
] |
[
"# Copyright 2019 The TensorNetwork Authors\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport functools\r\nimport operator\r\nfrom tensornetwork.backends import base_backend\r\n#pylint: disable=line-too-long\r\nfrom typing import Optional, Sequence, Tuple, List, Any, Union, Type, Callable, Text\r\nimport numpy as np\r\n\r\n\r\nclass ShellTensor:\r\n\r\n def __init__(self, shape: Tuple[int, ...], dtype=None):\r\n self.shape = shape\r\n self.dtype = dtype\r\n\r\n def reshape(self, new_shape: Tuple[int, ...]):\r\n self.shape = new_shape\r\n return self\r\n\r\n\r\nTensor = ShellTensor\r\n\r\n\r\nclass ShellBackend(base_backend.BaseBackend):\r\n \"\"\"See base_backend.BaseBackend for documentation.\"\"\"\r\n\r\n def __init__(self):\r\n super(ShellBackend, self).__init__()\r\n self.name = \"shell\"\r\n\r\n def tensordot(self, a: Tensor, b: Tensor,\r\n axes: Sequence[Sequence[int]]) -> Tensor:\r\n # Does not work when axis < 0\r\n gen_a = (x for i, x in enumerate(a.shape) if i not in axes[0])\r\n gen_b = (x for i, x in enumerate(b.shape) if i not in axes[1])\r\n return ShellTensor(tuple(self._concat_generators(gen_a, gen_b)))\r\n\r\n def _concat_generators(self, *gen):\r\n \"\"\"Concatenates Python generators.\"\"\"\r\n for g in gen:\r\n yield from g\r\n\r\n def reshape(self, tensor: Tensor, shape: Sequence[int]) -> Tensor:\r\n tensor = tensor.reshape(tuple(shape))\r\n return tensor\r\n\r\n def transpose(self, tensor: Tensor, perm: Sequence[int]) -> Tensor:\r\n shape = tuple(tensor.shape[i] for i in perm)\r\n tensor = tensor.reshape(tuple(shape))\r\n return tensor\r\n\r\n def svd_decomposition(self,\r\n tensor: Tensor,\r\n split_axis: int,\r\n max_singular_values: Optional[int] = None,\r\n max_truncation_error: Optional[float] = None,\r\n relative: Optional[bool] = False\r\n ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\r\n if max_truncation_error is not None:\r\n raise NotImplementedError(\"SVD with truncation shape cannot be \"\r\n \"calculated without explicit tensor values.\")\r\n left_dims = tensor.shape[:split_axis]\r\n right_dims = tensor.shape[split_axis:]\r\n dim_s0 = min(\r\n functools.reduce(operator.mul, left_dims),\r\n functools.reduce(operator.mul, right_dims))\r\n if max_singular_values is not None:\r\n dim_s = min(dim_s0, max_singular_values)\r\n else:\r\n dim_s = dim_s0\r\n\r\n u = ShellTensor(left_dims + (dim_s,))\r\n vh = ShellTensor((dim_s,) + right_dims)\r\n s = ShellTensor((dim_s,))\r\n s_rest = ShellTensor((dim_s0 - dim_s,))\r\n return u, s, vh, s_rest\r\n\r\n def qr_decomposition(self, tensor: Tensor,\r\n split_axis: int) -> Tuple[Tensor, Tensor]:\r\n\r\n left_dims = tensor.shape[:split_axis]\r\n right_dims = tensor.shape[split_axis:]\r\n center_dim = min(tensor.shape)\r\n q = ShellTensor(left_dims + (center_dim,))\r\n r = ShellTensor((center_dim,) + right_dims)\r\n return q, r\r\n\r\n def rq_decomposition(self, tensor: Tensor,\r\n split_axis: int) -> Tuple[Tensor, Tensor]:\r\n\r\n left_dims = tensor.shape[:split_axis]\r\n right_dims = 
tensor.shape[split_axis:]\r\n center_dim = min(tensor.shape)\r\n q = ShellTensor(left_dims + (center_dim,))\r\n r = ShellTensor((center_dim,) + right_dims)\r\n return q, r\r\n\r\n def shape_concat(self, values: Sequence[Tensor], axis: int) -> Tensor:\r\n shape = values[0].shape\r\n if axis < 0:\r\n axis += len(shape)\r\n concat_size = sum(v.shape[axis] for v in values)\r\n new_shape = shape[:axis] + (concat_size,) + shape[axis + 1:]\r\n return ShellTensor(new_shape)\r\n\r\n def concat_shape(self, values) -> Sequence:\r\n tuple_values = (tuple(v) for v in values)\r\n return functools.reduce(operator.concat, tuple_values)\r\n\r\n def shape_tensor(self, tensor: Tensor) -> Tuple:\r\n return tensor.shape\r\n\r\n def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]:\r\n return tensor.shape\r\n\r\n def shape_prod(self, values: Tensor) -> int:\r\n # This is different from the BaseBackend prod!\r\n # prod calculates the product of tensor elements and cannot implemented\r\n # for shell tensors\r\n # This returns the product of sizes instead\r\n return self.shape_product(values.shape)\r\n\r\n def shape_product(self, shape: Sequence[int]) -> int:\r\n return functools.reduce(operator.mul, shape)\r\n\r\n def sqrt(self, tensor: Tensor) -> Tensor:\r\n return tensor\r\n\r\n def diag(self, tensor: Tensor) -> Tensor:\r\n shape = tensor.shape\r\n new_tensor = ShellTensor((3 - len(shape)) * shape)\r\n return new_tensor\r\n\r\n def convert_to_tensor(self, tensor: Any) -> Tensor:\r\n shell_tensor = ShellTensor(tuple(tensor.shape))\r\n return shell_tensor\r\n\r\n def trace(self, tensor: Tensor) -> Tensor:\r\n return ShellTensor(tensor.shape[:-2])\r\n\r\n def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n return ShellTensor(tensor1.shape + tensor2.shape)\r\n\r\n def einsum(self, expression: str, *tensors: Tensor) -> Tensor:\r\n expr_list = expression.split(\",\")\r\n expr_list[-1], res = expr_list[-1].split(\"->\")\r\n shape = tuple(self._find_char(expr_list, char, tensors) for char in res)\r\n return ShellTensor(shape)\r\n\r\n def _find_char(self, expr_list: List[str], char: str,\r\n tensors: Sequence[Tensor]) -> int:\r\n \"\"\"Finds character in einsum tensor expression.\r\n\r\n Args:\r\n expr_list: List with expression for input tensors in einsum.\r\n char: One character string (letter) that corresponds to a specific\r\n einsum component.\r\n\r\n Returns:\r\n size: Size of the axis that corresponds to this einsum expression\r\n character.\r\n \"\"\"\r\n for i, expr in enumerate(expr_list):\r\n ind = expr.find(char)\r\n if ind != -1:\r\n return tensors[i].shape[ind]\r\n raise ValueError(\"Einsum output expression contains letters not given\"\r\n \"in input.\")\r\n\r\n def norm(self, tensor: Tensor) -> Tensor:\r\n return ShellTensor(())\r\n\r\n def eye(self,\r\n N: int,\r\n dtype: Optional[Type[np.number]] = None,\r\n M: Optional[int] = None) -> Tensor:\r\n if not M:\r\n M = N\r\n return ShellTensor((N, M))\r\n\r\n def ones(self,\r\n shape: Tuple[int, ...],\r\n dtype: Optional[Type[np.number]] = None) -> Tensor:\r\n return ShellTensor(shape)\r\n\r\n def zeros(self,\r\n shape: Tuple[int, ...],\r\n dtype: Optional[Type[np.number]] = None) -> Tensor:\r\n\r\n return ShellTensor(shape)\r\n\r\n def randn(self,\r\n shape: Tuple[int, ...],\r\n dtype: Optional[Type[np.number]] = None,\r\n seed: Optional[int] = None) -> Tensor:\r\n return ShellTensor(shape)\r\n\r\n def random_uniform(self,\r\n shape: Tuple[int, ...],\r\n boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),\r\n dtype: 
Optional[Type[np.number]] = None,\r\n                     seed: Optional[int] = None) -> Tensor:\r\n    return ShellTensor(shape)\r\n\r\n  def conj(self, tensor: Tensor) -> Tensor:\r\n    return tensor\r\n\r\n  def eigh(self, matrix: Tensor) -> Tuple[Tensor, Tensor]:\r\n    shape = matrix.shape\r\n    return ShellTensor((shape[0],)), ShellTensor(shape)\r\n\r\n  def eigs(self,\r\n           A: Callable,\r\n           initial_state: Optional[Tensor] = None,\r\n           num_krylov_vecs: Optional[int] = 200,\r\n           numeig: Optional[int] = 1,\r\n           tol: Optional[float] = 1E-8,\r\n           which: Optional[Text] = 'LR',\r\n           maxiter: Optional[int] = None,\r\n           dtype: Optional[Type] = None) -> Tuple[List, List]:\r\n\r\n    if (initial_state is not None) and hasattr(A, 'shape'):\r\n      if initial_state.shape != A.shape[1]:\r\n        raise ValueError(\r\n            \"A.shape[1]={} and initial_state.shape={} are incompatible.\".format(\r\n                A.shape[1], initial_state.shape))\r\n\r\n    if initial_state is None:\r\n      if not hasattr(A, 'shape'):\r\n        raise AttributeError(\"`A` has no attribute `shape`. Cannot initialize \"\r\n                             \"lanczos. Please provide a valid `initial_state`\")\r\n      return [ShellTensor(tuple()) for _ in range(numeig)\r\n             ], [ShellTensor((A.shape[0],)) for _ in range(numeig)]\r\n\r\n    if initial_state is not None:\r\n      return [ShellTensor(tuple()) for _ in range(numeig)\r\n             ], [ShellTensor(initial_state.shape) for _ in range(numeig)]\r\n\r\n    raise ValueError(\r\n        '`A` has no attribute `shape` and no `initial_state` is given.')\r\n\r\n  def eigsh_lanczos(self,\r\n                    A: Callable,\r\n                    initial_state: Optional[Tensor] = None,\r\n                    num_krylov_vecs: Optional[int] = 200,\r\n                    numeig: Optional[int] = 1,\r\n                    tol: Optional[float] = 1E-8,\r\n                    delta: Optional[float] = 1E-8,\r\n                    ndiag: Optional[int] = 20,\r\n                    reorthogonalize: Optional[bool] = False\r\n                   ) -> Tuple[List, List]:\r\n\r\n    if num_krylov_vecs < numeig:\r\n      raise ValueError('`num_krylov_vecs` >= `numeig` required!')\r\n\r\n    if numeig > 1 and not reorthogonalize:\r\n      raise ValueError(\r\n          \"Got numeig = {} > 1 and `reorthogonalize = False`. \"\r\n          \"Use `reorthogonalize=True` for `numeig > 1`\".format(numeig))\r\n\r\n    if (initial_state is not None) and hasattr(A, 'shape'):\r\n      if initial_state.shape != A.shape[1]:\r\n        raise ValueError(\r\n            \"A.shape[1]={} and initial_state.shape={} are incompatible.\".format(\r\n                A.shape[1], initial_state.shape))\r\n\r\n    if initial_state is None:\r\n      if not hasattr(A, 'shape'):\r\n        raise AttributeError(\"`A` has no attribute `shape`. Cannot initialize \"\r\n                             \"lanczos. 
Please provide a valid `initial_state`\")\r\n      return [ShellTensor(tuple()) for _ in range(numeig)\r\n             ], [ShellTensor((A.shape[0],)) for _ in range(numeig)]\r\n\r\n    if initial_state is not None:\r\n      return [ShellTensor(tuple()) for _ in range(numeig)\r\n             ], [ShellTensor(initial_state.shape) for _ in range(numeig)]\r\n\r\n    raise ValueError(\r\n        '`A` has no attribute `shape` and no `initial_state` is given.')\r\n\r\n  def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n    raise NotImplementedError(\"Shell tensor has not implemented addition( + )\")\r\n\r\n  def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n    raise NotImplementedError(\"Shell tensor has not implemented subtraction( - )\")\r\n\r\n  def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n    a = np.ones(tensor1.shape)\r\n    b = np.ones(tensor2.shape)\r\n    return ShellTensor((a * b).shape)\r\n\r\n  def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n    raise NotImplementedError(\"Shell tensor has not implemented divide( / )\")\r\n\r\n  def index_update(self, tensor: Tensor, mask: Tensor,\r\n                   assignee: Tensor) -> Tensor:\r\n    return ShellTensor(tensor.shape)\r\n\r\n  def inv(self, matrix: Tensor) -> Tensor:\r\n    if len(matrix.shape) > 2:\r\n      raise ValueError(\r\n          \"input to shell backend method `inv` has shape {}. Only matrices are supported.\"\r\n          .format(matrix.shape))\r\n    return ShellTensor(matrix.shape)\r\n"
] |
[
[
"numpy.ones"
]
] |
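The row above pairs the ShellBackend source with its lone API tag, numpy.ones. As a hedged illustration (not part of the dataset row), the sketch below isolates the shape-only contraction rule that ShellBackend.tensordot implements and checks it against numpy.tensordot on real arrays; every name in it is made up for the example.

import numpy as np

def shell_tensordot_shape(shape_a, shape_b, axes):
    # Keep, in order, every axis of each operand that is not contracted away.
    keep_a = tuple(d for i, d in enumerate(shape_a) if i not in axes[0])
    keep_b = tuple(d for i, d in enumerate(shape_b) if i not in axes[1])
    return keep_a + keep_b

a, b = np.ones((2, 3, 4)), np.ones((4, 5))
expected = np.tensordot(a, b, axes=([2], [0])).shape
# Contracting axis 2 of a with axis 0 of b leaves shape (2, 3, 5).
assert shell_tensordot_shape(a.shape, b.shape, ([2], [0])) == expected

Like the tensordot in the entry above, this sketch assumes non-negative axis indices.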
abhinavagarwalla/MAL-inference-deepsort
|
[
"3dc2010f76dc249e60d3e970247faa7e7c5ffca6"
] |
[
"setup.py"
] |
[
"from setuptools import setup\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\nsetup(\n name='retinanet',\n version='0.1',\n description='Fast and accurate single shot object detector',\n author = 'NVIDIA Corporation',\n author_email='fchabert@nvidia.com',\n packages=['retinanet', 'retinanet.backbones'],\n ext_modules=[CUDAExtension('retinanet._C',\n ['csrc/extensions.cpp', 'csrc/engine.cpp', 'csrc/cuda/decode.cu', 'csrc/cuda/nms.cu'],\n extra_compile_args={\n 'cxx': ['-std=c++14', '-O2', '-Wall'],\n 'nvcc': [\n '-std=c++14', '--expt-extended-lambda', '--use_fast_math', '-Xcompiler', '-Wall',\n '-gencode=arch=compute_50,code=sm_50', '-gencode=arch=compute_52,code=sm_52',\n '-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61',\n '-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_72,code=sm_72',\n '-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_75,code=compute_75'\n ],\n },\n library_dirs= ['/usr/local/lib/'],\n libraries=['nvinfer', 'nvinfer_plugin', 'nvonnxparser', 'opencv_core', 'opencv_highgui', 'opencv_imgproc', 'opencv_imgcodecs'])\n ],\n cmdclass={'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)},\n install_requires=[\n 'torch>=1.0.0a0',\n #'torchvision',\n 'apex @ git+https://github.com/NVIDIA/apex',\n 'pycocotools @ git+https://github.com/nvidia/cocoapi.git#subdirectory=PythonAPI',\n 'pillow>=6.2.2',\n 'requests',\n ],\n entry_points = {'console_scripts': ['retinanet=retinanet.main:main']}\n)\n"
] |
[
[
"torch.utils.cpp_extension.CUDAExtension",
"torch.utils.cpp_extension.BuildExtension.with_options"
]
] |
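The setup.py above is tagged with torch.utils.cpp_extension.CUDAExtension and BuildExtension.with_options. A minimal hedged sketch of those two APIs follows; the package name, source paths, and compiler flags are illustrative placeholders, not the repository's values.

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='example_ext',
    ext_modules=[
        CUDAExtension(
            'example_ext._C',                     # importable module path (placeholder)
            ['csrc/ops.cpp', 'csrc/kernels.cu'],  # mixed C++ and CUDA sources (placeholders)
            extra_compile_args={'cxx': ['-O2'], 'nvcc': ['-O2']},
        ),
    ],
    # no_python_abi_suffix=True names the built library _C.so instead of
    # _C.cpython-<ver>-<platform>.so, matching the option used in the entry above.
    cmdclass={'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)},
)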
doublejtoh/tensorflow-resnet-image-clustering
|
[
"b81ba44910c863f14e1a0b5b3422226c1241e8a1"
] |
[
"src/main.py"
] |
[
"import tensorflow as tf\nfrom os import path as ospath\nfrom model import Model\nfrom config import LABELS_PRED, TRAINING_IMG_DIR, TRAINING_DATA_DIR, TRAINING_JSON_PATH, TEST_IMG_DIR, TEST_DATA_DIR, CHECKPOINT_PATH, CHECKPOINT_SAVE_EPOCH, CHECKPOINT_MAX_TO_KEEP, _IMAGE_WIDTH, _IMAGE_HEIGHT, _IMAGE_CHANNELS, _NUM_CLASSES, _NUM_IMAGES\n\ndef define_flags():\n tf.app.flags.DEFINE_integer('max_training_epochs', 100000,\n 'Maximum training epoch. \\n'\n 'If larger, training ends.')\n tf.app.flags.DEFINE_integer('batch_size', 16,\n 'Batch size')\n tf.app.flags.DEFINE_string('training_data_dir', TRAINING_IMG_DIR,\n 'Training data directory')\n tf.app.flags.DEFINE_string('training_json_path', TRAINING_JSON_PATH,\n 'Training data labels mapping file path')\n tf.app.flags.DEFINE_string('test_data_dir', TEST_IMG_DIR,\n 'Test data directory')\n tf.app.flags.DEFINE_string('checkpoint_path', CHECKPOINT_PATH,\n 'Save/Saved checkpoint path')\n tf.app.flags.DEFINE_integer('num_images', _NUM_IMAGES,\n 'Total number of training data images.')\n tf.app.flags.DEFINE_integer('checkpoint_save_epoch', CHECKPOINT_SAVE_EPOCH,\n 'Checkpoint save for every \"checkpoint_save_epoch\" epoch.')\n tf.app.flags.DEFINE_integer('checkpoint_max_to_keep', CHECKPOINT_MAX_TO_KEEP,\n 'Checkpoint files max to keep')\n tf.app.flags.DEFINE_integer('resnet_size', 50,\n 'resnet size selection.'\n 'must be one of [50, 101, 152]')\n tf.app.flags.DEFINE_boolean('training_predict', False,\n 'On training dataset, \\n'\n 'make labels_pred.txt (predictions) \\n')\n tf.app.flags.DEFINE_string('training_predict_output_path', TRAINING_DATA_DIR,\n 'Output path where labels_pred.txt and \\n')\n tf.app.flags.DEFINE_boolean('test_predict', False,\n 'On test dataset, \\n'\n 'make labels_pred.txt (predictions) \\n')\n tf.app.flags.DEFINE_string('test_predict_output_path', TEST_DATA_DIR,\n 'Output path where labels_pred.txt and \\n')\n\ndef main():\n FLAGS = tf.app.flags.FLAGS\n resnet_model = Model(\n resnet_size=FLAGS.resnet_size,\n initial_kernel_size=7,\n initial_kernel_stride=2,\n kernel_strides=[1, 2, 2, 2],\n initial_pool_size=3,\n initial_pool_stride=2,\n initial_filters=64,\n input_width=_IMAGE_WIDTH,\n input_height=_IMAGE_HEIGHT,\n input_channels=_IMAGE_CHANNELS,\n num_classes=_NUM_CLASSES,\n data_format='channels_last'\n )\n if FLAGS.training_predict:\n resnet_model.predict(\n flags=FLAGS,\n data_dir=FLAGS.training_data_dir,\n pred_out_path=ospath.join(FLAGS.training_predict_output_path, LABELS_PRED)\n )\n elif FLAGS.test_predict:\n resnet_model.predict(\n flags=FLAGS,\n data_dir=FLAGS.test_data_dir,\n pred_out_path=ospath.join(FLAGS.test_predict_output_path, LABELS_PRED)\n )\n else:\n resnet_model.train(FLAGS)\n\n\nif __name__ == '__main__':\n define_flags()\n main()\n"
] |
[
[
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.app.flags.DEFINE_string"
]
] |
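The main.py above relies on the TF1-era flag helpers listed in its API tags. The hedged sketch below shows the same three DEFINE_* calls in isolation (tf.app.flags was removed in TensorFlow 2; absl.flags is its successor); the flag names and defaults are illustrative, not the project's configuration.

import tensorflow as tf  # assumes a TensorFlow 1.x installation

tf.app.flags.DEFINE_integer('batch_size', 16, 'Batch size')
tf.app.flags.DEFINE_boolean('test_predict', False, 'Run prediction on the test set')
tf.app.flags.DEFINE_string('data_dir', './data', 'Input data directory')

FLAGS = tf.app.flags.FLAGS

def main(_):
    # Flags are parsed from argv before main is invoked by tf.app.run.
    print(FLAGS.batch_size, FLAGS.test_predict, FLAGS.data_dir)

if __name__ == '__main__':
    tf.app.run(main)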
Mottl/pandas
|
[
"d7af297d6fd70be6b1b0c03771127b9aedcef84b",
"6111f645c5adc5bdcd3810b4112392bda3583d59"
] |
[
"pandas/core/groupby/groupby.py",
"pandas/tests/arrays/categorical/test_constructors.py"
] |
[
"\"\"\"\nProvide the groupby split-apply-combine paradigm. Define the GroupBy\nclass providing the base-class of operations.\n\nThe SeriesGroupBy and DataFrameGroupBy sub-class\n(defined in pandas.core.groupby.generic)\nexpose these user-facing objects to provide specific functionailty.\n\"\"\"\n\nimport collections\nfrom contextlib import contextmanager\nimport datetime\nfrom functools import partial, wraps\nimport types\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import Timestamp, groupby as libgroupby\nimport pandas.compat as compat\nfrom pandas.compat import callable, range, set_function_name, zip\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._validators import validate_kwargs\n\nfrom pandas.core.dtypes.cast import maybe_downcast_to_dtype\nfrom pandas.core.dtypes.common import (\n ensure_float, is_extension_array_dtype, is_numeric_dtype, is_scalar)\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas.core.algorithms as algorithms\nfrom pandas.core.base import (\n DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError)\nimport pandas.core.common as com\nfrom pandas.core.config import option_context\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.groupby import base\nfrom pandas.core.index import Index, MultiIndex\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import get_group_index_sorter\n\n_common_see_also = \"\"\"\n See Also\n --------\n pandas.Series.%(name)s\n pandas.DataFrame.%(name)s\n pandas.Panel.%(name)s\n\"\"\"\n\n_apply_docs = dict(\n template=\"\"\"\n Apply function `func` group-wise and combine the results together.\n\n The function passed to `apply` must take a {input} as its first\n argument and return a DataFrame, Series or scalar. `apply` will\n then take care of combining the results back together into a single\n dataframe or series. `apply` is therefore a highly flexible\n grouping method.\n\n While `apply` is a very flexible method, its downside is that\n using it can be quite a bit slower than using more specific methods\n like `agg` or `transform`. Pandas offers a wide range of method that will\n be much faster than using `apply` for their specific purposes, so try to\n use them before reaching for `apply`.\n\n Parameters\n ----------\n func : callable\n A callable that takes a {input} as its first argument, and\n returns a dataframe, a series or a scalar. In addition the\n callable may take positional and keyword arguments.\n args, kwargs : tuple and dict\n Optional positional and keyword arguments to pass to `func`.\n\n Returns\n -------\n applied : Series or DataFrame\n\n See Also\n --------\n pipe : Apply function to the full GroupBy object instead of to each\n group.\n aggregate : Apply aggregate function to the GroupBy object.\n transform : Apply function column-by-column to the GroupBy object.\n Series.apply : Apply a function to a Series.\n DataFrame.apply : Apply a function to each row or column of a DataFrame.\n \"\"\",\n dataframe_examples=\"\"\"\n >>> df = pd.DataFrame({'A': 'a a b'.split(),\n 'B': [1,2,3],\n 'C': [4,6, 5]})\n >>> g = df.groupby('A')\n\n Notice that ``g`` has two groups, ``a`` and ``b``.\n Calling `apply` in various ways, we can get different grouping results:\n\n Example 1: below the function passed to `apply` takes a DataFrame as\n its argument and returns a DataFrame. 
`apply` combines the result for\n each group together into a new DataFrame:\n\n >>> g[['B', 'C']].apply(lambda x: x / x.sum())\n B C\n 0 0.333333 0.4\n 1 0.666667 0.6\n 2 1.000000 1.0\n\n Example 2: The function passed to `apply` takes a DataFrame as\n its argument and returns a Series. `apply` combines the result for\n each group together into a new DataFrame:\n\n >>> g[['B', 'C']].apply(lambda x: x.max() - x.min())\n B C\n A\n a 1 2\n b 0 0\n\n Example 3: The function passed to `apply` takes a DataFrame as\n its argument and returns a scalar. `apply` combines the result for\n each group together into a Series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.C.max() - x.B.min())\n A\n a 5\n b 2\n dtype: int64\n \"\"\",\n series_examples=\"\"\"\n >>> s = pd.Series([0, 1, 2], index='a a b'.split())\n >>> g = s.groupby(s.index)\n\n From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.\n Calling `apply` in various ways, we can get different grouping results:\n\n Example 1: The function passed to `apply` takes a Series as\n its argument and returns a Series. `apply` combines the result for\n each group together into a new Series:\n\n >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)\n 0 0.0\n 1 0.5\n 2 4.0\n dtype: float64\n\n Example 2: The function passed to `apply` takes a Series as\n its argument and returns a scalar. `apply` combines the result for\n each group together into a Series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.max() - x.min())\n a 1\n b 0\n dtype: int64\n\n Notes\n -----\n In the current implementation `apply` calls `func` twice on the\n first group to decide whether it can take a fast or slow code\n path. This can lead to unexpected behavior if `func` has\n side-effects, as they will take effect twice for the first\n group.\n\n Examples\n --------\n {examples}\n \"\"\")\n\n_pipe_template = \"\"\"\\\nApply a function `func` with arguments to this %(klass)s object and return\nthe function's result.\n\n%(versionadded)s\n\nUse `.pipe` when you want to improve readability by chaining together\nfunctions that expect Series, DataFrames, GroupBy or Resampler objects.\nInstead of writing\n\n>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)\n\nYou can write\n\n>>> (df.groupby('group')\n... .pipe(f)\n... .pipe(g, arg1=a)\n... 
.pipe(h, arg2=b, arg3=c))\n\nwhich is much more readable.\n\nParameters\n----------\nfunc : callable or tuple of (callable, string)\n Function to apply to this %(klass)s object or, alternatively,\n a `(callable, data_keyword)` tuple where `data_keyword` is a\n string indicating the keyword of `callable` that expects the\n %(klass)s object.\nargs : iterable, optional\n positional arguments passed into `func`.\nkwargs : dict, optional\n a dictionary of keyword arguments passed into `func`.\n\nReturns\n-------\nobject : the return type of `func`.\n\nSee Also\n--------\npandas.Series.pipe : Apply a function with arguments to a series.\npandas.DataFrame.pipe: Apply a function with arguments to a dataframe.\napply : Apply function to each group instead of to the\n full %(klass)s object.\n\nNotes\n-----\nSee more `here\n<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_\n\nExamples\n--------\n%(examples)s\n\"\"\"\n\n_transform_template = \"\"\"\nCall function producing a like-indexed %(klass)s on each group and\nreturn a %(klass)s having the same indexes as the original object\nfilled with the transformed values\n\nParameters\n----------\nf : function\n Function to apply to each group\n\nReturns\n-------\n%(klass)s\n\nSee Also\n--------\naggregate, transform\n\nNotes\n-----\nEach group is endowed the attribute 'name' in case you need to know\nwhich group you are working on.\n\nThe current implementation imposes three requirements on f:\n\n* f must return a value that either has the same shape as the input\n subframe or can be broadcast to the shape of the input subframe.\n For example, f returns a scalar it will be broadcast to have the\n same shape as the input subframe.\n* if this is a DataFrame, f must support application column-by-column\n in the subframe. If f also supports application to the entire subframe,\n then a fast path is used starting from the second chunk.\n* f must not mutate groups. Mutation is not supported and may\n produce unexpected results.\n\nExamples\n--------\n\n# Same shape\n>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n... 'foo', 'bar'],\n... 'B' : ['one', 'one', 'two', 'three',\n... 'two', 'two'],\n... 'C' : [1, 5, 5, 2, 5, 5],\n... 
'D' : [2.0, 5., 8., 1., 2., 9.]})\n>>> grouped = df.groupby('A')\n>>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n C D\n0 -1.154701 -0.577350\n1 0.577350 0.000000\n2 0.577350 1.154701\n3 -1.154701 -1.000000\n4 0.577350 -0.577350\n5 0.577350 1.000000\n\n# Broadcastable\n>>> grouped.transform(lambda x: x.max() - x.min())\n C D\n0 4 6.0\n1 3 8.0\n2 4 6.0\n3 3 8.0\n4 4 6.0\n5 3 8.0\n\"\"\"\n\n\nclass GroupByPlot(PandasObject):\n \"\"\"\n Class implementing the .plot attribute for groupby objects.\n \"\"\"\n\n def __init__(self, groupby):\n self._groupby = groupby\n\n def __call__(self, *args, **kwargs):\n def f(self):\n return self.plot(*args, **kwargs)\n f.__name__ = 'plot'\n return self._groupby.apply(f)\n\n def __getattr__(self, name):\n def attr(*args, **kwargs):\n def f(self):\n return getattr(self.plot, name)(*args, **kwargs)\n return self._groupby.apply(f)\n return attr\n\n\n@contextmanager\ndef _group_selection_context(groupby):\n \"\"\"\n Set / reset the _group_selection_context.\n \"\"\"\n groupby._set_group_selection()\n yield groupby\n groupby._reset_group_selection()\n\n\nclass _GroupBy(PandasObject, SelectionMixin):\n _group_selection = None\n _apply_whitelist = frozenset()\n\n def __init__(self, obj, keys=None, axis=0, level=None,\n grouper=None, exclusions=None, selection=None, as_index=True,\n sort=True, group_keys=True, squeeze=False,\n observed=False, **kwargs):\n\n self._selection = selection\n\n if isinstance(obj, NDFrame):\n obj._consolidate_inplace()\n\n self.level = level\n\n if not as_index:\n if not isinstance(obj, DataFrame):\n raise TypeError('as_index=False only valid with DataFrame')\n if axis != 0:\n raise ValueError('as_index=False only valid for axis=0')\n\n self.as_index = as_index\n self.keys = keys\n self.sort = sort\n self.group_keys = group_keys\n self.squeeze = squeeze\n self.observed = observed\n self.mutated = kwargs.pop('mutated', False)\n\n if grouper is None:\n from pandas.core.groupby.grouper import _get_grouper\n grouper, exclusions, obj = _get_grouper(obj, keys,\n axis=axis,\n level=level,\n sort=sort,\n observed=observed,\n mutated=self.mutated)\n\n self.obj = obj\n self.axis = obj._get_axis_number(axis)\n self.grouper = grouper\n self.exclusions = set(exclusions) if exclusions else set()\n\n # we accept no other args\n validate_kwargs('group', kwargs, {})\n\n def __len__(self):\n return len(self.groups)\n\n def __unicode__(self):\n # TODO: Better unicode/repr for GroupBy object\n return object.__repr__(self)\n\n def _assure_grouper(self):\n \"\"\"\n We create the grouper on instantiation sub-classes may have a\n different policy.\n \"\"\"\n pass\n\n @property\n def groups(self):\n \"\"\"\n Dict {group name -> group labels}.\n \"\"\"\n self._assure_grouper()\n return self.grouper.groups\n\n @property\n def ngroups(self):\n self._assure_grouper()\n return self.grouper.ngroups\n\n @property\n def indices(self):\n \"\"\"\n Dict {group name -> group indices}.\n \"\"\"\n self._assure_grouper()\n return self.grouper.indices\n\n def _get_indices(self, names):\n \"\"\"\n Safe get multiple indices, translate keys for\n datelike to underlying repr.\n \"\"\"\n\n def get_converter(s):\n # possibly convert to the actual key types\n # in the indices, could be a Timestamp or a np.datetime64\n if isinstance(s, (Timestamp, datetime.datetime)):\n return lambda key: Timestamp(key)\n elif isinstance(s, np.datetime64):\n return lambda key: Timestamp(key).asm8\n else:\n return lambda key: key\n\n if len(names) == 0:\n return []\n\n if len(self.indices) > 
0:\n index_sample = next(iter(self.indices))\n else:\n index_sample = None # Dummy sample\n\n name_sample = names[0]\n if isinstance(index_sample, tuple):\n if not isinstance(name_sample, tuple):\n msg = (\"must supply a tuple to get_group with multiple\"\n \" grouping keys\")\n raise ValueError(msg)\n if not len(name_sample) == len(index_sample):\n try:\n # If the original grouper was a tuple\n return [self.indices[name] for name in names]\n except KeyError:\n # turns out it wasn't a tuple\n msg = (\"must supply a a same-length tuple to get_group\"\n \" with multiple grouping keys\")\n raise ValueError(msg)\n\n converters = [get_converter(s) for s in index_sample]\n names = [tuple(f(n) for f, n in zip(converters, name))\n for name in names]\n\n else:\n converter = get_converter(index_sample)\n names = [converter(name) for name in names]\n\n return [self.indices.get(name, []) for name in names]\n\n def _get_index(self, name):\n \"\"\"\n Safe get index, translate keys for datelike to underlying repr.\n \"\"\"\n return self._get_indices([name])[0]\n\n @cache_readonly\n def _selected_obj(self):\n\n if self._selection is None or isinstance(self.obj, Series):\n if self._group_selection is not None:\n return self.obj[self._group_selection]\n return self.obj\n else:\n return self.obj[self._selection]\n\n def _reset_group_selection(self):\n \"\"\"\n Clear group based selection.\n\n Used for methods needing to return info on each group regardless of\n whether a group selection was previously set.\n \"\"\"\n if self._group_selection is not None:\n # GH12839 clear cached selection too when changing group selection\n self._group_selection = None\n self._reset_cache('_selected_obj')\n\n def _set_group_selection(self):\n \"\"\"\n Create group based selection.\n\n Used when selection is not passed directly but instead via a grouper.\n\n NOTE: this should be paired with a call to _reset_group_selection\n \"\"\"\n grp = self.grouper\n if not (self.as_index and\n getattr(grp, 'groupings', None) is not None and\n self.obj.ndim > 1 and\n self._group_selection is None):\n return\n\n ax = self.obj._info_axis\n groupers = [g.name for g in grp.groupings\n if g.level is None and g.in_axis]\n\n if len(groupers):\n # GH12839 clear selected obj cache when group selection changes\n self._group_selection = ax.difference(Index(groupers),\n sort=False).tolist()\n self._reset_cache('_selected_obj')\n\n def _set_result_index_ordered(self, result):\n # set the result index on the passed values object and\n # return the new object, xref 8046\n\n # the values/counts are repeated according to the group index\n # shortcut if we have an already ordered grouper\n if not self.grouper.is_monotonic:\n index = Index(np.concatenate(\n self._get_indices(self.grouper.result_index)))\n result.set_axis(index, axis=self.axis, inplace=True)\n result = result.sort_index(axis=self.axis)\n\n result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,\n inplace=True)\n return result\n\n def _dir_additions(self):\n return self.obj._dir_additions() | self._apply_whitelist\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n if hasattr(self.obj, attr):\n return self._make_wrapper(attr)\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (type(self).__name__, attr))\n\n @Substitution(klass='GroupBy',\n versionadded='.. 
versionadded:: 0.21.0',\n examples=\"\"\"\\\n>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})\n>>> df\n A B\n0 a 1\n1 b 2\n2 a 3\n3 b 4\n\nTo get the difference between each groups maximum and minimum value in one\npass, you can do\n\n>>> df.groupby('A').pipe(lambda x: x.max() - x.min())\n B\nA\na 2\nb 2\"\"\")\n @Appender(_pipe_template)\n def pipe(self, func, *args, **kwargs):\n return com._pipe(self, func, *args, **kwargs)\n\n plot = property(GroupByPlot)\n\n def _make_wrapper(self, name):\n if name not in self._apply_whitelist:\n is_callable = callable(getattr(self._selected_obj, name, None))\n kind = ' callable ' if is_callable else ' '\n msg = (\"Cannot access{0}attribute {1!r} of {2!r} objects, try \"\n \"using the 'apply' method\".format(kind, name,\n type(self).__name__))\n raise AttributeError(msg)\n\n self._set_group_selection()\n\n # need to setup the selection\n # as are not passed directly but in the grouper\n f = getattr(self._selected_obj, name)\n if not isinstance(f, types.MethodType):\n return self.apply(lambda self: getattr(self, name))\n\n f = getattr(type(self._selected_obj), name)\n\n def wrapper(*args, **kwargs):\n # a little trickery for aggregation functions that need an axis\n # argument\n kwargs_with_axis = kwargs.copy()\n if ('axis' not in kwargs_with_axis or\n kwargs_with_axis['axis'] is None):\n kwargs_with_axis['axis'] = self.axis\n\n def curried_with_axis(x):\n return f(x, *args, **kwargs_with_axis)\n\n def curried(x):\n return f(x, *args, **kwargs)\n\n # preserve the name so we can detect it when calling plot methods,\n # to avoid duplicates\n curried.__name__ = curried_with_axis.__name__ = name\n\n # special case otherwise extra plots are created when catching the\n # exception below\n if name in base.plotting_methods:\n return self.apply(curried)\n\n try:\n return self.apply(curried_with_axis)\n except Exception:\n try:\n return self.apply(curried)\n except Exception:\n\n # related to : GH3688\n # try item-by-item\n # this can be called recursively, so need to raise\n # ValueError\n # if we don't have this method to indicated to aggregate to\n # mark this column as an error\n try:\n return self._aggregate_item_by_item(name,\n *args, **kwargs)\n except (AttributeError):\n raise ValueError\n\n return wrapper\n\n def get_group(self, name, obj=None):\n \"\"\"\n Constructs NDFrame from group with provided name.\n\n Parameters\n ----------\n name : object\n the name of the group to get as a DataFrame\n obj : NDFrame, default None\n the NDFrame to take the DataFrame out of. If\n it is None, the object groupby was called on will\n be used\n\n Returns\n -------\n group : same type as obj\n \"\"\"\n if obj is None:\n obj = self._selected_obj\n\n inds = self._get_index(name)\n if not len(inds):\n raise KeyError(name)\n\n return obj._take(inds, axis=self.axis)\n\n def __iter__(self):\n \"\"\"\n Groupby iterator.\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n return self.grouper.get_iterator(self.obj, axis=self.axis)\n\n @Appender(_apply_docs['template']\n .format(input=\"dataframe\",\n examples=_apply_docs['dataframe_examples']))\n def apply(self, func, *args, **kwargs):\n\n func = self._is_builtin_func(func)\n\n # this is needed so we don't try and wrap strings. 
If we could\n # resolve functions to their callable functions prior, this\n # wouldn't be needed\n if args or kwargs:\n if callable(func):\n\n @wraps(func)\n def f(g):\n with np.errstate(all='ignore'):\n return func(g, *args, **kwargs)\n else:\n raise ValueError('func must be a callable if args or '\n 'kwargs are supplied')\n else:\n f = func\n\n # ignore SettingWithCopy here in case the user mutates\n with option_context('mode.chained_assignment', None):\n try:\n result = self._python_apply_general(f)\n except Exception:\n\n # gh-20949\n # try again, with .apply acting as a filtering\n # operation, by excluding the grouping column\n # This would normally not be triggered\n # except if the udf is trying an operation that\n # fails on *some* columns, e.g. a numeric operation\n # on a string grouper column\n\n with _group_selection_context(self):\n return self._python_apply_general(f)\n\n return result\n\n def _python_apply_general(self, f):\n keys, values, mutated = self.grouper.apply(f, self._selected_obj,\n self.axis)\n\n return self._wrap_applied_output(\n keys,\n values,\n not_indexed_same=mutated or self.mutated)\n\n def _iterate_slices(self):\n yield self._selection_name, self._selected_obj\n\n def transform(self, func, *args, **kwargs):\n raise AbstractMethodError(self)\n\n def _cumcount_array(self, ascending=True):\n \"\"\"\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n Notes\n -----\n this is currently implementing sort=False\n (though the default is sort=True) for groupby in general\n \"\"\"\n ids, _, ngroups = self.grouper.group_info\n sorter = get_group_index_sorter(ids, ngroups)\n ids, count = ids[sorter], len(ids)\n\n if count == 0:\n return np.empty(0, dtype=np.int64)\n\n run = np.r_[True, ids[:-1] != ids[1:]]\n rep = np.diff(np.r_[np.nonzero(run)[0], count])\n out = (~run).cumsum()\n\n if ascending:\n out -= np.repeat(out[run], rep)\n else:\n out = np.repeat(out[np.r_[run[1:], True]], rep) - out\n\n rev = np.empty(count, dtype=np.intp)\n rev[sorter] = np.arange(count, dtype=np.intp)\n return out[rev].astype(np.int64, copy=False)\n\n def _try_cast(self, result, obj, numeric_only=False):\n \"\"\"\n Try to cast the result to our obj original type,\n we may have roundtripped through object in the mean-time.\n\n If numeric_only is True, then only try to cast numerics\n and not datetimelikes.\n\n \"\"\"\n if obj.ndim > 1:\n dtype = obj.values.dtype\n else:\n dtype = obj.dtype\n\n if not is_scalar(result):\n if is_extension_array_dtype(dtype):\n # The function can return something of any type, so check\n # if the type is compatible with the calling EA.\n try:\n result = obj.values._from_sequence(result)\n except Exception:\n # https://github.com/pandas-dev/pandas/issues/22850\n # pandas has no control over what 3rd-party ExtensionArrays\n # do in _values_from_sequence. 
We still want ops to work\n # though, so we catch any regular Exception.\n pass\n elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:\n result = maybe_downcast_to_dtype(result, dtype)\n\n return result\n\n def _transform_should_cast(self, func_nm):\n \"\"\"\n Parameters:\n -----------\n func_nm: str\n The name of the aggregation function being performed\n\n Returns:\n --------\n bool\n Whether transform should attempt to cast the result of aggregation\n \"\"\"\n return (self.size().fillna(0) > 0).any() and (\n func_nm not in base.cython_cast_blacklist)\n\n def _cython_transform(self, how, numeric_only=True, **kwargs):\n output = collections.OrderedDict()\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = self.grouper.transform(obj.values, how,\n **kwargs)\n except NotImplementedError:\n continue\n except AssertionError as e:\n raise GroupByError(str(e))\n if self._transform_should_cast(how):\n output[name] = self._try_cast(result, obj)\n else:\n output[name] = result\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_transformed_output(output, names)\n\n def _cython_agg_general(self, how, alt=None, numeric_only=True,\n min_count=-1):\n output = {}\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = self.grouper.aggregate(obj.values, how,\n min_count=min_count)\n except AssertionError as e:\n raise GroupByError(str(e))\n output[name] = self._try_cast(result, obj)\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_aggregated_output(output, names)\n\n def _python_agg_general(self, func, *args, **kwargs):\n func = self._is_builtin_func(func)\n f = lambda x: func(x, *args, **kwargs)\n\n # iterate through \"columns\" ex exclusions to populate output dict\n output = {}\n for name, obj in self._iterate_slices():\n try:\n result, counts = self.grouper.agg_series(obj, f)\n output[name] = self._try_cast(result, obj, numeric_only=True)\n except TypeError:\n continue\n\n if len(output) == 0:\n return self._python_apply_general(f)\n\n if self.grouper._filter_empty_groups:\n\n mask = counts.ravel() > 0\n for name, result in compat.iteritems(output):\n\n # since we are masking, make sure that we have a float object\n values = result\n if is_numeric_dtype(values.dtype):\n values = ensure_float(values)\n\n output[name] = self._try_cast(values[mask], result)\n\n return self._wrap_aggregated_output(output)\n\n def _wrap_applied_output(self, *args, **kwargs):\n raise AbstractMethodError(self)\n\n def _concat_objects(self, keys, values, not_indexed_same=False):\n from pandas.core.reshape.concat import concat\n\n def reset_identity(values):\n # reset the identities of the components\n # of the values to prevent aliasing\n for v in com._not_none(*values):\n ax = v._get_axis(self.axis)\n ax._reset_identity()\n return values\n\n if not not_indexed_same:\n result = concat(values, axis=self.axis)\n ax = self._selected_obj._get_axis(self.axis)\n\n if isinstance(result, Series):\n result = result.reindex(ax)\n else:\n\n # this is a very unfortunate situation\n # we have a multi-index that is NOT lexsorted\n # and we have a result which is duplicated\n # we can't reindex, so we resort to this\n # GH 14776\n if isinstance(ax, MultiIndex) and not ax.is_unique:\n indexer = algorithms.unique1d(\n 
result.index.get_indexer_for(ax.values))\n result = result.take(indexer, axis=self.axis)\n else:\n result = result.reindex(ax, axis=self.axis)\n\n elif self.group_keys:\n\n values = reset_identity(values)\n if self.as_index:\n\n # possible MI return case\n group_keys = keys\n group_levels = self.grouper.levels\n group_names = self.grouper.names\n\n result = concat(values, axis=self.axis, keys=group_keys,\n levels=group_levels, names=group_names,\n sort=False)\n else:\n\n # GH5610, returns a MI, with the first level being a\n # range index\n keys = list(range(len(values)))\n result = concat(values, axis=self.axis, keys=keys)\n else:\n values = reset_identity(values)\n result = concat(values, axis=self.axis)\n\n if (isinstance(result, Series) and\n getattr(self, '_selection_name', None) is not None):\n\n result.name = self._selection_name\n\n return result\n\n def _apply_filter(self, indices, dropna):\n if len(indices) == 0:\n indices = np.array([], dtype='int64')\n else:\n indices = np.sort(np.concatenate(indices))\n if dropna:\n filtered = self._selected_obj.take(indices, axis=self.axis)\n else:\n mask = np.empty(len(self._selected_obj.index), dtype=bool)\n mask.fill(False)\n mask[indices.astype(int)] = True\n # mask fails to broadcast when passed to where; broadcast manually.\n mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T\n filtered = self._selected_obj.where(mask) # Fill with NaNs.\n return filtered\n\n\nclass GroupBy(_GroupBy):\n\n \"\"\"\n Class for grouping and aggregating relational data.\n\n See aggregate, transform, and apply functions on this object.\n\n It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:\n\n ::\n\n grouped = groupby(obj, ...)\n\n Parameters\n ----------\n obj : pandas object\n axis : int, default 0\n level : int, default None\n Level of MultiIndex\n groupings : list of Grouping objects\n Most users should ignore this\n exclusions : array-like, optional\n List of columns to exclude\n name : string\n Most users should ignore this\n\n Returns\n -------\n **Attributes**\n groups : dict\n {group name -> group labels}\n len(grouped) : int\n Number of groups\n\n Notes\n -----\n After grouping, see aggregate, apply, and transform functions. Here are\n some other brief notes about usage. When grouping by multiple groups, the\n result index will be a MultiIndex (hierarchical) by default.\n\n Iteration produces (key, group) tuples, i.e. chunking the data by group. So\n you can write code like:\n\n ::\n\n grouped = obj.groupby(keys, axis=axis)\n for key, group in grouped:\n # do something with the data\n\n Function calls on GroupBy, if not specially implemented, \"dispatch\" to the\n grouped data. 
So if you group a DataFrame and wish to invoke the std()\n method on each group, you can simply do:\n\n ::\n\n df.groupby(mapper).std()\n\n rather than\n\n ::\n\n df.groupby(mapper).aggregate(np.std)\n\n You can pass arguments to these \"wrapped\" functions, too.\n\n See the online documentation for full exposition on these topics and much\n more\n \"\"\"\n def _bool_agg(self, val_test, skipna):\n \"\"\"\n Shared func to call any / all Cython GroupBy implementations.\n \"\"\"\n\n def objs_to_bool(vals):\n try:\n vals = vals.astype(np.bool)\n except ValueError: # for objects\n vals = np.array([bool(x) for x in vals])\n\n return vals.view(np.uint8)\n\n def result_to_bool(result):\n return result.astype(np.bool, copy=False)\n\n return self._get_cythonized_result('group_any_all', self.grouper,\n aggregate=True,\n cython_dtype=np.uint8,\n needs_values=True,\n needs_mask=True,\n pre_processing=objs_to_bool,\n post_processing=result_to_bool,\n val_test=val_test, skipna=skipna)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def any(self, skipna=True):\n \"\"\"\n Returns True if any value in the group is truthful, else False.\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('any', skipna)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def all(self, skipna=True):\n \"\"\"\n Returns True if all values in the group are truthful, else False.\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('all', skipna)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def count(self):\n \"\"\"\n Compute count of group, excluding missing values.\n \"\"\"\n\n # defined here for API doc\n raise NotImplementedError\n\n @Substitution(name='groupby', see_also=_common_see_also)\n def mean(self, *args, **kwargs):\n \"\"\"\n Compute mean of groups, excluding missing values.\n\n Returns\n -------\n pandas.Series or pandas.DataFrame\n\n %(see_also)s\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5],\n ... 
'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])\n\n Groupby one column and return the mean of the remaining columns in\n each group.\n\n >>> df.groupby('A').mean()\n >>>\n B C\n A\n 1 3.0 1.333333\n 2 4.0 1.500000\n\n Groupby two columns and return the mean of the remaining column.\n\n >>> df.groupby(['A', 'B']).mean()\n >>>\n C\n A B\n 1 2.0 2\n 4.0 1\n 2 3.0 1\n 5.0 2\n\n Groupby one column and return the mean of only particular column in\n the group.\n\n >>> df.groupby('A')['B'].mean()\n >>>\n A\n 1 3.0\n 2 4.0\n Name: B, dtype: float64\n \"\"\"\n nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])\n try:\n return self._cython_agg_general('mean', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n with _group_selection_context(self):\n f = lambda x: x.mean(axis=self.axis, **kwargs)\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def median(self, **kwargs):\n \"\"\"\n Compute median of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n try:\n return self._cython_agg_general('median', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n\n def f(x):\n if isinstance(x, np.ndarray):\n x = Series(x)\n return x.median(axis=self.axis, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def std(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute standard deviation of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex.\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n # TODO: implement at Cython level?\n nv.validate_groupby_func('std', args, kwargs)\n return np.sqrt(self.var(ddof=ddof, **kwargs))\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def var(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute variance of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex.\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n nv.validate_groupby_func('var', args, kwargs)\n if ddof == 1:\n try:\n return self._cython_agg_general('var', **kwargs)\n except Exception:\n f = lambda x: x.var(ddof=ddof, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n else:\n f = lambda x: x.var(ddof=ddof, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def sem(self, ddof=1):\n \"\"\"\n Compute standard error of the mean of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex.\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n return self.std(ddof=ddof) / np.sqrt(self.count())\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def size(self):\n \"\"\"\n Compute group sizes.\n \"\"\"\n result = self.grouper.size()\n\n if isinstance(self.obj, Series):\n result.name = getattr(self.obj, 'name', None)\n return result\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add numeric operations to the GroupBy generically.\n \"\"\"\n\n def groupby_function(name, alias, npfunc,\n numeric_only=True, _convert=False,\n min_count=-1):\n\n _local_template = \"Compute %(f)s of group values\"\n\n @Substitution(name='groupby', 
f=name)\n @Appender(_common_see_also)\n @Appender(_local_template)\n def f(self, **kwargs):\n if 'numeric_only' not in kwargs:\n kwargs['numeric_only'] = numeric_only\n if 'min_count' not in kwargs:\n kwargs['min_count'] = min_count\n\n self._set_group_selection()\n try:\n return self._cython_agg_general(\n alias, alt=npfunc, **kwargs)\n except AssertionError as e:\n raise SpecificationError(str(e))\n except Exception:\n result = self.aggregate(\n lambda x: npfunc(x, axis=self.axis))\n if _convert:\n result = result._convert(datetime=True)\n return result\n\n set_function_name(f, name, cls)\n\n return f\n\n def first_compat(x, axis=0):\n\n def first(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[0]\n\n if isinstance(x, DataFrame):\n return x.apply(first, axis=axis)\n else:\n return first(x)\n\n def last_compat(x, axis=0):\n\n def last(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[-1]\n\n if isinstance(x, DataFrame):\n return x.apply(last, axis=axis)\n else:\n return last(x)\n\n cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)\n cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)\n cls.min = groupby_function('min', 'min', np.min, numeric_only=False)\n cls.max = groupby_function('max', 'max', np.max, numeric_only=False)\n cls.first = groupby_function('first', 'first', first_compat,\n numeric_only=False)\n cls.last = groupby_function('last', 'last', last_compat,\n numeric_only=False)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def ohlc(self):\n \"\"\"\n Compute sum of values, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n\n return self._apply_to_column_groupbys(\n lambda x: x._cython_agg_general('ohlc'))\n\n @Appender(DataFrame.describe.__doc__)\n def describe(self, **kwargs):\n with _group_selection_context(self):\n result = self.apply(lambda x: x.describe(**kwargs))\n if self.axis == 1:\n return result.T\n return result.unstack()\n\n def resample(self, rule, *args, **kwargs):\n \"\"\"\n Provide resampling when using a TimeGrouper.\n\n Given a grouper, the function resamples it according to a string\n \"string\" -> \"frequency\".\n\n See the :ref:`frequency aliases <timeseries.offset-aliases>`\n documentation for more details.\n\n Parameters\n ----------\n rule : str or DateOffset\n The offset string or object representing target grouper conversion.\n *args, **kwargs\n Possible arguments are `how`, `fill_method`, `limit`, `kind` and\n `on`, and other arguments of `TimeGrouper`.\n\n Returns\n -------\n Grouper\n Return a new grouper with our resampler appended.\n\n See Also\n --------\n pandas.Grouper : Specify a frequency to resample with when\n grouping by a key.\n DatetimeIndex.resample : Frequency conversion and resampling of\n time series.\n\n Examples\n --------\n >>> idx = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> df = pd.DataFrame(data=4 * [range(2)],\n ... index=idx,\n ... 
columns=['a', 'b'])\n >>> df.iloc[2, 0] = 5\n >>> df\n a b\n 2000-01-01 00:00:00 0 1\n 2000-01-01 00:01:00 0 1\n 2000-01-01 00:02:00 5 1\n 2000-01-01 00:03:00 0 1\n\n Downsample the DataFrame into 3 minute bins and sum the values of\n the timestamps falling into a bin.\n\n >>> df.groupby('a').resample('3T').sum()\n a b\n a\n 0 2000-01-01 00:00:00 0 2\n 2000-01-01 00:03:00 0 1\n 5 2000-01-01 00:00:00 5 1\n\n Upsample the series into 30 second bins.\n\n >>> df.groupby('a').resample('30S').sum()\n a b\n a\n 0 2000-01-01 00:00:00 0 1\n 2000-01-01 00:00:30 0 0\n 2000-01-01 00:01:00 0 1\n 2000-01-01 00:01:30 0 0\n 2000-01-01 00:02:00 0 0\n 2000-01-01 00:02:30 0 0\n 2000-01-01 00:03:00 0 1\n 5 2000-01-01 00:02:00 5 1\n\n Resample by month. Values are assigned to the month of the period.\n\n >>> df.groupby('a').resample('M').sum()\n a b\n a\n 0 2000-01-31 0 3\n 5 2000-01-31 5 1\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> df.groupby('a').resample('3T', closed='right').sum()\n a b\n a\n 0 1999-12-31 23:57:00 0 1\n 2000-01-01 00:00:00 0 2\n 5 2000-01-01 00:00:00 5 1\n\n Downsample the series into 3 minute bins and close the right side of\n the bin interval, but label each bin using the right edge instead of\n the left.\n\n >>> df.groupby('a').resample('3T', closed='right', label='right').sum()\n a b\n a\n 0 2000-01-01 00:00:00 0 1\n 2000-01-01 00:03:00 0 2\n 5 2000-01-01 00:03:00 5 1\n\n Add an offset of twenty seconds.\n\n >>> df.groupby('a').resample('3T', loffset='20s').sum()\n a b\n a\n 0 2000-01-01 00:00:20 0 2\n 2000-01-01 00:03:20 0 1\n 5 2000-01-01 00:00:20 5 1\n \"\"\"\n from pandas.core.resample import get_resampler_for_grouping\n return get_resampler_for_grouping(self, rule, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def rolling(self, *args, **kwargs):\n \"\"\"\n Return a rolling grouper, providing rolling functionality per group.\n \"\"\"\n from pandas.core.window import RollingGroupby\n return RollingGroupby(self, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def expanding(self, *args, **kwargs):\n \"\"\"\n Return an expanding grouper, providing expanding\n functionality per group.\n \"\"\"\n from pandas.core.window import ExpandingGroupby\n return ExpandingGroupby(self, *args, **kwargs)\n\n def _fill(self, direction, limit=None):\n \"\"\"\n Shared function for `pad` and `backfill` to call Cython method.\n\n Parameters\n ----------\n direction : {'ffill', 'bfill'}\n Direction passed to underlying Cython function. `bfill` will cause\n values to be filled backwards. `ffill` and any other values will\n default to a forward fill\n limit : int, default None\n Maximum number of consecutive values to fill. 
If `None`, this\n method will convert to -1 prior to passing to Cython\n\n Returns\n -------\n `Series` or `DataFrame` with filled values\n\n See Also\n --------\n pad\n backfill\n \"\"\"\n # Need int value for Cython\n if limit is None:\n limit = -1\n\n return self._get_cythonized_result('group_fillna_indexer',\n self.grouper, needs_mask=True,\n cython_dtype=np.int64,\n result_is_index=True,\n direction=direction, limit=limit)\n\n @Substitution(name='groupby')\n def pad(self, limit=None):\n \"\"\"\n Forward fill the values.\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.pad\n DataFrame.pad\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('ffill', limit=limit)\n ffill = pad\n\n @Substitution(name='groupby')\n def backfill(self, limit=None):\n \"\"\"\n Backward fill the values.\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.backfill\n DataFrame.backfill\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('bfill', limit=limit)\n bfill = backfill\n\n @Substitution(name='groupby', see_also=_common_see_also)\n def nth(self, n, dropna=None):\n \"\"\"\n Take the nth row from each group if n is an int, or a subset of rows\n if n is a list of ints.\n\n If dropna, will take the nth non-null row, dropna is either\n Truthy (if a Series) or 'all', 'any' (if a DataFrame);\n this is equivalent to calling dropna(how=dropna) before the\n groupby.\n\n Parameters\n ----------\n n : int or list of ints\n a single nth value for the row or a list of nth values\n dropna : None or str, optional\n apply the specified dropna operation before counting which row is\n the nth row. Needs to be None, 'any' or 'all'\n\n %(see_also)s\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])\n >>> g = df.groupby('A')\n >>> g.nth(0)\n B\n A\n 1 NaN\n 2 3.0\n >>> g.nth(1)\n B\n A\n 1 2.0\n 2 5.0\n >>> g.nth(-1)\n B\n A\n 1 4.0\n 2 5.0\n >>> g.nth([0, 1])\n B\n A\n 1 NaN\n 1 2.0\n 2 3.0\n 2 5.0\n\n Specifying `dropna` allows count ignoring ``NaN``\n\n >>> g.nth(0, dropna='any')\n B\n A\n 1 2.0\n 2 3.0\n\n NaNs denote group exhausted when using dropna\n\n >>> g.nth(3, dropna='any')\n B\n A\n 1 NaN\n 2 NaN\n\n Specifying `as_index=False` in `groupby` keeps the original index.\n\n >>> df.groupby('A', as_index=False).nth(1)\n A B\n 1 1 2.0\n 4 2 5.0\n \"\"\"\n\n if isinstance(n, int):\n nth_values = [n]\n elif isinstance(n, (set, list, tuple)):\n nth_values = list(set(n))\n if dropna is not None:\n raise ValueError(\n \"dropna option with a list of nth values is not supported\")\n else:\n raise TypeError(\"n needs to be an int or a list/set/tuple of ints\")\n\n nth_values = np.array(nth_values, dtype=np.intp)\n self._set_group_selection()\n\n if not dropna:\n mask_left = np.in1d(self._cumcount_array(), nth_values)\n mask_right = np.in1d(self._cumcount_array(ascending=False) + 1,\n -nth_values)\n mask = mask_left | mask_right\n\n out = self._selected_obj[mask]\n if not self.as_index:\n return out\n\n ids, _, _ = self.grouper.group_info\n out.index = self.grouper.result_index[ids[mask]]\n\n return out.sort_index() if self.sort else out\n\n if dropna not in ['any', 'all']:\n if isinstance(self._selected_obj, Series) and dropna is True:\n warnings.warn(\"the dropna={dropna} keyword is deprecated,\"\n \"use dropna='all' instead. 
\"\n \"For a Series groupby, dropna must be \"\n \"either None, 'any' or 'all'.\".format(\n dropna=dropna),\n FutureWarning,\n stacklevel=2)\n dropna = 'all'\n else:\n # Note: when agg-ing picker doesn't raise this,\n # just returns NaN\n raise ValueError(\"For a DataFrame groupby, dropna must be \"\n \"either None, 'any' or 'all', \"\n \"(was passed {dropna}).\".format(\n dropna=dropna))\n\n # old behaviour, but with all and any support for DataFrames.\n # modified in GH 7559 to have better perf\n max_len = n if n >= 0 else - 1 - n\n dropped = self.obj.dropna(how=dropna, axis=self.axis)\n\n # get a new grouper for our dropped obj\n if self.keys is None and self.level is None:\n\n # we don't have the grouper info available\n # (e.g. we have selected out\n # a column that is not in the current object)\n axis = self.grouper.axis\n grouper = axis[axis.isin(dropped.index)]\n\n else:\n\n # create a grouper with the original parameters, but on the dropped\n # object\n from pandas.core.groupby.grouper import _get_grouper\n grouper, _, _ = _get_grouper(dropped, key=self.keys,\n axis=self.axis, level=self.level,\n sort=self.sort,\n mutated=self.mutated)\n\n grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)\n sizes, result = grb.size(), grb.nth(n)\n mask = (sizes < max_len).values\n\n # set the results which don't meet the criteria\n if len(result) and mask.any():\n result.loc[mask] = np.nan\n\n # reset/reindex to the original groups\n if (len(self.obj) == len(dropped) or\n len(result) == len(self.grouper.result_index)):\n result.index = self.grouper.result_index\n else:\n result = result.reindex(self.grouper.result_index)\n\n return result\n\n @Substitution(name='groupby')\n def ngroup(self, ascending=True):\n \"\"\"\n Number each group from 0 to the number of groups - 1.\n\n This is the enumerative complement of cumcount. Note that the\n numbers given to the groups match the order in which the groups\n would be seen when iterating over the groupby object, not the\n order they are first observed.\n\n .. versionadded:: 0.20.2\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from number of group - 1 to 0.\n\n See Also\n --------\n .cumcount : Number the rows in each group.\n\n Examples\n --------\n\n >>> df = pd.DataFrame({\"A\": list(\"aaabba\")})\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').ngroup()\n 0 0\n 1 0\n 2 0\n 3 1\n 4 1\n 5 0\n dtype: int64\n >>> df.groupby('A').ngroup(ascending=False)\n 0 1\n 1 1\n 2 1\n 3 0\n 4 0\n 5 1\n dtype: int64\n >>> df.groupby([\"A\", [1,1,2,3,2,1]]).ngroup()\n 0 0\n 1 0\n 2 1\n 3 3\n 4 2\n 5 0\n dtype: int64\n \"\"\"\n\n with _group_selection_context(self):\n index = self._selected_obj.index\n result = Series(self.grouper.group_info[0], index)\n if not ascending:\n result = self.ngroups - 1 - result\n return result\n\n @Substitution(name='groupby')\n def cumcount(self, ascending=True):\n \"\"\"\n Number each item in each group from 0 to the length of that group - 1.\n\n Essentially this is equivalent to\n\n >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n See Also\n --------\n .ngroup : Number the groups themselves.\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],\n ... 
columns=['A'])\n        >>> df\n           A\n        0  a\n        1  a\n        2  a\n        3  b\n        4  b\n        5  a\n        >>> df.groupby('A').cumcount()\n        0    0\n        1    1\n        2    2\n        3    0\n        4    1\n        5    3\n        dtype: int64\n        >>> df.groupby('A').cumcount(ascending=False)\n        0    3\n        1    2\n        2    1\n        3    1\n        4    0\n        5    0\n        dtype: int64\n        \"\"\"\n\n        with _group_selection_context(self):\n            index = self._selected_obj.index\n            cumcounts = self._cumcount_array(ascending=ascending)\n            return Series(cumcounts, index)\n\n    @Substitution(name='groupby')\n    @Appender(_common_see_also)\n    def rank(self, method='average', ascending=True, na_option='keep',\n             pct=False, axis=0):\n        \"\"\"\n        Provides the rank of values within each group.\n\n        Parameters\n        ----------\n        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n            * average: average rank of group\n            * min: lowest rank in group\n            * max: highest rank in group\n            * first: ranks assigned in order they appear in the array\n            * dense: like 'min', but rank always increases by 1 between groups\n        ascending : boolean, default True\n            False for ranks by high (1) to low (N)\n        na_option : {'keep', 'top', 'bottom'}, default 'keep'\n            * keep: leave NA values where they are\n            * top: smallest rank if ascending\n            * bottom: smallest rank if descending\n        pct : boolean, default False\n            Compute percentage rank of data within each group\n        axis : int, default 0\n            The axis of the object over which to compute the rank.\n\n        Returns\n        -------\n        DataFrame with ranking of values within each group\n        \"\"\"\n        if na_option not in {'keep', 'top', 'bottom'}:\n            msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n            raise ValueError(msg)\n        return self._cython_transform('rank', numeric_only=False,\n                                      ties_method=method, ascending=ascending,\n                                      na_option=na_option, pct=pct, axis=axis)\n\n    @Substitution(name='groupby')\n    @Appender(_common_see_also)\n    def cumprod(self, axis=0, *args, **kwargs):\n        \"\"\"\n        Cumulative product for each group.\n        \"\"\"\n        nv.validate_groupby_func('cumprod', args, kwargs,\n                                 ['numeric_only', 'skipna'])\n        if axis != 0:\n            return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))\n\n        return self._cython_transform('cumprod', **kwargs)\n\n    @Substitution(name='groupby')\n    @Appender(_common_see_also)\n    def cumsum(self, axis=0, *args, **kwargs):\n        \"\"\"\n        Cumulative sum for each group.\n        \"\"\"\n        nv.validate_groupby_func('cumsum', args, kwargs,\n                                 ['numeric_only', 'skipna'])\n        if axis != 0:\n            return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))\n\n        return self._cython_transform('cumsum', **kwargs)\n\n    @Substitution(name='groupby')\n    @Appender(_common_see_also)\n    def cummin(self, axis=0, **kwargs):\n        \"\"\"\n        Cumulative min for each group.\n        \"\"\"\n        if axis != 0:\n            return self.apply(lambda x: np.minimum.accumulate(x, axis))\n\n        return self._cython_transform('cummin', numeric_only=False)\n\n    @Substitution(name='groupby')\n    @Appender(_common_see_also)\n    def cummax(self, axis=0, **kwargs):\n        \"\"\"\n        Cumulative max for each group.\n        \"\"\"\n        if axis != 0:\n            return self.apply(lambda x: np.maximum.accumulate(x, axis))\n\n        return self._cython_transform('cummax', numeric_only=False)\n\n    def _get_cythonized_result(self, how, grouper, aggregate=False,\n                               cython_dtype=None, needs_values=False,\n                               needs_mask=False, needs_ngroups=False,\n                               result_is_index=False,\n                               pre_processing=None, post_processing=None,\n                               **kwargs):\n        \"\"\"\n        Get result for Cythonized functions.\n\n        Parameters\n        ----------\n        how : str, Cythonized function name to be called\n        grouper : Grouper object containing pertinent group info\n        aggregate : bool, default False\n            Whether the 
result should be aggregated to match the number of\n            groups\n        cython_dtype : default None\n            Type of the array that will be modified by the Cython call. If\n            `None`, the type will be inferred from the values of each slice\n        needs_values : bool, default False\n            Whether the values should be a part of the Cython call\n            signature\n        needs_mask : bool, default False\n            Whether boolean mask needs to be part of the Cython call\n            signature\n        needs_ngroups : bool, default False\n            Whether number of groups is part of the Cython call signature\n        result_is_index : bool, default False\n            Whether the result of the Cython operation is an index of\n            values to be retrieved, instead of the actual values themselves\n        pre_processing : function, default None\n            Function to be applied to `values` prior to passing to Cython\n            Raises if `needs_values` is False\n        post_processing : function, default None\n            Function to be applied to result of Cython function\n        **kwargs : dict\n            Extra arguments to be passed back to Cython funcs\n\n        Returns\n        -------\n        `Series` or `DataFrame` with filled values\n        \"\"\"\n        if result_is_index and aggregate:\n            raise ValueError(\"'result_is_index' and 'aggregate' cannot both \"\n                             \"be True!\")\n        if post_processing:\n            if not callable(post_processing):\n                raise ValueError(\"'post_processing' must be a callable!\")\n        if pre_processing:\n            if not callable(pre_processing):\n                raise ValueError(\"'pre_processing' must be a callable!\")\n            if not needs_values:\n                raise ValueError(\"Cannot use 'pre_processing' without \"\n                                 \"specifying 'needs_values'!\")\n\n        labels, _, ngroups = grouper.group_info\n        output = collections.OrderedDict()\n        base_func = getattr(libgroupby, how)\n\n        for name, obj in self._iterate_slices():\n            if aggregate:\n                result_sz = ngroups\n            else:\n                result_sz = len(obj.values)\n\n            if not cython_dtype:\n                cython_dtype = obj.values.dtype\n\n            result = np.zeros(result_sz, dtype=cython_dtype)\n            func = partial(base_func, result, labels)\n            if needs_values:\n                vals = obj.values\n                if pre_processing:\n                    vals = pre_processing(vals)\n                func = partial(func, vals)\n\n            if needs_mask:\n                mask = isna(obj.values).view(np.uint8)\n                func = partial(func, mask)\n\n            if needs_ngroups:\n                func = partial(func, ngroups)\n\n            func(**kwargs)  # Call func to modify indexer values in place\n\n            if result_is_index:\n                result = algorithms.take_nd(obj.values, result)\n\n            if post_processing:\n                result = post_processing(result)\n\n            output[name] = result\n\n        if aggregate:\n            return self._wrap_aggregated_output(output)\n        else:\n            return self._wrap_transformed_output(output)\n\n    @Substitution(name='groupby')\n    @Appender(_common_see_also)\n    def shift(self, periods=1, freq=None, axis=0):\n        \"\"\"\n        Shift each group by periods observations.\n\n        Parameters\n        ----------\n        periods : integer, default 1\n            number of periods to shift\n        freq : frequency string\n        axis : axis to shift, default 0\n        \"\"\"\n\n        if freq is not None or axis != 0:\n            return self.apply(lambda x: x.shift(periods, freq, axis))\n\n        return self._get_cythonized_result('group_shift_indexer',\n                                           self.grouper, cython_dtype=np.int64,\n                                           needs_ngroups=True,\n                                           result_is_index=True,\n                                           periods=periods)\n\n    @Substitution(name='groupby')\n    @Appender(_common_see_also)\n    def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,\n                   axis=0):\n        \"\"\"\n        Calculate pct_change of each value to previous entry in group.\n        \"\"\"\n        if freq is not None or axis != 0:\n            return self.apply(lambda x: x.pct_change(periods=periods,\n                                                     fill_method=fill_method,\n                                                     limit=limit, freq=freq,\n                                                     axis=axis))\n        
filled = getattr(self, fill_method)(limit=limit)\n        filled = filled.drop(self.grouper.names, axis=1)\n        fill_grp = filled.groupby(self.grouper.labels)\n        shifted = fill_grp.shift(periods=periods, freq=freq)\n        return (filled / shifted) - 1\n\n    @Substitution(name='groupby', see_also=_common_see_also)\n    def head(self, n=5):\n        \"\"\"\n        Returns first n rows of each group.\n\n        Essentially equivalent to ``.apply(lambda x: x.head(n))``,\n        except ignores as_index flag.\n\n        %(see_also)s\n\n        Examples\n        --------\n\n        >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],\n        ...                   columns=['A', 'B'])\n        >>> df.groupby('A', as_index=False).head(1)\n           A  B\n        0  1  2\n        2  5  6\n        >>> df.groupby('A').head(1)\n           A  B\n        0  1  2\n        2  5  6\n        \"\"\"\n        self._reset_group_selection()\n        mask = self._cumcount_array() < n\n        return self._selected_obj[mask]\n\n    @Substitution(name='groupby', see_also=_common_see_also)\n    def tail(self, n=5):\n        \"\"\"\n        Returns last n rows of each group.\n\n        Essentially equivalent to ``.apply(lambda x: x.tail(n))``,\n        except ignores as_index flag.\n\n        %(see_also)s\n\n        Examples\n        --------\n\n        >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],\n        ...                   columns=['A', 'B'])\n        >>> df.groupby('A').tail(1)\n           A  B\n        1  a  2\n        3  b  2\n        >>> df.groupby('A').head(1)\n           A  B\n        0  a  1\n        2  b  1\n        \"\"\"\n        self._reset_group_selection()\n        mask = self._cumcount_array(ascending=False) < n\n        return self._selected_obj[mask]\n\n\nGroupBy._add_numeric_operations()\n\n\n@Appender(GroupBy.__doc__)\ndef groupby(obj, by, **kwds):\n    if isinstance(obj, Series):\n        from pandas.core.groupby.generic import SeriesGroupBy\n        klass = SeriesGroupBy\n    elif isinstance(obj, DataFrame):\n        from pandas.core.groupby.generic import DataFrameGroupBy\n        klass = DataFrameGroupBy\n    else:  # pragma: no cover\n        raise TypeError('invalid type: {}'.format(obj))\n\n    return klass(obj, by, **kwds)\n",
"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_float_dtype, is_integer_dtype\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical, CategoricalIndex, DatetimeIndex, Index, Interval,\n IntervalIndex, NaT, Series, Timestamp, date_range, period_range,\n timedelta_range)\nimport pandas.util.testing as tm\n\n\nclass TestCategoricalConstructors(object):\n\n def test_validate_ordered(self):\n # see gh-14058\n exp_msg = \"'ordered' must either be 'True' or 'False'\"\n exp_err = TypeError\n\n # This should be a boolean.\n ordered = np.array([0, 1, 2])\n\n with pytest.raises(exp_err, match=exp_msg):\n Categorical([1, 2, 3], ordered=ordered)\n\n with pytest.raises(exp_err, match=exp_msg):\n Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],\n ordered=ordered)\n\n def test_constructor_empty(self):\n # GH 17248\n c = Categorical([])\n expected = Index([])\n tm.assert_index_equal(c.categories, expected)\n\n c = Categorical([], categories=[1, 2, 3])\n expected = pd.Int64Index([1, 2, 3])\n tm.assert_index_equal(c.categories, expected)\n\n def test_constructor_empty_boolean(self):\n # see gh-22702\n cat = pd.Categorical([], categories=[True, False])\n categories = sorted(cat.categories.tolist())\n assert categories == [False, True]\n\n def test_constructor_tuples(self):\n values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)\n result = Categorical(values)\n expected = Index([(1,), (1, 2)], tupleize_cols=False)\n tm.assert_index_equal(result.categories, expected)\n assert result.ordered is False\n\n def test_constructor_tuples_datetimes(self):\n # numpy will auto reshape when all of the tuples are the\n # same len, so add an extra one with 2 items and slice it off\n values = np.array([(Timestamp('2010-01-01'),),\n (Timestamp('2010-01-02'),),\n (Timestamp('2010-01-01'),),\n (Timestamp('2010-01-02'),),\n ('a', 'b')], dtype=object)[:-1]\n result = Categorical(values)\n expected = Index([(Timestamp('2010-01-01'),),\n (Timestamp('2010-01-02'),)], tupleize_cols=False)\n tm.assert_index_equal(result.categories, expected)\n\n def test_constructor_unsortable(self):\n\n # it works!\n arr = np.array([1, 2, 3, datetime.now()], dtype='O')\n factor = Categorical(arr, ordered=False)\n assert not factor.ordered\n\n # this however will raise as cannot be sorted\n with pytest.raises(TypeError):\n Categorical(arr, ordered=True)\n\n def test_constructor_interval(self):\n result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],\n ordered=True)\n ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])\n exp = Categorical(ii, ordered=True)\n tm.assert_categorical_equal(result, exp)\n tm.assert_index_equal(result.categories, ii)\n\n def test_constructor(self):\n\n exp_arr = np.array([\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"], dtype=np.object_)\n c1 = Categorical(exp_arr)\n tm.assert_numpy_array_equal(c1.__array__(), exp_arr)\n c2 = Categorical(exp_arr, categories=[\"a\", \"b\", \"c\"])\n tm.assert_numpy_array_equal(c2.__array__(), exp_arr)\n c2 = Categorical(exp_arr, categories=[\"c\", \"b\", \"a\"])\n tm.assert_numpy_array_equal(c2.__array__(), exp_arr)\n\n # categories must be unique\n with pytest.raises(ValueError):\n Categorical([1, 2], [1, 2, 2])\n\n with pytest.raises(ValueError):\n Categorical([\"a\", \"b\"], [\"a\", \"b\", \"b\"])\n\n # The default should be unordered\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n assert 
not c1.ordered\n\n        # Categorical as input\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n        c2 = Categorical(c1)\n        tm.assert_categorical_equal(c1, c2)\n\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n        c2 = Categorical(c1)\n        tm.assert_categorical_equal(c1, c2)\n\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"c\", \"b\"])\n        c2 = Categorical(c1)\n        tm.assert_categorical_equal(c1, c2)\n\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"c\", \"b\"])\n        c2 = Categorical(c1, categories=[\"a\", \"b\", \"c\"])\n        tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())\n        tm.assert_index_equal(c2.categories, Index([\"a\", \"b\", \"c\"]))\n\n        # Series of dtype category\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n        c2 = Categorical(Series(c1))\n        tm.assert_categorical_equal(c1, c2)\n\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"c\", \"b\"])\n        c2 = Categorical(Series(c1))\n        tm.assert_categorical_equal(c1, c2)\n\n        # Series\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n        c2 = Categorical(Series([\"a\", \"b\", \"c\", \"a\"]))\n        tm.assert_categorical_equal(c1, c2)\n\n        c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n        c2 = Categorical(Series([\"a\", \"b\", \"c\", \"a\"]),\n                         categories=[\"a\", \"b\", \"c\", \"d\"])\n        tm.assert_categorical_equal(c1, c2)\n\n        # This should result in integer categories, not float!\n        cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])\n        assert is_integer_dtype(cat.categories)\n\n        # https://github.com/pandas-dev/pandas/issues/3678\n        cat = Categorical([np.nan, 1, 2, 3])\n        assert is_integer_dtype(cat.categories)\n\n        # this should result in floats\n        cat = Categorical([np.nan, 1, 2., 3])\n        assert is_float_dtype(cat.categories)\n\n        cat = Categorical([np.nan, 1., 2., 3.])\n        assert is_float_dtype(cat.categories)\n\n        # This doesn't work -> this would probably need some kind of \"remember\n        # the original type\" feature to try to cast the array interface result\n        # to...\n\n        # vals = np.asarray(cat[cat.notna()])\n        # assert is_integer_dtype(vals)\n\n        # corner cases\n        cat = Categorical([1])\n        assert len(cat.categories) == 1\n        assert cat.categories[0] == 1\n        assert len(cat.codes) == 1\n        assert cat.codes[0] == 0\n\n        cat = Categorical([\"a\"])\n        assert len(cat.categories) == 1\n        assert cat.categories[0] == \"a\"\n        assert len(cat.codes) == 1\n        assert cat.codes[0] == 0\n\n        # Scalars should be converted to lists\n        cat = Categorical(1)\n        assert len(cat.categories) == 1\n        assert cat.categories[0] == 1\n        assert len(cat.codes) == 1\n        assert cat.codes[0] == 0\n\n        # two arrays\n        #  - when the first is an integer dtype and the second is not\n        #  - when the resulting codes are all -1/NaN\n        with tm.assert_produces_warning(None):\n            c_old = Categorical([0, 1, 2, 0, 1, 2],\n                                categories=[\"a\", \"b\", \"c\"])  # noqa\n\n        with tm.assert_produces_warning(None):\n            c_old = Categorical([0, 1, 2, 0, 1, 2],  # noqa\n                                categories=[3, 4, 5])\n\n        # the next one is from the old docs\n        with tm.assert_produces_warning(None):\n            c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])  # noqa\n            cat = Categorical([1, 2], categories=[1, 2, 3])\n\n        # this is a legitimate constructor\n        with tm.assert_produces_warning(None):\n            c = Categorical(np.array([], dtype='int64'),  # noqa\n                            categories=[3, 2, 1], ordered=True)\n\n    def test_constructor_not_sequence(self):\n        # https://github.com/pandas-dev/pandas/issues/16022\n        with 
pytest.raises(TypeError):\n Categorical(['a', 'b'], categories='a')\n\n def test_constructor_with_null(self):\n\n # Cannot have NaN in categories\n with pytest.raises(ValueError):\n Categorical([np.nan, \"a\", \"b\", \"c\"],\n categories=[np.nan, \"a\", \"b\", \"c\"])\n\n with pytest.raises(ValueError):\n Categorical([None, \"a\", \"b\", \"c\"],\n categories=[None, \"a\", \"b\", \"c\"])\n\n with pytest.raises(ValueError):\n Categorical(DatetimeIndex(['nat', '20160101']),\n categories=[NaT, Timestamp('20160101')])\n\n def test_constructor_with_index(self):\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n tm.assert_categorical_equal(ci.values, Categorical(ci))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n tm.assert_categorical_equal(ci.values,\n Categorical(ci.astype(object),\n categories=ci.categories))\n\n def test_constructor_with_generator(self):\n # This was raising an Error in isna(single_val).any() because isna\n # returned a scalar for a generator\n xrange = range\n\n exp = Categorical([0, 1, 2])\n cat = Categorical((x for x in [0, 1, 2]))\n tm.assert_categorical_equal(cat, exp)\n cat = Categorical(xrange(3))\n tm.assert_categorical_equal(cat, exp)\n\n # This uses xrange internally\n from pandas.core.index import MultiIndex\n MultiIndex.from_product([range(5), ['a', 'b', 'c']])\n\n # check that categories accept generators and sequences\n cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))\n tm.assert_categorical_equal(cat, exp)\n cat = Categorical([0, 1, 2], categories=xrange(3))\n tm.assert_categorical_equal(cat, exp)\n\n @pytest.mark.parametrize(\"dtl\", [\n date_range(\"1995-01-01 00:00:00\", periods=5, freq=\"s\"),\n date_range(\"1995-01-01 00:00:00\", periods=5,\n freq=\"s\", tz=\"US/Eastern\"),\n timedelta_range(\"1 day\", periods=5, freq=\"s\")\n ])\n def test_constructor_with_datetimelike(self, dtl):\n # see gh-12077\n # constructor with a datetimelike and NaT\n\n s = Series(dtl)\n c = Categorical(s)\n\n expected = type(dtl)(s)\n expected.freq = None\n\n tm.assert_index_equal(c.categories, expected)\n tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype=\"int8\"))\n\n # with NaT\n s2 = s.copy()\n s2.iloc[-1] = NaT\n c = Categorical(s2)\n\n expected = type(dtl)(s2.dropna())\n expected.freq = None\n\n tm.assert_index_equal(c.categories, expected)\n\n exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)\n tm.assert_numpy_array_equal(c.codes, exp)\n\n result = repr(c)\n assert \"NaT\" in result\n\n def test_constructor_from_index_series_datetimetz(self):\n idx = date_range('2015-01-01 10:00', freq='D', periods=3,\n tz='US/Eastern')\n result = Categorical(idx)\n tm.assert_index_equal(result.categories, idx)\n\n result = Categorical(Series(idx))\n tm.assert_index_equal(result.categories, idx)\n\n def test_constructor_from_index_series_timedelta(self):\n idx = timedelta_range('1 days', freq='D', periods=3)\n result = Categorical(idx)\n tm.assert_index_equal(result.categories, idx)\n\n result = Categorical(Series(idx))\n tm.assert_index_equal(result.categories, idx)\n\n def test_constructor_from_index_series_period(self):\n idx = period_range('2015-01-01', freq='D', periods=3)\n result = Categorical(idx)\n tm.assert_index_equal(result.categories, idx)\n\n result = Categorical(Series(idx))\n tm.assert_index_equal(result.categories, idx)\n\n def test_constructor_invariant(self):\n # GH 14190\n vals = [\n np.array([1., 1.2, 1.8, np.nan]),\n np.array([1, 2, 3], dtype='int64'),\n ['a', 'b', 'c', np.nan],\n [pd.Period('2014-01'), 
pd.Period('2014-02'), NaT],\n [Timestamp('2014-01-01'), Timestamp('2014-01-02'), NaT],\n [Timestamp('2014-01-01', tz='US/Eastern'),\n Timestamp('2014-01-02', tz='US/Eastern'), NaT],\n ]\n for val in vals:\n c = Categorical(val)\n c2 = Categorical(c)\n tm.assert_categorical_equal(c, c2)\n\n @pytest.mark.parametrize('ordered', [True, False])\n def test_constructor_with_dtype(self, ordered):\n categories = ['b', 'a', 'c']\n dtype = CategoricalDtype(categories, ordered=ordered)\n result = Categorical(['a', 'b', 'a', 'c'], dtype=dtype)\n expected = Categorical(['a', 'b', 'a', 'c'], categories=categories,\n ordered=ordered)\n tm.assert_categorical_equal(result, expected)\n assert result.ordered is ordered\n\n def test_constructor_dtype_and_others_raises(self):\n dtype = CategoricalDtype(['a', 'b'], ordered=True)\n with pytest.raises(ValueError, match=\"Cannot\"):\n Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)\n\n with pytest.raises(ValueError, match=\"Cannot\"):\n Categorical(['a', 'b'], ordered=True, dtype=dtype)\n\n with pytest.raises(ValueError, match=\"Cannot\"):\n Categorical(['a', 'b'], ordered=False, dtype=dtype)\n\n @pytest.mark.parametrize('categories', [\n None, ['a', 'b'], ['a', 'c'],\n ])\n @pytest.mark.parametrize('ordered', [True, False])\n def test_constructor_str_category(self, categories, ordered):\n result = Categorical(['a', 'b'], categories=categories,\n ordered=ordered, dtype='category')\n expected = Categorical(['a', 'b'], categories=categories,\n ordered=ordered)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_str_unknown(self):\n with pytest.raises(ValueError, match=\"Unknown `dtype`\"):\n Categorical([1, 2], dtype=\"foo\")\n\n def test_constructor_from_categorical_with_dtype(self):\n dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)\n values = Categorical(['a', 'b', 'd'])\n result = Categorical(values, dtype=dtype)\n # We use dtype.categories, not values.categories\n expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_from_categorical_with_unknown_dtype(self):\n dtype = CategoricalDtype(None, ordered=True)\n values = Categorical(['a', 'b', 'd'])\n result = Categorical(values, dtype=dtype)\n # We use values.categories, not dtype.categories\n expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_from_categorical_string(self):\n values = Categorical(['a', 'b', 'd'])\n # use categories, ordered\n result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,\n dtype='category')\n expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n # No string\n result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_with_categorical_categories(self):\n # GH17884\n expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])\n\n result = Categorical(\n ['a', 'b'], categories=Categorical(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n result = Categorical(\n ['a', 'b'], categories=CategoricalIndex(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n def test_from_codes(self):\n\n # too few categories\n with pytest.raises(ValueError):\n Categorical.from_codes([1, 2], [1, 2])\n\n # no int codes\n with pytest.raises(ValueError):\n 
Categorical.from_codes([\"a\"], [1, 2])\n\n # no unique categories\n with pytest.raises(ValueError):\n Categorical.from_codes([0, 1, 2], [\"a\", \"a\", \"b\"])\n\n # NaN categories included\n with pytest.raises(ValueError):\n Categorical.from_codes([0, 1, 2], [\"a\", \"b\", np.nan])\n\n # too negative\n with pytest.raises(ValueError):\n Categorical.from_codes([-2, 1, 2], [\"a\", \"b\", \"c\"])\n\n exp = Categorical([\"a\", \"b\", \"c\"], ordered=False)\n res = Categorical.from_codes([0, 1, 2], [\"a\", \"b\", \"c\"])\n tm.assert_categorical_equal(exp, res)\n\n # Not available in earlier numpy versions\n if hasattr(np.random, \"choice\"):\n codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])\n Categorical.from_codes(codes, categories=[\"train\", \"test\"])\n\n def test_from_codes_with_categorical_categories(self):\n # GH17884\n expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])\n\n result = Categorical.from_codes(\n [0, 1], categories=Categorical(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n result = Categorical.from_codes(\n [0, 1], categories=CategoricalIndex(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n # non-unique Categorical still raises\n with pytest.raises(ValueError):\n Categorical.from_codes([0, 1], Categorical(['a', 'b', 'a']))\n\n def test_from_codes_with_nan_code(self):\n # GH21767\n codes = [1, 2, np.nan]\n categories = ['a', 'b', 'c']\n with pytest.raises(ValueError):\n Categorical.from_codes(codes, categories)\n\n def test_from_codes_with_float(self):\n # GH21767\n codes = [1.0, 2.0, 0] # integer, but in float dtype\n categories = ['a', 'b', 'c']\n\n with tm.assert_produces_warning(FutureWarning):\n cat = Categorical.from_codes(codes, categories)\n tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype='i1'))\n\n codes = [1.1, 2.0, 0] # non-integer\n with pytest.raises(ValueError):\n Categorical.from_codes(codes, categories)\n\n @pytest.mark.parametrize('dtype', [None, 'category'])\n def test_from_inferred_categories(self, dtype):\n cats = ['a', 'b']\n codes = np.array([0, 0, 1, 1], dtype='i8')\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical.from_codes(codes, cats)\n tm.assert_categorical_equal(result, expected)\n\n @pytest.mark.parametrize('dtype', [None, 'category'])\n def test_from_inferred_categories_sorts(self, dtype):\n cats = ['b', 'a']\n codes = np.array([0, 1, 1, 1], dtype='i8')\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])\n tm.assert_categorical_equal(result, expected)\n\n def test_from_inferred_categories_dtype(self):\n cats = ['a', 'b', 'd']\n codes = np.array([0, 1, 0, 2], dtype='i8')\n dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical(['a', 'b', 'a', 'd'],\n categories=['c', 'b', 'a'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_from_inferred_categories_coerces(self):\n cats = ['1', '2', 'bad']\n codes = np.array([0, 0, 1, 2], dtype='i8')\n dtype = CategoricalDtype([1, 2])\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical([1, 1, 2, np.nan])\n tm.assert_categorical_equal(result, expected)\n\n def test_construction_with_ordered(self):\n # GH 9347, 9190\n cat = Categorical([0, 1, 2])\n assert not cat.ordered\n cat = Categorical([0, 1, 2], ordered=False)\n assert not cat.ordered\n cat = 
Categorical([0, 1, 2], ordered=True)\n assert cat.ordered\n\n @pytest.mark.xfail(reason=\"Imaginary values not supported in Categorical\")\n def test_constructor_imaginary(self):\n values = [1, 2, 3 + 1j]\n c1 = Categorical(values)\n tm.assert_index_equal(c1.categories, Index(values))\n tm.assert_numpy_array_equal(np.array(c1), np.array(values))\n"
] |
[
[
"pandas.core.config.option_context",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.util._validators.validate_kwargs",
"pandas.core.sorting.get_group_index_sorter",
"pandas.core.base.DataError",
"numpy.concatenate",
"pandas.core.dtypes.missing.isna",
"pandas.core.index.Index",
"pandas.compat.callable",
"numpy.empty",
"pandas.core.dtypes.missing.notna",
"pandas.compat.iteritems",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.nonzero",
"pandas.core.common._pipe",
"numpy.arange",
"pandas.util._decorators.Appender",
"pandas.util._decorators.Substitution",
"pandas.errors.AbstractMethodError",
"pandas.compat.numpy.function.validate_groupby_func",
"numpy.array",
"pandas.core.resample.get_resampler_for_grouping",
"pandas.core.dtypes.common.is_scalar",
"numpy.zeros",
"pandas.core.window.RollingGroupby",
"pandas.core.groupby.grouper._get_grouper",
"pandas.core.common._not_none",
"pandas.core.dtypes.common.ensure_float",
"pandas.core.window.ExpandingGroupby",
"numpy.asarray",
"numpy.minimum.accumulate",
"numpy.errstate",
"pandas.compat.zip",
"numpy.maximum.accumulate",
"pandas.core.series.Series",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.compat.set_function_name",
"pandas._libs.Timestamp",
"numpy.repeat",
"pandas.core.algorithms.take_nd",
"pandas.core.reshape.concat.concat"
],
[
"numpy.random.choice",
"pandas.DatetimeIndex",
"pandas.CategoricalIndex",
"pandas.core.dtypes.dtypes.CategoricalDtype",
"pandas.Timestamp",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.period_range",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_categorical_equal",
"pandas.Interval",
"pandas.util.testing.assert_index_equal",
"pandas.timedelta_range",
"numpy.arange",
"pandas.util.testing.assert_produces_warning",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.Period",
"numpy.array",
"pandas.Categorical.from_codes",
"pandas.Categorical._from_inferred_categories",
"pandas.Index",
"pandas.Int64Index",
"pandas.date_range",
"pandas.Categorical",
"pandas.Series"
]
] |
vctrop/ant_colony_for_continuous_domains
|
[
"a109abfca35be4d0453c7e01f6f755c11ae09473"
] |
[
"acor_plots.py"
] |
[
"#!python3\n\n# Copyright (C) 2020 Victor O. Costa\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n \n# Python standar lib\nimport math\n# 3rth party\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Own\nfrom base_metaheuristic import Base\n\nclass ACOr(Base):\n \"\"\" Class for the Ant Colony Optimization for Continuous Domains, following (Socha and Dorigo, 2006) \"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n # Define verbosity and NULL problem definition\n super().__init__()\n \n # Initial algorithm parameters\n self.relative_iterations = None # Array containing the iterations at which best solutions are reported\n self.num_iter = 0 # Number of iterations\n self.pop_size = 5 # Population size\n self.k = 50 # Archive size\n self.q = 0.01 # Locality of search (selection of pivot ants)\n self.xi = 0.85 # Speed of convergence (spreadness of ant generation)\n \n # Optimization results\n self.SA = None # Solution Archive\n self.best_solution = None # Best solution of the archive\n \n\n def set_parameters(self, pop_size, k, q, xi, function_evaluations_array):\n \"\"\" Define values for the parameters used by the algorithm \"\"\"\n # Input error checking\n if len(function_evaluations_array) == 0:\n print(\"Error, objective function evaluation array must not be empty\")\n exit(-1)\n if pop_size <= 0 or k <= 0 or q <= 0 or xi <= 0:\n print(\"Error, parameters must be non-null positives\")\n exit(-1)\n \n \n # Number of function evaluations for ACOr: pop_size * num_iterations\n function_evaluations_array = np.array(function_evaluations_array)\n self.relative_iterations = (function_evaluations_array - k) / pop_size\n all_divisible = (np.array([x.is_integer() for x in self.relative_iterations])).all()\n if not all_divisible:\n print(\"Error, at least one number of function evaluations subtracted by k is not divisible by population size m\")\n exit(-1)\n \n self.num_iter = int(np.max(self.relative_iterations))\n self.pop_size = pop_size\n self.k = k\n self.q = q\n self.xi = xi\n\n \n def define_variables(self, initial_ranges, is_bounded):\n \"\"\" Defines the number of variables, their initial values ranges and wether or not these ranges constrain the variable during the search \"\"\"\n # Input error checking\n if self.num_iter == 0:\n print(\"Error, please set algorithm parameters before variables definition\")\n exit(-1)\n if len(initial_ranges) == 0 or len(is_bounded) == 0:\n print(\"Error, initial_ranges and is_bounded lists must not be empty\")\n exit(-1)\n if len(initial_ranges) != len(is_bounded):\n print(\"Error, the number of variables for initial_ranges and is_bounded must be equal\")\n exit(-1)\n \n self.num_variables = len(initial_ranges)\n self.initial_ranges = initial_ranges\n self.is_bounded = is_bounded\n self.SA = np.zeros((self.k, self.num_variables + 1))\n\n \n def _biased_selection(self, probabilities):\n \"\"\" Returns an index based on a set of probabilities (also known as 
roulette wheel selection in GA) \"\"\"\n        r = np.random.uniform(0, sum(probabilities))\n        for i, f in enumerate(probabilities):\n            r -= f\n            if r <= 0:\n                return i\n     \n\n    def update_success_rate(self, success_count):\n        \"\"\" Success rate is not updated in vanilla ACOr \"\"\"\n        pass\n    \n    def control_xi(self):\n        \"\"\" Xi is not updated in vanilla ACOr \"\"\"\n        pass\n    \n    def control_q(self):\n        \"\"\" q is not updated in vanilla ACOr \"\"\"\n        pass\n    \n    def gaussian_pdf_weights(self, x):\n        gaus_std = self.q * self.k\n        gaus_avg = 1\n        w = (1 / (gaus_std * math.sqrt(2 * math.pi))) * np.exp( (-1/2) * ( ( (x - gaus_avg) / gaus_std ) ** 2) )\n        \n        return w\n        \n    def handle_adaptions(self, success_count):\n        self.update_success_rate(success_count)\n        self.control_q()\n        self.control_xi()\n        \n    def optimize(self):\n        \"\"\" Initializes the archive and enters the main loop, until it reaches the maximum number of iterations \"\"\"\n        # Error checking\n        if self.num_variables == None:\n            print(\"Error, number of variables and their boundaries must be defined prior to optimization\")\n            exit(-1)\n        if self.cost_function == None:\n            print(\"Error, cost function must be defined prior to optimization\")\n            exit(-1)\n        \n        # Keep solutions defined by function_evaluations_array\n        recorded_solutions = []\n        q_list = []\n        xi_list = []\n        # Initialize the archive by random sampling, respecting each variable's boundaries   \n        if self.verbosity:   print(\"[INITIALIZING SOLUTION ARCHIVE]\")\n        pop = np.zeros((self.pop_size, self.num_variables + 1))\n        w = np.zeros(self.k)\n        \n        for i in range(self.k):\n            for j in range(self.num_variables): \n                self.SA[i, j] = np.random.uniform(self.initial_ranges[j][0], self.initial_ranges[j][1])        # Initialize solution archive randomly\n            self.SA[i, -1] = self.cost_function(self.SA[i, 0:self.num_variables])                              # Get initial cost for each solution\n        self.SA = self.SA[self.SA[:, -1].argsort()]                                                            # Sort solution archive (best solutions first)\n        \n        # Array containing indices of solution archive position\n        x = np.linspace(1,self.k,self.k) \n        w = self.gaussian_pdf_weights(x)                                # Weights as a gaussian function of rank with mean 1, std qk\n        p = w/sum(w)                                                    \n        \n        if self.verbosity:   print(\"ALGORITHM MAIN LOOP\")\n        # Algorithm runs until it reaches the determined number of iterations\n        for iteration in range(self.num_iter):\n            if self.verbosity:\n                print(\"[%d]\" % iteration)\n                print(self.SA[0, :])\n            \n            success_count = 0                                       # Count how many ants improve the solution they are sampling from    \n            Mi = self.SA[:, 0:self.num_variables]                   # Matrix of means\n            for ant in range(self.pop_size):                        # For each ant in the population\n                l = self._biased_selection(p)                       # Select solution of the SA to sample from based on probabilities p\n                # Compute average distances from the chosen solution to other solutions\n                # Used as standard deviation of solution generation\n                sigmas_array = self.xi * np.sum(np.abs(self.SA[:,:-1] - self.SA[l, :-1]), axis = 0) / (self.k - 1)\n                \n                for var in range(self.num_variables):\n                    sigma = sigmas_array[var]\n                    pop[ant, var] = np.random.normal(Mi[l, var], sigma)         # Sample from normal distribution with mean Mi and st. dev. 
sigma\n \n # Search space boundaries violation is only dealt with when the variable is considered bounded (self.is_bounded)\n if self.is_bounded[var]:\n # Use the hard border strategy\n if pop[ant, var] < self.initial_ranges[var][0]:\n pop[ant, var] = self.initial_ranges[var][0]\n elif pop[ant, var] > self.initial_ranges[var][1]:\n pop[ant, var] = self.initial_ranges[var][1] \n \n # Use the random position strategy\n # if pop[ant, var] < self.initial_ranges[var][0] or pop[ant, var] > self.initial_ranges[var][1]: \n # pop[ant, var] = np.random.uniform(self.initial_ranges[var][0], self.initial_ranges[var][1])\n \n # Evaluate cost of new solution\n pop[ant, -1] = self.cost_function(pop[ant, 0:self.num_variables]) \n \n # Check if the new solution is better than the one the ant sampled from\n if pop[ant, -1] < self.SA[l, -1]:\n success_count += 1\n \n # Compute success rate, updates xi and q (No effect in vanilla ACOr)\n self.handle_adaptions(success_count)\n q_list.append(self.q)\n xi_list.append(self.xi)\n # Append new solutions to the Archive\n self.SA = np.append(self.SA, pop, axis = 0) \n # Update PDF from which ants sample their centers, according to updates in q parameter\n w = self.gaussian_pdf_weights(x) # Weights as a gaussian function of rank with mean 1, std qk\n p = w/sum(w) # Probabilities of selecting solutions as search guides\n \n # Sort solution archive according to the fitness of each solution\n self.SA = self.SA[self.SA[:, -1].argsort()] \n # Remove worst solutions\n self.SA = self.SA[0:self.k, :] \n # Extract current best solution\n self.best_solution = np.array(self.SA[0, :])\n if (self.relative_iterations - 1 == iteration).any():\n recorded_solutions.append(np.array(self.best_solution))\n \n \n return q_list, xi_list, np.array(recorded_solutions)\n \n## The following classes show that the idea of exploration/exploitation adaption based in the success rate of the swarm in AIWPS (Nickabadi et al., 2011) can be applied to ACOr, and possibly many other swarm-based metaheuristics.\n\n# Success rate adaptive ACOr \nclass SRAACOr(ACOr):\n \"\"\" Parent class of all adaptive versions of ACOr.\"\"\"\n \n def __init__(self):\n \"\"\" Constructor \"\"\"\n super().__init__()\n self.success_rate = None\n self.min = {'q' : None,\n 'xi': None}\n self.max = {'q' : None,\n 'xi': None}\n self.map_type = {'q' : None,\n 'xi': None}\n \n self.lin_a = {'q' : None,\n 'xi': None}\n self.lin_b = {'q' : None,\n 'xi': None}\n \n self.sig_K = 2\n self.sig_Q = {'q' : None,\n 'xi': None}\n self.sig_B = {'q' : None,\n 'xi': None}\n \n self.exp_A = {'q' : None,\n 'xi': None}\n self.exp_B = {'q' : None,\n 'xi': None}\n \n \n def update_success_rate(self, success_count):\n \"\"\" Returns the success rate of the swarm at a given iteration,\n considering how many ants generated better solutions than the solutions they sampled from \"\"\"\n self.success_rate = success_count / self.pop_size\n \n \n def parameterize_map(self, parameter):\n if not isinstance(parameter, str) or (parameter != 'q' and parameter != 'xi'):\n print('Parameter must be a string equal to \\'q\\' or \\'xi\\'')\n exit(-1)\n \n if self.map_type[parameter] == 'lin':\n self.lin_a[parameter] = self.max[parameter] - self.min[parameter]\n self.lin_b[parameter] = self.min[parameter]\n elif self.map_type[parameter] == 'sig':\n self.sig_Q[parameter] = (self.sig_K - self.min[parameter]) / self.min[parameter]\n self.sig_B[parameter] = math.log( (self.max[parameter] / (self.sig_K - self.max[parameter])) * self.sig_Q[parameter])\n else:\n 
self.exp_A[parameter] = self.min[parameter]\n            self.exp_B[parameter] = math.log( self.max[parameter] / self.min[parameter] )\n    \n    \n    def evaluate_map(self, parameter, x):\n        if not isinstance(parameter, str) or (parameter != 'q' and parameter != 'xi'):\n            print('Parameter must be a string equal to \\'q\\' or \\'xi\\'')\n            exit(-1)\n            \n        if self.map_type[parameter] == None:\n            print('Please first define the map type of ' + parameter)\n            exit(-1)\n        \n        # Linear map\n        if self.map_type[parameter] == 'lin':\n            if self.lin_a[parameter] == None or self.lin_b[parameter] == None:\n                print('Error, first parameterize the line')\n                exit(-1)\n            y = self.lin_a[parameter] * x + self.lin_b[parameter]\n        # Sigmoidal map\n        elif self.map_type[parameter] == 'sig':\n            if self.sig_Q[parameter] == None or self.sig_B[parameter] == None:\n                print('Error, first parameterize the sigmoid')\n                exit(-1)\n            y = self.sig_K / (1 + self.sig_Q[parameter] * math.exp(- self.sig_B[parameter] * x))\n        # Exponential map\n        else:\n            if self.exp_A[parameter] == None or self.exp_B[parameter] == None:\n                print('Error, first parameterize the exponential')\n                exit(-1)\n            y = self.exp_A[parameter] * math.exp( self.exp_B[parameter] * x )\n        return y\n    \n    \n# Adaptive elitism level ACOr\nclass AELACOr(SRAACOr):\n    \"\"\" Adaptive control of the q parameter \"\"\"\n    def __init__(self):\n        \"\"\" Constructor \"\"\"\n        super().__init__()\n        \n    def set_parameters(self, pop_size, k, xi, min_q, max_q, map_type, function_evaluations_array):\n        \"\"\" Define values for the parameters used by the algorithm \"\"\"\n        # Input error checking\n        if min_q > max_q:\n            print('Error, maximum q must be greater than minimum q')\n            exit(-1)\n        if min_q <= 0:\n            print('Error, minimum q must be greater than zero')\n            exit(-1)\n        if not isinstance(map_type, str):\n            print('Error, map from success rate to q must be a string')\n            exit(-1)\n        if map_type != 'lin' and map_type != 'sig' and map_type != 'exp':\n            print('Error, map type must be \\'lin\\', \\'sig\\' or \\'exp\\'')\n            exit(-1)\n        if map_type == 'sig' and max_q >= self.sig_K:\n            print('Error, maximum q must be less than sigmoid K = ' + str(self.sig_K))\n            exit(-1)\n            \n        # Parameter setting from ACOr class\n        super().set_parameters(pop_size, k, max_q, xi, function_evaluations_array)    \n\n        # Parameterize control curve\n        self.min['q'] = min_q\n        self.max['q'] = max_q\n        self.map_type['q'] = map_type\n        self.parameterize_map('q')\n        \n        \n    def control_q(self):\n        \"\"\" Use population success rate to update q \"\"\"\n        if self.success_rate == None:\n            print(\"Error, compute success rate before updating q\")\n            exit(-1)\n       \n        # Compute new q, directly proportional (linearity or not) to the success rate\n        self.q = self.evaluate_map('q', self.success_rate)\n        \n        \n# Adaptive generation dispersion ACOr\nclass AGDACOr(SRAACOr):\n    \"\"\" Adaptive control of the xi parameter \"\"\"\n    \n    def __init__(self):\n        \"\"\" Constructor \"\"\"\n        super().__init__()\n        \n    def set_parameters(self, pop_size, k, q, min_xi, max_xi, map_type, function_evaluations_array):\n        \"\"\" Define values for the parameters used by the algorithm \"\"\"\n        # Input error checking\n        if min_xi > max_xi:\n            print('Error, maximum xi must be greater than minimum xi')\n            exit(-1)\n        if min_xi <= 0:\n            print('Error, minimum xi must be greater than zero')\n            exit(-1)\n        if not isinstance(map_type, str):\n            print('Error, map from success rate to xi must be a string')\n            exit(-1)\n        if map_type != 'lin' and map_type != 'sig' and map_type != 'exp':\n            print('Error, map type must be \\'lin\\', \\'sig\\' or \\'exp\\'')\n            exit(-1)\n        if 
map_type == 'sig' and max_xi >= self.sig_K:\n            print('Error, maximum xi must be less than sigmoid K = ' + str(self.sig_K))\n            exit(-1)\n            \n        # Parameter setting from ACOr class\n        super().set_parameters(pop_size, k, q, max_xi, function_evaluations_array)    \n\n        # Minimum and maximum of adaptive xi\n        # Parameterize control curve\n        self.min['xi'] = min_xi\n        self.max['xi'] = max_xi\n        self.map_type['xi'] = map_type\n        self.parameterize_map('xi')\n        \n    def control_xi(self):\n        \"\"\" Use population success rate to update Xi \"\"\"\n        if self.success_rate == None:\n            print(\"Error, compute success rate before updating xi\")\n            exit(-1)\n       \n        # Compute new xi, inversely proportional (linearity or not) to the success rate\n        self.xi = self.evaluate_map('xi', (1 - self.success_rate))\n\n        \n# Bi-adaptive ACOr\nclass BAACOr(SRAACOr):\n    \"\"\" Adaptive control of both the q and xi parameters \"\"\"\n    \n    def __init__(self):\n        \"\"\" Constructor \"\"\"\n        super().__init__()\n\n        \n    def set_parameters(self, pop_size, k, min_q, max_q, min_xi, max_xi, q_map_type, xi_map_type, function_evaluations_array):\n        \"\"\" Define values for the parameters used by the algorithm \"\"\"\n        # Input error checking\n        if min_xi > max_xi or min_q > max_q:\n            print('Error, maximum parameters must be greater than minimum ones')\n            exit(-1)\n        if min_xi <= 0 or min_q <= 0:\n            print('Error, minimum parameters must be greater than zero')\n            exit(-1)\n        if not isinstance(q_map_type, str) or not isinstance(xi_map_type, str):\n            print('Error, maps from success rate to parameters must be strings')\n            exit(-1)\n        if (q_map_type != 'lin' and q_map_type != 'sig' and q_map_type != 'exp') or (xi_map_type != 'lin' and xi_map_type != 'sig' and xi_map_type != 'exp'):\n            print('Error, map types must be \\'lin\\', \\'sig\\' or \\'exp\\'')\n            exit(-1)\n        if (q_map_type == 'sig' and max_q >= self.sig_K) or (xi_map_type == 'sig' and max_xi >= self.sig_K):\n            print('Error, maximum parameter values must be less than sigmoid K = ' + str(self.sig_K))\n            exit(-1)\n        \n        # Parameter setting from ACOr class\n        super().set_parameters(pop_size, k, max_q, max_xi, function_evaluations_array)\n\n        # Parameterize xi control curve\n        self.min['xi'] = min_xi\n        self.max['xi'] = max_xi\n        self.map_type['xi'] = xi_map_type\n        self.parameterize_map('xi')\n        # Parameterize q control curve\n        self.min['q'] = min_q\n        self.max['q'] = max_q\n        self.map_type['q'] = q_map_type\n        self.parameterize_map('q')\n        \n        \n    def control_xi(self):\n        \"\"\" Use population success rate to update Xi \"\"\"\n        if self.success_rate == None:\n            print(\"Error, first compute success rate\")\n            exit(-1)\n       \n        # Compute new xi\n        self.xi = self.evaluate_map('xi', (1 - self.success_rate))\n        \n        \n    def control_q(self):\n        \"\"\" Use population success rate to update q \"\"\"\n        if self.success_rate == None:\n            print(\"Error, first compute success rate\")\n            exit(-1)\n       \n        # Compute new q\n        self.q = self.evaluate_map('q', self.success_rate)\n        "
] |
[
[
"numpy.max",
"numpy.random.normal",
"numpy.array",
"numpy.zeros",
"numpy.exp",
"numpy.random.uniform",
"numpy.abs",
"numpy.append",
"numpy.linspace"
]
] |
rd11490/owl-map-score-added
|
[
"80ce7e6a08d015a8890253ef2f31fd67213a7868"
] |
[
"map_score.py"
] |
[
"import pandas as pd\nfrom utils.constants import Maps, total_escort_map_distance, total_map_time, calc_map_type, time_to_add\nfrom utils.utils import calc_match_date, calc_season\n\n# Some readability options for pandas print statements\npd.set_option('display.max_columns', 500)\npd.set_option('display.max_rows', 200)\npd.set_option('display.width', 1000)\n\n# Read in our match_map_stats csv file\nframe = pd.read_csv('./map_data/match_map_stats.csv')\n\n# Determine the map type, match date, and season for every map played\nframe['map_type'] = frame['map_name'].apply(calc_map_type)\nframe['match_date'] = frame['round_end_time'].apply(calc_match_date)\nframe['season'] = frame['round_end_time'].apply(calc_season)\n\n# Split our dataframe into four different frames. Each frame will only contain maps for it's specific map type\nescort_maps = frame[frame['map_type'] == Maps.Escort]\nassault_maps = frame[frame['map_type'] == Maps.Assault]\ncontrol_maps = frame[frame['map_type'] == Maps.Control]\nhybrid_maps = frame[frame['map_type'] == Maps.Hybrid]\n\n\n###############################\n# Calculate Assault map score #\n###############################\n# The basic idea behind our calculation for map score is\n# \"How many times could you complete the map in at the rate at which you initially completed the map\".\n# Unfortunately OWL data does not give us partial capture percentage, so we only get an integer N\n# which represents how many control points a team captured.\ndef calculate_assault_map_score(group):\n # I am limiting this analysis to the intial map parameters (2 rounds) and ignore any tie breaker/overtime scenarios.\n row = group[group['map_round'] == 2]\n # There is some old (bad) data in the dataset that needs to be cleaned. This line cleans that for us.\n if row.empty:\n row = group[group['map_round'] == group['map_round'].max()]\n\n # Break out attacker and defender into team 1 and team 2\n team_one = row['attacker'].values[0]\n team_two = row['defender'].values[0]\n\n # Pull out the number of points each team captured\n team_one_points = row['attacker_round_end_score'].values[0]\n team_two_points = row['defender_round_end_score'].values[0]\n\n # Pull out the amount of team each team banked if they completed the map\n team_one_time_banked = row['attacker_time_banked'].values[0]\n team_two_time_banked = row['defender_time_banked'].values[0]\n\n # When determining how much time each team had available we need to pull out the number of points they captured.\n # We can calculate that based on the rule set for the map type.\n # For Assault: 4 Minutes to attack point 1, an additional 4 minutes to attack point 2\n team_one_points_for_time = team_one_points\n team_two_points_for_time = team_two_points\n\n # There is an important exception here. 
If the winning team does not full cap the map, the number of points\n    # they are given credit for is 1 more than they had actually capped.\n    # We need to subtract that additional point from their score to properly account for how much time the team used.\n    # Because we are always taking the second row (after both teams have attacked)\n    # we do not need to account for time banked if team 1 is the winner as they are the second attacker.\n    if row['map_winner'].values[0] == team_one:\n        team_one_points_for_time -= 1\n    elif row['map_winner'].values[0] == team_two and team_two_time_banked > 0.0:\n        team_two_points_for_time -= 1\n\n    team_one_total_time = total_map_time(Maps.Assault, team_one_points_for_time)\n    team_two_total_time = total_map_time(Maps.Assault, team_two_points_for_time)\n\n    # Now that we know how much time each team had to attack, how much time they banked,\n    # and how many points they captured, we can calculate their cap rate.\n    team_one_rate = team_one_points / (team_one_total_time - team_one_time_banked)\n    team_two_rate = team_two_points / (team_two_total_time - team_two_time_banked)\n\n    # If the team banked time, we want to give them credit for it. We do this by applying their cap rate to their banked\n    # time to estimate how many points they could have capped if they kept their current rate.\n    if team_one_time_banked > 0.0:\n        team_one_score = (team_one_rate * team_one_time_banked) + team_one_points\n    else:\n        team_one_score = team_one_points\n\n    if team_two_time_banked > 0.0:\n        team_two_score = (team_two_rate * team_two_time_banked) + team_two_points\n    else:\n        team_two_score = team_two_points\n\n    # Finally we want to divide each team's score by the total number of possible points in order to get a\n    # % map completion estimate.\n    team_one_score = team_one_score / 2\n    team_two_score = team_two_score / 2\n\n    return pd.Series({\n        'map_name': row['map_name'].values[0],\n        'map_type': row['map_type'].values[0],\n        'map_winner': row['map_winner'].values[0],\n        'match_date': row['match_date'].values[0],\n        'team_one_name': team_one,\n        'team_two_name': team_two,\n        'team_one_score': team_one_score * 100,\n        'team_two_score': team_two_score * 100,\n        'season': row['season'].values[0]\n    })\n\n\n###############################\n# Calculate Payload map score #\n###############################\n# The basic idea behind our calculation for map score for escort and hybrid maps is\n# \"What percentage of an escort map could a team complete at the rate at which they pushed the payload initially\".\n# We can do this by calculating the total distance the payload traveled, adding any additional distance using\n# the time banked and the rate at which the team pushed the payload, and dividing by the total distance for the map.\n\ndef calculate_payload_map_score(group):\n    # I am limiting this analysis to the initial map parameters (2 rounds) and ignoring any tie breaker/overtime scenarios.\n    row = group[group['map_round'] == 2]\n    # There is some old (bad) data in the dataset that needs to be cleaned. 
This line cleans that for us.\n if row.empty:\n row = group[group['map_round'] == group['map_round'].max()]\n\n # Pull out the map name\n map_name = row['map_name'].values[0]\n\n # Break out attacker and defender into team 1 and team 2\n team_one = row['attacker'].values[0]\n team_two = row['defender'].values[0]\n\n # Pull out how many points each team was given credit for capping\n team_one_points = row['attacker_round_end_score'].values[0]\n team_two_points = row['defender_round_end_score'].values[0]\n\n # Pull out how much time each team banked if they finished the map\n team_one_time_banked = row['attacker_time_banked'].values[0]\n team_two_time_banked = row['defender_time_banked'].values[0]\n\n # pull out how much distance each team traveled past their final capture point (if they did not complete the map)\n team_one_distance = row['attacker_payload_distance'].values[0]\n team_two_distance = row['defender_payload_distance'].values[0]\n\n # There is an important exception here. If the winning team does not full cap the map, the number of points\n # they are given credit for is 1 more than they had actually capped.\n # We need to subtract that additional point to properly account for how much time the team used\n # and how far they actually pushed the payload. We also need to account for the case of a tie. If the team full\n # caps the map, we do not need to include that value as we are already\n # adding the distance the team traveled at that point.\n team_one_points_for_distance = team_one_points\n team_one_points_for_time = team_one_points\n\n team_two_points_for_distance = team_two_points\n team_two_points_for_time = team_two_points\n\n if team_one_points == 3:\n team_one_points_for_distance -= 1\n if team_two_points == 3:\n team_two_points_for_distance -= 1\n\n # We need to account for hybrid maps where a team full holds and then wins by capping first.\n # We also need to account for when team 1 prevents team two from finishing the map and then wins by\n # matching distance\n if (team_one_points == 1 and team_one_distance == 0) or (row['map_winner'].values[0] == team_one):\n team_one_points_for_time -= 1\n\n if (team_two_points == 1 and team_two_distance == 0) or (\n row['map_winner'].values[0] == team_two and team_two_time_banked > 0.0):\n team_two_points_for_time -= 1\n\n # We add the distance up until the previous capped point and the distance traveled at the current point together\n # to get the total distance traveled\n team_one_total_distance = total_escort_map_distance(map_name, team_one_points_for_distance) + team_one_distance\n team_two_total_distance = total_escort_map_distance(map_name, team_two_points_for_distance) + team_two_distance\n\n # We need to calculate the total amount of time each team had on their push.\n team_one_total_time = total_map_time(Maps.Escort, team_one_points_for_time)\n team_two_total_time = total_map_time(Maps.Escort, team_two_points_for_time)\n\n # Calculate the rate at which the attacking team pushed the payload\n team_one_rate = team_one_total_distance / (team_one_total_time - team_one_time_banked)\n team_two_rate = team_two_total_distance / (team_two_total_time - team_two_time_banked)\n\n # If the team banked time, we want to give them credit for it. 
We do this by applying their cap rate to their banked\n    # time to estimate how much farther they could have pushed the payload had they continued at their current rate.\n    if team_one_time_banked > 0.0:\n        team_one_score = team_one_total_distance + (team_one_rate * team_one_time_banked)\n    else:\n        team_one_score = team_one_total_distance\n\n    if team_two_time_banked > 0.0:\n        team_two_score = team_two_total_distance + (team_two_rate * team_two_time_banked)\n    else:\n        team_two_score = team_two_total_distance\n\n    # Finally we normalize by total map distance in order to get to map completion percentage\n    total_map_distance = total_escort_map_distance(map_name, 3)\n    team_one_score = team_one_score / total_map_distance\n    team_two_score = team_two_score / total_map_distance\n\n    return pd.Series({\n        'map_name': map_name,\n        'map_type': row['map_type'].values[0],\n        'map_winner': row['map_winner'].values[0],\n        'match_date': row['match_date'].values[0],\n        'team_one_name': team_one,\n        'team_two_name': team_two,\n        'team_one_score': team_one_score * 100,\n        'team_two_score': team_two_score * 100,\n        'season': row['season'].values[0]\n    })\n\n\n###############################\n# Calculate control map score #\n###############################\n# Control maps are \"easy\" to score because each team is able to get a control percentage.\n# Convert the percentage to a decimal and use it as the map score\ndef calculate_control_map_score(group):\n    # Break out attacker and defender into team 1 and team 2\n    team_one = group['attacker'].values[0]\n    team_two = group['defender'].values[0]\n\n    # Pull out each team's total control percentage across rounds and halve it to get their map score\n    team_one_score = group['attacker_control_perecent'].sum()/2\n    team_two_score = group['defender_control_perecent'].sum()/2\n\n\n    return pd.Series({\n        'map_name': group['map_name'].values[0],\n        'map_type': group['map_type'].values[0],\n        'map_winner': group['map_winner'].values[0],\n        'match_date': group['match_date'].values[0],\n        'team_one_name': team_one,\n        'team_two_name': team_two,\n        'team_one_score': team_one_score,\n        'team_two_score': team_two_score,\n        'season': group['season'].values[0]\n    })\n\n# Finally we need to apply our scoring functions to each dataframe of map types,\n# and merge them all back together as a frame of scored maps\ncontrol_maps_score = control_maps.groupby(by=['match_id', 'game_number']).apply(calculate_control_map_score).reset_index()\nassault_scores = assault_maps.groupby(by=['match_id', 'game_number']).apply(calculate_assault_map_score).reset_index()\nescort_maps_score = escort_maps.groupby(by=['match_id', 'game_number']).apply(calculate_payload_map_score).reset_index()\nhybrid_maps_score = hybrid_maps.groupby(by=['match_id', 'game_number']).apply(calculate_payload_map_score).reset_index()\n\nscored_maps = pd.concat([control_maps_score, hybrid_maps_score, escort_maps_score, assault_scores])\n\nscored_maps.to_csv('results/scored_maps.csv', index=False)\n\nprint(scored_maps)\n"
] |
[
[
"pandas.read_csv",
"pandas.Series",
"pandas.set_option",
"pandas.concat"
]
] |
iaqos/ancona
|
[
"f9beefb966c2c98920bc7309d3b52df929082312"
] |
[
"generator.py"
] |
[
"#!/usr/bin/env python3\n# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)\n\"\"\"\n\n\nimport argparse\nimport logging\nimport json\nimport numpy as np\nimport torch\n\nfrom transformers import (\n CTRLLMHeadModel,\n CTRLTokenizer,\n GPT2LMHeadModel,\n GPT2Tokenizer,\n OpenAIGPTLMHeadModel,\n OpenAIGPTTokenizer,\n TransfoXLLMHeadModel,\n TransfoXLTokenizer,\n XLMTokenizer,\n XLMWithLMHeadModel,\n XLNetLMHeadModel,\n XLNetTokenizer,\n)\n\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", datefmt=\"%m/%d/%Y %H:%M:%S\", level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\nMAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop\n\nMODEL_CLASSES = {\n \"gpt2\": (GPT2LMHeadModel, GPT2Tokenizer),\n \"ctrl\": (CTRLLMHeadModel, CTRLTokenizer),\n \"openai-gpt\": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),\n \"xlnet\": (XLNetLMHeadModel, XLNetTokenizer),\n \"transfo-xl\": (TransfoXLLMHeadModel, TransfoXLTokenizer),\n \"xlm\": (XLMWithLMHeadModel, XLMTokenizer),\n}\n\n# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia\n# in https://github.com/rusiaaman/XLNet-gen#methodology\n# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e\nPADDING_TEXT = \"\"\"In 1991, the remains of Russian Tsar Nicholas II and his family\n(except for Alexei and Maria) are discovered.\nThe voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the\nremainder of the story. 1883 Western Siberia,\na young Grigori Rasputin is asked by his father and a group of men to perform magic.\nRasputin has a vision and denounces one of the men as a horse thief. Although his\nfather initially slaps him for making such an accusation, Rasputin watches as the\nman is chased outside and beaten. Twenty years later, Rasputin sees a vision of\nthe Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,\nwith people, even a bishop, begging for his blessing. <eod> </s> <eos>\"\"\"\n\n\ndef set_seed(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\n#\n# Functions to prepare models' input\n#\n\n\ndef prepare_ctrl_input(args, _, tokenizer, prompt_text):\n if args.temperature > 0.7:\n logger.info(\"CTRL typically works better with lower temperatures (and lower top_k).\")\n\n encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False)\n if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()):\n logger.info(\"WARNING! 
You are not starting your generation from a control code so you won't get good results\")\n return prompt_text\n\n\ndef prepare_xlm_input(args, model, tokenizer, prompt_text):\n # kwargs = {\"language\": None, \"mask_token_id\": None}\n\n # Set the language\n use_lang_emb = hasattr(model.config, \"use_lang_emb\") and model.config.use_lang_emb\n if hasattr(model.config, \"lang2id\") and use_lang_emb:\n available_languages = model.config.lang2id.keys()\n if args.xlm_language in available_languages:\n language = args.xlm_language\n else:\n language = None\n while language not in available_languages:\n language = input(\"Using XLM. Select language in \" + str(list(available_languages)) + \" >>> \")\n\n model.config.lang_id = model.config.lang2id[language]\n # kwargs[\"language\"] = tokenizer.lang2id[language]\n\n # TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers\n # XLM masked-language modeling (MLM) models need masked token\n # is_xlm_mlm = \"mlm\" in args.model_name_or_path\n # if is_xlm_mlm:\n # kwargs[\"mask_token_id\"] = tokenizer.mask_token_id\n\n return prompt_text\n\n\ndef prepare_xlnet_input(args, _, tokenizer, prompt_text):\n prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text\n return prompt_text\n\n\ndef prepare_transfoxl_input(args, _, tokenizer, prompt_text):\n prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text\n return prompt_text\n\n\nPREPROCESSING_FUNCTIONS = {\n \"ctrl\": prepare_ctrl_input,\n \"xlm\": prepare_xlm_input,\n \"xlnet\": prepare_xlnet_input,\n \"transfo-xl\": prepare_transfoxl_input,\n}\n\n\ndef adjust_length_to_model(length, max_sequence_length):\n if length < 0 and max_sequence_length > 0:\n length = max_sequence_length\n elif 0 < max_sequence_length < length:\n length = max_sequence_length # No generation bigger than model size\n elif length < 0:\n length = MAX_LENGTH # avoid infinite loop\n return length\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n\n parser.add_argument(\"--prompt\", type=str, default=\"\")\n parser.add_argument(\"--length\", type=int, default=20)\n parser.add_argument(\"--stop_token\", type=str, default=None, help=\"Token at which text generation is stopped\")\n\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=1.0,\n help=\"temperature of 1.0 has no effect, lower tend toward greedy sampling\",\n )\n parser.add_argument(\n \"--repetition_penalty\", type=float, default=1.0, help=\"primarily useful for CTRL model; in that case, use 1.2\"\n )\n parser.add_argument(\"--k\", type=int, default=0)\n parser.add_argument(\"--p\", type=float, default=0.9)\n\n parser.add_argument(\"--padding_text\", type=str, default=\"\", help=\"Padding text for Transfo-XL and XLNet.\")\n parser.add_argument(\"--xlm_language\", type=str, default=\"\", help=\"Optional language when used with the XLM model.\")\n\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n 
parser.add_argument(\"--num_return_sequences\", type=int, default=1, help=\"The number of samples to generate.\")\n parser.add_argument(\"--title\", type=str, default=\"\", help=\"The title of the article.\")\n args = parser.parse_args()\n\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n\n set_seed(args)\n\n # Initialize the model and tokenizer\n try:\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n except KeyError:\n raise KeyError(\"the model {} you specified is not supported. You are welcome to add it and open a PR :)\".format(args.model_type))\n\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n model = model_class.from_pretrained(args.model_name_or_path)\n model.to(args.device)\n\n args.length = adjust_length_to_model(args.length, max_sequence_length=model.config.max_position_embeddings)\n logger.info(args)\n\n prompt_text = args.prompt if args.prompt else input(\"Model prompt >>> \")\n\n # Different models need different input formatting and/or extra arguments\n requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS.keys()\n if requires_preprocessing:\n prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type)\n preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text)\n encoded_prompt = tokenizer.encode(\n preprocessed_prompt_text, add_special_tokens=False, return_tensors=\"pt\", add_space_before_punct_symbol=True\n )\n else:\n encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors=\"pt\")\n encoded_prompt = encoded_prompt.to(args.device)\n\n if encoded_prompt.size()[-1] == 0:\n input_ids = None\n else:\n input_ids = encoded_prompt\n\n output_sequences = model.generate(\n input_ids=input_ids,\n max_length=args.length + len(encoded_prompt[0]),\n temperature=args.temperature,\n top_k=args.k,\n top_p=args.p,\n repetition_penalty=args.repetition_penalty,\n do_sample=True,\n num_return_sequences=args.num_return_sequences,\n )\n\n # Remove the batch dimension when returning multiple sequences\n if len(output_sequences.shape) > 2:\n output_sequences.squeeze_()\n\n generated_sequences = []\n print(args.title,\"\\n\")\n \n for generated_sequence_idx, generated_sequence in enumerate(output_sequences):\n print('\\n')#print(\"=== GENERATED SEQUENCE {} ===\".format(generated_sequence_idx + 1))\n generated_sequence = generated_sequence.tolist()\n\n # Decode text\n text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)\n\n # Remove all text after the stop token\n text = text[: text.find(args.stop_token) if args.stop_token else None]\n\n # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing\n total_sequence = (\n text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :] + \" [...]\"\n \n )\n\n generated_sequences.append(total_sequence)\n\n print(total_sequence)\n\n print_choice = input(\"Vuoi salvare l'articolo? Se sì, scrivi Y \")\n if print_choice == \"Y\":\n \n date = input(\"Inserisci la data in cui l'articolo sarà pubblicato, nel formato YYYY-MM-GG, es. 2020-12-31 \")\n id_num = input(\"Inserisci l'id \")\n titolo = args.title\n autore = input(\"Inserisci il nome dell'autore \")\n ordine = input(\"In che ordine vuoi gli articoli? Scrivi 231 per avere, nell'ordine, il secondo, il terzo e il primo \")\n sottotitolo = input(\"Inserisci il sottotitolo (è una frase o un pezzo di frase dall'articolo che vuoi compaia per primo) \")\n ord_art = [int(num) for num in ordine] # sort the articles into the requested order\n #print(ord_art)\n #print(type(generated_sequences), len(generated_sequences), \"\\n\", generated_sequences)\n article_dict = {\n \"id\": int(id_num),\n \"data\": date,\n \"titolo\": titolo,\n \"sottotitolo\": sottotitolo,\n \"articolo1\": generated_sequences[ord_art[0] - 1],\n \"articolo2\": generated_sequences[ord_art[1] - 1],\n \"articolo3\": generated_sequences[ord_art[2] - 1],\n \"autore\": autore\n }\n\n #article_json= json.dumps(article_dict)\n with open('{}.json'.format(date),'w', encoding='utf-8') as data:\n json.dump(article_dict, data, ensure_ascii=False)\n \n \n else:\n pass\n return generated_sequences\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available"
]
] |
Guoning-Chen/ssd.pytorch
|
[
"49c0e039bc3128ccc0176454059665a739d4e185"
] |
[
"layers/modules/multibox_loss.py"
] |
[
"# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data import coco as cfg\nfrom ..box_utils import match, log_sum_exp\n\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,\n use_gpu=True):\n super(MultiBoxLoss, self).__init__()\n self.use_gpu = use_gpu\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = cfg['variance']\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n targets (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n loc_data, conf_data, priors = predictions\n num = loc_data.size(0)\n priors = priors[:loc_data.size(1), :]\n num_priors = (priors.size(0))\n num_classes = self.num_classes\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(num, num_priors, 4)\n conf_t = torch.LongTensor(num, num_priors)\n for idx in range(num):\n truths = targets[idx][:, :-1].data\n labels = targets[idx][:, -1].data\n defaults = priors.data\n match(self.threshold, truths, defaults, self.variance, labels,\n loc_t, conf_t, idx)\n if self.use_gpu:\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)\n conf_t = Variable(conf_t, requires_grad=False)\n\n pos = conf_t > 0\n num_pos = pos.sum(dim=1, keepdim=True)\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n\n # the following two lines were swapped to fix a bug\n loss_c = loss_c.view(num, -1)\n loss_c[pos] = 0 # filter out pos boxes for now\n\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = num_pos.data.sum()\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n"
] |
[
[
"torch.nn.functional.smooth_l1_loss",
"torch.autograd.Variable",
"torch.nn.functional.cross_entropy",
"torch.LongTensor",
"torch.Tensor"
]
] |
arita37/pyvtreat
|
[
"c32e7ce6db11a2ccdd63e545b25028cbec03a3ff"
] |
[
"pkg/build/lib/vtreat/vtreat_api.py"
] |
[
"import warnings\n\nimport pandas\nimport numpy\n\nimport vtreat.vtreat_impl as vtreat_impl\nimport vtreat.util\nimport vtreat.cross_plan\n\n\ndef vtreat_parameters(user_params=None):\n \"\"\"build a vtreat parameters dictionary, adding in user choices\"\"\"\n\n params = {\n \"use_hierarchical_estimate\": True,\n \"coders\": {\n \"clean_copy\",\n \"missing_indicator\",\n \"indicator_code\",\n \"impact_code\",\n \"deviation_code\",\n \"logit_code\",\n \"prevalence_code\",\n },\n \"filter_to_recommended\": True,\n \"indicator_min_fraction\": 0.1,\n \"cross_validation_plan\": vtreat.cross_plan.KWayCrossPlanYStratified(),\n \"cross_validation_k\": 5,\n \"user_transforms\": [],\n \"sparse_indicators\": True,\n \"missingness_imputation\": numpy.mean,\n \"check_for_duplicate_frames\": True,\n \"error_on_duplicate_frames\": False,\n \"retain_cross_plan\": True,\n \"tunable_params\": [\n \"indicator_min_fraction\"\n ],\n }\n pkeys = set(params.keys())\n if user_params is not None:\n for k in user_params.keys():\n if k not in pkeys:\n raise KeyError(\"parameter key \" + str(k) + \" not recognized\")\n params[k] = user_params[k]\n if params[\"error_on_duplicate_frames\"]:\n params[\"check_for_duplicate_frames\"] = True\n for k in params[\"tunable_params\"]:\n if k not in pkeys:\n raise KeyError(\"tunable_params key \" + str(k) + \" not recognized\")\n return params\n\n\ndef unsupervised_parameters(user_params=None):\n \"\"\"build a vtreat parameters dictionary for unsupervised tasks, adding in user choices\"\"\"\n\n params = {\n \"coders\": {\n \"clean_copy\",\n \"missing_indicator\",\n \"indicator_code\",\n \"prevalence_code\",\n },\n \"indicator_min_fraction\": 0.0,\n \"user_transforms\": [],\n \"sparse_indicators\": True,\n \"missingness_imputation\": numpy.mean,\n \"tunable_params\": [\n \"indicator_min_fraction\"\n ],\n }\n pkeys = set(params.keys())\n if user_params is not None:\n for k in user_params.keys():\n if k not in pkeys:\n raise KeyError(\"parameter key \" + str(k) + \" not recognized\")\n params[k] = user_params[k]\n for k in params[\"tunable_params\"]:\n if k not in pkeys:\n raise KeyError(\"tunable_params key \" + str(k) + \" not recognized\")\n return params\n\n\nclass NumericOutcomeTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage a treatment plan for a numeric outcome (regression)\"\"\"\n\n def __init__(\n self, *,\n var_list=None,\n outcome_name=None,\n cols_to_copy=None,\n params=None,\n imputation_map=None,\n ):\n \"\"\"\n\n :param var_list: list or tuple of column names\n :param outcome_name: name of column containing dependent variable\n :param cols_to_copy: list or tuple of column names\n :param params: vtreat.vtreat_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=outcome_name,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n\n def merge_params(self, p):\n return vtreat_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n if self.params_['check_for_duplicate_frames'] and (self.last_fit_x_id_ == vtreat.util.hash_data_frame(X)):\n if self.params_[\"error_on_duplicate_frames\"]:\n raise ValueError(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n warnings.warn(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n res = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=res, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is None:\n if self.outcome_name_ is None:\n raise ValueError(\".fit_transform(X) must have outcome_name set\")\n y = numpy.asarray(X[self.outcome_name_])\n else:\n y = numpy.asarray(y)\n if (self.outcome_name_ is not None) and (self.outcome_name_ in X.columns):\n if not numpy.all(X[self.outcome_name_] == y):\n raise ValueError(\".fit_transform(X, y) called with y != X[outcome_name]\")\n if not X.shape[0] == len(y):\n raise ValueError(\"X.shape[0] should equal len(y)\")\n y = vtreat.util.safe_to_numeric_array(y)\n if vtreat.util.is_bad(y).sum() > 0:\n raise ValueError(\"y should not have any missing/NA/NaN values\")\n if numpy.max(y) <= numpy.min(y):\n raise ValueError(\"y does not vary\")\n cross_rows = None\n cross_plan = None\n if self.params_['retain_cross_plan']:\n cross_rows = self.cross_rows_\n cross_plan = self.cross_plan_\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n if isinstance(y, pandas.Series):\n y = y.reset_index(inplace=False, drop=True)\n # model for independent transforms\n self.plan_ = None\n self.score_frame_ = None\n self.plan_ = vtreat_impl.fit_numeric_outcome_treatment(\n X=X,\n y=y,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n if (cross_plan is None) or (cross_rows != X.shape[0]):\n if cross_plan is not None:\n warnings.warn(\"Number of rows different than previous fit with retain_cross_plan==True\")\n cross_plan = self.params_[\"cross_validation_plan\"].split_plan(\n n_rows=X.shape[0], k_folds=self.params_[\"cross_validation_k\"], data=X, y=y\n )\n cross_rows = X.shape[0]\n # patch in cross-frame versions of complex columns such as impact\n cross_frame = vtreat_impl.cross_patch_refit_y_aware_cols(\n x=X, y=y, res=res, plan=self.plan_, cross_plan=cross_plan\n )\n cross_frame = vtreat_impl.cross_patch_user_y_aware_cols(\n x=cross_frame,\n y=y,\n res=res,\n params=self.params_,\n cross_plan=cross_plan,\n )\n # use cross_frame to compute variable effects\n self.score_frame_ = vtreat_impl.score_plan_variables(\n cross_frame=cross_frame,\n outcome=y,\n plan=self.plan_,\n params=self.params_,\n is_classification=False\n )\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n cross_frame = vtreat_impl.limit_to_appropriate_columns(\n res=cross_frame, transform=self\n )\n cross_frame, res_columns = vtreat_impl.back_to_orig_type_data_frame(cross_frame, orig_type)\n self.last_result_columns = res_columns\n if self.params_['retain_cross_plan']:\n self.cross_plan_ = cross_plan\n self.cross_rows_ = cross_rows\n else:\n self.cross_plan_ = None\n self.cross_rows_ = None\n return cross_frame\n\n\nclass BinomialOutcomeTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage a treatment plan for a target outcome (binomial classification)\"\"\"\n\n def __init__(\n self,\n *,\n var_list=None,\n outcome_name=None,\n outcome_target=True,\n cols_to_copy=None,\n params=None,\n imputation_map=None,\n ):\n \"\"\"\n\n :param var_list: list or tuple of column names\n :param outcome_name: name of column containing dependent variable\n :param outcome_target: value of outcome to consider \"positive\"\n :param cols_to_copy: list or tuple of column names\n :param params: vtreat.vtreat_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=outcome_name,\n outcome_target=outcome_target,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n\n def merge_params(self, p):\n return vtreat_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n if self.params_['check_for_duplicate_frames'] and (self.last_fit_x_id_ == vtreat.util.hash_data_frame(X)):\n if self.params_[\"error_on_duplicate_frames\"]:\n raise ValueError(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n warnings.warn(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is None:\n if self.outcome_name_ is None:\n raise ValueError(\".fit_transform(X) must have outcome_name set\")\n y = numpy.asarray(X[self.outcome_name_])\n else:\n y = numpy.asarray(y)\n if (self.outcome_name_ is not None) and (self.outcome_name_ in X.columns):\n if not numpy.all(X[self.outcome_name_] == y):\n raise ValueError(\".fit_transform(X, y) called with y != X[outcome_name]\")\n if not X.shape[0] == len(y):\n raise ValueError(\"X.shape[0] should equal len(y)\")\n y_mean = numpy.mean(y == self.outcome_target_)\n if y_mean <= 0 or y_mean >= 1:\n raise ValueError(\"y==outcome_target does not vary\")\n cross_rows = None\n cross_plan = None\n if self.params_['retain_cross_plan']:\n cross_rows = self.cross_rows_\n cross_plan = self.cross_plan_\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n if isinstance(y, pandas.Series):\n y = y.reset_index(inplace=False, drop=True)\n # model for independent transforms\n self.plan_ = None\n self.score_frame_ = None\n self.plan_ = vtreat_impl.fit_binomial_outcome_treatment(\n X=X,\n y=y,\n outcome_target=self.outcome_target_,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n if (cross_plan is None) or (cross_rows != X.shape[0]):\n if cross_plan is not None:\n warnings.warn(\"Number of rows different than previous fit with retain_cross_plan==True\")\n cross_plan = self.params_[\"cross_validation_plan\"].split_plan(\n n_rows=X.shape[0], k_folds=self.params_[\"cross_validation_k\"], data=X, y=y\n )\n cross_rows = X.shape[0]\n # patch in cross-frame versions of complex columns such as impact\n cross_frame = vtreat_impl.cross_patch_refit_y_aware_cols(\n x=X, y=y, res=res, plan=self.plan_, cross_plan=cross_plan\n )\n cross_frame = vtreat_impl.cross_patch_user_y_aware_cols(\n x=cross_frame,\n y=y,\n res=res,\n params=self.params_,\n cross_plan=cross_plan,\n )\n # use cross_frame to compute variable effects\n self.score_frame_ = vtreat_impl.score_plan_variables(\n cross_frame=cross_frame,\n outcome=numpy.asarray(\n numpy.asarray(y) == self.outcome_target_, dtype=float\n ),\n plan=self.plan_,\n params=self.params_,\n is_classification=True\n )\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n cross_frame = vtreat_impl.limit_to_appropriate_columns(\n res=cross_frame, transform=self\n )\n cross_frame, res_columns = vtreat_impl.back_to_orig_type_data_frame(cross_frame, orig_type)\n self.last_result_columns = res_columns\n if self.params_['retain_cross_plan']:\n self.cross_plan_ = cross_plan\n self.cross_rows_ = cross_rows\n else:\n self.cross_plan_ = None\n self.cross_rows_ = None\n return cross_frame\n\n\nclass MultinomialOutcomeTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage a treatment plan for a set of outcomes (multinomial classification)\"\"\"\n\n def __init__(\n self,\n *,\n var_list=None,\n outcome_name=None,\n cols_to_copy=None,\n params=None,\n imputation_map=None,\n ):\n \"\"\"\n\n :param var_list: list or tuple of column names\n :param outcome_name: name of column containing dependent variable\n :param cols_to_copy: list or tuple of column names\n :param params: vtreat.vtreat_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=outcome_name,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n self.outcomes_ = None\n\n def merge_params(self, p):\n return vtreat_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n if self.params_['check_for_duplicate_frames'] and (self.last_fit_x_id_ == vtreat.util.hash_data_frame(X)):\n if self.params_[\"error_on_duplicate_frames\"]:\n raise ValueError(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n warnings.warn(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is None:\n if self.outcome_name_ is None:\n raise ValueError(\".fit_transform(X) must have outcome_name set\")\n y = numpy.asarray(X[self.outcome_name_])\n else:\n y = numpy.asarray(y)\n if (self.outcome_name_ is not None) and (self.outcome_name_ in X.columns):\n if not numpy.all(X[self.outcome_name_] == y):\n raise ValueError(\".fit_transform(X, y) called with y != X[outcome_name]\")\n if not X.shape[0] == len(y):\n raise ValueError(\"X.shape[0] should equal len(y)\")\n if len(numpy.unique(y)) <= 1:\n raise ValueError(\"y must take on at least 2 values\")\n cross_rows = None\n cross_plan = None\n if self.params_['retain_cross_plan']:\n cross_rows = self.cross_rows_\n cross_plan = self.cross_plan_\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n if isinstance(y, pandas.Series):\n y = y.reset_index(inplace=False, drop=True)\n # model for independent transforms\n self.plan_ = None\n self.score_frame_ = None\n self.outcomes_ = numpy.unique(y)\n self.plan_ = vtreat_impl.fit_multinomial_outcome_treatment(\n X=X,\n y=y,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n if (cross_plan is None) or (cross_rows != X.shape[0]):\n if cross_plan is not None:\n warnings.warn(\"Number of rows different than previous fit with retain_cross_plan==True\")\n cross_plan = self.params_[\"cross_validation_plan\"].split_plan(\n n_rows=X.shape[0], k_folds=self.params_[\"cross_validation_k\"], data=X, y=y\n )\n cross_rows = X.shape[0]\n cross_frame = vtreat_impl.cross_patch_refit_y_aware_cols(\n x=X, y=y, res=res, plan=self.plan_, cross_plan=cross_plan\n )\n cross_frame = vtreat_impl.cross_patch_user_y_aware_cols(\n x=cross_frame,\n y=y,\n res=res,\n params=self.params_,\n cross_plan=cross_plan,\n )\n # use cross_frame to compute variable effects\n\n def si(oi):\n sf = vtreat_impl.score_plan_variables(\n cross_frame=cross_frame,\n outcome=numpy.asarray(numpy.asarray(y) == oi, dtype=float),\n plan=self.plan_,\n params=self.params_,\n is_classification=True\n )\n sf[\"outcome_target\"] = oi\n return sf\n\n score_frames = [si(oi) for oi in self.outcomes_]\n self.score_frame_ = pandas.concat(score_frames, axis=0)\n self.score_frame_.reset_index(inplace=True, drop=True)\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n cross_frame = vtreat_impl.limit_to_appropriate_columns(\n res=cross_frame, transform=self\n )\n cross_frame, res_columns = vtreat_impl.back_to_orig_type_data_frame(cross_frame, orig_type)\n self.last_result_columns = res_columns\n if self.params_['retain_cross_plan']:\n self.cross_plan_ = cross_plan\n self.cross_rows_ = cross_rows\n else:\n self.cross_plan_ = None\n self.cross_rows_ = None\n return cross_frame\n\n\nclass UnsupervisedTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage an unsupervised treatment plan\"\"\"\n\n def __init__(self,\n *,\n var_list=None,\n cols_to_copy=None,\n params=None,\n imputation_map=None):\n \"\"\"\n\n :param var_list: list or tuple of column names\n :param cols_to_copy: list or tuple of column names\n :param params: vtreat.unsupervised_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=None,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n\n def merge_params(self, p):\n return unsupervised_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is not None:\n raise ValueError(\"y should be None\")\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n self.plan_ = vtreat_impl.fit_unsupervised_treatment(\n X=X,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n self.score_frame_ = vtreat_impl.pseudo_score_plan_variables(\n cross_frame=res, plan=self.plan_, params=self.params_\n )\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n"
] |
[
[
"numpy.max",
"numpy.asarray",
"numpy.min",
"numpy.mean",
"pandas.concat",
"numpy.all",
"numpy.unique"
]
] |
dmadeka/ray
|
[
"4f8e100fe0417da4fe1098defbfa478088502244"
] |
[
"python/ray/experimental/sgd/pytorch/utils.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nfrom contextlib import closing\nimport numpy as np\nimport socket\nimport time\nimport torch\nimport torch.nn as nn\n\n\ndef train(train_iterator, model, criterion, optimizer):\n \"\"\"Runs 1 training epoch\"\"\"\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n timers = {k: TimerStat() for k in [\"d2h\", \"fwd\", \"grad\", \"apply\"]}\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n for i, (features, target) in enumerate(train_iterator):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # Create non_blocking tensors for distributed training\n with timers[\"d2h\"]:\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n with timers[\"fwd\"]:\n output = model(features)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n with timers[\"grad\"]:\n # compute gradients in a backward pass\n optimizer.zero_grad()\n loss.backward()\n\n with timers[\"apply\"]:\n # Call step of optimizer to update model params\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n stats = {\n \"batch_time\": batch_time.avg,\n \"batch_processed\": losses.count,\n \"train_loss\": losses.avg,\n \"data_time\": data_time.avg,\n }\n stats.update({k: t.mean for k, t in timers.items()})\n return stats\n\n\ndef validate(val_loader, model, criterion):\n batch_time = AverageMeter()\n losses = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (features, target) in enumerate(val_loader):\n\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n output = model(features)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n stats = {\"batch_time\": batch_time.avg, \"validation_loss\": losses.avg}\n return stats\n\n\nclass TimerStat(object):\n \"\"\"A running stat for conveniently logging the duration of a code block.\n\n Note that this class is *not* thread-safe.\n\n Examples:\n Time a call to 'time.sleep'.\n\n >>> import time\n >>> sleep_timer = TimerStat()\n >>> with sleep_timer:\n ... 
time.sleep(1)\n >>> round(sleep_timer.mean)\n 1\n \"\"\"\n\n def __init__(self, window_size=10):\n self._window_size = window_size\n self._samples = []\n self._units_processed = []\n self._start_time = None\n self._total_time = 0.0\n self.count = 0\n\n def __enter__(self):\n assert self._start_time is None, \"concurrent updates not supported\"\n self._start_time = time.time()\n\n def __exit__(self, type, value, tb):\n assert self._start_time is not None\n time_delta = time.time() - self._start_time\n self.push(time_delta)\n self._start_time = None\n\n def push(self, time_delta):\n self._samples.append(time_delta)\n if len(self._samples) > self._window_size:\n self._samples.pop(0)\n self.count += 1\n self._total_time += time_delta\n\n def push_units_processed(self, n):\n self._units_processed.append(n)\n if len(self._units_processed) > self._window_size:\n self._units_processed.pop(0)\n\n @property\n def mean(self):\n return np.mean(self._samples)\n\n @property\n def median(self):\n return np.median(self._samples)\n\n @property\n def sum(self):\n return np.sum(self._samples)\n\n @property\n def max(self):\n return np.max(self._samples)\n\n @property\n def first(self):\n return self._samples[0] if self._samples else None\n\n @property\n def last(self):\n return self._samples[-1] if self._samples else None\n\n @property\n def size(self):\n return len(self._samples)\n\n @property\n def mean_units_processed(self):\n return float(np.mean(self._units_processed))\n\n @property\n def mean_throughput(self):\n time_total = sum(self._samples)\n if not time_total:\n return 0.0\n return sum(self._units_processed) / time_total\n\n def reset(self):\n self._samples = []\n self._units_processed = []\n self._start_time = None\n self._total_time = 0.0\n self.count = 0\n\n\ndef find_free_port():\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind((\"\", 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass Resources(\n namedtuple(\"Resources\", [\"num_cpus\", \"num_gpus\", \"resources\"])):\n __slots__ = ()\n\n def __new__(cls, num_cpus=1, num_gpus=0, resources=None):\n if resources is None:\n resources = {}\n\n return super(Resources, cls).__new__(cls, num_cpus, num_gpus,\n resources)\n\n\ndef sgd_mse_optimizer(model, config):\n \"\"\"Returns the mean squared error criterion and SGD optimizer.\n\n Args:\n model (torch.nn.Module): the model to optimize.\n config (dict): configuration for the optimizer.\n lr (float): the learning rate. defaults to 0.01.\n \"\"\"\n learning_rate = config.get(\"lr\", 0.01)\n criterion = nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n return criterion, optimizer\n"
] |
[
[
"numpy.max",
"torch.nn.MSELoss",
"numpy.median",
"numpy.sum",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available"
]
] |
haydarai/dagster
|
[
"9b9c78e332f976f196d17a38c9840f53679d94cd"
] |
[
"python_modules/libraries/dagster-dbt/dagster_dbt/rpc/solids.py"
] |
[
"import json\nimport time\nfrom typing import Callable, Iterator, Optional\n\nimport pandas as pd\nfrom dagster_pandas import DataFrame\n\nfrom dagster import (\n Array,\n AssetMaterialization,\n Bool,\n DagsterInvalidDefinitionError,\n EventMetadataEntry,\n Failure,\n Field,\n InputDefinition,\n Int,\n Noneable,\n Nothing,\n Output,\n OutputDefinition,\n Permissive,\n RetryRequested,\n String,\n check,\n solid,\n)\nfrom dagster.core.execution.context.compute import SolidExecutionContext\n\nfrom ..errors import DagsterDbtRpcUnexpectedPollOutputError\nfrom .types import DbtRpcOutput\nfrom .utils import log_rpc, raise_for_rpc_error\n\n\ndef _generate_materializations(dro: DbtRpcOutput) -> Iterator[AssetMaterialization]:\n \"\"\"Yields ``AssetMaterializations`` for metadata in the dbt RPC ``DbtRpcOutput``.\"\"\"\n for node_result in dro.result.results:\n if node_result.node[\"resource_type\"] in [\"model\", \"snapshot\"]:\n success = not node_result.fail and not node_result.skip and not node_result.error\n if success:\n entries = [\n EventMetadataEntry.json(data=node_result.node, label=\"Node\"),\n EventMetadataEntry.text(text=str(node_result.status), label=\"Status\"),\n EventMetadataEntry.text(\n text=str(node_result.execution_time), label=\"Execution Time\"\n ),\n EventMetadataEntry.text(\n text=node_result.node[\"config\"][\"materialized\"],\n label=\"Materialization Strategy\",\n ),\n EventMetadataEntry.text(text=node_result.node[\"database\"], label=\"Database\"),\n EventMetadataEntry.text(text=node_result.node[\"schema\"], label=\"Schema\"),\n EventMetadataEntry.text(text=node_result.node[\"alias\"], label=\"Alias\"),\n EventMetadataEntry.text(\n text=node_result.node[\"description\"], label=\"Description\"\n ),\n ]\n for step_timing in node_result.step_timings:\n if step_timing.name == \"execute\":\n execution_entries = [\n EventMetadataEntry.text(\n text=step_timing.started_at.isoformat(timespec=\"seconds\"),\n label=\"Execution Started At\",\n ),\n EventMetadataEntry.text(\n text=step_timing.completed_at.isoformat(timespec=\"seconds\"),\n label=\"Execution Completed At\",\n ),\n EventMetadataEntry.text(\n text=str(step_timing.duration), label=\"Execution Duration\"\n ),\n ]\n entries.extend(execution_entries)\n if step_timing.name == \"compile\":\n execution_entries = [\n EventMetadataEntry.text(\n text=step_timing.started_at.isoformat(timespec=\"seconds\"),\n label=\"Compilation Started At\",\n ),\n EventMetadataEntry.text(\n text=step_timing.completed_at.isoformat(timespec=\"seconds\"),\n label=\"Compilation Completed At\",\n ),\n EventMetadataEntry.text(\n text=str(step_timing.duration), label=\"Compilation Duration\"\n ),\n ]\n entries.extend(execution_entries)\n\n yield AssetMaterialization(\n description=\"A materialized node within the dbt graph.\",\n metadata_entries=entries,\n asset_key=node_result.node[\"unique_id\"],\n )\n\n\ndef _poll_rpc(\n context: SolidExecutionContext, request_token: str, should_yield_materializations: bool = True\n) -> DbtRpcOutput:\n \"\"\"Polls the dbt RPC server for the status of a request until the state is ``success``.\"\"\"\n logs_start = 0\n while True:\n # Poll for the dbt RPC request.\n context.log.debug(f\"RequestToken: {request_token}\")\n resp = context.resources.dbt_rpc.poll(\n request_token=request_token, logs=context.solid_config[\"logs\"], logs_start=logs_start\n )\n raise_for_rpc_error(context, resp)\n\n # Pass dbt RPC logs into the Dagster/Dagit logger.\n if context.solid_config[\"logs\"]:\n logs = 
resp.json().get(\"result\").get(\"logs\")\n if len(logs) > 0:\n log_rpc(context, logs)\n logs_start += len(logs)\n\n # Stop polling if request's state is no longer \"running\".\n if resp.json().get(\"result\").get(\"state\") != \"running\":\n break\n\n # Sleep for the configured time interval before polling again.\n context.log.debug(\n f\"Request {request_token} currently in state '{resp.json().get('result').get('state')}' (elapsed time {resp.json().get('result').get('elapsed', 0)} seconds). Sleeping for {context.solid_config.get('interval')}s..\"\n )\n time.sleep(context.solid_config[\"interval\"])\n\n if resp.json().get(\"result\").get(\"state\") != \"success\":\n raise Failure(\n description=f\"Request {request_token} finished with state '{resp.json().get('result').get('state')}' in {resp.json().get('result').get('elapsed')} seconds\",\n )\n\n context.log.info(\n f\"Request {request_token} finished with state '{resp.json().get('result').get('state')}' in {resp.json().get('result').get('elapsed')} seconds\"\n )\n context.log.debug(json.dumps(resp.json().get(\"result\"), indent=2))\n\n polled_run_results = DbtRpcOutput.from_dict(resp.json().get(\"result\"))\n\n if should_yield_materializations:\n for materialization in _generate_materializations(polled_run_results):\n yield materialization\n\n yield Output(polled_run_results)\n\n\ndef unwrap_result(poll_rpc_generator) -> DbtRpcOutput:\n \"\"\"A helper function that extracts the `DbtRpcOutput` value from a generator.\n\n The parameter `poll_rpc_generator` is expected to be an invocation of `_poll_rpc`.\n \"\"\"\n output = None\n for x in poll_rpc_generator:\n output = x\n\n if output is None:\n raise DagsterDbtRpcUnexpectedPollOutputError(\n description=\"poll_rpc yielded None as its last value. Expected value of type Output containing DbtRpcOutput.\",\n )\n\n if not isinstance(output, Output):\n raise DagsterDbtRpcUnexpectedPollOutputError(\n description=f\"poll_rpc yielded value of type {type(output)} as its last value. Expected value of type Output containing DbtRpcOutput.\",\n )\n\n if not isinstance(output.value, DbtRpcOutput):\n raise DagsterDbtRpcUnexpectedPollOutputError(\n description=f\"poll_rpc yielded Output containing {type(output.value)}. Expected DbtRpcOutput.\",\n )\n\n return output.value\n\n\n@solid(\n description=\"A solid to invoke dbt run over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt run.\",\n )\n ],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to run.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt run`` command to a dbt RPC server and returns the request token.\n\n This dbt RPC solid is asynchronous. 
The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.run(\n models=context.solid_config[\"models\"], exclude=context.solid_config[\"exclude\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke dbt run over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to run.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n \"full_refresh\": Field(\n config=Bool,\n description=\"Whether or not to perform a --full-refresh.\",\n is_required=False,\n default_value=False,\n ),\n \"fail_fast\": Field(\n config=Bool,\n description=\"Whether or not to --fail-fast.\",\n is_required=False,\n default_value=False,\n ),\n \"warn_error\": Field(\n config=Bool,\n description=\"Whether or not to --warn-error.\",\n is_required=False,\n default_value=False,\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n \"task_tags\": Permissive(),\n \"max_retries\": Field(config=Int, is_required=False, default_value=5),\n \"retry_interval\": Field(config=Int, is_required=False, default_value=120),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt run`` command to a dbt RPC server and returns the result of the\n executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n if context.solid_config[\"task_tags\"]:\n results = context.resources.dbt_rpc.ps().json()\n for task in results[\"result\"][\"rows\"]:\n if task[\"tags\"] == context.solid_config[\"task_tags\"]:\n context.log.warning(\n f\"RPC task with tags {json.dumps(task['tags'])} currently running.\"\n )\n raise RetryRequested(\n max_retries=context.solid_config[\"max_retries\"],\n seconds_to_wait=context.solid_config[\"retry_interval\"],\n )\n\n command = \"\"\n\n if context.solid_config[\"warn_error\"]:\n command += \" --warn-error\"\n\n command += \" run\"\n\n if context.solid_config[\"models\"]:\n models = \" \".join(set(context.solid_config[\"models\"]))\n command += f\" --models {models}\"\n\n if context.solid_config[\"exclude\"]:\n exclude = \" \".join(set(context.solid_config[\"exclude\"]))\n command += f\" --exclude {exclude}\"\n\n if context.solid_config[\"full_refresh\"]:\n command += \" --full-refresh\"\n\n if context.solid_config[\"fail_fast\"]:\n command += \" --fail-fast\"\n\n context.log.debug(f\"Running dbt command: dbt {command}\")\n resp = context.resources.dbt_rpc.cli(cli=command, **context.solid_config[\"task_tags\"])\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = 
resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke dbt test over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt test.\",\n )\n ],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to test.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n \"data\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run custom data tests.\",\n ),\n \"schema\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run schema tests.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_test(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt test`` command to a dbt RPC server and returns the request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.test(\n models=context.solid_config[\"models\"],\n exclude=context.solid_config[\"exclude\"],\n data=context.solid_config[\"data\"],\n schema=context.solid_config[\"schema\"],\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke dbt test over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to test.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n \"data\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run custom data tests.\",\n ),\n \"schema\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run schema tests.\",\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_test_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt test`` command to a dbt RPC server and returns the result of the\n executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n resp = context.resources.dbt_rpc.test(\n models=context.solid_config[\"models\"],\n exclude=context.solid_config[\"exclude\"],\n data=context.solid_config[\"data\"],\n schema=context.solid_config[\"schema\"],\n )\n 
context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke a dbt run operation over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt run operation.\",\n )\n ],\n config_schema={\n \"macro\": Field(\n config=String,\n is_required=True,\n description=\"The dbt macro to invoke as a run operation\",\n ),\n \"args\": Field(\n config=Noneable(Permissive()),\n is_required=False,\n default_value=None,\n description=\"Arguments to supply to the invoked macro.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run_operation(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt run-operation`` command to a dbt RPC server and returns the\n request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.run_operation(\n macro=context.solid_config[\"macro\"], args=context.solid_config[\"args\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke a dbt run operation over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"macro\": Field(\n config=String,\n is_required=True,\n description=\"The dbt macro to invoke as a run operation\",\n ),\n \"args\": Field(\n config=Noneable(Permissive()),\n is_required=False,\n default_value=None,\n description=\"Arguments to supply to the invoked macro.\",\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run_operation_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt run-operation`` command to a dbt RPC server and returns the result of the\n executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n resp = context.resources.dbt_rpc.run_operation(\n macro=context.solid_config[\"macro\"], args=context.solid_config[\"args\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke a dbt snapshot over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt snapshot.\",\n )\n ],\n config_schema={\n \"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n 
description=\"The dbt snapshot files to snapshot.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt snapshot files to exclude from the snapshot.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt snapshot`` command to a dbt RPC server and returns the\n request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.snapshot(\n select=context.solid_config[\"select\"], exclude=context.solid_config[\"exclude\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke a dbt snapshot over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt snapshot files to snapshot.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt snapshot files to exclude from the snapshot.\",\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n \"task_tags\": Permissive(),\n \"max_retries\": Field(config=Int, is_required=False, default_value=5),\n \"retry_interval\": Field(config=Int, is_required=False, default_value=120),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt snapshot`` command to a dbt RPC server and returns the result of\n the executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n if context.solid_config[\"task_tags\"]:\n results = context.resources.dbt_rpc.ps().json()\n for task in results[\"result\"][\"rows\"]:\n if task[\"tags\"] == context.solid_config[\"task_tags\"]:\n context.log.warning(\n f\"RPC task with tags {json.dumps(task['tags'])} currently running.\"\n )\n raise RetryRequested(\n max_retries=context.solid_config[\"max_retries\"],\n seconds_to_wait=context.solid_config[\"retry_interval\"],\n )\n\n resp = context.resources.dbt_rpc.snapshot(\n select=context.solid_config[\"select\"], exclude=context.solid_config[\"exclude\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke dbt source snapshot-freshness over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt snapshot.\",\n )\n ],\n config_schema={\n 
\"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt sources to snapshot-freshness for.\",\n ),\n \"warn_error\": Field(\n config=Bool,\n description=\"Whether or not to --warn-error.\",\n is_required=False,\n default_value=False,\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot_freshness(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt source snapshot-freshness`` command to a dbt RPC server and\n returns the request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n command = \"\"\n\n if context.solid_config[\"warn_error\"]:\n command += \" --warn-error\"\n\n command += \" source snapshot-freshness\"\n\n if context.solid_config[\"select\"]:\n select = \" \".join(set(context.solid_config[\"select\"]))\n command += f\" --select {select}\"\n\n context.log.debug(f\"Running dbt command: dbt {command}\")\n resp = context.resources.dbt_rpc.cli(cli=command)\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke dbt source snapshot-freshness over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt sources to snapshot-freshness for.\",\n ),\n \"warn_error\": Field(\n config=Bool,\n description=\"Whether or not to --warn-error.\",\n is_required=False,\n default_value=False,\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot_freshness_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt source snapshot-freshness`` command to a dbt RPC server and\n returns the result of the executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n command = \"\"\n\n if context.solid_config[\"warn_error\"]:\n command += \" --warn-error\"\n\n command += \" source snapshot-freshness\"\n\n if context.solid_config[\"select\"]:\n select = \" \".join(set(context.solid_config[\"select\"]))\n command += f\" --select {select}\"\n\n context.log.debug(f\"Running dbt command: dbt {command}\")\n resp = context.resources.dbt_rpc.cli(cli=command)\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to compile a SQL query in context of a dbt project over RPC.\",\n input_defs=[\n InputDefinition(name=\"start_after\", dagster_type=Nothing),\n InputDefinition(\n name=\"sql\", description=\"The SQL query to be compiled.\", dagster_type=String\n ),\n ],\n output_defs=[\n OutputDefinition(name=\"sql\", 
description=\"The compiled SQL query.\", dagster_type=String)\n ],\n config_schema={\n \"name\": Field(config=String),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_compile_sql(context: SolidExecutionContext, sql: String) -> String:\n \"\"\"This solid sends the ``dbt compile`` command to a dbt RPC server and returns the compiled\n SQL.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed, then extract the compiled SQL from the result.\n \"\"\"\n resp = context.resources.dbt_rpc.compile_sql(sql=sql, name=context.solid_config[\"name\"])\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n result = unwrap_result(_poll_rpc(context, request_token))\n return result.results[0].node[\"compiled_sql\"]\n\n\ndef create_dbt_rpc_run_sql_solid(\n name: str, output_def: Optional[OutputDefinition] = None, **kwargs\n) -> Callable:\n \"\"\"This function is a factory which constructs a solid that will copy the results of a SQL query\n run within the context of a dbt project to a pandas ``DataFrame``.\n\n Any kwargs passed to this function will be passed along to the underlying :func:`@solid\n <dagster.solid>` decorator. However, note that overriding ``config_schema``, ``input_defs``, and\n ``required_resource_keys`` is not allowed and will throw a :class:`DagsterInvalidDefinitionError\n <dagster.DagsterInvalidDefinitionError>`.\n\n If you would like to configure this solid with different config fields, you could consider using\n :func:`@composite_solid <dagster.composite_solid>` to wrap this solid.\n\n Args:\n name (str): The name of this solid.\n output_def (OutputDefinition, optional): The :class:`OutputDefinition\n <dagster.OutputDefinition>` for the solid. This value should always be a representation\n of a pandas ``DataFrame``. 
If not specified, the solid will default to an\n :class:`OutputDefinition <dagster.OutputDefinition>` named \"df\" with a ``DataFrame``\n dagster type.\n\n Returns:\n SolidDefinition: Returns the constructed solid definition.\n \"\"\"\n check.str_param(obj=name, param_name=\"name\")\n check.opt_inst_param(obj=output_def, param_name=\"output_def\", ttype=OutputDefinition)\n\n if \"config_schema\" in kwargs:\n raise DagsterInvalidDefinitionError(\"Overriding config_schema is not supported.\")\n\n if \"input_defs\" in kwargs:\n raise DagsterInvalidDefinitionError(\"Overriding input_defs is not supported.\")\n\n if \"required_resource_keys\" in kwargs:\n raise DagsterInvalidDefinitionError(\"Overriding required_resource_keys is not supported.\")\n\n @solid(\n name=name,\n description=kwargs.pop(\n \"description\",\n \"A solid to run a SQL query in context of a dbt project over RPC and return the results in a pandas DataFrame.\",\n ),\n input_defs=[\n InputDefinition(name=\"start_after\", dagster_type=Nothing),\n InputDefinition(\n name=\"sql\", description=\"The SQL query to be run.\", dagster_type=String\n ),\n ],\n output_defs=[\n output_def\n or OutputDefinition(\n name=\"df\", description=\"The results of the SQL query.\", dagster_type=DataFrame\n )\n ],\n config_schema={\n \"name\": Field(config=String),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n **kwargs,\n )\n def _dbt_rpc_run_sql(context: SolidExecutionContext, sql: String) -> DataFrame:\n resp = context.resources.dbt_rpc.run_sql(sql=sql, name=context.solid_config[\"name\"])\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n result = unwrap_result(_poll_rpc(context, request_token))\n table = result.results[0].table\n return pd.DataFrame.from_records(data=table[\"rows\"], columns=table[\"column_names\"])\n\n return _dbt_rpc_run_sql\n"
] |
[
[
"pandas.DataFrame.from_records"
]
] |
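Note on the dagster-dbt record above: every `*_and_wait` solid follows the same pattern of submitting a dbt command, extracting the `request_token`, and handing it to a `_poll_rpc` helper defined earlier in the same file. A minimal sketch of what such a poller could look like, assuming a client whose `poll` method wraps the dbt RPC `poll` endpoint (the method name, arguments, and state strings here are assumptions, not taken from this record):

import time

def poll_rpc_sketch(rpc_client, request_token, interval=10, logs=True):
    # Assumed: rpc_client.poll(...) wraps the dbt RPC 'poll' method and
    # returns a requests.Response-like object with a JSON body.
    while True:
        resp = rpc_client.poll(request_token=request_token, logs=logs)
        state = resp.json().get("result", {}).get("state")
        # Terminal task states in the dbt RPC spec (exact names may vary by dbt version).
        if state in ("success", "error", "failed"):
            return resp
        time.sleep(interval)  # wait before polling again

The `interval` and `logs` parameters mirror the solids' config fields of the same names.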
wangrui1996/simple_pose_tensorflow
|
[
"6b97bf1cff7836eec638fe54e86e1ec203c0b79f"
] |
[
"utils/create_cpm_id_fulljoints.py"
] |
[
"import cv2\nimport cpm_utils\nimport numpy as np\nimport math\nimport tensorflow as tf\nimport time\nimport json\nimport random\nimport os\n\n\ntfr_file = 'cpm_sample_dataset.tfrecords'\ndataset_dir = '/Users/wangrui/Downloads/id_dataset/data'\n\nSHOW_INFO = False\nbox_size = 32\ninput_size = 256\nnum_of_joints = 6\ngaussian_radius = 2\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _float64_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\n# Create writer\ntfr_writer = tf.python_io.TFRecordWriter(tfr_file)\n\nimg_count = 0\nt1 = time.time()\nimages_dir = os.path.join(dataset_dir, \"images\")\nannotations_dir = os.path.join(dataset_dir, \"annotations\")\n# Loop each dir\nfor file_name in os.listdir(images_dir):\n\n image_path = os.path.join(images_dir, file_name)\n annotation_path = os.path.join(annotations_dir, \"{}.json\".format(file_name.split(\".\")[0]))\n\n #cur_img_path = dataset_dir + person_dir + '/imgs/' + line[0]\n cur_img = cv2.imread(image_path)\n print(image_path)\n inp_f = open(annotation_path, 'r')\n json_data = json.load(inp_f)\n #json_data[\"shapes\"] = \"\"\n\n def get_bbox_and_joints_from_json(shapes):\n assert len(shapes) == 2 # must be len is 2, one is bbox and annother is text\n assert shapes[0][\"label\"] in [\"zhen\",\"fan\",\"zheng\",\"text\"]\n assert shapes[1][\"label\"] in [\"zhen\",\"fan\",\"zheng\",\"text\"]\n bbox_idx = 0\n if shapes[bbox_idx][\"label\"]==\"text\":\n bbox_idx = 1\n\n bbox_point = shapes[bbox_idx][\"points\"]\n bx_x1, bx_y1 = bbox_point[0]\n bx_x2, bx_y2 = bbox_point[2]\n cur_id_bbox = [min([bx_x1, bx_x2]),\n min([bx_y1, bx_y2]),\n max([bx_x1, bx_x2]),\n max([bx_y1, bx_y2])]\n #if cur_hand_bbox[0] < 0: cur_hand_bbox[0] = 0\n #if cur_hand_bbox[1] < 0: cur_hand_bbox[1] = 0\n #if cur_hand_bbox[2] > cur_img.shape[1]: cur_hand_bbox[2] = cur_img.shape[1]\n #if cur_hand_bbox[3] > cur_img.shape[0]: cur_hand_bbox[3] = cur_img.shape[0]\n text_bx = shapes[1-bbox_idx][\"points\"]\n\n tmpx1,tmpy1 = text_bx[0]\n tmpx2,tmpy2 = text_bx[1]\n tmpx3,tmpy3 = text_bx[2]\n text_arr = np.array(text_bx).transpose()\n x_list = text_arr[0]\n y_list = text_arr[1]\n axis_1 = np.where(y_list==y_list.min())[0]\n axis_3 = np.where(x_list==x_list.max())[0]\n axis_2 = 3 - axis_1 - axis_3\n cur_id_joints_x = [-1 for _ in range(6)]\n cur_id_joints_y = [-1 for _ in range(6)]\n sub_add = 0\n is_zhen = True\n if shapes[bbox_idx][\"label\"] == \"fan\":\n is_zhen = False\n sub_add = 3\n\n cur_id_joints_x[sub_add] = x_list[axis_1][0]\n cur_id_joints_y[sub_add] = y_list[axis_1][0]\n cur_id_joints_x[sub_add+1] = x_list[axis_2][0]\n cur_id_joints_y[sub_add+1] = y_list[axis_2][0]\n cur_id_joints_x[sub_add+2] = x_list[axis_3][0]\n cur_id_joints_y[sub_add+2] = y_list[axis_3][0]\n return is_zhen, cur_id_bbox, cur_id_joints_x, cur_id_joints_y\n # Read in bbox and joints coords\n is_zhen, cur_id_bbox, cur_id_joints_x, cur_id_joints_y = get_bbox_and_joints_from_json(json_data[\"shapes\"])\n print(cur_id_bbox)\n if is_zhen:\n gauss_range_list = [0, 1, 2]\n else:\n gauss_range_list = [3, 4, 5]\n #exit(0)\n\n #cur_hand_joints_x = [float(i) for i in line[9:49:2]]\n #cur_hand_joints_x.append(float(line[7]))\n #cur_hand_joints_y = [float(i) for i in line[10:49:2]]\n #cur_hand_joints_y.append(float(line[8]))\n\n # Crop image and adjust joint coords\n cur_img = 
cur_img[int(float(cur_id_bbox[1])):int(float(cur_id_bbox[3])),\n int(float(cur_id_bbox[0])):int(float(cur_id_bbox[2])),\n :]\n\n #cv2.imshow(\"demo\", cur_img)\n cv2.imwrite(\"demo.jpg\", cur_img)\n #cv2.waitKey(0)\n #exit(0)\n cur_id_joints_x = [x - cur_id_bbox[0] for x in cur_id_joints_x]\n cur_id_joints_y = [x - cur_id_bbox[1] for x in cur_id_joints_y]\n\n # # Display joints\n # for i in range(len(cur_hand_joints_x)):\n # cv2.circle(cur_img, center=(int(cur_hand_joints_x[i]), int(cur_hand_joints_y[i])),radius=3, color=(255,0,0), thickness=-1)\n # cv2.imshow('', cur_img)\n # cv2.waitKey(500)\n # cv2.imshow('', cur_img)\n # cv2.waitKey(1)\n\n output_image = np.ones(shape=(input_size, input_size, 3)) * 128\n output_heatmaps = np.zeros((box_size, box_size, num_of_joints))\n\n # Resize and pad image to fit output image size\n if cur_img.shape[0] > cur_img.shape[1]:\n scale = input_size / (cur_img.shape[0] * 1.0)\n\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n\n # Resize image\n image = cv2.resize(cur_img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LANCZOS4)\n offset = image.shape[1] % 2\n\n output_image[:, int(input_size / 2 - math.floor(image.shape[1] / 2)): int(\n input_size / 2 + math.floor(image.shape[1] / 2) + offset), :] = image\n cur_id_joints_x = map(lambda x: x + (input_size / 2 - math.floor(image.shape[1] / 2)),\n cur_id_joints_x)\n scale = box_size / (cur_img.shape[0] * 1.0)\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n cur_id_joints_x = np.asarray(list(cur_id_joints_x))\n cur_id_joints_y = np.asarray(list(cur_id_joints_y))\n\n if SHOW_INFO:\n hmap = np.zeros((box_size, box_size))\n # Plot joints\n for i in range(num_of_joints):\n cv2.circle(output_image, (int(cur_id_joints_x[i]), int(cur_id_joints_y[i])), 3, (0, 255, 0), 2)\n\n # Generate joint gaussian map\n\n part_heatmap= cpm_utils.gaussian_img(box_size,box_size,cur_id_joints_x[i],cur_id_joints_y[i],1)\n #part_heatmap = utils.make_gaussian(output_image.shape[0], gaussian_radius,\n # [cur_hand_joints_x[i], cur_hand_joints_y[i]])\n hmap += part_heatmap * 50\n else:\n for i in range(num_of_joints):\n #output_heatmaps[:, :, i] = utils.make_gaussian(box_size, gaussian_radius,\n # [cur_hand_joints_x[i], cur_hand_joints_y[i]])\n if i in gauss_range_list:\n output_heatmaps[:, :, i]= cpm_utils.gaussian_img(box_size,box_size,cur_id_joints_x[i],cur_id_joints_y[i],1)\n\n else:\n scale = input_size / (cur_img.shape[1] * 1.0)\n\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n\n # Resize image\n image = cv2.resize(cur_img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LANCZOS4)\n offset = image.shape[0] % 2\n output_image[int(input_size / 2 - math.floor(image.shape[0] / 2)): int(\n input_size / 2 + math.floor(image.shape[0] / 2) + offset), :, :] = image\n cur_id_joints_y = map(lambda x: x + (input_size / 2 - math.floor(image.shape[0] / 2)),\n cur_id_joints_y)\n scale = box_size / (input_size * 1.0)\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n cur_id_joints_x = np.asarray(list(cur_id_joints_x))\n cur_id_joints_y = np.asarray(list(cur_id_joints_y))\n\n if SHOW_INFO:\n hmap = np.zeros((box_size, box_size))\n # Plot joints\n for i 
in range(num_of_joints):\n cv2.circle(output_image, (int(cur_id_joints_x[i]), int(cur_id_joints_y[i])), 3, (0, 255, 0), 2)\n\n # Generate joint gaussian map\n #part_heatmap = cpm_utils.make_gaussian(output_image.shape[0], gaussian_radius,\n # [cur_id_joints_x[i], cur_id_joints_y[i]])\n #hmap += part_heatmap * 50\n cv2.imshow(\"demo\", output_image)\n cv2.waitKey(0)\n else:\n for i in range(num_of_joints):\n if i in gauss_range_list:\n output_heatmaps[:, :, i] = cpm_utils.make_gaussian(box_size, gaussian_radius,\n [cur_id_joints_x[i], cur_id_joints_y[i]])\n if SHOW_INFO:\n cv2.imshow('', hmap.astype(np.uint8))\n cv2.imshow('i', output_image.astype(np.uint8))\n cv2.waitKey(0)\n\n # Create background map\n output_background_map = np.ones((box_size, box_size)) - np.amax(output_heatmaps, axis=2)\n output_heatmaps = np.concatenate((output_heatmaps, output_background_map.reshape((box_size, box_size, 1))),\n axis=2)\n # cv2.imshow('', (output_background_map*255).astype(np.uint8))\n # cv2.imshow('h', (np.amax(output_heatmaps[:, :, 0:21], axis=2)*255).astype(np.uint8))\n # cv2.waitKey(1000)\n\n\n coords_set = np.concatenate((np.reshape(cur_id_joints_x, (num_of_joints, 1)),\n np.reshape(cur_id_joints_y, (num_of_joints, 1))),\n axis=1)\n output_image_raw = output_image.astype(np.uint8).tostring()\n output_heatmaps_raw = output_heatmaps.flatten().tolist()\n output_coords_raw = coords_set.flatten().tolist()\n\n raw_sample = tf.train.Example(features=tf.train.Features(feature={\n 'image': _bytes_feature(output_image_raw),\n 'heatmaps': _float64_feature(output_heatmaps_raw)\n }))\n\n tfr_writer.write(raw_sample.SerializeToString())\n\n img_count += 1\n if img_count % 50 == 0:\n print('Processed %d images, took %f seconds' % (img_count, time.time() - t1))\n t1 = time.time()\n\ntfr_writer.close()\n"
] |
[
[
"tensorflow.train.BytesList",
"tensorflow.train.FloatList",
"tensorflow.train.Int64List",
"numpy.reshape",
"numpy.zeros",
"numpy.array",
"numpy.ones",
"tensorflow.python_io.TFRecordWriter",
"numpy.amax"
]
] |
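For reference, the TFRecords produced by the script above store a raw `image` bytes feature and a flattened float `heatmaps` feature (box_size x box_size maps for the 6 joints plus one background channel). A minimal decoding sketch in the same TF 1.x style the script uses, with shapes taken from its constants:

import tensorflow as tf

def parse_cpm_example(serialized, input_size=256, box_size=32, num_of_joints=6):
    # Feature spec mirrors the writer: raw uint8 image bytes + flattened float heatmaps.
    n_maps = num_of_joints + 1  # joint channels plus the background channel
    features = tf.parse_single_example(
        serialized,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'heatmaps': tf.FixedLenFeature([box_size * box_size * n_maps], tf.float32),
        })
    image = tf.decode_raw(features['image'], tf.uint8)
    image = tf.reshape(image, [input_size, input_size, 3])
    heatmaps = tf.reshape(features['heatmaps'], [box_size, box_size, n_maps])
    return image, heatmaps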
lexical-kenobi/Face-Vision-3D_Pose
|
[
"07eee33d09018c99251051a983d3842212177e5a",
"07eee33d09018c99251051a983d3842212177e5a"
] |
[
"utils/paf.py",
"utils/inference.py"
] |
[
"#!/usr/bin/env python3\n# coding: utf-8\n\nimport numpy as np\nfrom .ddfa import _parse_param\nfrom .params import u_filter, w_filter, w_exp_filter, std_size, param_mean, param_std\n\n\ndef reconstruct_paf_anchor(param, whitening=True):\n if whitening:\n param = param * param_std + param_mean\n p, offset, alpha_shp, alpha_exp = _parse_param(param)\n anchor = p @ (u_filter + w_filter @ alpha_shp + w_exp_filter @ alpha_exp).reshape(3, -1, order='F') + offset\n anchor[1, :] = std_size + 1 - anchor[1, :]\n return anchor[:2, :]\n\n\ndef gen_offsets(kernel_size):\n offsets = np.zeros((2, kernel_size * kernel_size), dtype=np.int)\n ind = 0\n delta = (kernel_size - 1) // 2\n for i in range(kernel_size):\n y = i - delta\n for j in range(kernel_size):\n x = j - delta\n offsets[0, ind] = x\n offsets[1, ind] = y\n ind += 1\n return offsets\n\n\ndef gen_img_paf(img_crop, param, kernel_size=3):\n \"\"\"Generate PAF image\n img_crop: 120x120\n kernel_size: kernel_size for convolution, should be even number like 3 or 5 or ...\n \"\"\"\n anchor = reconstruct_paf_anchor(param)\n anchor = np.round(anchor).astype(np.int)\n delta = (kernel_size - 1) // 2\n anchor[anchor < delta] = delta\n anchor[anchor >= std_size - delta - 1] = std_size - delta - 1\n\n img_paf = np.zeros((64 * kernel_size, 64 * kernel_size, 3), dtype=np.uint8)\n offsets = gen_offsets(kernel_size)\n for i in range(kernel_size * kernel_size):\n ox, oy = offsets[:, i]\n index0 = anchor[0] + ox\n index1 = anchor[1] + oy\n p = img_crop[index1, index0].reshape(64, 64, 3).transpose(1, 0, 2)\n\n img_paf[oy + delta::kernel_size, ox + delta::kernel_size] = p\n\n return img_paf\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python3\n# coding: utf-8\n__author__ = 'cleardusk'\n\nimport numpy as np\nfrom math import sqrt\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nfrom .ddfa import reconstruct_vertex\n\n\ndef get_suffix(filename):\n \"\"\"a.jpg -> jpg\"\"\"\n pos = filename.rfind('.')\n if pos == -1:\n return ''\n return filename[pos:]\n\n\ndef crop_img(img, roi_box):\n h, w = img.shape[:2]\n\n sx, sy, ex, ey = [int(round(_)) for _ in roi_box]\n dh, dw = ey - sy, ex - sx\n if len(img.shape) == 3:\n res = np.zeros((dh, dw, 3), dtype=np.uint8)\n else:\n res = np.zeros((dh, dw), dtype=np.uint8)\n if sx < 0:\n sx, dsx = 0, -sx\n else:\n dsx = 0\n\n if ex > w:\n ex, dex = w, dw - (ex - w)\n else:\n dex = dw\n\n if sy < 0:\n sy, dsy = 0, -sy\n else:\n dsy = 0\n\n if ey > h:\n ey, dey = h, dh - (ey - h)\n else:\n dey = dh\n\n res[dsy:dey, dsx:dex] = img[sy:ey, sx:ex]\n return res\n\n\ndef calc_hypotenuse(pts):\n bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]\n center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]\n radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2\n bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]\n llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)\n return llength / 3\n\n\ndef parse_roi_box_from_landmark(pts):\n \"\"\"calc roi box from landmark\"\"\"\n bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]\n center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]\n radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2\n bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]\n\n llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)\n center_x = (bbox[2] + bbox[0]) / 2\n center_y = (bbox[3] + bbox[1]) / 2\n\n roi_box = [0] * 4\n roi_box[0] = center_x - llength / 2\n roi_box[1] = center_y - llength / 2\n roi_box[2] = roi_box[0] + llength\n roi_box[3] = roi_box[1] + llength\n\n return roi_box\n\n\ndef parse_roi_box_from_bbox(bbox):\n left, top, right, bottom = bbox\n old_size = (right - left + bottom - top) / 2\n center_x = right - (right - left) / 2.0\n center_y = bottom - (bottom - top) / 2.0 + old_size * 0.14\n size = int(old_size * 1.58)\n roi_box = [0] * 4\n roi_box[0] = center_x - size / 2\n roi_box[1] = center_y - size / 2\n roi_box[2] = roi_box[0] + size\n roi_box[3] = roi_box[1] + size\n return roi_box\n\n\ndef dump_to_ply(vertex, tri, wfp):\n header = \"\"\"ply\n format ascii 1.0\n element vertex {}\n property float x\n property float y\n property float z\n element face {}\n property list uchar int vertex_indices\n end_header\"\"\"\n\n n_vertex = vertex.shape[1]\n n_face = tri.shape[1]\n header = header.format(n_vertex, n_face)\n\n with open(wfp, 'w') as f:\n f.write(header + '\\n')\n for i in range(n_vertex):\n x, y, z = vertex[:, i]\n f.write('{:.4f} {:.4f} {:.4f}\\n'.format(x, y, z))\n for i in range(n_face):\n idx1, idx2, idx3 = tri[:, i]\n f.write('3 {} {} {}\\n'.format(idx1 - 1, idx2 - 1, idx3 - 1))\n print('Dump tp {}'.format(wfp))\n\n\ndef dump_vertex(vertex, wfp):\n sio.savemat(wfp, {'vertex': vertex})\n print('Dump to {}'.format(wfp))\n\n\ndef _predict_vertices(param, roi_bbox, dense, transform=True):\n vertex = reconstruct_vertex(param, dense=dense)\n sx, sy, ex, ey = roi_bbox\n scale_x = (ex - sx) / 120\n scale_y = (ey - sy) / 120\n vertex[0, :] = vertex[0, :] * scale_x + sx\n vertex[1, :] = vertex[1, :] * scale_y + sy\n\n s = (scale_x + scale_y) / 2\n vertex[2, :] *= s\n\n return 
vertex\n\n\ndef predict_68pts(param, roi_box):\n return _predict_vertices(param, roi_box, dense=False)\n\n\ndef predict_dense(param, roi_box):\n return _predict_vertices(param, roi_box, dense=True)\n\n\ndef draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):\n \"\"\"Draw landmarks using matplotlib\"\"\"\n height, width = img.shape[:2]\n plt.figure(figsize=(12, height / width * 12))\n plt.imshow(img[:, :, ::-1])\n plt.subplots_adjust(left=0, right=1, top=1, bottom=0)\n plt.axis('off')\n\n if not type(pts) in [tuple, list]:\n pts = [pts]\n for i in range(len(pts)):\n if style == 'simple':\n plt.plot(pts[i][0, :], pts[i][1, :], 'o', markersize=4, color='g')\n\n elif style == 'fancy':\n alpha = 0.8\n markersize = 4\n lw = 1.5\n color = kwargs.get('color', 'w')\n markeredgecolor = kwargs.get('markeredgecolor', 'black')\n\n nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]\n\n # close eyes and mouths\n plot_close = lambda i1, i2: plt.plot([pts[i][0, i1], pts[i][0, i2]], [pts[i][1, i1], pts[i][1, i2]],\n color=color, lw=lw, alpha=alpha - 0.1)\n plot_close(41, 36)\n plot_close(47, 42)\n plot_close(59, 48)\n plot_close(67, 60)\n\n for ind in range(len(nums) - 1):\n l, r = nums[ind], nums[ind + 1]\n plt.plot(pts[i][0, l:r], pts[i][1, l:r], color=color, lw=lw, alpha=alpha - 0.1)\n\n plt.plot(pts[i][0, l:r], pts[i][1, l:r], marker='o', linestyle='None', markersize=markersize,\n color=color,\n markeredgecolor=markeredgecolor, alpha=alpha)\n\n if wfp is not None:\n plt.savefig(wfp, dpi=200)\n print('Save visualization result to {}'.format(wfp))\n if show_flg:\n plt.show()\n\n\ndef get_colors(image, vertices):\n [h, w, _] = image.shape\n vertices[0, :] = np.minimum(np.maximum(vertices[0, :], 0), w - 1) # x\n vertices[1, :] = np.minimum(np.maximum(vertices[1, :], 0), h - 1) # y\n ind = np.round(vertices).astype(np.int32)\n colors = image[ind[1, :], ind[0, :], :] # n x 3\n\n return colors\n\n\ndef write_obj_with_colors(obj_name, vertices, triangles, colors):\n triangles = triangles.copy() # meshlab start with 1\n\n if obj_name.split('.')[-1] != 'obj':\n obj_name = obj_name + '.obj'\n\n # write obj\n with open(obj_name, 'w') as f:\n # write vertices & colors\n for i in range(vertices.shape[1]):\n s = 'v {:.4f} {:.4f} {:.4f} {} {} {}\\n'.format(vertices[1, i], vertices[0, i], vertices[2, i], colors[i, 2],\n colors[i, 1], colors[i, 0])\n f.write(s)\n\n # write f: ver ind/ uv ind\n for i in range(triangles.shape[1]):\n s = 'f {} {} {}\\n'.format(triangles[0, i], triangles[1, i], triangles[2, i])\n f.write(s)\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.round",
"numpy.zeros"
],
[
"numpy.zeros",
"numpy.round",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"scipy.io.savemat",
"numpy.maximum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
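A small point about `crop_img` in the record above: rather than clamping an out-of-bounds ROI, it copies the valid region into a zero canvas, so the result always has the requested height and width. A usage sketch under that reading (the toy image and ROI are made up for illustration):

import numpy as np

# A tiny 4x4 grayscale "image"; the ROI deliberately extends past the top-left corner.
img = np.arange(16, dtype=np.uint8).reshape(4, 4)
roi_box = [-2, -2, 2, 2]  # sx, sy, ex, ey -> a 4x4 crop, half outside the image

res = crop_img(img, roi_box)  # crop_img as defined in utils/inference.py above
assert res.shape == (4, 4)
# The out-of-bounds top-left region stays zero; the bottom-right 2x2 holds img[0:2, 0:2].
assert (res[:2, :] == 0).all() and (res[2:, 2:] == img[:2, :2]).all()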
binary-husky/hmp2g
|
[
"1a4f4093cd296f07348f4db4c7503aca6e1fb05c"
] |
[
"ALGORITHM/conc_4hist_mathdb/net.py"
] |
[
"import math\nimport torch,time,random\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nfrom torch.nn.modules.linear import Linear\nfrom ..commom.attention import MultiHeadAttention\nfrom ..commom.norm import DynamicNorm\nfrom ..commom.mlp import LinearFinal, SimpleMLP, ResLinear\nfrom UTILS.colorful import print亮紫\nfrom UTILS.tensor_ops import my_view, Args2tensor_Return2numpy, Args2tensor, __hash__, __hashn__, pad_at_dim\nfrom UTILS.tensor_ops import _2cpu2numpy, one_hot_with_nan, gather_righthand, pt_inf\n\n\ndef weights_init(m):\n def init_Linear(m, final_layer=False):\n nn.init.orthogonal_(m.weight.data)\n if final_layer:nn.init.orthogonal_(m.weight.data, gain=0.01)\n if m.bias is not None: nn.init.uniform_(m.bias.data, a=-0.02, b=0.02)\n\n initial_fn_dict = {\n 'Net': None, 'DataParallel':None, 'BatchNorm1d':None, 'Concentration':None,\n 'Pnet':None,'Sequential':None,'DataParallel':None,'Tanh':None,\n 'ModuleList':None,'ModuleDict':None,'MultiHeadAttention':None,\n 'SimpleMLP':None,'Extraction_Module':None,'SelfAttention_Module':None,\n 'ReLU':None,'Softmax':None,'DynamicNorm':None,'EXTRACT':None,\n 'LinearFinal':lambda m:init_Linear(m, final_layer=True),\n 'Linear':init_Linear, 'ResLinear':None, 'LeakyReLU':None,'SimpleAttention':None\n }\n\n classname = m.__class__.__name__\n assert classname in initial_fn_dict.keys(), ('how to handle the initialization of this class? ', classname)\n init_fn = initial_fn_dict[classname]\n if init_fn is None: return\n init_fn(m)\n\nclass Concentration(nn.Module):\n def __init__(self, n_focus_on, h_dim, skip_connect=False, skip_connect_dim=0, adopt_selfattn=False):\n super().__init__()\n self.n_focus_on = n_focus_on\n self.skip_connect = skip_connect\n self.skip_dim = h_dim+skip_connect_dim\n self.CT_W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.CT_W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.CT_W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.CT_motivate_mlp = nn.Sequential(nn.Linear(h_dim * 2, h_dim), nn.ReLU(inplace=True))\n self.AT_forward_mlp = nn.Sequential(nn.Linear((n_focus_on+1)*self.skip_dim, h_dim), nn.ReLU(inplace=True))\n self.adopt_selfattn = adopt_selfattn\n if self.adopt_selfattn:\n self.AT_Attention = Extraction_Module(hidden_dim=self.skip_dim, activate_output=True)\n self.init_parameters()\n\n def init_parameters(self):\n for param in self.parameters():\n stdv = 1. 
/ math.sqrt(param.size(-1))\n param.data.uniform_(-stdv, stdv)\n\n def forward(self, vs, ve, ve_dead, skip_connect_ze=None, skip_connect_zs=None):\n mask = ve_dead\n Q = torch.matmul(vs, self.CT_W_query) \n K = torch.matmul(ve, self.CT_W_key) \n\n norm_factor = 1 / math.sqrt(Q.shape[-1])\n compat = norm_factor * torch.matmul(Q, K.transpose(2, 3)) \n assert compat.shape[-2] == 1\n compat = compat.squeeze(-2)\n compat[mask.bool()] = -math.inf\n score = F.softmax(compat, dim=-1)\n # nodes with no neighbours softmax to NaN; fix them to 0\n score = torch.nan_to_num(score, 0)\n # ----------- motivational branch -------------\n Va = torch.matmul(score.unsqueeze(-2), torch.matmul(ve, self.CT_W_val)) \n v_M = torch.cat((vs, Va), -1).squeeze(-2) \n v_M_final = self.CT_motivate_mlp(v_M)\n # ----------- forward branch -------------\n score_sort_index = torch.argsort(score, dim=-1, descending=True)\n score_sort_drop_index = score_sort_index[..., :self.n_focus_on]\n if self.skip_connect:\n ve = torch.cat((ve, skip_connect_ze), -1)\n vs = torch.cat((vs, skip_connect_zs), -1)\n ve_C = gather_righthand(src=ve, index=score_sort_drop_index, check=False)\n need_padding = (score_sort_drop_index.shape[-1] != self.n_focus_on)\n if need_padding:\n print('the n_focus param is larger than the input; advice: pad the observation instead of padding here')\n ve_C = pad_at_dim(ve_C, dim=-2, n=self.n_focus_on)\n v_C_stack = torch.cat((vs, ve_C), dim=-2)\n if self.adopt_selfattn:\n v_C_stack = self.AT_Attention(v_C_stack, mask=None)\n\n v_C_flat = my_view(v_C_stack, [0, 0, -1]); assert v_C_stack.dim()==4\n v_C_final = self.AT_forward_mlp(v_C_flat)\n return v_C_final, v_M_final\n\n\nclass SimpleAttention(nn.Module):\n def __init__(self, h_dim):\n super().__init__()\n self.W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.init_parameters()\n\n def init_parameters(self):\n for param in self.parameters():\n stdv = 1. 
/ math.sqrt(param.size(-1))\n param.data.uniform_(-stdv, stdv)\n\n def forward(self, k, q, v, mask=None):\n Q = torch.matmul(q, self.W_query) \n K = torch.matmul(k, self.W_key) \n V = torch.matmul(v, self.W_val)\n\n norm_factor = 1 / math.sqrt(Q.shape[-1])\n compat = norm_factor * torch.matmul(Q, K.transpose(2, 3)) \n if mask is not None: compat[mask.bool()] = -math.inf\n score = torch.nan_to_num(F.softmax(compat, dim=-1), 0)\n # ----------- motivational branch -------------\n return torch.matmul(score, V) \n\n\nclass Extraction_Module(nn.Module): # merge by MLP version\n def __init__(self, hidden_dim=128, activate_output=False):\n super().__init__()\n h_dim = hidden_dim\n from .foundation import AlgorithmConfig\n if AlgorithmConfig.use_my_attn:\n self.attn = SimpleAttention(h_dim=h_dim)\n print('use my attn')\n\n if activate_output:\n self.MLP = nn.Sequential(nn.Linear(h_dim * 2, h_dim), nn.ReLU(inplace=True))\n print(\"activate_output\")\n else:\n self.MLP = nn.Sequential(nn.Linear(h_dim * 2, h_dim))\n print(\"no activate_output\")\n\n def forward(self, agent_enc, mask=None):\n attn_out = self.attn(q=agent_enc, k=agent_enc, v=agent_enc, mask=mask)\n concated_attn_result = torch.cat(tensors=(agent_enc, attn_out), dim=-1)\n return self.MLP(concated_attn_result)\n\n\n\n\"\"\"\n network initialize\n\"\"\"\nclass Net(nn.Module):\n def __init__(self, \n rawob_dim, \n n_action):\n super().__init__()\n\n from .foundation import AlgorithmConfig\n\n self.use_normalization = AlgorithmConfig.use_normalization\n self.n_focus_on = AlgorithmConfig.n_focus_on\n self.actor_attn_mod = AlgorithmConfig.actor_attn_mod\n self.dual_conc = AlgorithmConfig.dual_conc\n self.n_entity_placeholder = AlgorithmConfig.n_entity_placeholder\n h_dim = AlgorithmConfig.net_hdim\n\n self.skip_connect = True\n self.n_action = n_action\n self.alternative_critic = AlgorithmConfig.alternative_critic\n self.exp_external_actdim = AlgorithmConfig.exp_external_actdim\n \n # observation normalization\n if self.use_normalization:\n self._batch_norm = DynamicNorm(rawob_dim, only_for_last_dim=True, exclude_one_hot=True, exclude_nan=True)\n\n self.AT_obs_encoder = nn.Sequential(nn.Linear(rawob_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim))\n\n if self.dual_conc:\n self.MIX_conc_core_f = Concentration(\n n_focus_on=self.n_focus_on-1, h_dim=h_dim, \n skip_connect=self.skip_connect, \n skip_connect_dim=rawob_dim, \n adopt_selfattn=self.actor_attn_mod)\n self.MIX_conc_core_h = Concentration(\n n_focus_on=self.n_focus_on, h_dim=h_dim, \n skip_connect=self.skip_connect, \n skip_connect_dim=rawob_dim, \n adopt_selfattn=self.actor_attn_mod)\n else:\n self.MIX_conc_core = Concentration(\n n_focus_on=self.n_focus_on, h_dim=h_dim, \n skip_connect=self.skip_connect, \n skip_connect_dim=rawob_dim, \n adopt_selfattn=self.actor_attn_mod)\n\n if self.exp_external_actdim:\n self.AT_hyper_act_net = nn.Sequential(\n Linear(9, 16),\n nn.ReLU(inplace=True),\n Linear(16, 16),\n nn.ReLU(inplace=True),\n Linear(16, 2)\n )\n tmp_dim = h_dim if not self.dual_conc else h_dim*2\n self.CT_get_value = nn.Sequential(Linear(tmp_dim, h_dim), nn.ReLU(inplace=True),Linear(h_dim, 1))\n self.CT_get_threat = nn.Sequential(Linear(tmp_dim, h_dim), nn.ReLU(inplace=True),Linear(h_dim, 1))\n\n if self.alternative_critic:\n self.CT_get_value_alternative_critic = nn.Sequential(Linear(tmp_dim, h_dim), nn.ReLU(inplace=True),Linear(h_dim, 1))\n\n # part\n self.check_n = self.n_focus_on*2\n self.AT_get_logit_db = nn.Sequential( \n nn.Linear(tmp_dim, h_dim), 
nn.ReLU(inplace=True),\n nn.Linear(h_dim, h_dim//2), nn.ReLU(inplace=True),\n LinearFinal(h_dim//2, self.n_action))\n\n self.is_recurrent = False\n self.apply(weights_init)\n return\n\n # two ways to support avail_act, but which one is better?\n def logit2act(self, logits_agent_cluster, eval_mode, test_mode, eval_actions=None, avail_act=None):\n if avail_act is not None: logits_agent_cluster = torch.where(avail_act>0, logits_agent_cluster, -pt_inf())\n act_dist = Categorical(logits = logits_agent_cluster)\n if not test_mode: act = act_dist.sample() if not eval_mode else eval_actions\n else: act = torch.argmax(act_dist.probs, axis=2)\n def _get_act_log_probs(distribution, action):\n return distribution.log_prob(action.squeeze(-1)).unsqueeze(-1)\n actLogProbs = _get_act_log_probs(act_dist, act) # the policy gradient loss will feed back from here\n # sum up the log prob of all agents\n distEntropy = act_dist.entropy().mean(-1) if eval_mode else None\n return act, actLogProbs, distEntropy, act_dist.probs\n\n\n @Args2tensor_Return2numpy\n def act(self, *args, **kargs):\n act = self._act if self.dual_conc else self._act_singlec\n return act(*args, **kargs)\n\n @Args2tensor\n def evaluate_actions(self, *args, **kargs):\n act = self._act if self.dual_conc else self._act_singlec\n return act(*args, **kargs, eval_mode=True)\n\n # div entity for DualConc models, distinguishing friend or hostile (present or history)\n def div_entity(self, mat, type=[(0,),# self\n (1, 2, 3, 4), # current\n (5, 6, 7, 8, 9),], # history\n n=10):\n assert n == self.n_entity_placeholder\n if mat.shape[-2]==n:\n tmp = (mat[..., t, :] for t in type)\n elif mat.shape[-1]==n:\n tmp = (mat[..., t] for t in type)\n return tmp\n\n def _act(self, obs, test_mode, eval_mode=False, eval_actions=None, avail_act=None):\n eval_act = eval_actions if eval_mode else None\n others = {}\n if self.use_normalization:\n # print(obs[0,0,0]) # 0 thread, 0 agent, 0 entity\n obs = self._batch_norm(obs)\n mask_dead = torch.isnan(obs).any(-1) # find dead agents\n obs = torch.nan_to_num_(obs, 0) # replace dead agents' obs, from NaN to 0\n v = self.AT_obs_encoder(obs)\n\n zs, ze_f, ze_h = self.div_entity(obs, n=self.n_entity_placeholder)\n vs, ve_f, ve_h = self.div_entity(v, n=self.n_entity_placeholder)\n _, ve_f_dead, ve_h_dead = self.div_entity(mask_dead, n=self.n_entity_placeholder)\n\n # concentration module\n vh_C, vh_M = self.MIX_conc_core_h(vs=vs, ve=ve_h, ve_dead=ve_h_dead, skip_connect_ze=ze_h, skip_connect_zs=zs)\n vf_C, vf_M = self.MIX_conc_core_f(vs=vs, ve=ve_f, ve_dead=ve_f_dead, skip_connect_ze=ze_f, skip_connect_zs=zs)\n\n # fuse forward path\n v_C_fuse = torch.cat((vf_C, vh_C), dim=-1) # (vs + vs + check_n + check_n)\n logits = self.AT_get_logit_db(v_C_fuse) # diverge here\n\n\n # motivation encoding fusion\n v_M_fuse = torch.cat((vf_M, vh_M), dim=-1)\n # motivation objectives\n value = self.CT_get_value(v_M_fuse)\n threat = self.CT_get_threat(v_M_fuse)\n\n assert not self.alternative_critic\n if self.exp_external_actdim:\n act, actLogProbs, distEntropy, probs = self.logit2act_exp(logits, zs=zs, eval_mode=eval_mode, \n test_mode=test_mode, eval_actions=eval_act, avail_act=avail_act)\n\n else:\n act, actLogProbs, distEntropy, probs = self.logit2act(logits, eval_mode=eval_mode, \n test_mode=test_mode, eval_actions=eval_act, avail_act=avail_act)\n\n def re_scale(t):\n SAFE_LIMIT = 11\n r = 1. /2. * SAFE_LIMIT\n return (torch.tanh_(t/r) + 1.) 
* r\n\n others['threat'] = re_scale(threat)\n if not eval_mode: return act, value, actLogProbs\n else: return value, actLogProbs, distEntropy, probs, others\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n @staticmethod\n def _get_act_log_probs(distribution, action):\n return distribution.log_prob(action.squeeze(-1)).unsqueeze(-1)\n\n\n def logit2act_exp(self, logits_agent_cluster, zs, eval_mode, test_mode, eval_actions=None, avail_act=None):\n '''\n logits_agent_detach = logits_agent_cluster.detach()\n zs_detach = zs.detach()\n zs_detach = zs_detach.squeeze(-2)\n hyper_obs = torch.cat((logits_agent_detach, zs_detach), axis=-1)\n hyper_act_logits = self.AT_hyper_act_net(hyper_obs)\n hyper_act_logits[..., 0] = 0\n hyper_act_logits[..., 1] = 10\n hyper_act_logits = hyper_act_logits.detach()\n # logits to acts, \n # input: hyper_act_logits\n # output: h_act\n h_dist = Categorical(logits = hyper_act_logits)\n if not test_mode: h_act = h_dist.sample() if not eval_mode else eval_actions\n else: h_act = torch.argmax(h_dist.probs, axis=2)\n # h_act[:] = 1\n SampleLogProb = self._get_act_log_probs(h_dist, torch.ones_like(h_act))\n hActLogProbsRef = self._get_act_log_probs(h_dist, h_act)\n # hActLogProbs = self._get_act_log_probs(h_dist, h_act)\n if avail_act is not None: logits_agent_cluster = torch.where(avail_act>0, logits_agent_cluster, -pt_inf())\n '''\n torch.nn.functional.gumbel_softmax()\n\n act_dist = Categorical(logits = logits_agent_cluster)\n act_sample = act_dist.sample() if not eval_mode else eval_actions\n act_argmax = torch.argmax(act_dist.probs, axis=2)\n # 1 means sample, 0 means greedy (argmax)\n act = act_sample # h_act: shape=($n_thread, $n_agent)\n # act = torch.where(h_act==1, act_sample, act_argmax) # h_act: shape=($n_thread, $n_agent)\n # sel_argmax = (act_argmax==act).unsqueeze(-1)\n\n actLogProbs01 = self._get_act_log_probs(act_dist, act) # the policy gradient loss will feed back from here\n # if not eval_mode:\n # which_step = zs[0,0,0,0].item()\n # n_thread = h_act.shape[0]\n # sel_hact1 = (h_act==1).sum().item()/n_thread\n # sel_hact0 = (h_act==0).sum().item()/n_thread\n # from config import GlobalConfig\n # GlobalConfig.data_logger.rec(sel_hact0, '%d-hact0'%round(which_step))\n # GlobalConfig.data_logger.rec(sel_hact1, '%d-hact1'%round(which_step))\n\n\n actLogProbs_notargmax = actLogProbs01 + 1\n #actLogProbs_argmax = torch.log( torch.exp(actLogProbs01 + SampleLogProb) + (1-torch.exp(SampleLogProb)) )\n actLogProbs = actLogProbs_notargmax # torch.where(sel_argmax, actLogProbs_argmax, actLogProbs_notargmax)\n\n # sum up the log prob of all agents\n distEntropy = act_dist.entropy().mean(-1) if eval_mode else None\n return act, actLogProbs, distEntropy, act_dist.probs"
] |
[
[
"torch.nn.functional.gumbel_softmax",
"torch.nn.Linear",
"torch.cat",
"torch.nn.modules.linear.Linear",
"torch.nn.init.uniform_",
"torch.isnan",
"torch.nan_to_num",
"torch.argsort",
"torch.nan_to_num_",
"torch.distributions.categorical.Categorical",
"torch.nn.ReLU",
"torch.tanh_",
"torch.nn.functional.softmax",
"torch.nn.init.orthogonal_",
"torch.Tensor",
"torch.matmul",
"torch.argmax"
]
] |
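One masking detail in the net.py record above is easy to miss: `Concentration.forward` sets masked attention logits to `-inf` before the softmax and then calls `torch.nan_to_num`, because a row whose logits are all `-inf` softmaxes to NaN. A standalone sketch of that trick (the tensor shapes are illustrative, not the model's):

import math
import torch
import torch.nn.functional as F

scores = torch.randn(2, 3)                  # (thread, entity) attention logits
dead = torch.tensor([[False, True, False],
                     [True,  True, True]])  # second row: every entity is dead

scores[dead] = -math.inf                    # masked entries receive zero attention weight
attn = F.softmax(scores, dim=-1)            # the fully-masked row becomes all NaN
attn = torch.nan_to_num(attn, 0.0)          # repair: dead rows contribute nothing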
KanaCS/transformers
|
[
"d4ba8ec0d56a332fdc66d0339db4dfe1a9af7af0"
] |
[
"src/transformers/models/gpt2/modeling_gpt2.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch OpenAI GPT-2 model.\"\"\"\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n SequenceClassifierOutputWithPast,\n)\nfrom ...modeling_utils import (\n Conv1D,\n PreTrainedModel,\n SequenceSummary,\n find_pruneable_heads_and_indices,\n prune_conv1d_layer,\n)\nfrom ...utils import logging\nfrom ...utils.model_parallel_utils import assert_device_map, get_device_map\nfrom .configuration_gpt2 import GPT2Config\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"GPT2Config\"\n_TOKENIZER_FOR_DOC = \"GPT2Tokenizer\"\n\nGPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"gpt2\",\n \"gpt2-medium\",\n \"gpt2-large\",\n \"gpt2-xl\",\n \"distilgpt2\",\n # See all GPT-2 models at https://huggingface.co/models?filter=gpt2\n]\n\n\ndef load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model\"\"\"\n try:\n import re\n\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(gpt2_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array.squeeze())\n\n for name, array in zip(names, arrays):\n name = name[6:] # skip \"model/\"\n name = name.split(\"/\")\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+\\d+\", m_name):\n scope_names = re.split(r\"(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"w\" or scope_names[0] == \"g\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"b\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"wpe\" or scope_names[0] == \"wte\":\n pointer = getattr(pointer, scope_names[0])\n pointer = getattr(pointer, \"weight\")\n else:\n pointer = getattr(pointer, scope_names[0])\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass Attention(nn.Module):\n def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):\n super().__init__()\n\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\n assert n_state % config.n_head == 0\n self.register_buffer(\n \"bias\", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)\n )\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4))\n self.n_head = config.n_head\n self.split_size = n_state\n self.scale = scale\n self.is_cross_attention = is_cross_attention\n if self.is_cross_attention:\n self.c_attn = Conv1D(2 * n_state, nx)\n self.q_attn = Conv1D(n_state, nx)\n else:\n self.c_attn = Conv1D(3 * n_state, nx)\n self.c_proj = Conv1D(n_state, nx)\n self.attn_dropout = nn.Dropout(config.attn_pdrop)\n self.resid_dropout = nn.Dropout(config.resid_pdrop)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.n_head, self.split_size // self.n_head, self.pruned_heads\n )\n index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])\n\n # Prune conv1d layers\n self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)\n self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)\n\n # Update hyper params\n self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))\n self.n_head = self.n_head - len(heads)\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):\n w = torch.matmul(q, k)\n if self.scale:\n w = w / (float(v.size(-1)) ** 0.5)\n nd, ns = w.size(-2), w.size(-1)\n\n if not self.is_cross_attention:\n # if only \"normal\" attention layer implements causal mask\n mask = self.bias[:, :, ns - nd : ns, :ns]\n w = torch.where(mask.bool(), w, 
self.masked_bias.to(w.dtype))\n\n if attention_mask is not None:\n # Apply the attention mask\n w = w + attention_mask\n\n w = nn.Softmax(dim=-1)(w)\n w = self.attn_dropout(w)\n\n # Mask heads if we want to\n if head_mask is not None:\n w = w * head_mask\n\n outputs = (torch.matmul(w, v),)\n if output_attentions:\n outputs += (w,)\n return outputs\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n if encoder_hidden_states is not None:\n assert hasattr(\n self, \"q_attn\"\n ), \"If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`.\"\n query = self.q_attn(hidden_states)\n key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)\n attention_mask = encoder_attention_mask\n else:\n query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)\n\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n if layer_past is not None:\n past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below\n key = torch.cat((past_key, key), dim=-1)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n present = (key.transpose(-2, -1), value) # transpose to have same shapes\n else:\n present = None\n\n attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)\n a = attn_outputs[0]\n\n a = self.merge_heads(a)\n a = self.c_proj(a)\n a = self.resid_dropout(a)\n\n return (a, present) + attn_outputs[1:] # a, present, (attentions)\n\n\nclass MLP(nn.Module):\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super().__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return self.dropout(h2)\n\n\nclass Block(nn.Module):\n def __init__(self, n_ctx, config, scale=False):\n super().__init__()\n hidden_size = config.n_embd\n inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size\n self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.attn = Attention(hidden_size, n_ctx, config, scale)\n self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n if config.add_cross_attention:\n self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)\n self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.mlp = MLP(inner_dim, config)\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n 
use_cache=False,\n output_attentions=False,\n ):\n attn_outputs = self.attn(\n self.ln_1(hidden_states),\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs[0] # output_attn: a, present, (attentions)\n outputs = attn_outputs[1:]\n # residual connection\n hidden_states = attn_output + hidden_states\n\n if encoder_hidden_states is not None:\n # add one self-attention block for cross-attention\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n cross_attn_outputs = self.crossattention(\n self.ln_cross_attn(hidden_states),\n attention_mask=attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n )\n attn_output = cross_attn_outputs[0]\n # residual connection\n hidden_states = hidden_states + attn_output\n outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights\n\n feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))\n # residual connection\n hidden_states = hidden_states + feed_forward_hidden_states\n\n if use_cache:\n outputs = (hidden_states,) + outputs\n else:\n outputs = (hidden_states,) + outputs[1:]\n\n return outputs # hidden_states, present, (attentions, cross_attentions)\n\n\nclass GPT2PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = GPT2Config\n load_tf_weights = load_tf_weights_in_gpt2\n base_model_prefix = \"transformer\"\n is_parallelizable = True\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass GPT2DoubleHeadsModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of models predicting if two sentences are consecutive or not.\n\n Args:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):\n Language modeling loss.\n mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):\n Multiple choice classification loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):\n Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).\n past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n Tuple of length :obj:`config.n_layers`, containing 
tuples of tensors of shape :obj:`(batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n :obj:`past_key_values` input) to speed up sequential decoding.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n mc_loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n mc_logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nGPT2_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nGPT2_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):\n :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else\n ``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be\n passed as ``input_ids``.\n\n Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n :obj:`past_key_values` output below). Can be used to speed up sequential decoding. 
The ``input_ids`` which\n have their past given to this model should not be passed as ``input_ids`` as they have already been\n computed.\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n\n If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see\n :obj:`past_key_values`).\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\nPARALLELIZE_DOCSTRING = r\"\"\"\n This is an experimental feature and is a subject to change at a moment's notice.\n\n Uses a device map to distribute attention modules of the model across several devices. If no device map is given,\n it will evenly distribute blocks across all devices.\n\n Args:\n device_map (:obj:`Dict[int, list]`, optional, defaults to None):\n A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. 
For reference, the gpt2 models have the\n following number of attention modules:\n\n - gpt2: 12\n - gpt2-medium: 24\n - gpt2-large: 36\n - gpt2-xl: 48\n\n Example::\n\n # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:\n model = GPT2LMHeadModel.from_pretrained('gpt2-xl')\n device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],\n\n 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],\n 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}\n model.parallelize(device_map)\n\"\"\"\nDEPARALLELIZE_DOCSTRING = r\"\"\"\n Moves the model to cpu from a model parallel state.\n\n Example::\n\n # On a 4 GPU machine with gpt2-large:\n model = GPT2LMHeadModel.from_pretrained('gpt2-large')\n device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],\n\n 1: [8, 9, 10, 11, 12, 13, 14, 15],\n 2: [16, 17, 18, 19, 20, 21, 22, 23],\n 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n\"\"\"\n\n\nclass GPT2Embeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n self.emb3 = nn.Embedding(config.emb3_size, config.n_embd)\n self.emb4 = nn.Embedding(config.emb4_size, config.n_embd)\n # token type embedding also -> wte\n self.drop = nn.Dropout(config.embd_pdrop)\n # note: unlike RobertaModel's embeddings, no layer norm is applied here\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, emb3_ids=None, emb4_ids=None, inputs_embeds=None\n ):\n # some processing of the forward inputs is still done in the Model class (it does not need to move here)\n # token embedding + position embedding\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n hidden_states = inputs_embeds + position_embeds\n\n # token type embedding\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n hidden_states = hidden_states + token_type_embeds\n\n # third embedding\n if emb3_ids is not None:\n emb3_embeds = self.emb3(emb3_ids)\n hidden_states = hidden_states + emb3_embeds\n\n # fourth embedding\n if emb4_ids is not None:\n emb4_embeds = self.emb4(emb4_ids)\n hidden_states = hidden_states + emb4_embeds\n\n # fifth embedding\n # dropout\n hidden_states = self.drop(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2Model(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n # NEW\n self.embeddings = GPT2Embeddings(config)\n # NEW\n # self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n # self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n # self.drop = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])\n self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\n def parallelize(self, device_map=None):\n # Check validity of device_map\n self.device_map = (\n get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map\n )\n 
assert_device_map(self.device_map, len(self.h))\n self.model_parallel = True\n self.first_device = \"cpu\" if \"cpu\" in self.device_map.keys() else \"cuda:\" + str(min(self.device_map.keys()))\n self.last_device = \"cuda:\" + str(max(self.device_map.keys()))\n # self.wte = self.wte.to(self.first_device)\n # self.wpe = self.wpe.to(self.first_device)\n self.embeddings = self.embeddings.to(self.first_device)\n # Load onto devices\n for k, v in self.device_map.items():\n for block in v:\n cuda_device = \"cuda:\" + str(k)\n self.h[block] = self.h[block].to(cuda_device)\n # ln_f to last\n self.ln_f = self.ln_f.to(self.last_device)\n\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\n def deparallelize(self):\n self.model_parallel = False\n self.device_map = None\n self.first_device = \"cpu\"\n self.last_device = \"cpu\"\n # self.wte = self.wte.to(\"cpu\")\n # self.wpe = self.wpe.to(\"cpu\")\n self.embeddings = self.embeddings.to(\"cpu\") # move the combined embedding module back to cpu as well\n for index in range(len(self.h)):\n self.h[index] = self.h[index].to(\"cpu\")\n self.ln_f = self.ln_f.to(\"cpu\")\n torch.cuda.empty_cache()\n\n def get_input_embeddings(self):\n return self.embeddings.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.wte = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.h[layer].attn.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"gpt2\",\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)\n position_ids = 
position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n attention_mask = attention_mask.view(batch_size, -1)\n # We create a 4D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is simpler than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask[:, None, None, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.add_cross_attention and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicates we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n\n # if inputs_embeds is None:\n # inputs_embeds = self.wte(input_ids)\n # position_embeds = self.wpe(position_ids)\n # hidden_states = inputs_embeds + position_embeds\n\n # if token_type_ids is not None:\n # token_type_embeds = self.wte(token_type_ids)\n # hidden_states = hidden_states + token_type_embeds\n\n # hidden_states = self.drop(hidden_states)\n \n # NEW \n hidden_states = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n )\n # NEW \n\n output_shape = input_shape + (hidden_states.size(-1),)\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n\n # Model parallel\n if self.model_parallel:\n torch.cuda.set_device(hidden_states.device)\n # Ensure layer_past is on same device as hidden_states (might not be correct)\n if layer_past is not None:\n layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)\n # Ensure that attention_mask is always on the same device as hidden_states\n if attention_mask is not None:\n attention_mask = attention_mask.to(hidden_states.device)\n if isinstance(head_mask, torch.Tensor):\n head_mask = head_mask.to(hidden_states.device)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + 
(hidden_states,)\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, use_cache, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n None,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)\n\n # Model Parallel: If it's the last layer for that device, put things on the next device\n if self.model_parallel:\n for k, v in self.device_map.items():\n if i == v[-1] and \"cuda:\" + str(k) != self.last_device:\n hidden_states = hidden_states.to(\"cuda:\" + str(k + 1))\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = hidden_states.view(*output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2LMHeadModel(GPT2PreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\n def parallelize(self, device_map=None):\n self.device_map = (\n get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))\n if device_map is None\n else device_map\n )\n assert_device_map(self.device_map, len(self.transformer.h))\n self.transformer.parallelize(self.device_map)\n self.lm_head = self.lm_head.to(self.transformer.first_device)\n self.model_parallel = True\n\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\n def deparallelize(self):\n self.transformer.deparallelize()\n self.transformer = self.transformer.to(\"cpu\")\n self.lm_head = self.lm_head.to(\"cpu\")\n self.model_parallel = False\n torch.cuda.empty_cache()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = 
new_embeddings\n\n def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"gpt2\",\n output_type=CausalLMOutputWithCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set\n ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.transformer.first_device)\n hidden_states = hidden_states.to(self.lm_head.weight.device)\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n cross_attentions=transformer_outputs.cross_attentions,\n )\n\n @staticmethod\n def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:\n \"\"\"\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n \"\"\"\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past\n )\n\n\n@add_start_docstrings(\n \"\"\"\nThe GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\nRocStories/SWAG tasks. The two heads are two linear layers. 
The language modeling head has its weights tied to the\ninput embeddings, the classification head takes as input the input of a specified classification token index in the\ninput sequence).\n\"\"\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2DoubleHeadsModel(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n config.num_labels = 1\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.multiple_choice_head = SequenceSummary(config)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n mc_token_ids=None,\n labels=None,\n mc_labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):\n Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -\n 1[``.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n ``labels = input_ids`` Indices are selected in ``[-1, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. 
(see\n `input_ids` above)\n\n Return:\n\n Example::\n\n >>> import torch\n >>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel\n\n >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n >>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2')\n\n >>> # Add a [CLS] to the vocabulary (we should train it also!)\n >>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})\n\n >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size\n\n >>> choices = [\"Hello, my dog is cute [CLS]\", \"Hello, my cat is cute [CLS]\"]\n >>> encoded_choices = [tokenizer.encode(s) for s in choices]\n >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]\n\n >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2\n >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1\n\n >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)\n >>> lm_logits = outputs.logits\n >>> mc_logits = outputs.mc_logits\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)\n\n mc_loss = None\n if mc_labels is not None:\n loss_fct = CrossEntropyLoss()\n mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))\n lm_loss = None\n if labels is not None:\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (lm_logits, mc_logits) + transformer_outputs[1:]\n if mc_loss is not None:\n output = (mc_loss,) + output\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return GPT2DoubleHeadsModelOutput(\n loss=lm_loss,\n mc_loss=mc_loss,\n logits=lm_logits,\n mc_logits=mc_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:\n \"\"\"\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n \"\"\"\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT2 Model transformer with a sequence classification head on top (linear layer).\n\n :class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as\n other causal models (e.g. 
GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each\n row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot\n guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take\n the last value in each row of the batch).\n \"\"\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2ForSequenceClassification(GPT2PreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.transformer = GPT2Model(config)\n self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"microsoft/dialogrpt\",\n output_type=SequenceClassifierOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n logits = self.score(hidden_states)\n\n if input_ids is not None:\n batch_size, sequence_length = input_ids.shape[:2]\n else:\n batch_size, sequence_length = inputs_embeds.shape[:2]\n\n assert (\n self.config.pad_token_id is not None or batch_size == 1\n ), \"Cannot handle batch sizes > 1 if no padding token is defined.\"\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if input_ids is not None:\n sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1\n else:\n sequence_lengths = -1\n logger.warning(\n f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be \"\n f\"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n )\n\n pooled_logits = logits[range(batch_size), sequence_lengths]\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutputWithPast(\n loss=loss,\n logits=pooled_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n"
] |
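The language-modeling heads in the code cell above shift the labels internally: the logits at position t are scored against the token at position t + 1, and labels set to -100 are excluded from the loss. A minimal, self-contained sketch of that shifting with illustrative shapes (nothing below comes from the repo itself; -100 is simply the default ignore_index of torch.nn.CrossEntropyLoss):

    import torch
    from torch.nn import CrossEntropyLoss

    vocab_size = 11
    lm_logits = torch.randn(2, 6, vocab_size)      # (batch, sequence, vocab)
    labels = torch.randint(0, vocab_size, (2, 6))
    labels[:, :2] = -100                           # e.g. mask prompt tokens from the loss

    # Shift so that tokens < n predict n, as in GPT2LMHeadModel.forward above
    shift_logits = lm_logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous()
    loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
    print(loss.item())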
[
[
"torch.nn.Linear",
"torch.cat",
"torch.ne",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"tensorflow.train.list_variables",
"torch.tensor",
"tensorflow.train.load_variable",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.from_numpy",
"torch.nn.Embedding"
]
] |
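The fork in the row above factors GPT-2's input embeddings into a GPT2Embeddings module that sums two extra embedding tables (emb3, emb4) into the hidden states before dropout. A stand-alone sketch of that additive pattern, with made-up sizes (the real config.emb3_size and config.emb4_size values are not part of this row):

    import torch
    import torch.nn as nn

    class ExtraEmbeddings(nn.Module):
        def __init__(self, vocab_size=100, n_positions=64, emb3_size=5, emb4_size=7, n_embd=16, pdrop=0.1):
            super().__init__()
            self.wte = nn.Embedding(vocab_size, n_embd)    # token embedding
            self.wpe = nn.Embedding(n_positions, n_embd)   # position embedding
            self.emb3 = nn.Embedding(emb3_size, n_embd)    # extra embedding #3
            self.emb4 = nn.Embedding(emb4_size, n_embd)    # extra embedding #4
            self.drop = nn.Dropout(pdrop)

        def forward(self, input_ids, position_ids, emb3_ids=None, emb4_ids=None):
            # all embeddings are simply summed, as in the GPT2Embeddings module above
            h = self.wte(input_ids) + self.wpe(position_ids)
            if emb3_ids is not None:
                h = h + self.emb3(emb3_ids)
            if emb4_ids is not None:
                h = h + self.emb4(emb4_ids)
            return self.drop(h)

    ids = torch.randint(0, 100, (2, 10))                   # (batch, sequence)
    pos = torch.arange(10).unsqueeze(0).expand(2, -1)
    emb = ExtraEmbeddings()
    print(emb(ids, pos, emb3_ids=torch.zeros(2, 10, dtype=torch.long)).shape)  # torch.Size([2, 10, 16])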
tttom/MacroMax
|
[
"e5f66252befb11e9fd906eb6e1a8a8c5eacf1451"
] |
[
"python/macromax/bound.py"
] |
[
"\"\"\"\nThe module provides the abstract :class:`Bound` to represent the boundary of the simulation, e.g. periodic, or\ngradually more absorbing. Specific boundaries are implemented as subclasses and can be used directly as the `bound`\nargument to :func:`macromax.solve` or :class:`macromax.Solution`. The precludes the inclusion of boundaries in the material description.\nIt is sufficient to leave some space for the boundaries.\n\"\"\"\nimport numpy as np\nfrom typing import Union, Sequence, Callable\n\nfrom macromax.utils.array import Grid\n\n\nclass Electric:\n \"\"\" Mixin for Bound to indicate that the electric susceptibility is non-zero.\"\"\"\n pass\n\n\nclass Magnetic:\n \"\"\" Mixin for Bound to indicate that the magnetic susceptibility is non-zero.\"\"\"\n pass\n\n\nclass Bound:\n \"\"\"\n A base class to represent calculation-volume-boundaries.\n Use the sub-classes for practical implementations.\n \"\"\"\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray] = None,\n thickness: Union[float, Sequence, np.ndarray] = 0.0,\n background_permittivity: complex = 1.0):\n \"\"\"\n :param grid: The Grid to which to the boundaries will be applied.\n :param thickness: The thickness as a scalar, vector, or 2d-array (axes x side). Broadcasting is used as necessary.\n :param background_permittivity: The background permittivity of the boundary (default: 1.0 for vacuum). This is\n only used when the absolute permittivity is requested.\n \"\"\"\n if not isinstance(grid, Grid):\n grid = Grid.from_ranges(grid)\n self.__grid = grid\n self.__thickness = np.broadcast_to(thickness, (self.grid.ndim, 2)).astype(float)\n self.__background_permittivity = background_permittivity\n\n @property\n def grid(self):\n \"\"\"The Grid object indicating the uniform Cartesian grid of the entire calculation volume.\"\"\"\n return self.__grid\n\n @property\n def thickness(self) -> np.ndarray:\n \"\"\"\n The thickness as a 2D-array `thickness[axis, front_back]` in meters.\n \"\"\"\n return self.__thickness.copy()\n\n @property\n def background_permittivity(self) -> float:\n \"\"\"A complex scalar indicating the permittivity of the background.\"\"\"\n return self.__background_permittivity\n\n @property\n def electric_susceptibility(self) -> np.ndarray:\n \"\"\"\n The electric susceptibility, chi_E, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return np.zeros(self.grid.shape)\n\n @property\n def permittivity(self) -> np.ndarray:\n \"\"\"\n The electric permittivity, epsilon, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return self.background_permittivity + self.electric_susceptibility\n\n @property\n def magnetic_susceptibility(self) -> np.ndarray:\n \"\"\"\n The magnetic susceptibility, chi_H, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return np.zeros(self.grid.shape)\n\n @property\n def permeability(self) -> np.ndarray:\n \"\"\"\n The magnetic permeability, mu, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return 1.0 + self.magnetic_susceptibility\n\n @property\n def inside(self) -> np.ndarray:\n \"\"\"Returns a boolean array indicating True for the voxels between the boundaries.\"\"\"\n result = np.asarray(False)\n for axis in range(self.grid.ndim):\n rng = self.grid[axis]\n result = result & (rng[0] + 
self.thickness[axis, 0] <= rng & rng < rng[-1] - self.thickness[axis, 1])\n return result\n\n @property\n def outside(self) -> np.ndarray:\n \"\"\"Returns a boolean array indicating True for the voxels in the boundaries, i.e. outside the area of the calculation.\"\"\"\n return np.logical_not(self.inside)\n\n\nclass PeriodicBound(Bound):\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray]):\n \"\"\"\n Constructs an object that represents periodic boundaries.\n \n :param grid: The Grid to which to the boundaries will be applied.\n \"\"\"\n super().__init__(grid=grid, thickness=0.0)\n\n\nclass AbsorbingBound(Bound, Electric):\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray], thickness: Union[float, Sequence, np.ndarray] = 0.0,\n extinction_coefficient_function: Union[Callable, Sequence, np.ndarray] = lambda rel_depth: rel_depth,\n background_permittivity: complex = 1.0):\n \"\"\"\n Constructs a boundary with depth-dependent extinction coefficient, kappa(rel_depth).\n\n :param grid: The Grid to which to the boundaries will be applied.\n :param thickness: The boundary thickness(es) in meters. This can be specified as a 2d-array [axis, side].\n Singleton dimensions are broadcast.\n :param extinction_coefficient_function: A function that returns the extinction coefficient as function of\n the depth in the boundary relative to the total thickness of the boundary.\n :param background_permittivity: (default: 1.0 for vacuum)\n \"\"\"\n super().__init__(grid=grid, thickness=thickness, background_permittivity=background_permittivity)\n self.__extinction_coefficient_functions = np.broadcast_to(extinction_coefficient_function, (self.grid.ndim, 2))\n\n @property\n def is_electric(self) -> bool:\n \"\"\"True when this boundary affects the permittivity, extinction coefficient, or complex refractive index.\"\"\"\n return True\n\n @property\n def extinction(self) -> np.ndarray:\n \"\"\"\n Determines the extinction coefficient, kappa, of the boundary on a plaid grid.\n The only non-zero values are found in the boundaries. 
At the corners, the maximum extinction value of the\n overlapping dimensions is returned.\n\n Note that the returned array may have singleton dimensions that must be broadcast!\n\n :return: An nd-array with the extinction coefficient, kappa.\n \"\"\"\n kappa = 0.0\n for axis, rng in enumerate(self.grid):\n for back_side in range(2):\n thickness = self.thickness[axis, back_side] * np.sign(self.grid.step[axis])\n if not back_side:\n new_depth_in_boundary = (rng.ravel()[0] + thickness) - rng\n else:\n new_depth_in_boundary = rng - (rng.ravel()[-1] - thickness)\n new_depth_in_boundary *= np.sign(self.grid.step[axis])\n in_boundary = new_depth_in_boundary > 0\n if np.any(in_boundary):\n rel_depth = in_boundary * new_depth_in_boundary / thickness\n kappa_function = self.__extinction_coefficient_functions[axis, back_side]\n kappa = np.maximum(kappa, kappa_function(rel_depth) * in_boundary)\n return kappa\n\n @property\n def electric_susceptibility(self) -> np.ndarray:\n \"\"\"\n The electric susceptibility, chi_E, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n n = np.lib.scimath.sqrt(self.background_permittivity)\n epsilon = (n + 1j * self.extinction)**2\n return epsilon - self.background_permittivity\n\n\nclass LinearBound(AbsorbingBound):\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray], thickness: Union[float, Sequence, np.ndarray] = 0.0,\n max_extinction_coefficient: Union[float, Sequence, np.ndarray] = 0.1,\n background_permittivity: complex = 1.0):\n \"\"\"\n Constructs a boundary with linearly increasing extinction coefficient, kappa.\n\n :param grid: The Grid to which to the boundaries will be applied.\n :param thickness: The boundary thickness(es) in meters. This can be specified as a 2d-array [axis, side].\n Singleton dimensions are broadcast.\n :param max_extinction_coefficient: The maximum extinction coefficient, reached at the deepest point of the\n boundary at the edge of the calculation volume.\n :param background_permittivity: (default: 1.0 for vacuum)\n \"\"\"\n # Define a linear function for every axis and every side\n kappa_function = np.vectorize(lambda kappa_max: lambda rel_depth: kappa_max * rel_depth)\\\n (max_extinction_coefficient)\n super().__init__(grid=grid, thickness=thickness,\n extinction_coefficient_function=kappa_function,\n background_permittivity=background_permittivity)\n\n"
] |
[
[
"numpy.logical_not",
"numpy.asarray",
"numpy.zeros",
"numpy.vectorize",
"numpy.sign",
"numpy.any",
"numpy.lib.scimath.sqrt",
"numpy.broadcast_to"
]
] |
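LinearBound in the row above ramps the extinction coefficient kappa linearly from 0 at the inner edge of each boundary to its maximum at the edge of the calculation volume. A hedged sketch of that ramp against a plain 1-D numpy range instead of macromax's Grid class (the Grid API is assumed here, so it is not used):

    import numpy as np

    def linear_extinction(rng, thickness, kappa_max):
        """kappa rises linearly from 0 at the inner edge of each boundary
        to kappa_max at the outer edge of the calculation volume."""
        depth_front = (rng[0] + thickness) - rng           # > 0 inside the front boundary
        depth_back = rng - (rng[-1] - thickness)           # > 0 inside the back boundary
        depth = np.maximum(depth_front, depth_back)
        rel_depth = np.clip(depth / thickness, 0.0, 1.0)   # 0 outside, up to 1 at the volume edge
        return kappa_max * rel_depth

    rng = np.linspace(0.0, 10e-6, 11)                      # 10 micron range, 1 micron steps
    print(linear_extinction(rng, thickness=2e-6, kappa_max=0.1))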
Lyuyangdaisy/DS_package
|
[
"ca0f220598ee156028646fbefccde08b2ece62ea",
"ca0f220598ee156028646fbefccde08b2ece62ea"
] |
[
"english/clustering/Kmeans/kmeans.py",
"english/calculator_for_rock/pyroxene/calculator.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.base import clone\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf = pd.read_excel('4.xlsx')\ndata = df.drop('O7', axis = 1)\nlabels = df['O7']\n\n#The core function, the grid search of unsupervised learning. Please add the required functions on this basis, and finally modify the function name to prevent conflicts\ndef KmeansGridsearch(dmodel, data, param_dict):\n \"\"\"\n dmodel: default model\n data:training data\n labels: real classification\n param_dict: hyperparameter combination dictionary\n \"\"\"\n output_models = []\n # create parameter grid\n # create hyperparametric grid\n param_grid = ParameterGrid(param_dict)\n \n # change the parameter attributes in dbscan according to the param_grid\n # modify the corresponding parameters of DBSCAN object according to the grid hyperparameters, train the model, and get the output data \n for param in param_grid:\n for key, value in param.items():\n setattr(dmodel,key,value)\n dmodel.fit(data)\n model = clone(dmodel)\n output_models.append(model)\n # If you have other data to output, just add it \n return (output_models)\n\n\nkmeans = KMeans()\n# select the parameters to be tested\nkmeans_dict = {'n_clusters':[3,4,5],\n 'init':['k-means++','random']}\noutput = KmeansGridsearch(kmeans,data,kmeans_dict)\n\n# Evaluation criteria for testing\ndef get_marks(estimator, data, name=None):\n \"\"\" To get the score, there are five kinds of actual classification information that are required to know the data set, and there are three kinds that are not required,\n refer to the readme.txt\n \n :param estimator: model\n :param name: initial method\n :param data: feature data set\n \"\"\"\n estimator.fit(data.astype(np.float64))\n print(30 * '*', name, 30 * '*')\n print(\" Model and parameters : \", estimator )\n print(\"Homogeneity Score : \", metrics.homogeneity_score(labels, estimator.labels_))\n print(\"Completeness Score : \", metrics.completeness_score(labels, estimator.labels_))\n print(\"V-Measure Score : \", metrics.v_measure_score(labels, estimator.labels_))\n print(\"Adjusted Rand Score : \", metrics.adjusted_rand_score(labels, estimator.labels_))\n print(\"Adjusted Mutual Info Score: \", metrics.adjusted_mutual_info_score(labels, estimator.labels_))\n print(\"Calinski Harabasz Score: \", metrics.calinski_harabasz_score(data, estimator.labels_))\n print(\"Silhouette Score : \", metrics.silhouette_score(data, estimator.labels_))\n\n# test results\nfor i in range(len(output)):\n get_marks(output[i], data=data, name=\"output\"+ str(i))\n\n# The test results are drawn into images for easy comparison\ndef plotit(estimator, data):\n plt.subplot(3,3,1)\n plt.subplots_adjust(0,0,2,2)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.homogeneity_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Homogeneity Score')\n plt.subplot(3,3,2)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.completeness_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Completeness Score')\n plt.subplot(3,3,3)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.v_measure_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n 
plt.plot(home)\n plt.title('V-Measure Score')\n plt.subplot(3,3,4)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.adjusted_rand_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Adjusted Rand Score')\n plt.subplot(3,3,5)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.adjusted_mutual_info_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Adjusted Mutual Info Score')\n plt.subplot(3,3,6)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.calinski_harabasz_score(data, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Calinski Harabasz Score')\n plt.subplot(3,3,7)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.silhouette_score(data, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Silhouette Score')\n\nplotit(output,data)\n\n\n\n\n\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport time\n\ndef find_num(major_el_name):\n \"\"\"\n Find the number of cations and the number of oxygen atoms of the principal element in the listing\n\n :param major_el_name: Listing of principal elements\n :return: Number of cations and number of oxygen atoms\n \"\"\"\n length = len(major_el_name)\n temp_ion_num = [re.findall('\\d?O', major_el_name[i], re.I) for i in range(length)]\n ion_num = []\n for i in range(length):\n ion_num.extend(temp_ion_num[i])\n for j in range(length):\n ion_num[j] = re.findall('\\d*', ion_num[j])[0]\n if ion_num[j] == '':\n ion_num[j] = 1\n else:\n ion_num[j] = int(ion_num[j])\n\n temp_oxy_num = [re.findall('O\\d?', major_el_name[i], re.I) for i in range(length)]\n oxy_num = []\n for i in range(length):\n oxy_num.extend(temp_oxy_num[i])\n for j in range(length):\n oxy_num[j] = re.findall('\\d*', oxy_num[j])[1]\n if oxy_num[j] == '':\n oxy_num[j] = 1\n else:\n oxy_num[j] = int(oxy_num[j])\n return ion_num, oxy_num\n\ndef find_ion(major_el_name):\n \"\"\"\n Find the cation in the principal element of the listing\n\n :param major_el_name: The name of the main element column\n :return: cations\n \"\"\"\n length = len(major_el_name)\n temp = []\n for i in range(length):\n a = re.findall('[a-zA-Z]{1,2}[\\d*]?', major_el_name[i], re.I)\n temp.append(a[0])\n ion = []\n for i in range(length):\n ion.extend(re.findall('[a-zA-Z]{1,2}', temp[i], re.I))\n return ion\n\ndef rel_mole_weight(ion, ion_num, oxy_num):\n \"\"\"\n Calculating Relative Molecular Weight\n\n :param ion: Each cation\n :param ion_num: Number of cations per cation\n :param oxy_num: The number of oxygen atoms corresponding to each cation\n :return: Relative molecular weight\n \"\"\"\n ion_dict = {'Si':28.085, 'Ti':47.867, 'Al':26.981, 'Cr':51.996, 'Fe':55.845, 'Mn':54.938,\n 'Mg':24.305, 'Ca':40.078, 'Na':22.989, 'K':39.098, 'P':30.974, 'Ni':58.693,\n 'Zn':65.390, 'Li':6.941, 'Zr':91.224, 'V':50.941, 'O':15.999}\n length = len(ion)\n if length != len(ion_num) or length != len(oxy_num):\n raise Exception\n\n relative_molecular_weight = []\n for i in range(length):\n a = ion_dict[ion[i]] * ion_num[i] + ion_dict['O'] * oxy_num[i]\n relative_molecular_weight.append(a)\n return relative_molecular_weight\n\ndef conver_ratio(rmw, oxy_num, mf):\n \"\"\"\n Calculation of conversion factors\n\n :param rmw: Relative molecular weight\n :param mf: Mass fraction of the principal element\n :return: Value of the conversion factor\n \"\"\"\n conversion_ratio = float(6) / sum(np.array(oxy_num) * np.array(mf) / np.array(rmw))\n return conversion_ratio\n\ndef output(cr, rmw, ion_num, mf):\n '''\n Calculate the output y for each cation\n\n :param cr: conversion factor\n :param rmw: Relative molecular weight\n :param ion_num: Number of cations\n :param mf: Mass fraction of the principal element\n :return: Output y of each cation\n '''\n y = cr * np.array(mf) * np.array(ion_num) / np.array(rmw)\n return y\n\ndef projection(index, target, y):\n '''\n Calculation of the projection value of a specific cation in the range of 0 to 1\n\n :param index: Index to the specified cation list\n :param target: List of specified cations\n :param y: Output value of each cation y\n :return: Projected values of specific cations\n '''\n sum = 0\n for i in range(len(target)):\n sum += np.array(y[target[i]])\n # sum = np.array(y[target[0]]) + np.array(y[target[1]]) + np.array(y[target[2]])\n proj = np.array(y[target[index]]) / sum\n return 
proj\n\n\ndef main():\n start_time = time.time()\n print(\"读取文件............\")\n data = pd.read_excel('cal_data_4th.xlsx') # Read the data set\n data.fillna(0, inplace=True) # The interpolation value is zero, there can be no null value\n\n data_columns = list(data.columns)\n # print(\"列名:\", data_columns) # Listing name: principal element\n\n ion_num, oxy_num = find_num(data_columns)\n ion = find_ion(data_columns)\n # print(\"阳离子: \", ion) # Demonstrate cation\n # print(\"阳离子个数: \", ion_num) # Number of cations\n # print(\"氧原子个数: \",oxy_num) # Number of oxygen atoms\n # print(\"维度:\", len(ion), len(ion_num), len(oxy_num)) # Compare whether the latitudes are the same\n\n rmw = rel_mole_weight(ion, ion_num, oxy_num)\n # print(\"相对分子质量:\", np.array(rmw)) # Relative molecular weight\n cr_columns = []\n data_num = data.shape[0]\n for i in range(data_num):\n a = data.iloc[i, :]\n cr = conver_ratio(rmw, oxy_num, a) # Calculation of conversion factors\n cr_columns.append(cr) # Preservation of conversion factors\n\n temp = []\n for j in range(data_num):\n b = data.iloc[j, :]\n y = output(cr_columns[j], rmw, ion_num, b) # Calculate the output value y for each cation\n temp.append(y) # Save output value y\n temp_df = pd.DataFrame(temp) # New DataFrame table to save the output value y\n temp_df.columns = ion # Adds a column name to the DataFrame table with output value y\n # print(temp_df)\n data['换算系数'] = np.array(cr_columns).reshape(-1, 1) # Add a new column [conversion factor] to the original data set [data]\n # print(data['换算系数'])\n # print(data) # Original data set with conversion factors\n new_df = pd.concat([data, temp_df], axis=1) # Merge the DataFrame table of the original dataset with the DataFrame table of the output value y\n # print(new_df) # Data set containing conversion coefficients and y columns of output values for each cation\n\n target = ['Fe', 'Mg', 'Ca'] # Selected cations to be projected\n df1 = new_df[target]\n target_list = []\n for i in range(data_num):\n y = df1.iloc[i, :]\n ls = []\n for j in range(len(target)):\n proj = projection(j, target, y) # Calculation of the projected value of a given cation\n ls.append(proj) # Save projection values\n #print(ls)\n target_list.append(ls)\n target_df = pd.DataFrame(target_list) # New DataFrame table to save projected values\n # print(pd.DataFrame(target_list))\n project_name = [target[i] + '_projected' for i in range(len(target))] # Constructing new listings with projected values\n target_df.columns = project_name # Adds a column name to a DataFrame table that holds the projected values\n final_df = pd.concat([new_df, target_df], axis=1) # Combination of raw data tables with conversion factors and output values and DF tables with stored projection values\n # print(final_df) # The final form we'll need\n\n final_df.to_csv(\"new_cal_data_4th.csv\") # Save the final table as a csv file\n\n end_time = time.time()\n print(\"程序运行时间:{}s\".format(end_time-start_time))\n \n \nif __name__ == '__main__':\n main()\n\n\n\n"
] |
[
[
"sklearn.metrics.adjusted_mutual_info_score",
"pandas.read_excel",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"sklearn.model_selection.ParameterGrid",
"sklearn.metrics.calinski_harabasz_score",
"sklearn.metrics.silhouette_score",
"sklearn.metrics.completeness_score",
"sklearn.metrics.homogeneity_score",
"sklearn.metrics.adjusted_rand_score",
"matplotlib.pyplot.axvline",
"sklearn.base.clone",
"sklearn.metrics.v_measure_score",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplot"
],
[
"pandas.DataFrame",
"pandas.read_excel",
"numpy.array",
"pandas.concat"
]
] |
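A note on the entry above: conver_ratio rescales an oxide-wt% analysis so the oxygen atoms sum to 6, and output then yields cations per formula unit. A minimal standalone sketch of that arithmetic, with hypothetical wt% values (atomic weights copied from the script's ion_dict):

# Hypothetical three-oxide analysis; mirrors conver_ratio/output above.
oxides = {
    # name: (relative molecular weight, cations per oxide, oxygens per oxide)
    "SiO2": (28.085 + 2 * 15.999, 1, 2),
    "MgO":  (24.305 + 15.999,     1, 1),
    "CaO":  (40.078 + 15.999,     1, 1),
}
wt_pct = {"SiO2": 55.0, "MgO": 20.0, "CaO": 25.0}  # made-up mass fractions

oxy_moles = sum(n_o * wt_pct[k] / m for k, (m, _, n_o) in oxides.items())
cr = 6.0 / oxy_moles  # conversion factor: scale oxygens to exactly 6
cations = {k: cr * wt_pct[k] * n_c / m for k, (m, n_c, _) in oxides.items()}
print(cr, cations)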
dsergio/data-modeling
|
[
"eff6a05c63df4cf8192169abdad01ab2b3854958"
] |
[
"python/kmeansDaysSinceStormNumObs.py"
] |
[
"\"\"\"\nAuthor: David Sergio\n\nKMeans Clustering\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n# import matplotlib as plt\nimport matplotlib.pyplot as plt\nfrom numpy import nan\n\nfrom sklearn.cluster import KMeans\n\n\nweather_observation_data_file = \"..\\\\transform\\\\stage6\\\\all_weather_obs_dates.csv\"\nweather_observation_data = pd.read_csv(weather_observation_data_file)\n\nfor col in weather_observation_data.columns: \n\tweather_observation_data[col] = weather_observation_data[col].replace(\"\", nan)\n\tweather_observation_data[col] = weather_observation_data[col].replace(\" \", nan)\n\tweather_observation_data[col] = weather_observation_data[col].replace(\"NA\", nan)\n\tweather_observation_data[col] = weather_observation_data[col].replace(\"?\", nan)\n\n\nquery = \"numberObservations > 0\"\nquery_result = weather_observation_data.query(query)\n\ncol_list = [\"days_since_storm\", \"numberObservations\"]\nquery_result = query_result[col_list]\n\nprint(query_result)\n\nkmeans = KMeans(n_clusters=3).fit(query_result)\ncentroids = kmeans.cluster_centers_\nprint(centroids)\n\nplt.scatter(query_result['days_since_storm'], query_result['numberObservations'], c= kmeans.labels_.astype(float), s=50, alpha=0.5)\nplt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)\nplt.xlabel(\"Days Since Storm\")\nplt.ylabel(\"Number Observations\")\nplt.title(\"KMeans Days Since Storm / Number Observations\")\nplt.savefig(\".\\\\plots\\\\kmeansDaysSinceStormNumObs.png\")"
] |
[
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"pandas.read_csv"
]
] |
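The script above (dsergio/data-modeling) hardcodes n_clusters=3. When k is not known in advance, a common quick check is the inertia elbow; a sketch on synthetic two-column data standing in for (days_since_storm, numberObservations):

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
# three synthetic blobs in place of the real query_result frame
X = np.vstack([rng.normal(loc, 1.0, size=(50, 2)) for loc in (0, 5, 10)])

for k in range(1, 7):
    km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X)
    print(k, round(km.inertia_, 1))  # within-cluster SSE; look for the elbow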
rupakgoyal/panel-
|
[
"4e1e01e1766ebfc2fc1efb409734fd51efc60c01"
] |
[
"panel/tests/pane/test_vega.py"
] |
[
"from __future__ import absolute_import\n\nimport pytest\n\ntry:\n import altair as alt\nexcept:\n alt = None\naltair_available = pytest.mark.skipif(alt is None, reason=\"requires altair\")\n\nimport numpy as np\n\nfrom panel.models.vega import VegaPlot\nfrom panel.pane import Pane, PaneBase, Vega\n\nblank_schema = {'$schema': ''}\n\nvega_example = {\n 'config': {\n 'mark': {'tooltip': None},\n 'view': {'height': 300, 'width': 400}\n },\n 'data': {'values': [{'x': 'A', 'y': 5},\n {'x': 'B', 'y': 3},\n {'x': 'C', 'y': 6},\n {'x': 'D', 'y': 7},\n {'x': 'E', 'y': 2}]},\n 'mark': 'bar',\n 'encoding': {'x': {'type': 'ordinal', 'field': 'x'},\n 'y': {'type': 'quantitative', 'field': 'y'}},\n '$schema': 'https://vega.github.io/schema/vega-lite/v3.2.1.json'\n}\n\nvega_inline_example = {\n 'config': {\n 'view': {'width': 400, 'height': 300},\n 'mark': {'tooltip': None}},\n 'data': {'name': 'data-2f2c0ff233b8675aa09202457ebe7506',\n 'format': {'property': 'features', 'type': 'json'}},\n 'mark': 'geoshape',\n 'encoding': {\n 'color': {\n 'type': 'quantitative',\n 'field': 'properties.percent_no_internet'\n }\n },\n 'projection': {'type': 'albersUsa'},\n '$schema': 'https://vega.github.io/schema/vega-lite/v3.2.1.json',\n 'datasets': {\n 'data-2f2c0ff233b8675aa09202457ebe7506': {\n 'type': 'FeatureCollection',\n 'features': [\n {'id': '0',\n 'type': 'Feature',\n 'properties': {\n 'name': 'Autauga County, Alabama',\n 'percent_no_internet': 0.2341122827016244,\n 'percent_no_internet_normalized': 0.2589760005042632},\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [[[-86.411786, 32.706342],\n [-86.411786, 32.410587],\n [-86.499417, 32.344863],\n [-86.817079, 32.339387],\n [-86.915664, 32.662526],\n [-86.411786, 32.706342]]]\n }\n }\n ]\n }\n }\n}\n\ndef test_get_vega_pane_type_from_dict():\n assert PaneBase.get_pane_type(vega_example) is Vega\n\n\ndef test_vega_pane(document, comm):\n pane = Pane(vega_example)\n\n # Create pane\n model = pane.get_root(document, comm=comm)\n assert isinstance(model, VegaPlot)\n\n expected = dict(vega_example, data={})\n\n assert dict(model.data, **blank_schema) == dict(expected, **blank_schema)\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['A', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n point_example = dict(vega_example, mark='point')\n point_example['data']['values'][0]['x'] = 'C'\n pane.object = point_example\n point_example['data'].pop('values')\n assert model.data == point_example\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['C', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n pane._cleanup(model)\n assert pane._models == {}\n\n\ndef test_vega_pane_inline(document, comm):\n pane = Pane(vega_inline_example)\n\n # Create pane\n model = pane.get_root(document, comm=comm)\n assert isinstance(model, VegaPlot)\n\n assert dict(model.data, **blank_schema) == dict(vega_inline_example, **blank_schema)\n assert model.data_sources == {}\n\n pane._cleanup(model)\n assert pane._models == {}\n \n\ndef altair_example():\n import altair as alt\n data = alt.Data(values=[{'x': 'A', 'y': 5},\n {'x': 'B', 'y': 3},\n {'x': 'C', 'y': 6},\n {'x': 'D', 'y': 7},\n {'x': 'E', 'y': 2}])\n chart = alt.Chart(data).mark_bar().encode(\n x='x:O', # specify ordinal data\n y='y:Q', # specify quantitative data\n )\n return chart\n\n\n@altair_available\ndef test_get_vega_pane_type_from_altair():\n assert 
PaneBase.get_pane_type(altair_example()) is Vega\n\n\n@altair_available\ndef test_altair_pane(document, comm):\n pane = Pane(altair_example())\n\n # Create pane\n model = pane.get_root(document, comm=comm)\n assert isinstance(model, VegaPlot)\n\n expected = dict(vega_example, data={})\n assert dict(model.data, **blank_schema) == dict(expected, **blank_schema)\n\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['A', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n chart = altair_example()\n chart.mark = 'point'\n chart.data.values[0]['x'] = 'C'\n pane.object = chart\n point_example = dict(vega_example, mark='point')\n assert dict(model.data, **blank_schema) == dict(point_example, **blank_schema)\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['C', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n pane._cleanup(model)\n assert pane._models == {}\n"
] |
[
[
"numpy.array"
]
] |
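The tests above exercise panel's Vega pane; minimal direct usage looks roughly like this (the spec reuses the bar-chart dict pattern from the test file):

import panel as pn
pn.extension('vega')  # load the Vega renderer in a notebook/server session

vega_spec = {
    "data": {"values": [{"x": "A", "y": 5}, {"x": "B", "y": 3}]},
    "mark": "bar",
    "encoding": {"x": {"type": "ordinal", "field": "x"},
                 "y": {"type": "quantitative", "field": "y"}},
    "$schema": "https://vega.github.io/schema/vega-lite/v3.2.1.json",
}
pane = pn.pane.Vega(vega_spec)  # same pane class the tests resolve to
# pane.servable()  # serve it, or display `pane` directly in a notebook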
Waziup/SoilMoistureML
|
[
"26c8ec9ff51889d51b8dbd76c26d7b168282b447"
] |
[
"competition/trans.py"
] |
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport pylab\nfrom scipy.signal import argrelextrema\nimport numpy as np\nfrom scipy import signal\n\n\n# Get Sensor observations\ndef getObs(myfile, name):\n\n # read the CSV file, parsing dates and dropping one useless collumn\n obs = pd.read_csv(myfile,\n sep=',',\n parse_dates=[0]).drop('date_received',1);\n \n #Force numerical values in the collumn 'value'\n obs['value'] = obs['value'].apply(pd.to_numeric, errors='coerce')\n\n #Remove duplicates (two measurements with the same time)\n obs2 = obs.drop_duplicates('timestamp', keep='last').set_index('timestamp')\n\n #Read dates with specific format\n obs2.index = pd.to_datetime(obs2.index, format='%Y-%m-%dT%H:%M:%S.%fZ')\n\n #Resample the values: every intermediate values will be averaged and place at the right time\n obs3 = obs2.resample('5min').mean()\n\n #Filling missing values\n obs3['value'].interpolate('time', inplace=True, limit_direction='both')\n\n #rounding all values to 2 decimal places\n obs4 = obs3.round({'value': 2})\n \n #NEw collumn for \"Irrigation\" (on/off)\n iri = name + ' irrigation'\n\n #Find the extremum of the humidity (when the humidity shoots up) => irrigation is on\n obs4[iri] = obs4.iloc[argrelextrema(obs4.value.values, np.less_equal, order=200)[0]]['value']\n\n #replace \"NaN\" with 0 \n obs4[iri] = obs4[iri].fillna(0)\n obs4.loc[obs4[iri] != 0, iri] = 1\n\n obs4.rename(columns={'value': name + ' humidity'}, inplace=True)\n return(obs4);\n\ndef getWeather(myfile, name):\n obs = pd.read_csv(myfile,\n sep=',',\n parse_dates=[4]).drop(['device_id', 'sensor_id', 'date_received'], axis=1);\n \n obs['value'] = obs['value'].apply(pd.to_numeric, errors='coerce')\n obs2 = obs.drop_duplicates('timestamp', keep='last').set_index('timestamp')\n obs2.index = pd.to_datetime(obs2.index, format='%Y-%m-%dT%H:%M:%SZ', errors='coerce')\n obs3 = obs2.resample('5min').mean()\n obs3['value'].interpolate('time', inplace=True, limit_direction='both')\n obs4 = obs3.round({'value': 2})\n obs4.rename(columns={'value': name}, inplace=True)\n return(obs4);\n\n#obs1 = getObs('UGB-PILOTS_Sensor81-SH.csv', 'Soil humidity 1');\n#obs1['obs1 min'] = obs1.soil[(obs1.soil.shift(1) > obs1.soil) & (obs1.soil.shift(-1) > obs1.soil)];\n\n#obs1['obs1 min'] = obs1.iloc[argrelextrema(obs1.soil.values, np.less_equal, order=300)[0]]['Soil humidity 1']\n\n\nobs = [getObs('data/UGB-PILOTS_Sensor80-SH.csv', 'Plot 1'),\n getObs('data/UGB-PILOTS_Sensor81-SH.csv', 'Plot 2'),\n getObs('data/UGB-PILOTS_Sensor82-SH.csv', 'Plot 3'),\n getObs('data/UGB-PILOTS_Sensor84-SH.csv', 'Plot 4'),\n getWeather('data/TP.csv', 'Air temperature (C)'),\n getWeather('data/PA.csv', 'Pressure (KPa)'),\n getWeather('data/WS.csv', 'Wind speed (Km/h)'),\n getWeather('data/WG.csv', 'Wind gust (Km/h)'),\n getWeather('data/WD.csv', 'Wind direction (Deg)'),\n ]\n\nmerged = reduce(lambda x, y: pd.merge(x, y, on = 'timestamp', how='outer'), obs)\n\n#merged = merged.drop(pd.date_range('2018-01-01', '2019-03-12'), errors='ignore')\n#merged = merged[~merged['timestamp'].isin(pd.date_range(start='20150210', end='20190312'))]\n\nmerged.fillna(0)\nmerged = merged.loc['2019-02-23':'2019-06-20']\n#merged = merged.loc['2019-01-01':'2019-06-20']\n\n# Print the first 5 entries\nprint(merged.head(10));\n#print(obs1)\nmerged.to_csv('test2.csv');\n# Make the graphs a bit prettier\n#pd.set_option('display.mpl_style', 'default')\nplt.rcParams['figure.figsize'] = (18, 5)\n#plt.scatter(merged.index, merged['Plot 4 irrigation'])\nplt.plot(merged.index, 
merged[['Plot 3 humidity','Plot 3 irrigation']])\n\n# Plot the first 500 entries with selected columns\n#merged[['Soil humidity 1', 'Soil humidity 2', 'Soil humidity 3', 'Soil humidity 4']].plot();\n#merged[['Soil humidity 2', 'obs1 min']].plot();\nplt.show()\n"
] |
[
[
"pandas.to_datetime",
"pandas.merge",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"pandas.read_csv",
"scipy.signal.argrelextrema"
]
] |
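The irrigation flag in trans.py marks order-200 local minima of soil humidity via argrelextrema. A toy reproduction of that step on a synthetic dry-down trace (all values made up):

import numpy as np
import pandas as pd
from scipy.signal import argrelextrema

t = pd.date_range("2019-03-01", periods=300, freq="5min")
value = 60 - 0.05 * np.arange(300, dtype=float)  # slow drying
value[100] -= 15  # sharp dips, as just before an irrigation event
value[200] -= 15
df = pd.DataFrame({"value": value}, index=t)

# same idea as the script: wide-window local minima mark irrigation starts
idx = argrelextrema(df["value"].values, np.less_equal, order=50)[0]
df["irrigation"] = 0.0
df.iloc[idx, df.columns.get_loc("irrigation")] = 1.0
print(int(df["irrigation"].sum()), "points flagged as irrigation on")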
sunqiang85/DASA
|
[
"c4fdc61db77f59f84c68abec3b985fbd7dc29323"
] |
[
"r2r_src/preprocess_mini_dataset.py"
] |
[
"import os\nimport sys\nimport re\nsys.path.append('build')\nimport MatterSim\nimport string\nimport json\nimport time\nimport math\nfrom collections import Counter, defaultdict\nimport numpy as np\nimport networkx as nx\nfrom param import args\nimport torch.nn.functional as F\nfrom param import args\nfrom tqdm import tqdm\n\n\ndef dump_datasets(splits, scan_ids):\n \"\"\"\n\n :param splits: A list of split.\n if the split is \"something@5000\", it will use a random 5000 data from the data\n :return:\n \"\"\"\n import random\n data = []\n old_state = random.getstate()\n for split in splits:\n # It only needs some part of the dataset?\n components = split.split(\"@\")\n number = -1\n if args.mini:\n number = 40\n if len(components) > 1:\n split, number = components[0], int(components[1])\n\n # Load Json\n # if split in ['train', 'val_seen', 'val_unseen', 'test',\n # 'val_unseen_half1', 'val_unseen_half2', 'val_seen_half1', 'val_seen_half2']: # Add two halves for sanity check\n if \"/\" not in split:\n with open('tasks/R2R/data/R2R_%s.json' % split) as f:\n new_data = json.load(f)\n else:\n with open(split) as f:\n new_data = json.load(f)\n\n # Partition\n if number > 0:\n random.seed(0) # Make the data deterministic, additive\n random.shuffle(new_data)\n new_data = new_data[:number]\n\n\n # Join\n data += new_data\n random.setstate(old_state) # Recover the state of the random generator\n print('read data from %s with %d items' % (splits, len(data)))\n\n filter_data = [c for c in new_data if c['scan'] in scan_ids][:100]\n print(\"filter_data\", split, len(filter_data))\n with open('tasks/R2R/mini_data/R2R_%s.json' % split, 'w') as f:\n json.dump(filter_data, f, indent=1)\n return data\n\n\ndef read_img_features(feature_store, scan_ids):\n import csv\n import base64\n from tqdm import tqdm\n\n # print(\"Start loading the image feature\")\n start = time.time()\n csv.field_size_limit(sys.maxsize)\n\n if \"detectfeat\" in args.features:\n views = int(args.features[10:])\n else:\n views = 36\n\n args.views = views\n print(\"input scan_ids\", scan_ids)\n tsv_fieldnames = ['scanId', 'viewpointId', 'image_w', 'image_h', 'vfov', 'features']\n features = {}\n features_index = []\n features_value = []\n with tqdm(total=10567, position=0, leave=True, ascii=True) as pbar:\n pbar.set_description(\"Start loading the image feature\")\n with open(feature_store, \"r\") as tsv_in_file: # Open the tsv file.\n reader = csv.DictReader(tsv_in_file, delimiter='\\t', fieldnames=tsv_fieldnames)\n for item in reader:\n if item['scanId'] in scan_ids:\n # print(\"item['scanId']\",type(item['scanId']))\n long_id = \"{}_{}\".format(item['scanId'], item['viewpointId'])\n # print(\"long_id\", long_id)\n # print('scan_ids', scan_ids)\n\n features_index.append(long_id)\n ft= np.frombuffer(base64.decodestring(item['features'].encode('ascii')),\n dtype=np.float32).reshape((views, -1))\n features_value.append(ft)\n print(\"len(features\", len(features))\n\n print(\"Finish Loading the image feature from %s in %0.4f seconds\" % (feature_store, time.time() - start))\n np.save(\"tasks/R2R/mini_data/img_feature_index.npy\", features_index)\n np.save(\"tasks/R2R/mini_data/img_feature_value.npy\", features_value)\n index_set = set([c.split('_')[0] for c in features_index])\n print(\"len(index_set)\", len(index_set))\n print(index_set)\n\n\n return features\n\n\ndef dump_depth_features(scan_ids):\n key_array = np.load(args.depth_index_file)\n value_array = np.load(args.depth_value_file)\n\n filtered_keys = []\n filtered_values = []\n 
for key, value in zip(key_array, value_array):\n if key[0] in scan_ids:\n filtered_keys.append(key)\n filtered_values.append(value)\n np.save(\"tasks/R2R/mini_data/viewpointIds.npy\", np.array(filtered_keys))\n np.save(\"tasks/R2R/mini_data/ResNet-152-imagenet-depth.npy\", np.array(filtered_values))\n\n\n\nif __name__ == '__main__':\n print(\"start\")\n scan_map = {}\n total_scan_ids = []\n for split in ['train', 'val_seen', 'val_unseen', 'test', 'aug_paths']:\n with open('tasks/R2R/data/R2R_%s.json' % split) as f:\n new_data = json.load(f)\n scan_map[split] = set([c['scan'] for c in new_data])\n\n for k,v in scan_map.items():\n print(k,len(v))\n scan_ids = list(scan_map[split])[:1]\n dump_datasets([split], scan_ids)\n total_scan_ids = total_scan_ids + scan_ids\n total_scan_ids = list(set(total_scan_ids))\n print(\"len(total_scan_ids)\",len(total_scan_ids))\n print(total_scan_ids)\n\n feature_store = 'img_features/ResNet-152-imagenet.tsv'\n read_img_features(feature_store, total_scan_ids)\n dump_depth_features(total_scan_ids)"
] |
[
[
"numpy.array",
"numpy.load",
"numpy.save"
]
] |
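read_img_features above unpacks base64-packed float32 view features from the TSV. A round-trip sketch of that packing (the 36 x 2048 shape is illustrative; decodebytes is the current name for the removed decodestring):

import base64
import numpy as np

views, dim = 36, 2048  # illustrative feature shape
feat = np.random.rand(views, dim).astype(np.float32)

# pack the way the tsv 'features' column is packed: base64 over raw float32 bytes
packed = base64.b64encode(feat.tobytes()).decode("ascii")
# unpack exactly as read_img_features does
unpacked = np.frombuffer(base64.decodebytes(packed.encode("ascii")),
                         dtype=np.float32).reshape(views, -1)
assert np.array_equal(feat, unpacked)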
RUBAIATH-E-ULFATH/Phishing-URL-Detector-Software-Desktop
|
[
"02fc1522a23421334e548df77df6048dc48ca6a8"
] |
[
"machine learning model/src/inspect_model.py"
] |
[
"from ruba_project_1.src.build_dataset import *\nfrom sklearn.externals import joblib\nimport pickle\nimport numpy as np\n\n\ndef main(url):\n #url = \"http://www.facebook.com/\"\n\n X_test = feature_extract(url)\n print(X_test)\n X_test = (np.array(X_test)).reshape(1, -1)\n\n # Load the model from the file\n rf_from_joblib = joblib.load('C:/Users/Rubaiath/PycharmProjects/PhishingDetectionApp/ruba_project_1/notebook_files/model_rf_1.pkl')\n\n # Use the loaded model to make predictions\n # print(rf_from_joblib.predict(X_test)[0])\n status = rf_from_joblib.predict(X_test)[0]\n\n if status == 1:\n print(\"This is a phising website\")\n else:\n print(\"This is a genuine website\")\n return status, X_test\n\n\nif __name__ == \"__main__\":\n url = \"http://www.facebook.com/\"\n main(url)\n"
] |
[
[
"sklearn.externals.joblib.load",
"numpy.array"
]
] |
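inspect_model.py deserializes a pickled random forest with joblib. The save/load round trip it relies on, with a throwaway model and a placeholder path:

import joblib  # top-level package; sklearn.externals.joblib was removed in sklearn 0.23
import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.rand(20, 5)
y = (X[:, 0] > 0.5).astype(int)
model = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)

joblib.dump(model, "model_rf_1.pkl")    # what the training notebook would do
loaded = joblib.load("model_rf_1.pkl")  # what inspect_model.py does
print(loaded.predict(X[:1].reshape(1, -1))[0])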
cns-iu/HuBMAP---Hacking-the-Kidney
|
[
"1a41c887f8edb0b52f5afade384a17dc3d3efec4"
] |
[
"models/1-Tom/train/src/02_train/run.py"
] |
[
"import time\nimport pandas as pd\nimport numpy as np\nimport gc\nfrom os.path import join as opj\nimport matplotlib.pyplot as plt\nimport pickle\nfrom tqdm import tqdm\nimport torchvision\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom dataset import HuBMAPDatasetTrain\nfrom models import build_model\nfrom scheduler import CosineLR\nfrom utils import elapsed_time\nfrom lovasz_loss import lovasz_hinge\nfrom losses import criterion_lovasz_hinge_non_empty\nfrom metrics import dice_sum, dice_sum_2\nfrom get_config import get_config\nconfig = get_config()\n\noutput_path = config['OUTPUT_PATH']\nfold_list = config['FOLD_LIST']\npretrain_path_list = config['pretrain_path_list']\ndevice = config['device']\n\ndef feature_imshow(inp, title=None): \n \"\"\"Imshow for Tensor.\"\"\" \n inp = inp.detach().numpy().transpose((1, 2, 0)) \n # mean = np.array([0.5, 0.5, 0.5]) \n # std = np.array([0.5, 0.5, 0.5])\n MEAN = np.array([0.485, 0.456, 0.406])\n STD = np.array([0.229, 0.224, 0.225])\n # inp = STD * inp + MEAN \n inp = np.clip(inp, 0, 1) \n plt.imshow(inp)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n\ndef run(seed, data_df, pseudo_df, trn_idxs_list, val_idxs_list):\n log_cols = ['fold', 'epoch', 'lr',\n 'loss_trn', 'loss_val',\n 'trn_score', 'val_score', \n 'elapsed_time']\n \n criterion = nn.BCEWithLogitsLoss().to(device)\n criterion_clf = nn.BCEWithLogitsLoss().to(device)\n \n for fold, (trn_idxs, val_idxs) in enumerate(zip(trn_idxs_list, val_idxs_list)):\n if fold in fold_list:\n pass\n else:\n continue\n print('seed = {}, fold = {}'.format(seed, fold))\n \n log_df = pd.DataFrame(columns=log_cols, dtype=object)\n log_counter = 0\n\n #dataset\n trn_df = data_df.iloc[trn_idxs].reset_index(drop=True)\n val_df = data_df.iloc[val_idxs].reset_index(drop=True)\n \n #add pseudo label\n if pseudo_df is not None:\n trn_df = pd.concat([trn_df, pseudo_df], axis=0).reset_index(drop=True)\n \n # dataloader\n valid_dataset = HuBMAPDatasetTrain(val_df, config, mode='valid')\n valid_loader = DataLoader(valid_dataset, batch_size=config['test_batch_size'],\n shuffle=False, num_workers=4, pin_memory=True)\n \n #model\n model = build_model(model_name=config['model_name'],\n resolution=config['resolution'], \n deepsupervision=config['deepsupervision'], \n clfhead=config['clfhead'],\n clf_threshold=config['clf_threshold'],\n load_weights=True).to(device, torch.float32)\n # if pretrain_path_list is not None:\n # model.load_state_dict(torch.load(pretrain_path_list[fold]))\n # print(\"pre-trained models loaded\")\n \n# for p in model.parameters():\n# p.requires_grad = True\n \n optimizer = optim.Adam(model.parameters(), **config['Adam'])\n #optimizer = optim.RMSprop(model.parameters(), **config['RMSprop'])\n \n # Creates a GradScaler once at the beginning of training.\n scaler = torch.cuda.amp.GradScaler()\n \n if config['lr_scheduler_name']=='ReduceLROnPlateau':\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **config['lr_scheduler']['ReduceLROnPlateau'])\n elif config['lr_scheduler_name']=='CosineAnnealingLR':\n #scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])\n scheduler = CosineLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])\n elif config['lr_scheduler_name']=='OneCycleLR':\n scheduler = optim.lr_scheduler.OneCycleLR(optimizer, steps_per_epoch=len(train_loader),\n **config['lr_scheduler']['OneCycleLR'])\n \n #training\n val_score_best = -1e+99\n val_score_best2 = -1e+99\n 
loss_val_best = 1e+99\n epoch_best = 0\n counter_ES = 0\n trn_score = 0\n trn_score_each = 0\n start_time = time.time()\n for epoch in range(1, config['num_epochs']+1):\n if epoch < config['restart_epoch_list'][fold]:\n scheduler.step()\n continue\n \n# if elapsed_time(start_time) > config['time_limit']:\n# print('elapsed_time go beyond {} sec'.format(config['time_limit']))\n# break\n \n #print('lr = ', scheduler.get_lr()[0])\n print('lr : ', [ group['lr'] for group in optimizer.param_groups ])\n \n #train\n trn_df['binned'] = trn_df['binned'].apply(lambda x:config['binned_max'] if x>=config['binned_max'] else x)\n n_sample = trn_df['is_masked'].value_counts().min()\n trn_df_0 = trn_df[trn_df['is_masked']==False].sample(n_sample, replace=True)\n trn_df_1 = trn_df[trn_df['is_masked']==True].sample(n_sample, replace=True)\n \n n_bin = int(trn_df_1['binned'].value_counts().mean())\n trn_df_list = []\n for bin_size in trn_df_1['binned'].unique():\n trn_df_list.append(trn_df_1[trn_df_1['binned']==bin_size].sample(n_bin, replace=True))\n trn_df_1 = pd.concat(trn_df_list, axis=0)\n trn_df_balanced = pd.concat([trn_df_1, trn_df_0], axis=0).reset_index(drop=True)\n train_dataset = HuBMAPDatasetTrain(trn_df_balanced, config, mode='train')\n train_loader = DataLoader(train_dataset, batch_size=config['trn_batch_size'],\n shuffle=True, num_workers=4, pin_memory=True, drop_last=True) \n model.train()\n running_loss_trn = 0\n trn_score_numer = 0\n trn_score_denom = 0\n y_preds = []\n y_trues = []\n counter = 0\n tk0 = tqdm(train_loader, total=int(len(train_loader))) \n feature_test = []\n for i,data in enumerate(tk0):\n optimizer.zero_grad()\n with torch.cuda.amp.autocast():\n batch,c,h,w = data['img'].shape\n if config['clfhead']:\n y_clf = data['label'].to(device, torch.float32, non_blocking=True)\n if config['deepsupervision']:\n logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n if config['deepsupervision']:\n logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits = model(data['img'].to(device, torch.float32, non_blocking=True))\n y_true = data['mask'].to(device, torch.float32, non_blocking=True)\n dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(), \n y_true.detach().cpu().numpy(), \n dice_threshold=config['dice_threshold'])\n # print (\"C1\")\n trn_score_numer += dice_numer \n trn_score_denom += dice_denom\n y_true = y_true.unsqueeze(1)\n # get intermediate data\n # print(logits.shape)\n # print(y_true.shape)\n # print(model.x4.shape)\n feature_test.append(model.x4.cpu())#transpose(1,0).cpu()\n #out = torchvision.utils.make_grid(feature_test) \n #print(out.shape)\n #feature_imshow(out)\n # print (\"C2\")\n loss = criterion(logits,y_true)\n loss += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w))\n if config['deepsupervision']:\n for logits_deep in logits_deeps:\n loss += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true)\n if config['clfhead']:\n loss += criterion_clf(logits_clf.squeeze(-1),y_clf)\n # print (\"C3\")\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n #loss.backward()\n #optimizer.step()\n if config['lr_scheduler_name']=='OneCycleLR':\n scheduler.step()\n running_loss_trn += loss.item() * batch\n counter += 1\n # print (\"C5\")\n tk0.set_postfix(loss=(running_loss_trn / (counter * train_loader.batch_size) 
))\n # print (\"C6\")\n epoch_loss_trn = running_loss_trn / len(train_dataset)\n trn_score = trn_score_numer / trn_score_denom\n feature_merge = np.concatenate(feature_test, axis=0)\n print(feature_merge.shape)\n fileObject = open(\"featue_test\", 'wb')\n # print (\"C10\")\n pickle.dump(feature_merge, fileObject)\n # print (\"C7\")\n \n #release GPU memory cache\n del data, loss,logits,y_true\n torch.cuda.empty_cache()\n gc.collect()\n # print (\"C8\")\n\n #eval\n model.eval()\n loss_val = 0\n val_score_numer = 0\n val_score_denom = 0\n # print (\"C9\")\n y_preds = []\n y_trues = []\n tk1 = tqdm(valid_loader, total=int(len(valid_loader)))\n for i,data in enumerate(tk1):\n with torch.no_grad():\n batch,c,h,w = data['img'].shape\n if config['clfhead']:\n y_clf = data['label'].to(device, torch.float32, non_blocking=True)\n if config['deepsupervision']:\n logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n if config['deepsupervision']:\n logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits = model(data['img'].to(device, torch.float32, non_blocking=True))\n y_true = data['mask'].to(device, torch.float32, non_blocking=True)\n dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(), \n y_true.detach().cpu().numpy(), \n dice_threshold=config['dice_threshold'])\n val_score_numer += dice_numer \n val_score_denom += dice_denom\n y_true = y_true.unsqueeze(1)\n loss_val += criterion(logits,y_true).item() * batch\n loss_val += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w)).item() * batch\n if config['deepsupervision']:\n for logits_deep in logits_deeps:\n loss_val += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true).item() * batch\n if config['clfhead']:\n loss_val += criterion_clf(logits_clf.squeeze(-1), y_clf).item() * batch\n # print (\"Epoch About Done!\")\n \n \n #release GPU memory cache\n del data,logits,y_true\n torch.cuda.empty_cache()\n gc.collect()\n # print (\"EPoch Done!\")\n loss_val /= len(valid_dataset)\n val_score = val_score_numer / val_score_denom\n \n #logging\n log_df.loc[log_counter,log_cols] = np.array([fold, epoch,\n [ group['lr'] for group in optimizer.param_groups ],\n epoch_loss_trn, loss_val, \n trn_score, val_score,\n elapsed_time(start_time)], dtype='object')\n log_counter += 1\n \n #monitering\n print('epoch {:.0f} loss_trn = {:.5f}, loss_val = {:.5f}, trn_score = {:.4f}, val_score = {:.4f}'.format(epoch, epoch_loss_trn, loss_val, trn_score, val_score))\n if epoch%10 == 0:\n print(' elapsed_time = {:.1f} min'.format((time.time() - start_time)/60))\n \n if config['early_stopping']:\n if loss_val < loss_val_best: #val_score > val_score_best:\n val_score_best = val_score #update\n loss_val_best = loss_val #update\n epoch_best = epoch #update\n counter_ES = 0 #reset\n torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save\n print('model (best loss) saved')\n else:\n counter_ES += 1\n if counter_ES > config['patience']:\n print('early stopping, epoch_best {:.0f}, loss_val_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))\n break\n else:\n torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save\n \n if val_score > val_score_best2:\n val_score_best2 = val_score #update\n torch.save(model.state_dict(), 
output_path+f'model_seed{seed}_fold{fold}_bestscore.pth') #save\n print('model (best score) saved')\n \n if config['lr_scheduler_name']=='ReduceLROnPlateau':\n scheduler.step(loss_val)\n #scheduler.step(val_score)\n elif config['lr_scheduler_name']=='CosineAnnealingLR':\n scheduler.step()\n \n #for snapshot ensemble\n if config['lr_scheduler_name']=='CosineAnnealingLR':\n t0 = config['lr_scheduler']['CosineAnnealingLR']['t0']\n if (epoch%(t0+1)==0) or (epoch%(t0)==0) or (epoch%(t0-1)==0):\n torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_epoch{epoch}.pth') #save\n print(f'model saved epoch{epoch} for snapshot ensemble')\n \n #save result\n log_df.to_csv(output_path+f'log_seed{seed}_fold{fold}.csv', index=False)\n\n print('')\n \n #best model\n if config['early_stopping']&(counter_ES<=config['patience']):\n print('epoch_best {:d}, val_loss_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))\n \n del model\n torch.cuda.empty_cache()\n gc.collect()\n \n print('')\n"
] |
[
[
"numpy.concatenate",
"torch.sigmoid",
"numpy.array",
"torch.cuda.amp.autocast",
"pandas.concat",
"pandas.DataFrame",
"torch.no_grad",
"torch.cuda.empty_cache",
"matplotlib.pyplot.pause",
"torch.utils.data.DataLoader",
"numpy.clip",
"torch.nn.BCEWithLogitsLoss",
"torch.cuda.amp.GradScaler",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"matplotlib.pyplot.imshow"
]
] |
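run.py wraps every training step in torch.cuda.amp. The scaler/autocast skeleton it follows, stripped to the essentials (model, data, and loop counts are stand-ins):

import torch
from torch import nn

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(8, 1).to(device)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler(enabled=(device == "cuda"))
loss_fn = nn.BCEWithLogitsLoss()

for _ in range(3):  # stand-in for the epoch/minibatch loops
    x = torch.randn(4, 8, device=device)
    y = torch.randint(0, 2, (4, 1), device=device).float()
    opt.zero_grad()
    with torch.cuda.amp.autocast(enabled=(device == "cuda")):
        loss = loss_fn(model(x), y)
    scaler.scale(loss).backward()  # backward on the scaled loss
    scaler.step(opt)               # unscales gradients, then steps
    scaler.update()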
BennyZhang-Codes/LDCT-denoising-with-DL-Methods-and-Dicom-Viewer-by-Benny
|
[
"7e1312e8b2846a9a54ca11500db2dd8e305d1a3c"
] |
[
"LDCT_Denoising/Neural_Network/Loss_Func.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport pytorch_ssim\r\n\r\nclass MSE_Loss(nn.Module):\r\n def __init__(self):\r\n super(MSE_Loss, self).__init__()\r\n\r\n def forward(self, input, target):\r\n return F.mse_loss(input, target, reduction='mean')\r\n\r\nclass SSIM_Loss(nn.Module):\r\n def __init__(self):\r\n super(SSIM_Loss, self).__init__()\r\n self.ssim_loss = pytorch_ssim.SSIM()\r\n\r\n def forward(self, input, target):\r\n return -self.ssim_loss(input, target)\r\n"
] |
[
[
"torch.nn.functional.mse_loss"
]
] |
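SSIM_Loss negates the similarity so that gradient descent, which minimizes, drives SSIM toward 1. A toy illustration of the sign convention (fake_ssim is a stand-in for pytorch_ssim.SSIM, not the real metric):

import torch

def fake_ssim(a, b):
    # stand-in similarity: 1 for identical inputs, smaller when they differ
    return 1.0 - torch.mean((a - b) ** 2).clamp(max=2.0)

a = torch.rand(1, 1, 8, 8)
print(-fake_ssim(a, a))                   # -1.0: best case, minimal loss
print(-fake_ssim(a, torch.rand_like(a)))  # larger: mismatch is penalized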
vmware/iot-analytics-benchmark
|
[
"e7fd84af2298cffb85a78e0b3d3bbc342d42f556"
] |
[
"DL/python/send_images_cifar.py"
] |
[
"\"\"\"\nsend_images_cifar.py: sends labeled CIFAR10 images encoded as a string to an inferencing program\n\nUsage: python3 send_images_cifar.py [-h] [-s] [-i IMAGESPERSEC] [-t TOTALIMAGES] | nc <dest IP address> <dest port>\noptional arguments:\n -h, --help show this help message and exit\n -i IMAGESPERSEC, --imagesPerSec IMAGESPERSEC\n -t TOTALIMAGES, --totalImages TOTALIMAGES\n -s, --subtractMean\n\nCIFAR10 dataset from https://www.cs.toronto.edu/~kriz/cifar.html\n(Learning Multiple Layers of Features from Tiny Images, Alex Krizhevsky, 2009, https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf)\n\nCopyright (c) 2019 VMware, Inc.\n\nThis product is licensed to you under the Apache 2.0 license (the \"License\"). You may not use this product except in compliance with the Apache 2.0 License.\n\nThis product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file.\n\"\"\"\n\nimport argparse\nimport random\nfrom time import time, gmtime, strftime, sleep, monotonic\nimport sys\nfrom io import StringIO\nimport numpy as np\nfrom math import exp\nfrom keras.datasets import cifar10\n\nparser = argparse.ArgumentParser(description='Send CIFAR10 images encoded as strings')\nparser.add_argument(\"-i\", \"--imagesPerSec\", type=int, dest=\"imagesPerSec\", default=10)\nparser.add_argument(\"-t\", \"--totalImages\", type=int, dest=\"totalImages\", default=100)\nparser.add_argument(\"-s\", \"--subtractMean\", action=\"store_true\", dest=\"subtractMean\")\nargs = parser.parse_args()\nimages_per_second=args.imagesPerSec; total_images=args.totalImages; subtract_mean=args.subtractMean\n\ndef accurate_wait(wait_in_seconds):\n waitUntil = monotonic() + wait_in_seconds\n while (waitUntil > monotonic()):\n pass\n\n# Send stdout to stderr - cifar10.load_data() writes progress to stdout if data not cached locally\ntemp = sys.stdout\nsys.stdout = sys.stderr\nprint(\"%sZ: Loading and normalizing the CIFAR10 data\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime())), file=sys.stderr)\n(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()\nsys.stdout = temp\nn_images = test_images.shape[0]\nn_labels = test_labels.shape[0]\n\n# Normalize data.\ntrain_images = train_images.astype('float32') / 255\ntest_images = test_images.astype('float32') / 255\n\nif subtract_mean:\n train_mean = np.mean(train_images, axis=0)\n test_images -= train_mean\n\n# First, write labeled, unraveled images to a list\nlabeled_images = []\nfor i in range(n_images):\n string = StringIO()\n np.savetxt(string, test_images[i].ravel().reshape(1,3072), fmt='%f') # 3072 = 32x32x3\n # Insert (single character) label in front of string, cut final '\\n' from string\n labeled_images.append(str(test_labels.item(i)) + string.getvalue()[:-1])\n\nprint(\"%sZ: Sending %d images per second for a total of %d images\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime()), images_per_second, total_images), file=sys.stderr, end='')\nif subtract_mean:\n print(\" with pixel mean subtracted\", file=sys.stderr)\nelse:\n print(\"\", file=sys.stderr)\n\nfor i in range(total_images):\n print(labeled_images[i%n_images])\n sys.stdout.flush()\n # Use lognormal distribution to generate a positive random wait time with mean determined from images_per_second and long tail\n mean_wait = float(1.0/images_per_second)\n # Set standard deviation to half the mean_wait\n std_dev = mean_wait/2.0\n 
fudge_factor = .7 # Needed to reduce wait time to compensate for computation/network time - set empirically\n accurate_wait(fudge_factor*mean_wait*random.lognormvariate(mean_wait,std_dev)/exp(mean_wait + std_dev**2/2))\n if (((i+1) % images_per_second == 0) or (i == total_images-1)):\n print(\"%sZ: %d images sent\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime()), i+1), file=sys.stderr)\n\nprint(\"\") # Indicate end of send\nprint(\"%sZ: Image stream ended\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime())), file=sys.stderr)\n"
] |
[
[
"numpy.mean"
]
] |
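The wait-time line above divides each lognormal draw by exp(mu + sigma^2/2), the mean of a lognormal with parameters (mu, sigma), so the draws average out to roughly mean_wait before the empirical fudge factor is applied. A quick numerical check:

import random
from math import exp

mean_wait, std_dev = 0.1, 0.05  # as computed for imagesPerSec = 10
draws = [mean_wait * random.lognormvariate(mean_wait, std_dev)
         / exp(mean_wait + std_dev ** 2 / 2) for _ in range(100_000)]
print(sum(draws) / len(draws))  # close to mean_wait = 0.1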
chivalry/pmdarima
|
[
"83aaa8249fc93b8bc2311431af53d2d10d312eea"
] |
[
"pmdarima/preprocessing/exog/fourier.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom .base import BaseExogFeaturizer\nfrom ..base import UpdatableMixin\nfrom ._fourier import C_fourier_terms\n\n__all__ = ['FourierFeaturizer']\n\nsinpi = (lambda x: np.sin(np.pi * x))\ncospi = (lambda x: np.cos(np.pi * x))\n\n\n# Candidate for cythonization?\ndef _fourier_terms(p, times):\n # X = []\n # for e in p:\n # X.append(sinpi(2 * e * times))\n # X.append(cospi(2 * e * times))\n X = C_fourier_terms(p, times)\n return np.asarray(X).T\n\n\nclass FourierFeaturizer(BaseExogFeaturizer, UpdatableMixin):\n \"\"\"Fourier terms for modeling seasonality\n\n This transformer creates an exogenous matrix containing terms from a\n Fourier series, up to order ``k``. It is based on ``R::forecast code`` [1].\n In practice, it permits us to fit a seasonal time series *without* seasonal\n order (i.e., ``seasonal=False``) by supplying decomposed seasonal Fourier\n terms as an exogenous array.\n\n The advantages of this technique, per Hyndman [2]:\n\n * It allows any length seasonality\n * The seasonal pattern is smooth for small values of K (but more wiggly\n seasonality can be handled by increasing K)\n * The short-term dynamics are easily handled with a simple ARMA error\n\n The disadvantage is that the seasonal periodicity of the time series is\n assumed to be fixed.\n\n Functionally, this is a featurizer. This means that exogenous features are\n *derived* from ``y``, as opposed to transforming an existing exog array.\n It also behaves slightly differently in the :func:`transform` stage than\n most other exogenous transformers in that ``exog`` is not a required arg,\n and it takes ``**kwargs``. See the :func:`transform` docstr for more info.\n\n Parameters\n ----------\n m : int\n The seasonal periodicity of the endogenous vector, y.\n\n k : int, optional (default=None)\n The number of sine and cosine terms (each) to include. I.e., if ``k``\n is 2, 4 new features will be generated. ``k`` must not exceed ``m/2``,\n which is the default value if not set. The value of ``k`` can be\n selected by minimizing the AIC.\n\n Notes\n -----\n * Helpful for long seasonal periods (large ``m``) where ``seasonal=True``\n seems to take a very long time to fit a model.\n\n References\n ----------\n .. [1] https://github.com/robjhyndman/forecast/blob/master/R/season.R\n .. [2] https://robjhyndman.com/hyndsight/longseasonality/\n \"\"\"\n\n def __init__(self, m, k=None):\n self.m = m\n self.k = k\n\n def fit(self, y, exogenous=None):\n \"\"\"Fit the transformer\n\n Computes the periods of all the Fourier terms. The values of ``y`` are\n not actually used; only the periodicity is used when computing Fourier\n terms.\n\n Parameters\n ----------\n y : array-like or None, shape=(n_samples,)\n The endogenous (time-series) array.\n\n exogenous : array-like or None, shape=(n_samples, n_features), optional\n The exogenous array of additional covariates. If specified, the\n Fourier terms will be column-bound on the right side of the matrix.\n Otherwise, the Fourier terms will be returned as the new exogenous\n array.\n \"\"\"\n # Since we don't fit any params here, we can just check the params\n _, _ = self._check_y_exog(y, exogenous, null_allowed=True)\n\n m = self.m\n k = self.k\n if k is None:\n k = m // 2\n if 2 * k > m or k < 1:\n raise ValueError(\"k must be a positive integer not greater \"\n \"than m//2\")\n\n # Compute the periods of all Fourier terms. 
Since R allows multiple\n # seasonality and we do not, we can do this much more simply.\n p = ((np.arange(k) + 1) / m).astype(np.float64) # 1:K / m\n\n # If sinpi is 0... maybe blow up?\n # if abs(2 * p - round(2 * p)) < np.finfo(y.dtype).eps: # min eps\n\n self.p_ = p\n self.k_ = k\n self.n_ = y.shape[0]\n\n return self\n\n def transform(self, y, exogenous=None, n_periods=0, **_):\n \"\"\"Create Fourier term features\n\n When an ARIMA is fit with an exogenous array, it must be forecasted\n with one also. Since at ``predict`` time in a pipeline we won't have\n ``y`` (and we may not yet have an ``exog`` array), we have to know how\n far into the future for which to compute Fourier terms (hence\n ``n_periods``).\n\n This method will compute the Fourier features for a given frequency and\n ``k`` term. Note that the ``y`` values are not used to compute these,\n so this does not pose a risk of data leakage.\n\n Parameters\n ----------\n y : array-like or None, shape=(n_samples,)\n The endogenous (time-series) array. This is unused and technically\n optional for the Fourier terms, since it uses the pre-computed\n ``n`` to calculate the seasonal Fourier terms.\n\n exogenous : array-like or None, shape=(n_samples, n_features), optional\n The exogenous array of additional covariates. If specified, the\n Fourier terms will be column-bound on the right side of the matrix.\n Otherwise, the Fourier terms will be returned as the new exogenous\n array.\n\n n_periods : int, optional (default=0)\n The number of periods in the future to forecast. If ``n_periods``\n is 0, will compute the Fourier features for the training set.\n ``n_periods`` corresponds to the number of samples that will be\n returned.\n \"\"\"\n check_is_fitted(self, \"p_\")\n _, exog = self._check_y_exog(y, exogenous, null_allowed=True)\n\n if n_periods and exog is not None:\n if n_periods != exog.shape[0]:\n raise ValueError(\"If n_periods and exog are specified, \"\n \"n_periods must match dims of exogenous \"\n \"({0} != {1})\"\n .format(n_periods, exog.shape[0]))\n\n times = np.arange(self.n_ + n_periods, dtype=np.float64) + 1\n X_fourier = _fourier_terms(self.p_, times)\n\n # Maybe trim if we're in predict mode... in that case, we only keep the\n # last n_periods rows in the matrix we've created\n if n_periods:\n X_fourier = X_fourier[-n_periods:, :]\n\n if exog is None:\n exog = X_fourier\n else:\n exog = np.hstack([exog, X_fourier])\n\n return y, exog\n\n def update_and_transform(self, y, exogenous, **kwargs):\n \"\"\"Update the params and return the transformed arrays\n\n Since no parameters really get updated in the Fourier featurizer, all\n we do is compose forecasts for ``n_periods=len(y)`` and then update\n ``n_``.\n\n Parameters\n ----------\n y : array-like or None, shape=(n_samples,)\n The endogenous (time-series) array.\n\n exogenous : array-like or None, shape=(n_samples, n_features)\n The exogenous array of additional covariates.\n\n **kwargs : keyword args\n Keyword arguments required by the transform function.\n \"\"\"\n check_is_fitted(self, \"p_\")\n\n self._check_endog(y)\n _, Xt = self.transform(y, exogenous, n_periods=len(y), **kwargs)\n\n # Update this *after* getting the exog features\n self.n_ += len(y)\n return y, Xt\n"
] |
[
[
"numpy.sin",
"sklearn.utils.validation.check_is_fitted",
"numpy.asarray",
"numpy.arange",
"numpy.cos",
"numpy.hstack"
]
] |
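The featurizer emits the pairs sin(2*pi*j*t/m), cos(2*pi*j*t/m) for j = 1..k. Computing the same columns directly, as the commented-out pure-Python loop in the source would (m = 12, k = 2 chosen for illustration):

import numpy as np

m, k, n = 12, 2, 24
t = np.arange(n, dtype=np.float64) + 1
p = (np.arange(k) + 1) / m  # the periods 1/m .. k/m, as in fit()
cols = []
for e in p:
    cols.append(np.sin(2 * np.pi * e * t))
    cols.append(np.cos(2 * np.pi * e * t))
X_fourier = np.column_stack(cols)  # shape (n, 2*k); column-bind to exog
print(X_fourier.shape)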
msieb1/LTCN
|
[
"c9432891327774edf8193e885cc4f10f53fcaa60",
"c9432891327774edf8193e885cc4f10f53fcaa60"
] |
[
"utils/rot_utils_old.py",
"train_pose_euler_crop.py"
] |
[
"\nimport torch\nimport numpy as np\nimport math\nfrom ipdb import set_trace\n\n\n # Checks if a matrix is a valid rotation matrix.\ndef isRotationMatrix(R) :\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype = R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6\n \n\n\ndef norm_sincos(sin, cos):\n stacked_ = torch.cat((sin[None], cos[None]))\n stacked = stacked_ / torch.norm(stacked_)\n return stacked[0], stacked[1]\n\ndef sincos2rotm(a_pred):\n # copy of matlab \n # R = [ cy*cz sy*sx*cz-sz*cx sy*cx*cz+sz*sx \n # cy*sz sy*sx*sz+cz*cx sy*cx*sz-cz*sx \n # -sy cy*sx cy*cx] \n sinx, cosx = norm_sincos(a_pred[0], a_pred[1]) \n siny, cosy = norm_sincos(a_pred[2], a_pred[3]) \n sinz, cosz = norm_sincos(a_pred[4], a_pred[5]) \n r11 = cosy*cosz\n r12 = sinx*siny*cosz - cosx*sinz\n r13 = cosx*siny*cosz + sinx*sinz\n r21 = cosy*sinz\n r22 = sinx*siny*sinz + cosx*cosz\n r23 = cosx*siny*sinz - sinx*cosz\n r31 = -siny\n r32 = sinx*cosy\n r33 = cosx*cosy\n r1 = torch.cat([r11[None],r12[None],r13[None]])\n r2 = torch.cat([r21[None],r22[None],r23[None]])\n r3 = torch.cat([r31[None],r32[None],r33[None]])\n R = torch.stack((r1, r2, r3), dim=0)\n return R \n\ndef axisAngletoRotationMatrix(a):\n v = a[:-1]\n theta = a[-1]\n r11 = 1 + (-v[2]**2 - v[1]**2)*(1-torch.cos(theta)) + 0*torch.sin(theta) \n r12 = (v[0] * v[1])*(1-torch.cos(theta)) - v[2] * torch.sin(theta) \n r13 = (v[0] * v[2])*(1-torch.cos(theta)) + v[1] * torch.sin(theta)\n r21 = (v[0] * v[1])*(1-torch.cos(theta)) + v[2] * torch.sin(theta)\n r22 = 1 + (-v[2]**2 - v[0]**2)*(1-torch.cos(theta)) + 0 * torch.sin(theta)\n r23 = (v[1] * v[2])*(1-torch.cos(theta)) - v[0] * torch.sin(theta)\n r31 = (v[0] * v[2])*(1-torch.cos(theta)) - v[1] * torch.sin(theta)\n r32 = (v[1] * v[2])*(1-torch.cos(theta)) + v[0] * torch.sin(theta)\n r33 = 1 + (-v[1]**2 - v[0]**2)*(1-torch.cos(theta)) + 0 * torch.sin(theta)\n r1 = torch.cat([r11[None],r12[None],r13[None]])\n r2 = torch.cat([r21[None],r22[None],r23[None]])\n r3 = torch.cat([r31[None],r32[None],r33[None]])\n R = torch.stack((r1, r2, r3), dim=0)\n\n return R\n\n\n\n# Calculates Rotation Matrix given euler angles.\ndef eulerAnglesToRotationMatrix(theta, tensor=False) :\n \"\"\"\n Theta is given as euler angles Z-Y-X, corresponding to yaw, pitch, roll\n \"\"\" \n if not tensor:\n R_x = np.array([[1, 0, 0 ],\n [0, math.cos(theta[0]), -math.sin(theta[0]) ],\n [0, math.sin(theta[0]), math.cos(theta[0]) ]\n ])\n \n \n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],\n [0, 1, 0 ],\n [-math.sin(theta[1]), 0, math.cos(theta[1]) ]\n ]) \n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n \n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R\n\n\n \n# Calculates rotation matrix to euler angles\n# The result is the same as MATLAB except the order\n# of the euler angles ( x and z are swapped ).\n# Return X-Y-Z (roll pitch yaw)\ndef rotationMatrixToEulerAngles(R) :\n \n if not R.type() == 'torch.cuda.FloatTensor':\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n \n singular = sy < 1e-6\n \n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n return np.array([x, y, z])\n else:\n sy = torch.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n \n if not singular :\n x = torch.atan2(R[2,1] , R[2,2])\n y = torch.atan2(-R[2,0], sy)\n z = 
torch.atan2(R[1,0], R[0,0])\n else :\n x = torch.atan2(-R[1,2], R[1,1])\n y = torch.atan2(-R[2,0], sy)\n z = 0\n return torch.stack((x, y, z))\n\n# def create_random_rot(tensor=False):\n# \"\"\"\n# vector should be 6 dimensional\n# \"\"\"\n# # random unit vectors\n# u = np.random.rand(3)\n# v = np.random.rand(3)\n# u /= np.linalg.norm(u)\n# v /= np.linalg.norm(v)\n# # subtract (v*u)u from v and normalize\n# v -= v.dot(u)*u\n# v /= np.linalg.norm(v)\n# # build cross product\n# w = np.cross(u, v)\n# w /= np.linalg.norm(w)\n# R = np.hstack([u[:,None], v[:,None], w[:,None]])\n\n# if tensor:\n# return torch.Tensor(R)\n# else:\n# return R\n\n\n\ndef create_rot_from_vector(vector):\n \"\"\"\n vector should be 6 dimensional\n \"\"\"\n # random unit vectors\n u = vector[:3]\n v = vector[3:]\n u /= np.linalg.norm(u)\n v /= np.linalg.norm(v)\n # subtract (v*u)u from v and normalize\n v -= v.dot(u)*u\n v /= np.linalg.norm(v)\n # build cross product\n w = np.cross(u, v)\n w /= np.linalg.norm(w)\n R = np.hstack([u[:,None], v[:,None], w[:,None]])\n return R\n\n\n",
"import matplotlib\nmatplotlib.use('Agg')\nimport os\nfrom os.path import join\nimport argparse\nimport torch\nimport numpy as np\nimport pickle\nimport sys\nimport datetime\nsys.path.append('./utils')\n\nfrom torch import optim\nfrom torch import nn\nfrom torch import multiprocessing\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom utils.builders import SingleViewDepthTripletBuilder, MultiViewDepthTripletBuilder, MultiViewTripletBuilder, SingleViewTripletBuilder\nfrom utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped\nfrom utils.vocabulary import Vocabulary\nfrom ipdb import set_trace\nfrom sklearn.preprocessing import OneHotEncoder\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torchvision import transforms\nimport torchvision.utils as vutils\nimport torchvision.models as models\nfrom torchvision import datasets\nfrom tensorboardX import SummaryWriter\nimport matplotlib.pyplot as plt\nfrom shutil import copy2\nimport importlib\nfrom pyquaternion import Quaternion\n\nfrom models.pose_predictor_euler_crop import define_model\nfrom utils.plot_utils import plot_mean\nfrom utils.rot_utils_old import create_rot_from_vector, rotationMatrixToEulerAngles, \\\n isRotationMatrix, eulerAnglesToRotationMatrix, \\\n norm_sincos, sincos2rotm\nfrom utils.network_utils import loss_rotation, loss_euler_reparametrize, loss_axisangle, batch_size, apply,\\\n loss_quat, loss_quat_single, euler_XYZ_to_reparam, loss_quat_huber\nfrom utils.plot_utils import plot_mean\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= \"1,2,3\"\n \nIMAGE_SIZE = (299, 299)\nNUM_VIEWS = 1\nSAMPLE_SIZE = 40\nVAL_SEQS =5\nTRAIN_SEQS_PER_EPOCH = 80\nLOSS_FN = loss_euler_reparametrize\n\nEXP_ROOT_DIR = '/media/hdd/msieb/data/tcn_data/experiments'\nsys.path.append(EXP_ROOT_DIR)\n\nclass Trainer(object):\n def __init__(self, use_cuda, load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args, multi_gpu=True):\n self.use_cuda = use_cuda\n self.load_model = load_model\n self.model_folder = model_folder\n self.validation_directory = validation_directory\n self.train_directory = train_directory\n self.args = args\n\n self.builder = builder\n self.loss_fn = loss_fn\n self.logdir = join(model_folder, 'logs')\n self.writer = SummaryWriter(self.logdir)\n self.logger = Logger(self.args.log_file)\n self.itr = 0\n\n # Create Model\n self.model = self.create_model()\n if multi_gpu:\n self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))\n\n # Build validation set\n validation_builder = builder(self.args.n_views, validation_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)\n validation_set = [validation_builder.build_set() for i in range(VAL_SEQS)]\n validation_set = ConcatDataset(validation_set)\n self.len_validation_set = len(validation_set)\n del validation_builder\n self.validation_loader = DataLoader(\n validation_set, \n batch_size=8, \n shuffle=False, \n pin_memory=self.use_cuda,\n )\n self.validation_calls = 0\n # Build Training Set\n self.triplet_builder = builder(self.args.n_views, \\\n train_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)\n self.training_queue = multiprocessing.Queue(1)\n dataset_builder_process = multiprocessing.Process(target=self.build_set, args=(self.training_queue, self.triplet_builder, 
self.logger), daemon=True)\n dataset_builder_process.start()\n\n # Get Logger\n \n\n # Model specific setup\n # self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr_start, momentum=0.9)\n self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08)\n # This will diminish the learning rate at the milestones ///// 0.1, 0.01, 0.001 if not using automized scheduler\n self.learning_rate_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')\n # self.criterion = nn.CrossEntropyLoss()\n\n def train(self):\n\n trn_losses_ = []\n val_losses_= []\n val_acc_ = []\n trn_acc_ = []\n\n\n for epoch in range(self.args.start_epoch, self.args.start_epoch + self.args.epochs):\n print(\"=\" * 20)\n self.logger.info(\"Starting epoch: {0} \".format(epoch))\n\n dataset = self.training_queue.get()\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=self.args.minibatch_size, # batch_size(epoch, self.args.max_minibatch_size),\n shuffle=True,\n pin_memory=self.use_cuda,\n )\n \n train_embedding_features_buffer = []\n train_images_buffer = []\n train_labels = []\n correct = 0\n\n for _ in range(0, 1):\n losses = []\n\n for minibatch in data_loader:\n if self.use_cuda:\n anchor_frames = minibatch[0].cuda()\n #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix\n anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix\n # frames = Variable(minibatch)\n loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)\n losses.append(loss.data.cpu().numpy()) \n correct += (torch.norm(a_pred - anchor_quats, 2) < 1).data.cpu().numpy().sum() # print(gradcheck(loss_fn, (tcn, minibatch,))) \n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Add embeddings\n train_labels.append(anchor_quats)\n train_embedding_features_buffer.append(anchor_quats)\n train_images_buffer.append(anchor_frames)\n print(\"logging to {}\".format(self.logdir))\n\n self.writer.add_scalar('data/train_loss', np.mean(losses), self.itr)\n self.writer.add_scalar('data/train_correct', correct / len(data_loader), self.itr)\n self.itr += 1 \n trn_losses_.append(np.mean(losses))\n self.logger.info('train loss: ', np.mean(losses))\n self.logger.info(\"Training score correct {correct}/{total}\".format(\n correct=correct,\n total=len(data_loader)\n ))\n trn_acc_.append(correct)\n\n self.writer.add_image('frame_1', minibatch[0][0], self.itr)\n # self.writer.add_image('pose1', str(minibatch[1][0].data.detach().cpu().numpy()), self.itr)\n self.writer.add_image('frame_2', minibatch[0][1], self.itr)\n # self.writer.add_image('pose_2', str(minibatch[1][1].data.detach().cpu().numpy()), self.itr) \n self.writer.add_image('frame_3', minibatch[0][2], self.itr)\n # self.writer.add_image('pose_3', str(minibatch[1][2].data.detach().cpu().numpy()), self.itr)\n self.writer.add_image('frame_4', minibatch[0][3], self.itr)\n # self.writer.add_image('pose_4', str(minibatch[1][3].data.detach().cpu().numpy()), self.itr)\n # Get embeddings\n features = torch.cat(train_embedding_features_buffer[:30]).squeeze_()\n labels = torch.cat(train_labels[:30]).squeeze_()\n # features = train_embedding_features_buffer.view(train_embedding_features_buffer.shape[0]*train_embedding_features_buffer.shape[1], -1)\n # label = torch.Tensor(np.asarray(label_buffer))\n images = torch.cat(train_images_buffer[:30]).squeeze_()#/255.0, [0, 3, 1, 2]\n self.writer.add_embedding(features, metadata=labels, label_img=images, global_step=epoch)\n \n if epoch % 1 == 0:\n loss, correct = 
self.validate()\n self.learning_rate_scheduler.step(loss)\n val_losses_.append(loss)\n val_acc_.append(correct)\n\n if epoch % self.args.save_every == 0 and epoch != 0:\n self.logger.info('Saving model.')\n self.save_model(self.model, self.model_filename(self.args.model_name, epoch), join(self.model_folder, 'weight_files'))\n print(\"logging to {}\".format(self.logdir))\n\n plot_mean(trn_losses_, self.model_folder, 'train_loss')\n plot_mean(val_losses_, self.model_folder, 'validation_loss')\n plot_mean(trn_acc_, self.model_folder, 'train_acc')\n plot_mean(val_acc_, self.model_folder, 'validation_accuracy')\n # plot_mean(val_acc_no_margin_, self.model_folder, 'validation_accuracy_no_margin')\n\n def validate(self):\n # Run model on validation data and log results\n correct = 0\n losses = []\n for minibatch in self.validation_loader:\n if self.use_cuda:\n anchor_frames = minibatch[0].cuda()\n #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix\n anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix\n loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)\n losses.append(loss.data.cpu().numpy())\n correct += (torch.norm(a_pred - anchor_quats, 2) < 0.1).data.cpu().numpy().sum()\n\n self.writer.add_scalar('data/valid_loss', np.mean(losses), self.validation_calls)\n self.writer.add_scalar('data/validation_correct', correct / self.len_validation_set, self.validation_calls)\n\n self.validation_calls += 1\n loss = np.mean(losses)\n self.logger.info(\"Validation score correct {correct}/{total}\".format(\n correct=correct,\n total=self.len_validation_set\n ))\n self.logger.info('val loss: ',loss)\n return loss, correct\n \n\n def model_filename(self, model_name, epoch):\n return \"{model_name}-epoch-{epoch}.pk\".format(model_name=model_name, epoch=epoch)\n\n def save_model(self, model, filename, model_folder):\n ensure_folder(model_folder)\n model_path = os.path.join(model_folder, filename)\n torch.save(model.state_dict(), model_path)\n\n\n def build_set(self, queue, triplet_builder, log):\n while 1:\n datasets = []\n for i in range(TRAIN_SEQS_PER_EPOCH):\n dataset = triplet_builder.build_set()\n datasets.append(dataset)\n dataset = ConcatDataset(datasets)\n # log.info('Created {0} triplets'.format(len(dataset)))\n queue.put(dataset)\n\n def create_model(self):\n model = define_model(pretrained=True)\n # model = PosNet()\n if self.load_model:\n model_path = os.path.join(\n self.model_folder,\n self.load_model\n )\n # map_location allows us to load models trained on cuda to cpu.\n model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))\n\n if self.use_cuda:\n model = model.cuda()\n return model\n\n def batch_size(self, epoch, max_size):\n exponent = epoch // 100\n return min(max(2 ** (exponent), 2), max_size)\n\ndef main(args):\n # module = importlib.import_module(args.exp_name + '.config')\n # conf = getattr(module, 'Config_Isaac_Server')()\n # EXP_DIR = conf.EXP_DIR\n # MODEL_FOLDER = conf.MODEL_FOLDER\n\n\n # GPU Configuration\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n use_cuda = torch.cuda.is_available()\n\n # Load model\n model_folder = join(EXP_ROOT_DIR, args.exp_name, 'trained_models', args.run_name, time_stamped())\n if not os.path.exists(model_folder):\n os.makedirs(model_folder)\n\n # Get data loader builder and loss function\n builder = getattr(importlib.import_module('utils.builders'), args.builder)\n loss_fn = LOSS_FN\n\n # Define train and validation directories\n 
train_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/train/') \n validation_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/valid/') \n\n # Copies of executed config\n if not os.path.exists('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments'):\n os.makedirs('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments')\n copy2('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/train_tcn_no_captions.py', model_folder)\n copy2('/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/gps-lfd' + '/config.py', model_folder)\n \n # Build training class\n trainer = Trainer(use_cuda, args.load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args) \n trainer.train()\n\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--start-epoch', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=1000)\n parser.add_argument('--save-every', type=int, default=10)\n parser.add_argument('--load-model', type=str, required=False)\n \n parser.add_argument('--minibatch-size', type=int, default=8)\n parser.add_argument('--model-name', type=str, default='tcn')\n parser.add_argument('--log-file', type=str, default='./out.log')\n parser.add_argument('--lr-start', type=float, default=0.001)\n parser.add_argument('--n-views', type=int, default=NUM_VIEWS)\n parser.add_argument('--alpha', type=float, default=0.01, help='weighing factor of language loss to triplet loss')\n\n # Model parameters\n \n # Path parameters\n parser.add_argument('--exp-name', type=str, required=True)\n parser.add_argument('--run-name', type=str, required=True)\n parser.add_argument('--builder', type=str, required=True)\n\n args = parser.parse_args()\n print(args)\n\n main(args)\n"
] |
[
[
"torch.cos",
"torch.cat",
"numpy.dot",
"numpy.linalg.norm",
"torch.stack",
"numpy.array",
"torch.sqrt",
"torch.sin",
"torch.norm",
"numpy.identity",
"numpy.transpose",
"numpy.hstack",
"torch.atan2",
"numpy.cross"
],
[
"matplotlib.use",
"torch.utils.data.ConcatDataset",
"torch.multiprocessing.Process",
"torch.cat",
"torch.norm",
"numpy.mean",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"torch.multiprocessing.Queue"
]
] |
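A note on the scheduler pattern in the trainer above: it calls self.learning_rate_scheduler.step(loss) with the validation loss after each epoch. Below is a minimal, self-contained sketch of that torch.optim.lr_scheduler.ReduceLROnPlateau usage; the model, optimizer, and loss values are illustrative stand-ins, not part of the original repo:

import torch
import torch.nn as nn

# Reduce the learning rate when the validation metric stops improving,
# mirroring the trainer's learning_rate_scheduler.step(loss) call.
model = nn.Linear(10, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10)

for epoch in range(30):
    val_loss = 1.0 / (epoch + 1)  # stand-in for Trainer.validate()
    scheduler.step(val_loss)      # steps on the metric, not the epoch count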
yidinghe/machine-learning-100-days
|
[
"3050a5a5fd137316e22814c36ab122f0f7b5aec3"
] |
[
"day-2/Day2_Simple_Linear_Regression.py"
] |
[
"#Step 1: Data Preprocessing\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndataset = pd.read_csv('../datasets/studentscores.csv')\nX = dataset.iloc[:, : 1].values\nY = dataset.iloc[:, 1 ].values\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 1/4, random_state = 0)\nprint('X_train')\nprint(X_train)\nprint('X_test')\nprint(X_test)\nprint('Y_train')\nprint(Y_train)\nprint('Y_test')\nprint(Y_test)\n\n#Step 2: LinearRegression \nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor = regressor.fit(X_train, Y_train)\n\n#Step 3: Prediction Outcome\nY_pred = regressor.predict(X_test)\nprint('Y_pred')\nprint(Y_pred)\n\n#Step 4: Visulization\nplt.scatter(X_train, Y_train, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.show()\nplt.scatter(X_test, Y_test, color = 'red')\nplt.plot(X_test, regressor.predict(X_test), color = 'blue')\nplt.show()\n"
] |
[
[
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"pandas.read_csv"
]
] |
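The script above is the canonical split/fit/predict workflow; here is a condensed, runnable version on synthetic data (the CSV and its columns are replaced by generated numbers, so the shapes here are assumptions):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Synthetic stand-in for studentscores.csv: one feature, one target.
X = np.arange(20, dtype=float).reshape(-1, 1)
Y = 2.5 * X.ravel() + 5.0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=1/4, random_state=0)
regressor = LinearRegression().fit(X_train, Y_train)
print(regressor.predict(X_test))  # closely matches Y_test on this noiseless data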
martinjzhang/scDRS
|
[
"69a9fb4e50dbfa6b1afe0dd222b0d349c5db00eb",
"69a9fb4e50dbfa6b1afe0dd222b0d349c5db00eb"
] |
[
"compute_downstream.py",
"tests/test_CLI.py"
] |
[
"import scanpy as sc\nfrom anndata import read_h5ad\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nimport os\nimport fnmatch\nimport time\nimport argparse\nfrom statsmodels.stats.multitest import multipletests\n\n# Inhouse tools\nimport scdrs.util as util\nimport scdrs.data_loader as dl\nimport scdrs.method as md\n\n\n\"\"\"\n# Fixit\n\n\n# Todo\n- Implement a memory efficient version\n- \"gene_weight\" argument needs to be tested \n\n# Finished\n- Add --n_ctrl (default value 500) \n- Add --cov_file option to regress out covariates stored in COV_FILE before feeding into the score function \n- Add --ctrl_match_opt='mean_var': use mean- and var- matched control genes \n- Change name from scTRS to scdrs (072721)\n- Fixed: Warning for compute_score: Trying to set attribute `.X` of view, copying. (did: v_norm_score = v_raw_score.copy())\n\n\"\"\"\n\nVERSION = \"0.0.1\"\nVERSION = \"beta\"\n\n\ndef main(args):\n sys_start_time = time.time()\n\n MASTHEAD = \"******************************************************************************\\n\"\n MASTHEAD += \"* scDRS downsteam analyses \\n\"\n MASTHEAD += \"* Version %s\\n\" % VERSION\n MASTHEAD += \"* Martin Jinye Zhang and Kangcheng Hou\\n\"\n MASTHEAD += \"* HSPH / Broad Institute / UCLA\\n\"\n MASTHEAD += \"* MIT License\\n\"\n MASTHEAD += \"******************************************************************************\\n\"\n\n ###########################################################################################\n ###### Parse Options ######\n ###########################################################################################\n H5AD_FILE = args.h5ad_file\n SCORE_FILE = args.score_file\n CELLTYPE_LIST = [] if args.cell_type is None else args.cell_type.split(\",\")\n VARIABLE_LIST = [] if args.cell_variable is None else args.cell_variable.split(\",\")\n FLAG_GENE = args.flag_gene == \"True\"\n FLAG_FILTER = args.flag_filter == \"True\"\n FLAG_RAW_COUNT = args.flag_raw_count == \"True\"\n OUT_FOLDER = args.out_folder\n\n header = MASTHEAD\n header += \"Call: ./compute_downstream.py \\\\\\n\"\n header += \"--h5ad_file %s\\\\\\n\" % H5AD_FILE\n header += \"--score_file %s\\\\\\n\" % SCORE_FILE\n header += \"--cell_type %s\\\\\\n\" % args.cell_type\n header += \"--cell_variable %s\\\\\\n\" % args.cell_variable\n header += \"--flag_gene %s\\\\\\n\" % FLAG_GENE\n header += \"--flag_filter %s\\\\\\n\" % FLAG_FILTER\n header += \"--flag_raw_count %s\\\\\\n\" % FLAG_RAW_COUNT\n header += \"--out_folder %s\\n\" % OUT_FOLDER\n print(header)\n\n ###########################################################################################\n ###### Load data ######\n ###########################################################################################\n print(\"Load data:\")\n\n # Load .h5ad file\n adata = read_h5ad(H5AD_FILE)\n if FLAG_FILTER:\n sc.pp.filter_cells(adata, min_genes=250)\n sc.pp.filter_genes(adata, min_cells=50)\n if FLAG_RAW_COUNT:\n sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)\n sc.pp.log1p(adata)\n print(\n \"--h5ad_file loaded: n_cell=%d, n_gene=%d (sys_time=%0.1fs)\"\n % (adata.shape[0], adata.shape[1], time.time() - sys_start_time)\n )\n\n # Check CELLTYPE_LIST and VARIABLE_LIST\n temp_list = [x for x in CELLTYPE_LIST + VARIABLE_LIST if x not in adata.obs.columns]\n if len(temp_list) > 0:\n raise ValueError(\n \"Following columns not in adata.obs.columns: %s\" % \",\".join(temp_list)\n )\n else:\n print(\"cell_type and cell_variable are in adata.obs.columns\")\n\n # Load score file\n 
score_file_pattern = SCORE_FILE.split(os.path.sep)[-1]\n score_dir = SCORE_FILE.replace(os.path.sep + score_file_pattern, \"\")\n score_file_list = [\n x\n for x in os.listdir(score_dir)\n if fnmatch.fnmatch(x, score_file_pattern.replace(\"@\", \"*\"))\n ]\n print(\"Infer score_dir=%s\" % score_dir)\n print(\"Find %s score_files: %s\" % (len(score_file_list), \",\".join(score_file_list)))\n dic_score = {}\n for score_file in score_file_list:\n temp_df = pd.read_csv(\n score_dir + os.path.sep + score_file, sep=\"\\t\", index_col=0\n )\n n_cell_overlap = len(set(adata.obs_names) & set(temp_df.index))\n if n_cell_overlap < 0.1 * adata.shape[0]:\n print(\n \"WARNING: %s skipped, %d/%d cells in adata\"\n % (score_file, n_cell_overlap, adata.shape[0])\n )\n else:\n dic_score[score_file.replace(\".full_score.gz\", \"\")] = temp_df.copy()\n\n print(\n \"--score_file loaded: n_trait=%d, (sys_time=%0.1fs)\"\n % (len(dic_score), time.time() - sys_start_time)\n )\n print(\"\")\n\n ###########################################################################################\n ###### Computation ######\n ###########################################################################################\n STR_ANALYSIS = \"Perform downstream analyses:\"\n i = 1\n for ct in CELLTYPE_LIST:\n STR_ANALYSIS += \"\\n%d. Cell type-level analysis using %s\" % (i, ct)\n STR_ANALYSIS += \": results in @.scdrs_ct.%s\" % (ct)\n i += 1\n if len(VARIABLE_LIST) > 0:\n STR_ANALYSIS += \"\\n%d. Variable-disease correlation analysis for (%s)\" % (\n i,\n \",\".join(VARIABLE_LIST),\n )\n STR_ANALYSIS += \": results in @.scdrs_var\"\n i += 1\n if FLAG_GENE is True:\n STR_ANALYSIS += \"\\n%d. Disease gene prioritization\" % i\n STR_ANALYSIS += \": results in @.scdrs_gene\"\n print(STR_ANALYSIS)\n\n # Compute connectivities if need to do cell type-level analysis\n if (len(CELLTYPE_LIST) > 0) & (\"connectivities\" not in adata.obsp):\n sc.pp.pca(adata, n_comps=20)\n sc.pp.neighbors(adata, n_neighbors=15, n_pcs=20)\n print(\n \"Compute connectivities with `sc.pp.neighbors` because `connectivities` is not found in adata.obsp\"\n )\n\n # A separate file for each trait\n for trait in dic_score.keys():\n cell_list = sorted(set(adata.obs_names) & set(dic_score[trait].index))\n control_list = [\n x for x in dic_score[trait].columns if x.startswith(\"ctrl_norm_score\")\n ]\n n_ctrl = len(control_list)\n df_reg = adata.obs.loc[cell_list, CELLTYPE_LIST + VARIABLE_LIST].copy()\n df_reg = df_reg.join(\n dic_score[trait].loc[cell_list, [\"norm_score\"] + control_list]\n )\n\n # Cell type-disease analysis: association+heterogeneity\n for ct_col in CELLTYPE_LIST:\n ct_list = sorted(set(adata.obs[ct_col]))\n col_list = [\n \"n_cell\",\n \"n_ctrl\",\n \"assoc_mcp\",\n \"assoc_mcz\",\n \"hetero_mcp\",\n \"hetero_mcz\",\n ]\n df_res = pd.DataFrame(index=ct_list, columns=col_list, dtype=np.float32)\n # Basic info\n for ct in ct_list:\n ct_cell_list = list(df_reg.index[df_reg[ct_col] == ct])\n df_res.loc[ct, [\"n_cell\", \"n_ctrl\"]] = [len(ct_cell_list), n_ctrl]\n # Association\n for ct in ct_list:\n ct_cell_list = list(df_reg.index[df_reg[ct_col] == ct])\n score_q95 = np.quantile(df_reg.loc[ct_cell_list, \"norm_score\"], 0.95)\n v_ctrl_score_q95 = np.quantile(\n df_reg.loc[ct_cell_list, control_list], 0.95, axis=0\n )\n mc_p = ((v_ctrl_score_q95 >= score_q95).sum() + 1) / (\n v_ctrl_score_q95.shape[0] + 1\n )\n mc_z = (score_q95 - v_ctrl_score_q95.mean()) / v_ctrl_score_q95.std()\n df_res.loc[ct, [\"assoc_mcp\", \"assoc_mcz\"]] = [mc_p, mc_z]\n # 
Heterogeneity\n            # subset to common set of cells\n            df_rls = md.test_gearysc(\n                adata[cell_list], df_reg.loc[cell_list, :], groupby=ct_col\n            )\n            for ct in ct_list:\n                mc_p, mc_z = df_rls.loc[ct, [\"pval\", \"zsc\"]]\n                df_res.loc[ct, [\"hetero_mcp\", \"hetero_mcz\"]] = [mc_p, mc_z]\n\n            df_res.to_csv(\n                os.path.join(\n                    OUT_FOLDER, \"%s.scdrs_ct.%s\" % (trait, ct_col.replace(\" \", \"_\"))\n                ),\n                sep=\"\\t\",\n                index=True,\n            )\n            print(\n                \"%s: cell type-level analysis with label=%s (sys_time=%0.1fs)\"\n                % (trait, ct_col, time.time() - sys_start_time)\n            )\n\n        # Variable-disease correlation\n        if len(VARIABLE_LIST) > 0:\n            col_list = [\"n_ctrl\", \"corr_mcp\", \"corr_mcz\"]\n            df_res = pd.DataFrame(\n                index=VARIABLE_LIST, columns=col_list, dtype=np.float32\n            )\n            for var_col in VARIABLE_LIST:\n                corr_ = np.corrcoef(df_reg[var_col], df_reg[\"norm_score\"])[0, 1]\n                v_corr_ = [\n                    np.corrcoef(df_reg[var_col], df_reg[\"ctrl_norm_score_%d\" % x])[0, 1]\n                    for x in np.arange(n_ctrl)\n                ]\n                v_corr_ = np.array(v_corr_)\n                mc_p = ((v_corr_ >= corr_).sum() + 1) / (v_corr_.shape[0] + 1)\n                mc_z = (corr_ - v_corr_.mean()) / v_corr_.std()\n                df_res.loc[var_col] = [n_ctrl, mc_p, mc_z]\n            df_res.to_csv(\n                os.path.join(OUT_FOLDER, \"%s.scdrs_var\" % trait), sep=\"\\t\", index=True\n            )\n            print(\n                \"%s: cell-level variable-disease correlation analysis (sys_time=%0.1fs)\"\n                % (trait, time.time() - sys_start_time)\n            )\n\n        # Gene prioritization\n        if FLAG_GENE is True:\n            mat_expr = adata[df_reg.index].X.copy()\n            v_corr = md._pearson_corr(mat_expr, df_reg[\"norm_score\"].values)\n            df_res = pd.DataFrame(\n                index=adata.var_names, columns=[\"CORR\", \"RANK\"], dtype=np.float32\n            )\n            df_res[\"CORR\"] = v_corr\n            df_res.sort_values(\"CORR\", ascending=False, inplace=True)\n            df_res[\"RANK\"] = np.arange(df_res.shape[0])\n            df_res.to_csv(\n                os.path.join(OUT_FOLDER, \"%s.scdrs_gene\" % trait), sep=\"\\t\", index=True\n            )\n            print(\n                \"%s: disease gene prioritization (sys_time=%0.1fs)\"\n                % (trait, time.time() - sys_start_time)\n            )\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"compute score\")\n\n    parser.add_argument(\"--h5ad_file\", type=str, required=True)\n    parser.add_argument(\n        \"--score_file\",\n        type=str,\n        required=True,\n        help=\"@.full_score.gz where @ denotes trait names\",\n    )\n    parser.add_argument(\n        \"--cell_type\",\n        type=str,\n        required=False,\n        default=None,\n        help=\"Comma-separated column names for cell types/tissues, \"\n        \"used for assessing cell type-disease association and \"\n        \"within-cell type disease association heterogeneity\",\n    )\n    parser.add_argument(\n        \"--cell_variable\",\n        type=str,\n        required=False,\n        default=None,\n        help=\"Comma-separated column names for cell-level variables, \"\n        \"used for associating cell-level variables to disease scores\",\n    )\n    parser.add_argument(\n        \"--flag_gene\",\n        type=str,\n        required=False,\n        default=\"False\",\n        help=\"If True, perform gene prioritization\",\n    )\n    parser.add_argument(\n        \"--flag_filter\",\n        type=str,\n        required=False,\n        default=\"True\",\n        help=\"Whether to apply cell and gene filters to the h5ad_file data\",\n    )\n    parser.add_argument(\n        \"--flag_raw_count\",\n        type=str,\n        required=False,\n        default=\"True\",\n        help=\"If True, apply size factor normalization and log1p transformation\",\n    )\n    parser.add_argument(\n        \"--out_folder\",\n        type=str,\n        required=True,\n        help=\"Save file at out_folder/trait.scdrs_res\",\n    )\n\n    args = parser.parse_args()\n\n    main(args)",
"import scdrs\nimport os\nimport subprocess\nimport pandas as pd\nimport numpy as np\nimport tempfile\nfrom .test_method_score_cell_main import compare_score_file\n\n\ndef test_score_cell_cli():\n \"\"\"\n Test CLI `scdrs compute-score`\n \"\"\"\n # Load toy data\n ROOT_DIR = scdrs.__path__[0]\n H5AD_FILE = os.path.join(ROOT_DIR, \"data/toydata_mouse.h5ad\")\n COV_FILE = os.path.join(ROOT_DIR, \"data/toydata_mouse.cov\")\n assert os.path.exists(H5AD_FILE), \"built-in data toydata_mouse.h5ad missing\"\n assert os.path.exists(COV_FILE), \"built-in data toydata_mouse.cov missing\"\n\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_dir_path = tmp_dir.name\n dict_df_score = {}\n for gs_species in [\"human\", \"mouse\"]:\n gs_file = os.path.join(ROOT_DIR, f\"data/toydata_{gs_species}.gs\")\n # call compute_score.py\n cmds = [\n f\"scdrs compute-score\",\n f\"--h5ad_file {H5AD_FILE}\",\n \"--h5ad_species mouse\",\n f\"--gs_file {gs_file}\",\n f\"--gs_species {gs_species}\",\n f\"--cov_file {COV_FILE}\",\n \"--ctrl_match_opt mean_var\",\n \"--n_ctrl 20\",\n \"--flag_filter_data False\",\n \"--weight_opt vs\",\n \"--flag_raw_count False\",\n \"--flag_return_ctrl_raw_score False\",\n \"--flag_return_ctrl_norm_score False\",\n f\"--out_folder {tmp_dir_path}\",\n ]\n subprocess.check_call(\" \".join(cmds), shell=True)\n dict_df_score[gs_species] = pd.read_csv(\n os.path.join(tmp_dir_path, f\"toydata_gs_{gs_species}.score.gz\"),\n sep=\"\\t\",\n index_col=0,\n )\n # consistency between human and mouse\n assert np.all(dict_df_score[\"mouse\"].pval == dict_df_score[\"human\"].pval)\n\n df_res = dict_df_score[\"mouse\"]\n\n REF_COV_FILE = os.path.join(\n ROOT_DIR, \"data/toydata_gs_mouse.ref_Ctrl20_CovConstCovariate.score.gz\"\n )\n df_ref_res = pd.read_csv(REF_COV_FILE, sep=\"\\t\", index_col=0)\n compare_score_file(df_res, df_ref_res)\n tmp_dir.cleanup()\n return\n\n\ndef test_munge_gs_cli():\n \"\"\"\n Test CLI `scdrs munge-gs`\n \"\"\"\n\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_dir_path = tmp_dir.name\n\n # pval_file and zscore_file\n temp_df = pd.DataFrame(\n data={\n \"HEIGHT\": [0.02, np.nan, 0.4],\n \"BMI\": [0.8, 0.02, np.nan],\n }\n )\n temp_df.index = [\"OR4F5\", \"DAZ1\", \"BPY2B\"]\n temp_df.to_csv(os.path.join(tmp_dir_path, \"pval_file.tsv\"), sep=\"\\t\", index=True)\n temp_df = pd.DataFrame(\n data={\n \"GENE\": [\"OR4F5\", \"DAZ1\", \"BPY2B\"],\n \"HEIGHT\": [2.0537, np.nan, 0.25335],\n \"BMI\": [-0.84162, 2.0537, np.nan],\n }\n )\n temp_df.to_csv(os.path.join(tmp_dir_path, \"zscore_file.tsv\"), sep=\"\\t\", index=False)\n\n dict_df_score = {}\n for input_file in [\"pval_file\", \"zscore_file\"]:\n for selection in [\n \"--n-max 1\",\n \"--n-min 1 --n-max 3 --fdr 0.05\",\n \"--n-min 1 --n-max 3 --fwer 0.05\",\n ]:\n # Call scdrs munge-gs\n input_file_path = os.path.join(tmp_dir_path, \"%s.tsv\" % input_file)\n output_file_path = os.path.join(tmp_dir_path, f\"outfile.gs\")\n cmds = [\n \"scdrs munge-gs\",\n f\"--{input_file} {input_file_path}\",\n f\"--out-file {output_file_path}\",\n \"--weight zscore\",\n selection,\n ]\n subprocess.check_call(\" \".join(cmds), shell=True)\n temp_df = pd.read_csv(\n os.path.join(tmp_dir_path, f\"outfile.gs\"),\n sep=\"\\t\",\n index_col=0,\n )\n\n # Check results\n print('Generated .gs file:')\n print(temp_df)\n err_msg = \"input_file=%s, %s\" % (input_file, selection)\n assert list(temp_df.index) == [\"BMI\", \"HEIGHT\"], err_msg\n assert temp_df.loc[\"BMI\", \"GENESET\"] == \"DAZ1:2.0537\", err_msg\n assert temp_df.loc[\"HEIGHT\", \"GENESET\"] == 
\"OR4F5:2.0537\", err_msg\n\n tmp_dir.cleanup()\n\n return\n\n\ndef test_downstream_cli():\n \"\"\"\n Test CLI `scdrs perform-downstream`\n\n 1. --group-analysis cell_type\n 2. --corr-analysis causal_variable,non_causal_variable,covariate\n 3. --gene-analysis\n \"\"\"\n\n # Load toy data\n ROOT_DIR = scdrs.__path__[0]\n H5AD_FILE = os.path.join(ROOT_DIR, \"data/toydata_mouse.h5ad\")\n SCORE_FILE = os.path.join(ROOT_DIR, \"data/@.full_score.gz\")\n REF_RES_DIR = os.path.join(ROOT_DIR, \"data/\")\n\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_dir_path = tmp_dir.name\n for task in [\n \"--group-analysis cell_type\",\n \"--corr-analysis causal_variable,non_causal_variable,covariate\",\n \"--gene-analysis\",\n ]:\n # Call scdrs downstream\n cmds = [\n f\"scdrs perform-downstream\",\n f\"--h5ad_file {H5AD_FILE}\",\n f\"--score-file {SCORE_FILE}\",\n task,\n \"--flag-filter-data False\",\n \"--flag-raw-count False\",\n \"--knn-n-neighbors 15\",\n \"--knn-n-pcs 20\",\n f\"--out-folder {tmp_dir_path}\",\n ]\n subprocess.check_call(\" \".join(cmds), shell=True)\n\n # Check consistency between computed results and reference results\n for prefix in [\"toydata_gs_mouse.ref_Ctrl20_CovConstCovariate\"]:\n for suffix in [\"scdrs_group.cell_type\", \"scdrs_gene\", \"scdrs_cell_corr\"]:\n res_path = os.path.join(tmp_dir_path, f\"{prefix}.{suffix}\")\n ref_res_path = os.path.join(REF_RES_DIR, f\"{prefix}.{suffix}\")\n df_res = pd.read_csv(res_path, sep=\"\\t\", index_col=0)\n df_ref_res = pd.read_csv(ref_res_path, sep=\"\\t\", index_col=0)\n print(df_res)\n assert np.allclose(\n df_res.values, df_ref_res.values\n ), '%s, %s'%(prefix, suffix)\n\n tmp_dir.cleanup()\n return"
] |
[
[
"numpy.quantile",
"numpy.array",
"pandas.DataFrame",
"numpy.arange",
"numpy.corrcoef",
"pandas.read_csv"
],
[
"numpy.all",
"pandas.DataFrame",
"pandas.read_csv",
"numpy.allclose"
]
] |
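The association test in compute_downstream.py above rests on one reusable pattern: a one-sided Monte Carlo p-value comparing the 95% quantile of observed scores against matched control replicates. A sketch of just that computation on random data (the 1000-cell and 20-control shapes are illustrative assumptions):

import numpy as np

rng = np.random.default_rng(0)
norm_score = rng.normal(size=1000)         # observed per-cell scores
ctrl_scores = rng.normal(size=(1000, 20))  # 20 control replicates

score_q95 = np.quantile(norm_score, 0.95)
v_ctrl_q95 = np.quantile(ctrl_scores, 0.95, axis=0)
# One-sided Monte Carlo p-value with the +1 correction used above.
mc_p = ((v_ctrl_q95 >= score_q95).sum() + 1) / (v_ctrl_q95.shape[0] + 1)
mc_z = (score_q95 - v_ctrl_q95.mean()) / v_ctrl_q95.std()
print(mc_p, mc_z)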
nofarm3/pandas
|
[
"963cf2b5abf4e1ee99a7f6b9031ad485804c5dff"
] |
[
"pandas/core/apply.py"
] |
[
"from __future__ import annotations\n\nimport abc\nimport inspect\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Type, cast\n\nimport numpy as np\n\nfrom pandas._config import option_context\n\nfrom pandas._libs import lib\nfrom pandas._typing import (\n AggFuncType,\n AggFuncTypeBase,\n AggFuncTypeDict,\n Axis,\n FrameOrSeriesUnion,\n)\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.common import (\n is_dict_like,\n is_extension_array_dtype,\n is_list_like,\n is_sequence,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\n\nfrom pandas.core.aggregation import agg_dict_like, agg_list_like\nfrom pandas.core.construction import (\n array as pd_array,\n create_series_with_explicit_dtype,\n)\n\nif TYPE_CHECKING:\n from pandas import DataFrame, Index, Series\n\nResType = Dict[int, Any]\n\n\ndef frame_apply(\n obj: DataFrame,\n func: AggFuncType,\n axis: Axis = 0,\n raw: bool = False,\n result_type: Optional[str] = None,\n args=None,\n kwds=None,\n) -> FrameApply:\n \"\"\" construct and return a row or column based frame apply object \"\"\"\n axis = obj._get_axis_number(axis)\n klass: Type[FrameApply]\n if axis == 0:\n klass = FrameRowApply\n elif axis == 1:\n klass = FrameColumnApply\n\n return klass(\n obj,\n func,\n raw=raw,\n result_type=result_type,\n args=args,\n kwds=kwds,\n )\n\n\ndef series_apply(\n obj: Series,\n func: AggFuncType,\n convert_dtype: bool = True,\n args=None,\n kwds=None,\n) -> SeriesApply:\n return SeriesApply(\n obj,\n func,\n convert_dtype,\n args,\n kwds,\n )\n\n\nclass Apply(metaclass=abc.ABCMeta):\n axis: int\n\n def __init__(\n self,\n obj: FrameOrSeriesUnion,\n func,\n raw: bool,\n result_type: Optional[str],\n args,\n kwds,\n ):\n self.obj = obj\n self.raw = raw\n self.args = args or ()\n self.kwds = kwds or {}\n\n if result_type not in [None, \"reduce\", \"broadcast\", \"expand\"]:\n raise ValueError(\n \"invalid value for result_type, must be one \"\n \"of {None, 'reduce', 'broadcast', 'expand'}\"\n )\n\n self.result_type = result_type\n\n # curry if needed\n if (\n (kwds or args)\n and not isinstance(func, (np.ufunc, str))\n and not is_list_like(func)\n ):\n\n def f(x):\n return func(x, *args, **kwds)\n\n else:\n f = func\n\n self.f: AggFuncType = f\n\n @property\n def index(self) -> Index:\n return self.obj.index\n\n @abc.abstractmethod\n def apply(self) -> FrameOrSeriesUnion:\n pass\n\n def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]:\n \"\"\"\n Provide an implementation for the aggregators.\n\n Returns\n -------\n tuple of result, how.\n\n Notes\n -----\n how can be a string describe the required post-processing, or\n None if not required.\n \"\"\"\n obj = self.obj\n arg = self.f\n args = self.args\n kwargs = self.kwds\n\n _axis = kwargs.pop(\"_axis\", None)\n if _axis is None:\n _axis = getattr(obj, \"axis\", 0)\n\n result = self.maybe_apply_str()\n if result is not None:\n return result, None\n\n if is_dict_like(arg):\n arg = cast(AggFuncTypeDict, arg)\n return agg_dict_like(obj, arg, _axis), True\n elif is_list_like(arg):\n # we require a list, but not a 'str'\n arg = cast(List[AggFuncTypeBase], arg)\n return agg_list_like(obj, arg, _axis=_axis), None\n else:\n result = None\n\n if callable(arg):\n f = obj._get_cython_func(arg)\n if f and not args and not kwargs:\n return getattr(obj, f)(), None\n\n # caller can react\n return result, True\n\n def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:\n \"\"\"\n Compute apply in case of a string.\n\n Returns\n 
-------\n result: Series, DataFrame, or None\n Result when self.f is a string, None otherwise.\n \"\"\"\n f = self.f\n if not isinstance(f, str):\n return None\n # Support for `frame.transform('method')`\n # Some methods (shift, etc.) require the axis argument, others\n # don't, so inspect and insert if necessary.\n func = getattr(self.obj, f, None)\n if callable(func):\n sig = inspect.getfullargspec(func)\n if \"axis\" in sig.args:\n self.kwds[\"axis\"] = self.axis\n return self.obj._try_aggregate_string_function(f, *self.args, **self.kwds)\n\n def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]:\n \"\"\"\n Compute apply in case of a list-like or dict-like.\n\n Returns\n -------\n result: Series, DataFrame, or None\n Result when self.f is a list-like or dict-like, None otherwise.\n \"\"\"\n # Note: dict-likes are list-like\n if not is_list_like(self.f):\n return None\n return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwds)\n\n\nclass FrameApply(Apply):\n obj: DataFrame\n\n # ---------------------------------------------------------------\n # Abstract Methods\n\n @property\n @abc.abstractmethod\n def result_index(self) -> Index:\n pass\n\n @property\n @abc.abstractmethod\n def result_columns(self) -> Index:\n pass\n\n @property\n @abc.abstractmethod\n def series_generator(self) -> Iterator[Series]:\n pass\n\n @abc.abstractmethod\n def wrap_results_for_axis(\n self, results: ResType, res_index: Index\n ) -> FrameOrSeriesUnion:\n pass\n\n # ---------------------------------------------------------------\n\n @property\n def res_columns(self) -> Index:\n return self.result_columns\n\n @property\n def columns(self) -> Index:\n return self.obj.columns\n\n @cache_readonly\n def values(self):\n return self.obj.values\n\n @cache_readonly\n def dtypes(self) -> Series:\n return self.obj.dtypes\n\n @property\n def agg_axis(self) -> Index:\n return self.obj._get_agg_axis(self.axis)\n\n def apply(self) -> FrameOrSeriesUnion:\n \"\"\" compute the results \"\"\"\n # dispatch to agg\n result = self.maybe_apply_multiple()\n if result is not None:\n return result\n\n # all empty\n if len(self.columns) == 0 and len(self.index) == 0:\n return self.apply_empty_result()\n\n # string dispatch\n result = self.maybe_apply_str()\n if result is not None:\n return result\n\n # ufunc\n elif isinstance(self.f, np.ufunc):\n with np.errstate(all=\"ignore\"):\n results = self.obj._mgr.apply(\"apply\", func=self.f)\n # _constructor will retain self.index and self.columns\n return self.obj._constructor(data=results)\n\n # broadcasting\n if self.result_type == \"broadcast\":\n return self.apply_broadcast(self.obj)\n\n # one axis empty\n elif not all(self.obj.shape):\n return self.apply_empty_result()\n\n # raw\n elif self.raw:\n return self.apply_raw()\n\n return self.apply_standard()\n\n def apply_empty_result(self):\n \"\"\"\n we have an empty result; at least 1 axis is 0\n\n we will try to apply the function to an empty\n series in order to see if this is a reduction function\n \"\"\"\n assert callable(self.f)\n\n # we are not asked to reduce or infer reduction\n # so just return a copy of the existing object\n if self.result_type not in [\"reduce\", None]:\n return self.obj.copy()\n\n # we may need to infer\n should_reduce = self.result_type == \"reduce\"\n\n from pandas import Series\n\n if not should_reduce:\n try:\n r = self.f(Series([], dtype=np.float64))\n except Exception:\n pass\n else:\n should_reduce = not isinstance(r, Series)\n\n if should_reduce:\n if len(self.agg_axis):\n r = 
self.f(Series([], dtype=np.float64))\n else:\n r = np.nan\n\n return self.obj._constructor_sliced(r, index=self.agg_axis)\n else:\n return self.obj.copy()\n\n def apply_raw(self):\n \"\"\" apply to the values as a numpy array \"\"\"\n\n def wrap_function(func):\n \"\"\"\n Wrap user supplied function to work around numpy issue.\n\n see https://github.com/numpy/numpy/issues/8352\n \"\"\"\n\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n if isinstance(result, str):\n result = np.array(result, dtype=object)\n return result\n\n return wrapper\n\n result = np.apply_along_axis(wrap_function(self.f), self.axis, self.values)\n\n # TODO: mixed type case\n if result.ndim == 2:\n return self.obj._constructor(result, index=self.index, columns=self.columns)\n else:\n return self.obj._constructor_sliced(result, index=self.agg_axis)\n\n def apply_broadcast(self, target: DataFrame) -> DataFrame:\n assert callable(self.f)\n\n result_values = np.empty_like(target.values)\n\n # axis which we want to compare compliance\n result_compare = target.shape[0]\n\n for i, col in enumerate(target.columns):\n res = self.f(target[col])\n ares = np.asarray(res).ndim\n\n # must be a scalar or 1d\n if ares > 1:\n raise ValueError(\"too many dims to broadcast\")\n elif ares == 1:\n\n # must match return dim\n if result_compare != len(res):\n raise ValueError(\"cannot broadcast result\")\n\n result_values[:, i] = res\n\n # we *always* preserve the original index / columns\n result = self.obj._constructor(\n result_values, index=target.index, columns=target.columns\n )\n return result\n\n def apply_standard(self):\n results, res_index = self.apply_series_generator()\n\n # wrap results\n return self.wrap_results(results, res_index)\n\n def apply_series_generator(self) -> Tuple[ResType, Index]:\n assert callable(self.f)\n\n series_gen = self.series_generator\n res_index = self.result_index\n\n results = {}\n\n with option_context(\"mode.chained_assignment\", None):\n for i, v in enumerate(series_gen):\n # ignore SettingWithCopy here in case the user mutates\n results[i] = self.f(v)\n if isinstance(results[i], ABCSeries):\n # If we have a view on v, we need to make a copy because\n # series_generator will swap out the underlying data\n results[i] = results[i].copy(deep=False)\n\n return results, res_index\n\n def wrap_results(self, results: ResType, res_index: Index) -> FrameOrSeriesUnion:\n from pandas import Series\n\n # see if we can infer the results\n if len(results) > 0 and 0 in results and is_sequence(results[0]):\n return self.wrap_results_for_axis(results, res_index)\n\n # dict of scalars\n\n # the default dtype of an empty Series will be `object`, but this\n # code can be hit by df.mean() where the result should have dtype\n # float64 even if it's an empty Series.\n constructor_sliced = self.obj._constructor_sliced\n if constructor_sliced is Series:\n result = create_series_with_explicit_dtype(\n results, dtype_if_empty=np.float64\n )\n else:\n result = constructor_sliced(results)\n result.index = res_index\n\n return result\n\n\nclass FrameRowApply(FrameApply):\n axis = 0\n\n def apply_broadcast(self, target: DataFrame) -> DataFrame:\n return super().apply_broadcast(target)\n\n @property\n def series_generator(self):\n return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))\n\n @property\n def result_index(self) -> Index:\n return self.columns\n\n @property\n def result_columns(self) -> Index:\n return self.index\n\n def wrap_results_for_axis(\n self, results: ResType, res_index: Index\n 
) -> FrameOrSeriesUnion:\n \"\"\" return the results for the rows \"\"\"\n\n if self.result_type == \"reduce\":\n # e.g. test_apply_dict GH#8735\n res = self.obj._constructor_sliced(results)\n res.index = res_index\n return res\n\n elif self.result_type is None and all(\n isinstance(x, dict) for x in results.values()\n ):\n # Our operation was a to_dict op e.g.\n # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544\n res = self.obj._constructor_sliced(results)\n res.index = res_index\n return res\n\n try:\n result = self.obj._constructor(data=results)\n except ValueError as err:\n if \"All arrays must be of the same length\" in str(err):\n # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]\n # see test_agg_listlike_result GH#29587\n res = self.obj._constructor_sliced(results)\n res.index = res_index\n return res\n else:\n raise\n\n if not isinstance(results[0], ABCSeries):\n if len(result.index) == len(self.res_columns):\n result.index = self.res_columns\n\n if len(result.columns) == len(res_index):\n result.columns = res_index\n\n return result\n\n\nclass FrameColumnApply(FrameApply):\n axis = 1\n\n def apply_broadcast(self, target: DataFrame) -> DataFrame:\n result = super().apply_broadcast(target.T)\n return result.T\n\n @property\n def series_generator(self):\n values = self.values\n assert len(values) > 0\n\n # We create one Series object, and will swap out the data inside\n # of it. Kids: don't do this at home.\n ser = self.obj._ixs(0, axis=0)\n mgr = ser._mgr\n blk = mgr.blocks[0]\n\n if is_extension_array_dtype(blk.dtype):\n # values will be incorrect for this block\n # TODO(EA2D): special case would be unnecessary with 2D EAs\n obj = self.obj\n for i in range(len(obj)):\n yield obj._ixs(i, axis=0)\n\n else:\n for (arr, name) in zip(values, self.index):\n # GH#35462 re-pin mgr in case setitem changed it\n ser._mgr = mgr\n blk.values = arr\n ser.name = name\n yield ser\n\n @property\n def result_index(self) -> Index:\n return self.index\n\n @property\n def result_columns(self) -> Index:\n return self.columns\n\n def wrap_results_for_axis(\n self, results: ResType, res_index: Index\n ) -> FrameOrSeriesUnion:\n \"\"\" return the results for the columns \"\"\"\n result: FrameOrSeriesUnion\n\n # we have requested to expand\n if self.result_type == \"expand\":\n result = self.infer_to_same_shape(results, res_index)\n\n # we have a non-series and don't want inference\n elif not isinstance(results[0], ABCSeries):\n result = self.obj._constructor_sliced(results)\n result.index = res_index\n\n # we may want to infer results\n else:\n result = self.infer_to_same_shape(results, res_index)\n\n return result\n\n def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:\n \"\"\" infer the results to the same shape as the input object \"\"\"\n result = self.obj._constructor(data=results)\n result = result.T\n\n # set the index\n result.index = res_index\n\n # infer dtypes\n result = result.infer_objects()\n\n return result\n\n\nclass SeriesApply(Apply):\n obj: Series\n axis = 0\n\n def __init__(\n self,\n obj: Series,\n func: AggFuncType,\n convert_dtype: bool,\n args,\n kwds,\n ):\n self.convert_dtype = convert_dtype\n\n super().__init__(\n obj,\n func,\n raw=False,\n result_type=None,\n args=args,\n kwds=kwds,\n )\n\n def apply(self) -> FrameOrSeriesUnion:\n obj = self.obj\n\n if len(obj) == 0:\n return self.apply_empty_result()\n\n # dispatch to agg\n result = self.maybe_apply_multiple()\n if result is not None:\n return result\n\n # if we are a string, try to 
dispatch\n result = self.maybe_apply_str()\n if result is not None:\n return result\n\n return self.apply_standard()\n\n def apply_empty_result(self) -> Series:\n obj = self.obj\n return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(\n obj, method=\"apply\"\n )\n\n def apply_standard(self) -> FrameOrSeriesUnion:\n f = self.f\n obj = self.obj\n\n with np.errstate(all=\"ignore\"):\n if isinstance(f, np.ufunc):\n return f(obj)\n\n # row-wise access\n if is_extension_array_dtype(obj.dtype) and hasattr(obj._values, \"map\"):\n # GH#23179 some EAs do not have `map`\n mapped = obj._values.map(f)\n else:\n values = obj.astype(object)._values\n mapped = lib.map_infer(values, f, convert=self.convert_dtype)\n\n if len(mapped) and isinstance(mapped[0], ABCSeries):\n # GH 25959 use pd.array instead of tolist\n # so extension arrays can be used\n return obj._constructor_expanddim(pd_array(mapped), index=obj.index)\n else:\n return obj._constructor(mapped, index=obj.index).__finalize__(\n obj, method=\"apply\"\n )\n"
] |
[
[
"pandas._libs.lib.map_infer",
"pandas.core.construction.array",
"numpy.array",
"numpy.asarray",
"pandas._config.option_context",
"pandas.core.construction.create_series_with_explicit_dtype",
"numpy.errstate",
"pandas.core.aggregation.agg_list_like",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.aggregation.agg_dict_like",
"pandas.Series",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_sequence",
"numpy.empty_like"
]
] |
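The axis dispatch implemented above (FrameRowApply for axis=0, FrameColumnApply for axis=1, plus the result_type handling) is easiest to see from the public DataFrame.apply surface; a short demonstration:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
# axis=0 -> FrameRowApply: the function sees each column as a Series.
print(df.apply(np.sum, axis=0))
# axis=1 -> FrameColumnApply: the function sees each row as a Series.
print(df.apply(np.sum, axis=1))
# result_type="expand" routes list-like results through infer_to_same_shape.
print(df.apply(lambda row: [row["a"], row["b"]], axis=1, result_type="expand"))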
totucuong/vae-seq
|
[
"0a1bace02c6bac6ab991ab8203a203d3061615ec"
] |
[
"vaeseq/examples/text/dataset.py"
] |
[
"# Copyright 2018 Google, Inc.,\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dataset for iterating over text.\"\"\"\n\nimport collections\nimport numpy as np\nimport tensorflow as tf\n\n\ndef _split_string(string):\n \"\"\"Splits a byte string into an array of character bytes.\"\"\"\n text = tf.compat.as_text(string)\n ret = np.empty(len(text), dtype=np.object)\n for i, char in enumerate(text):\n ret[i] = tf.compat.as_bytes(char)\n return ret\n\n\ndef vocabulary(filename, max_size=None, num_oov_buckets=1):\n \"\"\"Builds vocabulary and ID lookup tables from the given file.\"\"\"\n\n def _unique_chars(filename):\n \"\"\"Returns the used alphabet as an array of strings.\"\"\"\n counts = collections.Counter()\n with tf.gfile.Open(filename) as file_:\n for line in file_:\n counts.update(_split_string(line))\n alphabet = [k for (k, _) in counts.most_common(max_size)]\n alphabet.sort()\n return np.asarray(alphabet, dtype=np.object)\n\n chars, = tf.py_func(_unique_chars, [filename], [tf.string])\n char_to_id = tf.contrib.lookup.index_table_from_tensor(\n chars, num_oov_buckets=num_oov_buckets)\n id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, \" \")\n return char_to_id, id_to_char\n\n\ndef characters(filename, batch_size, sequence_size):\n \"\"\"Returns a dataset of characters from the given file.\"\"\"\n\n def _to_chars(line):\n \"\"\"string scalar -> Dataset of characters (string scalars).\"\"\"\n chars, = tf.py_func(_split_string, [line + \"\\n\"], [tf.string])\n chars.set_shape([None])\n return tf.data.Dataset.from_tensor_slices(chars)\n\n return (tf.data.TextLineDataset([filename])\n .flat_map(_to_chars)\n .repeat()\n .batch(tf.to_int64(sequence_size))\n .shuffle(1000)\n .batch(tf.to_int64(batch_size)))\n"
] |
[
[
"numpy.asarray",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.py_func",
"tensorflow.compat.as_bytes",
"tensorflow.to_int64",
"tensorflow.compat.as_text",
"tensorflow.gfile.Open",
"tensorflow.contrib.lookup.index_to_string_table_from_tensor",
"tensorflow.data.TextLineDataset",
"tensorflow.contrib.lookup.index_table_from_tensor"
]
] |
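The characters() pipeline above nests two batch() calls to turn a character stream into [batch, sequence] windows. A stripped-down sketch with an in-memory tensor instead of a file; this assumes TF 1.x, as the original's tf.py_func and tf.to_int64 imply:

import tensorflow as tf

chars = tf.constant([b"h", b"e", b"l", b"l", b"o", b"!"])
dataset = (tf.data.Dataset.from_tensor_slices(chars)
           .repeat()
           .batch(tf.to_int64(3))    # sequence_size: characters per window
           .shuffle(1000)
           .batch(tf.to_int64(2)))   # batch_size: windows per batch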
siddharthab/tensorflow
|
[
"fbeca0b40aaec37c1ff7fbc3cf84215755faac51"
] |
[
"tensorflow/python/keras/engine/data_adapter_test.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"DataAdapter tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.engine import data_adapter\nfrom tensorflow.python.keras.utils import data_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DataAdapterTestBase(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(DataAdapterTestBase, self).setUp()\n self.batch_size = 5\n self.numpy_input = np.zeros((50, 10))\n self.numpy_target = np.ones(50)\n self.tensor_input = constant_op.constant(2.0, shape=(50, 10))\n self.tensor_target = array_ops.ones((50,))\n self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(\n (self.numpy_input, self.numpy_target)).shuffle(50).batch(\n self.batch_size)\n\n def generator():\n while True:\n yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))\n self.generator_input = generator()\n self.sequence_input = TestSequence(batch_size=self.batch_size,\n feature_shape=10)\n self.model = keras.models.Sequential(\n [keras.layers.Dense(8, input_shape=(10,), activation='softmax')])\n\n\nclass TestSequence(data_utils.Sequence):\n\n def __init__(self, batch_size, feature_shape):\n self.batch_size = batch_size\n self.feature_shape = feature_shape\n\n def __getitem__(self, item):\n return (np.zeros((self.batch_size, self.feature_shape)),\n np.ones((self.batch_size,)))\n\n def __len__(self):\n return 10\n\n\nclass TensorLikeDataAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(TensorLikeDataAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.TensorLikeDataAdapter\n\n def test_can_handle_numpy(self):\n self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))\n self.assertTrue(\n self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))\n\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_iterator_expect_batch_size_numpy(self):\n with self.assertRaisesRegexp(\n ValueError, r'`batch_size` or `steps` is required'):\n self.adapter_cls(self.numpy_input, self.numpy_target)\n\n def test_size_numpy(self):\n adapter = self.adapter_cls(\n self.numpy_input, self.numpy_target, batch_size=5)\n self.assertEqual(adapter.get_size(), 10)\n self.assertFalse(adapter.has_partial_batch())\n\n def test_batch_size_numpy(self):\n adapter = self.adapter_cls(\n 
self.numpy_input, self.numpy_target, batch_size=5)\n self.assertEqual(adapter.batch_size(), 5)\n\n def test_partial_batch_numpy(self):\n adapter = self.adapter_cls(\n self.numpy_input, self.numpy_target, batch_size=4)\n self.assertEqual(adapter.get_size(), 13) # 50/4\n self.assertTrue(adapter.has_partial_batch())\n self.assertEqual(adapter.partial_batch_size(), 2)\n\n def test_training_numpy(self):\n dataset = self.adapter_cls(\n self.numpy_input, self.numpy_target, batch_size=5).get_dataset()\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(dataset)\n\n def test_can_handle(self):\n self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))\n self.assertTrue(\n self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))\n\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n dataset = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=5).get_dataset()\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(dataset)\n\n def test_size(self):\n adapter = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=5)\n self.assertEqual(adapter.get_size(), 10)\n self.assertFalse(adapter.has_partial_batch())\n\n @parameterized.named_parameters(\n ('batch_size_5', 5, None, 5),\n ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence\n ('steps_1', None, 1, 50),\n ('steps_4', None, 4, 13),\n )\n def test_batch_size(self, batch_size_in, steps, batch_size_out):\n adapter = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=batch_size_in,\n steps=steps)\n self.assertEqual(adapter.batch_size(), batch_size_out)\n\n @parameterized.named_parameters(\n ('batch_size_5', 5, None, 10, 0),\n ('batch_size_4', 4, None, 13, 2),\n ('steps_1', None, 1, 1, 0),\n ('steps_5', None, 5, 5, 0),\n ('steps_4', None, 4, 4, 11),\n )\n def test_partial_batch(\n self, batch_size_in, steps, size, partial_batch_size):\n adapter = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=batch_size_in,\n steps=steps)\n self.assertEqual(adapter.get_size(), size) # 50/steps\n self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))\n self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)\n\n\nclass DatasetAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(DatasetAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.DatasetAdapter\n\n def test_can_handle(self):\n self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))\n self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))\n self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n dataset = self.adapter_cls(self.dataset_input).get_dataset()\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(dataset)\n\n def test_size(self):\n adapter = self.adapter_cls(self.dataset_input)\n self.assertIsNone(adapter.get_size())\n\n def test_batch_size(self):\n adapter = self.adapter_cls(self.dataset_input)\n self.assertIsNone(adapter.batch_size())\n\n def test_partial_batch(self):\n adapter = self.adapter_cls(self.dataset_input)\n 
self.assertFalse(adapter.has_partial_batch())\n self.assertIsNone(adapter.partial_batch_size())\n\n\nclass GeneratorDataAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(GeneratorDataAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.GeneratorDataAdapter\n\n def test_can_handle(self):\n self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))\n self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertTrue(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.generator_input, steps_per_epoch=10)\n\n @test_util.run_v2_only\n def test_with_multiprocessing_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.generator_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n # Fit twice to ensure there isn't any duplication that prevent the worker\n # from starting.\n self.model.fit(self.generator_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n\n def test_size(self):\n adapter = self.adapter_cls(self.generator_input)\n self.assertIsNone(adapter.get_size())\n\n def test_batch_size(self):\n adapter = self.adapter_cls(self.generator_input)\n self.assertEqual(adapter.batch_size(), 5)\n\n def test_partial_batch(self):\n adapter = self.adapter_cls(self.generator_input)\n self.assertFalse(adapter.has_partial_batch())\n self.assertIsNone(adapter.partial_batch_size())\n\n\nclass KerasSequenceAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(KerasSequenceAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.KerasSequenceAdapter\n\n def test_can_handle(self):\n self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))\n self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.sequence_input)\n\n @test_util.run_v2_only\n def test_with_multiprocessing_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n # Fit twice to ensure there isn't any duplication that prevent the worker\n # from starting.\n self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n\n def test_size(self):\n adapter = self.adapter_cls(self.sequence_input)\n self.assertEqual(adapter.get_size(), 10)\n\n def test_batch_size(self):\n adapter = self.adapter_cls(self.sequence_input)\n self.assertEqual(adapter.batch_size(), 5)\n\n def test_partial_batch(self):\n adapter = self.adapter_cls(self.sequence_input)\n self.assertFalse(adapter.has_partial_batch())\n self.assertIsNone(adapter.partial_batch_size())\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"numpy.zeros",
"numpy.ones",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices",
"tensorflow.python.platform.test.main"
]
] |
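The partial-batch assertions in TensorLikeDataAdapterTest reduce to ceiling-division bookkeeping over the 50-sample fixture; spelled out directly:

import math

n_samples, batch_size = 50, 4
n_batches = math.ceil(n_samples / batch_size)  # 13, as test_partial_batch_numpy expects
partial = n_samples % batch_size               # 2 samples left in the final batch
print(n_batches, partial, bool(partial))       # has_partial_batch() is truthy here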
sivasanarul/amfe_topopt
|
[
"ba7fa1ce756e7ea6e4fd7b2bdb609b83bbfac472"
] |
[
"examples/nonlinear_beam_hyperreduction.py"
] |
[
"# Beam example\n\n# Distributed under BSD-3-Clause License. See LICENSE-File for more information\n#\n\"\"\"\nExample showing a cantilever beam which is loaded on the tip with a force\nshowing nonlinear displacements.\n\nThe beam is reduced with ECSW and NSKTS\n\"\"\"\n\nimport os\nimport time\nimport numpy as np\nfrom h5py import File\n\nfrom amfe.ui import *\nfrom amfe.io import amfe_dir\nfrom amfe.io.mesh import AmfeMeshObjMeshReader\nfrom amfe.io.postprocessing import *\nfrom amfe.material import KirchhoffMaterial\nfrom amfe.solver import *\nfrom amfe.mor import *\nfrom amfe.mor.hyper_red import *\nfrom amfe.structural_dynamics import vibration_modes\n\nstudies = []\nstudies.append('full_ti')\n#studies.append('create_basis_1')\n#studies.append('red_ti')\n#studies.append('ecsw')\n#studies.append('poly3')\n\nOmega = 31.0\n\ntimes = dict([])\ninput_file = amfe_dir('meshes/gmsh/bar.msh')\noutput_file = amfe_dir('results/beam_nonlinear_refactoring/beam_ecsw')\n\n# Define material\nmaterial = KirchhoffMaterial(E=210E9, nu=0.3, rho=1E4, plane_stress=True)\n# Load Mesh\nmesh = import_mesh_from_file(input_file)\n# Create Component\ncomponent = create_structural_component(mesh)\n# Assign material\ncomponent.assign_material(material, [7], 'S')\n# Assign Dirichlet Boundaries\nset_dirichlet_by_group(component, 8, ('ux', 'uy'))\n# Assign Neumann Boundaries\nforce = component.neumann.create_fixed_direction_neumann(np.array([0, -1], dtype=float),\n lambda t: 1E8*np.sin(Omega*t))\ncomponent.assign_neumann('Force', force, [9])\n\n#\nsystem, formulation = create_constrained_mechanical_system_from_component(component, constant_mass=True,\n constant_damping=True,\n constraint_formulation='boolean')\n\n# Solver Factory:\nsolfac = SolverFactory()\nsolfac.set_system(system)\nsolfac.set_dt_initial(0.001)\nsolfac.set_newton_maxiter(30)\nsolfac.set_newton_atol(1e-6)\nsolfac.set_newton_rtol(1e-8)\nsolfac.set_linear_solver('scipy-sparse')\nsolfac.set_nonlinear_solver('newton')\nsolfac.set_analysis_type('transient')\nsolfac.set_integrator('genalpha')\n\nsolver = solfac.create_solver()\n\nsol_dyn_nl_full = AmfeSolution()\n\n\ndef write_callback(t, x, dx, ddx):\n u, du, ddu = formulation.recover(x, dx, ddx, t)\n sol_dyn_nl_full.write_timestep(t, u, du, ddu)\n\n\nno_of_dofs = system.dimension\nx0 = np.zeros(no_of_dofs)\ndx0 = x0.copy()\nt_start = 0.0\nt_end = 1.0\n\nif 'full_ti' in studies:\n t0 = time.time()\n solver.solve(write_callback, t_start, x0, dx0, t_end)\n t1 = time.time()\n times.update({'Nonlinear full solution:': t1-t0})\n print('Full dynamic solution took {} seconds'.format(t1-t0))\n\n write_results_to_paraview(sol_dyn_nl_full, component, output_file + '_dyn_nl_full')\n\n# ------------------- SOLVE LINEAR DYNAMICS ------------------------------\n\nif 'create_basis_1' in studies:\n t0 = time.time()\n K0 = system.K(x0, dx0, 0.0)\n M0 = system.M(x0, dx0, 0.0)\n omega, V = vibration_modes(K0, M0, 6, mass_orth=True)\n\n def sdK(x):\n return system.K(x, x0, 0.0)\n\n Theta = static_derivatives(V, sdK, M0)\n V_extended = augment_with_derivatives(V, Theta)\n t1 = time.time()\n times.update({'nonlinear basis generation:': t1-t0})\n print('nonlinear basis generation took {} seconds'.format(t1-t0))\n\n sol_basis_1 = AmfeSolution()\n for i in np.arange(V_extended.shape[1]):\n u = formulation.u(V_extended[:, i], 0.0)\n sol_basis_1.write_timestep(i, u)\n\n write_results_to_paraview(sol_basis_1, component, output_file + '_reduction_basis_1')\n\n sol_basis_sd = AmfeSolution()\n counter = 0\n for i in 
np.arange(Theta.shape[1]):\n        for j in np.arange(Theta.shape[1]):\n            if i > j:\n                Theta_u = formulation.u(Theta[:, i, j], 0.0)\n                sol_basis_sd.write_timestep(counter, Theta_u)\n                counter = counter + 1\n\n    write_results_to_paraview(sol_basis_sd, component, output_file + '_static_derivatives')\n\nif 'nskts' in studies:\n    # Training Set Generation\n    t0 = time.time()\n    K0 = system.K(x0, dx0, 0.0)\n    M0 = system.M(x0, dx0, 0.0)\n    t_max = np.pi/2/Omega\n    F_ext_max = system.f_ext(x0, dx0, t_max)\n\n    def fint_func(x):\n        return system.f_int(x, dx0, 0.0)\n\n    def K_func(x):\n        return system.K(x, dx0, 0.0)\n\n    nskts = compute_nskts(K0, M0, F_ext_max, fint_func, K_func)\n    t1 = time.time()\n    times.update({'Training-Set Generation (NSKTS):': t1-t0})\n    print('Training-Set Generation (NSKTS) took {} seconds'.format(t1 - t0))\n\n    sol_nskts = AmfeSolution()\n\n    for i in range(nskts.shape[1]):\n        # Recover unconstrained u\n        u = formulation.u(nskts[:, i], 0.0)\n        sol_nskts.write_timestep(i, u)\n\n    write_results_to_paraview(sol_nskts, component, output_file + '_nskts')\nelse:\n    nskts = np.load(output_file + '_nskts.npy')\n\n\nif 'red_ti' in studies:\n    # Reduce system\n    t0 = time.time()\n    red_system = reduce_mechanical_system(system, V_extended, constant_mass=True, constant_damping=True)\n    t1 = time.time()\n    times.update({'Reduction step:': t1-t0})\n    print('Reduction step took {} seconds'.format(t1 - t0))\n    solfac.set_system(red_system)\n\n    red_solver = solfac.create_solver()\n\n    sol_dyn_nl_red = AmfeSolution()\n\n\n    def write_callback(t, x, dx, ddx):\n        u, du, ddu = formulation.recover(V_extended.dot(x), V_extended.dot(dx), V_extended.dot(ddx), t)\n        sol_dyn_nl_red.write_timestep(t, u, du, ddu)\n\n\n    no_of_red_dofs = red_system.dimension\n    x0 = np.zeros(no_of_red_dofs)\n    dx0 = x0.copy()\n    t_start = 0.0\n    t_end = 1.0\n\n    t0 = time.time()\n    red_solver.solve(write_callback, t_start, x0, dx0, t_end)\n    t1 = time.time()\n    times.update({'Nonlinear reduced solution:': t1 - t0})\n    print('Reduced dynamic solution took {} seconds'.format(t1 - t0))\n\n    write_results_to_paraview(sol_dyn_nl_red, component, output_file + '_dyn_nl_red')\n\n\nif 'ecsw_weight_generation' in studies:\n    # Hyperreduction ECSW\n    t0 = time.time()\n    q_training = np.linalg.solve((V_extended.T @ V_extended), V_extended.T @ nskts)\n\n    x_training = V_extended @ q_training\n    weights, indices, stats = ecsw_get_weights_from_constrained_training(x_training, component, formulation, V_extended)\n\n    np.save(output_file + '_ecsw_weights.npy', weights)\n    np.save(output_file + '_ecsw_indices.npy', indices)\n    t1 = time.time()\n    times.update({'Hyperreduction step:': t1-t0})\n    print('Hyperreduction step took {} seconds'.format(t1 - t0))\n\nif 'ecsw' in studies:\n    weights = np.load(output_file + '_ecsw_weights.npy')\n    indices = np.load(output_file + '_ecsw_indices.npy')\n\n    # Create reduced system\n    tagname = 'ecsw_weights'\n    ecsw_system, ecsw_formulation, ecsw_component = create_ecsw_hyperreduced_mechanical_system_from_weights(component, V_extended, weights, indices, 'boolean',\n                                                                                                            constant_mass=True, constant_damping=True,\n                                                                                                            tagname=tagname)\n\n    # Solve system\n    solfac.set_system(ecsw_system)\n    ecsw_solver = solfac.create_solver()\n\n    sol_dyn_nl_ecsw = AmfeSolution()\n\n    def write_callback(t, x, dx, ddx):\n        u, du, ddu = ecsw_formulation.recover(V_extended.dot(x), V_extended.dot(dx), V_extended.dot(ddx), t)\n        sol_dyn_nl_ecsw.write_timestep(t, u, du, ddu)\n\n\n    # Set initial conditions\n    no_of_dofs = ecsw_system.dimension\n    x0 = np.zeros(no_of_dofs)\n    dx0 = x0.copy()\n    # Set start and end time for time 
integration\n    t_start = 0.0\n    t_end = 1.0\n\n    # Solve hyperreduced system\n    t0 = time.time()\n    ecsw_solver.solve(write_callback, t_start, x0, dx0, t_end)\n    t1 = time.time()\n    times.update({'ECSW solution:': t1 - t0})\n    print('ECSW solution took {} seconds'.format(t1 - t0))\n\n    # -- POSTPROCESSING --\n    # Instantiate Hdf5PostProcessorWriter\n    mreader = AmfeMeshObjMeshReader(component.mesh)\n    ecsw_output = output_file + '_ecsw_weights.hdf5'\n    if os.path.isfile(ecsw_output):\n        os.remove(ecsw_output)\n    hwriter = Hdf5PostProcessorWriter(mreader, ecsw_output)\n\n    # Write Solution\n    preader = AmfeSolutionReader(sol_dyn_nl_ecsw, component)\n    preader.parse(hwriter)\n\n    # Write ECSW weights\n    data = ecsw_component.mesh.el_df[tagname].values\n    indices = ecsw_component.mesh.el_df.index.values\n    hwriter.write_field(tagname, PostProcessDataType.SCALAR, sol_dyn_nl_ecsw.t,\n                        data, indices, MeshEntityType.ELEMENT)\n\n    # Finish writing -> Call return result\n    hwriter.return_result()\n\n    # Write xdmf file from hdf5 for viewing in paraview\n    paraviewfilename = output_file + '_ecsw_weights'\n    hdf5resultsfilename = paraviewfilename + '.hdf5'\n    xdmfresultsfilename = paraviewfilename + '.xdmf'\n\n    fielddict = {'weights': {'mesh_entity_type': MeshEntityType.ELEMENT,\n                             'data_type': PostProcessDataType.SCALAR,\n                             'hdf5path': '/results/ecsw_weights'\n                             },\n                 'displacement': {'mesh_entity_type': MeshEntityType.NODE,\n                                  'data_type': PostProcessDataType.VECTOR,\n                                  'hdf5path': '/results/displacement'\n                                  }\n                 }\n\n    with open(xdmfresultsfilename, 'wb') as xdmffp:\n        with File(hdf5resultsfilename, mode='r') as hdf5fp:\n            write_xdmf_from_hdf5(xdmffp, hdf5fp, '/mesh/nodes', '/mesh/topology', sol_dyn_nl_ecsw.t, fielddict)\n\n\nif 'poly3' in studies:\n    K1, K2, K3 = poly3_get_tensors(system, V_extended)\n    poly3_system = create_poly3_hyperreduced_system(system, V_extended, K1, K2, K3)\n\n    solfac.set_system(poly3_system)\n    poly3_solver = solfac.create_solver()\n\n    sol_dyn_nl_poly3 = AmfeSolution()\n\n\n    def write_callback(t, x, dx, ddx):\n        u, du, ddu = formulation.recover(V_extended.dot(x), V_extended.dot(dx), V_extended.dot(ddx), t)\n        sol_dyn_nl_poly3.write_timestep(t, u, du, ddu)\n\n\n    no_of_red_dofs = poly3_system.dimension\n    x0 = np.zeros(no_of_red_dofs)\n    dx0 = x0.copy()\n    t_start = 0.0\n    t_end = 1.0\n\n    t0 = time.time()\n    poly3_solver.solve(write_callback, t_start, x0, dx0, t_end)\n    t1 = time.time()\n    times.update({'Nonlinear poly3 solution:': t1 - t0})\n    print('Poly3 dynamic solution took {} seconds'.format(t1 - t0))\n\n    write_results_to_paraview(sol_dyn_nl_poly3, component, output_file + '_dyn_nl_poly3')\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.zeros",
"numpy.load",
"numpy.save",
"numpy.arange",
"numpy.linalg.solve"
]
] |
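The script recorded above builds its ECSW training set by least-squares projection of the NSKTS snapshots onto the extended reduction basis (q_training = solve(V^T V, V^T nskts), then x_training = V q_training). A minimal NumPy sketch of just that projection step follows; the orthonormalized random basis V and the synthetic snapshots are made-up stand-ins for V_extended and nskts, not the script's actual data:

import numpy as np

rng = np.random.default_rng(0)
n_dofs, n_modes, n_snapshots = 200, 10, 5

# Stand-in for V_extended: an orthonormalized random basis.
V = np.linalg.qr(rng.standard_normal((n_dofs, n_modes)))[0]
# Stand-in for nskts: snapshots that happen to lie in span(V).
snapshots = V @ rng.standard_normal((n_modes, n_snapshots))

# Least-squares projection onto the basis, as in the script above.
q_training = np.linalg.solve(V.T @ V, V.T @ snapshots)
x_training = V @ q_training

# For snapshots inside span(V) the reconstruction is exact up to round-off.
print(np.linalg.norm(snapshots - x_training))  # ~1e-13

For general snapshots the residual measures how much of the training data the basis can represent, which is what the subsequent ECSW weight fit has to work with.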
StephAO/gym-minigrid
|
[
"6ab2914c2731a68e41e5b4c97a6877b19d4964b5"
] |
[
"gym_minigrid/minigrid.py"
] |
[
"import math\nimport hashlib\nimport gym\nfrom enum import IntEnum\nimport numpy as np\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom .rendering import *\n\n# Size in pixels of a tile in the full-scale human view\nTILE_PIXELS = 32\n\n# Map of color names to RGB values\nCOLORS = {\n 'red' : np.array([255, 0, 0]),\n 'green' : np.array([0, 255, 0]),\n 'blue' : np.array([0, 0, 255]),\n 'purple': np.array([112, 39, 195]),\n 'yellow': np.array([255, 255, 0]),\n 'grey' : np.array([100, 100, 100]),\n 'white' : np.array([255, 255, 255]),\n 'cyan' : np.array([0, 255, 255]),\n 'brown' : np.array([139, 69, 19]),\n 'orange' : np.array([255, 99, 71])\n}\n\nCOLOR_NAMES = sorted(list(COLORS.keys()))\n\n# Used to map colors to integers\nCOLOR_TO_IDX = {\n 'red' : 0,\n 'green' : 1,\n 'blue' : 2,\n 'purple': 3,\n 'yellow': 4,\n 'grey' : 5,\n 'white' : 6,\n 'cyan' : 7,\n 'brown' : 8,\n 'orange' : 9 \n}\n\nIDX_TO_COLOR = dict(zip(COLOR_TO_IDX.values(), COLOR_TO_IDX.keys()))\n\n# Map of object type to integers\nOBJECT_TO_IDX = {\n 'unseen' : 0,\n 'empty' : 1,\n 'wall' : 2,\n 'floor' : 3,\n 'door' : 4,\n 'key' : 5,\n 'ball' : 6,\n 'box' : 7,\n 'goal' : 8,\n 'lava' : 9,\n 'agent' : 10,\n 'square' : 11,\n 'crate' : 12,\n 'circle' : 13\n}\n\nIDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))\n\n# Map of state names to integers\nSTATE_TO_IDX = {\n 'open' : 0,\n 'closed': 1,\n 'locked': 2,\n}\n\n# Map of agent direction indices to vectors\nDIR_TO_VEC = [\n # Pointing right (positive X)\n np.array((1, 0)),\n # Down (positive Y)\n np.array((0, 1)),\n # Pointing left (negative X)\n np.array((-1, 0)),\n # Up (negative Y)\n np.array((0, -1)),\n]\n\nclass WorldObj:\n \"\"\"\n Base class for grid world objects\n \"\"\"\n\n def __init__(self, type, color):\n assert type in OBJECT_TO_IDX, type\n assert color in COLOR_TO_IDX, color\n self.type = type\n self.color = color\n self.contains = None\n\n # Initial position of the object\n self.init_pos = None\n\n # Current position of the object\n self.cur_pos = None\n\n def can_overlap(self):\n \"\"\"Can the agent overlap with this?\"\"\"\n return False\n\n def can_pickup(self):\n \"\"\"Can the agent pick this up?\"\"\"\n return False\n\n def can_contain(self):\n \"\"\"Can this contain another object?\"\"\"\n return False\n\n def see_behind(self):\n \"\"\"Can the agent see behind this object?\"\"\"\n return True\n\n def toggle(self, env, pos):\n \"\"\"Method to trigger/toggle an action this object performs\"\"\"\n return False\n\n def encode(self):\n \"\"\"Encode the a description of this object as a 3-tuple of integers\"\"\"\n return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], 0)\n\n @staticmethod\n def decode(type_idx, color_idx, state):\n \"\"\"Create an object from a 3-tuple state description\"\"\"\n\n obj_type = IDX_TO_OBJECT[type_idx]\n color = IDX_TO_COLOR[color_idx]\n\n if obj_type == 'empty' or obj_type == 'unseen':\n return None\n\n # State, 0: open, 1: closed, 2: locked\n is_open = state == 0\n is_locked = state == 2\n\n if obj_type == 'wall':\n v = Wall(color)\n elif obj_type == 'floor':\n v = Floor(color)\n elif obj_type == 'ball':\n v = Ball(color)\n elif obj_type == 'key':\n v = Key(color)\n elif obj_type == 'box':\n v = Box(color)\n elif obj_type == 'door':\n v = Door(color, is_open, is_locked)\n elif obj_type == 'goal':\n v = Goal()\n elif obj_type == 'lava':\n v = Lava()\n elif obj_type == 'circle':\n v = Circle()\n elif obj_type == 'square':\n v = Square()\n elif obj_type == 'crate':\n v = Crate()\n else:\n 
assert False, \"unknown object type in decode '%s'\" % obj_type\n\n return v\n\n def render(self, r):\n \"\"\"Draw this object with the given renderer\"\"\"\n raise NotImplementedError\n\nclass Goal(WorldObj):\n def __init__(self):\n super().__init__('goal', 'green')\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])\n\nclass Floor(WorldObj):\n \"\"\"\n Colored floor tile the agent can walk over\n \"\"\"\n\n def __init__(self, color='blue'):\n super().__init__('floor', color)\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n # Give the floor a pale color\n color = COLORS[self.color] / 2\n fill_coords(img, point_in_rect(0.031, 1, 0.031, 1), color)\n\n\nclass Lava(WorldObj):\n def __init__(self):\n super().__init__('lava', 'red')\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n c = (255, 128, 0)\n\n # Background color\n fill_coords(img, point_in_rect(0, 1, 0, 1), c)\n\n # Little waves\n for i in range(3):\n ylo = 0.3 + 0.2 * i\n yhi = 0.4 + 0.2 * i\n fill_coords(img, point_in_line(0.1, ylo, 0.3, yhi, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.3, yhi, 0.5, ylo, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.5, ylo, 0.7, yhi, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.7, yhi, 0.9, ylo, r=0.03), (0,0,0))\n\nclass Wall(WorldObj):\n def __init__(self, color='grey'):\n super().__init__('wall', color)\n\n def see_behind(self):\n return False\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])\n\nclass Door(WorldObj):\n def __init__(self, color, is_open=False, is_locked=False):\n super().__init__('door', color)\n self.is_open = is_open\n self.is_locked = is_locked\n\n def can_overlap(self):\n \"\"\"The agent can only walk over this cell when the door is open\"\"\"\n return self.is_open\n\n def see_behind(self):\n return self.is_open\n\n def toggle(self, env, pos):\n # If the player has the right key to open the door\n if self.is_locked:\n if isinstance(env.carrying, Key) and env.carrying.color == self.color:\n self.is_locked = False\n self.is_open = True\n return True\n return False\n\n self.is_open = not self.is_open\n return True\n\n def encode(self):\n \"\"\"Encode the a description of this object as a 3-tuple of integers\"\"\"\n\n # State, 0: open, 1: closed, 2: locked\n if self.is_open:\n state = 0\n elif self.is_locked:\n state = 2\n elif not self.is_open:\n state = 1\n\n return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], state)\n\n def render(self, img):\n c = COLORS[self.color]\n\n if self.is_open:\n fill_coords(img, point_in_rect(0.88, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.92, 0.96, 0.04, 0.96), (0,0,0))\n return\n\n # Door frame and door\n if self.is_locked:\n fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.06, 0.94, 0.06, 0.94), 0.45 * np.array(c))\n\n # Draw key slot\n fill_coords(img, point_in_rect(0.52, 0.75, 0.50, 0.56), c)\n else:\n fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.04, 0.96, 0.04, 0.96), (0,0,0))\n fill_coords(img, point_in_rect(0.08, 0.92, 0.08, 0.92), c)\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), (0,0,0))\n\n # Draw door handle\n fill_coords(img, point_in_circle(cx=0.75, cy=0.50, r=0.08), c)\n\nclass Key(WorldObj):\n def __init__(self, color='blue'):\n super(Key, self).__init__('key', color)\n\n def can_pickup(self):\n return True\n\n def 
render(self, img):\n c = COLORS[self.color]\n\n # Vertical quad\n fill_coords(img, point_in_rect(0.50, 0.63, 0.31, 0.88), c)\n\n # Teeth\n fill_coords(img, point_in_rect(0.38, 0.50, 0.59, 0.66), c)\n fill_coords(img, point_in_rect(0.38, 0.50, 0.81, 0.88), c)\n\n # Ring\n fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.190), c)\n fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.064), (0,0,0))\n\nclass Ball(WorldObj):\n def __init__(self, color='blue'):\n super(Ball, self).__init__('ball', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_circle(0.5, 0.5, 0.31), COLORS[self.color])\n\nclass Circle(WorldObj):\n def __init__(self, color='blue'):\n super(Circle, self).__init__('circle', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_circle(0.5, 0.5, 0.31), COLORS[self.color])\n\nclass Box(WorldObj):\n def __init__(self, color, contains=None):\n super(Box, self).__init__('box', color)\n self.contains = contains\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n\n # Outline\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), c)\n fill_coords(img, point_in_rect(0.18, 0.82, 0.18, 0.82), (0,0,0))\n\n # Horizontal slit\n fill_coords(img, point_in_rect(0.16, 0.84, 0.47, 0.53), c)\n\n def toggle(self, env, pos):\n # Replace the box by its contents\n env.grid.set(*pos, self.contains)\n return True\n\nclass Square(WorldObj):\n def __init__(self, color):\n super(Square, self).__init__('square', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n # Outline\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), c)\n\n\nclass Crate(WorldObj):\n def __init__(self, color):\n super(Crate, self).__init__('crate', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n # Outline\n fill_coords(img, point_in_rect(0.1, 0.9, 0.3, 0.7), c)\n\n\nclass Grid:\n \"\"\"\n Represent a grid and operations on it\n \"\"\"\n\n # Static cache of pre-renderer tiles\n tile_cache = {}\n\n def __init__(self, width, height):\n assert width >= 3\n assert height >= 3\n\n self.width = width\n self.height = height\n\n self.grid = [None] * width * height\n\n def __contains__(self, key):\n if isinstance(key, WorldObj):\n for e in self.grid:\n if e is key:\n return True\n elif isinstance(key, tuple):\n for e in self.grid:\n if e is None:\n continue\n if (e.color, e.type) == key:\n return True\n if key[0] is None and key[1] == e.type:\n return True\n return False\n\n def __eq__(self, other):\n grid1 = self.encode()\n grid2 = other.encode()\n return np.array_equal(grid2, grid1)\n\n def __ne__(self, other):\n return not self == other\n\n def copy(self):\n from copy import deepcopy\n return deepcopy(self)\n\n def set(self, i, j, v):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n self.grid[j * self.width + i] = v\n\n def get(self, i, j):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n return self.grid[j * self.width + i]\n\n def horz_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.width - x\n for i in range(0, length):\n self.set(x + i, y, obj_type())\n\n def vert_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.height - y\n for j in range(0, length):\n self.set(x, y + j, obj_type())\n\n def wall_rect(self, x, y, w, h):\n self.horz_wall(x, y, w)\n self.horz_wall(x, 
y+h-1, w)\n self.vert_wall(x, y, h)\n self.vert_wall(x+w-1, y, h)\n\n def rotate_left(self):\n \"\"\"\n Rotate the grid to the left (counter-clockwise)\n \"\"\"\n\n grid = Grid(self.height, self.width)\n\n for i in range(self.width):\n for j in range(self.height):\n v = self.get(i, j)\n grid.set(j, grid.height - 1 - i, v)\n\n return grid\n\n def slice(self, topX, topY, width, height):\n \"\"\"\n Get a subset of the grid\n \"\"\"\n\n grid = Grid(width, height)\n\n for j in range(0, height):\n for i in range(0, width):\n x = topX + i\n y = topY + j\n\n if x >= 0 and x < self.width and \\\n y >= 0 and y < self.height:\n v = self.get(x, y)\n else:\n v = Wall()\n\n grid.set(i, j, v)\n\n return grid\n\n @classmethod\n def render_tile(\n cls,\n obj,\n agent_dir=None,\n highlight=False,\n tile_size=TILE_PIXELS,\n subdivs=3\n ):\n \"\"\"\n Render a tile and cache the result\n \"\"\"\n\n # Hash map lookup key for the cache\n key = (agent_dir, highlight, tile_size)\n key = obj.encode() + key if obj else key\n\n if key in cls.tile_cache:\n return cls.tile_cache[key]\n\n img = np.zeros(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8)\n\n # Draw the grid lines (top and left edges)\n fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))\n fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))\n\n if obj != None:\n obj.render(img)\n\n # Overlay the agent on top\n if agent_dir is not None:\n tri_fn = point_in_triangle(\n (0.12, 0.19),\n (0.87, 0.50),\n (0.12, 0.81),\n )\n\n # Rotate the agent based on its direction\n tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5*math.pi*agent_dir)\n fill_coords(img, tri_fn, (255, 0, 0))\n\n # Highlight the cell if needed\n if highlight:\n highlight_img(img)\n\n # Downsample the image to perform supersampling/anti-aliasing\n img = downsample(img, subdivs)\n\n # Cache the rendered tile\n cls.tile_cache[key] = img\n\n return img\n\n def render(\n self,\n tile_size,\n agent_pos=None,\n agent_dir=None,\n highlight_mask=None\n ):\n \"\"\"\n Render this grid at a given scale\n :param r: target renderer object\n :param tile_size: tile size in pixels\n \"\"\"\n\n if highlight_mask is None:\n highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)\n\n # Compute the total grid size\n width_px = self.width * tile_size\n height_px = self.height * tile_size\n\n img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)\n\n # Render the grid\n for j in range(0, self.height):\n for i in range(0, self.width):\n cell = self.get(i, j)\n\n agent_here = np.array_equal(agent_pos, (i, j))\n tile_img = Grid.render_tile(\n cell,\n agent_dir=agent_dir if agent_here else None,\n highlight=highlight_mask[i, j],\n tile_size=tile_size\n )\n\n ymin = j * tile_size\n ymax = (j+1) * tile_size\n xmin = i * tile_size\n xmax = (i+1) * tile_size\n img[ymin:ymax, xmin:xmax, :] = tile_img\n\n return img\n\n def encode(self, vis_mask=None):\n \"\"\"\n Produce a compact numpy encoding of the grid\n \"\"\"\n\n if vis_mask is None:\n vis_mask = np.ones((self.width, self.height), dtype=bool)\n\n array = np.zeros((self.width, self.height, 3), dtype='uint8')\n\n for i in range(self.width):\n for j in range(self.height):\n if vis_mask[i, j]:\n v = self.get(i, j)\n\n if v is None:\n array[i, j, 0] = OBJECT_TO_IDX['empty']\n array[i, j, 1] = 0\n array[i, j, 2] = 0\n\n else:\n array[i, j, :] = v.encode()\n\n return array\n\n @staticmethod\n def decode(array):\n \"\"\"\n Decode an array grid encoding back into a grid\n \"\"\"\n\n width, height, channels 
= array.shape\n assert channels == 3\n\n vis_mask = np.ones(shape=(width, height), dtype=np.bool)\n\n grid = Grid(width, height)\n for i in range(width):\n for j in range(height):\n type_idx, color_idx, state = array[i, j]\n v = WorldObj.decode(type_idx, color_idx, state)\n grid.set(i, j, v)\n vis_mask[i, j] = (type_idx != OBJECT_TO_IDX['unseen'])\n\n return grid, vis_mask\n\n def process_vis(grid, agent_pos):\n mask = np.zeros(shape=(grid.width, grid.height), dtype=np.bool)\n\n mask[agent_pos[0], agent_pos[1]] = True\n\n for j in reversed(range(0, grid.height)):\n for i in range(0, grid.width-1):\n if not mask[i, j]:\n continue\n\n cell = grid.get(i, j)\n if cell and not cell.see_behind():\n continue\n\n mask[i+1, j] = True\n if j > 0:\n mask[i+1, j-1] = True\n mask[i, j-1] = True\n\n for i in reversed(range(1, grid.width)):\n if not mask[i, j]:\n continue\n\n cell = grid.get(i, j)\n if cell and not cell.see_behind():\n continue\n\n mask[i-1, j] = True\n if j > 0:\n mask[i-1, j-1] = True\n mask[i, j-1] = True\n\n for j in range(0, grid.height):\n for i in range(0, grid.width):\n if not mask[i, j]:\n grid.set(i, j, None)\n\n return mask\n\nclass MiniGridEnv(gym.Env):\n \"\"\"\n 2D grid world game environment\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 10\n }\n\n # Enumeration of possible actions\n class Actions(IntEnum):\n # Turn left, turn right, move forward\n left = 0\n right = 1\n forward = 2\n\n # Pick up an object\n pickup = 3\n # Drop an object\n drop = 4\n # Toggle/activate an object\n toggle = 5\n\n # Done completing task\n done = 6\n\n def __init__(\n self,\n grid_size=None,\n width=None,\n height=None,\n max_steps=100,\n see_through_walls=False,\n seed=1337,\n agent_view_size=7\n ):\n # Can't set both grid_size and width/height\n if grid_size:\n assert width == None and height == None\n width = grid_size\n height = grid_size\n\n # Action enumeration for this environment\n self.actions = MiniGridEnv.Actions\n\n # Actions are discrete integer values\n self.action_space = spaces.Discrete(len(self.actions))\n\n # Number of cells (width and height) in the agent view\n assert agent_view_size % 2 == 1\n assert agent_view_size >= 3\n self.agent_view_size = agent_view_size\n\n # Observations are dictionaries containing an\n # encoding of the grid and a textual 'mission' string\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.agent_view_size, self.agent_view_size, 3),\n dtype='uint8'\n )\n self.observation_space = spaces.Dict({\n 'image': self.observation_space\n })\n\n # Range of possible rewards\n self.reward_range = (0, 1)\n\n # Window to use for human rendering mode\n self.window = None\n\n # Environment configuration\n self.width = width\n self.height = height\n self.max_steps = max_steps\n self.see_through_walls = see_through_walls\n\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n\n # Initialize the RNG\n self.seed(seed=seed)\n\n # Initialize the state\n self.reset()\n\n def reset(self):\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n\n # Generate a new random grid at the start of each episode\n # To keep the same grid for each episode, call env.seed() with\n # the same seed before calling env.reset()\n self._gen_grid(self.width, self.height)\n\n # These fields should be defined by _gen_grid\n assert self.agent_pos is not None\n assert self.agent_dir is not None\n\n # Check that the agent doesn't overlap 
with an object\n start_cell = self.grid.get(*self.agent_pos)\n assert start_cell is None or start_cell.can_overlap()\n\n # Item picked up, being carried, initially nothing\n self.carrying = None\n\n # Step count since episode start\n self.step_count = 0\n\n # Return first observation\n obs = self.gen_obs()\n return obs\n\n def seed(self, seed=1337):\n # Seed the random number generator\n self.np_random, _ = seeding.np_random(seed)\n return [seed]\n\n def hash(self, size=16):\n \"\"\"Compute a hash that uniquely identifies the current state of the environment.\n :param size: Size of the hashing\n \"\"\"\n sample_hash = hashlib.sha256()\n\n to_encode = [self.grid.encode().tolist(), self.agent_pos, self.agent_dir]\n for item in to_encode:\n sample_hash.update(str(item).encode('utf8'))\n\n return sample_hash.hexdigest()[:size]\n\n @property\n def steps_remaining(self):\n return self.max_steps - self.step_count\n\n def __str__(self):\n \"\"\"\n Produce a pretty string of the environment's grid along with the agent.\n A grid cell is represented by 2-character string, the first one for\n the object and the second one for the color.\n \"\"\"\n\n # Map of object types to short string\n OBJECT_TO_STR = {\n 'wall' : 'W',\n 'floor' : 'F',\n 'door' : 'D',\n 'key' : 'K',\n 'ball' : 'A',\n 'box' : 'B',\n 'goal' : 'G',\n 'lava' : 'V',\n }\n\n # Short string for opened door\n OPENDED_DOOR_IDS = '_'\n\n # Map agent's direction to short string\n AGENT_DIR_TO_STR = {\n 0: '>',\n 1: 'V',\n 2: '<',\n 3: '^'\n }\n\n str = ''\n\n for j in range(self.grid.height):\n\n for i in range(self.grid.width):\n if i == self.agent_pos[0] and j == self.agent_pos[1]:\n str += 2 * AGENT_DIR_TO_STR[self.agent_dir]\n continue\n\n c = self.grid.get(i, j)\n\n if c == None:\n str += ' '\n continue\n\n if c.type == 'door':\n if c.is_open:\n str += '__'\n elif c.is_locked:\n str += 'L' + c.color[0].upper()\n else:\n str += 'D' + c.color[0].upper()\n continue\n\n str += OBJECT_TO_STR[c.type] + c.color[0].upper()\n\n if j < self.grid.height - 1:\n str += '\\n'\n\n return str\n\n def _gen_grid(self, width, height):\n assert False, \"_gen_grid needs to be implemented by each environment\"\n\n def _reward(self):\n \"\"\"\n Compute the reward to be given upon success\n \"\"\"\n\n return 1 - 0.9 * (self.step_count / self.max_steps)\n\n def _rand_int(self, low, high):\n \"\"\"\n Generate random integer in [low,high]\n \"\"\"\n\n return self.np_random.randint(low, high)\n\n def _rand_float(self, low, high):\n \"\"\"\n Generate random float in [low,high]\n \"\"\"\n\n return self.np_random.uniform(low, high)\n\n def _rand_bool(self):\n \"\"\"\n Generate random boolean value\n \"\"\"\n\n return (self.np_random.randint(0, 2) == 0)\n\n def _rand_elem(self, iterable):\n \"\"\"\n Pick a random element in a list\n \"\"\"\n\n lst = list(iterable)\n idx = self._rand_int(0, len(lst))\n return lst[idx]\n\n def _rand_subset(self, iterable, num_elems):\n \"\"\"\n Sample a random subset of distinct elements of a list\n \"\"\"\n\n lst = list(iterable)\n assert num_elems <= len(lst)\n\n out = []\n\n while len(out) < num_elems:\n elem = self._rand_elem(lst)\n lst.remove(elem)\n out.append(elem)\n\n return out\n\n def _rand_color(self):\n \"\"\"\n Generate a random color name (string)\n \"\"\"\n\n return self._rand_elem(COLOR_NAMES)\n\n def _rand_pos(self, xLow, xHigh, yLow, yHigh):\n \"\"\"\n Generate a random (x,y) position tuple\n \"\"\"\n\n return (\n self.np_random.randint(xLow, xHigh),\n self.np_random.randint(yLow, yHigh)\n )\n\n def place_obj(self,\n 
obj,\n top=None,\n size=None,\n reject_fn=None,\n max_tries=math.inf\n ):\n \"\"\"\n Place an object at an empty position in the grid\n\n :param top: top-left position of the rectangle where to place\n :param size: size of the rectangle where to place\n :param reject_fn: function to filter out potential positions\n \"\"\"\n\n if top is None:\n top = (0, 0)\n else:\n top = (max(top[0], 0), max(top[1], 0))\n\n if size is None:\n size = (self.grid.width, self.grid.height)\n\n num_tries = 0\n\n while True:\n # This is to handle with rare cases where rejection sampling\n # gets stuck in an infinite loop\n if num_tries > max_tries:\n raise RecursionError('rejection sampling failed in place_obj')\n\n num_tries += 1\n\n pos = np.array((\n self._rand_int(top[0], min(top[0] + size[0], self.grid.width)),\n self._rand_int(top[1], min(top[1] + size[1], self.grid.height))\n ))\n\n # Don't place the object on top of another object\n if self.grid.get(*pos) != None:\n continue\n\n # Don't place the object where the agent is\n if np.array_equal(pos, self.agent_pos):\n continue\n\n # Check if there is a filtering criterion\n if reject_fn and reject_fn(self, pos):\n continue\n\n break\n\n self.grid.set(*pos, obj)\n\n if obj is not None:\n obj.init_pos = pos\n obj.cur_pos = pos\n\n return pos\n\n def put_obj(self, obj, i, j):\n \"\"\"\n Put an object at a specific position in the grid\n \"\"\"\n\n self.grid.set(i, j, obj)\n obj.init_pos = (i, j)\n obj.cur_pos = (i, j)\n\n def place_agent(\n self,\n top=None,\n size=None,\n rand_dir=True,\n max_tries=math.inf\n ):\n \"\"\"\n Set the agent's starting point at an empty position in the grid\n \"\"\"\n\n self.agent_pos = None\n pos = self.place_obj(None, top, size, max_tries=max_tries)\n self.agent_pos = pos\n\n if rand_dir:\n self.agent_dir = self._rand_int(0, 4)\n\n return pos\n\n @property\n def dir_vec(self):\n \"\"\"\n Get the direction vector for the agent, pointing in the direction\n of forward movement.\n \"\"\"\n\n assert self.agent_dir >= 0 and self.agent_dir < 4\n return DIR_TO_VEC[self.agent_dir]\n\n @property\n def right_vec(self):\n \"\"\"\n Get the vector pointing to the right of the agent.\n \"\"\"\n\n dx, dy = self.dir_vec\n return np.array((-dy, dx))\n\n @property\n def front_pos(self):\n \"\"\"\n Get the position of the cell that is right in front of the agent\n \"\"\"\n\n return self.agent_pos + self.dir_vec\n\n def get_view_coords(self, i, j):\n \"\"\"\n Translate and rotate absolute grid coordinates (i, j) into the\n agent's partially observable view (sub-grid). 
Note that the resulting\n coordinates may be negative or outside of the agent's view size.\n \"\"\"\n\n ax, ay = self.agent_pos\n dx, dy = self.dir_vec\n rx, ry = self.right_vec\n\n # Compute the absolute coordinates of the top-left view corner\n sz = self.agent_view_size\n hs = self.agent_view_size // 2\n tx = ax + (dx * (sz-1)) - (rx * hs)\n ty = ay + (dy * (sz-1)) - (ry * hs)\n\n lx = i - tx\n ly = j - ty\n\n # Project the coordinates of the object relative to the top-left\n # corner onto the agent's own coordinate system\n vx = (rx*lx + ry*ly)\n vy = -(dx*lx + dy*ly)\n\n return vx, vy\n\n def get_view_exts(self):\n \"\"\"\n Get the extents of the square set of tiles visible to the agent\n Note: the bottom extent indices are not included in the set\n \"\"\"\n\n # Facing right\n if self.agent_dir == 0:\n topX = self.agent_pos[0]\n topY = self.agent_pos[1] - self.agent_view_size // 2\n # Facing down\n elif self.agent_dir == 1:\n topX = self.agent_pos[0] - self.agent_view_size // 2\n topY = self.agent_pos[1]\n # Facing left\n elif self.agent_dir == 2:\n topX = self.agent_pos[0] - self.agent_view_size + 1\n topY = self.agent_pos[1] - self.agent_view_size // 2\n # Facing up\n elif self.agent_dir == 3:\n topX = self.agent_pos[0] - self.agent_view_size // 2\n topY = self.agent_pos[1] - self.agent_view_size + 1\n else:\n assert False, \"invalid agent direction\"\n\n botX = topX + self.agent_view_size\n botY = topY + self.agent_view_size\n\n return (topX, topY, botX, botY)\n\n def relative_coords(self, x, y):\n \"\"\"\n Check if a grid position belongs to the agent's field of view, and returns the corresponding coordinates\n \"\"\"\n\n vx, vy = self.get_view_coords(x, y)\n\n if vx < 0 or vy < 0 or vx >= self.agent_view_size or vy >= self.agent_view_size:\n return None\n\n return vx, vy\n\n def in_view(self, x, y):\n \"\"\"\n check if a grid position is visible to the agent\n \"\"\"\n\n return self.relative_coords(x, y) is not None\n\n def agent_sees(self, x, y):\n \"\"\"\n Check if a non-empty grid position is visible to the agent\n \"\"\"\n\n coordinates = self.relative_coords(x, y)\n if coordinates is None:\n return False\n vx, vy = coordinates\n\n obs = self.gen_obs()\n obs_grid, _ = Grid.decode(obs['image'])\n obs_cell = obs_grid.get(vx, vy)\n world_cell = self.grid.get(x, y)\n\n return obs_cell is not None and obs_cell.type == world_cell.type\n\n def step(self, action):\n self.step_count += 1\n\n reward = -(1 / self.max_steps)\n done = False\n\n # Get the position in front of the agent\n fwd_pos = self.front_pos\n\n # Get the contents of the cell in front of the agent\n fwd_cell = self.grid.get(*fwd_pos)\n\n # Rotate left\n if action == self.actions.left:\n self.agent_dir -= 1\n if self.agent_dir < 0:\n self.agent_dir += 4\n\n # Rotate right\n elif action == self.actions.right:\n self.agent_dir = (self.agent_dir + 1) % 4\n\n # Move forward\n elif action == self.actions.forward:\n if fwd_cell == None or fwd_cell.can_overlap():\n self.agent_pos = fwd_pos\n if fwd_cell != None and fwd_cell.type == 'goal':\n done = True\n reward = self._reward()\n if fwd_cell != None and fwd_cell.type == 'lava':\n done = True\n\n # Pick up an object\n elif action == self.actions.pickup:\n if fwd_cell and fwd_cell.can_pickup():\n if self.carrying is None:\n self.carrying = fwd_cell\n self.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*fwd_pos, None)\n\n # Drop an object\n elif action == self.actions.drop:\n if not fwd_cell and self.carrying:\n self.grid.set(*fwd_pos, self.carrying)\n 
self.carrying.cur_pos = fwd_pos\n self.carrying = None\n\n # Toggle/activate an object\n elif action == self.actions.toggle:\n if fwd_cell:\n fwd_cell.toggle(self, fwd_pos)\n\n # Done action (not used by default)\n elif action == self.actions.done:\n pass\n\n else:\n assert False, \"unknown action\"\n\n if self.step_count >= self.max_steps:\n done = True\n\n obs = self.gen_obs()\n\n return obs, reward, done, {}\n\n def gen_obs_grid(self):\n \"\"\"\n Generate the sub-grid observed by the agent.\n This method also outputs a visibility mask telling us which grid\n cells the agent can actually see.\n \"\"\"\n\n topX, topY, botX, botY = self.get_view_exts()\n\n grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)\n\n for i in range(self.agent_dir + 1):\n grid = grid.rotate_left()\n\n # Process occluders and visibility\n # Note that this incurs some performance cost\n if not self.see_through_walls:\n vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2 , self.agent_view_size - 1))\n else:\n vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool)\n\n # Make it so the agent sees what it's carrying\n # We do this by placing the carried object at the agent's position\n # in the agent's partially observable view\n agent_pos = grid.width // 2, grid.height - 1\n if self.carrying:\n grid.set(*agent_pos, self.carrying)\n else:\n grid.set(*agent_pos, None)\n\n return grid, vis_mask\n\n def gen_obs(self):\n \"\"\"\n Generate the agent's view (partially observable, low-resolution encoding)\n \"\"\"\n\n grid, vis_mask = self.gen_obs_grid()\n\n # Encode the partially observable view into a numpy array\n image = grid.encode(vis_mask)\n\n assert hasattr(self, 'mission'), \"environments must define a textual mission string\"\n\n # Observations are dictionaries containing:\n # - an image (partially observable view of the environment)\n # - the agent's direction/orientation (acting as a compass)\n # - a textual mission string (instructions for the agent)\n obs = {\n 'image': image,\n 'direction': self.agent_dir,\n 'mission': self.mission,\n 'target_cell': self.target_cell,\n 'obj_descs': self.obj_descs\n }\n\n return obs\n\n def get_obs_render(self, obs, tile_size=TILE_PIXELS//2):\n \"\"\"\n Render an agent observation for visualization\n \"\"\"\n\n grid, vis_mask = Grid.decode(obs)\n\n # Render the whole grid\n img = grid.render(\n tile_size,\n agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1),\n agent_dir=3,\n highlight_mask=vis_mask\n )\n\n return img\n\n def render(self, mode='human', close=False, highlight=True, tile_size=TILE_PIXELS):\n \"\"\"\n Render the whole-grid human view\n \"\"\"\n\n if close:\n if self.window:\n self.window.close()\n return\n\n if mode == 'human' and not self.window:\n import gym_minigrid.window\n self.window = gym_minigrid.window.Window('gym_minigrid')\n self.window.show(block=False)\n\n # Compute which cells are visible to the agent\n _, vis_mask = self.gen_obs_grid()\n\n # Compute the world coordinates of the bottom-left corner\n # of the agent's view area\n f_vec = self.dir_vec\n r_vec = self.right_vec\n top_left = self.agent_pos + f_vec * (self.agent_view_size-1) - r_vec * (self.agent_view_size // 2)\n\n # Mask of which cells to highlight\n highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)\n\n # For each cell in the visibility mask\n for vis_j in range(0, self.agent_view_size):\n for vis_i in range(0, self.agent_view_size):\n # If this cell is not visible, don't highlight it\n if not 
vis_mask[vis_i, vis_j]:\n continue\n\n # Compute the world coordinates of this cell\n abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)\n\n if abs_i < 0 or abs_i >= self.width:\n continue\n if abs_j < 0 or abs_j >= self.height:\n continue\n\n # Mark this cell to be highlighted\n highlight_mask[abs_i, abs_j] = True\n\n # Render the whole grid\n img = self.grid.render(\n tile_size,\n self.agent_pos,\n self.agent_dir,\n highlight_mask=highlight_mask if highlight else None\n )\n\n if mode == 'human':\n self.window.set_caption(self.mission)\n self.window.show_img(img)\n\n return img\n\n def close(self):\n if self.window:\n self.window.close()\n return\n"
] |
[
[
"numpy.array",
"numpy.ones",
"numpy.array_equal",
"numpy.zeros"
]
] |
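A minimal usage sketch for the MiniGridEnv subclassing pattern in the file above. The environment name, layout, and mission string are invented; the sketch assumes gym and an older NumPy are installed (the file still uses the deprecated np.bool), and it sets the target_cell and obj_descs fields that this fork's gen_obs() reads in addition to the standard mission attribute:

from gym_minigrid.minigrid import MiniGridEnv, Grid, Goal

class EmptyRoomEnv(MiniGridEnv):
    """Hypothetical 8x8 room with a goal in the far corner."""

    def __init__(self):
        super().__init__(grid_size=8, max_steps=64)

    def _gen_grid(self, width, height):
        self.grid = Grid(width, height)
        self.grid.wall_rect(0, 0, width, height)      # outer walls
        self.put_obj(Goal(), width - 2, height - 2)   # green goal tile
        self.agent_pos = (1, 1)
        self.agent_dir = 0                            # facing right
        # Fields read by gen_obs() in this fork:
        self.mission = 'reach the green goal square'
        self.target_cell = (width - 2, height - 2)
        self.obj_descs = []

env = EmptyRoomEnv()
obs = env.reset()                 # dict with image, direction, mission, ...
for _ in range(20):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        break

Note that _gen_grid must assign agent_pos and agent_dir before returning, since reset() asserts both are set and then queries the agent's start cell.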
jamesxiu/Oystermaran2021
|
[
"f3703bb220dc5b415942f90bd5761a9985381067"
] |
[
"bag_detection-master/scripts/util.py"
] |
[
"#!/usr/bin/env python3\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\nimport sys\nsys.path.append(\"/home/oyster/Tensorflow/Monk_Object_Detection/13_tf_obj_2/lib/\")\nfrom infer_detector_nano import Infer\n\nfrom bag_detection.msg import FlipPos, PathPos\n\n\ndef get_rectangles(mask, threshold_area):\n \"\"\"\n Extract defined color from image and return rectangles coordinates of large enough contours on given side\n Input: \n mask: Binary Image\n threshold_area: int\n Output:\n list of 1x4 tuples (x, y, w, h) of color blobs \n \"\"\"\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n rectangles = []\n for contour in contours:\n if cv2.contourArea(contour) > threshold_area:\n rect = cv2.boundingRect(contour)\n rectangles.append(rect)\n return rectangles\n\n\ndef get_contours(mask, threshold_area):\n \"\"\"\n Extract defined color from image and return large contours (UNUSED)\n Input: \n cv_image: Image (BGR)\n lower_range: 1x3 tuple representing lower HSV for target color\n upper_range: 1x3 tuple representing upper HSV for target color\n threshold_area: int\n Output:\n list of openCV contours \n \"\"\"\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n return [x for x in contours if cv2.contourArea(x) > threshold_area], hierarchy\n\n\n\ndef color_segmentation(image, lower, upper):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, np.array(lower), np.array(upper))\n return mask\n\n\ndef get_mask_pixels(mask):\n return np.transpose((mask>0).nonzero())\n\n\ndef get_avg_depth(depth_img, pixels, low_thres=0, high_thres=1000):\n avg_depth = 0\n i = 0\n for x,y in pixels:\n depth = depth_img[x][y]\n # print(depth)\n if depth > low_thres and depth < high_thres: \n avg_depth += depth\n i += 1\n\n return avg_depth/i\n\n\ndef get_region_box(mask, area=100, side='bottom', image=None):\n left = mask.shape[1]\n right = 0\n top = mask.shape[0]\n bot = 0\n box = None\n\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for contour in contours:\n if cv2.contourArea(contour) > area:\n rect = cv2.boundingRect(contour)\n if image:\n tl = (rect[0], rect[1])\n br = (rect[0]+rect[2], rect[1]+rect[3])\n cv2.rectangle(image, tl, br, (255,0,0), 2)\n if side == 'left':\n if rect[0] < left:\n left = rect[0]\n box = rect\n elif side == 'right':\n if rect[0] > right:\n right = rect[0]\n box = rect\n elif side == 'top':\n if rect[1] < top:\n top = rect[1]\n box = rect\n else:\n if rect[1] > bot:\n bot = rect[1]\n box = rect\n if image:\n cv2.rectangle(image, (box[0], box[1]), (box[0]+box[2], box[1]+box[3]), (0,0,255), 2)\n return box\n\n\ndef get_tf2_detect_fn(path):\n detect_fn=tf.saved_model.load(path)\n return detect_fn\n\ndef detect_objects(detect_fn, image, width=1280, height=720, min_score_thres=0.5):\n image_np = np.array(image)\n input_tensor=tf.convert_to_tensor(image_np)\n input_tensor=input_tensor[tf.newaxis, ...]\n detections=detect_fn(input_tensor)\n print(type(detections))\n\n # This is the way I'm getting my coordinates\n boxes = detections['detection_boxes'][0]\n # print(boxes)\n # get all boxes from an array\n max_boxes_to_draw = boxes.shape[0]\n # get scores to get a threshold\n scores = detections['detection_scores'][0]\n # print(scores)\n # this is set as a default but feel free to adjust it to your needs\n \n # iterate over all objects found\n objects = []\n for i in range(min(max_boxes_to_draw, boxes.shape[0])): \n if scores is None or 
scores[i] > min_score_thres:\n class_name = detections['detection_classes'][0][i].numpy()\n\n y_min, x_min, y_max, x_max = boxes[i].numpy()\n tl, br = ((int(x_min*width), int(y_min*height)), (int(x_max*width), int(y_max*height)))\n detection = {'class':class_name, 'box': (tl, br)}\n objects.append(detection)\n\n return objects\n\n\ndef get_gtf():\n gtf = Infer();\n print(\"GTFF INITIALIZEDDDDDDDDDDDDDDDDDDDDDDDDDDD\")\n gtf.set_dataset_params(class_list_file = '/home/oyster/Tensorflow/oyster_bag/classes.txt')\n print(\"DATA SET PARAMMMS SETTTTTT\")\n gtf.set_model_params(exported_model_dir = '/home/oyster/Tensorflow/trt_fp16_dir')\n\n return gtf\n\n\ndef gtf_detect_objects(gtf, image_np, min_score_thres=0.5, width=1280, height=720):\n input_tensor = tf.convert_to_tensor(image_np)\n input_tensor = input_tensor[tf.newaxis, ...]\n scores, bboxes, labels = gtf.infer_on_tensor(input_tensor, thresh=0.8);\n \n return bboxes\n\n\ndef get_element(dilation_size, dilation_shape=cv2.MORPH_RECT):\n return cv2.getStructuringElement(dilation_shape, (2 * dilation_size + 1, 2 * dilation_size + 1),\n (dilation_size, dilation_size))\n\ndef canny(img, thres1=100, thres2=200, aperture=1):\n return cv2.Canny(img, thres1, thres2, aperture)\n\n\ndef dilate_bag_row(edges, element):\n return cv2.morphologyEx(edges, cv2.MORPH_CLOSE, element)\n\n\ndef directional_shear(closed, element, vertical=1, shearing_factor=50, shape=cv2.MORPH_RECT):\n # dims = closed.shape[1]\n size = (closed.shape[1] // shearing_factor, 1)\n\n if (vertical):\n # dims = closed.shape[0]\n size = (1, closed.shape[0] // shearing_factor)\n\n structure = cv2.getStructuringElement(shape, size)\n closed = cv2.erode(closed, structure)\n closed = cv2.dilate(closed, structure)\n return cv2.morphologyEx(closed, cv2.MORPH_CLOSE, element)\n\n\ndef bag_rect_detection(img, vertical=1, threshold_area_prop = 0.025, dilation_size=9, dilation_shape=cv2.MORPH_RECT, thres1=100, thres2=200, aperture=1, shearing_factor=50):\n element = get_element(dilation_size, dilation_shape)\n edges = canny(img, thres1, thres2, aperture)\n closed = dilate_bag_row(edges, element)\n closed = directional_shear(closed, element, vertical, shearing_factor, dilation_shape)\n h, w = img.shape[:2]\n threshold_area = threshold_area_prop*h*w \n c_rects = get_rectangles(closed, threshold_area)\n\n return c_rects\n\n\ndef create_flip_pos_msg(top=False, bot=False):\n\n msg = FlipPos()\n msg.top = top\n msg.bot = bot\n msg.top_x = float('inf')\n msg.top_y = float('inf')\n msg.bot_x = float('inf')\n msg.bot_y = float('inf')\n\n return msg\n"
] |
[
[
"tensorflow.convert_to_tensor",
"numpy.array",
"tensorflow.saved_model.load"
]
] |
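A small self-contained sketch of the color_segmentation -> get_rectangles pipeline from the file above, run on a synthetic frame. The two helpers are restated inline so the example does not pull in the module's TensorFlow and Jetson inference imports; it assumes OpenCV 4 (two-value findContours return), and the HSV range for red is illustrative:

import cv2
import numpy as np

def color_segmentation(image, lower, upper):
    # Threshold a BGR image in HSV space, as in the module above.
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, np.array(lower), np.array(upper))

def get_rectangles(mask, threshold_area):
    # Bounding boxes of all sufficiently large blobs in a binary mask.
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) > threshold_area]

# Synthetic 320x240 BGR frame with one solid red square.
img = np.zeros((240, 320, 3), dtype=np.uint8)
cv2.rectangle(img, (100, 80), (180, 160), (0, 0, 255), -1)

mask = color_segmentation(img, (0, 120, 70), (10, 255, 255))
print(get_rectangles(mask, threshold_area=500))  # [(100, 80, 81, 81)]

get_region_box in the module runs the same contour loop but keeps only the extremal box on a chosen side ('left', 'right', 'top', or bottom).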
chloeyutianyi/pytorch
|
[
"6a085648d81ce88ff59d6d1438fdb3707a0d6fb7"
] |
[
"test/quantization/core/test_workflow_ops.py"
] |
[
"import torch\nfrom torch.quantization import (\n FakeQuantize,\n MovingAverageMinMaxObserver,\n default_observer,\n default_affine_fixed_qparams_fake_quant,\n)\n\nfrom torch.quantization._learnable_fake_quantize import _LearnableFakeQuantize\nfrom torch.testing._internal.common_quantized import (\n _fake_quantize_per_channel_affine_reference,\n _fake_quantize_per_channel_affine_grad_reference,\n to_tensor,\n)\nimport torch.nn as nn\n\n# Standard library\nimport io\nimport itertools\nimport unittest\nimport numpy as np\n\n# Testing utils\nfrom hypothesis import given\nfrom hypothesis import strategies as st\nimport torch.testing._internal.hypothesis_utils as hu\nhu.assert_deadline_disabled()\nfrom torch.testing._internal.common_cuda import TEST_CUDA\nfrom torch.testing._internal.common_utils import TestCase\n\n# Reference method for fake quantize\n# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64\ndef _fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max):\n dtype = X.dtype\n res = ((torch.clamp(torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point), quant_min, quant_max) - zero_point) * scale)\n return res.to(dtype)\n\n# Reference method for the gradient of the fake quantize operator\n# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64\ndef _fake_quantize_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max):\n dtype = X.dtype\n Xq = torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point)\n mask = (Xq >= quant_min) * (Xq <= quant_max)\n res = torch.zeros_like(dY)\n res[mask] = dY[mask]\n return res.to(dtype)\n\n# Reference method for the gradients of the fake quantize operator\ndef _fake_quantize_learnable_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max, device):\n r\"\"\"This method references the following literatures for back propagation on scale and zero point.\n - https://arxiv.org/pdf/1902.08153.pdf\n - https://arxiv.org/pdf/1903.08066.pdf\n \"\"\"\n zero_point_rounded = int((zero_point + 0.5).clamp(quant_min, quant_max).item())\n Xq = torch.round(X * (1.0 / scale) + zero_point_rounded)\n\n indicate_small_scale = (Xq < quant_min).float().to(device)\n indicate_big_scale = (Xq > quant_max).float().to(device)\n indicate_middle_scale = torch.ones(indicate_small_scale.shape).to(device) - \\\n indicate_small_scale - indicate_big_scale\n\n indicate_saturate_zp = ((Xq < quant_min).float() + (Xq > quant_max).float()).to(device)\n indicate_unsaturate_zp = torch.ones(indicate_saturate_zp.shape).to(device) - indicate_saturate_zp\n\n Xq = Xq.clamp(quant_min, quant_max)\n Xfq = (Xq - zero_point_rounded) * scale\n\n grad_small_scale = quant_min - zero_point_rounded\n grad_big_scale = quant_max - zero_point_rounded\n grad_middle_scale = ((Xfq - X) / scale).to(device)\n\n grad_saturate_zp = -scale.to(device)\n grad_unsaturate_zp = 0\n\n grad_scale = indicate_small_scale * grad_small_scale + \\\n indicate_big_scale * grad_big_scale + \\\n indicate_middle_scale * grad_middle_scale\n grad_zp = indicate_saturate_zp * grad_saturate_zp + \\\n indicate_unsaturate_zp * grad_unsaturate_zp\n grad_X = _fake_quantize_per_tensor_affine_grad_reference(\n dY, X, scale, zero_point, quant_min, quant_max).to(device)\n\n grad_scale = (grad_scale * dY).sum().unsqueeze(dim=0)\n grad_zp = (grad_zp * dY).sum().unsqueeze(dim=0)\n return grad_X, grad_scale, grad_zp\n\n\n# Reference 
method for quantization.\ndef _quantize_per_tensor(x, scale, zero_point, quant_min, quant_max):\n return ((x / scale) + zero_point).round().clamp(quant_min, quant_max)\n\n# Reference method for the per channel gradients of the learnable fake quantize operator\ndef _fake_quantize_learnable_per_channel_affine_grad_reference(\n dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max, device):\n r\"\"\"This method references the following literatures for back propagation on scale and zero point.\n - https://arxiv.org/pdf/1902.08153.pdf\n - https://arxiv.org/pdf/1903.08066.pdf\n \"\"\"\n per_channel_zero_point = ((per_channel_zero_point.detach() + 0.5).clamp(quant_min, quant_max)).type(torch.int32)\n grad_X = _fake_quantize_per_channel_affine_grad_reference(\n dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max).to(device)\n per_channel_scale = per_channel_scale.detach().type(torch.float)\n\n grad_scale = torch.zeros([per_channel_scale.size(0)]).to(device)\n grad_zero_point = torch.zeros([per_channel_zero_point.size(0)]).to(device)\n\n X_flattened = torch.unbind(X, dim=axis)\n dY_flattened = torch.unbind(dY, dim=axis)\n\n for i, X_i in enumerate(torch.unbind(X, dim=axis), 0):\n scale_i = per_channel_scale[i]\n zero_point_i = per_channel_zero_point[i]\n X_i = X_flattened[i]\n dY_i = dY_flattened[i]\n\n Xq_i = ((X_i / scale_i) + zero_point_i).round()\n Xfq_i = (Xq_i - zero_point_i) * scale_i\n\n indicate_small_scale_i = (Xq_i < quant_min).float().to(device)\n indicate_big_scale_i = (Xq_i > quant_max).float().to(device)\n indicate_middle_scale_i = torch.ones(indicate_small_scale_i.shape).to(device) - \\\n indicate_small_scale_i - indicate_big_scale_i\n\n indicate_saturate_zp_i = ((Xq_i < quant_min).float() +\n (Xq_i > quant_max).float()).to(device)\n indicate_unsaturate_zp_i = torch.ones(indicate_saturate_zp_i.shape).to(device) - \\\n indicate_saturate_zp_i\n\n Xq_i = Xq_i.clamp(quant_min, quant_max)\n Xfq_i = (Xq_i - zero_point_i) * scale_i\n\n grad_small_scale_i = quant_min - zero_point_i\n grad_big_scale_i = quant_max - zero_point_i\n grad_middle_scale_i = ((Xfq_i - X_i) / scale_i).to(device)\n\n grad_saturate_zp_i = -scale_i.to(device)\n grad_unsaturate_zp_i = 0\n\n grad_scale_i = indicate_small_scale_i * grad_small_scale_i + \\\n indicate_middle_scale_i * grad_middle_scale_i + \\\n indicate_big_scale_i * grad_big_scale_i\n grad_zp_i = indicate_saturate_zp_i * grad_saturate_zp_i + \\\n indicate_unsaturate_zp_i * grad_unsaturate_zp_i\n\n grad_scale_i = (grad_scale_i * dY_i).sum().unsqueeze(dim=0)\n grad_zp_i = (grad_zp_i * dY_i).sum().unsqueeze(dim=0)\n\n grad_scale[i] = grad_scale_i\n grad_zero_point[i] = grad_zp_i\n return grad_X, grad_scale, grad_zero_point\n\nNP_RANDOM_SEED = 19\ntolerance = 1e-6\n\nclass TestFakeQuantizeOps(TestCase):\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_forward_per_tensor(self, device, X):\n r\"\"\"Tests the forward path of the FakeQuantizePerTensorAffine op.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n np.testing.assert_allclose(Y, 
Y_prime.cpu(), rtol=tolerance, atol=tolerance)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skip(\"temporarily disable the test\")\n def test_backward_per_tensor(self, device, X):\n r\"\"\"Tests the backward method.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n X.requires_grad_()\n Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX = _fake_quantize_per_tensor_affine_grad_reference(\n dout, X, scale, zero_point, quant_min, quant_max)\n Y_prime.backward(dout)\n np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n def test_forward_backward_per_tensor_with_amp(self):\n net = nn.Sequential(nn.Conv2d(1, 1, 3))\n net.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n net_prep = torch.quantization.prepare_qat(net)\n\n with torch.cuda.amp.autocast():\n x = torch.randn(4, 1, 5, 5)\n out = net_prep(x).sum()\n out.backward()\n self.assertTrue(net_prep[0].weight.grad is not None)\n\n def test_forward_per_tensor_half_precision_numerics(self):\n scale = .1\n zero = 0\n maxi = 255\n mini = 0\n\n for i in range(20):\n X1 = torch.randn(5, 5).to(torch.float16)\n Y1 = torch.fake_quantize_per_tensor_affine(X1, scale, zero, mini, maxi)\n Y1r = _fake_quantize_per_tensor_affine_reference(X1, scale, zero, mini, maxi)\n self.assertTrue(torch.allclose(Y1, Y1r, rtol=tolerance, atol=tolerance))\n\n # to force overflow\n X2 = torch.tensor(2**15 + .01).to(torch.float16)\n Y2 = torch.fake_quantize_per_tensor_affine(X2, scale, zero, mini, maxi)\n Y2r = _fake_quantize_per_tensor_affine_reference(X2, scale, zero, mini, maxi)\n self.assertTrue(torch.allclose(Y2, Y2r, rtol=tolerance, atol=tolerance))\n\n scale = 10\n\n # to force underflow\n X3 = torch.tensor(2**-24).to(torch.float16)\n Y3 = torch.fake_quantize_per_tensor_affine(X3, scale, zero, mini, maxi)\n Y3r = _fake_quantize_per_tensor_affine_reference(X3, scale, zero, mini, maxi)\n self.assertTrue(torch.allclose(Y3, Y3r, rtol=tolerance, atol=tolerance))\n\n def _test_forward_per_tensor_cachemask_impl(self, device):\n float_types = (torch.float32, torch.float16, torch.float64)\n torch_types = (torch.qint8, torch.quint8)\n Xs = (torch.randn(4, 8, device=device), torch.randn(4, 16, device=device)[:, ::2])\n tensor_qparam = (True, False)\n for float_type, torch_type, X, tensor_qparams in itertools.product(float_types, torch_types, Xs, tensor_qparam):\n # pick the scale + zp so that some values get clipped\n X = X.to(float_type)\n obs = torch.quantization.MinMaxObserver(torch_type)\n obs.to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n quant_min, quant_max = obs._calculate_qmin_qmax()\n if not tensor_qparam:\n scale, zero_point = float(scale), int(zero_point)\n Y_test = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n Y_ref = _fake_quantize_per_tensor_affine_reference(\n X, scale, zero_point, quant_min, quant_max).to(device)\n self.assertTrue(torch.allclose(Y_test, Y_ref, rtol=tolerance, atol=tolerance))\n self.assertTrue(Y_test.dtype == 
float_type)\n\n def test_forward_per_tensor_cachemask_cpu(self):\n device = torch.device('cpu')\n self._test_forward_per_tensor_cachemask_impl(device)\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_forward_per_tensor_cachemask_cuda(self):\n device = torch.device('cuda')\n self._test_forward_per_tensor_cachemask_impl(device)\n\n def _test_backward_per_tensor_cachemask_impl(self, device):\n float_types = (torch.float32, torch.float16, torch.float64)\n torch_types = (torch.qint8, torch.quint8)\n tensor_qparam = (True, False)\n for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparam):\n X = torch.randn(4, 8).to(device).to(float_type)\n X.requires_grad_()\n # pick the scale + zp so that some values get clipped\n obs = torch.quantization.MinMaxObserver(torch_type)\n obs.to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n if not tensor_qparam:\n scale, zero_point = float(scale), int(zero_point)\n quant_min, quant_max = obs._calculate_qmin_qmax()\n\n # forward pass\n Y_test = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n Y_ref = _fake_quantize_per_tensor_affine_reference(\n X, scale, zero_point, quant_min, quant_max).to(device)\n self.assertTrue(torch.allclose(Y_test, Y_ref, rtol=tolerance, atol=tolerance))\n\n # backward pass\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX = _fake_quantize_per_tensor_affine_grad_reference(\n dout, X, scale, zero_point, quant_min, quant_max)\n Y_test.backward(dout)\n self.assertTrue(torch.allclose(dX, X.grad))\n self.assertTrue(X.grad.dtype == float_type)\n\n def test_backward_per_tensor_cachemask_cpu(self):\n device = torch.device('cpu')\n self._test_backward_per_tensor_cachemask_impl(device)\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_backward_per_tensor_cachemask_cuda(self):\n device = torch.device('cuda')\n self._test_backward_per_tensor_cachemask_impl(device)\n\n def _test_learnable_forward_per_tensor(self, X, device, scale_base, zero_point_base):\n X_base = torch.tensor(X).to(device)\n\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** n_bits - 1\n\n X = X_base.clone().float()\n scale_base = scale_base.to(device).float()\n zero_point_base = zero_point_base.to(dtype=torch.int32, device=device)\n scale = scale_base.clone()\n zero_point = zero_point_base.clamp(quant_min, quant_max)\n\n Y = _fake_quantize_per_tensor_affine_reference(\n X, scale, zero_point, quant_min, quant_max).to(device)\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)\n self.assertTrue(\n torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),\n \"Expected kernel forward function to have results match the reference forward function\")\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_forward_per_tensor_cpu(self, X):\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_forward_per_tensor(\n X, 'cpu', scale_base, zero_point_base)\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not 
TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_forward_per_tensor_cuda(self, X):\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_forward_per_tensor(\n X, 'cuda', scale_base, zero_point_base)\n\n def _test_learnable_backward_per_tensor(self, X, device, scale_base, zero_point_base):\n r\"\"\"Tests the backward method with additional backprop support for scale and zero point.\n \"\"\"\n X_base = torch.tensor(X).to(device)\n\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** n_bits - 1\n\n X = X_base.clone().float().to(device)\n X.requires_grad_()\n scale_base = scale_base.to(device)\n zero_point_base = zero_point_base.to(device)\n scale = scale_base.clone()\n scale.requires_grad_()\n zero_point = zero_point_base.clone().clamp(quant_min, quant_max)\n zero_point.requires_grad_()\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX, dScale, dZeroPoint = _fake_quantize_learnable_per_tensor_affine_grad_reference(\n dout, X, scale, zero_point, quant_min, quant_max, device)\n Y_prime.backward(dout)\n\n expected_dX = dX.to(device).detach()\n actual_dX = X.grad.to(device).detach()\n expected_dScale = dScale.to(device).detach()\n actual_dScale = scale.grad.to(device).detach()\n expected_dZeroPoint = dZeroPoint.to(device).detach()\n actual_dZeroPoint = zero_point.grad.to(device).detach()\n\n self.assertTrue(\n torch.allclose(\n expected_dX, actual_dX, rtol=tolerance, atol=tolerance),\n \"Expected dX to match X.grad\")\n self.assertTrue(\n torch.allclose(\n expected_dScale * grad_factor, actual_dScale, rtol=tolerance, atol=tolerance),\n \"Expected dScale to match scale.grad\")\n self.assertTrue(\n torch.allclose(\n expected_dZeroPoint * grad_factor, actual_dZeroPoint, rtol=tolerance, atol=tolerance),\n \"Expected dZeroPoint to match zero_point.grad\")\n X.grad.data.zero_()\n scale.grad.data.zero_()\n zero_point.grad.data.zero_()\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_backward_per_tensor_cpu(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_backward_per_tensor(\n X, 'cpu', scale_base, zero_point_base)\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_backward_per_tensor_cuda(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_backward_per_tensor(\n X, 'cuda', scale_base, zero_point_base)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=[torch.quint8])),\n )\n def test_fq_module_per_tensor(self, device, X):\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, 
torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n X.requires_grad_()\n fq_module = torch.quantization.default_fake_quant().to(device)\n Y_prime = fq_module(X)\n assert fq_module.scale is not None\n assert fq_module.zero_point is not None\n Y = _fake_quantize_per_tensor_affine_reference(X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)\n np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n # Test backward\n dout = torch.rand_like(X, dtype=torch.float, device=device)\n Y_prime.backward(dout)\n dX = _fake_quantize_per_tensor_affine_grad_reference(dout, X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)\n np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_fixed_qparams_fq_module(self, device, X):\n X, (scale, zero_point, torch_type) = X\n X = to_tensor(X, device)\n fq_module = default_affine_fixed_qparams_fake_quant()\n fq_module.to(device)\n fixed_scale = fq_module.scale.clone()\n fixed_zero_point = fq_module.zero_point.clone()\n # run fq module and make sure the quantization parameters does not change\n torch.quantization.enable_observer(fq_module)\n fq_module(X)\n self.assertEqual(fixed_scale, fq_module.scale)\n self.assertEqual(fixed_zero_point, fq_module.zero_point)\n\n def test_fq_serializable_per_tensor(self):\n observer = default_observer\n quant_min = 0\n quant_max = 255\n for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:\n fq_module = FakeQuantizeClass(observer, quant_min, quant_max)\n X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)\n y_ref = fq_module(X)\n state_dict = fq_module.state_dict()\n self.assertEqual(state_dict['scale'], 0.094488)\n self.assertEqual(state_dict['zero_point'], 53)\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n loaded_fq_module = FakeQuantizeClass(observer, quant_min, quant_max)\n loaded_fq_module.load_state_dict(loaded_dict)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_fq_module.state_dict()[key])\n\n self.assertEqual(loaded_fq_module.calculate_qparams(), fq_module.calculate_qparams())\n\n def test_fake_quant_control(self):\n for fq_module in [torch.quantization.default_fake_quant(),\n _LearnableFakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0,\n quant_max=255,\n dtype=torch.quint8, qscheme=torch.per_tensor_affine,\n reduce_range=True)()]:\n torch.manual_seed(42)\n X = torch.rand(20, 10, dtype=torch.float32)\n # Output of fake quant is not identical to input\n Y = fq_module(X)\n self.assertNotEqual(Y, X)\n if type(fq_module) == _LearnableFakeQuantize:\n fq_module.toggle_fake_quant(False)\n else:\n torch.quantization.disable_fake_quant(fq_module)\n X = torch.rand(20, 10, dtype=torch.float32)\n Y = fq_module(X)\n # Fake quant is disabled,output is identical to input\n self.assertEqual(Y, X)\n\n # Explicit copy at this point in time, because FakeQuant keeps internal\n # state in mutable buffers.\n scale = fq_module.scale.clone().detach()\n zero_point = fq_module.zero_point.clone().detach()\n\n if type(fq_module) == _LearnableFakeQuantize:\n fq_module.toggle_observer_update(False)\n fq_module.toggle_fake_quant(True)\n 
else:\n torch.quantization.disable_observer(fq_module)\n torch.quantization.enable_fake_quant(fq_module)\n X = 10.0 * torch.rand(20, 10, dtype=torch.float32) - 5.0\n Y = fq_module(X)\n self.assertNotEqual(Y, X)\n # Observer is disabled, scale and zero-point do not change\n self.assertEqual(fq_module.scale, scale)\n self.assertEqual(fq_module.zero_point, zero_point)\n if type(fq_module) == _LearnableFakeQuantize:\n fq_module.toggle_observer_update(True)\n else:\n torch.quantization.enable_observer(fq_module)\n Y = fq_module(X)\n self.assertNotEqual(Y, X)\n # Observer is enabled, scale and zero-point are different\n self.assertNotEqual(fq_module.scale, scale)\n self.assertNotEqual(fq_module.zero_point, zero_point)\n\n def test_fake_quant_preserves_qparam_shapes_for_activations(self):\n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.linear = nn.Linear(4, 4)\n\n def forward(self, x):\n x = self.linear(x)\n return x\n\n m = Model()\n\n m.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n torch.quantization.prepare_qat(m, inplace=True)\n\n scale_shape_before = m.linear.activation_post_process.scale.shape\n zero_point_shape_before = m.linear.activation_post_process.zero_point.shape\n\n x = torch.rand(4, 4, 4, 4)\n m(x)\n scale_shape_after = m.linear.activation_post_process.scale.shape\n zero_point_shape_after = m.linear.activation_post_process.zero_point.shape\n self.assertEqual(\n scale_shape_before, scale_shape_after,\n msg=\"FakeQuant scale shape must stay consistent\")\n self.assertEqual(\n zero_point_shape_before, zero_point_shape_after,\n msg=\"FakeQuant zero_point shape must stay consistent\")\n\n def fake_quant_scriptable(self):\n observer = default_observer\n quant_min = 0\n quant_max = 255\n for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:\n fq_module = FakeQuantizeClass(observer, quant_min, quant_max)\n scripted_module = torch.jit.script(fq_module)\n\n X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)\n\n fq_module(X)\n scripted_module(X)\n self.assertEqual(fq_module.calculate_qparams(), scripted_module.calculate_qparams())\n\n buf = io.BytesIO()\n torch.jit.save(scripted_module, buf)\n buf.seek(0)\n loaded_module = torch.jit.load(buf)\n self.assertEqual(fq_module.calculate_qparams(), loaded_module.calculate_qparams())\n\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_forward_per_channel(self, device, X):\n r\"\"\"Tests the forward path of the FakeQuantizePerTensorAffine op.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n scale = to_tensor(scale, device)\n zero_point = torch.tensor(zero_point).to(dtype=torch.int32, device=device)\n Y = _fake_quantize_per_channel_affine_reference(X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)\n\n def _test_forward_per_channel_cachemask_impl(self, device):\n torch_types = (torch.qint8, torch.quint8)\n float_types = (torch.float32, torch.float16, torch.float64)\n for torch_type, float_type in itertools.product(torch_types, float_types):\n X = torch.randn(1, 2, 4, 
4, dtype=float_type).to(device)\n # pick the scale + zp so that some values get clipped\n axis = 1\n obs = torch.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n # TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast\n zero_point = zero_point.to(torch.int32)\n quant_min, quant_max = obs._calculate_qmin_qmax()\n\n Y = _fake_quantize_per_channel_affine_reference(\n X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)\n self.assertTrue(Y.dtype == float_type)\n\n def test_forward_per_channel_cachemask_cpu(self):\n self._test_forward_per_channel_cachemask_impl('cpu')\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_forward_per_channel_cachemask_cuda(self):\n self._test_forward_per_channel_cachemask_impl('cuda')\n\n def test_forward_per_channel_half_precision_numerics(self):\n scale = torch.randn(5).abs()\n zero = torch.randn(5).to(dtype=torch.int)\n axis = 1\n mini = 0\n maxi = 255\n\n for i in range(20):\n X1 = torch.randn(4, 5).to(torch.float16)\n Y1 = torch.fake_quantize_per_channel_affine(X1, scale, zero, axis, mini, maxi)\n Y1r = _fake_quantize_per_channel_affine_reference(X1, scale, zero, axis, mini, maxi)\n self.assertTrue(torch.allclose(Y1, Y1r, rtol=tolerance, atol=tolerance))\n\n # to force overflow\n X2 = torch.randn(4, 5).to(torch.float16)\n X2[0, 0] = 2**15 + .01\n Y2 = torch.fake_quantize_per_channel_affine(X2, scale, zero, axis, mini, maxi)\n Y2r = _fake_quantize_per_channel_affine_reference(X2, scale, zero, axis, mini, maxi)\n self.assertTrue(torch.allclose(Y2, Y2r, rtol=tolerance, atol=tolerance))\n\n scale = torch.zeros(5) + 10\n\n # to force underflow\n X3 = torch.randn(4, 5).to(torch.float16)\n X3[0, 0] = 2**-24\n Y3 = torch.fake_quantize_per_channel_affine(X3, scale, zero, axis, mini, maxi)\n Y3r = _fake_quantize_per_channel_affine_reference(X3, scale, zero, axis, mini, maxi)\n self.assertTrue(torch.allclose(Y3, Y3r, rtol=tolerance, atol=tolerance))\n\n def _test_learnable_forward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):\n r\"\"\"Tests the forward path of the learnable FakeQuantizePerTensorAffine op.\n \"\"\"\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** (n_bits) - 1\n\n scale_base = scale_base.to(device)\n zero_point_base = zero_point_base.to(device)\n\n X_curr = X_base.clone()\n scale_curr = scale_base.clone()\n zero_point_curr = zero_point_base.clone()\n\n Y = _fake_quantize_per_channel_affine_reference(\n X_curr, scale_curr, zero_point_curr.round().clamp(quant_min, quant_max), axis, quant_min, quant_max).to(device)\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_channel_affine(\n X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)\n self.assertTrue(\n torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),\n \"Expected kernel forward function to have results match the reference forward function\")\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_forward_per_channel_cpu(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, axis, _) = X\n X_base = torch.tensor(X).to('cpu')\n channel_size = X_base.size(axis)\n scale_base = 
torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))\n self._test_learnable_forward_per_channel(\n X_base, 'cpu', scale_base, zero_point_base, axis)\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_forward_per_channel_cuda(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, axis, _) = X\n X_base = torch.tensor(X).to('cuda')\n channel_size = X_base.size(axis)\n scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))\n self._test_learnable_forward_per_channel(\n X_base, 'cuda', scale_base, zero_point_base, axis)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_backward_per_channel(self, device, X):\n r\"\"\"Tests the backward method.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n scale = to_tensor(scale, device)\n zero_point = torch.tensor(zero_point).to(dtype=torch.int32, device=device)\n X.requires_grad_()\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX = _fake_quantize_per_channel_affine_grad_reference(\n dout, X, scale, zero_point, axis, quant_min, quant_max)\n Y_prime.backward(dout)\n np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n def _test_backward_per_channel_cachemask_impl(self, device):\n torch_types = (torch.qint8, torch.quint8)\n float_types = (torch.float32, torch.float16, torch.float64)\n for torch_type, float_type in itertools.product(torch_types, float_types):\n X = torch.randn(1, 2, 4, 4, dtype=float_type).to(device)\n # pick the scale + zp so that some values get clipped\n axis = 1\n obs = torch.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n # TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast\n zero_point = zero_point.to(torch.int32)\n quant_min, quant_max = obs._calculate_qmin_qmax()\n X.requires_grad_()\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n dout = torch.rand_like(X, dtype=float_type).to(device)\n dX = _fake_quantize_per_channel_affine_grad_reference(\n dout, X, scale, zero_point, axis, quant_min, quant_max)\n Y_prime.backward(dout)\n np.testing.assert_allclose(\n dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n assert(X.grad.dtype == float_type)\n\n\n def test_backward_per_channel_cachemask_cpu(self):\n self._test_backward_per_channel_cachemask_impl('cpu')\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_backward_per_channel_cachemask_cuda(self):\n self._test_backward_per_channel_cachemask_impl('cuda')\n\n def _test_learnable_backward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):\n r\"\"\"Tests the backward path of the learnable FakeQuantizePerTensorAffine op.\n 
\"\"\"\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** n_bits - 1\n\n scale_base = scale_base.to(device)\n zero_point_base = zero_point_base.to(device=device)\n\n X_curr = X_base.clone()\n X_curr.requires_grad_()\n scale_curr = scale_base.clone()\n scale_curr.requires_grad_()\n zero_point_curr = zero_point_base.clone()\n zero_point_curr.requires_grad_()\n\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_channel_affine(\n X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)\n\n dout = torch.rand(X_curr.shape, dtype=torch.float).to(device)\n dX, dScale, dZeroPoint = _fake_quantize_learnable_per_channel_affine_grad_reference(\n dout, X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, device)\n Y_prime.backward(dout)\n\n dX_expected = dX.to(device).detach()\n dX_actual = X_curr.to(device).grad.detach()\n dScale_expected = dScale.to(device).detach()\n dScale_actual = scale_curr.to(device).grad.detach()\n dZeroPoint_expected = dZeroPoint.to(device).detach()\n dZeroPoint_actual = zero_point_curr.to(device).grad.detach()\n tolerance = 1e-4\n\n self.assertTrue(\n torch.allclose(dX_expected, dX_actual, rtol=tolerance, atol=tolerance),\n \"Expected dX={} to match X.grad={}, X={}, s={}, z={}, dout={}, n_bits={}\".format(\n dX_expected, dX_actual, X_curr, scale_curr, zero_point_curr, dout, n_bits))\n self.assertTrue(\n torch.allclose(dScale_expected * grad_factor, dScale_actual, rtol=tolerance, atol=tolerance),\n \"Expected dScale={} to match scale.grad={}, X={}, s={}, z={}, dout={}, n_bits={}\".format(\n dScale_expected * grad_factor, dScale_actual,\n X_curr, scale_curr, zero_point_curr, dout, n_bits))\n self.assertTrue(\n torch.allclose(dZeroPoint_expected * grad_factor, dZeroPoint_actual, rtol=tolerance, atol=tolerance),\n \"Expected dZeroPoint={} to match zero_point.grad={}, X={}, s={}, z={}, dout={}, n_bits={}\".format(\n dZeroPoint_expected * grad_factor, dZeroPoint_actual,\n X_curr, scale_curr, zero_point_curr, dout, n_bits))\n X_curr.grad.data.zero_()\n scale_curr.grad.data.zero_()\n zero_point_curr.grad.data.zero_()\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_backward_per_channel_cpu(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, axis, _) = X\n X_base = torch.tensor(X).to('cpu')\n channel_size = X_base.size(axis)\n scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))\n self._test_learnable_backward_per_channel(\n X_base, 'cpu', scale_base, zero_point_base, axis)\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_backward_per_channel_cuda(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n X_base = torch.tensor(X).to('cuda')\n scale_base = to_tensor(scale, 'cuda')\n zero_point_base = to_tensor(zero_point, 'cuda')\n self._test_learnable_backward_per_channel(\n X_base, 'cuda', scale_base, zero_point_base, axis)\n\n def test_numerical_consistency_per_tensor(self):\n self._test_numerical_consistency('per_tensor')\n\n def test_numerical_consistency_per_channel(self):\n self._test_numerical_consistency('per_channel')\n\n def _test_numerical_consistency(self, test_type):\n r\"\"\"Comparing numerical consistency 
between quantize/dequantize op and the fake quantize op across devices and dtypes\n \"\"\"\n torch.random.manual_seed(NP_RANDOM_SEED)\n torch_types = [torch.qint8, torch.quint8]\n float_types = [torch.float, torch.float16, torch.float64]\n zero_types = [torch.int]\n devices = [torch.device('cpu'), torch.device('cuda')] if torch.cuda.is_available() else [torch.device('cpu')]\n axis = 1\n for i in range(20):\n for torch_type, float_type, device, zero_type in itertools.product(torch_types, float_types, devices, zero_types):\n X = torch.randn(3, 3, device=device).to(float_type)\n scales = (10 * torch.randn(3, device=device)).abs()\n scale = scales.mean().to(float).item()\n zeros = (10 * torch.randn(3, device=device)).abs().to(dtype=zero_type)\n zero = zeros.max().view(1).item()\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n test_was_run = False\n if test_type == \"per_tensor\":\n test_was_run = True\n Y = torch.dequantize(torch.quantize_per_tensor(X.to('cpu').to(torch.float),\n scale, zero, torch_type)).to(device).to(float_type)\n Y_prime = torch.fake_quantize_per_tensor_affine(X, scale, zero, quant_min, quant_max)\n self.assertEqual(\n Y, Y_prime, \"Difference found between dequant+quant_per_tensor and fake_quantize_per_tensor\")\n\n if test_type == \"per_channel\":\n test_was_run = True\n Y = torch.dequantize(torch.quantize_per_channel(X.to('cpu').to(torch.float), scales.to(\n 'cpu'), zeros.to('cpu'), axis, torch_type)).to(device).to(float_type)\n Y_prime = torch.fake_quantize_per_channel_affine(X, scales, zeros, axis, quant_min, quant_max)\n self.assertEqual(\n Y, Y_prime, \"Difference found between dequant+quant_per_channel and fake_quantize_per_channel\")\n self.assertTrue(test_was_run)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_quantization.py TESTNAME\\n\\n\"\n \"instead.\")\n"
] |
[
[
"torch.nn.Linear",
"torch.round",
"torch.cuda.amp.autocast",
"torch.testing._internal.common_quantized.to_tensor",
"torch.ones",
"torch.testing._internal.hypothesis_utils.array_shapes",
"torch._fake_quantize_learnable_per_tensor_affine",
"torch.cuda.is_available",
"torch.load",
"torch.allclose",
"torch.quantization.disable_fake_quant",
"torch.quantization.enable_fake_quant",
"torch.testing._internal.hypothesis_utils.assert_deadline_disabled",
"torch.quantization.enable_observer",
"torch.quantization.PerChannelMinMaxObserver",
"torch.iinfo",
"torch.unbind",
"torch.normal",
"torch.fake_quantize_per_tensor_affine",
"torch.random.manual_seed",
"torch.manual_seed",
"torch.jit.load",
"torch.jit.save",
"torch.tensor",
"torch._fake_quantize_learnable_per_channel_affine",
"torch.zeros_like",
"torch.jit.script",
"torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_grad_reference",
"torch.zeros",
"torch.device",
"torch.quantization.default_affine_fixed_qparams_fake_quant",
"torch.testing._internal.hypothesis_utils.floats",
"torch.save",
"torch.nn.Conv2d",
"torch.testing._internal.hypothesis_utils.qparams",
"torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_reference",
"torch.quantization.prepare_qat",
"torch.fake_quantize_per_channel_affine",
"torch.quantization.MinMaxObserver",
"torch.rand",
"torch.quantization.default_fake_quant",
"torch.rand_like",
"numpy.random.seed",
"torch.quantization.get_default_qat_qconfig",
"torch.quantization.disable_observer",
"torch.quantization._learnable_fake_quantize._LearnableFakeQuantize.with_args",
"torch.randn"
]
] |
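Aside: every forward/backward test in the record above compares the fused PyTorch op against a float reference. Below is a minimal sketch of that per-tensor reference computation, under the assumption that it matches the `_fake_quantize_per_tensor_affine_reference` helper the tests call (the function name in the sketch is illustrative, not PyTorch's):

```python
# Sketch: fake quantization = quantize to an integer grid, then dequantize.
import torch

def fake_quantize_per_tensor_reference(x, scale, zero_point, quant_min, quant_max):
    # Quantize: snap to the integer grid and clamp to [quant_min, quant_max].
    q = torch.round(x / scale) + zero_point
    q = torch.clamp(q, quant_min, quant_max)
    # Dequantize: map the clamped integers back to float.
    return (q - zero_point) * scale

torch.manual_seed(0)
x = torch.randn(4, 4)
y_ref = fake_quantize_per_tensor_reference(x, scale=0.1, zero_point=128,
                                           quant_min=0, quant_max=255)
y_op = torch.fake_quantize_per_tensor_affine(x, 0.1, 128, 0, 255)
assert torch.allclose(y_ref, y_op)
```

This round trip is what makes the straight-through gradient checks in the tests meaningful: inside the clamp range the op behaves like the identity plus rounding noise, and outside it the gradient is zero.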
metabolize/entente
|
[
"c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a"
] |
[
"entente/test_restore_correspondence.py"
] |
[
"from entente.restore_correspondence import find_correspondence, restore_correspondence\nimport numpy as np\nimport pytest\nfrom .restore_correspondence import _maybe_tqdm\n\n\ndef create_truncated_test_mesh():\n from .testing import vitra_mesh\n\n # For performance.\n return vitra_mesh().picking_vertices(np.arange(1000))\n\n\ndef test_helper():\n assert [x for x in _maybe_tqdm(iter([1, 2, 3]), progress=True)] == [1, 2, 3]\n assert [x for x in _maybe_tqdm(iter([1, 2, 3]), progress=False)] == [1, 2, 3]\n\n\ndef test_find_correspondence_matched():\n b = create_truncated_test_mesh().v\n expected_correspondence = np.random.permutation(len(b))\n a = b[expected_correspondence]\n\n correspondence = find_correspondence(a, b, progress=False)\n\n np.testing.assert_array_equal(correspondence, expected_correspondence)\n np.testing.assert_array_equal(b[correspondence], a)\n\n\ndef test_find_correspondence_unmatched():\n b = create_truncated_test_mesh().v\n expected_correspondence = np.random.permutation(len(b))\n a = b[expected_correspondence]\n\n a = np.vstack([a, np.array([1.0, 2.0, 3.0])])\n\n with pytest.raises(ValueError):\n find_correspondence(a, b, progress=False)\n\n expected_correspondence = np.append(1 + expected_correspondence, np.array([-1]))\n b = np.vstack([np.array([3.0, 2.0, 1.0]), b])\n expected_unmatched_b = np.array([0])\n\n with pytest.raises(ValueError):\n find_correspondence(a, b, progress=False)\n\n correspondence, unmatched_b = find_correspondence(\n a, b, all_must_match=False, ret_unmatched_b=True, progress=False\n )\n\n np.testing.assert_array_equal(correspondence, expected_correspondence)\n np.testing.assert_array_equal(unmatched_b, expected_unmatched_b)\n reconstructed_a = np.vstack(\n [b[correspondence[np.where(correspondence != -1)]], np.array([1.0, 2.0, 3.0])]\n )\n np.testing.assert_array_equal(reconstructed_a, a)\n\n\ndef test_restore_correspondence():\n from .shuffle import shuffle_vertices\n\n test_mesh = create_truncated_test_mesh()\n shuffled, ordering = shuffle_vertices(test_mesh, ret_new_ordering=True)\n\n restored, v_old_to_new = restore_correspondence(shuffled, test_mesh, progress=False)\n\n np.testing.assert_array_equal(restored.v, test_mesh.v)\n np.testing.assert_array_equal(restored.f, test_mesh.f)\n np.testing.assert_array_equal(v_old_to_new, ordering)\n"
] |
[
[
"numpy.array",
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.where"
]
] |
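The `find_correspondence` tests above check that the recovered index array both equals the known permutation and reindexes `b` back onto `a`. Here is an illustrative standalone version of the exact-match case (this is not entente's implementation, which additionally handles unmatched rows and progress reporting):

```python
# Sketch: recover the permutation mapping rows of `a` onto rows of `b`.
import numpy as np

def find_row_correspondence(a, b):
    # Index each row of b by its exact values, then look up each row of a.
    index = {tuple(row): i for i, row in enumerate(b)}
    return np.array([index[tuple(row)] for row in a])

b = np.random.rand(10, 3)
expected = np.random.permutation(len(b))
a = b[expected]
correspondence = find_row_correspondence(a, b)
np.testing.assert_array_equal(correspondence, expected)
np.testing.assert_array_equal(b[correspondence], a)
```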
VCG/gp
|
[
"a41d0c52fd09b5e34804b9c6082778a75dfc03c1"
] |
[
"raveler/ray/ray/features/moments.py"
] |
[
"import numpy as np\nfrom scipy.misc import comb as nchoosek\nfrom . import base\n\nclass Manager(base.Null):\n def __init__(self, nmoments=4, use_diff_features=True, oriented=False, \n normalize=False, *args, **kwargs):\n super(Manager, self).__init__()\n self.nmoments = nmoments\n self.use_diff_features = use_diff_features\n self.oriented = oriented\n self.normalize = normalize\n\n @classmethod\n def load_dict(cls, fm_info):\n obj = cls(fm_info['nmoments'], fm_info['use_diff'],\n fm_info['oriented'], fm_info['normalize'])\n return obj\n\n def write_fm(self, json_fm={}):\n if 'feature_list' not in json_fm:\n json_fm['feature_list'] = []\n json_fm['feature_list'].append('moments')\n json_fm['moments'] = {\n 'nmoments' : self.nmoments,\n 'use_diff' : self.use_diff_features,\n 'oriented' : self.oriented,\n 'normalize' : self.normalize\n }\n return json_fm\n\n def compute_moment_sums(self, ar, idxs):\n values = ar[idxs][...,np.newaxis]\n return (values ** np.arange(self.nmoments+1)).sum(axis=0).T\n\n def create_node_cache(self, g, n):\n node_idxs = list(g.node[n]['extent'])\n if self.oriented:\n ar = g.max_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n return self.compute_moment_sums(ar, node_idxs)\n\n def create_edge_cache(self, g, n1, n2):\n edge_idxs = list(g[n1][n2]['boundary'])\n if self.oriented:\n ar = g.oriented_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n return self.compute_moment_sums(ar, edge_idxs)\n\n def update_node_cache(self, g, n1, n2, dst, src):\n dst += src\n\n def update_edge_cache(self, g, e1, e2, dst, src):\n dst += src\n\n def pixelwise_update_node_cache(self, g, n, dst, idxs, remove=False):\n if len(idxs) == 0: return\n a = -1.0 if remove else 1.0\n if self.oriented:\n ar = g.max_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n dst += a * self.compute_moment_sums(ar, idxs)\n\n def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):\n if len(idxs) == 0: return\n a = -1.0 if remove else 1.0\n if self.oriented:\n ar = g.max_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n dst += a * self.compute_moment_sums(ar, idxs)\n\n def compute_node_features(self, g, n, cache=None):\n if cache is None: \n cache = g.node[n][self.default_cache]\n feat = central_moments_from_noncentral_sums(cache)\n if self.normalize:\n feat = ith_root(feat)\n n = feat.ravel()[0]\n return np.concatenate(([n], feat[1:].T.ravel()))\n\n def compute_edge_features(self, g, n1, n2, cache=None):\n if cache is None: \n cache = g[n1][n2][self.default_cache]\n feat = central_moments_from_noncentral_sums(cache)\n if self.normalize:\n feat = ith_root(feat)\n n = feat.ravel()[0]\n return np.concatenate(([n], feat[1:].T.ravel()))\n\n def compute_difference_features(self,g, n1, n2, cache1=None, cache2=None,\n nthroot=False):\n if not self.use_diff_features:\n return np.array([])\n if cache1 is None:\n cache1 = g.node[n1][self.default_cache]\n m1 = central_moments_from_noncentral_sums(cache1)\n\n if cache2 is None:\n cache2 = g.node[n2][self.default_cache]\n m2 = central_moments_from_noncentral_sums(cache2)\n \n if nthroot or self.normalize:\n m1, m2 = map(ith_root, [m1, m2])\n feat = abs(m1-m2)\n n = feat.ravel()[0]\n return np.concatenate(([n], feat[1:].T.ravel()))\n\ndef central_moments_from_noncentral_sums(a):\n \"\"\"Compute moments about the mean from sums of x**i, for i=0, ..., len(a).\n\n The first two moments about the mean (1 and 0) would always be \n uninteresting so the function returns n (the sample size) and mu (the \n 
sample mean) in their place.\n \"\"\"\n a = a.astype(np.double)\n if len(a) == 1:\n return a\n N = a.copy()[0]\n a /= N\n mu = a.copy()[1]\n ac = np.zeros_like(a)\n for n in range(2,len(a)):\n js = np.arange(n+1)\n if a.ndim > 1: js = js[:,np.newaxis]\n # Formula found in Wikipedia page for \"Central moment\", 2011-07-31\n ac[n] = (nchoosek(n,js) * \n (-1)**(n-js) * a[js.ravel()] * mu**(n-js)).sum(axis=0)\n ac[0] = N\n ac[1] = mu\n return ac\n\ndef ith_root(ar):\n \"\"\"Get the ith root of the array values at ar[i] for i > 1.\"\"\"\n if len(ar) < 2:\n return ar\n ar = ar.copy()\n ar[2:] = np.sign(ar[2:]) * \\\n (abs(ar[2:]) ** (1.0/np.arange(2, len(ar)))[:, np.newaxis])\n return ar\n\n"
] |
[
[
"numpy.zeros_like",
"numpy.array",
"numpy.sign",
"scipy.misc.comb",
"numpy.arange"
]
] |
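moments.py never stores raw values: it caches sums of x**i per node or edge and recovers central moments on demand via the binomial identity mu_n = sum_j C(n, j) * (-1)**(n - j) * m_j * mean**(n - j), where m_j is the j-th noncentral moment. A standalone sketch of that conversion follows (note that scipy.misc.comb, imported by the file, lives in scipy.special in current SciPy):

```python
# Sketch: central moments recovered from cached sums of x**i.
import numpy as np
from scipy.special import comb  # modern home of scipy.misc.comb

values = np.random.rand(1000)
nmoments = 4
sums = (values[:, np.newaxis] ** np.arange(nmoments + 1)).sum(axis=0)

N = sums[0]
noncentral = sums / N            # m_0 .. m_4, with m_0 == 1
mu = noncentral[1]               # the sample mean
central = np.zeros_like(noncentral)
for n in range(2, len(noncentral)):
    js = np.arange(n + 1)
    central[n] = (comb(n, js) * (-1.0) ** (n - js)
                  * noncentral[js] * mu ** (n - js)).sum()

# n = 2 must reproduce the (population) variance.
assert np.isclose(central[2], ((values - values.mean()) ** 2).mean())
```

Caching sums rather than moments is what lets update_node_cache be a plain addition: the sums over a merged region are the sums over its parts.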
AndrewQuinn2020/EECS-332-MPs
|
[
"ee164e98bd6b1b05296e4abec69a8b5d5de2581b"
] |
[
"MP3/mp3_testgen.py"
] |
[
"#!/usr/bin/python3\n\n# Anything not directly related to processing here\nimport sys\nfrom math import floor\nfrom pathlib import Path\nfrom random import randint\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mp3_helper import *\nfrom PIL import Image\n\nnp.set_printoptions(threshold=sys.maxsize)\nnp.set_printoptions(linewidth=1000)\n\n\nimage_dimensions = (5, 5)\n\n\nif __name__ == \"__main__\":\n hello()\n print(\"\\n\\nThis is the test image generator for MP #3.\")\n print(\"We are going to generate a bunch of small bitmaps with colors\")\n print(\"differing by small values.\")\n print('\\n\\nThe name \"test_image_4_xxx.bmp\" indicates 4 different')\n print(\"shades you probably can't see; once you histogram EQ it they should\")\n print(\"fill all or almost all of the whole spectrum. For example,\")\n print(\"a properly histogram EQ'd test_image_2_000.bmp should be pure\")\n print(\"black and pure white.\")\n\n for x in range(2, 5):\n for i in range(0, 5):\n new_bmp = np.random.choice(\n a=list(range(0, x)), size=image_dimensions\n ).astype(np.uint8)\n new_bmp = new_bmp + randint(0, 256 - x)\n print(new_bmp)\n im = Image.fromarray(new_bmp, \"L\")\n\n file_index = str(i).zfill(3)\n im.save(\n os.path.join(\n test_images_dir, \"test_image_{}_{}.bmp\".format(x, file_index)\n )\n )\n"
] |
[
[
"numpy.set_printoptions"
]
] |
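The generator's core step, shown self-contained below: draw a 5x5 grid from x adjacent grey levels, then shift the whole grid by one random offset so the shades sit in a narrow band anywhere in [0, 255]. The output filename here is illustrative; the real script writes under test_images_dir provided by mp3_helper.

```python
# Sketch: one low-contrast test bitmap for histogram equalization.
import numpy as np
from PIL import Image
from random import randint

x = 4                                     # number of distinct shades
new_bmp = np.random.choice(list(range(x)), size=(5, 5)).astype(np.uint8)
new_bmp = new_bmp + randint(0, 256 - x)   # shift the band to a random grey level
Image.fromarray(new_bmp, "L").save("test_image_4_000.bmp")
```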
stanford-futuredata/Willump-Simple
|
[
"56d52074b671e07a364744e8195fcdc91926c3a8"
] |
[
"tests/benchmark_scripts/product_eval.py"
] |
[
"import argparse\nimport pickle\nimport time\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom product_utils import *\nfrom willump.evaluation.willump_executor import willump_execute\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-c\", \"--cascades\", action=\"store_true\", help=\"Cascade threshold\")\nargs = parser.parse_args()\nif args.cascades:\n cascades_dict = pickle.load(open(base_directory + \"lazada_training_cascades.pk\", \"rb\"))\nelse:\n cascades_dict = None\n\n\n@willump_execute(predict_function=product_predict,\n confidence_function=product_confidence,\n predict_cascades_params=cascades_dict)\ndef product_eval_pipeline(input_x, model, title_vect, color_vect, brand_vect):\n title_result = transform_data(input_x, title_vect)\n color_result = transform_data(input_x, color_vect)\n brand_result = transform_data(input_x, brand_vect)\n return product_predict(model, [title_result, color_result, brand_result])\n\n\nif __name__ == '__main__':\n df = pd.read_csv(base_directory + \"lazada_data_train.csv\", header=None,\n names=['country', 'sku_id', 'title', 'category_lvl_1', 'category_lvl_2', 'category_lvl_3',\n 'short_description', 'price', 'product_type'])\n y = np.loadtxt(base_directory + \"conciseness_train.labels\", dtype=int)\n _, test_df, _, test_y = train_test_split(df, y, test_size=0.2, random_state=42)\n title_vectorizer, color_vectorizer, brand_vectorizer = pickle.load(\n open(base_directory + \"lazada_vectorizers.pk\", \"rb\"))\n model = pickle.load(open(base_directory + \"lazada_model.pk\", \"rb\"))\n\n product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)\n product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)\n\n start_time = time.time()\n preds = product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)\n time_elapsed = time.time() - start_time\n\n print(\"Elapsed Time %fs Num Rows %d Throughput %f rows/sec\" %\n (time_elapsed, len(test_df), len(test_df) / time_elapsed))\n\n print(\"1 - RMSE Score: %f\" % product_score(preds, test_y))\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
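Stripped of the @willump_execute machinery, product_eval_pipeline is three pre-fitted text vectorizers whose outputs feed one model. A rough scikit-learn sketch of that shape (the data and columns are made up; product_utils' transform_data and product_predict are project-specific and omitted):

```python
# Sketch: several vectorized text columns stacked into one feature matrix.
import scipy.sparse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

titles = ["blue cotton shirt", "red leather wallet", "blue denim jacket"]
colors = ["blue", "red", "blue"]
labels = [1, 0, 1]

title_vect = CountVectorizer().fit(titles)   # stands in for lazada_vectorizers.pk
color_vect = CountVectorizer().fit(colors)
features = scipy.sparse.hstack([title_vect.transform(titles),
                                color_vect.transform(colors)])
model = LogisticRegression().fit(features, labels)
print(model.predict(features))
```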
Sayar1106/OTTPlatformRecommender
|
[
"85b72dfe9f810e3b6e12f8c7702ef94db3a03190"
] |
[
"sample_model/inference.py"
] |
[
"import joblib\nimport pickle\nimport os\nimport config\nimport pandas as pd\nimport click\n\n\ndef load_model_helper(file_path):\n if os.path.split(\".\")[-1] == \"pickle\":\n return pickle.load(open(file_path, 'wb'))\n \n return joblib.load(file_path)\n\ndef fetch_artist_columns(df, artist_list):\n return [artist for artist in df[\"artists\"].to_list() for a in artist_list if a in artist]\n\n\nclass SpotifyRecommender:\n def __init__(self, model):\n self.model = model\n \n def _predict(self, arr, k=20):\n return self.model.kneighbors(arr, \n n_neighbors=k, \n return_distance=False)\n \n def create_playlist(self, arr):\n predictions = self._predict(arr)\n lookup_table = pd.read_csv(config.LOOKUP_TABLE)\n artist_list = lookup_table.iloc[predictions[0][1:], 1].to_list()\n master_table = pd.read_csv(config.MASTER_TABLE, usecols=[\"artists\", \"name\", \"popularity\"])\n\n songs = master_table[master_table[\"artists\"].isin(fetch_artist_columns(master_table, artist_list))]\n songs = songs.drop_duplicates(subset=[\"name\"], keep=\"first\")\n\n return [*songs[[\"artists\", \"name\"]].sample(n=30).itertuples(name=\"Songs\", index=False)]\n\n\n@click.command()\n@click.option(\"--artist_name\", type=str, help=\"Enter the artist name.\")\ndef main(artist_name):\n model = load_model_helper(config.MODEL_OUTPUT)\n spotify_recommender = SpotifyRecommender(model)\n df = pd.read_csv(config.MODEL_INPUT, usecols=[\"artists\", \"acousticness\", \"danceability\", \n \"energy\", \"instrumentalness\", \n \"liveness\", \"loudness\", \"speechiness\", \n \"tempo\", \"valence\", \"popularity\"])\n arr = df[df[\"artists\"].isin([artist_name])].values[:,1:]\n \n playlist = spotify_recommender.create_playlist(arr)\n print(playlist)\n \n\nif __name__ == \"__main__\":\n main()\n\n \n"
] |
[
[
"pandas.read_csv"
]
] |
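create_playlist above is a k-nearest-neighbours lookup: kneighbors returns the 20 closest rows and the [0][1:] slice drops the first hit, which is the seed artist itself. A minimal direct sketch with scikit-learn (the feature matrix is synthetic):

```python
# Sketch: the neighbour lookup behind SpotifyRecommender._predict.
import numpy as np
from sklearn.neighbors import NearestNeighbors

features = np.random.rand(100, 10)        # one row of audio features per artist
model = NearestNeighbors(n_neighbors=20).fit(features)

query = features[:1]                      # the seed artist's feature row
neighbors = model.kneighbors(query, n_neighbors=20, return_distance=False)
similar = neighbors[0][1:]                # indices of the 19 most similar artists
print(similar)
```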
zhengcj1/ChID-Dataset
|
[
"f7d9b7b75cccd50455987a623c898b490e8450f6"
] |
[
"Competition/RNN-based Baseline/Models/SAR.py"
] |
[
"import tensorflow as tf\nfrom Models.BasicModel import BasicModel\n\nclass Model(BasicModel):\n def __init__(self,\n learning_rate,\n init_word_embed,\n init_idiom_embed,\n size_embed=200,\n num_units=100, # make sure that num_units = size_embed / 2\n max_gradient_norm=5.0):\n\n assert size_embed == 2 * num_units\n\n super(Model, self).__init__()\n super(Model, self)._create_embedding(init_word_embed, init_idiom_embed)\n\n doc_embedding = tf.cond(self.is_train,\n lambda: tf.nn.dropout(tf.nn.embedding_lookup(self.word_embed_matrix, self.document), 0.5),\n lambda: tf.nn.embedding_lookup(self.word_embed_matrix, self.document))\n # [batch, length, size_embed]\n can_embedding = tf.nn.embedding_lookup(self.idiom_embed_matrix, self.candidates) # [batch, 10, size_embed]\n\n with tf.variable_scope(\"doc\"):\n cell_fw_doc = tf.nn.rnn_cell.LSTMCell(num_units, initializer=tf.orthogonal_initializer())\n cell_bw_doc = tf.nn.rnn_cell.LSTMCell(num_units, initializer=tf.orthogonal_initializer())\n h_doc, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw_doc, cell_bw_doc, doc_embedding, self.doc_length,\n dtype=tf.float32, scope=\"bi_lstm\")\n state_doc = tf.concat(h_doc, 2) # [batch, length, 2 * num_units]\n\n blanks_states = tf.matmul(self.locations, state_doc) # query, [batch, labels, 2 * num_units]\n bilinear_attention = tf.get_variable(\"bilinear_attention\", [2 * num_units, 2 * num_units], tf.float32)\n attention_matrix = tf.matmul(tf.einsum(\"abc,cd->abd\", blanks_states, bilinear_attention), # [batch, labels, 2 * num_units]\n tf.transpose(state_doc, [0, 2, 1])) # [batch, 2 * num_units, length]\n tmp = tf.exp(attention_matrix) * tf.tile(tf.expand_dims(self.mask, axis=1), [1, tf.shape(blanks_states)[1], 1])\n attention = tf.div(tmp, tf.reduce_sum(tmp, axis=-1, keep_dims=True))\n #attention = tf.nn.softmax(attention_matrix) # [batch, labels, length]\n state_attention = tf.matmul(attention, state_doc) # [batch, labels, 2 * num_units]\n\n match_matrix = tf.matmul(state_attention, tf.transpose(can_embedding, [0, 2, 1])) # [batch, labels, 10]\n self.logits = tf.nn.softmax(match_matrix)\n\n super(Model, self)._create_loss()\n super(Model, self)._create_train_step(learning_rate, max_gradient_norm)"
] |
[
[
"tensorflow.exp",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.expand_dims",
"tensorflow.matmul",
"tensorflow.orthogonal_initializer",
"tensorflow.transpose",
"tensorflow.variable_scope",
"tensorflow.get_variable",
"tensorflow.nn.embedding_lookup",
"tensorflow.reduce_sum",
"tensorflow.nn.softmax",
"tensorflow.einsum"
]
] |
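The commented-out tf.nn.softmax in SAR.py is replaced by a hand-rolled masked softmax: exponentiated attention scores are zeroed at padded positions before normalizing, so no attention mass ever lands on padding. The same computation in NumPy, with shapes following the model (scores [batch, labels, length], mask [batch, length]); like the TF code, this skips the usual max-subtraction, so very large scores can overflow:

```python
# Sketch: softmax restricted to unmasked positions.
import numpy as np

def masked_softmax(scores, mask):
    # mask is 1.0 at real tokens, 0.0 at padding.
    weights = np.exp(scores) * mask[:, np.newaxis, :]
    return weights / weights.sum(axis=-1, keepdims=True)

scores = np.random.randn(2, 3, 5)
mask = np.array([[1, 1, 1, 0, 0],
                 [1, 1, 1, 1, 1]], dtype=float)
attn = masked_softmax(scores, mask)
assert np.allclose(attn.sum(axis=-1), 1.0)   # rows still normalize
assert np.allclose(attn[0, :, 3:], 0.0)      # padding gets zero attention
```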
atulvpweb/Screeni-py
|
[
"2a0b995ce134fb55977fa2ab38274a72392921fc"
] |
[
"src/classes/Screener.py"
] |
[
"'''\n * Project : Screenipy\n * Author : Pranjal Joshi\n * Created : 28/04/2021\n * Description : Class for analyzing and validating stocks\n'''\n\nimport sys\nimport math\nimport numpy as np\nimport pandas as pd\nimport talib\nimport classes.ConfigManager as ConfigManager\nfrom scipy.signal import argrelextrema\nfrom classes.ColorText import colorText\nfrom classes.SuppressOutput import SuppressOutput\n\n# Exception for newly listed stocks with candle nos < daysToLookback\nclass StockDataNotAdequate(Exception):\n pass\n\n# This Class contains methods for stock analysis and screening validation\nclass tools:\n\n # Private method to find candle type\n # True = Bullish, False = Bearish\n def getCandleType(dailyData):\n if dailyData['Close'][0] >= dailyData['Open'][0]:\n return True\n else:\n return False\n\n # Preprocess the acquired data\n def preprocessData(data, daysToLookback=ConfigManager.daysToLookback):\n if ConfigManager.useEMA:\n sma = talib.EMA(data['Close'],timeperiod=50)\n lma = talib.EMA(data['Close'],timeperiod=200)\n data.insert(6,'SMA',sma)\n data.insert(7,'LMA',lma)\n else:\n sma = data.rolling(window=50).mean()\n lma = data.rolling(window=200).mean()\n data.insert(6,'SMA',sma['Close'])\n data.insert(7,'LMA',lma['Close'])\n vol = data.rolling(window=20).mean()\n rsi = talib.RSI(data['Close'], timeperiod=14)\n data.insert(8,'VolMA',vol['Volume'])\n data.insert(9,'RSI',rsi)\n data = data[::-1] # Reverse the dataframe\n # data = data.fillna(0)\n # data = data.replace([np.inf, -np.inf], 0)\n fullData = data\n trimmedData = data.head(daysToLookback)\n return (fullData, trimmedData)\n\n # Validate LTP within limits\n def validateLTP(data, dict, saveDict, minLTP=ConfigManager.minLTP, maxLTP=ConfigManager.maxLTP):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n ltp = round(recent['Close'][0],2)\n saveDict['LTP'] = str(ltp)\n verifyStageTwo = True\n if(ConfigManager.stageTwo):\n yearlyLow = data.head(300).min()['Close']\n yearlyHigh = data.head(300).max()['Close']\n if ltp < (2 * yearlyLow) or ltp < (0.75 * yearlyHigh):\n verifyStageTwo = False\n if(ltp >= minLTP and ltp <= maxLTP and verifyStageTwo):\n dict['LTP'] = colorText.GREEN + (\"%.2f\" % ltp) + colorText.END\n return True\n else:\n dict['LTP'] = colorText.FAIL + (\"%.2f\" % ltp) + colorText.END\n return False\n\n # Validate if share prices are consolidating\n def validateConsolidation(data, dict, saveDict, percentage=10):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n hc = data.describe()['Close']['max']\n lc = data.describe()['Close']['min']\n if ((hc - lc) <= (hc*percentage/100) and (hc - lc != 0)):\n dict['Consolidating'] = colorText.BOLD + colorText.GREEN + \"Range = \" + str(round((abs((hc-lc)/hc)*100),2))+\"%\" + colorText.END\n else:\n dict['Consolidating'] = colorText.BOLD + colorText.FAIL + \"Range = \" + str(round((abs((hc-lc)/hc)*100),2)) + \"%\" + colorText.END\n saveDict['Consolidating'] = str(round((abs((hc-lc)/hc)*100),2))+\"%\"\n return round((abs((hc-lc)/hc)*100),2)\n\n # Validate Moving averages and look for buy/sell signals\n def validateMovingAverages(data, dict, saveDict, range=2.5):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n if(recent['SMA'][0] > recent['LMA'][0] and recent['Close'][0] > recent['SMA'][0]):\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + 'Bullish' + colorText.END\n saveDict['MA-Signal'] = 'Bullish'\n elif(recent['SMA'][0] < recent['LMA'][0]):\n 
dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + 'Bearish' + colorText.END\n saveDict['MA-Signal'] = 'Bearish'\n else:\n dict['MA-Signal'] = colorText.BOLD + colorText.WARN + 'Neutral' + colorText.END\n saveDict['MA-Signal'] = 'Neutral'\n\n smaDev = data['SMA'][0] * range / 100\n lmaDev = data['LMA'][0] * range / 100\n open, high, low, close, sma, lma = data['Open'][0], data['High'][0], data['Low'][0], data['Close'][0], data['SMA'][0], data['LMA'][0]\n maReversal = 0\n # Taking Support 50\n if close > sma and low <= (sma + smaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + '50MA-Support' + colorText.END\n saveDict['MA-Signal'] = '50MA-Support'\n maReversal = 1\n # Validating Resistance 50\n elif close < sma and high >= (sma - smaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + '50MA-Resist' + colorText.END\n saveDict['MA-Signal'] = '50MA-Resist'\n maReversal = -1\n # Taking Support 200\n elif close > lma and low <= (lma + lmaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + '200MA-Support' + colorText.END\n saveDict['MA-Signal'] = '200MA-Support'\n maReversal = 1\n # Validating Resistance 200\n elif close < lma and high >= (lma - lmaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + '200MA-Resist' + colorText.END\n saveDict['MA-Signal'] = '200MA-Resist'\n maReversal = -1\n # For a Bullish Candle\n if tools.getCandleType(data):\n # Crossing up 50\n if open < sma and close > sma:\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + 'BullCross-50MA' + colorText.END\n saveDict['MA-Signal'] = 'BullCross-50MA'\n maReversal = 1 \n # Crossing up 200\n elif open < lma and close > lma:\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + 'BullCross-200MA' + colorText.END\n saveDict['MA-Signal'] = 'BullCross-200MA'\n maReversal = 1\n # For a Bearish Candle\n elif not tools.getCandleType(data):\n # Crossing down 50\n if open > sma and close < sma:\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + 'BearCross-50MA' + colorText.END\n saveDict['MA-Signal'] = 'BearCross-50MA'\n maReversal = -1 \n # Crossing up 200\n elif open > lma and close < lma:\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + 'BearCross-200MA' + colorText.END\n saveDict['MA-Signal'] = 'BearCross-200MA'\n maReversal = -1\n return maReversal\n\n # Validate if volume of last day is higher than avg\n def validateVolume(data, dict, saveDict, volumeRatio=2.5):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n ratio = round(recent['Volume'][0]/recent['VolMA'][0],2)\n saveDict['Volume'] = str(ratio)+\"x\"\n if(ratio >= volumeRatio and ratio != np.nan and (not math.isinf(ratio)) and (ratio != 20)):\n dict['Volume'] = colorText.BOLD + colorText.GREEN + str(ratio) + \"x\" + colorText.END\n return True\n else:\n dict['Volume'] = colorText.BOLD + colorText.FAIL + str(ratio) + \"x\" + colorText.END\n return False\n\n # Find accurate breakout value\n def findBreakout(data, dict, saveDict, daysToLookback):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n data = data[1:]\n hs = round(data.describe()['High']['max'],2)\n hc = round(data.describe()['Close']['max'],2)\n rc = round(recent['Close'][0],2)\n if hs > hc:\n if ((hs - hc) <= (hs*2/100)):\n saveDict['Breaking-Out'] = str(hc)\n if rc >= hc:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + 
\"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return False\n else: \n noOfHigherShadows = len(data[data.High > hc])\n if(daysToLookback/noOfHigherShadows <= 3):\n saveDict['Breaking-Out'] = str(hs)\n if rc >= hs:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hs) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + \"BO: \" + str(hs) + colorText.END\n return False\n else:\n saveDict['Breaking-Out'] = str(hc) + \", \" + str(hs)\n if rc >= hc:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + \"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return False\n else:\n saveDict['Breaking-Out'] = str(hc)\n if rc >= hc:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hc) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + \"BO: \" + str(hc) + colorText.END\n return False\n\n # Validate 'Inside Bar' structure for recent days\n def validateInsideBar(data, dict, saveDict, daysToLookback=4):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n data = data.head(daysToLookback)\n lowsData = data.sort_values(by=['Low'], ascending=False)\n highsData = data.sort_values(by=['High'], ascending=True)\n if(highsData.equals(lowsData)):\n dict['Pattern'] = colorText.BOLD + colorText.GREEN + (\"Inside Bar (%d days)\" % daysToLookback) + colorText.END\n saveDict['Pattern'] = \"Inside Bar (%d days)\" % daysToLookback\n return True\n dict['Pattern'] = ''\n saveDict['Pattern'] = ''\n return False\n\n # Validate if recent volume is lowest of last 'N' Days\n def validateLowestVolume(data, daysForLowestVolume):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n if daysForLowestVolume == None:\n daysForLowestVolume = 30\n data = data.head(daysForLowestVolume)\n recent = data.head(1)\n if((recent['Volume'][0] <= data.describe()['Volume']['min']) and recent['Volume'][0] != np.nan):\n return True\n return False\n\n # validate if RSI is within given range\n def validateRSI(data, dict, saveDict, minRSI, maxRSI):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n rsi = int(data.head(1)['RSI'][0])\n saveDict['RSI'] = rsi\n if(rsi >= minRSI and rsi <= maxRSI) and (rsi <= 70 and rsi >= 30):\n dict['RSI'] = colorText.BOLD + colorText.GREEN + str(rsi) + colorText.END\n return True\n dict['RSI'] = colorText.BOLD + colorText.FAIL + str(rsi) + colorText.END\n return False\n\n # Find out trend for days to lookback\n def findTrend(data, dict, saveDict, daysToLookback=ConfigManager.daysToLookback,stockName=\"\"):\n data = data.head(daysToLookback)\n data = data[::-1]\n data = data.set_index(np.arange(len(data)))\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n with SuppressOutput(suppress_stdout=True,suppress_stderr=True):\n data['tops'] = data['Close'].iloc[list(argrelextrema(np.array(data['Close']), np.greater_equal, order=1)[0])]\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n try:\n try:\n if len(data) < daysToLookback:\n raise StockDataNotAdequate\n slope,c = np.polyfit(data.index[data.tops > 0], data['tops'][data.tops > 0], 1)\n except Exception as e:\n slope,c = 0,0\n angle = np.rad2deg(np.arctan(slope))\n if (angle == 0):\n dict['Trend'] = colorText.BOLD + \"Unknown\" + colorText.END\n saveDict['Trend'] = 'Unknown'\n elif (angle 
<= 30 and angle >= -30):\n dict['Trend'] = colorText.BOLD + colorText.WARN + \"Sideways\" + colorText.END\n saveDict['Trend'] = 'Sideways'\n elif (angle >= 30 and angle < 61):\n dict['Trend'] = colorText.BOLD + colorText.GREEN + \"Weak Up\" + colorText.END\n saveDict['Trend'] = 'Weak Up'\n elif angle >= 60:\n dict['Trend'] = colorText.BOLD + colorText.GREEN + \"Strong Up\" + colorText.END\n saveDict['Trend'] = 'Strong Up'\n elif (angle >= -30 and angle < -61):\n dict['Trend'] = colorText.BOLD + colorText.FAIL + \"Weak Down\" + colorText.END\n saveDict['Trend'] = 'Weak Down'\n elif angle <= -60:\n dict['Trend'] = colorText.BOLD + colorText.FAIL + \"Strong Down\" + colorText.END\n saveDict['Trend'] = 'Strong Down'\n except np.linalg.LinAlgError:\n dict['Trend'] = colorText.BOLD + \"Unknown\" + colorText.END\n saveDict['Trend'] = 'Unknown'\n return saveDict['Trend']\n \n # Debugging - Experiment with data\n # import matplotlib.pyplot as plt\n # print(saveDict['Trend'])\n # print(slope)\n # print(math.degrees(math.atan(slope)))\n # plt.scatter(data.index[data.tops > 0], data['tops'][data.tops > 0], c='r')\n # plt.plot(data.index, data['Close'])\n # plt.plot(data.index, slope*data.index+c,)\n # plt.show()\n\n '''\n # Find out trend for days to lookback\n def validateVCP(data, dict, saveDict, daysToLookback=ConfigManager.daysToLookback, stockName=None):\n data = data.head(daysToLookback)\n data = data[::-1]\n data = data.set_index(np.arange(len(data)))\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n data['tops'] = data['Close'].iloc[list(argrelextrema(np.array(data['Close']), np.greater_equal, order=3)[0])]\n data['bots'] = data['Close'].iloc[list(argrelextrema(np.array(data['Close']), np.less_equal, order=3)[0])]\n try:\n try:\n top_slope,top_c = np.polyfit(data.index[data.tops > 0], data['tops'][data.tops > 0], 1)\n bot_slope,bot_c = np.polyfit(data.index[data.bots > 0], data['bots'][data.bots > 0], 1)\n topAngle = math.degrees(math.atan(top_slope))\n vcpAngle = math.degrees(math.atan(bot_slope) - math.atan(top_slope))\n\n # print(math.degrees(math.atan(top_slope)))\n # print(math.degrees(math.atan(bot_slope)))\n # print(vcpAngle)\n # print(topAngle)\n # print(data.max()['bots'])\n # print(data.max()['tops'])\n if (vcpAngle > 20 and vcpAngle < 70) and (topAngle > -10 and topAngle < 10) and (data['bots'].max() <= data['tops'].max()) and (len(data['bots'][data.bots > 0]) > 1):\n print(\"---> GOOD VCP %s at %sRs\" % (stockName, top_c))\n import os\n os.system(\"echo %s >> vcp_plots\\VCP.txt\" % stockName)\n\n import matplotlib.pyplot as plt \n plt.scatter(data.index[data.tops > 0], data['tops'][data.tops > 0], c='g')\n plt.scatter(data.index[data.bots > 0], data['bots'][data.bots > 0], c='r')\n plt.plot(data.index, data['Close'])\n plt.plot(data.index, top_slope*data.index+top_c,'g--')\n plt.plot(data.index, bot_slope*data.index+bot_c,'r--')\n if stockName != None:\n plt.title(stockName)\n # plt.show()\n plt.savefig('vcp_plots\\%s.png' % stockName)\n plt.clf()\n except np.RankWarning:\n pass\n except np.linalg.LinAlgError:\n return False\n '''\n \n"
] |
[
[
"numpy.array",
"numpy.arctan",
"numpy.polyfit"
]
] |
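findTrend reduces to three calls: argrelextrema to pick local price tops, np.polyfit to fit a line through them, and np.rad2deg(np.arctan(slope)) to turn the slope into an angle that gets bucketed into trend labels. A stripped-down sketch on synthetic prices:

```python
# Sketch: trend angle from a line fit through local price tops.
import numpy as np
from scipy.signal import argrelextrema

np.random.seed(0)
close = np.cumsum(np.random.randn(50)) + 100.0      # synthetic closing prices
top_idx = argrelextrema(close, np.greater_equal, order=1)[0]
slope, intercept = np.polyfit(top_idx, close[top_idx], 1)
angle = np.rad2deg(np.arctan(slope))
print("trend angle: %.1f degrees" % angle)
```

One wrinkle in the bucketing above: the Weak Down branch tests `angle >= -30 and angle < -61`, which no value can satisfy; by symmetry with the Weak Up branch, the intent is presumably `angle <= -30 and angle > -60`.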
ArneRustad/Master-thesis-cf
|
[
"23b993b2877ff1506896c4181c4151578091b602"
] |
[
"run_hp_idun2.py"
] |
[
"print(\"Starting hyperparameter tuning on Idun\")\r\nimport os\r\nimport helpers.hp_tuning.hp_gen\r\nfrom tabGAN import TabGAN\r\nfrom src import constants as const\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nn_epochs = 100\r\nn_critic = 10\r\nopt_lr = 0.0002\r\nadam_beta1 = 0.5\r\nnoise_discrete_unif_max = 0\r\nbatch_size = 500\r\nprogress_bar_subsubprocess = False\r\njit_compile_train_step = False\r\n\r\nconst.dir.storage = lambda: \"/cluster/work/arneir\"\r\nprint(\"Storage dir:\", const.dir.storage())\r\n\r\ndataset_train_path = os.path.join(const.dir.data(), \"df_adult_edited_train.csv\")\r\ndataset_test_path = os.path.join(const.dir.data(), \"df_adult_edited_test.csv\")\r\n\r\ndata_train = pd.read_csv(dataset_train_path)\r\ndata_test = pd.read_csv(dataset_test_path)\r\ndiscrete_columns = data_train.columns[data_train.dtypes == \"object\"]\r\n\r\nactivation_function_vec = [(\"LeakyReLU\", False), (\"GELU\", False), (\"GELU\", True)]\r\nn_synthetic_datasets_activation_function_comparison = 10\r\nn_epochs_activation_function = 100\r\n\r\ndef create_tabGAN_for_activation_function(activation_function, approximate):\r\n tg_qtr = TabGAN(data_train, n_critic = n_critic, opt_lr = opt_lr, adam_beta1 = adam_beta1,\r\n quantile_transformation_int = True, quantile_rand_transformation = True,\r\n noise_discrete_unif_max = noise_discrete_unif_max, tf_data_use=True,\r\n activation_function=activation_function, gelu_approximate=approximate)\r\n return tg_qtr\r\n\r\nhelpers.hp_tuning.generate_multiple_datasets_for_multiple_hyperparameters(\r\n create_tabGAN_func=create_tabGAN_for_activation_function,\r\n hyperparams_vec=activation_function_vec,\r\n n_epochs=n_epochs_activation_function,\r\n dataset_dir=const.dir.hyperparams_tuning(),\r\n batch_size=batch_size,\r\n subfolder=\"tabGAN-qtr\",\r\n n_synthetic_datasets=n_synthetic_datasets_activation_function_comparison,\r\n restart=False,\r\n redo_hyperparams_vec = [],\r\n hyperparams_name = \"activation\",\r\n hyperparams_subname=[\"function\", \"approximate\"],\r\n add_comparison_folder=True,\r\n overwrite_dataset=False,\r\n progress_bar_subprocess=True,\r\n progress_bar_subsubprocess=progress_bar_subsubprocess\r\n)\r\n\r\n\r\n\r\nctgan_vec = [(False, False, False, False)]\r\nctgan_vec += [(bin_loss, True, log_freq, add_connection)\r\n for bin_loss in [False, True]\r\n for log_freq in [False, True]\r\n for add_connection in [False, True]]\r\nn_synthetic_datasets_ctgan_comparison = 25\r\nn_epochs_ctgan = 100\r\n\r\ndef create_tabGAN_for_ctgan(ctgan, ctgan_log_frequency, ctgan_binomial_loss, add_connection_query_to_discrete):\r\n if ctgan:\r\n tf_data_use=False\r\n else:\r\n tf_data_use=True\r\n tg_qtr = TabGAN(data_train, n_critic = n_critic, opt_lr = opt_lr, adam_beta1 = adam_beta1,\r\n quantile_transformation_int = True, quantile_rand_transformation = True,\r\n noise_discrete_unif_max = noise_discrete_unif_max, tf_data_use=tf_data_use,\r\n ctgan=ctgan, ctgan_log_frequency=ctgan_log_frequency,\r\n ctgan_binomial_loss=ctgan_binomial_loss,\r\n add_connection_query_to_discrete=add_connection_query_to_discrete)\r\n return tg_qtr\r\n\r\nhelpers.hp_tuning.generate_multiple_datasets_for_multiple_hyperparameters(\r\n create_tabGAN_func=create_tabGAN_for_ctgan,\r\n hyperparams_vec=ctgan_vec,\r\n n_epochs=n_epochs_ctgan,\r\n dataset_dir=const.dir.hyperparams_tuning(),\r\n batch_size=batch_size,\r\n subfolder=\"tabGAN-qtr\",\r\n n_synthetic_datasets=n_synthetic_datasets_ctgan_comparison,\r\n restart = True,\r\n redo_hyperparams_vec = [],\r\n 
hyperparams_name = \"categorical_query\",\r\n hyperparams_subname=[\"ctgan_binomial_loss\", \"ctgan\", \"log_frequency\", \"add_connection_query_to_discrete\"],\r\n add_comparison_folder=True,\r\n overwrite_dataset=False,\r\n progress_bar_subprocess=True,\r\n progress_bar_subsubprocess=progress_bar_subsubprocess\r\n)"
] |
[
[
"pandas.read_csv"
]
] |
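The ctgan_vec grid in the script enumerates a baseline tuple plus every combination of three booleans with ctgan fixed to True. The same grid written with itertools.product, as a sketch (names mirror the script's comprehension):

```python
# Sketch: the hyperparameter grid as baseline + full boolean product.
import itertools

ctgan_vec = [(False, False, False, False)]          # baseline: CTGAN disabled
ctgan_vec += [(bin_loss, True, log_freq, add_connection)
              for bin_loss in [False, True]
              for log_freq, add_connection in itertools.product([False, True], repeat=2)]

assert len(ctgan_vec) == 1 + 2 ** 3                 # 9 hyperparameter settings
for bin_loss, ctgan, log_freq, add_connection in ctgan_vec:
    print(ctgan, bin_loss, log_freq, add_connection)
```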
REMeyer/astropy
|
[
"28c49fb618538a01812e586cd07bccdf0591a6c6",
"28c49fb618538a01812e586cd07bccdf0591a6c6",
"28c49fb618538a01812e586cd07bccdf0591a6c6"
] |
[
"astropy/table/pprint.py",
"astropy/modeling/core.py",
"astropy/table/tests/test_info.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom ..extern import six\nfrom ..extern.six import text_type\nfrom ..extern.six.moves import zip, range\n\nimport os\nimport sys\nimport re\n\nimport numpy as np\n\nfrom .. import log\nfrom ..utils.console import Getch, color_print, terminal_size, conf\nfrom ..utils.data_info import dtype_info_name\n\n__all__ = []\n\n\ndef default_format_func(format_, val):\n if isinstance(val, bytes):\n return val.decode('utf-8', errors='replace')\n else:\n return text_type(val)\n\n\n# The first three functions are helpers for _auto_format_func\n\ndef _use_str_for_masked_values(format_func):\n \"\"\"Wrap format function to trap masked values.\n\n String format functions and most user functions will not be able to deal\n with masked values, so we wrap them to ensure they are passed to str().\n \"\"\"\n return lambda format_, val: (str(val) if val is np.ma.masked\n else format_func(format_, val))\n\n\ndef _possible_string_format_functions(format_):\n \"\"\"Iterate through possible string-derived format functions.\n\n A string can either be a format specifier for the format built-in,\n a new-style format string, or an old-style format string.\n \"\"\"\n yield lambda format_, val: format(val, format_)\n yield lambda format_, val: format_.format(val)\n yield lambda format_, val: format_ % val\n\n\ndef get_auto_format_func(\n col=None,\n possible_string_format_functions=_possible_string_format_functions):\n \"\"\"\n Return a wrapped ``auto_format_func`` function which is used in\n formatting table columns. This is primarily an internal function but\n gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.\n\n Parameters\n ----------\n col_name : object, optional\n Hashable object to identify column like id or name. Default is None.\n\n possible_string_format_functions : func, optional\n Function that yields possible string formatting functions\n (defaults to internal function to do this).\n\n Returns\n -------\n Wrapped ``auto_format_func`` function\n \"\"\"\n\n def _auto_format_func(format_, val):\n \"\"\"Format ``val`` according to ``format_`` for a plain format specifier,\n old- or new-style format strings, or using a user supplied function.\n More importantly, determine and cache (in _format_funcs) a function\n that will do this subsequently. In this way this complicated logic is\n only done for the first value.\n\n Returns the formatted value.\n \"\"\"\n if format_ is None:\n return default_format_func(format_, val)\n\n if format_ in col.info._format_funcs:\n return col.info._format_funcs[format_](format_, val)\n\n if six.callable(format_):\n format_func = lambda format_, val: format_(val)\n try:\n out = format_func(format_, val)\n if not isinstance(out, six.string_types):\n raise ValueError('Format function for value {0} returned {1} '\n 'instead of string type'\n .format(val, type(val)))\n except Exception as err:\n # For a masked element, the format function call likely failed\n # to handle it. Just return the string representation for now,\n # and retry when a non-masked value comes along.\n if val is np.ma.masked:\n return str(val)\n\n raise ValueError('Format function for value {0} failed: {1}'\n .format(val, err))\n # If the user-supplied function handles formatting masked elements, use\n # it directly. 
Otherwise, wrap it in a function that traps them.\n try:\n format_func(format_, np.ma.masked)\n except Exception:\n format_func = _use_str_for_masked_values(format_func)\n else:\n # For a masked element, we cannot set string-based format functions yet,\n # as all tests below will fail. Just return the string representation\n # of masked for now, and retry when a non-masked value comes along.\n if val is np.ma.masked:\n return str(val)\n\n for format_func in possible_string_format_functions(format_):\n try:\n # Does this string format method work?\n out = format_func(format_, val)\n # Require that the format statement actually did something.\n if out == format_:\n raise ValueError('the format passed in did nothing.')\n except Exception:\n continue\n else:\n break\n else:\n # None of the possible string functions passed muster.\n raise ValueError('Unable to parse format string {0}'\n .format(format_))\n\n # String-based format functions will fail on masked elements;\n # wrap them in a function that traps them.\n format_func = _use_str_for_masked_values(format_func)\n\n col.info._format_funcs[format_] = format_func\n return out\n\n return _auto_format_func\n\n\nclass TableFormatter(object):\n @staticmethod\n def _get_pprint_size(max_lines=None, max_width=None):\n \"\"\"Get the output size (number of lines and character width) for Column and\n Table pformat/pprint methods.\n\n If no value of ``max_lines`` is supplied then the height of the\n screen terminal is used to set ``max_lines``. If the terminal\n height cannot be determined then the default will be determined\n using the ``astropy.table.conf.max_lines`` configuration item. If a\n negative value of ``max_lines`` is supplied then there is no line\n limit applied.\n\n The same applies for max_width except the configuration item is\n ``astropy.table.conf.max_width``.\n\n Parameters\n ----------\n max_lines : int or None\n Maximum lines of output (header + data rows)\n\n max_width : int or None\n Maximum width (characters) output\n\n Returns\n -------\n max_lines, max_width : int\n\n \"\"\"\n if max_lines is None:\n max_lines = conf.max_lines\n\n if max_width is None:\n max_width = conf.max_width\n\n if max_lines is None or max_width is None:\n lines, width = terminal_size()\n\n if max_lines is None:\n max_lines = lines\n elif max_lines < 0:\n max_lines = sys.maxsize\n if max_lines < 8:\n max_lines = 8\n\n if max_width is None:\n max_width = width\n elif max_width < 0:\n max_width = sys.maxsize\n if max_width < 10:\n max_width = 10\n\n return max_lines, max_width\n\n def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None,\n show_dtype=False, show_length=None, html=False, align=None):\n \"\"\"Return a list of formatted string representation of column values.\n\n Parameters\n ----------\n max_lines : int\n Maximum lines of output (header + data rows)\n\n show_name : bool\n Include column name. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include column dtype. Default is False.\n\n show_length : bool\n Include column length at end. Default is to show this only\n if the column is not shown completely.\n\n html : bool\n Output column as HTML\n\n align : str\n Left/right alignment of columns. Default is '>' (right) for all\n columns. 
Other allowed values are '<', '^', and '0=' for left,\n centered, and 0-padded, respectively.\n\n Returns\n -------\n lines : list\n List of lines with formatted column values\n\n outs : dict\n Dict which is used to pass back additional values\n defined within the iterator.\n\n \"\"\"\n if show_unit is None:\n show_unit = col.info.unit is not None\n\n outs = {} # Some values from _pformat_col_iter iterator that are needed here\n col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name,\n show_unit=show_unit,\n show_dtype=show_dtype,\n show_length=show_length,\n outs=outs)\n col_strs = list(col_strs_iter)\n if len(col_strs) > 0:\n col_width = max(len(x) for x in col_strs)\n\n if html:\n from ..utils.xml.writer import xml_escape\n n_header = outs['n_header']\n for i, col_str in enumerate(col_strs):\n # _pformat_col output has a header line '----' which is not needed here\n if i == n_header - 1:\n continue\n td = 'th' if i < n_header else 'td'\n val = '<{0}>{1}</{2}>'.format(td, xml_escape(col_str.strip()), td)\n row = ('<tr>' + val + '</tr>')\n if i < n_header:\n row = ('<thead>' + row + '</thead>')\n col_strs[i] = row\n\n if n_header > 0:\n # Get rid of '---' header line\n col_strs.pop(n_header - 1)\n col_strs.insert(0, '<table>')\n col_strs.append('</table>')\n\n # Now bring all the column string values to the same fixed width\n else:\n col_width = max(len(x) for x in col_strs) if col_strs else 1\n\n # Center line header content and generate dashed headerline\n for i in outs['i_centers']:\n col_strs[i] = col_strs[i].center(col_width)\n if outs['i_dashes'] is not None:\n col_strs[outs['i_dashes']] = '-' * col_width\n\n # Format columns according to alignment. `align` arg has precedent, otherwise\n # use `col.format` if it starts as a legal alignment string. If neither applies\n # then right justify.\n re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])')\n match = None\n if align:\n # If there is an align specified then it must match\n match = re_fill_align.match(align)\n if not match:\n raise ValueError(\"column align must be one of '<', '^', '>', or '='\")\n elif isinstance(col.info.format, six.string_types):\n # col.info.format need not match, in which case rjust gets used\n match = re_fill_align.match(col.info.format)\n\n if match:\n fill_char = match.group('fill')\n align_char = match.group('align')\n if align_char == '=':\n if fill_char != '0':\n raise ValueError(\"fill character must be '0' for '=' align\")\n fill_char = '' # str.zfill gets used which does not take fill char arg\n else:\n fill_char = ''\n align_char = '>'\n\n justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}\n justify_method = justify_methods[align_char]\n justify_args = (col_width, fill_char) if fill_char else (col_width,)\n\n for i, col_str in enumerate(col_strs):\n col_strs[i] = getattr(col_str, justify_method)(*justify_args)\n\n if outs['show_length']:\n col_strs.append('Length = {0} rows'.format(len(col)))\n\n return col_strs, outs\n\n def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs,\n show_dtype=False, show_length=None):\n \"\"\"Iterator which yields formatted string representation of column values.\n\n Parameters\n ----------\n max_lines : int\n Maximum lines of output (header + data rows)\n\n show_name : bool\n Include column name. Default is True.\n\n show_unit : bool\n Include a header row for unit. 
Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n outs : dict\n Must be a dict which is used to pass back additional values\n defined within the iterator.\n\n show_dtype : bool\n Include column dtype. Default is False.\n\n show_length : bool\n Include column length at end. Default is to show this only\n if the column is not shown completely.\n \"\"\"\n max_lines, _ = self._get_pprint_size(max_lines, -1)\n\n multidims = getattr(col, 'shape', [0])[1:]\n if multidims:\n multidim0 = tuple(0 for n in multidims)\n multidim1 = tuple(n - 1 for n in multidims)\n trivial_multidims = np.prod(multidims) == 1\n\n i_dashes = None\n i_centers = [] # Line indexes where content should be centered\n n_header = 0\n if show_name:\n i_centers.append(n_header)\n # Get column name (or 'None' if not set)\n col_name = six.text_type(col.info.name)\n if multidims:\n col_name += ' [{0}]'.format(\n ','.join(six.text_type(n) for n in multidims))\n n_header += 1\n yield col_name\n if show_unit:\n i_centers.append(n_header)\n n_header += 1\n yield six.text_type(col.info.unit or '')\n if show_dtype:\n i_centers.append(n_header)\n n_header += 1\n try:\n dtype = dtype_info_name(col.dtype)\n except AttributeError:\n dtype = 'object'\n yield six.text_type(dtype)\n if show_unit or show_name or show_dtype:\n i_dashes = n_header\n n_header += 1\n yield '---'\n\n max_lines -= n_header\n n_print2 = max_lines // 2\n n_rows = len(col)\n\n # This block of code is responsible for producing the function that\n # will format values for this column. The ``format_func`` function\n # takes two args (col_format, val) and returns the string-formatted\n # version. Some points to understand:\n #\n # - col_format could itself be the formatting function, so it will\n # actually end up being called with itself as the first arg. In\n # this case the function is expected to ignore its first arg.\n #\n # - auto_format_func is a function that gets called on the first\n # column value that is being formatted. It then determines an\n # appropriate formatting function given the actual value to be\n # formatted. This might be deterministic or it might involve\n # try/except. The latter allows for different string formatting\n # options like %f or {:5.3f}. When auto_format_func is called it:\n\n # 1. Caches the function in the _format_funcs dict so for subsequent\n # values the right function is called right away.\n # 2. Returns the formatted value.\n #\n # - possible_string_format_functions is a function that yields a\n # succession of functions that might successfully format the\n # value. 
There is a default, but Mixin methods can override this.\n # See Quantity for an example.\n #\n # - get_auto_format_func() returns a wrapped version of auto_format_func\n # with the column id and possible_string_format_functions as\n # enclosed variables.\n col_format = col.info.format or getattr(col.info, 'default_format', None)\n pssf = (getattr(col.info, 'possible_string_format_functions', None) or\n _possible_string_format_functions)\n auto_format_func = get_auto_format_func(col, pssf)\n format_func = col.info._format_funcs.get(col_format, auto_format_func)\n\n if len(col) > max_lines:\n if show_length is None:\n show_length = True\n i0 = n_print2 - (1 if show_length else 0)\n i1 = n_rows - n_print2 - max_lines % 2\n ii = np.concatenate([np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))])\n else:\n i0 = -1\n ii = np.arange(len(col))\n\n # Add formatted values if within bounds allowed by max_lines\n for i in ii:\n if i == i0:\n yield '...'\n else:\n if multidims:\n # Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')\n # with shape (n,1,...,1) from being printed as if there was\n # more than one element in a row\n if trivial_multidims:\n col_str = format_func(col_format, col[(i,) + multidim0])\n else:\n col_str = (format_func(col_format, col[(i,) + multidim0]) +\n ' .. ' +\n format_func(col_format, col[(i,) + multidim1]))\n else:\n col_str = format_func(col_format, col[i])\n yield col_str\n\n outs['show_length'] = show_length\n outs['n_header'] = n_header\n outs['i_centers'] = i_centers\n outs['i_dashes'] = i_dashes\n\n def _pformat_table(self, table, max_lines=None, max_width=None,\n show_name=True, show_unit=None, show_dtype=False,\n html=False, tableid=None, tableclass=None, align=None):\n \"\"\"Return a list of lines for the formatted string representation of\n the table.\n\n Parameters\n ----------\n max_lines : int or None\n Maximum number of rows to output\n\n max_width : int or None\n Maximum character width of output\n\n show_name : bool\n Include a header row for column names. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include a header row for column dtypes. Default is False.\n\n html : bool\n Format the output as an HTML table. Default is False.\n\n tableid : str or None\n An ID tag for the table; only used if html is set. Default is\n \"table{id}\", where id is the unique integer id of the table object,\n id(table)\n\n tableclass : str or list of str or `None`\n CSS classes for the table; only used if html is set. Default is\n none\n\n align : str or list or tuple\n Left/right alignment of columns. Default is '>' (right) for all\n columns. Other allowed values are '<', '^', and '0=' for left,\n centered, and 0-padded, respectively. 
A list of strings can be\n provided for alignment of tables with multiple columns.\n\n Returns\n -------\n rows : list\n Formatted table as a list of strings\n\n outs : dict\n Dict which is used to pass back additional values\n defined within the iterator.\n\n \"\"\"\n # \"Print\" all the values into temporary lists by column for subsequent\n # use and to determine the width\n max_lines, max_width = self._get_pprint_size(max_lines, max_width)\n cols = []\n\n if show_unit is None:\n show_unit = any(col.info.unit for col in six.itervalues(table.columns))\n\n # Coerce align into a correctly-sized list of alignments (if possible)\n n_cols = len(table.columns)\n if align is None or isinstance(align, six.string_types):\n align = [align] * n_cols\n\n elif isinstance(align, (list, tuple)):\n if len(align) != n_cols:\n raise ValueError('got {0} alignment values instead of '\n 'the number of columns ({1})'\n .format(len(align), n_cols))\n else:\n raise TypeError('align keyword must be str or list or tuple (got {0})'\n .format(type(align)))\n\n for align_, col in zip(align, table.columns.values()):\n lines, outs = self._pformat_col(col, max_lines, show_name=show_name,\n show_unit=show_unit, show_dtype=show_dtype,\n align=align_)\n if outs['show_length']:\n lines = lines[:-1]\n cols.append(lines)\n\n if not cols:\n return ['<No columns>'], {'show_length': False}\n\n # Use the values for the last column since they are all the same\n n_header = outs['n_header']\n\n n_rows = len(cols[0])\n outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1\n dots_col = ['...'] * n_rows\n middle = len(cols) // 2\n while outwidth(cols) > max_width:\n if len(cols) == 1:\n break\n if len(cols) == 2:\n cols[1] = dots_col\n break\n if cols[middle] is dots_col:\n cols.pop(middle)\n middle = len(cols) // 2\n cols[middle] = dots_col\n\n # Now \"print\" the (already-stringified) column values into a\n # row-oriented list.\n rows = []\n if html:\n from ..utils.xml.writer import xml_escape\n\n if tableid is None:\n tableid = 'table{id}'.format(id=id(table))\n\n if tableclass is not None:\n if isinstance(tableclass, list):\n tableclass = ' '.join(tableclass)\n rows.append('<table id=\"{tid}\" class=\"{tcls}\">'.format(\n tid=tableid, tcls=tableclass))\n else:\n rows.append('<table id=\"{tid}\">'.format(tid=tableid))\n\n for i in range(n_rows):\n # _pformat_col output has a header line '----' which is not needed here\n if i == n_header - 1:\n continue\n td = 'th' if i < n_header else 'td'\n vals = ('<{0}>{1}</{2}>'.format(td, xml_escape(col[i].strip()), td)\n for col in cols)\n row = ('<tr>' + ''.join(vals) + '</tr>')\n if i < n_header:\n row = ('<thead>' + row + '</thead>')\n rows.append(row)\n rows.append('</table>')\n else:\n for i in range(n_rows):\n row = ' '.join(col[i] for col in cols)\n rows.append(row)\n\n return rows, outs\n\n def _more_tabcol(self, tabcol, max_lines=None, max_width=None,\n show_name=True, show_unit=None, show_dtype=False):\n \"\"\"Interactive \"more\" of a table or column.\n\n Parameters\n ----------\n max_lines : int or None\n Maximum number of rows to output\n\n max_width : int or None\n Maximum character width of output\n\n show_name : bool\n Include a header row for column names. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include a header row for column dtypes. 
Default is False.\n \"\"\"\n allowed_keys = 'f br<>qhpn'\n\n # Count the header lines\n n_header = 0\n if show_name:\n n_header += 1\n if show_unit:\n n_header += 1\n if show_dtype:\n n_header += 1\n if show_name or show_unit or show_dtype:\n n_header += 1\n\n # Set up kwargs for pformat call. Only Table gets max_width.\n kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,\n show_dtype=show_dtype)\n if hasattr(tabcol, 'columns'): # tabcol is a table\n kwargs['max_width'] = max_width\n\n # If max_lines is None (=> query screen size) then increase by 2.\n # This is because get_pprint_size leaves 6 extra lines so that in\n # ipython you normally see the last input line.\n max_lines1, max_width = self._get_pprint_size(max_lines, max_width)\n if max_lines is None:\n max_lines1 += 2\n delta_lines = max_lines1 - n_header\n\n # Set up a function to get a single character on any platform\n inkey = Getch()\n\n i0 = 0 # First table/column row to show\n showlines = True\n while True:\n i1 = i0 + delta_lines # Last table/col row to show\n if showlines: # Don't always show the table (e.g. after help)\n try:\n os.system('cls' if os.name == 'nt' else 'clear')\n except Exception:\n pass # No worries if clear screen call fails\n lines = tabcol[i0:i1].pformat(**kwargs)\n colors = ('red' if i < n_header else 'default'\n for i in range(len(lines)))\n for color, line in zip(colors, lines):\n color_print(line, color)\n showlines = True\n print()\n print(\"-- f, <space>, b, r, p, n, <, >, q h (help) --\", end=' ')\n # Get a valid key\n while True:\n try:\n key = inkey().lower()\n except Exception:\n print(\"\\n\")\n log.error('Console does not support getting a character'\n ' as required by more(). Use pprint() instead.')\n return\n if key in allowed_keys:\n break\n print(key)\n\n if key.lower() == 'q':\n break\n elif key == ' ' or key == 'f':\n i0 += delta_lines\n elif key == 'b':\n i0 = i0 - delta_lines\n elif key == 'r':\n pass\n elif key == '<':\n i0 = 0\n elif key == '>':\n i0 = len(tabcol)\n elif key == 'p':\n i0 -= 1\n elif key == 'n':\n i0 += 1\n elif key == 'h':\n showlines = False\n print(\"\"\"\n Browsing keys:\n f, <space> : forward one page\n b : back one page\n r : refresh same page\n n : next row\n p : previous row\n < : go to beginning\n > : go to end\n q : quit browsing\n h : print this help\"\"\", end=' ')\n if i0 < 0:\n i0 = 0\n if i0 >= len(tabcol) - delta_lines:\n i0 = len(tabcol) - delta_lines\n print(\"\\n\")\n",
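The fallback chain in _possible_string_format_functions above is easy to miss on a first read: a user-supplied format string may be a plain format() specifier, a new-style template, or an old-style % template, and the first candidate that actually transforms the value wins (it is then cached per column in _format_funcs). The standalone sketch below mirrors that resolution logic; it is an illustrative re-implementation under those assumptions, not the astropy API, and the name resolve_format_func is hypothetical.

# Standalone sketch (not the astropy API): mirrors the fallback chain used by
# _possible_string_format_functions. The first candidate that changes the
# value is returned; a candidate that leaves the string untouched is skipped,
# just as the real code raises 'the format passed in did nothing.'

def resolve_format_func(format_, val):
    candidates = (
        lambda f, v: format(v, f),   # plain format specifier, e.g. '7.3f'
        lambda f, v: f.format(v),    # new-style template, e.g. '{:7.3f}'
        lambda f, v: f % v,          # old-style template, e.g. '%7.3f'
    )
    for func in candidates:
        try:
            out = func(format_, val)
        except Exception:
            continue
        if out != format_:           # require the format to actually do something
            return func, out
    raise ValueError('Unable to parse format string {0}'.format(format_))

# Example: each spelling resolves to a different candidate, same output.
for fmt in ('7.3f', '{:7.3f}', '%7.3f'):
    _, out = resolve_format_func(fmt, 3.14159)
    print(repr(out))                 # '  3.142' in all three cases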
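Similarly, the fill/align handling in _pformat_col packs two pieces of information into one short spec string: an optional fill character followed by one of '<', '^', '>' or '=', with '=' restricted to a '0' fill that maps onto str.zfill. A minimal sketch of the same parsing under those assumptions follows; justify is a hypothetical helper, not an astropy function.

# Illustrative sketch of the fill/align parsing used by _pformat_col.
import re

def justify(col_strs, align, width):
    match = re.match(r'(?P<fill>.?)(?P<align>[<^>=])', align)
    if not match:
        raise ValueError("align must be one of '<', '^', '>', or '='")
    fill_char = match.group('fill')
    align_char = match.group('align')
    if align_char == '=':
        if fill_char != '0':
            raise ValueError("fill character must be '0' for '=' align")
        fill_char = ''               # str.zfill takes no fill-char argument
    methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}
    args = (width, fill_char) if fill_char else (width,)
    return [getattr(s, methods[align_char])(*args) for s in col_strs]

print(justify(['1.2', '34.5'], '>', 6))    # ['   1.2', '  34.5']
print(justify(['1.2', '34.5'], '.^', 6))   # ['.1.2..', '.34.5.']
print(justify(['1.2', '34.5'], '0=', 6))   # ['0001.2', '0034.5']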
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis module defines base classes for all models. The base class of all\nmodels is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is\nthe base class for all fittable models. Fittable models can be linear or\nnonlinear in a regression analysis sense.\n\nAll models provide a `__call__` method which performs the transformation in\na purely mathematical way, i.e. the models are unitless. Model instances can\nrepresent either a single model, or a \"model set\" representing multiple copies\nof the same type of model, but with potentially different values of the\nparameters in each model making up the set.\n\"\"\"\n\nfrom __future__ import (absolute_import, unicode_literals, division,\n print_function)\n\nimport abc\nimport copy\nimport inspect\nimport functools\nimport operator\nimport sys\nimport types\n\nfrom collections import defaultdict, OrderedDict\nfrom itertools import chain, islice\n\nimport numpy as np\n\nfrom ..utils import indent, isinstancemethod, metadata\nfrom ..extern import six\nfrom ..extern.six.moves import copyreg, zip\nfrom ..table import Table\nfrom ..units import Quantity, UnitsError, dimensionless_unscaled\nfrom ..units.utils import quantity_asanyarray\nfrom ..utils import (sharedmethod, find_current_module,\n InheritDocstrings, OrderedDescriptorContainer,\n check_broadcast, IncompatibleShapeError, isiterable)\nfrom ..utils.codegen import make_function_with_signature\nfrom ..utils.compat import suppress\nfrom ..utils.compat.funcsigs import signature\nfrom .utils import (combine_labels, make_binary_operator_eval,\n ExpressionTree, AliasDict, get_inputs_and_params,\n _BoundingBox, _combine_equivalency_dict)\nfrom ..nddata.utils import add_array, extract_array\n\nfrom .parameters import Parameter, InputParameterError, param_repr_oneline\n\n\n__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',\n 'custom_model', 'ModelDefinitionError']\n\n\nclass ModelDefinitionError(TypeError):\n \"\"\"Used for incorrect models definitions\"\"\"\n\n\ndef _model_oper(oper, **kwargs):\n \"\"\"\n Returns a function that evaluates a given Python arithmetic operator\n between two models. The operator should be given as a string, like ``'+'``\n or ``'**'``.\n\n Any additional keyword arguments passed in are passed to\n `_CompoundModelMeta._from_operator`.\n \"\"\"\n\n # Note: Originally this used functools.partial, but that won't work when\n # used in the class definition of _CompoundModelMeta since\n # _CompoundModelMeta has not been defined yet.\n\n # Perform an arithmetic operation on two models.\n return lambda left, right: _CompoundModelMeta._from_operator(oper,\n left, right, **kwargs)\n\n\nclass _ModelMeta(OrderedDescriptorContainer, InheritDocstrings, abc.ABCMeta):\n \"\"\"\n Metaclass for Model.\n\n Currently just handles auto-generating the param_names list based on\n Parameter descriptors declared at the class-level of Model subclasses.\n \"\"\"\n\n registry = set()\n \"\"\"\n A registry of all known concrete (non-abstract) Model subclasses.\n \"\"\"\n\n _is_dynamic = False\n \"\"\"\n This flag signifies whether this class was created in the \"normal\" way,\n with a class statement in the body of a module, as opposed to a call to\n `type` or some other metaclass constructor, such that the resulting class\n does not belong to a specific module. 
This is important for pickling of\n dynamic classes.\n\n This flag is always forced to False for new classes, so code that creates\n dynamic classes should manually set it to True on those classes when\n creating them.\n \"\"\"\n\n # Default empty dict for _parameters_, which will be empty on model\n # classes that don't have any Parameters\n _parameters_ = OrderedDict()\n\n def __new__(mcls, name, bases, members):\n # See the docstring for _is_dynamic above\n if '_is_dynamic' not in members:\n members['_is_dynamic'] = mcls._is_dynamic\n\n return super(_ModelMeta, mcls).__new__(mcls, name, bases, members)\n\n def __init__(cls, name, bases, members):\n # Make sure OrderedDescriptorContainer gets to run before doing\n # anything else\n super(_ModelMeta, cls).__init__(name, bases, members)\n\n if cls._parameters_:\n if hasattr(cls, '_param_names'):\n # Slight kludge to support compound models, where\n # cls.param_names is a property; could be improved with a\n # little refactoring but fine for now\n cls._param_names = tuple(cls._parameters_)\n else:\n cls.param_names = tuple(cls._parameters_)\n\n cls._create_inverse_property(members)\n cls._create_bounding_box_property(members)\n cls._handle_special_methods(members)\n\n if not inspect.isabstract(cls) and not name.startswith('_'):\n cls.registry.add(cls)\n\n def __repr__(cls):\n \"\"\"\n Custom repr for Model subclasses.\n \"\"\"\n\n return cls._format_cls_repr()\n\n def _repr_pretty_(cls, p, cycle):\n \"\"\"\n Repr for IPython's pretty printer.\n\n By default IPython \"pretty prints\" classes, so we need to implement\n this so that IPython displays the custom repr for Models.\n \"\"\"\n\n p.text(repr(cls))\n\n def __reduce__(cls):\n if not cls._is_dynamic:\n # Just return a string specifying where the class can be imported\n # from\n return cls.__name__\n else:\n members = dict(cls.__dict__)\n # Delete any ABC-related attributes--these will be restored when\n # the class is reconstructed:\n for key in list(members):\n if key.startswith('_abc_'):\n del members[key]\n\n # Delete custom __init__ and __call__ if they exist:\n for key in ('__init__', '__call__'):\n if key in members:\n del members[key]\n\n return (type(cls), (cls.__name__, cls.__bases__, members))\n\n @property\n def name(cls):\n \"\"\"\n The name of this model class--equivalent to ``cls.__name__``.\n\n This attribute is provided for symmetry with the `Model.name` attribute\n of model instances.\n \"\"\"\n\n return cls.__name__\n\n @property\n def n_inputs(cls):\n return len(cls.inputs)\n\n @property\n def n_outputs(cls):\n return len(cls.outputs)\n\n @property\n def _is_concrete(cls):\n \"\"\"\n A class-level property that determines whether the class is a concrete\n implementation of a Model--i.e. it is not some abstract base class or\n internal implementation detail (i.e. begins with '_').\n \"\"\"\n return not (cls.__name__.startswith('_') or inspect.isabstract(cls))\n\n def rename(cls, name):\n \"\"\"\n Creates a copy of this model class with a new name.\n\n The new class is technically a subclass of the original class, so that\n instance and type checks will still work. 
For example::\n\n >>> from astropy.modeling.models import Rotation2D\n >>> SkyRotation = Rotation2D.rename('SkyRotation')\n >>> SkyRotation\n <class '__main__.SkyRotation'>\n Name: SkyRotation (Rotation2D)\n Inputs: ('x', 'y')\n Outputs: ('x', 'y')\n Fittable parameters: ('angle',)\n >>> issubclass(SkyRotation, Rotation2D)\n True\n >>> r = SkyRotation(90)\n >>> isinstance(r, Rotation2D)\n True\n \"\"\"\n\n if six.PY2 and isinstance(name, six.text_type):\n # Unicode names are not allowed in Python 2, so just convert to\n # ASCII. As such, for cross-compatibility all model names should\n # just be ASCII for now.\n name = name.encode('ascii')\n\n mod = find_current_module(2)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n new_cls = type(name, (cls,), {})\n # On Python 2 __module__ must be a str, not unicode\n new_cls.__module__ = str(modname)\n\n if hasattr(cls, '__qualname__'):\n if new_cls.__module__ == '__main__':\n # __main__ is not added to a class's qualified name\n new_cls.__qualname__ = name\n else:\n new_cls.__qualname__ = '{0}.{1}'.format(modname, name)\n\n return new_cls\n\n def _create_inverse_property(cls, members):\n inverse = members.get('inverse')\n if inverse is None or cls.__bases__[0] is object:\n # The latter clause is to prevent the below code from running on\n # the Model base class, which implements the default getter and\n # setter for .inverse\n return\n\n if isinstance(inverse, property):\n # We allow the @property decorator to be omitted entirely from\n # the class definition, though its use should be encouraged for\n # clarity\n inverse = inverse.fget\n\n # Store the inverse getter internally, then delete the given .inverse\n # attribute so that cls.inverse resolves to Model.inverse instead\n cls._inverse = inverse\n del cls.inverse\n\n def _create_bounding_box_property(cls, members):\n \"\"\"\n Takes any bounding_box defined on a concrete Model subclass (either\n as a fixed tuple or a property or method) and wraps it in the generic\n getter/setter interface for the bounding_box attribute.\n \"\"\"\n\n # TODO: Much of this is verbatim from _create_inverse_property--I feel\n # like there could be a way to generify properties that work this way,\n # but for the time being that would probably only confuse things more.\n bounding_box = members.get('bounding_box')\n if bounding_box is None or cls.__bases__[0] is object:\n return\n\n if isinstance(bounding_box, property):\n bounding_box = bounding_box.fget\n\n if not callable(bounding_box):\n # See if it's a hard-coded bounding_box (as a sequence) and\n # normalize it\n try:\n bounding_box = _BoundingBox.validate(cls, bounding_box)\n except ValueError as exc:\n raise ModelDefinitionError(exc.args[0])\n else:\n sig = signature(bounding_box)\n # May be a method that only takes 'self' as an argument (like a\n # property, but the @property decorator was forgotten)\n # TODO: Maybe warn in the above case?\n #\n # However, if the method takes additional arguments then this is a\n # parameterized bounding box and should be callable\n if len(sig.parameters) > 1:\n bounding_box = \\\n cls._create_bounding_box_subclass(bounding_box, sig)\n\n if six.PY2 and isinstance(bounding_box, types.MethodType):\n bounding_box = bounding_box.__func__\n\n # See the Model.bounding_box getter definition for how this attribute\n # is used\n cls._bounding_box = bounding_box\n del cls.bounding_box\n\n def _create_bounding_box_subclass(cls, func, sig):\n \"\"\"\n For Models that take optional arguments for defining their 
bounding\n box, we create a subclass of _BoundingBox with a ``__call__`` method\n that supports those additional arguments.\n\n Takes the function's Signature as an argument since that is already\n computed in _create_bounding_box_property, so no need to duplicate that\n effort.\n \"\"\"\n\n # TODO: Might be convenient if calling the bounding box also\n # automatically sets the _user_bounding_box. So that\n #\n # >>> model.bounding_box(arg=1)\n #\n # in addition to returning the computed bbox, also sets it, so that\n # it's a shortcut for\n #\n # >>> model.bounding_box = model.bounding_box(arg=1)\n #\n # Not sure if that would be non-obvious / confusing though...\n\n def __call__(self, **kwargs):\n return func(self._model, **kwargs)\n\n kwargs = []\n for idx, param in enumerate(sig.parameters.values()):\n if idx == 0:\n # Presumed to be a 'self' argument\n continue\n\n if param.default is param.empty:\n raise ModelDefinitionError(\n 'The bounding_box method for {0} is not correctly '\n 'defined: If defined as a method all arguments to that '\n 'method (besides self) must be keyword arguments with '\n 'default values that can be used to compute a default '\n 'bounding box.'.format(cls.name))\n\n kwargs.append((param.name, param.default))\n\n __call__ = make_function_with_signature(__call__, ('self',), kwargs)\n\n return type(str('_{0}BoundingBox'.format(cls.name)), (_BoundingBox,),\n {'__call__': __call__})\n\n def _handle_special_methods(cls, members):\n\n # Handle init creation from inputs\n def update_wrapper(wrapper, cls):\n # Set up the new __call__'s metadata attributes as though it were\n # manually defined in the class definition\n # A bit like functools.update_wrapper but uses the class instead of\n # the wrapped function\n wrapper.__module__ = cls.__module__\n wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__\n if hasattr(cls, '__qualname__'):\n wrapper.__qualname__ = '{0}.{1}'.format(\n cls.__qualname__, wrapper.__name__)\n\n if ('__call__' not in members and 'inputs' in members and\n isinstance(members['inputs'], tuple)):\n\n # Don't create a custom __call__ for classes that already have one\n # explicitly defined (this includes the Model base class, and any\n # other classes that manually override __call__\n\n def __call__(self, *inputs, **kwargs):\n \"\"\"Evaluate this model on the supplied inputs.\"\"\"\n return super(cls, self).__call__(*inputs, **kwargs)\n\n # When called, models can take two optional keyword arguments:\n #\n # * model_set_axis, which indicates (for multi-dimensional input)\n # which axis is used to indicate different models\n #\n # * equivalencies, a dictionary of equivalencies to be applied to\n # the input values, where each key should correspond to one of\n # the inputs.\n #\n # The following code creates the __call__ function with these\n # two keyword arguments.\n inputs = members['inputs']\n args = ('self',) + inputs\n new_call = make_function_with_signature(\n __call__, args, [('model_set_axis', None),\n ('with_bounding_box', False),\n ('fill_value', np.nan),\n ('equivalencies', None)])\n\n # The following makes it look like __call__ was defined in the class\n update_wrapper(new_call, cls)\n\n cls.__call__ = new_call\n\n if ('__init__' not in members and not inspect.isabstract(cls) and\n cls._parameters_):\n\n # If *all* the parameters have default values we can make them\n # keyword arguments; otherwise they must all be positional arguments\n if all(p.default is not None for p in six.itervalues(cls._parameters_)):\n args = ('self',)\n kwargs 
= []\n for param_name in cls.param_names:\n default = cls._parameters_[param_name].default\n unit = cls._parameters_[param_name].unit\n # If the unit was specified in the parameter but the default\n # is not a Quantity, attach the unit to the default.\n if unit is not None:\n default = Quantity(default, unit, copy=False)\n kwargs.append((param_name, default))\n else:\n args = ('self',) + cls.param_names\n kwargs = {}\n\n def __init__(self, *params, **kwargs):\n return super(cls, self).__init__(*params, **kwargs)\n\n new_init = make_function_with_signature(\n __init__, args, kwargs, varkwargs='kwargs')\n update_wrapper(new_init, cls)\n cls.__init__ = new_init\n\n # *** Arithmetic operators for creating compound models ***\n __add__ = _model_oper('+')\n __sub__ = _model_oper('-')\n __mul__ = _model_oper('*')\n __truediv__ = _model_oper('/')\n __pow__ = _model_oper('**')\n __or__ = _model_oper('|')\n __and__ = _model_oper('&')\n\n if six.PY2:\n # The classic __div__ operator need only be implemented for Python 2\n # without from __future__ import division\n __div__ = _model_oper('/')\n\n # *** Other utilities ***\n\n def _format_cls_repr(cls, keywords=[]):\n \"\"\"\n Internal implementation of ``__repr__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__repr__`` while keeping the same basic\n formatting.\n \"\"\"\n\n # For the sake of familiarity start the output with the standard class\n # __repr__\n parts = [super(_ModelMeta, cls).__repr__()]\n\n if not cls._is_concrete:\n return parts[0]\n\n def format_inheritance(cls):\n bases = []\n for base in cls.mro()[1:]:\n if not issubclass(base, Model):\n continue\n elif (inspect.isabstract(base) or\n base.__name__.startswith('_')):\n break\n bases.append(base.name)\n if bases:\n return '{0} ({1})'.format(cls.name, ' -> '.join(bases))\n else:\n return cls.name\n\n try:\n default_keywords = [\n ('Name', format_inheritance(cls)),\n ('Inputs', cls.inputs),\n ('Outputs', cls.outputs),\n ]\n\n if cls.param_names:\n default_keywords.append(('Fittable parameters',\n cls.param_names))\n\n for keyword, value in default_keywords + keywords:\n if value is not None:\n parts.append('{0}: {1}'.format(keyword, value))\n\n return '\\n'.join(parts)\n except Exception:\n # If any of the above formatting fails fall back on the basic repr\n # (this is particularly useful in debugging)\n return parts[0]\n\n\n@six.add_metaclass(_ModelMeta)\nclass Model(object):\n \"\"\"\n Base class for all models.\n\n This is an abstract class and should not be instantiated directly.\n\n This class sets the constraints and other properties for all individual\n parameters and performs parameter validation.\n\n The following initialization arguments apply to the majority of Model\n subclasses by default (exceptions include specialized utility models\n like `~astropy.modeling.mappings.Mapping`). Parametric models take all\n their parameters as arguments, followed by any of the following optional\n keyword arguments:\n\n Parameters\n ----------\n name : str, optional\n A human-friendly name associated with this model instance\n (particularly useful for identifying the individual components of a\n compound model).\n\n meta : dict, optional\n An optional dict of user-defined metadata to attach to this model.\n How this is used and interpreted is up to the user or individual use\n case.\n\n n_models : int, optional\n If given an integer greater than 1, a *model set* is instantiated\n instead of a single model. 
This affects how the parameter arguments\n are interpreted. In this case each parameter must be given as a list\n or array--elements of this array are taken along the first axis (or\n ``model_set_axis`` if specified), such that the Nth element is the\n value of that parameter for the Nth model in the set.\n\n See the section on model sets in the documentation for more details.\n\n model_set_axis : int, optional\n This argument only applies when creating a model set (i.e. ``n_models >\n 1``). It changes how parameter values are interpreted. Normally the\n first axis of each input parameter array (properly the 0th axis) is\n taken as the axis corresponding to the model sets. However, any axis\n of an input array may be taken as this \"model set axis\". This accepts\n negative integers as well--for example use ``model_set_axis=-1`` if the\n last (most rapidly changing) axis should be associated with the model\n sets. Also, ``model_set_axis=False`` can be used to tell that a given\n input should be used to evaluate all the models in the model set.\n\n fixed : dict, optional\n Dictionary ``{parameter_name: bool}`` setting the fixed constraint\n for one or more parameters. `True` means the parameter is held fixed\n during fitting and is prevented from updates once an instance of the\n model has been created.\n\n Alternatively the `~astropy.modeling.Parameter.fixed` property of a\n parameter may be used to lock or unlock individual parameters.\n\n tied : dict, optional\n Dictionary ``{parameter_name: callable}`` of parameters which are\n linked to some other parameter. The dictionary values are callables\n providing the linking relationship.\n\n Alternatively the `~astropy.modeling.Parameter.tied` property of a\n parameter may be used to set the ``tied`` constraint on individual\n parameters.\n\n bounds : dict, optional\n A dictionary ``{parameter_name: value}`` of lower and upper bounds of\n parameters. Keys are parameter names. Values are a list or a tuple\n of length 2 giving the desired range for the parameter.\n\n Alternatively the `~astropy.modeling.Parameter.min` and\n `~astropy.modeling.Parameter.max` or\n `~astropy.modeling.Parameter.bounds` properties of a parameter may be\n used to set bounds on individual parameters.\n\n eqcons : list, optional\n List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``\n in a successfully optimized problem.\n\n ineqcons : list, optional\n List of functions of length n such that ``ineqcons[j](x0, *args) >=\n 0.0`` in a successfully optimized problem.\n\n Examples\n --------\n >>> from astropy.modeling import models\n >>> def tie_center(model):\n ... mean = 50 * model.stddev\n ... return mean\n >>> tied_parameters = {'mean': tie_center}\n\n Specify that ``'mean'`` is a tied parameter in one of two ways:\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,\n ... tied=tied_parameters)\n\n or\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)\n >>> g1.mean.tied\n False\n >>> g1.mean.tied = tie_center\n >>> g1.mean.tied\n <function tie_center at 0x...>\n\n Fixed parameters:\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,\n ... 
fixed={'stddev': True})\n >>> g1.stddev.fixed\n True\n\n or\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)\n >>> g1.stddev.fixed\n False\n >>> g1.stddev.fixed = True\n >>> g1.stddev.fixed\n True\n \"\"\"\n\n parameter_constraints = Parameter.constraints\n \"\"\"\n Primarily for informational purposes, these are the types of constraints\n that can be set on a model's parameters.\n \"\"\"\n model_constraints = ('eqcons', 'ineqcons')\n \"\"\"\n Primarily for informational purposes, these are the types of constraints\n that constrain model evaluation.\n \"\"\"\n\n param_names = ()\n \"\"\"\n Names of the parameters that describe models of this type.\n\n The parameters in this tuple are in the same order they should be passed in\n when initializing a model of a specific type. Some types of models, such\n as polynomial models, have a different number of parameters depending on\n some other property of the model, such as the degree.\n\n When defining a custom model class the value of this attribute is\n automatically set by the `~astropy.modeling.Parameter` attributes defined\n in the class body.\n \"\"\"\n\n inputs = ()\n \"\"\"The name(s) of the input variable(s) on which a model is evaluated.\"\"\"\n outputs = ()\n \"\"\"The name(s) of the output(s) of the model.\"\"\"\n\n standard_broadcasting = True\n fittable = False\n linear = True\n\n meta = metadata.MetaData()\n \"\"\"A dict-like object to store optional information.\"\"\"\n\n # By default models either use their own inverse property or have no\n # inverse at all, but users may also assign a custom inverse to a model,\n # optionally; in that case it is of course up to the user to determine\n # whether their inverse is *actually* an inverse to the model they assign\n # it to.\n _inverse = None\n _user_inverse = None\n\n _bounding_box = None\n _user_bounding_box = None\n\n # Default n_models attribute, so that __len__ is still defined even when a\n # model hasn't completed initialization yet\n _n_models = 1\n\n # Enforce strict units on inputs to evaluate. If this is set to True, input\n # values to evaluate have to be in the exact right units specified by\n # input_units. In this case, if the input quantities are convertible to\n # input_units, they are converted.\n input_units_strict = False\n\n # Allow dimensionless input (and corresponding output). If this is True,\n # input values to evaluate will gain the units specified in input_units.\n # Only has an effect if input_units is defined.\n input_units_allow_dimensionless = False\n\n # Default equivalencies to apply to input values. If set, this should be a\n # dictionary where each key is a string that corresponds to one of the model\n # inputs. 
Only has an effect if input_units is defined.\n input_units_equivalencies = None\n\n def __init__(self, *args, **kwargs):\n super(Model, self).__init__()\n meta = kwargs.pop('meta', None)\n if meta is not None:\n self.meta = meta\n\n self._name = kwargs.pop('name', None)\n\n self._initialize_constraints(kwargs)\n # Remaining keyword args are either parameter values or invalid\n # Parameter values must be passed in as keyword arguments in order to\n # distinguish them\n self._initialize_parameters(args, kwargs)\n\n def __repr__(self):\n return self._format_repr()\n\n def __str__(self):\n return self._format_str()\n\n def __len__(self):\n return self._n_models\n\n def __call__(self, *inputs, **kwargs):\n \"\"\"\n Evaluate this model using the given input(s) and the parameter values\n that were specified when the model was instantiated.\n \"\"\"\n\n inputs, format_info = self.prepare_inputs(*inputs, **kwargs)\n\n # Check whether any of the inputs are quantities\n inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])\n\n parameters = self._param_sets(raw=True, units=True)\n with_bbox = kwargs.pop('with_bounding_box', False)\n fill_value = kwargs.pop('fill_value', np.nan)\n bbox = None\n if with_bbox:\n try:\n bbox = self.bounding_box\n except NotImplementedError:\n bbox = None\n if self.n_inputs > 1 and bbox is not None:\n # bounding_box is in python order - convert it to the order of the inputs\n bbox = bbox[::-1]\n if bbox is None:\n outputs = self.evaluate(*chain(inputs, parameters))\n else:\n if self.n_inputs == 1:\n bbox = [bbox]\n # indices where input is outside the bbox\n # have a value of 1 in ``nan_ind``\n nan_ind = np.zeros(inputs[0].shape, dtype=np.bool)\n for ind, inp in enumerate(inputs):\n # Pass an ``out`` array so that ``axis_ind`` is array for scalars as well.\n axis_ind = np.zeros(inp.shape, dtype=np.bool)\n axis_ind = np.logical_or(inp < bbox[ind][0], inp > bbox[ind][1], out=axis_ind)\n nan_ind[axis_ind] = 1\n # get an array with indices of valid inputs\n valid_ind = np.logical_not(nan_ind).nonzero()\n # inputs holds only inputs within the bbox\n args = []\n for input in inputs:\n if not input.shape:\n # shape is ()\n if nan_ind:\n outputs = [fill_value for a in args]\n else:\n args.append(input)\n else:\n args.append(input[valid_ind])\n valid_result = self.evaluate(*chain(args, parameters))\n if self.n_outputs == 1:\n valid_result = [valid_result]\n # combine the valid results with the ``fill_value`` values\n # outside the bbox\n result = [np.zeros(inputs[0].shape) + fill_value for i in range(len(valid_result))]\n for ind, r in enumerate(valid_result):\n if not result[ind].shape:\n # shape is ()\n result[ind] = r\n else:\n result[ind][valid_ind] = r\n # format output\n if self.n_outputs == 1:\n outputs = np.asarray(result[0])\n else:\n outputs = [np.asarray(r) for r in result]\n else:\n outputs = self.evaluate(*chain(inputs, parameters))\n if self.n_outputs == 1:\n outputs = (outputs,)\n\n outputs = self.prepare_outputs(format_info, *outputs, **kwargs)\n\n # If input values were quantities, we use return_units to cast\n # the return values to the units specified by return_units.\n if self.return_units and inputs_are_quantity:\n # We allow a non-iterable unit only if there is one output\n if self.n_outputs == 1 and not isiterable(self.return_units):\n return_units = {self.outputs[0]: self.return_units}\n else:\n return_units = self.return_units\n\n outputs = tuple([Quantity(out, return_units[out_name], subok=True)\n for out, out_name in zip(outputs, 
self.outputs)])\n\n if self.n_outputs == 1:\n return outputs[0]\n else:\n return outputs\n\n # *** Arithmetic operators for creating compound models ***\n __add__ = _model_oper('+')\n __sub__ = _model_oper('-')\n __mul__ = _model_oper('*')\n __truediv__ = _model_oper('/')\n __pow__ = _model_oper('**')\n __or__ = _model_oper('|')\n __and__ = _model_oper('&')\n\n if six.PY2:\n __div__ = _model_oper('/')\n\n # *** Properties ***\n @property\n def name(self):\n \"\"\"User-provided name for this model instance.\"\"\"\n\n return self._name\n\n @name.setter\n def name(self, val):\n \"\"\"Assign a (new) name to this model.\"\"\"\n\n self._name = val\n\n @property\n def n_inputs(self):\n \"\"\"\n The number of inputs to this model.\n\n Equivalent to ``len(model.inputs)``.\n \"\"\"\n\n return len(self.inputs)\n\n @property\n def n_outputs(self):\n \"\"\"\n The number of outputs from this model.\n\n Equivalent to ``len(model.outputs)``.\n \"\"\"\n return len(self.outputs)\n\n @property\n def model_set_axis(self):\n \"\"\"\n The index of the model set axis--that is the axis of a parameter array\n that pertains to which model a parameter value pertains to--as\n specified when the model was initialized.\n\n See the documentation on `Model Sets\n <http://docs.astropy.org/en/stable/modeling/models.html#model-sets>`_\n for more details.\n \"\"\"\n\n return self._model_set_axis\n\n @property\n def param_sets(self):\n \"\"\"\n Return parameters as a pset.\n\n This is a list with one item per parameter set, which is an array of\n that parameter's values across all parameter sets, with the last axis\n associated with the parameter set.\n \"\"\"\n\n return self._param_sets()\n\n @property\n def parameters(self):\n \"\"\"\n A flattened array of all parameter values in all parameter sets.\n\n Fittable parameters maintain this list and fitters modify it.\n \"\"\"\n\n # Currently the sequence of a model's parameters must be contiguous\n # within the _parameters array (which may be a view of a larger array,\n # for example when taking a sub-expression of a compound model), so\n # the assumption here is reliable:\n if not self.param_names:\n # Trivial, but not unheard of\n return self._parameters\n\n start = self._param_metrics[self.param_names[0]]['slice'].start\n stop = self._param_metrics[self.param_names[-1]]['slice'].stop\n\n return self._parameters[start:stop]\n\n @parameters.setter\n def parameters(self, value):\n \"\"\"\n Assigning to this attribute updates the parameters array rather than\n replacing it.\n \"\"\"\n\n if not self.param_names:\n return\n\n start = self._param_metrics[self.param_names[0]]['slice'].start\n stop = self._param_metrics[self.param_names[-1]]['slice'].stop\n\n try:\n value = np.array(value).flatten()\n self._parameters[start:stop] = value\n except ValueError as e:\n raise InputParameterError(\n \"Input parameter values not compatible with the model \"\n \"parameters array: {0}\".format(e))\n\n @property\n def fixed(self):\n \"\"\"\n A `dict` mapping parameter names to their fixed constraint.\n \"\"\"\n\n return self._constraints['fixed']\n\n @property\n def tied(self):\n \"\"\"\n A `dict` mapping parameter names to their tied constraint.\n \"\"\"\n\n return self._constraints['tied']\n\n @property\n def bounds(self):\n \"\"\"\n A `dict` mapping parameter names to their upper and lower bounds as\n ``(min, max)`` tuples or ``[min, max]`` lists.\n \"\"\"\n\n return self._constraints['bounds']\n\n @property\n def eqcons(self):\n \"\"\"List of parameter equality constraints.\"\"\"\n\n 
return self._constraints['eqcons']\n\n @property\n def ineqcons(self):\n \"\"\"List of parameter inequality constraints.\"\"\"\n\n return self._constraints['ineqcons']\n\n @property\n def inverse(self):\n \"\"\"\n Returns a new `~astropy.modeling.Model` instance which performs the\n inverse transform, if an analytic inverse is defined for this model.\n\n Even on models that don't have an inverse defined, this property can be\n set with a manually-defined inverse, such as a pre-computed or\n experimentally determined inverse (often given as a\n `~astropy.modeling.polynomial.PolynomialModel`, but not by\n requirement).\n\n A custom inverse can be deleted with ``del model.inverse``. In this\n case the model's inverse is reset to its default, if a default exists\n (otherwise the default is to raise `NotImplementedError`).\n\n Note to authors of `~astropy.modeling.Model` subclasses: To define an\n inverse for a model simply override this property to return the\n appropriate model representing the inverse. The machinery that will\n make the inverse manually-overridable is added automatically by the\n base class.\n \"\"\"\n\n if self._user_inverse is not None:\n return self._user_inverse\n elif self._inverse is not None:\n return self._inverse()\n\n raise NotImplementedError(\"An analytical inverse transform has not \"\n \"been implemented for this model.\")\n\n @inverse.setter\n def inverse(self, value):\n if not isinstance(value, (Model, type(None))):\n raise ValueError(\n \"The ``inverse`` attribute may be assigned a `Model` \"\n \"instance or `None` (where `None` explicitly forces the \"\n \"model to have no inverse).\")\n\n self._user_inverse = value\n\n @inverse.deleter\n def inverse(self):\n \"\"\"\n Resets the model's inverse to its default (if one exists, otherwise\n the model will have no inverse).\n \"\"\"\n\n del self._user_inverse\n\n @property\n def has_user_inverse(self):\n \"\"\"\n A flag indicating whether or not a custom inverse model has been\n assigned to this model by a user, via assignment to ``model.inverse``.\n \"\"\"\n\n return self._user_inverse is not None\n\n @property\n def bounding_box(self):\n r\"\"\"\n A `tuple` of length `n_inputs` defining the bounding box limits, or\n `None` for no bounding box.\n\n The default limits are given by a ``bounding_box`` property or method\n defined in the class body of a specific model. If not defined then\n this property just raises `NotImplementedError` by default (but may be\n assigned a custom value by a user). ``bounding_box`` can be set\n manually to an array-like object of shape ``(model.n_inputs, 2)``. For\n further usage, see :ref:`bounding-boxes`\n\n The limits are ordered according to the `numpy` indexing\n convention, and are the reverse of the model input order,\n e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:\n\n * for 1D: ``(x_low, x_high)``\n * for 2D: ``((y_low, y_high), (x_low, x_high))``\n * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``\n\n Examples\n --------\n\n Setting the ``bounding_box`` limits for a 1D and 2D model:\n\n >>> from astropy.modeling.models import Gaussian1D, Gaussian2D\n >>> model_1d = Gaussian1D()\n >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)\n >>> model_1d.bounding_box = (-5, 5)\n >>> model_2d.bounding_box = ((-6, 6), (-5, 5))\n\n Setting the bounding_box limits for a user-defined 3D `custom_model`:\n\n >>> from astropy.modeling.models import custom_model\n >>> def const3d(x, y, z, amp=1):\n ... 
return amp\n ...\n >>> Const3D = custom_model(const3d)\n >>> model_3d = Const3D()\n >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))\n\n To reset ``bounding_box`` to its default limits just delete the\n user-defined value--this will reset it back to the default defined\n on the class:\n\n >>> del model_1d.bounding_box\n\n To disable the bounding box entirely (including the default),\n set ``bounding_box`` to `None`:\n\n >>> model_1d.bounding_box = None\n >>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"astropy\\modeling\\core.py\", line 980, in bounding_box\n \"No bounding box is defined for this model (note: the \"\n NotImplementedError: No bounding box is defined for this model (note:\n the bounding box was explicitly disabled for this model; use `del\n model.bounding_box` to restore the default bounding box, if one is\n defined for this model).\n \"\"\"\n\n if self._user_bounding_box is not None:\n if self._user_bounding_box is NotImplemented:\n raise NotImplementedError(\n \"No bounding box is defined for this model (note: the \"\n \"bounding box was explicitly disabled for this model; \"\n \"use `del model.bounding_box` to restore the default \"\n \"bounding box, if one is defined for this model).\")\n return self._user_bounding_box\n elif self._bounding_box is None:\n raise NotImplementedError(\n \"No bounding box is defined for this model.\")\n elif isinstance(self._bounding_box, _BoundingBox):\n # This typically implies a hard-coded bounding box. This will\n # probably be rare, but it is an option\n return self._bounding_box\n elif isinstance(self._bounding_box, types.MethodType):\n return self._bounding_box()\n else:\n # The only other allowed possibility is that it's a _BoundingBox\n # subclass, so we call it with its default arguments and return an\n # instance of it (that can be called to recompute the bounding box\n # with any optional parameters)\n # (In other words, in this case self._bounding_box is a *class*)\n bounding_box = self._bounding_box((), _model=self)()\n return self._bounding_box(bounding_box, _model=self)\n\n @bounding_box.setter\n def bounding_box(self, bounding_box):\n \"\"\"\n Assigns the bounding box limits.\n \"\"\"\n\n if bounding_box is None:\n cls = None\n # We use this to explicitly set an unimplemented bounding box (as\n # opposed to no user bounding box defined)\n bounding_box = NotImplemented\n elif (isinstance(self._bounding_box, type) and\n issubclass(self._bounding_box, _BoundingBox)):\n cls = self._bounding_box\n else:\n cls = _BoundingBox\n\n if cls is not None:\n try:\n bounding_box = cls.validate(self, bounding_box)\n except ValueError as exc:\n raise ValueError(exc.args[0])\n\n self._user_bounding_box = bounding_box\n\n @bounding_box.deleter\n def bounding_box(self):\n self._user_bounding_box = None\n\n @property\n def has_user_bounding_box(self):\n \"\"\"\n A flag indicating whether or not a custom bounding_box has been\n assigned to this model by a user, via assignment to\n ``model.bounding_box``.\n \"\"\"\n\n return self._user_bounding_box is not None\n\n # *** Public methods ***\n\n def without_units_for_data(self, **kwargs):\n \"\"\"\n Return an instance of the model for which the parameter values have been\n converted to the right units for the data, then the units have been\n stripped away.\n\n The input and output Quantity objects should be given as keyword\n arguments.\n\n Notes\n -----\n\n This method is needed in order to be 
able to fit models with units in\n the parameters, since we need to temporarily strip away the units from\n the model during the fitting (which might be done by e.g. scipy\n functions).\n\n The units that the parameters should be converted to are not necessarily\n the units of the input data, but are derived from them. Model subclasses\n that want fitting to work in the presence of quantities need to define a\n _parameter_units_for_data_units method that takes the input and output\n units (as two dictionaries) and returns a dictionary giving the target\n units for each parameter.\n \"\"\"\n\n model = self.copy()\n\n inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)\n for inp in self.inputs if kwargs[inp] is not None}\n\n outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)\n for out in self.outputs if kwargs[out] is not None}\n\n parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)\n\n for name, unit in parameter_units.items():\n parameter = getattr(model, name)\n if parameter.unit is not None:\n parameter.value = parameter.quantity.to(unit).value\n parameter._set_unit(None, force=True)\n\n return model\n\n def with_units_from_data(self, **kwargs):\n \"\"\"\n Return an instance of the model which has units for which the parameter\n values are compatible with the data units specified.\n\n The input and output Quantity objects should be given as keyword\n arguments.\n\n Notes\n -----\n\n This method is needed in order to be able to fit models with units in\n the parameters, since we need to temporarily strip away the units from\n the model during the fitting (which might be done by e.g. scipy\n functions).\n\n The units that the parameters will gain are not necessarily the units of\n the input data, but are derived from them. 
Model subclasses that want\n fitting to work in the presence of quantities need to define a\n _parameter_units_for_data_units method that takes the input and output\n units (as two dictionaries) and returns a dictionary giving the target\n units for each parameter.\n \"\"\"\n\n model = self.copy()\n\n inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)\n for inp in self.inputs if kwargs[inp] is not None}\n\n outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)\n for out in self.outputs if kwargs[out] is not None}\n\n parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)\n\n # We are adding units to parameters that already have a value, but we\n # don't want to convert the parameter, just add the unit directly, hence\n # the call to _set_unit.\n for name, unit in parameter_units.items():\n parameter = getattr(model, name)\n parameter._set_unit(unit, force=True)\n\n return model\n\n @property\n def _has_units(self):\n # Returns True if any of the parameters have units\n for param in self.param_names:\n if getattr(self, param).unit is not None:\n return True\n else:\n return False\n\n @property\n def _supports_unit_fitting(self):\n # If the model has a '_parameter_units_for_data_units' method, this\n # indicates that we have enough information to strip the units away\n # and add them back after fitting, when fitting quantities\n return hasattr(self, '_parameter_units_for_data_units')\n\n @abc.abstractmethod\n def evaluate(self, *args, **kwargs):\n \"\"\"Evaluate the model on some input variables.\"\"\"\n\n def sum_of_implicit_terms(self, *args, **kwargs):\n \"\"\"\n Evaluate the sum of any implicit model terms on some input variables.\n This includes any fixed terms used in evaluating a linear model that\n do not have corresponding parameters exposed to the user. The\n prototypical case is `astropy.modeling.functional_models.Shift`, which\n corresponds to a function y = a + bx, where b=1 is intrinsically fixed\n by the type of model, such that sum_of_implicit_terms(x) == x. This\n method is needed by linear fitters to correct the dependent variable\n for the implicit term(s) when solving for the remaining terms\n (ie. a = y - bx).\n \"\"\"\n\n def render(self, out=None, coords=None):\n \"\"\"\n Evaluate a model at fixed positions, respecting the ``bounding_box``.\n\n The key difference relative to evaluating the model directly is that\n this method is limited to a bounding box if the `Model.bounding_box`\n attribute is set.\n\n Parameters\n ----------\n out : `numpy.ndarray`, optional\n An array that the evaluated model will be added to. If this is not\n given (or given as ``None``), a new array will be created.\n coords : array-like, optional\n An array to be used to translate from the model's input coordinates\n to the ``out`` array. It should have the property that\n ``self(coords)`` yields the same shape as ``out``. If ``out`` is\n not specified, ``coords`` will be used to determine the shape of the\n returned array. If this is not provided (or None), the model will be\n evaluated on a grid determined by `Model.bounding_box`.\n\n Returns\n -------\n out : `numpy.ndarray`\n The model added to ``out`` if ``out`` is not ``None``, or else a\n new array from evaluating the model over ``coords``.\n If ``out`` and ``coords`` are both `None`, the returned array is\n limited to the `Model.bounding_box` limits. 
If\n `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.\n\n Raises\n ------\n ValueError\n If ``coords`` are not given and the `Model.bounding_box` of this\n model is not set.\n\n Examples\n --------\n :ref:`bounding-boxes`\n \"\"\"\n\n try:\n bbox = self.bounding_box\n except NotImplementedError:\n bbox = None\n\n ndim = self.n_inputs\n\n if (coords is None) and (out is None) and (bbox is None):\n raise ValueError('If no bounding_box is set, '\n 'coords or out must be input.')\n\n # for consistent indexing\n if ndim == 1:\n if coords is not None:\n coords = [coords]\n if bbox is not None:\n bbox = [bbox]\n\n if coords is not None:\n coords = np.asanyarray(coords, dtype=float)\n # Check dimensions match out and model\n assert len(coords) == ndim\n if out is not None:\n if coords[0].shape != out.shape:\n raise ValueError('inconsistent shape of the output.')\n else:\n out = np.zeros(coords[0].shape)\n\n if out is not None:\n out = np.asanyarray(out, dtype=float)\n if out.ndim != ndim:\n raise ValueError('the array and model must have the same '\n 'number of dimensions.')\n\n if bbox is not None:\n # ensures position is at center pixel, important when using add_array\n pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))\n for bb in bbox]).astype(int).T\n pos, delta = pd\n\n if coords is not None:\n sub_shape = tuple(delta * 2 + 1)\n sub_coords = np.array([extract_array(c, sub_shape, pos)\n for c in coords])\n else:\n limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]\n sub_coords = np.mgrid[limits]\n\n sub_coords = sub_coords[::-1]\n\n if out is None:\n out = self(*sub_coords)\n else:\n try:\n out = add_array(out, self(*sub_coords), pos)\n except ValueError:\n raise ValueError(\n 'The `bounding_box` is larger than the input out in '\n 'one or more dimensions. 
Set '\n '`model.bounding_box = None`.')\n else:\n if coords is None:\n im_shape = out.shape\n limits = [slice(i) for i in im_shape]\n coords = np.mgrid[limits]\n\n coords = coords[::-1]\n\n out += self(*coords)\n\n return out\n\n @property\n def input_units(self):\n \"\"\"\n This property is used to indicate what units or sets of units the\n evaluate method expects, and returns a dictionary mapping inputs to\n units (or `None` if any units are accepted).\n\n Model sub-classes can also use function annotations in evaluate to\n indicate valid input units, in which case this property should\n not be overridden since it will return the input units based on the\n annotations.\n \"\"\"\n if hasattr(self, '_input_units'):\n return self._input_units\n elif hasattr(self.evaluate, '__annotations__'):\n annotations = self.evaluate.__annotations__.copy()\n annotations.pop('return', None)\n if annotations:\n # If annotations are missing for any of the inputs this will error.\n return dict((name, annotations[name]) for name in self.inputs)\n else:\n # None means any unit is accepted\n return None\n\n @input_units.setter\n def input_units(self, input_units):\n self._input_units = input_units\n\n @property\n def return_units(self):\n \"\"\"\n This property is used to indicate what units or sets of units the output\n of evaluate should be in, and returns a dictionary mapping outputs to\n units (or `None` if any units are accepted).\n\n Model sub-classes can also use function annotations in evaluate to\n indicate valid output units, in which case this property should not be\n overridden since it will return the return units based on the\n annotations.\n \"\"\"\n if hasattr(self, '_return_units'):\n return self._return_units\n elif hasattr(self.evaluate, '__annotations__'):\n return self.evaluate.__annotations__.get('return', None)\n else:\n # None means any unit is accepted\n return None\n\n @return_units.setter\n def return_units(self, return_units):\n self._return_units = return_units\n\n def prepare_inputs(self, *inputs, **kwargs):\n \"\"\"\n This method is used in `~astropy.modeling.Model.__call__` to ensure\n that all the inputs to the model can be broadcast into compatible\n shapes (if one or both of them are input as arrays), particularly if\n there is more than one parameter set. 
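As a rough illustrative sketch (``format_info`` is an internal\n structure consumed by `~astropy.modeling.Model.prepare_outputs`, so the\n exact return value is not part of the public contract)::\n\n >>> import numpy as np\n >>> from astropy.modeling.models import Gaussian1D\n >>> g = Gaussian1D(amplitude=[1., 2.], mean=[0., 0.],\n ... stddev=[1., 1.], n_models=2)\n >>> inputs, format_info = g.prepare_inputs(np.zeros((2, 5))) # doctest: +SKIP\n\n 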
This also makes sure that (if\n applicable) the units of the input will be compatible with the evaluate\n method.\n \"\"\"\n\n # When we instantiate the model class, we make sure that __call__ can\n # take the following two keyword arguments.\n model_set_axis = kwargs.pop('model_set_axis', None)\n equivalencies = kwargs.pop('equivalencies', None)\n\n if model_set_axis is None:\n # By default the model_set_axis for the input is assumed to be the\n # same as that for the parameters the model was defined with\n # TODO: Ensure that negative model_set_axis arguments are respected\n model_set_axis = self.model_set_axis\n\n n_models = len(self)\n\n params = [getattr(self, name) for name in self.param_names]\n inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]\n\n _validate_input_shapes(inputs, self.inputs, n_models,\n model_set_axis, self.standard_broadcasting)\n\n # Check that the units are correct, if applicable\n\n if self.input_units is not None:\n\n # We combine any instance-level input equivalencies with user\n # specified ones at call-time.\n input_units_equivalencies = _combine_equivalency_dict(self.inputs,\n equivalencies,\n self.input_units_equivalencies)\n\n # We now iterate over the different inputs and make sure that their\n # units are consistent with those specified in input_units.\n for i in range(len(inputs)):\n\n input_name = self.inputs[i]\n input_unit = self.input_units.get(input_name, None)\n\n if input_unit is None:\n continue\n\n if isinstance(inputs[i], Quantity):\n\n # We check for consistency of the units with input_units,\n # taking into account any equivalencies\n\n if inputs[i].unit.is_equivalent(input_unit, equivalencies=input_units_equivalencies[input_name]):\n\n # If equivalencies have been specified, we need to\n # convert the input to the input units - this is because\n # some equivalencies are non-linear, and we need to be\n # sure that we evaluate the model in its own frame\n # of reference. 
If input_units_strict is set, we also\n # need to convert to the input units.\n if len(input_units_equivalencies) > 0 or self.input_units_strict:\n inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name])\n\n else:\n\n # We consider the following two cases separately so as\n # to be able to raise more appropriate/nicer exceptions\n\n if input_unit is dimensionless_unscaled:\n raise UnitsError(\"Units of input '{0}', {1} ({2}), could not be \"\n \"converted to required dimensionless \"\n \"input\".format(self.inputs[i],\n inputs[i].unit,\n inputs[i].unit.physical_type))\n else:\n raise UnitsError(\"Units of input '{0}', {1} ({2}), could not be \"\n \"converted to required input units of \"\n \"{3} ({4})\".format(self.inputs[i],\n inputs[i].unit,\n inputs[i].unit.physical_type,\n input_unit,\n input_unit.physical_type))\n else:\n\n # If we allow dimensionless input, we add the units to the\n # input values without conversion, otherwise we raise an\n # exception.\n\n if (not self.input_units_allow_dimensionless and\n input_unit is not dimensionless_unscaled and input_unit is not None):\n if np.any(inputs[i] != 0):\n raise UnitsError(\"Units of input '{0}', (dimensionless), could not be \"\n \"converted to required input units of \"\n \"{1} ({2})\".format(self.inputs[i], input_unit,\n input_unit.physical_type))\n\n # The input formatting required for single models versus a multiple\n # model set are different enough that they've been split into separate\n # subroutines\n if n_models == 1:\n return _prepare_inputs_single_model(self, params, inputs,\n **kwargs)\n else:\n return _prepare_inputs_model_set(self, params, inputs, n_models,\n model_set_axis, **kwargs)\n\n def prepare_outputs(self, format_info, *outputs, **kwargs):\n if len(self) == 1:\n return _prepare_outputs_single_model(self, outputs, format_info)\n else:\n return _prepare_outputs_model_set(self, outputs, format_info)\n\n def copy(self):\n \"\"\"\n Return a copy of this model.\n\n Uses a deep copy so that all model attributes, including parameter\n values, are copied as well.\n \"\"\"\n\n return copy.deepcopy(self)\n\n @sharedmethod\n def rename(self, name):\n \"\"\"\n Return a copy of this model with a new name.\n \"\"\"\n new_model = self.copy()\n new_model._name = name\n return new_model\n\n @sharedmethod\n def n_submodels(self):\n \"\"\"\n Return the number of components in a single model, which is\n obviously 1.\n \"\"\"\n return 1\n\n # *** Internal methods ***\n @sharedmethod\n def _from_existing(self, existing, param_names):\n \"\"\"\n Creates a new instance of ``cls`` that shares its underlying parameter\n values with an existing model instance given by ``existing``.\n\n This is used primarily by compound models to return a view of an\n individual component of a compound model. ``param_names`` should be\n the names of the parameters in the *existing* model to use as the\n parameters in this new model. 
Its length should equal the number of\n parameters this model takes, so that it can map parameters on the\n existing model to parameters on this model one-to-one.\n \"\"\"\n\n # Basically this is an alternative __init__\n if isinstance(self, type):\n # self is a class, not an instance\n needs_initialization = True\n dummy_args = (0,) * len(param_names)\n self = self.__new__(self, *dummy_args)\n else:\n needs_initialization = False\n self = self.copy()\n\n aliases = dict(zip(self.param_names, param_names))\n # This is basically an alternative _initialize_constraints\n constraints = {}\n for cons_type in self.parameter_constraints:\n orig = existing._constraints[cons_type]\n constraints[cons_type] = AliasDict(orig, aliases)\n\n self._constraints = constraints\n\n self._n_models = existing._n_models\n self._model_set_axis = existing._model_set_axis\n self._parameters = existing._parameters\n\n self._param_metrics = defaultdict(dict)\n for param_a, param_b in six.iteritems(aliases):\n # Take the param metrics info for the given parameters in the\n # existing model, and hand them to the appropriate parameters in\n # the new model\n self._param_metrics[param_a] = existing._param_metrics[param_b]\n\n if needs_initialization:\n self.__init__(*dummy_args)\n\n return self\n\n def _initialize_constraints(self, kwargs):\n \"\"\"\n Pop parameter constraint values off the keyword arguments passed to\n `Model.__init__` and store them in private instance attributes.\n \"\"\"\n\n if hasattr(self, '_constraints'):\n # Skip constraint initialization if it has already been handled via\n # an alternate initialization\n return\n\n self._constraints = {}\n # Pop any constraints off the keyword arguments\n for constraint in self.parameter_constraints:\n values = kwargs.pop(constraint, {})\n self._constraints[constraint] = values.copy()\n\n # Update with default parameter constraints\n for param_name in self.param_names:\n param = getattr(self, param_name)\n\n # Parameters don't have all constraint types\n value = getattr(param, constraint)\n if value is not None:\n self._constraints[constraint][param_name] = value\n\n for constraint in self.model_constraints:\n values = kwargs.pop(constraint, [])\n self._constraints[constraint] = values\n\n def _initialize_parameters(self, args, kwargs):\n \"\"\"\n Initialize the _parameters array that stores raw parameter values for\n all parameter sets for use with vectorized fitting algorithms; on\n FittableModels the _param_name attributes actually just reference\n slices of this array.\n \"\"\"\n\n if hasattr(self, '_parameters'):\n # Skip parameter initialization if it has already been handled via\n # an alternate initialization\n return\n\n n_models = kwargs.pop('n_models', None)\n\n if not (n_models is None or\n (isinstance(n_models, (int, np.integer)) and n_models >= 1)):\n raise ValueError(\n \"n_models must be either None (in which case it is \"\n \"determined from the model_set_axis of the parameter initial \"\n \"values) or it must be a positive integer \"\n \"(got {0!r})\".format(n_models))\n\n model_set_axis = kwargs.pop('model_set_axis', None)\n if model_set_axis is None:\n if n_models is not None and n_models > 1:\n # Default to zero\n model_set_axis = 0\n else:\n # Otherwise disable\n model_set_axis = False\n else:\n if not (model_set_axis is False or\n (isinstance(model_set_axis, int) and\n not isinstance(model_set_axis, bool))):\n raise ValueError(\n \"model_set_axis must be either False or an integer \"\n \"specifying the parameter array axis to map to each 
\"\n \"model in a set of models (got {0!r}).\".format(\n model_set_axis))\n\n # Process positional arguments by matching them up with the\n # corresponding parameters in self.param_names--if any also appear as\n # keyword arguments this presents a conflict\n params = {}\n if len(args) > len(self.param_names):\n raise TypeError(\n \"{0}.__init__() takes at most {1} positional arguments ({2} \"\n \"given)\".format(self.__class__.__name__, len(self.param_names),\n len(args)))\n\n self._model_set_axis = model_set_axis\n self._param_metrics = defaultdict(dict)\n\n for idx, arg in enumerate(args):\n if arg is None:\n # A value of None implies using the default value, if exists\n continue\n # We use quantity_asanyarray here instead of np.asanyarray because\n # if any of the arguments are quantities, we need to return a\n # Quantity object not a plain Numpy array.\n params[self.param_names[idx]] = quantity_asanyarray(arg, dtype=np.float)\n\n # At this point the only remaining keyword arguments should be\n # parameter names; any others are in error.\n for param_name in self.param_names:\n if param_name in kwargs:\n if param_name in params:\n raise TypeError(\n \"{0}.__init__() got multiple values for parameter \"\n \"{1!r}\".format(self.__class__.__name__, param_name))\n value = kwargs.pop(param_name)\n if value is None:\n continue\n # We use quantity_asanyarray here instead of np.asanyarray because\n # if any of the arguments are quantities, we need to return a\n # Quantity object not a plain Numpy array.\n params[param_name] = quantity_asanyarray(value, dtype=np.float)\n\n if kwargs:\n # If any keyword arguments were left over at this point they are\n # invalid--the base class should only be passed the parameter\n # values, constraints, and param_dim\n for kwarg in kwargs:\n # Just raise an error on the first unrecognized argument\n raise TypeError(\n '{0}.__init__() got an unrecognized parameter '\n '{1!r}'.format(self.__class__.__name__, kwarg))\n\n # Determine the number of model sets: If the model_set_axis is\n # None then there is just one parameter set; otherwise it is determined\n # by the size of that axis on the first parameter--if the other\n # parameters don't have the right number of axes or the sizes of their\n # model_set_axis don't match an error is raised\n if model_set_axis is not False and n_models != 1 and params:\n max_ndim = 0\n if model_set_axis < 0:\n min_ndim = abs(model_set_axis)\n else:\n min_ndim = model_set_axis + 1\n\n for name, value in six.iteritems(params):\n param_ndim = np.ndim(value)\n if param_ndim < min_ndim:\n raise InputParameterError(\n \"All parameter values must be arrays of dimension \"\n \"at least {0} for model_set_axis={1} (the value \"\n \"given for {2!r} is only {3}-dimensional)\".format(\n min_ndim, model_set_axis, name, param_ndim))\n\n max_ndim = max(max_ndim, param_ndim)\n\n if n_models is None:\n # Use the dimensions of the first parameter to determine\n # the number of model sets\n n_models = value.shape[model_set_axis]\n elif value.shape[model_set_axis] != n_models:\n raise InputParameterError(\n \"Inconsistent dimensions for parameter {0!r} for \"\n \"{1} model sets. 
The length of axis {2} must be the \"\n \"same for all input parameter values\".format(\n name, n_models, model_set_axis))\n\n self._check_param_broadcast(params, max_ndim)\n else:\n if n_models is None:\n n_models = 1\n\n self._check_param_broadcast(params, None)\n\n self._n_models = n_models\n self._initialize_parameter_values(params)\n\n def _initialize_parameter_values(self, params):\n # self._param_metrics should have been initialized in\n # self._initialize_parameters\n param_metrics = self._param_metrics\n total_size = 0\n\n for name in self.param_names:\n unit = None\n param_descr = getattr(self, name)\n\n if params.get(name) is None:\n default = param_descr.default\n\n if default is None:\n # No value was supplied for the parameter and the\n # parameter does not have a default, therefore the model\n # is underspecified\n raise TypeError(\n \"{0}.__init__() requires a value for parameter \"\n \"{1!r}\".format(self.__class__.__name__, name))\n\n value = params[name] = default\n unit = param_descr.unit\n else:\n value = params[name]\n if isinstance(value, Quantity):\n unit = value.unit\n else:\n unit = None\n\n param_size = np.size(value)\n param_shape = np.shape(value)\n\n param_slice = slice(total_size, total_size + param_size)\n\n param_metrics[name]['slice'] = param_slice\n param_metrics[name]['shape'] = param_shape\n\n if unit is None and param_descr.unit is not None:\n raise InputParameterError(\n \"{0}.__init__() requires a Quantity for parameter \"\n \"{1!r}\".format(self.__class__.__name__, name))\n\n param_metrics[name]['orig_unit'] = unit\n param_metrics[name]['raw_unit'] = None\n if param_descr._setter is not None:\n _val = param_descr._setter(value)\n if isinstance(_val, Quantity):\n param_metrics[name]['raw_unit'] = _val.unit\n else:\n param_metrics[name]['raw_unit'] = None\n total_size += param_size\n\n self._param_metrics = param_metrics\n self._parameters = np.empty(total_size, dtype=np.float64)\n\n # Now set the parameter values (this will also fill\n # self._parameters)\n # TODO: This is a bit ugly, but easier to deal with than how this was\n # done previously. There's still lots of opportunity for refactoring\n # though, in particular once we move the _get/set_model_value methods\n # out of Parameter and into Model (renaming them\n # _get/set_parameter_value)\n for name, value in params.items():\n # value here may be a Quantity object.\n param_descr = getattr(self, name)\n unit = param_descr.unit\n value = np.array(value)\n orig_unit = param_metrics[name]['orig_unit']\n if param_descr._setter is not None:\n if unit is not None:\n value = np.asarray(param_descr._setter(value * orig_unit).value)\n else:\n value = param_descr._setter(value)\n self._parameters[param_metrics[name]['slice']] = value.ravel()\n\n # Finally validate all the parameters; we do this last so that\n # validators that depend on one of the other parameters' values will\n # work\n for name in params:\n param_descr = getattr(self, name)\n param_descr.validator(param_descr.value)\n\n def _check_param_broadcast(self, params, max_ndim):\n \"\"\"\n This subroutine checks that all parameter arrays can be broadcast\n against each other, and determines the shapes parameters must have in\n order to broadcast correctly.\n\n If model_set_axis is None this merely checks that the parameters\n broadcast and returns an empty dict if so. 
This mode is only used for\n single model sets.\n \"\"\"\n\n all_shapes = []\n param_names = []\n model_set_axis = self._model_set_axis\n\n for name in self.param_names:\n # Previously this just used iteritems(params), but we loop over all\n # param_names instead just to ensure some determinism in the\n # ordering behavior\n if name not in params:\n continue\n\n value = params[name]\n param_names.append(name)\n # We've already checked that each parameter array is compatible in\n # the model_set_axis dimension, but now we need to check the\n # dimensions excluding that axis\n # Split the array dimensions into the axes before model_set_axis\n # and after model_set_axis\n param_shape = np.shape(value)\n\n param_ndim = len(param_shape)\n if max_ndim is not None and param_ndim < max_ndim:\n # All arrays have the same number of dimensions up to the\n # model_set_axis dimension, but after that they may have a\n # different number of trailing axes. The number of trailing\n # axes must be extended for mutual compatibility. For example\n # if max_ndim = 3 and model_set_axis = 0, an array with the\n # shape (2, 2) must be extended to (2, 1, 2). However, an\n # array with shape (2,) is extended to (2, 1).\n new_axes = (1,) * (max_ndim - param_ndim)\n\n if model_set_axis < 0:\n # Just need to prepend axes to make up the difference\n broadcast_shape = new_axes + param_shape\n else:\n broadcast_shape = (param_shape[:model_set_axis + 1] +\n new_axes +\n param_shape[model_set_axis + 1:])\n self._param_metrics[name]['broadcast_shape'] = broadcast_shape\n all_shapes.append(broadcast_shape)\n else:\n all_shapes.append(param_shape)\n\n # Now check mutual broadcastability of all shapes\n try:\n check_broadcast(*all_shapes)\n except IncompatibleShapeError as exc:\n shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args\n param_a = param_names[shape_a_idx]\n param_b = param_names[shape_b_idx]\n\n raise InputParameterError(\n \"Parameter {0!r} of shape {1!r} cannot be broadcast with \"\n \"parameter {2!r} of shape {3!r}. All parameter arrays \"\n \"must have shapes that are mutually compatible according \"\n \"to the broadcasting rules.\".format(param_a, shape_a,\n param_b, shape_b))\n\n def _param_sets(self, raw=False, units=False):\n \"\"\"\n Implementation of the Model.param_sets property.\n\n This internal implementation has a ``raw`` argument which controls\n whether or not to return the raw parameter values (i.e. the values that\n are actually stored in the ._parameters array, as opposed to the values\n displayed to users). 
In most cases these are one and the same but there\n are currently a few exceptions.\n\n Note: This is notably an overcomplicated device and may be removed\n entirely in the near future.\n \"\"\"\n\n param_metrics = self._param_metrics\n values = []\n shapes = []\n for name in self.param_names:\n param = getattr(self, name)\n\n if raw:\n value = param._raw_value\n else:\n value = param.value\n\n broadcast_shape = param_metrics[name].get('broadcast_shape')\n if broadcast_shape is not None:\n value = value.reshape(broadcast_shape)\n\n shapes.append(np.shape(value))\n\n if len(self) == 1:\n # Add a single param set axis to the parameter's value (thus\n # converting scalars to shape (1,) array values) for\n # consistency\n value = np.array([value])\n\n if units:\n if raw and self._param_metrics[name]['raw_unit'] is not None:\n unit = self._param_metrics[name]['raw_unit']\n else:\n unit = param.unit\n if unit is not None:\n value = Quantity(value, unit)\n\n values.append(value)\n\n if len(set(shapes)) != 1 or units:\n # If the parameters are not all the same shape, converting to an\n # array is going to produce an object array\n # However the way Numpy creates object arrays is tricky in that it\n # will recurse into array objects in the list and break them up\n # into separate objects. Doing things this way ensures a 1-D\n # object array the elements of which are the individual parameter\n # arrays. There's not much reason to do this over returning a list\n # except for consistency\n psets = np.empty(len(values), dtype=object)\n psets[:] = values\n return psets\n\n # TODO: Returning an array from this method may be entirely pointless\n # for internal use--perhaps only the external param_sets method should\n # return an array (and just for backwards compat--I would prefer to\n # maybe deprecate that method)\n\n return np.array(values)\n\n def _format_repr(self, args=[], kwargs={}, defaults={}):\n \"\"\"\n Internal implementation of ``__repr__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__repr__`` while keeping the same basic\n formatting.\n \"\"\"\n\n # TODO: I think this could be reworked to present model sets better\n\n parts = [repr(a) for a in args]\n\n parts.extend(\n \"{0}={1}\".format(name,\n param_repr_oneline(getattr(self, name)))\n for name in self.param_names)\n\n if self.name is not None:\n parts.append('name={0!r}'.format(self.name))\n\n for kwarg, value in kwargs.items():\n if kwarg in defaults and defaults[kwarg] != value:\n continue\n parts.append('{0}={1!r}'.format(kwarg, value))\n\n if len(self) > 1:\n parts.append(\"n_models={0}\".format(len(self)))\n\n return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))\n\n def _format_str(self, keywords=[]):\n \"\"\"\n Internal implementation of ``__str__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__str__`` while keeping the same basic\n formatting.\n \"\"\"\n\n default_keywords = [\n ('Model', self.__class__.__name__),\n ('Name', self.name),\n ('Inputs', self.inputs),\n ('Outputs', self.outputs),\n ('Model set size', len(self))\n ]\n\n parts = ['{0}: {1}'.format(keyword, value)\n for keyword, value in default_keywords + keywords\n if value is not None]\n\n parts.append('Parameters:')\n\n if len(self) == 1:\n columns = [[getattr(self, name).value]\n for name in self.param_names]\n else:\n columns = [getattr(self, name).value\n for name in self.param_names]\n\n if columns:\n param_table = Table(columns, 
names=self.param_names)\n # Set units on the columns\n for name in self.param_names:\n param_table[name].unit = getattr(self, name).unit\n parts.append(indent(str(param_table), width=4))\n\n return '\\n'.join(parts)\n\n\nclass FittableModel(Model):\n \"\"\"\n Base class for models that can be fitted using the built-in fitting\n algorithms.\n \"\"\"\n\n linear = False\n # derivative with respect to parameters\n fit_deriv = None\n \"\"\"\n Function (similar to the model's `~Model.evaluate`) to compute the\n derivatives of the model with respect to its parameters, for use by fitting\n algorithms. In other words, this computes the Jacobian matrix with respect\n to the model's parameters.\n \"\"\"\n # Flag that indicates if the model derivatives with respect to parameters\n # are given in columns or rows\n col_fit_deriv = True\n fittable = True\n\n\nclass Fittable1DModel(FittableModel):\n \"\"\"\n Base class for one-dimensional fittable models.\n\n This class provides an easier interface to defining new models.\n Examples can be found in `astropy.modeling.functional_models`.\n \"\"\"\n\n inputs = ('x',)\n outputs = ('y',)\n\n\nclass Fittable2DModel(FittableModel):\n \"\"\"\n Base class for two-dimensional fittable models.\n\n This class provides an easier interface to defining new models.\n Examples can be found in `astropy.modeling.functional_models`.\n \"\"\"\n\n inputs = ('x', 'y')\n outputs = ('z',)\n\n\ndef _make_arithmetic_operator(oper):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n def op(f, g):\n return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])\n\n return op\n\n\ndef _composition_operator(f, g):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n return (lambda inputs, params: g[0](f[0](inputs, params), params),\n f[1], g[2])\n\n\ndef _join_operator(f, g):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n return (lambda inputs, params: (f[0](inputs[:f[1]], params) +\n g[0](inputs[f[1]:], params)),\n f[1] + g[1], f[2] + g[2])\n\n\n# TODO: Support a couple unary operators--at least negation?\nBINARY_OPERATORS = {\n '+': _make_arithmetic_operator(operator.add),\n '-': _make_arithmetic_operator(operator.sub),\n '*': _make_arithmetic_operator(operator.mul),\n '/': _make_arithmetic_operator(operator.truediv),\n '**': _make_arithmetic_operator(operator.pow),\n '|': _composition_operator,\n '&': _join_operator\n}\n\n\n_ORDER_OF_OPERATORS = [('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]\nOPERATOR_PRECEDENCE = {}\nfor idx, ops in enumerate(_ORDER_OF_OPERATORS):\n for op in ops:\n OPERATOR_PRECEDENCE[op] = idx\ndel idx, op, ops\n\n\nclass _CompoundModelMeta(_ModelMeta):\n _tree = None\n _submodels = None\n _submodel_names = None\n _nextid = 0\n\n _param_names = None\n # _param_map is a mapping of the compound model's generated param names to\n # the parameters of submodels they are associated with. 
The values in this\n # mapping are (idx, name) tuples were idx is the index of the submodel this\n # parameter is associated with, and name is the same parameter's name on\n # the submodel\n # In principle this will allow compound models to give entirely new names\n # to parameters that don't have to be the same as their original names on\n # the submodels, but right now that isn't taken advantage of\n _param_map = None\n\n _slice_offset = 0\n # When taking slices of a compound model, this keeps track of how offset\n # the first model in the slice is from the first model in the original\n # compound model it was taken from\n\n # This just inverts _param_map, swapping keys with values. This is also\n # useful to have.\n _param_map_inverse = None\n _fittable = None\n\n _evaluate = None\n\n def __getitem__(cls, index):\n index = cls._normalize_index(index)\n\n if isinstance(index, (int, np.integer)):\n return cls._get_submodels()[index]\n else:\n return cls._get_slice(index.start, index.stop)\n\n def __getattr__(cls, attr):\n # Make sure the _tree attribute is set; otherwise we are not looking up\n # an attribute on a concrete compound model class and should just raise\n # the AttributeError\n if cls._tree is not None and attr in cls.param_names:\n cls._init_param_descriptors()\n return getattr(cls, attr)\n\n raise AttributeError(attr)\n\n def __repr__(cls):\n if cls._tree is None:\n # This case is mostly for debugging purposes\n return cls._format_cls_repr()\n\n expression = cls._format_expression()\n components = cls._format_components()\n keywords = [\n ('Expression', expression),\n ('Components', '\\n' + indent(components))\n ]\n\n return cls._format_cls_repr(keywords=keywords)\n\n def __dir__(cls):\n \"\"\"\n Returns a list of attributes defined on a compound model, including\n all of its parameters.\n \"\"\"\n\n try:\n # Annoyingly, this will only work for Python 3.3+\n basedir = super(_CompoundModelMeta, cls).__dir__()\n except AttributeError:\n basedir = list(set((dir(type(cls)) + list(cls.__dict__))))\n\n if cls._tree is not None:\n for name in cls.param_names:\n basedir.append(name)\n\n basedir.sort()\n\n return basedir\n\n def __reduce__(cls):\n rv = super(_CompoundModelMeta, cls).__reduce__()\n\n if isinstance(rv, tuple):\n # Delete _evaluate from the members dict\n with suppress(KeyError):\n del rv[1][2]['_evaluate']\n\n return rv\n\n @property\n def submodel_names(cls):\n if cls._submodel_names is None:\n seen = {}\n names = []\n for idx, submodel in enumerate(cls._get_submodels()):\n name = str(submodel.name)\n if name in seen:\n names.append('{0}_{1}'.format(name, idx))\n if seen[name] >= 0:\n jdx = seen[name]\n names[jdx] = '{0}_{1}'.format(names[jdx], jdx)\n seen[name] = -1\n else:\n names.append(name)\n seen[name] = idx\n cls._submodel_names = tuple(names)\n\n return cls._submodel_names\n\n @property\n def param_names(cls):\n if cls._param_names is None:\n cls._init_param_names()\n\n return cls._param_names\n\n @property\n def fittable(cls):\n if cls._fittable is None:\n cls._fittable = all(m.fittable for m in cls._get_submodels())\n\n return cls._fittable\n\n # TODO: Maybe we could use make_function_with_signature for evaluate, but\n # it's probably not worth it (and I'm not sure what the limit is on number\n # of function arguments/local variables but we could break that limit for\n # complicated compound models...\n def evaluate(cls, *args):\n if cls._evaluate is None:\n func = cls._tree.evaluate(BINARY_OPERATORS,\n getter=cls._model_evaluate_getter)[0]\n # Making this a 
staticmethod isn't strictly necessary for Python 3,\n # but it is necessary on Python 2 since looking up cls._evaluate\n # will return an unbound method otherwise\n cls._evaluate = staticmethod(func)\n inputs = args[:cls.n_inputs]\n params = iter(args[cls.n_inputs:])\n result = cls._evaluate(inputs, params)\n if cls.n_outputs == 1:\n return result[0]\n else:\n return result\n\n # TODO: This supports creating a new compound model from two existing\n # compound models (or normal models) and a single operator. However, it\n # ought also to be possible to create a new model from an *entire*\n # expression, represented as a sequence of operators and their operands (or\n # an existing ExpressionTree) and build that into a compound model without\n # creating an intermediate _CompoundModel class for every single operator\n # in the expression. This will prove to be a useful optimization in many\n # cases\n @classmethod\n def _from_operator(mcls, operator, left, right, additional_members={}):\n \"\"\"\n Given a Python operator (represented by a string, such as ``'+'``\n or ``'*'``), and two model classes or instances, return a new compound\n model that evaluates the given operator on the outputs of the left and\n right input models.\n\n If either of the input models is a model *class* (i.e. a subclass of\n `~astropy.modeling.Model`) then the returned model is a new subclass of\n `~astropy.modeling.Model` that may be instantiated with any parameter\n values. If both input models are *instances* of a model, a new class\n is still created, but this method returns an *instance* of that class,\n taking the parameter values from the parameters of the input model\n instances.\n\n If given, the ``additional_members`` `dict` may provide additional\n class members that should be added to the generated\n `~astropy.modeling.Model` subclass. Some members that are generated by\n this method should not be provided by ``additional_members``. These\n include ``_tree``, ``inputs``, ``outputs``, ``linear``,\n ``standard_broadcasting``, and ``__module__``. 
This is currently for\n internal use only.\n \"\"\"\n # Note, currently this only supports binary operators, but could be\n # easily extended to support unary operators (namely '-') if/when\n # needed\n children = []\n for child in (left, right):\n if isinstance(child, (_CompoundModelMeta, _CompoundModel)):\n \"\"\"\n Although the original child models were copied we make another\n copy here to ensure that changes in this child compound model's\n parameters will not propagate to the result, that is\n cm1 = Gaussian1D(1, 5, .1) + Gaussian1D()\n cm2 = cm1 | Scale()\n cm1.amplitude_0 = 100\n assert(cm2.amplitude_0 == 1)\n \"\"\"\n children.append(copy.deepcopy(child._tree))\n elif isinstance(child, Model):\n children.append(ExpressionTree(child.copy()))\n else:\n children.append(ExpressionTree(child))\n\n tree = ExpressionTree(operator, left=children[0], right=children[1])\n\n name = str('CompoundModel{0}'.format(_CompoundModelMeta._nextid))\n _CompoundModelMeta._nextid += 1\n\n mod = find_current_module(3)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n inputs, outputs = mcls._check_inputs_and_outputs(operator, left, right)\n\n if operator in ('|', '+', '-'):\n linear = left.linear and right.linear\n else:\n # Which is not to say it is *definitely* not linear but it would be\n # trickier to determine\n linear = False\n\n standard_broadcasting = \\\n left.standard_broadcasting and right.standard_broadcasting\n\n # Note: If any other members are added here, make sure to mention them\n # in the docstring of this method.\n members = additional_members\n members.update({\n '_tree': tree,\n '_is_dynamic': True, # See docs for _ModelMeta._is_dynamic\n 'inputs': inputs,\n 'outputs': outputs,\n 'linear': linear,\n 'standard_broadcasting': standard_broadcasting,\n '__module__': str(modname)})\n\n new_cls = mcls(name, (_CompoundModel,), members)\n\n if isinstance(left, Model) and isinstance(right, Model):\n # Both models used in the operator were already instantiated models,\n # not model *classes*. 
As such it's not particularly useful to return\n # the class itself, but to instead produce a new instance:\n instance = new_cls()\n\n # Workaround for https://github.com/astropy/astropy/issues/3542\n # TODO: Any effort to restructure the tree-like data structure for\n # compound models should try to obviate this workaround--if\n # intermediate compound models are stored in the tree as well then\n # we can immediately check for custom inverses on sub-models when\n # computing the inverse\n instance._user_inverse = mcls._make_user_inverse(\n operator, left, right)\n\n if left._n_models == right._n_models:\n instance._n_models = left._n_models\n else:\n raise ValueError('Model sets must have the same number of '\n 'components.')\n\n return instance\n\n # Otherwise return the new uninstantiated class itself\n return new_cls\n\n @classmethod\n def _check_inputs_and_outputs(mcls, operator, left, right):\n # TODO: These aren't the full rules for handling inputs and outputs, but\n # this will handle most basic cases correctly\n if operator == '|':\n inputs = left.inputs\n outputs = right.outputs\n\n if left.n_outputs != right.n_inputs:\n raise ModelDefinitionError(\n \"Unsupported operands for |: {0} (n_inputs={1}, \"\n \"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); \"\n \"n_outputs for the left-hand model must match n_inputs \"\n \"for the right-hand model.\".format(\n left.name, left.n_inputs, left.n_outputs, right.name,\n right.n_inputs, right.n_outputs))\n elif operator == '&':\n inputs = combine_labels(left.inputs, right.inputs)\n outputs = combine_labels(left.outputs, right.outputs)\n else:\n # Without loss of generality\n inputs = left.inputs\n outputs = left.outputs\n\n if (left.n_inputs != right.n_inputs or\n left.n_outputs != right.n_outputs):\n raise ModelDefinitionError(\n \"Unsupported operands for {0}: {1} (n_inputs={2}, \"\n \"n_outputs={3}) and {4} (n_inputs={5}, n_outputs={6}); \"\n \"models must have the same n_inputs and the same \"\n \"n_outputs for this operator\".format(\n operator, left.name, left.n_inputs, left.n_outputs,\n right.name, right.n_inputs, right.n_outputs))\n\n return inputs, outputs\n\n @classmethod\n def _make_user_inverse(mcls, operator, left, right):\n \"\"\"\n Generates an inverse `Model` for this `_CompoundModel` when either\n model in the operation has a *custom inverse* that was manually\n assigned by the user.\n\n If either model has a custom inverse, and in particular if another\n `_CompoundModel` has a custom inverse, then none of that model's\n sub-models should be considered at all when computing the inverse.\n So in that case we just compute the inverse ahead of time and set\n it as the new compound model's custom inverse.\n\n Note, this use case only applies when combining model instances,\n since model classes don't currently have a notion of a \"custom\n inverse\" (though it could probably be supported by overriding the\n class's inverse property).\n\n TODO: Consider fixing things so the aforementioned class-based case\n works as well. 
However, for the present purposes this is good enough.\n \"\"\"\n\n if not (operator in ('&', '|') and\n (left._user_inverse or right._user_inverse)):\n # These are the only operators that support an inverse right now\n return None\n\n try:\n left_inv = left.inverse\n right_inv = right.inverse\n except NotImplementedError:\n # If either inverse is undefined then just return False; this\n # means the normal _CompoundModel.inverse routine will fail\n # naturally anyways, since it requires all sub-models to have\n # an inverse defined\n return None\n\n if operator == '&':\n return left_inv & right_inv\n else:\n return right_inv | left_inv\n\n # TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of\n # leaf nodes is something the ExpressionTree class itself could just know\n def _get_submodels(cls):\n # Would make this a lazyproperty but those don't currently work with\n # type objects\n if cls._submodels is not None:\n return cls._submodels\n\n submodels = [c.value for c in cls._tree.traverse_postorder()\n if c.isleaf]\n cls._submodels = submodels\n return submodels\n\n def _init_param_descriptors(cls):\n \"\"\"\n This routine sets up the names for all the parameters on a compound\n model, including figuring out unique names for those parameters and\n also mapping them back to their associated parameters of the underlying\n submodels.\n\n Setting this all up is costly, and only necessary for compound models\n that a user will directly interact with. For example when building an\n expression like::\n\n >>> M = (Model1 + Model2) * Model3 # doctest: +SKIP\n\n the user will generally never interact directly with the temporary\n result of the subexpression ``(Model1 + Model2)``. So there's no need\n to setup all the parameters for that temporary throwaway. 
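As a sketch of the resulting naming scheme (each parameter name gains a\n ``_<index>`` suffix identifying its submodel)::\n\n >>> from astropy.modeling.models import Gaussian1D # doctest: +SKIP\n >>> (Gaussian1D() + Gaussian1D()).param_names # doctest: +SKIP\n ('amplitude_0', 'mean_0', 'stddev_0', 'amplitude_1', 'mean_1', 'stddev_1')\n\n 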
Only once\n the full expression is built and the user initializes or introspects\n ``M`` is it necessary to determine its full parameterization.\n \"\"\"\n\n # Accessing cls.param_names will implicitly call _init_param_names if\n # needed and thus also set up the _param_map; I'm not crazy about that\n # design but it stands for now\n for param_name in cls.param_names:\n submodel_idx, submodel_param = cls._param_map[param_name]\n submodel = cls[submodel_idx]\n\n orig_param = getattr(submodel, submodel_param, None)\n\n if isinstance(submodel, Model):\n # Take the parameter's default from the model's value for that\n # parameter\n default = orig_param.value\n else:\n default = orig_param.default\n\n # Copy constraints\n constraints = dict((key, getattr(orig_param, key))\n for key in Model.parameter_constraints)\n\n # Note: Parameter.copy() returns a new unbound Parameter, never\n # a bound Parameter even if submodel is a Model instance (as\n # opposed to a Model subclass)\n new_param = orig_param.copy(name=param_name, default=default,\n unit=orig_param.unit,\n **constraints)\n\n setattr(cls, param_name, new_param)\n\n def _init_param_names(cls):\n \"\"\"\n This subroutine is solely for setting up the ``param_names`` attribute\n itself.\n\n See ``_init_param_descriptors`` for the full parameter setup.\n \"\"\"\n\n # Currently this skips over Model *instances* in the expression tree;\n # basically these are treated as constants and do not add\n # fittable/tunable parameters to the compound model.\n # TODO: I'm not 100% happy with this design, and maybe we need some\n # interface for distinguishing fittable/settable parameters with\n # *constant* parameters (which would be distinct from parameters with\n # fixed constraints since they're permanently locked in place). But I'm\n # not sure if this is really the best way to treat the issue.\n\n names = []\n param_map = {}\n\n # Start counting the suffix indices to put on parameter names from the\n # slice_offset. 
Usually this will just be zero, but for compound\n # models that were sliced from another compound model this may be > 0\n param_suffix = cls._slice_offset\n\n for idx, model in enumerate(cls._get_submodels()):\n if not model.param_names:\n # Skip models that don't have parameters in the numbering\n # TODO: Reevaluate this if it turns out to be confusing, though\n # parameter-less models are not very common in practice (there\n # are a few projections that don't take parameters)\n continue\n\n for param_name in model.param_names:\n # This is sort of heuristic, but we want to check that\n # model.param_name *actually* returns a Parameter descriptor,\n # and that the model isn't some inconsistent type that happens\n # to have a param_names attribute but does not actually\n # implement settable parameters.\n # In the future we can probably remove this check, but this is\n # here specifically to support the legacy compat\n # _CompositeModel which can be considered a pathological case\n # in the context of the new framework\n # if not isinstance(getattr(model, param_name, None),\n # Parameter):\n # break\n name = '{0}_{1}'.format(param_name, param_suffix + idx)\n names.append(name)\n param_map[name] = (idx, param_name)\n\n cls._param_names = tuple(names)\n cls._param_map = param_map\n cls._param_map_inverse = dict((v, k) for k, v in param_map.items())\n\n def _format_expression(cls):\n # TODO: At some point might be useful to make a public version of this,\n # albeit with more formatting options\n return cls._tree.format_expression(OPERATOR_PRECEDENCE)\n\n def _format_components(cls):\n return '\\n\\n'.join('[{0}]: {1!r}'.format(idx, m)\n for idx, m in enumerate(cls._get_submodels()))\n\n def _normalize_index(cls, index):\n \"\"\"\n Converts an index given to __getitem__ to either an integer, or\n a slice with integer start and stop values.\n\n If the length of the slice is exactly 1 this converts the index to a\n simple integer lookup.\n\n Negative integers are converted to positive integers.\n \"\"\"\n\n def get_index_from_name(name):\n try:\n return cls.submodel_names.index(name)\n except ValueError:\n raise IndexError(\n 'Compound model {0} does not have a component named '\n '{1}'.format(cls.name, name))\n\n def check_for_negative_index(index):\n if index < 0:\n new_index = len(cls.submodel_names) + index\n if new_index < 0:\n # If still < 0 then this is an invalid index\n raise IndexError(\n \"Model index {0} out of range.\".format(index))\n else:\n index = new_index\n\n return index\n\n if isinstance(index, six.string_types):\n return get_index_from_name(index)\n elif isinstance(index, slice):\n if index.step not in (1, None):\n # In principle it could be but I can scarcely imagine a case\n # where it would be useful. 
If someone can think of one then\n # we can enable it.\n raise ValueError(\n \"Step not supported for compound model slicing.\")\n start = index.start if index.start is not None else 0\n stop = (index.stop\n if index.stop is not None else len(cls.submodel_names))\n if isinstance(start, (int, np.integer)):\n start = check_for_negative_index(start)\n if isinstance(stop, (int, np.integer)):\n stop = check_for_negative_index(stop)\n if isinstance(start, six.string_types):\n start = get_index_from_name(start)\n if isinstance(stop, six.string_types):\n stop = get_index_from_name(stop) + 1\n length = stop - start\n\n if length == 1:\n return start\n elif length <= 0:\n raise ValueError(\"Empty slice of a compound model.\")\n\n return slice(start, stop)\n elif isinstance(index, (int, np.integer)):\n if index >= len(cls.submodel_names):\n raise IndexError(\n \"Model index {0} out of range.\".format(index))\n\n return check_for_negative_index(index)\n\n raise TypeError(\n 'Submodels can be indexed either by their integer order or '\n 'their name (got {0!r}).'.format(index))\n\n def _get_slice(cls, start, stop):\n \"\"\"\n Return a new model build from a sub-expression of the expression\n represented by this model.\n\n Right now this is highly inefficient, as it creates a new temporary\n model for each operator that appears in the sub-expression. It would\n be better if this just built a new expression tree, and the new model\n instantiated directly from that tree.\n\n Once tree -> model instantiation is possible this should be fixed to\n use that instead.\n \"\"\"\n\n members = {'_slice_offset': cls._slice_offset + start}\n operators = dict((oper, _model_oper(oper, additional_members=members))\n for oper in BINARY_OPERATORS)\n\n return cls._tree.evaluate(operators, start=start, stop=stop)\n\n @staticmethod\n def _model_evaluate_getter(idx, model):\n n_params = len(model.param_names)\n n_inputs = model.n_inputs\n n_outputs = model.n_outputs\n\n # There is currently an unfortunate inconsistency in some models, which\n # requires them to be instantiated for their evaluate to work. 
I think\n # that needs to be reconsidered and fixed somehow, but in the meantime\n # we need to check for that case\n if (not isinstance(model, Model) and\n isinstancemethod(model, model.evaluate)):\n if n_outputs == 1:\n # Where previously model was a class, now make an instance\n def f(inputs, params):\n param_values = tuple(islice(params, n_params))\n return (model(*param_values).evaluate(\n *chain(inputs, param_values)),)\n else:\n def f(inputs, params):\n param_values = tuple(islice(params, n_params))\n return model(*param_values).evaluate(\n *chain(inputs, param_values))\n else:\n evaluate = model.evaluate\n if n_outputs == 1:\n f = lambda inputs, params: \\\n (evaluate(*chain(inputs, islice(params, n_params))),)\n else:\n f = lambda inputs, params: \\\n evaluate(*chain(inputs, islice(params, n_params)))\n\n return (f, n_inputs, n_outputs)\n\n\n@six.add_metaclass(_CompoundModelMeta)\nclass _CompoundModel(Model):\n fit_deriv = None\n col_fit_deriv = False\n\n _submodels = None\n\n def __str__(self):\n expression = self._format_expression()\n components = self._format_components()\n keywords = [\n ('Expression', expression),\n ('Components', '\\n' + indent(components))\n ]\n return super(_CompoundModel, self)._format_str(keywords=keywords)\n\n def __getattr__(self, attr):\n # This __getattr__ is necessary, because _CompoundModelMeta creates\n # Parameter descriptors *lazily*--they do not exist in the class\n # __dict__ until one of them has been accessed.\n # However, this is at odds with how Python looks up descriptors (see\n # (https://docs.python.org/3/reference/datamodel.html#invoking-descriptors)\n # which is to look directly in the class __dict__\n # This workaround allows descriptors to work correctly when they are\n # not initially found in the class __dict__\n value = getattr(self.__class__, attr)\n if hasattr(value, '__get__'):\n # Object is a descriptor, so we should really return the result of\n # its __get__\n value = value.__get__(self, self.__class__)\n return value\n\n def __getitem__(self, index):\n index = self.__class__._normalize_index(index)\n model = self.__class__[index]\n\n if isinstance(index, slice):\n param_names = model.param_names\n else:\n param_map = self.__class__._param_map_inverse\n param_names = tuple(param_map[index, name]\n for name in model.param_names)\n\n return model._from_existing(self, param_names)\n\n if sys.version_info[:3] < (2, 7, 3):\n def __reduce__(self):\n # _CompoundModel classes have a generated evaluate() that is cached\n # off in the _evaluate attribute. This can't be pickled, and so\n # should be regenerated after unpickling (alas)\n if find_current_module(2) is not copy:\n # The copy module also uses __reduce__, but there's no problem\n # there.\n raise RuntimeError(\n \"Pickling of compound models is not possible using Python \"\n \"versions less than 2.7.3 due to a bug in Python. 
See \"\n \"http://docs.astropy.org/en/v1.0.4/known_issues.html#\"\n \"pickling-error-on-compound-models for more information (\"\n \"tried to pickle {0!r}).\".format(self))\n else:\n return super(_CompoundModel, self).__reduce__()\n\n @property\n def submodel_names(self):\n return self.__class__.submodel_names\n\n @sharedmethod\n def n_submodels(self):\n return len(self.submodel_names)\n\n @property\n def param_names(self):\n return self.__class__.param_names\n\n @property\n def fittable(self):\n return self.__class__.fittable\n\n @sharedmethod\n def evaluate(self, *args):\n return self.__class__.evaluate(*args)\n\n # TODO: The way this works is highly inefficient--the inverse is created by\n # making a new model for each operator in the compound model, which could\n # potentially mean creating a large number of temporary throwaway model\n # classes. This can definitely be optimized in the future by implementing\n # a way to construct a single model class from an existing tree\n @property\n def inverse(self):\n def _not_implemented(oper):\n def _raise(x, y):\n raise NotImplementedError(\n \"The inverse is not currently defined for compound \"\n \"models created using the {0} operator.\".format(oper))\n return _raise\n\n operators = dict((oper, _not_implemented(oper))\n for oper in ('+', '-', '*', '/', '**'))\n operators['&'] = operator.and_\n # Reverse the order of compositions\n operators['|'] = lambda x, y: operator.or_(y, x)\n\n leaf_idx = -1\n\n def getter(idx, model):\n try:\n # By indexing on self[] this will return an instance of the\n # model, with all the appropriate parameters set, which is\n # currently required to return an inverse\n return self[idx].inverse\n except NotImplementedError:\n raise NotImplementedError(\n \"All models in a composite model must have an inverse \"\n \"defined in order for the composite model to have an \"\n \"inverse. {0!r} does not have an inverse.\".format(model))\n\n return self._tree.evaluate(operators, getter=getter)\n\n @sharedmethod\n def _get_submodels(self):\n return self.__class__._get_submodels()\n\n def _parameter_units_for_data_units(self, input_units, output_units):\n units_for_data = {}\n for imodel, model in enumerate(self._submodels):\n units_for_data_sub = model._parameter_units_for_data_units(input_units, output_units)\n for param_sub in units_for_data_sub:\n param = self._param_map_inverse[(imodel, param_sub)]\n units_for_data[param] = units_for_data_sub[param_sub]\n return units_for_data\n\n\ndef custom_model(*args, **kwargs):\n \"\"\"\n Create a model from a user defined function. The inputs and parameters of\n the model will be inferred from the arguments of the function.\n\n This can be used either as a function or as a decorator. See below for\n examples of both usages.\n\n .. note::\n\n All model parameters have to be defined as keyword arguments with\n default values in the model function. Use `None` as a default argument\n value if you do not want to have a default value for that parameter.\n\n Parameters\n ----------\n func : function\n Function which defines the model. It should take N positional\n arguments where ``N`` is dimensions of the model (the number of\n independent variable in the model), and any number of keyword arguments\n (the parameters). It must return the value of the model (typically as\n an array, but can also be a scalar for scalar inputs). 
This\n corresponds to the `~astropy.modeling.Model.evaluate` method.\n fit_deriv : function, optional\n Function which defines the Jacobian derivative of the model. I.e., the\n derivative with respect to the *parameters* of the model. It should\n have the same argument signature as ``func``, but should return a\n sequence where each element of the sequence is the derivative\n with respect to the corresponding argument. This corresponds to the\n :meth:`~astropy.modeling.FittableModel.fit_deriv` method.\n\n Examples\n --------\n Define a sinusoidal model function as a custom 1D model::\n\n >>> from astropy.modeling.models import custom_model\n >>> import numpy as np\n >>> def sine_model(x, amplitude=1., frequency=1.):\n ... return amplitude * np.sin(2 * np.pi * frequency * x)\n >>> def sine_deriv(x, amplitude=1., frequency=1.):\n ... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)\n >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)\n\n Create an instance of the custom model and evaluate it::\n\n >>> model = SineModel()\n >>> model(0.25)\n 1.0\n\n This model instance can now be used like a usual astropy model.\n\n The next example demonstrates a 2D Moffat function model, and also\n demonstrates the support for docstrings (this example could also include\n a derivative, but it has been omitted for simplicity)::\n\n >>> @custom_model\n ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,\n ... alpha=1.0):\n ... \\\"\\\"\\\"Two dimensional Moffat function.\\\"\\\"\\\"\n ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2\n ... return amplitude * (1 + rr_gg) ** (-alpha)\n ...\n >>> print(Moffat2D.__doc__)\n Two dimensional Moffat function.\n >>> model = Moffat2D()\n >>> model(1, 1) # doctest: +FLOAT_CMP\n 0.3333333333333333\n \"\"\"\n\n fit_deriv = kwargs.get('fit_deriv', None)\n\n if len(args) == 1 and six.callable(args[0]):\n return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)\n elif not args:\n return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)\n else:\n raise TypeError(\n \"{0} takes at most one positional argument (the callable/\"\n \"function to be turned into a model). 
When used as a decorator \"\n \"it should be passed keyword arguments only (if \"\n \"any).\".format(__name__))\n\n\ndef _custom_model_wrapper(func, fit_deriv=None):\n \"\"\"\n Internal implementation `custom_model`.\n\n When `custom_model` is called as a function its arguments are passed to\n this function, and the result of this function is returned.\n\n When `custom_model` is used as a decorator a partial evaluation of this\n function is returned by `custom_model`.\n \"\"\"\n\n if not six.callable(func):\n raise ModelDefinitionError(\n \"func is not callable; it must be a function or other callable \"\n \"object\")\n\n if fit_deriv is not None and not six.callable(fit_deriv):\n raise ModelDefinitionError(\n \"fit_deriv not callable; it must be a function or other \"\n \"callable object\")\n\n model_name = func.__name__\n\n inputs, params = get_inputs_and_params(func)\n\n if (fit_deriv is not None and\n len(six.get_function_defaults(fit_deriv)) != len(params)):\n raise ModelDefinitionError(\"derivative function should accept \"\n \"same number of parameters as func.\")\n\n # TODO: Maybe have a clever scheme for default output name?\n if inputs:\n output_names = (inputs[0].name,)\n else:\n output_names = ('x',)\n\n params = dict((param.name, Parameter(param.name, default=param.default))\n for param in params)\n\n mod = find_current_module(2)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n members = {\n '__module__': str(modname),\n '__doc__': func.__doc__,\n 'inputs': tuple(x.name for x in inputs),\n 'outputs': output_names,\n 'evaluate': staticmethod(func),\n }\n\n if fit_deriv is not None:\n members['fit_deriv'] = staticmethod(fit_deriv)\n\n members.update(params)\n\n return type(model_name, (FittableModel,), members)\n\n\ndef render_model(model, arr=None, coords=None):\n \"\"\"\n Evaluates a model on an input array. Evaluation is limited to\n a bounding box if the `Model.bounding_box` attribute is set.\n\n Parameters\n ----------\n model : `Model`\n Model to be evaluated.\n arr : `numpy.ndarray`, optional\n Array on which the model is evaluated.\n coords : array-like, optional\n Coordinate arrays mapping to ``arr``, such that\n ``arr[coords] == arr``.\n\n Returns\n -------\n array : `numpy.ndarray`\n The model evaluated on the input ``arr`` or a new array from ``coords``.\n If ``arr`` and ``coords`` are both `None`, the returned array is\n limited to the `Model.bounding_box` limits. 
If\n `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.\n\n Examples\n --------\n :ref:`bounding-boxes`\n \"\"\"\n\n bbox = model.bounding_box\n\n if (coords is None) & (arr is None) & (bbox is None):\n raise ValueError('If no bounding_box is set, coords or arr must be input.')\n\n # for consistent indexing\n if model.n_inputs == 1:\n if coords is not None:\n coords = [coords]\n if bbox is not None:\n bbox = [bbox]\n\n if arr is not None:\n arr = arr.copy()\n # Check dimensions match model\n if arr.ndim != model.n_inputs:\n raise ValueError('number of array dimensions inconsistent with '\n 'number of model inputs.')\n if coords is not None:\n # Check dimensions match arr and model\n coords = np.array(coords)\n if len(coords) != model.n_inputs:\n raise ValueError('coordinate length inconsistent with the number '\n 'of model inputs.')\n if arr is not None:\n if coords[0].shape != arr.shape:\n raise ValueError('coordinate shape inconsistent with the '\n 'array shape.')\n else:\n arr = np.zeros(coords[0].shape)\n\n if bbox is not None:\n # assures position is at center pixel, important when using add_array\n pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))\n for bb in bbox]).astype(int).T\n\n if coords is not None:\n sub_shape = tuple(delta * 2 + 1)\n sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])\n else:\n limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]\n sub_coords = np.mgrid[limits]\n\n sub_coords = sub_coords[::-1]\n\n if arr is None:\n arr = model(*sub_coords)\n else:\n try:\n arr = add_array(arr, model(*sub_coords), pos)\n except ValueError:\n raise ValueError('The `bounding_box` is larger than the input'\n ' arr in one or more dimensions. Set '\n '`model.bounding_box = None`.')\n else:\n\n if coords is None:\n im_shape = arr.shape\n limits = [slice(i) for i in im_shape]\n coords = np.mgrid[limits]\n\n arr += model(*coords[::-1])\n\n return arr\n\n\ndef _prepare_inputs_single_model(model, params, inputs, **kwargs):\n broadcasts = []\n\n for idx, _input in enumerate(inputs):\n input_shape = _input.shape\n\n # Ensure that array scalars are always upgrade to 1-D arrays for the\n # sake of consistency with how parameters work. They will be cast back\n # to scalars at the end\n if not input_shape:\n inputs[idx] = _input.reshape((1,))\n\n if not params:\n max_broadcast = input_shape\n else:\n max_broadcast = ()\n\n for param in params:\n try:\n if model.standard_broadcasting:\n broadcast = check_broadcast(input_shape, param.shape)\n else:\n broadcast = input_shape\n except IncompatibleShapeError:\n raise ValueError(\n \"Model input argument {0!r} of shape {1!r} cannot be \"\n \"broadcast with parameter {2!r} of shape \"\n \"{3!r}.\".format(model.inputs[idx], input_shape,\n param.name, param.shape))\n\n if len(broadcast) > len(max_broadcast):\n max_broadcast = broadcast\n elif len(broadcast) == len(max_broadcast):\n max_broadcast = max(max_broadcast, broadcast)\n\n broadcasts.append(max_broadcast)\n\n if model.n_outputs > model.n_inputs:\n if len(set(broadcasts)) > 1:\n raise ValueError(\n \"For models with n_outputs > n_inputs, the combination of \"\n \"all inputs and parameters must broadcast to the same shape, \"\n \"which will be used as the shape of all outputs. In this \"\n \"case some of the inputs had different shapes, so it is \"\n \"ambiguous how to format outputs for this model. 
Try using \"\n \"inputs that are all the same size and shape.\")\n else:\n # Extend the broadcasts list to include shapes for all outputs\n extra_outputs = model.n_outputs - model.n_inputs\n if not broadcasts:\n # If there were no inputs then the broadcasts list is empty\n # just add a None since there is no broadcasting of outputs and\n # inputs necessary (see _prepare_outputs_single_model)\n broadcasts.append(None)\n broadcasts.extend([broadcasts[0]] * extra_outputs)\n\n return inputs, (broadcasts,)\n\n\ndef _prepare_outputs_single_model(model, outputs, format_info):\n broadcasts = format_info[0]\n\n outputs = list(outputs)\n\n for idx, output in enumerate(outputs):\n broadcast_shape = broadcasts[idx]\n if broadcast_shape is not None:\n if not broadcast_shape:\n # Shape is (), i.e. a scalar should be returned\n outputs[idx] = output.item()\n else:\n outputs[idx] = output.reshape(broadcast_shape)\n\n return tuple(outputs)\n\n\ndef _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis,\n **kwargs):\n reshaped = []\n pivots = []\n\n for idx, _input in enumerate(inputs):\n max_param_shape = ()\n\n if n_models > 1 and model_set_axis is not False:\n # Use the shape of the input *excluding* the model axis\n input_shape = (_input.shape[:model_set_axis] +\n _input.shape[model_set_axis + 1:])\n else:\n input_shape = _input.shape\n\n for param in params:\n try:\n check_broadcast(input_shape, param.shape)\n except IncompatibleShapeError:\n raise ValueError(\n \"Model input argument {0!r} of shape {1!r} cannot be \"\n \"broadcast with parameter {2!r} of shape \"\n \"{3!r}.\".format(model.inputs[idx], input_shape,\n param.name, param.shape))\n\n if len(param.shape) > len(max_param_shape):\n max_param_shape = param.shape\n\n # We've now determined that, excluding the model_set_axis, the\n # input can broadcast with all the parameters\n input_ndim = len(input_shape)\n if model_set_axis is False:\n if len(max_param_shape) > input_ndim:\n # Just needs to prepend new axes to the input\n n_new_axes = 1 + len(max_param_shape) - input_ndim\n new_axes = (1,) * n_new_axes\n new_shape = new_axes + _input.shape\n pivot = model.model_set_axis\n else:\n pivot = input_ndim - len(max_param_shape)\n new_shape = (_input.shape[:pivot] + (1,) +\n _input.shape[pivot:])\n new_input = _input.reshape(new_shape)\n else:\n if len(max_param_shape) >= input_ndim:\n n_new_axes = len(max_param_shape) - input_ndim\n pivot = model.model_set_axis\n new_axes = (1,) * n_new_axes\n new_shape = (_input.shape[:pivot + 1] + new_axes +\n _input.shape[pivot + 1:])\n new_input = _input.reshape(new_shape)\n else:\n pivot = _input.ndim - len(max_param_shape) - 1\n new_input = np.rollaxis(_input, model_set_axis,\n pivot + 1)\n\n pivots.append(pivot)\n reshaped.append(new_input)\n\n if model.n_inputs < model.n_outputs:\n pivots.extend([model_set_axis] * (model.n_outputs - model.n_inputs))\n\n return reshaped, (pivots,)\n\n\ndef _prepare_outputs_model_set(model, outputs, format_info):\n pivots = format_info[0]\n\n outputs = list(outputs)\n\n for idx, output in enumerate(outputs):\n pivot = pivots[idx]\n if pivot < output.ndim and pivot != model.model_set_axis:\n outputs[idx] = np.rollaxis(output, pivot,\n model.model_set_axis)\n\n return tuple(outputs)\n\n\ndef _validate_input_shapes(inputs, argnames, n_models, model_set_axis,\n validate_broadcasting):\n \"\"\"\n Perform basic validation of model inputs--that they are mutually\n broadcastable and that they have the minimum dimensions for the given\n model_set_axis.\n\n If 
validation succeeds, returns the total shape that will result from\n broadcasting the input arrays with each other.\n \"\"\"\n\n check_model_set_axis = n_models > 1 and model_set_axis is not False\n\n if not (validate_broadcasting or check_model_set_axis):\n # Nothing else needed here\n return\n\n all_shapes = []\n\n for idx, _input in enumerate(inputs):\n input_shape = np.shape(_input)\n # Ensure that the input's model_set_axis matches the model's\n # n_models\n if input_shape and check_model_set_axis:\n # Note: Scalar inputs *only* get a pass on this\n if len(input_shape) < model_set_axis + 1:\n raise ValueError(\n \"For model_set_axis={0}, all inputs must be at \"\n \"least {1}-dimensional.\".format(\n model_set_axis, model_set_axis + 1))\n elif input_shape[model_set_axis] != n_models:\n raise ValueError(\n \"Input argument {0!r} does not have the correct \"\n \"dimensions in model_set_axis={1} for a model set with \"\n \"n_models={2}.\".format(argnames[idx], model_set_axis,\n n_models))\n all_shapes.append(input_shape)\n\n if not validate_broadcasting:\n return\n\n try:\n input_broadcast = check_broadcast(*all_shapes)\n except IncompatibleShapeError as exc:\n shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args\n arg_a = argnames[shape_a_idx]\n arg_b = argnames[shape_b_idx]\n\n raise ValueError(\n \"Model input argument {0!r} of shape {1!r} cannot \"\n \"be broadcast with input {2!r} of shape {3!r}\".format(\n arg_a, shape_a, arg_b, shape_b))\n\n return input_broadcast\n\n\ncopyreg.pickle(_ModelMeta, _ModelMeta.__reduce__)\ncopyreg.pickle(_CompoundModelMeta, _CompoundModelMeta.__reduce__)\n",
"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# TEST_UNICODE_LITERALS\n\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom ...extern.six.moves import cStringIO as StringIO\nfrom ... import units as u\nfrom ... import time\nfrom ... import coordinates\nfrom ... import table\nfrom ...utils.data_info import data_info_factory, dtype_info_name\nfrom ..table_helpers import simple_table\n\n\ndef test_table_info_attributes(table_types):\n \"\"\"\n Test the info() method of printing a summary of table column attributes\n \"\"\"\n a = np.array([1, 2, 3], dtype='int32')\n b = np.array([1, 2, 3], dtype='float32')\n c = np.array(['a', 'c', 'e'], dtype='|S1')\n t = table_types.Table([a, b, c], names=['a', 'b', 'c'])\n\n # Minimal output for a typical table\n tinfo = t.info(out=None)\n subcls = ['class'] if table_types.Table.__name__ == 'MyTable' else []\n assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format',\n 'description', 'class', 'n_bad', 'length']\n assert np.all(tinfo['name'] == ['a', 'b', 'c'])\n assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1')])\n if subcls:\n assert np.all(tinfo['class'] == ['MyColumn'] * 3)\n\n # All output fields including a mixin column\n t['d'] = [1, 2, 3] * u.m\n t['d'].description = 'quantity'\n t['a'].format = '%02d'\n t['e'] = time.Time([1, 2, 3], format='mjd')\n t['e'].info.description = 'time'\n t['f'] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit='deg')\n t['f'].info.description = 'skycoord'\n\n tinfo = t.info(out=None)\n assert np.all(tinfo['name'] == 'a b c d e f'.split())\n assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'float64',\n 'object', 'object'])\n assert np.all(tinfo['unit'] == ['', '', '', 'm', '', 'deg,deg'])\n assert np.all(tinfo['format'] == ['%02d', '', '', '', '', ''])\n assert np.all(tinfo['description'] == ['', '', '', 'quantity', 'time', 'skycoord'])\n cls = t.ColumnClass.__name__\n assert np.all(tinfo['class'] == [cls, cls, cls, cls, 'Time', 'SkyCoord'])\n\n # Test that repr(t.info) is same as t.info()\n out = StringIO()\n t.info(out=out)\n assert repr(t.info) == out.getvalue()\n\n\ndef test_table_info_stats(table_types):\n \"\"\"\n Test the info() method of printing a summary of table column statistics\n \"\"\"\n a = np.array([1, 2, 1, 2], dtype='int32')\n b = np.array([1, 2, 1, 2], dtype='float32')\n c = np.array(['a', 'c', 'e', 'f'], dtype='|S1')\n d = time.Time([1, 2, 1, 2], format='mjd')\n t = table_types.Table([a, b, c, d], names=['a', 'b', 'c', 'd'])\n\n # option = 'stats'\n masked = 'masked=True ' if t.masked else ''\n out = StringIO()\n t.info('stats', out=out)\n table_header_line = '<{0} {1}length=4>'.format(t.__class__.__name__, masked)\n exp = [table_header_line,\n 'name mean std min max',\n '---- ---- --- --- ---',\n ' a 1.5 0.5 1 2',\n ' b 1.5 0.5 1.0 2.0',\n ' c -- -- -- --',\n ' d -- -- 1.0 2.0']\n assert out.getvalue().splitlines() == exp\n\n # option = ['attributes', 'stats']\n tinfo = t.info(['attributes', 'stats'], out=None)\n assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',\n 'class', 'mean', 'std', 'min', 'max', 'n_bad', 'length']\n assert np.all(tinfo['mean'] == ['1.5', '1.5', '--', '--'])\n assert np.all(tinfo['std'] == ['0.5', '0.5', '--', '--'])\n assert np.all(tinfo['min'] == ['1', '1.0', '--', '1.0'])\n assert np.all(tinfo['max'] == ['2', '2.0', '--', '2.0'])\n\n out = StringIO()\n t.info('stats', out=out)\n exp = 
[table_header_line,\n 'name mean std min max',\n '---- ---- --- --- ---',\n ' a 1.5 0.5 1 2',\n ' b 1.5 0.5 1.0 2.0',\n ' c -- -- -- --',\n ' d -- -- 1.0 2.0']\n assert out.getvalue().splitlines() == exp\n\n # option = ['attributes', custom]\n custom = data_info_factory(names=['sum', 'first'],\n funcs=[np.sum, lambda col: col[0]])\n out = StringIO()\n tinfo = t.info(['attributes', custom], out=None)\n assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',\n 'class', 'sum', 'first', 'n_bad', 'length']\n assert np.all(tinfo['name'] == ['a', 'b', 'c', 'd'])\n assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'object'])\n assert np.all(tinfo['sum'] == ['6', '6.0', '--', '--'])\n assert np.all(tinfo['first'] == ['1', '1.0', 'a', '1.0'])\n\n\ndef test_data_info():\n \"\"\"\n Test getting info for just a column.\n \"\"\"\n cols = [table.Column([1.0, 2.0, np.nan], name='name',\n description='description', unit='m/s'),\n table.MaskedColumn([1.0, 2.0, 3.0], name='name',\n description='description', unit='m/s',\n mask=[False, False, True])]\n for c in cols:\n # Test getting the full ordered dict\n cinfo = c.info(out=None)\n assert cinfo == OrderedDict([('name', 'name'),\n ('dtype', 'float64'),\n ('shape', ''),\n ('unit', 'm / s'),\n ('format', ''),\n ('description', 'description'),\n ('class', type(c).__name__),\n ('n_bad', 1),\n ('length', 3)])\n\n # Test the console (string) version which omits trivial values\n out = StringIO()\n c.info(out=out)\n exp = ['name = name',\n 'dtype = float64',\n 'unit = m / s',\n 'description = description',\n 'class = {0}'.format(type(c).__name__),\n 'n_bad = 1',\n 'length = 3']\n assert out.getvalue().splitlines() == exp\n\n # repr(c.info) gives the same as c.info()\n assert repr(c.info) == out.getvalue()\n\n # Test stats info\n cinfo = c.info('stats', out=None)\n assert cinfo == OrderedDict([('name', 'name'),\n ('mean', '1.5'),\n ('std', '0.5'),\n ('min', '1.0'),\n ('max', '2.0'),\n ('n_bad', 1),\n ('length', 3)])\n\n\ndef test_data_info_subclass():\n class Column(table.Column):\n \"\"\"\n Confusingly named Column on purpose, but that is legal.\n \"\"\"\n pass\n for data in ([], [1, 2]):\n c = Column(data, dtype='int64')\n cinfo = c.info(out=None)\n assert cinfo == OrderedDict([('dtype', 'int64'),\n ('shape', ''),\n ('unit', ''),\n ('format', ''),\n ('description', ''),\n ('class', 'Column'),\n ('n_bad', 0),\n ('length', len(data))])\n\n\ndef test_scalar_info():\n \"\"\"\n Make sure info works with scalar values\n \"\"\"\n c = time.Time('2000:001')\n cinfo = c.info(out=None)\n assert cinfo['n_bad'] == 0\n assert 'length' not in cinfo\n\n\ndef test_empty_table():\n t = table.Table()\n out = StringIO()\n t.info(out=out)\n exp = ['<Table length=0>', '<No columns>']\n assert out.getvalue().splitlines() == exp\n\n\ndef test_class_attribute():\n \"\"\"\n Test that class info column is suppressed only for identical non-mixin\n columns.\n \"\"\"\n vals = [[1] * u.m, [2] * u.m]\n\n texp = ['<Table length=1>',\n 'name dtype unit',\n '---- ------- ----',\n 'col0 float64 m',\n 'col1 float64 m']\n\n qexp = ['<QTable length=1>',\n 'name dtype unit class ',\n '---- ------- ---- --------',\n 'col0 float64 m Quantity',\n 'col1 float64 m Quantity']\n\n for table_cls, exp in ((table.Table, texp),\n (table.QTable, qexp)):\n t = table_cls(vals)\n out = StringIO()\n t.info(out=out)\n assert out.getvalue().splitlines() == exp\n\n\ndef test_ignore_warnings():\n t = table.Table([[np.nan, np.nan]])\n with 
warnings.catch_warnings(record=True) as warns:\n t.info('stats', out=None)\n assert len(warns) == 0\n\n\ndef test_no_deprecation_warning():\n # regression test for #5459, where numpy deprecation warnings were\n # emitted unnecessarily.\n t = simple_table()\n with warnings.catch_warnings(record=True) as warns:\n t.info()\n assert len(warns) == 0\n"
] |
[
[
"numpy.prod",
"numpy.arange"
],
[
"numpy.logical_not",
"numpy.logical_or",
"numpy.array",
"numpy.ceil",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.rollaxis",
"numpy.shape",
"numpy.mean",
"numpy.any",
"numpy.size",
"numpy.ndim",
"numpy.asanyarray"
],
[
"numpy.all",
"numpy.array"
]
] |
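The `apis` cell above lists, per source file in the `code` cell, the fully qualified `numpy` calls that file makes. A minimal sketch of how such a column could be derived with Python's standard `ast` module; `extract_api_calls` is a hypothetical helper (not part of any dataset tooling shown here) that only resolves `import x as y` aliases and single-level `alias.attr(...)` calls:

```python
import ast

def extract_api_calls(source: str) -> set:
    """Collect fully qualified calls like 'numpy.mean' from Python source.

    Sketch only: handles `import x as y` aliases and calls of the form
    `alias.attr(...)`; `from x import y` and nested attributes are ignored.
    """
    tree = ast.parse(source)
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name] = alias.name
    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            value = node.func.value
            if isinstance(value, ast.Name) and value.id in aliases:
                calls.add('{0}.{1}'.format(aliases[value.id], node.func.attr))
    return calls

print(sorted(extract_api_calls("import numpy as np\nx = np.mean(np.zeros(3))")))
# ['numpy.mean', 'numpy.zeros']
```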
wright/dymos
|
[
"9d253a16ffcc162a84ef1b4a7dddcebeda5522ac"
] |
[
"dymos/transcriptions/runge_kutta/components/runge_kutta_k_comp.py"
] |
[
"import numpy as np\n\nimport openmdao.api as om\nfrom ....utils.rk_methods import rk_methods\nfrom ....utils.misc import get_rate_units\nfrom ....options import options as dymos_options\n\n\nclass RungeKuttaKComp(om.ExplicitComponent):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._no_check_partials = not dymos_options['include_check_partials']\n\n def initialize(self):\n self.options.declare('num_segments', types=int,\n desc='The number of segments (timesteps) in the phase')\n\n self.options.declare('method', default='RK4', types=str,\n desc='Specific Runge-Kutta Method to use.')\n\n self.options.declare('state_options', types=dict,\n desc='Dictionary of state names/options for the phase')\n\n self.options.declare('time_units', default=None, allow_none=True, types=str,\n desc='Units of the integration variable')\n\n def configure_io(self):\n \"\"\"\n I/O creation is delayed until configure so that we can determine the shape and units for\n the states.\n \"\"\"\n self._var_names = {}\n\n num_seg = self.options['num_segments']\n rk_data = rk_methods[self.options['method']]\n num_stages = rk_data['num_stages']\n\n self.add_input('h', val=np.ones(num_seg), units=self.options['time_units'],\n desc='step size for current Runge-Kutta segment.')\n\n for name, options in self.options['state_options'].items():\n shape = options['shape']\n units = options['units']\n rate_units = get_rate_units(units, self.options['time_units'])\n\n self._var_names[name] = {}\n self._var_names[name]['f'] = 'f:{0}'.format(name)\n self._var_names[name]['k'] = 'k:{0}'.format(name)\n\n self.add_input(self._var_names[name]['f'], shape=(num_seg, num_stages) + shape,\n units=rate_units,\n desc='The predicted values of the state at the ODE evaluation points.')\n\n self.add_output(self._var_names[name]['k'], shape=(num_seg, num_stages) + shape,\n units=units, desc='RK multiplier k for each stage in the segment.')\n\n size = np.prod(shape)\n ar = np.arange(size * num_stages * num_seg, dtype=int)\n self.declare_partials(of=self._var_names[name]['k'],\n wrt=self._var_names[name]['f'],\n rows=ar, cols=ar)\n\n r = np.arange(size * num_stages * num_seg, dtype=int)\n c = np.repeat(np.arange(num_seg, dtype=int), num_stages * size)\n self.declare_partials(of=self._var_names[name]['k'],\n wrt='h',\n rows=r, cols=c)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n h = inputs['h']\n for name, options in self.options['state_options'].items():\n f = inputs[self._var_names[name]['f']]\n outputs[self._var_names[name]['k']] = f * h[:, np.newaxis, np.newaxis]\n\n def compute_partials(self, inputs, partials):\n num_stages = rk_methods[self.options['method']]['num_stages']\n h = inputs['h']\n for name, options in self.options['state_options'].items():\n size = np.prod(options['shape'])\n k_name = self._var_names[name]['k']\n f_name = self._var_names[name]['f']\n partials[k_name, f_name] = np.repeat(h, num_stages * size)\n partials[k_name, 'h'] = inputs[self._var_names[name]['f']].ravel()\n"
] |
[
[
"numpy.repeat",
"numpy.prod",
"numpy.arange",
"numpy.ones"
]
] |
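In `RungeKuttaKComp` above, `compute` forms k = f * h with `h` broadcast across stages, so the declared sparse partials are a diagonal `np.repeat(h, num_stages * size)` for dk/df and `f.ravel()` for dk/dh. A standalone NumPy check of that pattern with toy dimensions (no OpenMDAO required):

```python
import numpy as np

num_seg, num_stages, size = 3, 4, 2          # toy dimensions
h = np.array([0.1, 0.2, 0.3])                # one step size per segment
f = np.random.rand(num_seg, num_stages, size)

k = f * h[:, np.newaxis, np.newaxis]

dk_df = np.repeat(h, num_stages * size)      # diagonal Jacobian entries, flattened
dk_dh = f.ravel()                            # one column of dk/dh per segment

eps = 1e-7
# finite-difference the first flat k entry w.r.t. its matching f entry
f2 = f.copy()
f2.flat[0] += eps
k2 = f2 * h[:, np.newaxis, np.newaxis]
assert np.isclose((k2 - k).flat[0] / eps, dk_df[0])

# ...and w.r.t. the step size of segment 0
h2 = h.copy()
h2[0] += eps
k3 = f * h2[:, np.newaxis, np.newaxis]
assert np.isclose((k3 - k).flat[0] / eps, dk_dh[0])
```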
noveens/sampling_cf
|
[
"e135819b1e7310ee58edbbd138f303e5240a2619"
] |
[
"pytorch_models/NeuMF.py"
] |
[
"import torch\nimport torch.nn as nn\n\nfrom pytorch_models.MF import BaseMF\n\nclass GMF(BaseMF):\n def __init__(self, hyper_params):\n super(GMF, self).__init__(hyper_params)\n \n self.final = nn.Linear(hyper_params['latent_size'], 1)\n self.dropout = nn.Dropout(hyper_params['dropout'])\n\n def get_score(self, user_id, item_id):\n # For the FM\n user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)\n item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)\n\n # Embed Latent space\n user = self.dropout(self.user_embedding(user_id.view(-1)))\n item = self.dropout(self.item_embedding(item_id.view(-1)))\n joint = user * item\n rating = self.final(joint)[:, 0].view(user_id.shape) # [bsz]\n return user_bias + item_bias + self.global_bias + rating\n\nclass MLP(BaseMF):\n def __init__(self, hyper_params):\n super(MLP, self).__init__(hyper_params)\n\n self.project = nn.Sequential(\n nn.Dropout(hyper_params['dropout']),\n nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),\n nn.ReLU(),\n nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])\n )\n self.final = nn.Linear(hyper_params['latent_size'], 1)\n self.dropout = nn.Dropout(hyper_params['dropout'])\n\n def get_score(self, user_id, item_id):\n # For the FM\n user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)\n item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)\n\n # Embed Latent space\n user = self.dropout(self.user_embedding(user_id.view(-1)))\n item = self.dropout(self.item_embedding(item_id.view(-1)))\n \n joint = torch.cat([ user, item ], dim = -1)\n joint = self.project(joint)\n rating = self.final(joint)[:, 0].view(user_id.shape)\n return user_bias + item_bias + self.global_bias + rating\n\nclass NeuMF(BaseMF):\n def __init__(self, hyper_params):\n super(NeuMF, self).__init__(hyper_params, keep_gamma = False)\n \n self.gmf_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])\n self.gmf_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])\n\n self.mlp_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])\n self.mlp_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])\n\n self.project = nn.Sequential(\n nn.Dropout(hyper_params['dropout']),\n nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),\n nn.ReLU(),\n nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])\n )\n self.final = nn.Linear(2 * hyper_params['latent_size'], 1)\n self.dropout = nn.Dropout(hyper_params['dropout'])\n\n def init(self, gmf_model, mlp_model):\n with torch.no_grad():\n self.gmf_user_embedding.weight.data = gmf_model.user_embedding.weight.data\n self.gmf_item_embedding.weight.data = gmf_model.item_embedding.weight.data\n\n self.mlp_user_embedding.weight.data = mlp_model.user_embedding.weight.data\n self.mlp_item_embedding.weight.data = mlp_model.item_embedding.weight.data\n\n for i in range(len(self.project)): \n try:\n self.project[i].weight.data = mlp_model.project[i].weight.data\n self.project[i].bias.data = mlp_model.project[i].bias.data\n except: pass\n\n self.final.weight.data = torch.cat([ gmf_model.final.weight.data, mlp_model.final.weight.data ], dim = -1)\n self.final.bias.data = 0.5 * (gmf_model.final.bias.data + mlp_model.final.bias.data)\n\n self.user_bias.data = 0.5 * (gmf_model.user_bias.data + mlp_model.user_bias.data)\n self.item_bias.data = 0.5 * 
(gmf_model.item_bias.data + mlp_model.item_bias.data)\n\n def get_score(self, user_id, item_id):\n # For the FM\n user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)\n item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)\n\n # GMF Part\n user = self.dropout(self.gmf_user_embedding(user_id.view(-1))) # [bsz x 32]\n item = self.dropout(self.gmf_item_embedding(item_id.view(-1))) # [bsz x 32]\n gmf_joint = user * item\n\n # MLP Part\n user = self.dropout(self.mlp_user_embedding(user_id.view(-1))) # [bsz x 32]\n item = self.dropout(self.mlp_item_embedding(item_id.view(-1))) # [bsz x 32]\n mlp_joint = torch.cat([ user, item ], dim = -1)\n mlp_joint = self.project(mlp_joint)\n\n # NeuMF\n final = torch.cat([ gmf_joint, mlp_joint ], dim = -1)\n rating = self.final(final)[:, 0].view(user_id.shape) # [bsz]\n\n return user_bias + item_bias + self.global_bias + rating\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.Embedding"
]
] |
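`NeuMF.get_score` above fuses a GMF branch (elementwise product of one embedding pair) with an MLP branch (concatenation of a second pair, then a projection) before a final linear layer. A minimal self-contained PyTorch sketch of just that fusion, with made-up sizes and the `BaseMF` bias terms omitted (they live in `pytorch_models.MF`, which is not shown in this row):

```python
import torch
import torch.nn as nn

n_users, n_items, d = 100, 50, 8
gmf_u, gmf_i = nn.Embedding(n_users, d), nn.Embedding(n_items, d)
mlp_u, mlp_i = nn.Embedding(n_users, d), nn.Embedding(n_items, d)
project = nn.Sequential(nn.Linear(2 * d, d), nn.ReLU(), nn.Linear(d, d))
final = nn.Linear(2 * d, 1)

user_id = torch.tensor([0, 1, 2])
item_id = torch.tensor([3, 4, 5])

gmf_joint = gmf_u(user_id) * gmf_i(item_id)                       # [bsz, d]
mlp_joint = project(torch.cat([mlp_u(user_id), mlp_i(item_id)], dim=-1))
rating = final(torch.cat([gmf_joint, mlp_joint], dim=-1))[:, 0]   # [bsz]
print(rating.shape)  # torch.Size([3])
```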
ostodieck/sharpy
|
[
"aed86428ff88fd14d36cabd91cf7e04b5fc9a39a"
] |
[
"tests/coupled/static/smith_g_4deg/generate_smith_g_4deg.py"
] |
[
"import h5py as h5\nimport numpy as np\nimport configparser\nimport os\n\nimport sharpy.utils.algebra as algebra\n\ncase_name = 'smith_g_4deg'\nroute = os.path.dirname(os.path.realpath(__file__)) + '/'\n\n# flight conditions\nu_inf = 25\nrho = 0.08891\nalpha = 4\nbeta = 0\nc_ref = 1\nb_ref = 16\nsweep = 0*np.pi/180.\naspect_ratio = 32 # = total wing span (chord = 1)\n\nalpha_rad = alpha*np.pi/180\n\n# main geometry data\nmain_span = aspect_ratio/2./np.cos(sweep)\nmain_chord = 1.0\nmain_ea = 0.5\nmain_sigma = 1\nmain_airfoil_P = 0\nmain_airfoil_M = 0\n\nn_surfaces = 2\n\n# discretisation data\nnum_elem_main = 10\n\nnum_node_elem = 3\nnum_elem = num_elem_main + num_elem_main\nnum_node_main = num_elem_main*(num_node_elem - 1) + 1\nnum_node = num_node_main + (num_node_main - 1)\n\nm_main = 10\n\n\ndef clean_test_files():\n fem_file_name = route + '/' + case_name + '.fem.h5'\n if os.path.isfile(fem_file_name):\n os.remove(fem_file_name)\n\n aero_file_name = route + '/' + case_name + '.aero.h5'\n if os.path.isfile(aero_file_name):\n os.remove(aero_file_name)\n\n solver_file_name = route + '/' + case_name + '.sharpy'\n if os.path.isfile(solver_file_name):\n os.remove(solver_file_name)\n\n flightcon_file_name = route + '/' + case_name + '.flightcon.txt'\n if os.path.isfile(flightcon_file_name):\n os.remove(flightcon_file_name)\n\n\ndef generate_fem_file():\n # placeholders\n # coordinates\n global x, y, z\n x = np.zeros((num_node, ))\n y = np.zeros((num_node, ))\n z = np.zeros((num_node, ))\n # struct twist\n structural_twist = np.zeros((num_elem, num_node_elem))\n # beam number\n beam_number = np.zeros((num_elem, ), dtype=int)\n # frame of reference delta\n frame_of_reference_delta = np.zeros((num_elem, num_node_elem, 3))\n # connectivities\n conn = np.zeros((num_elem, num_node_elem), dtype=int)\n # stiffness\n num_stiffness = 1\n ea = 1e5\n ga = 1e5\n gj = 1e4\n eiy = 2e4\n eiz = 5e6\n sigma = 1.\n base_stiffness = sigma*np.diag([ea, ga, ga, gj, eiy, eiz])\n stiffness = np.zeros((num_stiffness, 6, 6))\n stiffness[0, :, :] = main_sigma*base_stiffness\n elem_stiffness = np.zeros((num_elem,), dtype=int)\n # mass\n num_mass = 1\n m_base = 0.75\n j_base = 0.1\n base_mass = np.diag([m_base, m_base, m_base, j_base, j_base, j_base])\n mass = np.zeros((num_mass, 6, 6))\n mass[0, :, :] = base_mass\n elem_mass = np.zeros((num_elem,), dtype=int)\n # boundary conditions\n boundary_conditions = np.zeros((num_node, ), dtype=int)\n boundary_conditions[0] = 1\n # applied forces\n # n_app_forces = 2\n # node_app_forces = np.zeros((n_app_forces,), dtype=int)\n app_forces = np.zeros((num_node, 6))\n\n spacing_param = 4\n\n # right wing (beam 0) --------------------------------------------------------------\n working_elem = 0\n working_node = 0\n beam_number[working_elem:working_elem + num_elem_main] = 0\n domain = np.linspace(0, 1.0, num_node_main)\n # 16 - (np.geomspace(20, 4, 10) - 4)\n # x[working_node:working_node + num_node_main] = np.sin(sweep)*(main_span - (np.geomspace(main_span + spacing_param,\n # 0 + spacing_param,\n # num_node_main)\n # - spacing_param))\n # y[working_node:working_node + num_node_main] = np.abs(np.cos(sweep)*(main_span - (np.geomspace(main_span + spacing_param,\n # 0 + spacing_param,\n # num_node_main)\n # - spacing_param)))\n y[0] = 0\n y[working_node:working_node + num_node_main] = np.cos(sweep)*np.linspace(0.0, main_span, num_node_main)\n x[working_node:working_node + num_node_main] = np.sin(sweep)*np.linspace(0.0, main_span, num_node_main)\n for ielem in range(num_elem_main):\n for 
inode in range(num_node_elem):\n frame_of_reference_delta[working_elem + ielem, inode, :] = [-1, 0, 0]\n # connectivity\n for ielem in range(num_elem_main):\n conn[working_elem + ielem, :] = ((np.ones((3,))*(working_elem + ielem)*(num_node_elem - 1)) +\n [0, 2, 1])\n elem_stiffness[working_elem:working_elem + num_elem_main] = 0\n elem_mass[working_elem:working_elem + num_elem_main] = 0\n boundary_conditions[0] = 1\n boundary_conditions[working_node + num_node_main - 1] = -1\n working_elem += num_elem_main\n working_node += num_node_main\n\n # left wing (beam 1) --------------------------------------------------------------\n beam_number[working_elem:working_elem + num_elem_main] = 1\n domain = np.linspace(-1.0, 0.0, num_node_main)\n tempy = np.linspace(-main_span, 0.0, num_node_main)\n x[working_node:working_node + num_node_main - 1] = -np.sin(sweep)*tempy[0:-1]\n y[working_node:working_node + num_node_main - 1] = np.cos(sweep)*tempy[0:-1]\n # x[working_node:working_node + num_node_main - 1] = -np.sin(sweep)*(main_span - (np.geomspace(0 + spacing_param,\n # main_span + spacing_param,\n # num_node_main)[:-1]\n # - spacing_param))\n # y[working_node:working_node + num_node_main - 1] = -np.abs(np.cos(sweep)*(main_span - (np.geomspace(0 + spacing_param,\n # main_span + spacing_param,\n # num_node_main)[:-1]\n # - spacing_param)))\n for ielem in range(num_elem_main):\n for inode in range(num_node_elem):\n frame_of_reference_delta[working_elem + ielem, inode, :] = [-1, 0, 0]\n # connectivity\n for ielem in range(num_elem_main):\n conn[working_elem + ielem, :] = ((np.ones((3,))*(working_elem + ielem)*(num_node_elem - 1)) +\n [0, 2, 1]) + 1\n conn[working_elem + num_elem_main - 1, 1] = 0\n elem_stiffness[working_elem:working_elem + num_elem_main] = 0\n elem_mass[working_elem:working_elem + num_elem_main] = 0\n boundary_conditions[working_node] = -1\n working_elem += num_elem_main\n working_node += num_node_main - 1\n\n with h5.File(route + '/' + case_name + '.fem.h5', 'a') as h5file:\n coordinates = h5file.create_dataset('coordinates', data=np.column_stack((x, y, z)))\n conectivities = h5file.create_dataset('connectivities', data=conn)\n num_nodes_elem_handle = h5file.create_dataset(\n 'num_node_elem', data=num_node_elem)\n num_nodes_handle = h5file.create_dataset(\n 'num_node', data=num_node)\n num_elem_handle = h5file.create_dataset(\n 'num_elem', data=num_elem)\n stiffness_db_handle = h5file.create_dataset(\n 'stiffness_db', data=stiffness)\n stiffness_handle = h5file.create_dataset(\n 'elem_stiffness', data=elem_stiffness)\n mass_db_handle = h5file.create_dataset(\n 'mass_db', data=mass)\n mass_handle = h5file.create_dataset(\n 'elem_mass', data=elem_mass)\n frame_of_reference_delta_handle = h5file.create_dataset(\n 'frame_of_reference_delta', data=frame_of_reference_delta)\n structural_twist_handle = h5file.create_dataset(\n 'structural_twist', data=structural_twist)\n bocos_handle = h5file.create_dataset(\n 'boundary_conditions', data=boundary_conditions)\n beam_handle = h5file.create_dataset(\n 'beam_number', data=beam_number)\n app_forces_handle = h5file.create_dataset(\n 'app_forces', data=app_forces)\n # node_app_forces_handle = h5file.create_dataset(\n # 'node_app_forces', data=node_app_forces)\n\n\ndef generate_aero_file():\n global x, y, z\n airfoil_distribution = np.zeros((num_elem, num_node_elem), dtype=int)\n surface_distribution = np.zeros((num_elem,), dtype=int) - 1\n surface_m = np.zeros((n_surfaces, ), dtype=int)\n m_distribution = 'uniform'\n aero_node = np.zeros((num_node,), 
dtype=bool)\n twist = np.zeros((num_elem, 3))\n chord = np.zeros((num_elem, 3))\n elastic_axis = np.zeros((num_elem, 3,))\n\n working_elem = 0\n working_node = 0\n # right wing (surface 0, beam 0)\n i_surf = 0\n airfoil_distribution[working_elem:working_elem + num_elem_main, :] = 0\n surface_distribution[working_elem:working_elem + num_elem_main] = i_surf\n surface_m[i_surf] = m_main\n aero_node[working_node:working_node + num_node_main] = True\n chord[:] = main_chord\n elastic_axis[:] = main_ea\n working_elem += num_elem_main\n working_node += num_node_main\n\n # left wing (surface 1, beam 1)\n i_surf = 1\n airfoil_distribution[working_elem:working_elem + num_elem_main, :] = 0\n surface_distribution[working_elem:working_elem + num_elem_main] = i_surf\n surface_m[i_surf] = m_main\n aero_node[working_node:working_node + num_node_main - 1] = True\n # chord[working_node:working_node + num_node_main - 1] = main_chord\n # elastic_axis[working_node:working_node + num_node_main - 1] = main_ea\n working_elem += num_elem_main\n working_node += num_node_main - 1\n\n with h5.File(route + '/' + case_name + '.aero.h5', 'a') as h5file:\n airfoils_group = h5file.create_group('airfoils')\n # add one airfoil\n naca_airfoil_main = airfoils_group.create_dataset('0', data=np.column_stack(\n generate_naca_camber(P=main_airfoil_P, M=main_airfoil_M)))\n # chord\n chord_input = h5file.create_dataset('chord', data=chord)\n dim_attr = chord_input .attrs['units'] = 'm'\n\n # twist\n twist_input = h5file.create_dataset('twist', data=twist)\n dim_attr = twist_input.attrs['units'] = 'rad'\n\n # airfoil distribution\n airfoil_distribution_input = h5file.create_dataset('airfoil_distribution', data=airfoil_distribution)\n\n surface_distribution_input = h5file.create_dataset('surface_distribution', data=surface_distribution)\n surface_m_input = h5file.create_dataset('surface_m', data=surface_m)\n m_distribution_input = h5file.create_dataset('m_distribution', data=m_distribution.encode('ascii', 'ignore'))\n\n aero_node_input = h5file.create_dataset('aero_node', data=aero_node)\n elastic_axis_input = h5file.create_dataset('elastic_axis', data=elastic_axis)\n\n\ndef generate_naca_camber(M=0, P=0):\n m = M*1e-2\n p = P*1e-1\n\n def naca(x, m, p):\n if x < 1e-6:\n return 0.0\n elif x < p:\n return m/(p*p)*(2*p*x - x*x)\n elif x > p and x < 1+1e-6:\n return m/((1-p)*(1-p))*(1 - 2*p + 2*p*x - x*x)\n\n x_vec = np.linspace(0, 1, 1000)\n y_vec = np.array([naca(x, m, p) for x in x_vec])\n return x_vec, y_vec\n\n\ndef generate_solver_file(horseshoe=False):\n file_name = route + '/' + case_name + '.sharpy'\n # config = configparser.ConfigParser()\n import configobj\n config = configobj.ConfigObj()\n config.filename = file_name\n config['SHARPy'] = {'case': case_name,\n 'route': route,\n 'flow': ['BeamLoader', 'AerogridLoader', 'StaticCoupled', 'AerogridPlot', 'BeamPlot', 'AeroForcesCalculator', 'WriteVariablesTime'],\n # 'flow': ['BeamLoader', 'NonLinearStatic', 'BeamPlot'],\n 'write_screen': 'off',\n 'write_log': 'on',\n 'log_folder': route + '/output/',\n 'log_file': case_name + '.log'}\n config['BeamLoader'] = {'unsteady': 'off',\n 'orientation': algebra.euler2quat(np.array([0.0,\n alpha_rad,\n beta*np.pi/180]))}\n config['StaticCoupled'] = {'print_info': 'on',\n 'structural_solver': 'NonLinearStatic',\n 'structural_solver_settings': {'print_info': 'off',\n 'max_iterations': 150,\n 'num_load_steps': 1,\n 'delta_curved': 1e-5,\n 'min_delta': 1e-8,\n 'gravity_on': 'on',\n 'gravity': 9.754},\n 'aero_solver': 'StaticUvlm',\n 
'aero_solver_settings': {'print_info': 'off',\n 'horseshoe': 'on',\n 'num_cores': 4,\n 'n_rollup': 100,\n 'rollup_dt': main_chord/m_main/u_inf,\n 'rollup_aic_refresh': 1,\n 'rollup_tolerance': 1e-4,\n 'velocity_field_generator': 'SteadyVelocityField',\n 'velocity_field_input': {'u_inf': u_inf,\n 'u_inf_direction': [1., 0, 0]},\n 'rho': rho,\n 'alpha': alpha_rad,\n 'beta': beta},\n 'max_iter': 100,\n 'n_load_steps': 5,\n 'tolerance': 1e-5,\n 'relaxation_factor': 0.}\n config['WriteVariablesTime'] = {'cleanup_old_solution': 'on',\n 'folder': route + '/output/',\n 'structure_variables': ['pos'],\n 'structure_nodes': [num_node_main - 1]}\n\n if horseshoe is True:\n config['AerogridLoader'] = {'unsteady': 'off',\n 'aligned_grid': 'on',\n 'mstar': 1,\n 'freestream_dir': ['1', '0', '0']}\n else:\n config['AerogridLoader'] = {'unsteady': 'off',\n 'aligned_grid': 'on',\n 'mstar': 80,\n 'freestream_dir': ['1', '0', '0']}\n config['AerogridPlot'] = {'folder': route + '/output/',\n 'include_rbm': 'off',\n 'include_applied_forces': 'on',\n 'minus_m_star': 0\n }\n config['AeroForcesCalculator'] = {'folder': route + '/output/forces',\n 'write_text_file': 'on',\n 'text_file_name': case_name + '_aeroforces.csv',\n 'screen_output': 'on',\n 'unsteady': 'off'\n }\n config['BeamPlot'] = {'folder': route + '/output/',\n 'include_rbm': 'off',\n 'include_applied_forces': 'on'}\n config.write()\n\n\nclean_test_files()\ngenerate_fem_file()\ngenerate_solver_file(horseshoe=True)\ngenerate_aero_file()\n"
] |
[
[
"numpy.sin",
"numpy.array",
"numpy.column_stack",
"numpy.zeros",
"numpy.ones",
"numpy.cos",
"numpy.linspace",
"numpy.diag"
]
] |
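`generate_naca_camber` above implements the NACA 4-digit camber line, where `M` is the maximum camber in percent chord and `P` its chordwise position in tenths of chord. A quick standalone check that the peak lands at x = P/10 with height M/100 (here M=2, P=4, i.e. a NACA 24xx camber line):

```python
import numpy as np

def camber(x, m, p):
    # Same piecewise parabola as the naca() helper above, written for all x.
    if x < p:
        return m / p**2 * (2*p*x - x*x)
    return m / (1 - p)**2 * (1 - 2*p + 2*p*x - x*x)

m, p = 2e-2, 4e-1                       # M=2 (percent), P=4 (tenths)
x = np.linspace(0.0, 1.0, 1000)
y = np.array([camber(xi, m, p) for xi in x])
print(round(x[np.argmax(y)], 3), round(y.max(), 4))  # ~0.4, ~0.02
```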
leeeeeeeee2/srgan
|
[
"608a5fa30f7039da11c18ad70f84f27755cfba6d"
] |
[
"models.py"
] |
[
"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom torchvision.models import vgg19\nimport math\n\n\nclass FeatureExtractor(nn.Module):\n def __init__(self):\n super(FeatureExtractor, self).__init__()\n vgg19_model = vgg19(pretrained=True)\n self.feature_extractor = nn.Sequential(*list(vgg19_model.features.children())[:18])\n\n def forward(self, img):\n return self.feature_extractor(img)\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_features):\n super(ResidualBlock, self).__init__()\n self.conv_block = nn.Sequential(\n nn.Conv2d(in_features, in_features, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(in_features, 0.8),\n nn.PReLU(),\n nn.Conv2d(in_features, in_features, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(in_features, 0.8),\n )\n\n def forward(self, x):\n return x + self.conv_block(x)\n\n\nclass GeneratorResNet(nn.Module):\n def __init__(self, in_channels=3, out_channels=3, n_residual_blocks=16):\n super(GeneratorResNet, self).__init__()\n\n # First layer\n self.conv1 = nn.Sequential(nn.Conv2d(in_channels, 64, kernel_size=9, stride=1, padding=4), nn.PReLU())\n\n # Residual blocks\n res_blocks = []\n for _ in range(n_residual_blocks):\n res_blocks.append(ResidualBlock(64))\n self.res_blocks = nn.Sequential(*res_blocks)\n\n # Second conv layer post residual blocks\n self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64, 0.8))\n\n # Upsampling layers\n upsampling = []\n for out_features in range(2):\n upsampling += [\n # nn.Upsample(scale_factor=2),\n nn.Conv2d(64, 256, 3, 1, 1),\n nn.BatchNorm2d(256),\n nn.PixelShuffle(upscale_factor=2),\n nn.PReLU(),\n ]\n self.upsampling = nn.Sequential(*upsampling)\n\n # Final output layer\n self.conv3 = nn.Sequential(nn.Conv2d(64, out_channels, kernel_size=9, stride=1, padding=4), nn.Tanh())\n\n def forward(self, x):\n out1 = self.conv1(x)\n out = self.res_blocks(out1)\n out2 = self.conv2(out)\n out = torch.add(out1, out2)\n out = self.upsampling(out)\n out = self.conv3(out)\n return out\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_shape):\n super(Discriminator, self).__init__()\n\n self.input_shape = input_shape\n in_channels, in_height, in_width = self.input_shape\n patch_h, patch_w = int(in_height / 2 ** 4), int(in_width / 2 ** 4)\n self.output_shape = (1, patch_h, patch_w)\n\n def discriminator_block(in_filters, out_filters, first_block=False):\n layers = []\n layers.append(nn.Conv2d(in_filters, out_filters, kernel_size=3, stride=1, padding=1))\n if not first_block:\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n layers.append(nn.Conv2d(out_filters, out_filters, kernel_size=3, stride=2, padding=1))\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = in_channels\n for i, out_filters in enumerate([64, 128, 256, 512]):\n layers.extend(discriminator_block(in_filters, out_filters, first_block=(i == 0)))\n in_filters = out_filters\n\n layers.append(nn.Conv2d(out_filters, 1, kernel_size=3, stride=1, padding=1))\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, img):\n return self.model(img)"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.add",
"torch.nn.PixelShuffle",
"torch.nn.Conv2d",
"torch.nn.PReLU"
]
] |
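The upsampling stage in `GeneratorResNet` above relies on sub-pixel convolution: `Conv2d(64, 256)` followed by `PixelShuffle(2)` trades a 4x channel increase for a 2x spatial upscale, and two stacked stages give the overall 4x super-resolution factor. A quick shape check of that claim:

```python
import torch
import torch.nn as nn

# One sub-pixel upsampling stage, as in GeneratorResNet (BatchNorm omitted).
stage = nn.Sequential(nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.PReLU())

x = torch.randn(1, 64, 24, 24)
y = stage(stage(x))          # two stages -> 4x spatial upscale
print(y.shape)               # torch.Size([1, 64, 96, 96])
```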
moshelooks/incubator-mxnet
|
[
"5245ef68191a6d47594bf331ec6e20ba6e93ad4c"
] |
[
"example/onnx/super_resolution.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Testing super_resolution model conversion\"\"\"\nfrom __future__ import absolute_import as _abs\nfrom __future__ import print_function\nfrom collections import namedtuple\nimport logging\nimport numpy as np\nfrom PIL import Image\nimport mxnet as mx\nfrom mxnet.test_utils import download\nimport mxnet.contrib.onnx as onnx_mxnet\n\n# set up logger\nlogging.basicConfig()\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\n\ndef import_onnx():\n \"\"\"Import the onnx model into mxnet\"\"\"\n model_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx'\n download(model_url, 'super_resolution.onnx')\n\n LOGGER.info(\"Converting onnx format to mxnet's symbol and params...\")\n sym, arg_params, aux_params = onnx_mxnet.import_model('super_resolution.onnx')\n LOGGER.info(\"Successfully Converted onnx format to mxnet's symbol and params...\")\n return sym, arg_params, aux_params\n\ndef get_test_image():\n \"\"\"Download and process the test image\"\"\"\n # Load test image\n input_image_dim = 224\n img_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_res_input.jpg'\n download(img_url, 'super_res_input.jpg')\n img = Image.open('super_res_input.jpg').resize((input_image_dim, input_image_dim))\n img_ycbcr = img.convert(\"YCbCr\")\n img_y, img_cb, img_cr = img_ycbcr.split()\n input_image = np.array(img_y)[np.newaxis, np.newaxis, :, :]\n return input_image, img_cb, img_cr\n\ndef perform_inference(sym, arg_params, aux_params, input_img, img_cb, img_cr):\n \"\"\"Perform inference on image using mxnet\"\"\"\n # create module\n mod = mx.mod.Module(symbol=sym, data_names=['input_0'], label_names=None)\n mod.bind(for_training=False, data_shapes=[('input_0', input_img.shape)])\n mod.set_params(arg_params=arg_params, aux_params=aux_params)\n\n # run inference\n batch = namedtuple('Batch', ['data'])\n mod.forward(batch([mx.nd.array(input_img)]))\n\n # Save the result\n img_out_y = Image.fromarray(np.uint8(mod.get_outputs()[0][0][0].\n asnumpy().clip(0, 255)), mode='L')\n\n result_img = Image.merge(\n \"YCbCr\", [img_out_y,\n img_cb.resize(img_out_y.size, Image.BICUBIC),\n img_cr.resize(img_out_y.size, Image.BICUBIC)]).convert(\"RGB\")\n output_img_dim = 672\n assert result_img.size == (output_img_dim, output_img_dim)\n LOGGER.info(\"Super Resolution example success.\")\n result_img.save(\"super_res_output.jpg\")\n return result_img\n\nif __name__ == '__main__':\n MX_SYM, MX_ARG_PARAM, MX_AUX_PARAM = import_onnx()\n INPUT_IMG, IMG_CB, IMG_CR = get_test_image()\n perform_inference(MX_SYM, MX_ARG_PARAM, MX_AUX_PARAM, INPUT_IMG, IMG_CB, IMG_CR)\n"
] |
[
[
"numpy.array"
]
] |
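`perform_inference` above super-resolves only the luma (Y) channel and bicubically resizes the chroma (Cb, Cr) channels before merging back to RGB. A PIL-only sketch of that recombination, with a plain bicubic resize standing in for the network output (the image content here is made up):

```python
from PIL import Image

# Stand-in input image; the real example downloads super_res_input.jpg.
img = Image.new('RGB', (224, 224), (120, 80, 200))
img_y, img_cb, img_cr = img.convert('YCbCr').split()

# A bicubic resize plays the role of the super-resolution model output.
out_y = img_y.resize((672, 672), Image.BICUBIC)
result = Image.merge('YCbCr',
                     [out_y,
                      img_cb.resize(out_y.size, Image.BICUBIC),
                      img_cr.resize(out_y.size, Image.BICUBIC)]).convert('RGB')
print(result.size)  # (672, 672)
```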
almarklein/stentseg
|
[
"48255fffdc2394d1dc4ce2208c9a91e1d4c35a46",
"48255fffdc2394d1dc4ce2208c9a91e1d4c35a46"
] |
[
"lspeas/phantom/stats_alg_vs_cam123mean_error_2scanners.py",
"lspeas/phantom/plotting_result_error.py"
] |
[
"\"\"\" Read position errors from excel for statistical analysis\r\n\r\n\"\"\"\r\nimport os\r\nfrom stentseg.utils.datahandling import select_dir\r\nimport openpyxl # http://openpyxl.readthedocs.org/\r\nimport numpy as np\r\nfrom lspeas.utils.normality_statistics import paired_samples_ttest\r\n\r\n\r\ndef read_error_cam123(exceldir, workbook, profiles):\r\n \"\"\" read the absolute errors for 10 timepositions for all stent points\r\n \"\"\"\r\n wb = openpyxl.load_workbook(os.path.join(exceldir, workbook), data_only=True)\r\n abs_errors_profiles = []\r\n for profile in profiles:\r\n sheet = wb.get_sheet_by_name(profile)\r\n abs_errors_profile = []\r\n for phaserow in range(8,18): # excel rows 21-30 when 20,30; rows 9-18 when 8,18\r\n abs_errors = sheet.rows[phaserow][1:] # skip first col with notes\r\n abs_errors = [obj.value for obj in abs_errors if obj.value is not None]\r\n abs_errors_profile.append(abs_errors)\r\n spread = np.concatenate([a for a in abs_errors_profile], axis=0)\r\n abs_errors_profiles.append(spread)\r\n \r\n return abs_errors_profiles\r\n\r\ndef read_ampl_errorcam123(exceldir, workbook, profile):\r\n wb = openpyxl.load_workbook(os.path.join(exceldir, workbook), data_only=True)\r\n sheet = wb.get_sheet_by_name(profile)\r\n phaserow = 58 - 1 # 58 for 58; 60 for 60\r\n errors = sheet.rows[phaserow][1:] # skip first col with notes\r\n errors = [obj.value for obj in errors if obj.value is not None]\r\n return errors\r\n\r\n\r\nexceldir = select_dir(r'C:\\Users\\Maaike\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot',\r\n r'D:\\Profiles\\koenradesma\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot')\r\nworkbook = 'Errors cam123ref_vs_alg Toshiba.xlsx'\r\nworkbookF = 'Errors cam123ref_vs_alg Siemens.xlsx'\r\n\r\n## test over all 10 positions\r\nprof = 'ZA1'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZA2'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZA3'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZA6'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB1'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB2'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB3'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB4'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, 
workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB5'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB6'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\n## for the amplitudes\r\nprint(\"******* Amplitude errors *********\")\r\n\r\nprof = 'ZA1'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZA2'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZA3'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZA6'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB1'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB2'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB3'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB4'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB5'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB6' \r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)",
"\"\"\" Plotting result of motion_pattern_error\r\nused for SPIE abstract\r\n\"\"\"\r\n\r\n\r\ndef read_error_ouput(exceldir, workbook, rowS=18, colS=1, colE=5):\r\n \"\"\"\r\n \"\"\"\r\n wb = openpyxl.load_workbook(os.path.join(exceldir, workbook), data_only=True)\r\n sheet = wb.get_sheet_by_name('summery')\r\n rowS = rowS\r\n colS = colS\r\n colE = colE\r\n mean_abs_error = sheet.rows[rowS][colS:colE]\r\n mean_abs_error = [obj.value for obj in mean_abs_error] \r\n SD = sheet.rows[rowS+1][colS:colE]\r\n SD = [obj.value for obj in SD] \r\n MIN = sheet.rows[rowS+2][colS:colE]\r\n MIN = [obj.value for obj in MIN] \r\n Q1 = sheet.rows[rowS+3][colS:colE]\r\n Q1 = [obj.value for obj in Q1] \r\n Q3 = sheet.rows[rowS+4][colS:colE]\r\n Q3 = [obj.value for obj in Q3] \r\n MAX = sheet.rows[rowS+5][colS:colE]\r\n MAX = [obj.value for obj in MAX]\r\n profiles = sheet.rows[7][colS:colE]\r\n profiles = [obj.value for obj in profiles]\r\n \r\n return profiles, mean_abs_error, SD, MIN, Q1, Q3, MAX \r\n \r\n\r\n\r\nimport os\r\nimport openpyxl\r\nimport matplotlib.pyplot as plt\r\nfrom stentseg.utils.datahandling import select_dir\r\n# import seaborn as sns #sns.tsplot\r\n# https://www.wakari.io/sharing/bundle/ijstokes/pyvis-1h?has_login=False\r\n# http://spartanideas.msu.edu/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/\r\n\r\nexceldir = select_dir(r'C:\\Users\\Maaike\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot',\r\n r'D:\\Profiles\\koenradesma\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot')\r\nworkbookErrors = 'Errors camera_algorithm Toshiba.xlsx'\r\ndirsave = select_dir(r'C:\\Users\\Maaike\\Desktop','D:\\Profiles\\koenradesma\\Desktop')\r\n\r\n# plot frequency profiles\r\nprofiles, mean_abs_error, SD, MIN, Q1, Q3, MAX = read_error_ouput(exceldir, workbookErrors)\r\n\r\nf1 = plt.figure(num=1, figsize=(7.6, 5))\r\nax1 = f1.add_subplot(111)\r\nax1.spines[\"top\"].set_visible(False) \r\nax1.spines[\"right\"].set_visible(False)\r\nax1.get_xaxis().tick_bottom() \r\nax1.get_yaxis().tick_left()\r\nax1.plot(profiles, mean_abs_error, linestyle='', marker='o', color='b') \r\nax1.errorbar(profiles, mean_abs_error, yerr = SD, fmt=None, color='b', capsize=8)\r\n# plt.xticks(range(len(mean_abs_error)), profiles, size = 'medium')\r\nax1.set_xlabel('heart rate (bpm)', fontsize=14)\r\nax1.set_ylabel('absolute error (mm)', fontsize=14)\r\nplt.xlim(45,105)\r\nplt.ylim(0,0.3)\r\n# save\r\nplt.savefig(os.path.join(dirsave, 'errorgraphfreq.pdf'), papertype='a0', dpi=300)\r\n\r\n\r\n# plot amplitude profiles\r\nprofiles, mean_abs_error, SD, MIN, Q1, Q3, MAX = read_error_ouput(exceldir, workbookErrors, colS=5, colE=12)\r\n\r\nf2 = plt.figure(num=3, figsize=(7.6, 5))\r\nax2 = f2.add_subplot(111)\r\nax2.spines[\"top\"].set_visible(False) \r\nax2.spines[\"right\"].set_visible(False)\r\nax2.get_xaxis().tick_bottom() \r\nax2.get_yaxis().tick_left()\r\nax2.plot(profiles[0], mean_abs_error[0], linestyle='', marker='o', color='k') \r\nax2.errorbar(profiles[0], mean_abs_error[0], yerr = SD[0], fmt=None, ecolor='k', capsize=8)\r\nax2.plot(profiles[1:-2], mean_abs_error[1:-2], linestyle='', marker='o', color='b') \r\nax2.errorbar(profiles[1:-2], mean_abs_error[1:-2], yerr = SD[1:-2], fmt=None, ecolor='b', capsize=8)\r\nax2.plot(profiles[-2:], mean_abs_error[-2:], linestyle='', marker='o', color='r')\r\nax2.errorbar(profiles[-2:], mean_abs_error[-2:], yerr = SD[-2:], fmt=None, ecolor='r', capsize=8) \r\n# ax2.plot(profiles, Q1, 'b.--')\r\n# ax2.plot(profiles, Q3, 'b.--')\r\n# 
plt.xticks(range(len(mean_abs_error)), profiles, size = 'medium')\r\nax2.set_xlabel('amplitude (mm)', fontsize=14)\r\nax2.set_ylabel('absolute error (mm)', fontsize=14)\r\nplt.xlim(0,1.45)\r\nplt.ylim(0,0.3)\r\n# save\r\nplt.savefig(os.path.join(dirsave, 'errorgraphampl.pdf'), papertype='a0', dpi=300)"
] |
[
[
"numpy.concatenate"
],
[
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.figure"
]
] |
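`paired_samples_ttest` is imported from `lspeas.utils.normality_statistics` and is not shown in this row; judging by the returned `(t2, p2)` pair it presumably wraps a paired-samples t-test. A sketch of the underlying statistic via `scipy.stats.ttest_rel`, on purely synthetic per-point absolute errors from two scanners:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
errors_scanner_a = rng.normal(0.15, 0.05, size=120)                      # e.g. Toshiba
errors_scanner_b = errors_scanner_a + rng.normal(0.02, 0.03, size=120)  # e.g. Siemens

# Paired test: the same stent points measured on both scanners.
t2, p2 = stats.ttest_rel(errors_scanner_a, errors_scanner_b)
print('t = %.3f, p = %.4f' % (t2, p2))
```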
ArmenFirman/Intelligent-Solar-Energy-Manager
|
[
"7a6a796b4e66442bd512eb7e1679c5ba29e145f1"
] |
[
"Main Code/WeatherData.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Amin Asbai\n\"\"\"\nimport json\nimport pandas as pd\nimport requests\n\n\ndef update_Weather_data(df):\n url='http://api.openweathermap.org/data/2.5/weather?q=Andratx&units=metric&appid=1e47e582bff799e3514239429b76f2aa'\n response = requests.get(url)\n climate_data=response.json()\n data=clean_data(climate_data)\n updated_dataframe=update_dataframe(df,data)\n return updated_dataframe\n\ndef clean_data(climate_data):\n main_data=climate_data[\"main\"]\n wind_data=climate_data[\"wind\"]\n data = {**main_data, **wind_data}\n data.pop(\"feels_like\", None)\n data.pop(\"temp_min\", None)\n data.pop(\"temp_max\", None)\n data[\"pressure\"]=100*data[\"pressure\"]\n data[\"irradiance\"]=None\n return data\n\ndef update_dataframe(df,dict_weather):\n df = df.iloc[1:]\n df = df.drop(columns=['Hour', 'Month'])\n aux_df=pd.DataFrame()\n for i in df.columns:\n aux_df.loc[0,i]=dict_weather[i]\n aux_df.insert(0, 'TimeStamp', pd.to_datetime('now').replace(second=0,microsecond=0))\n aux_df.set_index('TimeStamp', inplace=True)\n df=df.append(aux_df)\n df['Hour']=df.index.hour\n df['Month']=df.index.month\n return df"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
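`update_dataframe` above maintains a fixed-length rolling window: it drops the oldest row and appends the newest reading stamped with the current minute. A standalone sketch of the same pattern with made-up column names; note that `DataFrame.append`, used in the row above, is deprecated in pandas >= 1.4, so the sketch uses `pd.concat`:

```python
import pandas as pd

def roll_in(df: pd.DataFrame, reading: dict) -> pd.DataFrame:
    # Drop the oldest row, append the newest reading at the current minute.
    new_row = pd.DataFrame(
        [reading],
        index=[pd.Timestamp.now().replace(second=0, microsecond=0)])
    return pd.concat([df.iloc[1:], new_row])

df = pd.DataFrame({'temp': [20.1, 20.4], 'humidity': [55, 57]},
                  index=pd.to_datetime(['2021-06-01 10:00', '2021-06-01 11:00']))
df = roll_in(df, {'temp': 21.0, 'humidity': 54})
print(len(df))  # still 2 rows: oldest dropped, newest appended
```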
arunkumarchacko/ML_SageMaker_Studies
|
[
"59660b2bc9b163a62fa271ded3dc328700db7e67"
] |
[
"Project_Plagiarism_Detection/problem_unittests.py"
] |
[
"from unittest.mock import MagicMock, patch\nimport sklearn.naive_bayes\nimport numpy as np\nimport pandas as pd\nimport re\n\n# test csv file\nTEST_CSV = 'data/test_info.csv'\n\nclass AssertTest(object):\n '''Defines general test behavior.'''\n def __init__(self, params):\n self.assert_param_message = '\\n'.join([str(k) + ': ' + str(v) + '' for k, v in params.items()])\n \n def test(self, assert_condition, assert_message):\n assert assert_condition, assert_message + '\\n\\nUnit Test Function Parameters\\n' + self.assert_param_message\n\ndef _print_success_message():\n print('Tests Passed!')\n\n# test clean_dataframe\ndef test_numerical_df(numerical_dataframe):\n \n # test result\n transformed_df = numerical_dataframe(TEST_CSV)\n \n # Check type is a DataFrame\n assert isinstance(transformed_df, pd.DataFrame), 'Returned type is {}.'.format(type(transformed_df))\n \n # check columns\n column_names = list(transformed_df)\n assert 'File' in column_names, 'No File column, found.'\n assert 'Task' in column_names, 'No Task column, found.'\n assert 'Category' in column_names, 'No Category column, found.'\n assert 'Class' in column_names, 'No Class column, found.'\n \n # check conversion values\n assert transformed_df.loc[0, 'Category'] == 1, '`heavy` plagiarism mapping test, failed.'\n assert transformed_df.loc[2, 'Category'] == 0, '`non` plagiarism mapping test, failed.'\n assert transformed_df.loc[30, 'Category'] == 3, '`cut` plagiarism mapping test, failed.'\n assert transformed_df.loc[5, 'Category'] == 2, '`light` plagiarism mapping test, failed.'\n assert transformed_df.loc[37, 'Category'] == -1, 'original file mapping test, failed; should have a Category = -1.'\n assert transformed_df.loc[41, 'Category'] == -1, 'original file mapping test, failed; should have a Category = -1.'\n \n _print_success_message()\n\n\ndef test_containment(complete_df, containment_fn):\n \n # check basic format and value \n # for n = 1 and just the fifth file\n test_val = containment_fn(complete_df, 1, 'g0pA_taske.txt')\n \n assert isinstance(test_val, float), 'Returned type is {}.'.format(type(test_val))\n assert test_val<=1.0, 'It appears that the value is not normalized; expected a value <=1, got: '+str(test_val)\n \n # known vals for first few files\n filenames = ['g0pA_taska.txt', 'g0pA_taskb.txt', 'g0pA_taskc.txt', 'g0pA_taskd.txt']\n ngram_1 = [0.39814814814814814, 1.0, 0.86936936936936937, 0.5935828877005348]\n ngram_3 = [0.0093457943925233638, 0.96410256410256412, 0.61363636363636365, 0.15675675675675677]\n \n # results for comparison\n results_1gram = []\n results_3gram = []\n \n for i in range(4):\n val_1 = containment_fn(complete_df, 1, filenames[i])\n val_3 = containment_fn(complete_df, 3, filenames[i])\n results_1gram.append(val_1)\n results_3gram.append(val_3)\n \n print(results_1gram)\n print(ngram_1)\n # check correct results\n assert all(np.isclose(results_1gram, ngram_1, rtol=1e-04)), \\\n 'n=1 calculations are incorrect. 
Double check the intersection calculation.'\n # check correct results\n assert all(np.isclose(results_3gram, ngram_3, rtol=1e-04)), \\\n 'n=3 calculations are incorrect.'\n \n _print_success_message()\n \ndef test_lcs(df, lcs_word):\n \n test_index = 10 # file 10\n \n # get answer file text\n answer_text = df.loc[test_index, 'Text'] \n \n # get text for orig file\n # find the associated task type (one character, a-e)\n task = df.loc[test_index, 'Task']\n # we know that source texts have Class = -1\n orig_rows = df[(df['Class'] == -1)]\n orig_row = orig_rows[(orig_rows['Task'] == task)]\n source_text = orig_row['Text'].values[0]\n \n # calculate LCS\n test_val = lcs_word(answer_text, source_text)\n \n # check type\n assert isinstance(test_val, float), 'Returned type is {}.'.format(type(test_val))\n assert test_val<=1.0, 'It appears that the value is not normalized; expected a value <=1, got: '+str(test_val)\n \n # known vals for first few files\n lcs_vals = [0.1917808219178082, 0.8207547169811321, 0.8464912280701754, 0.3160621761658031, 0.24257425742574257]\n \n # results for comparison\n results = []\n \n for i in range(5):\n # get answer and source text\n answer_text = df.loc[i, 'Text'] \n task = df.loc[i, 'Task']\n # we know that source texts have Class = -1\n orig_rows = df[(df['Class'] == -1)]\n orig_row = orig_rows[(orig_rows['Task'] == task)]\n source_text = orig_row['Text'].values[0]\n print(answer_text)\n print(source_text)\n # calc lcs\n val = lcs_word(answer_text, source_text)\n results.append(val)\n \n #print(results)\n #print(lcs_vals)\n # check correct results\n assert all(np.isclose(results, lcs_vals, rtol=1e-05)), 'LCS calculations are incorrect.'\n \n _print_success_message()\n \ndef test_data_split(train_x, train_y, test_x, test_y):\n \n # check types\n assert isinstance(train_x, np.ndarray),\\\n 'train_x is not an array, instead got type: {}'.format(type(train_x))\n assert isinstance(train_y, np.ndarray),\\\n 'train_y is not an array, instead got type: {}'.format(type(train_y))\n assert isinstance(test_x, np.ndarray),\\\n 'test_x is not an array, instead got type: {}'.format(type(test_x))\n assert isinstance(test_y, np.ndarray),\\\n 'test_y is not an array, instead got type: {}'.format(type(test_y))\n \n # should hold all 95 submission files\n assert len(train_x) + len(test_x) == 95, \\\n 'Unexpected amount of train + test data. Expecting 95 answer text files, got ' +str(len(train_x) + len(test_x))\n assert len(test_x) > 1, \\\n 'Unexpected amount of test data. There should be multiple test files.'\n \n # check shape\n assert train_x.shape[1]==2, \\\n 'train_x should have as many columns as selected features, got: {}'.format(train_x.shape[1])\n assert len(train_y.shape)==1, \\\n 'train_y should be a 1D array, got shape: {}'.format(train_y.shape)\n \n _print_success_message()\n \n \n "
] |
[
[
"numpy.isclose"
]
] |
SamvitJ/Deep-Feature-Flow
|
[
"56f982741aa4886878eca3d566419b353c62b698"
] |
[
"dff_deeplab/config/config.py"
] |
[
"# --------------------------------------------------------\n# Deep Feature Flow\n# Copyright (c) 2016 by Contributors\n# Copyright (c) 2017 Microsoft\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Modified by Xizhou Zhu, Yuwen Xiong, Bin Xiao\n# --------------------------------------------------------\n\nimport yaml\nimport numpy as np\nfrom easydict import EasyDict as edict\n\nconfig = edict()\n\nconfig.MXNET_VERSION = ''\nconfig.output_path = ''\nconfig.symbol = ''\nconfig.gpus = ''\nconfig.CLASS_AGNOSTIC = True\nconfig.SCALES = [(360, 600)] # first is scale (the shorter side); second is max size\n\n# default training\nconfig.default = edict()\nconfig.default.frequent = 20\nconfig.default.kvstore = 'device'\n\n# network related params\nconfig.network = edict()\nconfig.network.pretrained = ''\nconfig.network.pretrained_flow = ''\nconfig.network.pretrained_epoch = 0\nconfig.network.PIXEL_MEANS = np.array([0, 0, 0])\nconfig.network.IMAGE_STRIDE = 0\nconfig.network.FIXED_PARAMS = ['gamma', 'beta']\nconfig.network.DFF_FEAT_DIM = 2048\n\n# dataset related params\nconfig.dataset = edict()\nconfig.dataset.dataset = 'CityScape'\nconfig.dataset.image_set = 'leftImg8bit_train'\nconfig.dataset.test_image_set = 'leftImg8bit_val'\nconfig.dataset.root_path = '../data'\nconfig.dataset.dataset_path = '../data/cityscapes'\nconfig.dataset.NUM_CLASSES = 19\nconfig.dataset.annotation_prefix = 'gtFine'\n\n\nconfig.TRAIN = edict()\nconfig.TRAIN.lr = 0\nconfig.TRAIN.lr_step = ''\nconfig.TRAIN.lr_factor = 0.1\nconfig.TRAIN.warmup = False\nconfig.TRAIN.warmup_lr = 0\nconfig.TRAIN.warmup_step = 0\nconfig.TRAIN.momentum = 0.9\nconfig.TRAIN.wd = 0.0005\nconfig.TRAIN.begin_epoch = 0\nconfig.TRAIN.end_epoch = 0\nconfig.TRAIN.model_prefix = ''\n\n# whether resume training\nconfig.TRAIN.RESUME = False\n# whether flip image\nconfig.TRAIN.FLIP = True\n# whether shuffle image\nconfig.TRAIN.SHUFFLE = True\n# whether use OHEM\nconfig.TRAIN.ENABLE_OHEM = False\n# size of images for each device, 2 for rcnn, 1 for rpn and e2e\nconfig.TRAIN.BATCH_IMAGES = 1\n# e2e changes behavior of anchor loader and metric\nconfig.TRAIN.END2END = False\n# group images with similar aspect ratio\nconfig.TRAIN.ASPECT_GROUPING = True\n\n# used for end2end training\n\n# DFF, trained image sampled from [min_offset, max_offset]\nconfig.TRAIN.MIN_OFFSET = -4\nconfig.TRAIN.MAX_OFFSET = 0\n\nconfig.TEST = edict()\n# size of images for each device\nconfig.TEST.BATCH_IMAGES = 1\n\n# DFF\nconfig.TEST.KEY_FRAME_INTERVAL = 5\n\nconfig.TEST.max_per_image = 300\n\n# Test Model Epoch\nconfig.TEST.test_epoch = 0\n\n\ndef update_config(config_file):\n exp_config = None\n with open(config_file) as f:\n exp_config = edict(yaml.load(f))\n for k, v in exp_config.items():\n if k in config:\n if isinstance(v, dict):\n if k == 'TRAIN':\n if 'BBOX_WEIGHTS' in v:\n v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])\n elif k == 'network':\n if 'PIXEL_MEANS' in v:\n v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])\n for vk, vv in v.items():\n config[k][vk] = vv\n else:\n if k == 'SCALES':\n config[k][0] = (tuple(v))\n else:\n config[k] = v\n else:\n raise ValueError(\"key must exist in config.py\")\n"
] |
[
[
"numpy.array"
]
] |
thehomebrewnerd/featuretools
|
[
"5a7e09edf02b463ad903c6d8c40daa86f208c0c0",
"5a7e09edf02b463ad903c6d8c40daa86f208c0c0"
] |
[
"featuretools/tests/primitive_tests/test_groupby_transform_primitives.py",
"featuretools/tests/entityset_tests/test_last_time_index.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom ..testing_utils import make_ecommerce_entityset\n\nimport featuretools as ft\nfrom featuretools.computational_backends import PandasBackend\nfrom featuretools.primitives import (\n CumCount,\n CumMax,\n CumMean,\n CumMin,\n CumSum,\n Last,\n TransformPrimitive\n)\nfrom featuretools.variable_types import DatetimeTimeIndex, Numeric\n\n\n@pytest.fixture\ndef es():\n return make_ecommerce_entityset()\n\n\nclass TestCumCount:\n\n primitive = CumCount\n\n def test_order(self):\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 2], [1])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(g.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([1, 2], [1, 2], [1], [1])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(g.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_discrete(self):\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([1, 2], [1, 2], [1], [1])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(g.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumSum:\n\n primitive = CumSum\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 3], [2])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([101, 204], [102, 208], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumMean:\n primitive = CumMean\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 1.5], [2])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([101, 102], [102, 104], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumMax:\n\n primitive = CumMax\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 2], [2])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([101, 103], [102, 106], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumMin:\n\n primitive = CumMin\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 1], [2])\n\n function = 
self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106, 100])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\", \"a\"])\n answers = ([101, 101, 100], [102, 102], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\ndef test_cum_sum(es):\n log_value_feat = es['log']['value']\n dfeat = ft.Feature(es['sessions']['device_type'], entity=es['log'])\n cum_sum = ft.Feature(log_value_feat, groupby=dfeat, primitive=CumSum)\n features = [cum_sum]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_sum.get_name()].values\n assert len(cvalues) == 15\n cum_sum_values = [0, 5, 15, 30, 50, 0, 1, 3, 6, 6, 50, 55, 55, 62, 76]\n for i, v in enumerate(cum_sum_values):\n assert v == cvalues[i]\n\n\ndef test_cum_min(es):\n log_value_feat = es['log']['value']\n cum_min = ft.Feature(log_value_feat, groupby=es['log']['session_id'], primitive=CumMin)\n features = [cum_min]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_min.get_name()].values\n assert len(cvalues) == 15\n cum_min_values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for i, v in enumerate(cum_min_values):\n assert v == cvalues[i]\n\n\ndef test_cum_max(es):\n log_value_feat = es['log']['value']\n cum_max = ft.Feature(log_value_feat, groupby=es['log']['session_id'], primitive=CumMax)\n features = [cum_max]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_max.get_name()].values\n assert len(cvalues) == 15\n cum_max_values = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]\n for i, v in enumerate(cum_max_values):\n assert v == cvalues[i]\n\n\ndef test_cum_sum_group_on_nan(es):\n log_value_feat = es['log']['value']\n es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +\n ['toothpaste'] * 3 + ['brown bag'] * 2 +\n ['shoes'] +\n [np.nan] * 4 +\n ['coke_zero'] * 2)\n es['log'].df['value'][16] = 10\n cum_sum = ft.Feature(log_value_feat, groupby=es['log']['product_id'], primitive=CumSum)\n features = [cum_sum]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(17))\n cvalues = df[cum_sum.get_name()].values\n assert len(cvalues) == 17\n cum_sum_values = [0, 5, 15,\n 15, 35,\n 0, 1, 3,\n 3, 3,\n 0,\n np.nan, np.nan, np.nan, np.nan, np.nan, 10]\n\n assert len(cvalues) == len(cum_sum_values)\n for i, v in enumerate(cum_sum_values):\n if np.isnan(v):\n assert (np.isnan(cvalues[i]))\n else:\n assert v == cvalues[i]\n\n\ndef test_cum_sum_numpy_group_on_nan(es):\n class CumSumNumpy(TransformPrimitive):\n \"\"\"Returns the cumulative sum after grouping\"\"\"\n\n name = \"cum_sum\"\n input_types = [Numeric]\n return_type = Numeric\n uses_full_entity = True\n\n def get_function(self):\n def cum_sum(values):\n return values.cumsum().values\n return cum_sum\n\n log_value_feat = es['log']['value']\n es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +\n ['toothpaste'] * 3 + ['brown bag'] * 2 +\n ['shoes'] +\n [np.nan] * 4 +\n ['coke_zero'] * 2)\n es['log'].df['value'][16] = 10\n cum_sum = ft.Feature(log_value_feat, groupby=es['log']['product_id'], primitive=CumSumNumpy)\n assert cum_sum.get_name() == \"CUM_SUM(value) by product_id\"\n 
features = [cum_sum]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(17))\n cvalues = df[cum_sum.get_name()].values\n assert len(cvalues) == 17\n cum_sum_values = [0, 5, 15,\n 15, 35,\n 0, 1, 3,\n 3, 3,\n 0,\n np.nan, np.nan, np.nan, np.nan, np.nan, 10]\n\n assert len(cvalues) == len(cum_sum_values)\n for i, v in enumerate(cum_sum_values):\n if np.isnan(v):\n assert (np.isnan(cvalues[i]))\n else:\n assert v == cvalues[i]\n\n\ndef test_cum_handles_uses_full_entity(es):\n def check(feature):\n pandas_backend = PandasBackend(es, [feature])\n df_1 = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2], time_last=None)\n df_2 = pandas_backend.calculate_all_features(instance_ids=[2, 4], time_last=None)\n\n # check that the value for instance id 2 matches\n assert (df_2.loc[2] == df_1.loc[2]).all()\n\n for primitive in [CumSum, CumMean, CumMax, CumMin]:\n check(ft.Feature(es['log']['value'], groupby=es['log']['session_id'], primitive=primitive))\n\n check(ft.Feature(es['log']['session_id'], groupby=es['log']['session_id'], primitive=CumCount))\n\n\ndef test_cum_mean(es):\n log_value_feat = es['log']['value']\n cum_mean = ft.Feature(log_value_feat, groupby=es['log']['session_id'], primitive=CumMean)\n features = [cum_mean]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_mean.get_name()].values\n assert len(cvalues) == 15\n cum_mean_values = [0, 2.5, 5, 7.5, 10, 0, .5, 1, 1.5, 0, 0, 2.5, 0, 3.5, 7]\n for i, v in enumerate(cum_mean_values):\n assert v == cvalues[i]\n\n\ndef test_cum_count(es):\n cum_count = ft.Feature(es['log']['session_id'],\n groupby=es['log']['session_id'],\n primitive=CumCount)\n features = [cum_count]\n df = ft.calculate_feature_matrix(entityset=es,\n features=features,\n instance_ids=range(15))\n cvalues = df[cum_count.get_name()].values\n assert len(cvalues) == 15\n cum_count_values = [1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 1, 2, 1, 2, 3]\n for i, v in enumerate(cum_count_values):\n assert v == cvalues[i]\n\n\ndef test_rename(es):\n cum_count = ft.Feature(es['log']['session_id'],\n groupby=es['log']['session_id'],\n primitive=CumCount)\n copy_feat = cum_count.rename(\"rename_test\")\n assert cum_count.hash() != copy_feat.hash()\n assert cum_count.get_name() != copy_feat.get_name()\n assert all([x.generate_name() == y.generate_name() for x, y\n in zip(cum_count.base_features, copy_feat.base_features)])\n assert cum_count.entity == copy_feat.entity\n\n\ndef test_groupby_no_data(es):\n cum_count = ft.Feature(es['log']['session_id'],\n groupby=es['log']['session_id'],\n primitive=CumCount)\n last_feat = ft.Feature(cum_count, parent_entity=es['customers'], primitive=Last)\n df = ft.calculate_feature_matrix(entityset=es,\n features=[last_feat],\n cutoff_time=pd.Timestamp(\"2011-04-08\"))\n cvalues = df[last_feat.get_name()].values\n assert len(cvalues) == 3\n assert all([pd.isnull(value) for value in cvalues])\n\n\ndef test_groupby_uses_calc_time(es):\n def projected_amount_left(amount, timestamp, time=None):\n # cumulative sum of amout, with timedelta * constant subtracted\n delta = time - timestamp\n delta_seconds = delta / np.timedelta64(1, 's')\n return amount.cumsum() - (delta_seconds)\n\n class ProjectedAmountRemaining(TransformPrimitive):\n name = \"projected_amount_remaining\"\n uses_calc_time = True\n input_types = [Numeric, DatetimeTimeIndex]\n return_type = Numeric\n uses_full_entity = True\n\n def get_function(self):\n return projected_amount_left\n\n time_since_product 
= ft.Feature([es['log']['value'], es['log']['datetime']],\n groupby=es['log']['product_id'],\n primitive=ProjectedAmountRemaining)\n df = ft.calculate_feature_matrix(entityset=es,\n features=[time_since_product],\n cutoff_time=pd.Timestamp(\"2011-04-10 11:10:30\"))\n answers = [-88830, -88819, -88803, -88797, -88771, -88770, -88760, -88749,\n -88740, -88227, -1830, -1809, -1750, -1740, -1723, np.nan, np.nan]\n\n for x, y in zip(df[time_since_product.get_name()], answers):\n assert ((pd.isnull(x) and pd.isnull(y)) or x == y)\n",
"import copy\nfrom datetime import datetime\n\nimport pandas as pd\nimport pytest\n\nfrom ..testing_utils import make_ecommerce_entityset\n\nfrom featuretools import Relationship\n\n\n@pytest.fixture\ndef entityset():\n return make_ecommerce_entityset()\n\n\n@pytest.fixture\ndef values_es(entityset):\n new_es = copy.deepcopy(entityset)\n new_es.normalize_entity('log', 'values', 'value',\n make_time_index=True,\n new_entity_time_index=\"value_time\")\n return new_es\n\n\n@pytest.fixture\ndef true_values_lti():\n true_values_lti = pd.Series([datetime(2011, 4, 10, 10, 41, 0),\n datetime(2011, 4, 9, 10, 31, 9),\n datetime(2011, 4, 9, 10, 31, 18),\n datetime(2011, 4, 9, 10, 31, 27),\n datetime(2011, 4, 10, 10, 40, 1),\n datetime(2011, 4, 10, 10, 41, 3),\n datetime(2011, 4, 9, 10, 30, 12),\n datetime(2011, 4, 10, 10, 41, 6),\n datetime(2011, 4, 9, 10, 30, 18),\n datetime(2011, 4, 9, 10, 30, 24),\n datetime(2011, 4, 10, 11, 10, 3)])\n return true_values_lti\n\n\n@pytest.fixture\ndef true_sessions_lti():\n sessions_lti = pd.Series([datetime(2011, 4, 9, 10, 30, 24),\n datetime(2011, 4, 9, 10, 31, 27),\n datetime(2011, 4, 9, 10, 40, 0),\n datetime(2011, 4, 10, 10, 40, 1),\n datetime(2011, 4, 10, 10, 41, 6),\n datetime(2011, 4, 10, 11, 10, 3)])\n return sessions_lti\n\n\n@pytest.fixture\ndef wishlist_df():\n wishlist_df = pd.DataFrame({\n \"session_id\": [0, 1, 2, 2, 3, 4, 5],\n \"datetime\": [datetime(2011, 4, 9, 10, 30, 15),\n datetime(2011, 4, 9, 10, 31, 30),\n datetime(2011, 4, 9, 10, 30, 30),\n datetime(2011, 4, 9, 10, 35, 30),\n datetime(2011, 4, 10, 10, 41, 0),\n datetime(2011, 4, 10, 10, 39, 59),\n datetime(2011, 4, 10, 11, 10, 2)],\n \"product_id\": ['coke zero', 'taco clock', 'coke zero', 'car',\n 'toothpaste', 'brown bag', 'coke zero'],\n })\n return wishlist_df\n\n\n@pytest.fixture\ndef extra_session_df(entityset):\n row_values = {'customer_id': 2,\n 'device_name': 'PC',\n 'device_type': 0,\n 'id': 6}\n row = pd.DataFrame(row_values, index=pd.Index([6], name='id'))\n df = entityset['sessions'].df.append(row, sort=True).sort_index()\n return df\n\n\nclass TestLastTimeIndex(object):\n def test_leaf(self, entityset):\n entityset.add_last_time_indexes()\n log = entityset['log']\n assert len(log.last_time_index) == 17\n for v1, v2 in zip(log.last_time_index, log.df['datetime']):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_leaf_no_time_index(self, entityset):\n entityset.add_last_time_indexes()\n stores = entityset['stores']\n true_lti = pd.Series([None for x in range(6)], dtype='datetime64[ns]')\n assert len(true_lti) == len(stores.last_time_index)\n for v1, v2 in zip(stores.last_time_index, true_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent(self, values_es, true_values_lti):\n # test entity with time index and all instances in child entity\n values_es.add_last_time_indexes()\n values = values_es['values']\n assert len(values.last_time_index) == 11\n sorted_lti = values.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_values_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent_some_missing(self, values_es, true_values_lti):\n # test entity with time index and not all instances have children\n values = values_es['values']\n\n # add extra value instance with no children\n row_values = {'value': 21.0,\n 'value_time': pd.Timestamp(\"2011-04-10 11:10:02\"),\n 'values_id': 11}\n # make sure index doesn't have same name as column to suppress pandas warning\n row = pd.DataFrame(row_values, 
index=pd.Index([11]))\n df = values.df.append(row, sort=True)\n df = df[['value', 'value_time']].sort_values(by='value')\n df.index.name = 'values_id'\n values.update_data(df)\n values_es.add_last_time_indexes()\n # lti value should default to instance's time index\n true_values_lti[10] = pd.Timestamp(\"2011-04-10 11:10:02\")\n true_values_lti[11] = pd.Timestamp(\"2011-04-10 11:10:03\")\n\n assert len(values.last_time_index) == 12\n sorted_lti = values.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_values_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent_no_time_index(self, entityset, true_sessions_lti):\n # test entity without time index and all instances have children\n entityset.add_last_time_indexes()\n sessions = entityset['sessions']\n assert len(sessions.last_time_index) == 6\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent_no_time_index_missing(self, entityset, extra_session_df,\n true_sessions_lti):\n # test entity without time index and not all instance have children\n sessions = entityset['sessions']\n\n # add session instance with no associated log instances\n sessions.update_data(extra_session_df)\n entityset.add_last_time_indexes()\n # since sessions has no time index, default value is NaT\n true_sessions_lti[6] = pd.NaT\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children(self, entityset, wishlist_df,\n true_sessions_lti):\n # test all instances in both children\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=wishlist_df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n sessions = entityset['sessions']\n # wishlist df has more recent events for two session ids\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[3] = pd.Timestamp(\"2011-4-10 10:41:00\")\n\n assert len(sessions.last_time_index) == 6\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_right_missing(self, entityset, wishlist_df,\n true_sessions_lti):\n # test all instances in left child\n sessions = entityset['sessions']\n\n # drop wishlist instance related to id 3 so it's only in log\n wishlist_df.drop(4, inplace=True)\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=wishlist_df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n\n # now only session id 1 has newer event in wishlist_log\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n\n assert len(sessions.last_time_index) == 6\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_left_missing(self, entityset, extra_session_df,\n wishlist_df, true_sessions_lti):\n # 
test all instances in right child\n sessions = entityset['sessions']\n\n # add row to sessions so not all session instances are in log\n sessions.update_data(extra_session_df)\n\n # add row to wishlist df so new session instance in in wishlist_log\n row_values = {'session_id': 6,\n 'datetime': pd.Timestamp(\"2011-04-11 11:11:11\"),\n 'product_id': 'toothpaste'}\n row = pd.DataFrame(row_values, index=pd.RangeIndex(start=7, stop=8))\n df = wishlist_df.append(row)\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n\n # now wishlist_log has newer events for 3 session ids\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[3] = pd.Timestamp(\"2011-4-10 10:41:00\")\n true_sessions_lti[6] = pd.Timestamp(\"2011-04-11 11:11:11\")\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_all_combined(self, entityset, extra_session_df,\n wishlist_df, true_sessions_lti):\n # test some instances in right, some in left, all when combined\n sessions = entityset['sessions']\n\n # add row to sessions so not all session instances are in log\n sessions.update_data(extra_session_df)\n\n # add row to wishlist_log so extra session has child instance\n row_values = {'session_id': 6,\n 'datetime': pd.Timestamp(\"2011-04-11 11:11:11\"),\n 'product_id': 'toothpaste'}\n row = pd.DataFrame(row_values, index=pd.RangeIndex(start=7, stop=8))\n df = wishlist_df.append(row)\n\n # drop instance 4 so wishlist_log does not have session id 3 instance\n df.drop(4, inplace=True)\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n\n # wishlist has newer events for 2 sessions\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[6] = pd.Timestamp(\"2011-04-11 11:11:11\")\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_both_missing(self, entityset, extra_session_df,\n wishlist_df, true_sessions_lti):\n # test all instances in neither child\n sessions = entityset['sessions']\n\n # add row to sessions to create session with no events\n sessions.update_data(extra_session_df)\n\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=wishlist_df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n sessions = entityset['sessions']\n\n # wishlist has 2 newer events and one is NaT\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[3] = pd.Timestamp(\"2011-4-10 10:41:00\")\n true_sessions_lti[6] = pd.NaT\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = 
sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_grandparent(self, entityset):\n # test sorting by time works correctly across several generations\n log = entityset[\"log\"]\n customers = entityset[\"customers\"]\n\n # For one user, change a log event to be newer than the user's normal\n # last time index. This event should be from a different session than\n # the current last time index.\n log.df['datetime'][5] = pd.Timestamp(\"2011-4-09 10:40:01\")\n log.df = (log.df.set_index('datetime', append=True)\n .sort_index(level=[1, 0], kind=\"mergesort\")\n .reset_index('datetime', drop=False))\n log.update_data(log.df)\n entityset.add_last_time_indexes()\n\n true_customers_lti = pd.Series([datetime(2011, 4, 9, 10, 40, 1),\n datetime(2011, 4, 10, 10, 41, 6),\n datetime(2011, 4, 10, 11, 10, 3)])\n\n assert len(customers.last_time_index) == 3\n sorted_lti = customers.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_customers_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n"
] |
[
[
"pandas.isnull",
"numpy.isnan",
"pandas.Timestamp",
"numpy.timedelta64",
"pandas.Series"
],
[
"pandas.isnull",
"pandas.Timestamp",
"pandas.Index",
"pandas.RangeIndex"
]
] |
ut-amrl/ContrastiveSceneContexts
|
[
"622b9cd32ea2dcf8307d25eb2e7ee1c09d220134",
"622b9cd32ea2dcf8307d25eb2e7ee1c09d220134"
] |
[
"downstream/insseg/datasets/scannet.py",
"downstream/votenet/lib/ddp_trainer.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nimport torch\nimport numpy as np\nfrom scipy import spatial\n\nfrom datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type\nfrom lib.pc_utils import read_plyfile, save_point_cloud\nfrom lib.utils import read_txt, fast_hist, per_class_iu\nfrom lib.io3d import write_triangle_mesh, create_color_palette\n\nclass ScannetVoxelizationDataset(VoxelizationDataset):\n # added\n NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS.\n NUM_IN_CHANNEL = 3\n CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',\n 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')\n VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)\n IGNORE_LABELS = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS))\n \n CLASS_LABELS_INSTANCE = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter',\n 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']\n VALID_CLASS_IDS_INSTANCE = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])\n IGNORE_LABELS_INSTANCE = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS_INSTANCE))\n\n\n # Voxelization arguments\n CLIP_BOUND = None\n TEST_CLIP_BOUND = None\n VOXEL_SIZE = 0.05\n\n # Augmentation arguments\n ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi,\n np.pi))\n TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0))\n ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))\n\n ROTATION_AXIS = 'z'\n LOCFEAT_IDX = 2\n IS_FULL_POINTCLOUD_EVAL = True\n\n # If trainval.txt does not exist, copy train.txt and add contents from val.txt\n DATA_PATH_FILE = {\n DatasetPhase.Train: 'scannetv2_train.txt',\n DatasetPhase.Val: 'scannetv2_val.txt',\n DatasetPhase.TrainVal: 'scannetv2_trainval.txt',\n DatasetPhase.Test: 'scannetv2_test.txt',\n DatasetPhase.Debug: 'debug.txt'\n }\n\n def __init__(self,\n config,\n prevoxel_transform=None,\n input_transform=None,\n target_transform=None,\n augment_data=True,\n elastic_distortion=False,\n cache=False,\n phase=DatasetPhase.Train):\n if isinstance(phase, str):\n phase = str2datasetphase_type(phase)\n # Use cropped rooms for train/val\n data_root = config.data.scannet_path\n if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:\n self.CLIP_BOUND = self.TEST_CLIP_BOUND\n \n data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase]))\n if phase == DatasetPhase.Train and config.data.train_file:\n data_paths = read_txt(os.path.join(data_root, 'splits', config.data.train_file))\n \n # data efficiency by sampling points\n self.sampled_inds = {}\n if config.data.sampled_inds and phase == DatasetPhase.Train:\n self.sampled_inds = torch.load(config.data.sampled_inds)\n\n data_paths = [data_path + '.pth' for data_path in data_paths]\n logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))\n super().__init__(\n data_paths,\n data_root=data_root,\n prevoxel_transform=prevoxel_transform,\n input_transform=input_transform,\n target_transform=target_transform,\n 
ignore_label=config.data.ignore_label,\n        return_transformation=config.data.return_transformation,\n        augment_data=augment_data,\n        elastic_distortion=elastic_distortion,\n        config=config)\n\n  def get_output_id(self, iteration):\n    return '_'.join(Path(self.data_paths[iteration]).stem.split('_')[:2])\n\n  def _augment_locfeat(self, pointcloud):\n    # Assuming that pointcloud is xyzrgb(...), append location feat.\n    pointcloud = np.hstack(\n        (pointcloud[:, :6], 100 * np.expand_dims(pointcloud[:, self.LOCFEAT_IDX], 1),\n         pointcloud[:, 6:]))\n    return pointcloud\n\n  def load_data(self, index):\n    filepath = self.data_root / self.data_paths[index]\n    pointcloud = torch.load(filepath)\n    coords = pointcloud[0].astype(np.float32)\n    feats = pointcloud[1].astype(np.float32)\n    labels = pointcloud[2].astype(np.int32)\n    instances = pointcloud[3].astype(np.int32)\n    if self.sampled_inds:\n      scene_name = self.get_output_id(index)\n      mask = np.ones_like(labels).astype(bool)  # np.bool was removed in NumPy 1.24; use builtin bool\n      sampled_inds = self.sampled_inds[scene_name]\n      mask[sampled_inds] = False\n      labels[mask] = 0\n      instances[mask] = 0\n\n    return coords, feats, labels, instances\n  \n  def get_original_pointcloud(self, coords, transformation, iteration):\n    logging.info('===> Start testing on original pointcloud space.')\n    data_path = self.data_paths[iteration]\n    fullply_f = self.data_root / data_path\n    query_xyz, _, query_label, _ = torch.load(fullply_f)\n\n    coords = coords[:, 1:].numpy() + 0.5\n    curr_transformation = transformation[0, :16].numpy().reshape(4, 4)\n    coords = np.hstack((coords, np.ones((coords.shape[0], 1))))\n    coords = (np.linalg.inv(curr_transformation) @ coords.T).T\n\n    # Run test for each room.\n    from pykeops.numpy import LazyTensor\n    from pykeops.numpy.utils import IsGpuAvailable\n    \n    query_xyz = np.array(query_xyz)\n    x_i = LazyTensor( query_xyz[:,None,:] )  # x_i.shape = (1e6, 1, 3)\n    y_j = LazyTensor( coords[:,:3][None,:,:] )  # y_j.shape = ( 1, 2e6,3)\n    D_ij = ((x_i - y_j) ** 2).sum(-1)  # (M**2, N) symbolic matrix of squared distances\n    indKNN = D_ij.argKmin(1, dim=1)  # Grid <-> Samples, (M**2, K) integer tensor\n    inds = indKNN[:,0]\n    return inds, query_xyz\n\n  def save_prediction(self, coords, pred, transformation, iteration, save_dir):\n    print('Running full pointcloud evaluation.')\n    #if dataset.IGNORE_LABELS:\n    #  decode_label_map = {}\n    #  for k, v in dataset.label_map.items():\n    #    decode_label_map[v] = k\n    #  orig_pred = np.array([decode_label_map[x.item()] for x in orig_pred.cpu()], dtype=np.int)\n    inds_mapping, xyz = self.get_original_pointcloud(coords, transformation, iteration)\n    save = {'points': coords, 'mapping': inds_mapping, 'labels': pred}\n\n    # Save prediction in txt format for submission.\n    room_id = self.get_output_id(iteration)\n    torch.save(save, os.path.join(save_dir, room_id))\n    #np.savetxt(f'{save_dir}/{room_id}.txt', ptc_pred, fmt='%i')\n\n  def save_groundtruth(self, coords, gt, transformation, iteration, save_dir):\n    save = {'points': coords, 'labels': gt}\n    # Save prediction in txt format for submission.\n    room_id = self.get_output_id(iteration)\n    torch.save(save, os.path.join(save_dir, room_id))\n\n\nclass ScannetVoxelization2cmDataset(ScannetVoxelizationDataset):\n  VOXEL_SIZE = 0.02\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport torch\nimport os\nimport sys\nimport logging\nimport numpy as np\nimport importlib\nimport warnings\nimport argparse\n\nimport torch.optim as optim\nimport torch.nn as nn\nfrom datetime import datetime\nfrom models.loss_helper import get_loss as criterion\nfrom tensorboardX import SummaryWriter\nfrom torch.optim import lr_scheduler\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nfrom models.backbone.pointnet2.pytorch_utils import BNMomentumScheduler\nfrom models.dump_helper import dump_results, dump_results_\nfrom models.ap_helper import APCalculator, parse_predictions, parse_groundtruths\n\nfrom omegaconf import OmegaConf\nfrom torch.utils.data import DataLoader\nfrom torch.serialization import default_restore_location\nfrom lib.distributed import multi_proc_run, is_master_proc, get_world_size\n\nclass DetectionTrainer():\n def __init__(self, config):\n self.is_master = is_master_proc(get_world_size()) if get_world_size() > 1 else True\n self.cur_device = torch.cuda.current_device()\n\n # load the configurations\n self.setup_logging()\n if os.path.exists('config.yaml'):\n logging.info('===> Loading exsiting config file')\n config = OmegaConf.load('config.yaml')\n logging.info('===> Loaded exsiting config file')\n logging.info('===> Configurations')\n logging.info(config.pretty())\n\n # Create Dataset and Dataloader\n if config.data.dataset == 'sunrgbd':\n from datasets.sunrgbd.sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, MAX_NUM_OBJ\n from datasets.sunrgbd.model_util_sunrgbd import SunrgbdDatasetConfig\n dataset_config = SunrgbdDatasetConfig()\n train_dataset = SunrgbdDetectionVotesDataset('train', \n num_points=config.data.num_points,\n augment=True,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height),\n use_v1=(not config.data.use_sunrgbd_v2))\n test_dataset = SunrgbdDetectionVotesDataset(config.test.phase, \n num_points=config.data.num_points,\n augment=False,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height),\n use_v1=(not config.data.use_sunrgbd_v2))\n elif config.data.dataset == 'scannet':\n from datasets.scannet.scannet_detection_dataset import ScannetDetectionDataset, MAX_NUM_OBJ\n from datasets.scannet.model_util_scannet import ScannetDatasetConfig\n dataset_config = ScannetDatasetConfig()\n train_dataset = ScannetDetectionDataset('train', \n num_points=config.data.num_points,\n augment=True,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height),\n by_scenes=config.data.by_scenes,\n by_points=config.data.by_points)\n\n test_dataset = ScannetDetectionDataset(config.test.phase, \n num_points=config.data.num_points,\n augment=False,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height))\n else:\n logging.info('Unknown dataset %s. 
Exiting...'%(config.data.dataset))\n exit(-1)\n\n COLLATE_FN = None\n if config.data.voxelization:\n from models.backbone.sparseconv.voxelized_dataset import VoxelizationDataset, collate_fn\n train_dataset = VoxelizationDataset(train_dataset, config.data.voxel_size)\n test_dataset = VoxelizationDataset(test_dataset, config.data.voxel_size)\n COLLATE_FN = collate_fn\n logging.info('training: {}, testing: {}'.format(len(train_dataset), len(test_dataset)))\n\n self.sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if get_world_size() > 1 else None\n train_dataloader = DataLoader(\n train_dataset, \n batch_size=config.data.batch_size // config.misc.num_gpus,\n shuffle=(self.sampler is None),\n sampler=self.sampler,\n num_workers=config.data.num_workers, \n collate_fn=COLLATE_FN)\n\n test_dataloader = DataLoader(\n test_dataset, \n batch_size=1,\n shuffle=False, \n num_workers=1, \n collate_fn=COLLATE_FN)\n logging.info('train dataloader: {}, test dataloader: {}'.format(len(train_dataloader),len(test_dataloader)))\n\n # Init the model and optimzier\n MODEL = importlib.import_module('models.' + config.net.model) # import network module\n num_input_channel = int(config.data.use_color)*3 + int(not config.data.no_height)*1\n\n if config.net.model == 'boxnet':\n Detector = MODEL.BoxNet\n else:\n Detector = MODEL.VoteNet\n\n net = Detector(num_class=dataset_config.num_class,\n num_heading_bin=dataset_config.num_heading_bin,\n num_size_cluster=dataset_config.num_size_cluster,\n mean_size_arr=dataset_config.mean_size_arr,\n num_proposal=config.net.num_target,\n input_feature_dim=num_input_channel,\n vote_factor=config.net.vote_factor,\n sampling=config.net.cluster_sampling,\n backbone=config.net.backbone)\n\n if config.net.weights != '':\n #assert config.net.backbone == \"sparseconv\", \"only support sparseconv\"\n print('===> Loading weights: ' + config.net.weights)\n state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu'))\n model = net\n if config.net.is_train:\n model = net.backbone_net\n if config.net.backbone == \"sparseconv\":\n model = net.backbone_net.net\n \n matched_weights = DetectionTrainer.load_state_with_same_shape(model, state['state_dict'])\n model_dict = model.state_dict()\n model_dict.update(matched_weights)\n model.load_state_dict(model_dict)\n\n net.to(self.cur_device)\n if get_world_size() > 1:\n net = torch.nn.parallel.DistributedDataParallel(\n module=net, device_ids=[self.cur_device], output_device=self.cur_device, broadcast_buffers=False) \n\n # Load the Adam optimizer\n self.optimizer = optim.Adam(net.parameters(), lr=config.optimizer.learning_rate, weight_decay=config.optimizer.weight_decay)\n # writer\n if self.is_master:\n self.writer = SummaryWriter(log_dir='tensorboard')\n self.config = config\n self.dataset_config = dataset_config\n self.net = net\n self.train_dataloader = train_dataloader\n self.test_dataloader = test_dataloader\n self.best_mAP = -1\n\n # Used for AP calculation\n self.CONFIG_DICT = {'remove_empty_box':False, 'use_3d_nms':True,\n 'nms_iou':0.25, 'use_old_type_nms':False, 'cls_nms':True,\n 'per_class_proposal': True, 'conf_thresh':0.05, 'dataset_config': dataset_config}\n\n # Used for AP calculation\n self.CONFIG_DICT_TEST = {'remove_empty_box': (not config.test.faster_eval), \n 'use_3d_nms': config.test.use_3d_nms, \n 'nms_iou': config.test.nms_iou,\n 'use_old_type_nms': config.test.use_old_type_nms, \n 'cls_nms': config.test.use_cls_nms, \n 'per_class_proposal': 
config.test.per_class_proposal,\n 'conf_thresh': config.test.conf_thresh,\n 'dataset_config': dataset_config}\n\n # Load checkpoint if there is any\n self.start_epoch = 0\n CHECKPOINT_PATH = os.path.join('checkpoint.tar')\n if os.path.isfile(CHECKPOINT_PATH):\n checkpoint = torch.load(CHECKPOINT_PATH)\n if get_world_size() > 1:\n _model = self.net.module\n else:\n _model = self.net\n _model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n self.start_epoch = checkpoint['epoch']\n self.best_mAP = checkpoint['best_mAP']\n logging.info(\"-> loaded checkpoint %s (epoch: %d)\"%(CHECKPOINT_PATH, self.start_epoch))\n\n # Decay Batchnorm momentum from 0.5 to 0.999\n # note: pytorch's BN momentum (default 0.1)= 1 - tensorflow's BN momentum\n BN_MOMENTUM_INIT = 0.5\n BN_MOMENTUM_MAX = 0.001\n BN_DECAY_STEP = config.optimizer.bn_decay_step\n BN_DECAY_RATE = config.optimizer.bn_decay_rate\n bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * BN_DECAY_RATE**(int(it / BN_DECAY_STEP)), BN_MOMENTUM_MAX)\n self.bnm_scheduler = BNMomentumScheduler(net, bn_lambda=bn_lbmd, last_epoch=self.start_epoch-1)\n\n def setup_logging(self):\n ch = logging.StreamHandler(sys.stdout)\n logging.getLogger().setLevel(logging.WARN)\n if self.is_master:\n logging.getLogger().setLevel(logging.INFO)\n logging.basicConfig(\n format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',\n datefmt='%m/%d %H:%M:%S',\n handlers=[ch])\n\n @staticmethod\n def load_state_with_same_shape(model, weights):\n model_state = model.state_dict()\n if list(weights.keys())[0].startswith('module.'):\n print(\"Loading multigpu weights with module. prefix...\")\n weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()}\n\n if list(weights.keys())[0].startswith('encoder.'):\n logging.info(\"Loading multigpu weights with encoder. 
prefix...\")\n weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()}\n\n # print(weights.items())\n filtered_weights = {\n k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()\n }\n print(\"Loading weights:\" + ', '.join(filtered_weights.keys()))\n return filtered_weights\n \n @staticmethod\n def get_current_lr(epoch, config):\n lr = config.optimizer.learning_rate\n for i,lr_decay_epoch in enumerate(config.optimizer.lr_decay_steps):\n if epoch >= lr_decay_epoch:\n lr *= config.optimizer.lr_decay_rates[i]\n return lr\n\n @staticmethod\n def adjust_learning_rate(optimizer, epoch, config):\n lr = DetectionTrainer.get_current_lr(epoch, config)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n def train_one_epoch(self, epoch_cnt):\n stat_dict = {} # collect statistics\n DetectionTrainer.adjust_learning_rate(self.optimizer, epoch_cnt, self.config)\n self.bnm_scheduler.step() # decay BN momentum\n self.net.train() # set model to training mode\n for batch_idx, batch_data_label in enumerate(self.train_dataloader):\n for key in batch_data_label:\n if key == 'scan_name':\n continue\n batch_data_label[key] = batch_data_label[key].cuda()\n\n # Forward pass\n self.optimizer.zero_grad()\n inputs = {'point_clouds': batch_data_label['point_clouds']}\n if 'voxel_coords' in batch_data_label:\n inputs.update({\n 'voxel_coords': batch_data_label['voxel_coords'],\n 'voxel_inds': batch_data_label['voxel_inds'],\n 'voxel_feats': batch_data_label['voxel_feats']})\n\n end_points = self.net(inputs)\n \n # Compute loss and gradients, update parameters.\n for key in batch_data_label:\n assert(key not in end_points)\n end_points[key] = batch_data_label[key]\n loss, end_points = criterion(end_points, self.dataset_config)\n loss.backward()\n self.optimizer.step()\n\n # Accumulate statistics and print out\n for key in end_points:\n if 'loss' in key or 'acc' in key or 'ratio' in key:\n if key not in stat_dict: stat_dict[key] = 0\n stat_dict[key] += end_points[key].item()\n\n batch_interval = 10\n if ((batch_idx+1) % batch_interval == 0) and self.is_master:\n logging.info(' ---- batch: %03d ----' % (batch_idx+1))\n for key in stat_dict:\n self.writer.add_scalar('training/{}'.format(key), stat_dict[key]/batch_interval, \n (epoch_cnt*len(self.train_dataloader)+batch_idx)*self.config.data.batch_size)\n for key in sorted(stat_dict.keys()):\n logging.info('mean %s: %f'%(key, stat_dict[key]/batch_interval))\n stat_dict[key] = 0\n\n def evaluate_one_epoch(self, epoch_cnt):\n np.random.seed(0)\n stat_dict = {} # collect statistics\n\n ap_calculator = APCalculator(ap_iou_thresh=self.config.test.ap_iou, class2type_map=self.dataset_config.class2type)\n self.net.eval() # set model to eval mode (for bn and dp)\n for batch_idx, batch_data_label in enumerate(self.test_dataloader):\n if batch_idx % 10 == 0:\n logging.info('Eval batch: %d'%(batch_idx))\n for key in batch_data_label:\n if key == 'scan_name':\n continue\n batch_data_label[key] = batch_data_label[key].cuda()\n \n # Forward pass\n inputs = {'point_clouds': batch_data_label['point_clouds']}\n if 'voxel_coords' in batch_data_label:\n inputs.update({\n 'voxel_coords': batch_data_label['voxel_coords'],\n 'voxel_inds': batch_data_label['voxel_inds'],\n 'voxel_feats': batch_data_label['voxel_feats']})\n\n with torch.no_grad():\n end_points = self.net(inputs)\n\n # Compute loss\n for key in batch_data_label:\n assert(key not in end_points)\n end_points[key] = batch_data_label[key]\n loss, end_points = 
criterion(end_points, self.dataset_config)\n\n # Accumulate statistics and print out\n for key in end_points:\n if 'loss' in key or 'acc' in key or 'ratio' in key:\n if key not in stat_dict: stat_dict[key] = 0\n stat_dict[key] += end_points[key].item()\n\n batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT) \n batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT) \n ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)\n\n # Dump evaluation results for visualization\n if self.config.data.dump_results and batch_idx == 0 and epoch_cnt %10 == 0 and self.is_master:\n dump_results(end_points, 'results', self.dataset_config) \n\n # Log statistics\n logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))\n if self.is_master:\n for key in sorted(stat_dict.keys()):\n self.writer.add_scalar('validation/{}'.format(key), stat_dict[key]/float(batch_idx+1),\n (epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)\n\n # Evaluate average precision\n metrics_dict = ap_calculator.compute_metrics()\n for key in metrics_dict:\n logging.info('eval %s: %f'%(key, metrics_dict[key]))\n if self.is_master:\n self.writer.add_scalar('validation/mAP{}'.format(self.config.test.ap_iou), metrics_dict['mAP'], (epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)\n #mean_loss = stat_dict['loss']/float(batch_idx+1)\n\n return metrics_dict['mAP']\n\n def train(self):\n for epoch in range(self.start_epoch, self.config.optimizer.max_epoch):\n logging.info('**** EPOCH %03d ****' % (epoch))\n logging.info('Current learning rate: %f'%(DetectionTrainer.get_current_lr(epoch, self.config)))\n logging.info('Current BN decay momentum: %f'%(self.bnm_scheduler.lmbd(self.bnm_scheduler.last_epoch)))\n logging.info(str(datetime.now()))\n # Reset numpy seed.\n # REF: https://github.com/pytorch/pytorch/issues/5059\n np.random.seed()\n if get_world_size() > 1:\n self.sampler.set_epoch(epoch)\n self.train_one_epoch(epoch)\n\n if epoch % 5 == 4 and self.is_master: # Eval every 5 epochs\n best_mAP = self.evaluate_one_epoch(epoch)\n\n if best_mAP > self.best_mAP:\n self.best_mAP = best_mAP\n # Save checkpoint\n save_dict = {'epoch': epoch+1, # after training one epoch, the start_epoch should be epoch+1\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'best_mAP': self.best_mAP}\n\n if get_world_size() > 1:\n save_dict['state_dict'] = self.net.module.state_dict()\n else:\n save_dict['state_dict'] = self.net.state_dict()\n\n torch.save(save_dict, 'checkpoint.tar')\n OmegaConf.save(self.config, 'config.yaml')\n\n\n @staticmethod\n def write_to_benchmark(data, scene_name):\n from models.ap_helper import flip_axis_back_camera\n OBJ_CLASS_IDS = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])\n os.makedirs('benchmark_output', exist_ok=True)\n bsize = len(scene_name)\n for bsize_ in range(bsize):\n write_list = []\n cur_data = data[bsize_]\n cur_name = scene_name[bsize_]\n for class_id, bbox, score in cur_data:\n bbox = flip_axis_back_camera(bbox)\n minx = np.min(bbox[:,0])\n miny = np.min(bbox[:,1])\n minz = np.min(bbox[:,2])\n maxx = np.max(bbox[:,0])\n maxy = np.max(bbox[:,1])\n maxz = np.max(bbox[:,2])\n write_list.append([minx, miny, minz, maxx,maxy, maxz, OBJ_CLASS_IDS[class_id], score])\n\n np.savetxt(os.path.join('benchmark_output', cur_name+'.txt'), np.array(write_list))\n\n\n def test(self):\n if self.config.test.use_cls_nms:\n assert(self.config.test.use_3d_nms)\n\n AP_IOU_THRESHOLDS = self.config.test.ap_iou_thresholds\n 
logging.info(str(datetime.now()))\n # Reset numpy seed.\n # REF: https://github.com/pytorch/pytorch/issues/5059\n np.random.seed(0)\n stat_dict = {}\n ap_calculator_list = [APCalculator(iou_thresh, self.dataset_config.class2type) for iou_thresh in AP_IOU_THRESHOLDS]\n self.net.eval() # set model to eval mode (for bn and dp)\n for batch_idx, batch_data_label in enumerate(self.test_dataloader):\n if batch_idx % 10 == 0:\n print('Eval batch: %d'%(batch_idx))\n for key in batch_data_label:\n if key == 'scan_name':\n continue\n batch_data_label[key] = batch_data_label[key].cuda()\n # Forward pass\n inputs = {'point_clouds': batch_data_label['point_clouds']}\n if 'voxel_coords' in batch_data_label:\n inputs.update({\n 'voxel_coords': batch_data_label['voxel_coords'],\n 'voxel_inds': batch_data_label['voxel_inds'],\n 'voxel_feats': batch_data_label['voxel_feats']})\n with torch.no_grad():\n end_points = self.net(inputs)\n\n # Compute loss\n for key in batch_data_label:\n assert(key not in end_points)\n end_points[key] = batch_data_label[key]\n loss, end_points = criterion(end_points, self.dataset_config)\n\n # Accumulate statistics and print out\n for key in end_points:\n if 'loss' in key or 'acc' in key or 'ratio' in key:\n if key not in stat_dict: stat_dict[key] = 0\n stat_dict[key] += end_points[key].item()\n\n batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT_TEST) \n batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT_TEST) \n for ap_calculator in ap_calculator_list:\n ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)\n\n # debug\n if self.config.test.write_to_benchmark:\n #from lib.utils.io3d import write_triangle_mesh\n #write_triangle_mesh(batch_data_label['point_clouds'][0].cpu().numpy(), None, None, batch_data_label['scan_name'][0]+'.ply')\n DetectionTrainer.write_to_benchmark(batch_pred_map_cls, batch_data_label['scan_name'])\n \n if self.config.test.save_vis:\n dump_results_(end_points, 'visualization', self.dataset_config)\n\n # Log statistics\n for key in sorted(stat_dict.keys()):\n logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))\n\n # Evaluate average precision\n if not self.config.test.write_to_benchmark:\n for i, ap_calculator in enumerate(ap_calculator_list):\n logging.info('-'*10 + 'iou_thresh: %f'%(AP_IOU_THRESHOLDS[i]) + '-'*10)\n metrics_dict = ap_calculator.compute_metrics()\n for key in metrics_dict:\n logging.info('eval %s: %f'%(key, metrics_dict[key]))\n\n mean_loss = stat_dict['loss']/float(batch_idx+1)\n return mean_loss\n"
] |
[
[
"numpy.array",
"numpy.ones_like",
"numpy.ones",
"torch.load",
"numpy.linalg.inv",
"numpy.expand_dims"
],
[
"numpy.max",
"numpy.array",
"numpy.random.seed",
"torch.no_grad",
"torch.save",
"torch.nn.parallel.DistributedDataParallel",
"numpy.min",
"torch.cuda.current_device",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.serialization.default_restore_location"
]
] |
blackredscarf/pytorch-SkipGram
|
[
"a9fa5a888a7b0c6170eb1fe146e59f54041b2613"
] |
[
"eval/ranking.py"
] |
[
"\"\"\"\nReference: https://github.com/mfaruqui/eval-word-vectors\n\"\"\"\n\nimport math\nimport numpy\nfrom operator import itemgetter\nfrom numpy.linalg import norm\n\nEPSILON = 1e-6\n\ndef euclidean(vec1, vec2):\n diff = vec1 - vec2\n return math.sqrt(diff.dot(diff))\n\ndef cosine_sim(vec1, vec2):\n vec1 += EPSILON * numpy.ones(len(vec1))\n vec2 += EPSILON * numpy.ones(len(vec1))\n return vec1.dot(vec2)/(norm(vec1)*norm(vec2))\n\ndef assign_ranks(item_dict):\n ranked_dict = {}\n sorted_list = [(key, val) for (key, val) in sorted(item_dict.items(),\n key=itemgetter(1),\n reverse=True)]\n for i, (key, val) in enumerate(sorted_list):\n same_val_indices = []\n for j, (key2, val2) in enumerate(sorted_list):\n if val2 == val:\n same_val_indices.append(j+1)\n if len(same_val_indices) == 1:\n ranked_dict[key] = i+1\n else:\n ranked_dict[key] = 1.*sum(same_val_indices)/len(same_val_indices)\n return ranked_dict\n\ndef correlation(dict1, dict2):\n avg1 = 1.*sum([val for key, val in dict1.iteritems()])/len(dict1)\n avg2 = 1.*sum([val for key, val in dict2.iteritems()])/len(dict2)\n numr, den1, den2 = (0., 0., 0.)\n for val1, val2 in zip(dict1.itervalues(), dict2.itervalues()):\n numr += (val1 - avg1) * (val2 - avg2)\n den1 += (val1 - avg1) ** 2\n den2 += (val2 - avg2) ** 2\n return numr / math.sqrt(den1 * den2)\n\ndef spearmans_rho(ranked_dict1, ranked_dict2):\n assert len(ranked_dict1) == len(ranked_dict2)\n if len(ranked_dict1) == 0 or len(ranked_dict2) == 0:\n return 0.\n x_avg = 1.*sum([val for val in ranked_dict1.values()])/len(ranked_dict1)\n y_avg = 1.*sum([val for val in ranked_dict2.values()])/len(ranked_dict2)\n num, d_x, d_y = (0., 0., 0.)\n for key in ranked_dict1.keys():\n xi = ranked_dict1[key]\n yi = ranked_dict2[key]\n num += (xi-x_avg)*(yi-y_avg)\n d_x += (xi-x_avg)**2\n d_y += (yi-y_avg)**2\n return num/(math.sqrt(d_x*d_y))\n"
] |
[
[
"numpy.linalg.norm"
]
] |
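A hedged usage sketch for the eval/ranking.py record above, assuming the module is importable as `ranking` and that word vectors come from a trained SkipGram model; the toy vectors and similarity pairs below are made up. Note that `cosine_sim` mutates its arguments in place via `+=`, hence the `.copy()` calls:

```python
import numpy as np
import ranking  # assumption: eval/ranking.py above is on PYTHONPATH as `ranking`

# toy 2-d word vectors standing in for trained SkipGram embeddings
vecs = {'car': np.array([0.9, 0.1]), 'automobile': np.array([0.85, 0.2]),
        'noon': np.array([0.1, 0.9]), 'string': np.array([0.4, 0.6])}

human = {('car', 'automobile'): 9.5, ('noon', 'string'): 2.0,
         ('car', 'string'): 1.0}  # gold similarity judgements
model = {p: ranking.cosine_sim(vecs[p[0]].copy(), vecs[p[1]].copy())
         for p in human}

# rank both score dicts (ties get averaged ranks), then correlate the ranks
rho = ranking.spearmans_rho(ranking.assign_ranks(human),
                            ranking.assign_ranks(model))
print('Spearman rho: %.3f' % rho)
```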
knorth55/chainer-light-head-rcnn
|
[
"4408311384d5abe550cd6ad004fa190aaced2c95"
] |
[
"tests/functions_tests/test_psroi_max_align_2d.py"
] |
[
"import chainer\nfrom chainer.backends import cuda\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer.testing import condition\nimport numpy as np\nimport unittest\n\nfrom light_head_rcnn import functions\n\n\nclass TestPSROIMaxPolling2D(unittest.TestCase):\n\n def setUp(self):\n self.N = 3\n self.group_size = 2\n self.out_c = 2\n self.n_channels = self.group_size * self.group_size * self.out_c\n self.x = np.arange(\n self.N * self.n_channels * 10 * 12,\n dtype=np.float32).reshape((self.N, self.n_channels, 10, 12))\n np.random.shuffle(self.x)\n self.x = 2 * self.x / self.x.size - 1\n self.x = self.x.astype(np.float32)\n self.rois = np.array(\n [[0, 0, 7, 7],\n [1, 0, 5, 12],\n [0, 1, 10, 5],\n [3, 3, 4, 4]],\n dtype=np.float32\n )\n self.roi_indices = np.array([0, 2, 1, 0], dtype=np.int32)\n self.n_roi = self.rois.shape[0]\n self.out_h, self.out_w = 4, 4\n self.spatial_scale = 1.0\n self.gy = np.random.uniform(\n -1, 1, (self.n_roi, self.out_c, self.out_h, self.out_w))\n self.gy = self.gy.astype(np.float32)\n self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}\n\n def check_forward(self, x_data, roi_data, roi_index_data):\n x = chainer.Variable(x_data)\n rois = chainer.Variable(roi_data)\n roi_indices = chainer.Variable(roi_index_data)\n y = functions.psroi_max_align_2d(\n x, rois, roi_indices, self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size)\n self.assertEqual(y.data.dtype, np.float32)\n y_data = cuda.to_cpu(y.data)\n self.assertEqual(\n (self.n_roi, self.out_c, self.out_h, self.out_w), y_data.shape)\n\n @condition.retry(3)\n def test_forward_cpu(self):\n self.check_forward(self.x, self.rois, self.roi_indices)\n\n @attr.gpu\n @condition.retry(3)\n def test_forward_gpu(self):\n self.check_forward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices))\n\n def check_backward(self, x_data, roi_data, roi_index_data, y_grad_data):\n gradient_check.check_backward(\n functions.PSROIMaxAlign2D(\n self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size),\n (x_data, roi_data, roi_index_data), y_grad_data,\n no_grads=[False, True, True], **self.check_backward_options)\n\n @condition.retry(3)\n def test_backward_cpu(self):\n self.check_backward(self.x, self.rois, self.roi_indices, self.gy)\n\n @attr.gpu\n @condition.retry(3)\n def test_backward_gpu(self):\n self.check_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))\n\n def apply_backward(self, x_data, roi_data, roi_index_data, y_grad_data):\n x = chainer.Variable(x_data)\n rois = chainer.Variable(roi_data)\n roi_indices = chainer.Variable(roi_index_data)\n y = functions.psroi_max_align_2d(\n x, rois, roi_indices, self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size)\n x.cleargrad()\n y.grad = y_grad_data\n y.backward()\n return x, y\n\n @attr.gpu\n @condition.retry(3)\n def test_consistency_with_gpu(self):\n x_cpu, y_cpu = self.apply_backward(\n self.x, self.rois, self.roi_indices, self.gy)\n x_gpu, y_gpu = self.apply_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))\n testing.assert_allclose(y_cpu.data, y_gpu.data)\n testing.assert_allclose(x_cpu.grad, x_gpu.grad)\n\n\ntesting.run_module(__name__, __file__)\n"
] |
[
[
"numpy.array",
"numpy.random.uniform",
"numpy.arange",
"numpy.random.shuffle"
]
] |
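The `gradient_check.check_backward` calls in the test record above compare analytic gradients against numerically estimated ones. A self-contained numpy illustration of what that check does, using central finite differences; `f` and its gradient here are toy choices, not the PSROI kernel:

```python
import numpy as np

def f(x):
    return (x ** 2).sum()           # toy forward function

def analytic_grad(x):
    return 2 * x                    # its hand-derived gradient

def numerical_grad(f, x, eps=1e-3):
    # central finite differences, perturbing one element at a time
    g = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        fp = f(x)
        x[idx] = orig - eps
        fm = f(x)
        x[idx] = orig               # restore before moving on
        g[idx] = (fp - fm) / (2 * eps)
        it.iternext()
    return g

x = np.random.uniform(-1, 1, (3, 4))
np.testing.assert_allclose(analytic_grad(x), numerical_grad(f, x),
                           atol=5e-4, rtol=5e-3)  # same tolerances as the test
print('gradient check passed')
```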
dingdian110/AutoDC
|
[
"f5ccca6bea993bcff3e804fb859e8b25ae020b5c",
"f5ccca6bea993bcff3e804fb859e8b25ae020b5c"
] |
[
"autodc/components/ensemble/unnamed_ensemble.py",
"autodc/components/models/regression/adaboost.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport scipy.spatial\nfrom sklearn.metrics.scorer import _BaseScorer\nfrom autodc.components.utils.constants import CLS_TASKS\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics import accuracy_score\n\n\ndef choose_base_models_regression(predictions, labels, num_model):\n base_mask = [0] * len(predictions)\n dif = predictions - labels\n dif[dif > 0] = 1\n dif[dif < 0] = -1\n '''Calculate the distance between each model'''\n dist = scipy.spatial.distance.cdist(dif, dif)\n total_dist = np.sum(dist, 1)\n '''Select the model which has large distance to other models'''\n selected_models = total_dist.argsort()[-num_model:]\n for model in selected_models:\n base_mask[model] = 1\n return base_mask\n\n\ndef choose_base_models_classification(predictions, num_model, interval=20):\n num_class = predictions.shape[2]\n num_total_models = predictions.shape[0]\n base_mask = [0] * len(predictions)\n bucket = np.arange(interval + 1) / interval\n bucket[-1] += 1e-8\n distribution = []\n for prediction in predictions:\n freq_array = []\n for i in range(num_class):\n class_i = prediction[:, i]\n group = pd.cut(class_i, bucket, right=False)\n counts = group.value_counts()\n freq = list(counts / counts.sum())\n freq_array += freq\n distribution.append(freq_array) # Shape: (num_total_models,20*num_class)\n distribution = np.array(distribution)\n\n # Apply the clustering algorithm\n model = AgglomerativeClustering(n_clusters=num_model, linkage=\"complete\")\n cluster = model.fit(distribution)\n \"\"\"\n Select models which are the most nearest to the clustering center\n selected_models = []\n \"\"\"\n for cluster_label in range(num_model):\n cluster_center = np.zeros(distribution.shape[1])\n count = 0\n \"\"\"\n Averaging the distribution which belong the same clustering class\n and then get the corresponding distribution center\n \"\"\"\n for i in range(num_total_models):\n if cluster.labels_[i] == cluster_label:\n count += 1\n cluster_center += distribution[i]\n cluster_center = cluster_center / count\n distances = np.sqrt(np.sum(np.asarray(cluster_center - distribution) ** 2, axis=1))\n selected_model = distances.argmin()\n base_mask[selected_model] = 1\n\n return base_mask\n\n\ndef calculate_weights(predictions, labels, base_mask):\n num_total_models = predictions.shape[0]\n num_samples = predictions.shape[1]\n weights = np.zeros((num_samples, num_total_models))\n for i in range(num_total_models):\n if base_mask[i] != 0:\n predicted_labels = np.argmax(predictions[i], 1)\n acc = accuracy_score(predicted_labels, labels)\n model_weight = 0.5 * np.log(acc / (1 - acc)) # a concrete value\n shannon_ent = -1.0 * np.sum(predictions[i] * np.log2(predictions[i]), 1) # shape: (1, num_samples)\n confidence = 1 / np.exp(shannon_ent)\n model_weight = model_weight * confidence # The weight of current model to all samples\n model_weight = model_weight.reshape(num_samples, 1)\n weights[:, i] = model_weight\n return weights\n\n\ndef calculate_weights_simple(predictions, labels, base_mask):\n num_total_models = predictions.shape[0]\n weights = [0] * num_total_models\n for i in range(num_total_models):\n if base_mask[i] != 0:\n predicted_labels = np.argmax(predictions[i], 1)\n acc = accuracy_score(predicted_labels, labels)\n model_weight = 0.5 * np.log(acc / (1 - acc)) # a concrete value\n weights[i] = model_weight\n return weights\n\n\nclass UnnamedEnsemble:\n def __init__(\n self,\n ensemble_size: int,\n task_type: int,\n metric: _BaseScorer,\n random_state: 
np.random.RandomState = None,\n ):\n self.ensemble_size = ensemble_size\n self.task_type = task_type\n self.metric = metric\n self.random_state = random_state\n self.base_model_mask = None\n self.weights_ = None\n\n def fit(self, predictions, labels):\n \"\"\"\n\n :param predictions: proba_predictions for cls. Shape: (num_models,num_samples,num_class) for cls\n :param labels: Shape: (num_samples,)\n :return: self\n \"\"\"\n if self.task_type in CLS_TASKS: # If classification\n self.base_model_mask = choose_base_models(predictions, labels, self.ensemble_size)\n self.weights_ = calculate_weights(predictions, labels, self.base_model_mask)\n else:\n pass\n return self\n\n def predict(self, predictions):\n predictions = np.asarray(predictions)\n\n # if predictions.shape[0] == len(self.weights_),\n # predictions include those of zero-weight models.\n if predictions.shape[0] == len(self.weights_):\n return np.average(predictions, axis=0, weights=self.weights_)\n\n # if prediction model.shape[0] == len(non_null_weights),\n # predictions do not include those of zero-weight models.\n elif predictions.shape[0] == np.count_nonzero(self.weights_):\n non_null_weights = [w for w in self.weights_ if w > 0]\n return np.average(predictions, axis=0, weights=non_null_weights)\n\n # If none of the above applies, then something must have gone wrong.\n else:\n raise ValueError(\"The dimensions of ensemble predictions\"\n \" and ensemble weights do not match!\")\n",
"import numpy as np\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter\n\nfrom autodc.components.models.base_model import BaseRegressionModel\nfrom autodc.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS\n\n\nclass AdaboostRegressor(BaseRegressionModel):\n\n def __init__(self, n_estimators, learning_rate, max_depth,\n random_state=None):\n self.n_estimators = n_estimators\n self.learning_rate = learning_rate\n self.random_state = random_state\n self.max_depth = max_depth\n self.estimator = None\n self.time_limit = None\n\n def fit(self, X, Y, sample_weight=None):\n from sklearn.ensemble import AdaBoostRegressor as ABR\n from sklearn.tree import DecisionTreeRegressor\n self.n_estimators = int(self.n_estimators)\n self.learning_rate = float(self.learning_rate)\n self.max_depth = int(self.max_depth)\n base_estimator = DecisionTreeRegressor(max_depth=self.max_depth)\n\n estimator = ABR(\n base_estimator=base_estimator,\n n_estimators=self.n_estimators,\n learning_rate=self.learning_rate,\n random_state=self.random_state\n )\n\n estimator.fit(X, Y, sample_weight=sample_weight)\n\n self.estimator = estimator\n return self\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError\n return self.estimator.predict(X)\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'AB',\n 'name': 'AdaBoost Regression',\n 'handles_regression': True,\n 'handles_classification': False,\n 'handles_multiclass': False,\n 'handles_multilabel': False,\n 'is_deterministic': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (PREDICTIONS,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n if optimizer == 'smac':\n cs = ConfigurationSpace()\n\n n_estimators = UniformIntegerHyperparameter(\n name=\"n_estimators\", lower=50, upper=500, default_value=50, log=False)\n learning_rate = UniformFloatHyperparameter(\n name=\"learning_rate\", lower=0.01, upper=2, default_value=0.1, log=True)\n max_depth = UniformIntegerHyperparameter(\n name=\"max_depth\", lower=1, upper=10, default_value=1, log=False)\n\n cs.add_hyperparameters([n_estimators, learning_rate, max_depth])\n return cs\n elif optimizer == 'tpe':\n from hyperopt import hp\n space = {'n_estimators': hp.randint('ab_n_estimators', 451) + 50,\n 'learning_rate': hp.loguniform('ab_learning_rate', np.log(0.01), np.log(2)),\n 'max_depth': hp.randint('ab_max_depth', 10) + 1}\n\n init_trial = {'n_estimators': 50, 'learning_rate': 0.1, 'algorithm': \"SAMME.R\", 'max_depth': 1}\n return space\n"
] |
[
[
"numpy.array",
"sklearn.cluster.AgglomerativeClustering",
"pandas.cut",
"numpy.asarray",
"numpy.zeros",
"numpy.log",
"numpy.count_nonzero",
"numpy.sum",
"numpy.exp",
"sklearn.metrics.accuracy_score",
"numpy.arange",
"numpy.argmax",
"numpy.average",
"numpy.log2"
],
[
"sklearn.ensemble.AdaBoostRegressor",
"numpy.log",
"sklearn.tree.DecisionTreeRegressor"
]
] |
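A standalone sketch of the weighting scheme in `calculate_weights` above: an AdaBoost-style model weight 0.5*ln(acc/(1-acc)), scaled per sample by a confidence term 1/exp(H), where H is the prediction entropy. The clipping guard and the toy data are additions here, not part of the original code:

```python
import numpy as np

def per_sample_model_weight(probs, labels):
    # probs: (num_samples, num_classes) probabilities from one base model
    acc = float((probs.argmax(axis=1) == labels).mean())
    acc = np.clip(acc, 1e-6, 1 - 1e-6)           # guard log(0) / division by zero
    base_weight = 0.5 * np.log(acc / (1 - acc))  # AdaBoost-style model weight
    entropy = -(probs * np.log2(probs + 1e-12)).sum(axis=1)
    return base_weight / np.exp(entropy)         # confident samples weigh more

rng = np.random.default_rng(0)
probs = rng.dirichlet(np.ones(3), size=5)        # 5 samples, 3 classes
labels = rng.integers(0, 3, size=5)
print(per_sample_model_weight(probs, labels))
```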