repo_name: string (lengths 6-130)
hexsha: list
file_path: list
code: list
apis: list
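Each row below pairs a repository with the files sampled from it and the library APIs detected in each file. The following is a minimal, illustrative Python sketch of how one such row could be represented, assuming only the column names and types from the schema above; the RepoRecord class and the apis_used helper are hypothetical and not part of the dataset itself.

from dataclasses import dataclass
from typing import List, Set

@dataclass
class RepoRecord:
    """Illustrative container for one row of the listing below (assumed layout)."""
    repo_name: str            # e.g. "VISTAS-IVES/pyvistas"; 6-130 characters per the schema
    hexsha: List[str]         # commit SHA for each sampled file
    file_path: List[str]      # path of each sampled file within the repo
    code: List[str]           # full source text of each file
    apis: List[List[str]]     # fully qualified API calls detected per file

def apis_used(record: RepoRecord) -> Set[str]:
    """Flatten the per-file API lists into one set for the whole record."""
    return {api for per_file in record.apis for api in per_file}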
VISTAS-IVES/pyvistas
[ "2de1541c0fb40ccbac4014af758ff329ba0677b1" ]
[ "plugins/barchart/main.py" ]
[ "import random\nfrom io import BytesIO\n\nimport numpy\nimport wx\nfrom PIL import Image\nfrom matplotlib import pyplot\n\nfrom vistas.core.color import RGBColor\nfrom vistas.core.plugins.data import DataPlugin\nfrom vistas.core.plugins.option import Option, OptionGroup\nfrom vistas.core.plugins.visualization import VisualizationPlugin2D, VisualizationUpdateEvent\nfrom vistas.core.timeline import Timeline\nfrom vistas.ui.app import App\nfrom vistas.ui.utils import post_newoptions_available\n\n\nclass GraphVisualization(VisualizationPlugin2D):\n id = 'barchart_visualization_plugin'\n name = 'Barchart Visualization'\n description = 'Plots barcharts of values from a grid'\n author = 'Conservation Biology Institute'\n version = '1.0'\n\n def __init__(self):\n super().__init__()\n self.data = None\n\n self.attribute_option = Option(self, Option.CHOICE, 'Attribute', 0)\n self.labels_option = Option(self, Option.CHECKBOX, 'Show Labels', True)\n self.bg_color_option = Option(self, Option.COLOR, 'Background Color', RGBColor(0, 0, 0))\n self.label_color_option = Option(self, Option.COLOR, 'Label Color', RGBColor(1, 1, 1))\n self.num_categories = Option(self, Option.INT, 'Number of Categories', 2)\n self.categories_group = OptionGroup('Categories')\n\n self.global_options = OptionGroup('Options')\n self.global_options.items = [\n self.attribute_option, self.labels_option, self.bg_color_option, self.label_color_option\n ]\n\n def get_options(self):\n options = OptionGroup()\n options.items.append(self.global_options)\n options.items.append(Option(self, Option.SPACER))\n options.items.append(self.num_categories)\n\n num_categories = self.num_categories.value\n current_categories = int(len(self.categories_group.flat_list) / 3)\n\n # move random past current colors\n random.seed(100)\n for i in range(current_categories):\n RGBColor.random()\n\n if num_categories > current_categories:\n for i in range(num_categories - current_categories):\n self.categories_group.items.append(Option(self, Option.INT, 'Value', 0))\n self.categories_group.items.append(Option(self, Option.TEXT, 'Label', ''))\n self.categories_group.items.append(Option(self, Option.COLOR, 'Color', RGBColor.random()))\n self.categories_group.items.append(Option(self, Option.SPACER))\n elif num_categories < current_categories:\n current_options = self.categories_group.flat_list\n self.categories_group = OptionGroup('Categories')\n self.categories_group.items = current_options[:num_categories*4]\n\n random.seed()\n\n options.items.append(self.categories_group)\n return options\n\n def update_option(self, option=None):\n if option.plugin is not self:\n return\n\n if option.name == 'Number of Categories':\n post_newoptions_available(self)\n\n wx.PostEvent(App.get().app_controller.main_window, VisualizationUpdateEvent(plugin=self))\n\n @property\n def can_visualize(self):\n return self.data is not None\n\n @property\n def visualization_name(self):\n return 'Barchart Visualization' if self.data is None else 'Barchart of {}'.format(self.data.data_name)\n\n @property\n def data_roles(self):\n return [\n (DataPlugin.RASTER, 'Data')\n ]\n\n def set_data(self, data: DataPlugin, role):\n self.data = data\n self.attribute_option.labels = self.data.variables if self.data else []\n self.attribute_option.value = 0\n post_newoptions_available(self)\n wx.PostEvent(App.get().app_controller.main_window, VisualizationUpdateEvent(plugin=self))\n\n def get_data(self, role):\n return self.data\n\n def fig_to_pil(self, fig):\n f = BytesIO()\n fig.savefig(f, 
format='png', facecolor=fig.get_facecolor())\n\n f.seek(0)\n return Image.open(f, 'r')\n\n def render(self, width, height):\n if self.data is None:\n return\n\n grid = self.data.get_data(self.attribute_option.selected, Timeline.app().current)\n\n background_color = self.bg_color_option.value.rgb.rgb_list\n label_color = self.label_color_option.value.rgb.rgb_list\n show_labels = self.labels_option.value\n num_categories = self.num_categories.value\n categories = self.categories_group.flat_list\n\n unique_values = dict(zip(*numpy.unique(grid, return_counts=True))) # dictionary of unique values to count\n values = list()\n colors = list()\n labels = list()\n\n for i in range(num_categories):\n try:\n value, label, color, _ = [opt.value for opt in categories[i*4:i*4+4]]\n except:\n continue # Bail on this index, the viz is probably updating\n\n if label == '':\n label = value\n\n if value in unique_values:\n values.append(unique_values[value])\n else:\n values.append(0)\n\n colors.append(color.rgb.rgb_list)\n labels.append(label)\n\n indices = numpy.arange(len(values))\n values = tuple(values)\n labels = tuple(labels)\n\n fig = pyplot.figure(\n figsize=(width / 100, height / 100), dpi=100, tight_layout=True,\n facecolor=self.bg_color_option.value.rgb.rgb_list\n )\n\n try:\n ax = fig.add_subplot(1, 1, 1, facecolor=background_color)\n ax.margins(1 / width, 1 / height)\n\n bars = ax.bar(indices, values, color='r')\n\n for b in indices:\n bars[b].set_color(colors[b]) # Set unique colors here\n\n # add some text for labels, title and axes ticks\n ax.set_xticks(indices)\n ax.set_xticklabels(tuple(labels)) # labels go here\n ax.get_xaxis().set_visible(show_labels)\n\n ax.tick_params(color=label_color)\n for spine in ('left', 'bottom'):\n ax.spines[spine].set_color(label_color)\n\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_color(label_color)\n\n max_value = max(values)\n\n # attach a text label within each bar displaying the count\n for i, rect in enumerate(bars):\n count = rect.get_height()\n above = count < max_value / 2\n label_height = count + 5 if above else count * 0.5\n va = 'bottom' if above else 'center'\n bg_color = background_color if above else colors[i]\n bar_label_color = (0, 0, 0) if sum([x * 255 for x in bg_color]) > 384 else (1, 1, 1)\n ax.text(rect.get_x() + rect.get_width() / 2., label_height, '%d' % int(count), ha='center', va=va,\n color=bar_label_color)\n\n return self.fig_to_pil(fig).resize((width, height))\n\n finally:\n pyplot.close(fig)\n\n def timeline_changed(self):\n wx.PostEvent(App.get().app_controller.main_window, VisualizationUpdateEvent(plugin=self))\n" ]
[ [ "matplotlib.pyplot.close", "numpy.unique", "matplotlib.pyplot.figure" ] ]
dPys/niworkflows
[ "071e3e27b6c0168a94fdb3996e63f225ab6de3bb" ]
[ "niworkflows/utils/images.py" ]
[ "\"\"\"Utilities to manipulate images.\"\"\"\nimport nibabel as nb\nimport numpy as np\n\n\ndef unsafe_write_nifti_header_and_data(fname, header, data):\n \"\"\"Write header and data without any consistency checks or data munging\n\n This is almost always a bad idea, and you should not use this function\n without a battery of tests for your specific use case.\n\n If you're not using this for NIfTI files specifically, you're playing\n with Fortran-ordered fire.\n \"\"\"\n # ImageOpener handles zips transparently\n with nb.openers.ImageOpener(fname, mode='wb') as fobj:\n header.write_to(fobj)\n # This function serializes one block at a time to reduce memory usage a bit\n # It assumes Fortran-ordered data.\n nb.volumeutils.array_to_file(data, fobj, offset=header.get_data_offset())\n\n\ndef set_consumables(header, dataobj):\n header.set_slope_inter(dataobj.slope, dataobj.inter)\n header.set_data_offset(dataobj.offset)\n\n\ndef overwrite_header(img, fname):\n \"\"\"Rewrite file with only changes to the header\n\n The data block is copied without scaling, avoiding copies in memory.\n The header is checked against the target file to ensure that no changes\n to the size, offset or interpretation of the data block will result.\n\n This function will not respect calls to:\n\n * img.header.set_slope_inter()\n * img.header.set_data_shape()\n * img.header.set_data_offset()\n\n These will all be determined by img.dataobj, which must be an\n ArrayProxy.\n\n If the qform or sform are updated, the\n ``img.header.get_best_affine()`` method must match ``img.affine``,\n or your changes may be lost.\n\n The intended use of this method is for making small header fixups\n that do not change the data or affine, e.g.:\n\n >>> import nibabel as nb\n >>> img = nb.load(nifti_fname, mmap=False)\n >>> img.header.set_qform(*img.header.get_sform(coded=True))\n >>> img.header['descrip'] = b'Modified with some extremely finicky tooling'\n >>> overwrite_header(img, nifti_fname)\n\n This is a destructive operation, and the image object should be considered unusable\n after calling this function.\n\n This should only be called with an image loaded with ``mmap=False``,\n or else you risk getting a ``BusError``.\n\n \"\"\"\n # Synchronize header and set fields that nibabel transfer from header to dataobj\n img.update_header()\n header = img.header\n dataobj = img.dataobj\n\n if getattr(img.dataobj, '_mmap', False):\n raise ValueError(\"Image loaded with `mmap=True`. 
Aborting unsafe operation.\")\n\n set_consumables(header, dataobj)\n\n ondisk = nb.load(fname, mmap=False)\n\n errmsg = \"Cannot overwrite header (reason: {}).\".format\n if not isinstance(ondisk.header, img.header_class):\n raise ValueError(errmsg(\"inconsistent header objects\"))\n\n if (\n ondisk.get_data_dtype() != img.get_data_dtype()\n or img.header.get_data_shape() != ondisk.shape\n ):\n raise ValueError(errmsg(\"data blocks are not the same size\"))\n\n if img.header['vox_offset'] != ondisk.dataobj.offset:\n raise ValueError(errmsg(\"change in offset from start of file\"))\n\n if (\n not np.allclose(img.header['scl_slope'], ondisk.dataobj.slope, equal_nan=True)\n or not np.allclose(img.header['scl_inter'], ondisk.dataobj.inter, equal_nan=True)\n ):\n raise ValueError(errmsg(\"change in scale factors\"))\n\n data = np.asarray(dataobj.get_unscaled())\n img._dataobj = data # Allow old dataobj to be garbage collected\n del ondisk, img, dataobj # Drop everything we don't need, to be safe\n unsafe_write_nifti_header_and_data(fname, header, data)\n\n\ndef update_header_fields(fname, **kwargs):\n \"\"\" Adjust header fields \"\"\"\n # No-op\n if not kwargs:\n return\n img = nb.load(fname, mmap=False)\n for field, value in kwargs.items():\n img.header[field] = value\n overwrite_header(img, fname)\n" ]
[ [ "numpy.allclose" ] ]
VibhuJawa/mimic3-benchmarks
[ "4897d597d5ecc71d2dd2ce1ced76cae48dd50fb5" ]
[ "mimic3benchmark/util.py" ]
[ "import pandas as pd\n\ndef dataframe_from_csv(path, header=0, index_col=0):\n return pd.read_csv(path, header=header, index_col=index_col)\n" ]
[ [ "pandas.read_csv" ] ]
Jayant1234/Marsh_Ann
[ "34503f9b41df8c34cd41535207d7308f2916d4a6" ]
[ "marsh_plant_nn_predict.py" ]
[ "import numpy as np\nimport cv2\n\nimport torch\nimport torch.nn as nn\nfrom marsh_plant_dataset import MarshPlant_Dataset\n\nN_CLASSES = 7\nTHRESHOLD_SIG = 0.5\nbatch_size = 32\nbShuffle = False\nnum_workers = 8\n\n\nmodel_path = './modeling/saved_models/ResNet101_marsh_plants_20190415.torch'\n\nmodel = torch.load(model_path)\nmodel.eval()\nsigfunc = nn.Sigmoid()\n\n\ntest_infile = 'marsh_data_all_test.txt'\ntest_data = MarshPlant_Dataset(test_infile)\ndata_loader = torch.utils.data.DataLoader(test_data, batch_size = batch_size, shuffle = bShuffle, num_workers = num_workers)\n\n\ncpu = torch.device(\"cpu\")\ngpu = torch.device(\"cuda\")\n\n\npred = np.empty((0,N_CLASSES), int)\nann = np.empty((0,N_CLASSES), int)\n\nwith torch.no_grad():\n for it, batch in enumerate(data_loader):\n output = model(batch['X'].to(gpu)).to(cpu)\n\n sig = sigfunc(output)\n sig = sig.detach().numpy()\n this_pred = sig > THRESHOLD_SIG;\n print(this_pred.shape)\n pred = np.append(pred, this_pred.astype(int), axis = 0)\n #print(pred.shape)\n\n this_ann = batch['Y'].to(cpu).detach().numpy() #take off gpu, detach from gradients\n ann = np.append(ann, this_ann.astype(int), axis = 0)\n\n\nnp.savetxt('pred.txt',pred, fmt='%i', delimiter='\\t')\nnp.savetxt('ann.txt' ,ann , fmt='%i', delimiter='\\t')\n" ]
[ [ "torch.device", "numpy.empty", "numpy.savetxt", "torch.nn.Sigmoid", "torch.no_grad", "torch.utils.data.DataLoader", "torch.load" ] ]
LJJ12/Deep-Learning-for-SVD-and-Hybrid-Beamforming
[ "996e46a6b9f6a229f722708e0581a5c1e4f53146" ]
[ "my/myfun/python/learning_rate.py" ]
[ "# 初始的学习速率是0.1,总的迭代次数是1000次,如果staircase=True,那就表明每decay_steps次计算学习速率变化,更新原始学习速率,\n# 如果是False,那就是每一步都更新学习速率。红色表示False,蓝色表示True。\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlearning_rate = 0.1 # 初始学习速率时0.1\ndecay_rate = 0.96 # 衰减率\nglobal_steps = 1000 # 总的迭代次数\ndecay_steps = 100 # 衰减次数\n\nglobal_ = tf.Variable(tf.constant(0))\nc = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=True)\nd = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=False)\n\nT_C = []\nF_D = []\n\nwith tf.Session() as sess:\n for i in range(global_steps):\n T_c = sess.run(c, feed_dict={global_: i})\n T_C.append(T_c)\n F_d = sess.run(d, feed_dict={global_: i})\n F_D.append(F_d)\n\nplt.figure(1)\nplt.plot(range(global_steps), F_D, 'r-')# \"-\"表示折线图,r表示红色,b表示蓝色\nplt.plot(range(global_steps), T_C, 'b-')\n# 关于函数的值的计算0.96^(3/1000)=0.998\nplt.show()\n\n" ]
[ [ "tensorflow.Session", "matplotlib.pyplot.figure", "tensorflow.constant", "matplotlib.pyplot.show", "tensorflow.train.exponential_decay" ] ]
sjm4976/KSTAR_NN_simulator
[ "1dd4d7a687d32574e6d1ddf6f3f6559492d5df4d" ]
[ "common/model_structure.py" ]
[ "import json, zipfile\nimport numpy as np\nfrom tensorflow.keras import models, layers\n\nclass k2rz():\n def __init__(self, model_path, n_models=1, ntheta=64, closed_surface=True, xpt_correction=True):\n self.nmodels, self.ntheta = n_models, ntheta\n self.closed_surface, self.xpt_correction = closed_surface, xpt_correction\n self.models = [models.load_model(model_path + f'/best_model{i}', compile=False) for i in range(self.nmodels)]\n\n def set_inputs(self, ip, bt, βp, rin, rout, k, du, dl):\n self.x = np.array([ip, bt, βp, rin, rout, k, du, dl])\n\n def predict(self, post=True):\n self.y = np.mean([m.predict(np.array([self.x]))[0] for m in self.models[:self.nmodels]], axis=0)\n rbdry, zbdry = self.y[:self.ntheta], self.y[self.ntheta:]\n if post:\n if self.xpt_correction:\n rgeo, amin = 0.5 * (max(rbdry) + min(rbdry)), 0.5 * (max(rbdry) - min(rbdry))\n if self.x[6] <= self.x[7]:\n rx = rgeo - amin * self.x[7]\n zx = max(zbdry) - 2 * self.x[5] * amin\n rx2 = rgeo - amin * self.x[6]\n rbdry[np.argmin(zbdry)] = rx\n zbdry[np.argmin(zbdry)] = zx\n rbdry[np.argmax(zbdry)] = rx2\n else:\n rx = rgeo - amin * self.x[6]\n zx = min(zbdry) + 2 * self.x[5] * amin\n rx2 = rgeo - amin * self.x[7]\n rbdry[np.argmax(zbdry)] = rx\n zbdry[np.argmax(zbdry)] = zx\n rbdry[np.argmin(zbdry)] = rx2\n \n if self.closed_surface:\n rbdry, zbdry = np.append(rbdry, rbdry[0]), np.append(zbdry, zbdry[0])\n\n return rbdry, zbdry\n\nclass x2rz():\n def __init__(self, model_path, n_models=1, ntheta=64, closed_surface=True, xpt_correction=True):\n self.nmodels, self.ntheta = n_models, ntheta\n self.closed_surface, self.xpt_correction = closed_surface, xpt_correction\n self.models = [models.load_model(model_path + f'/best_model{i}', compile=False) for i in range(self.nmodels)]\n\n def set_inputs(self, ip, bt, βp, rx1, zx1, rx2, zx2, drsep, rin, rout):\n self.x = np.array([ip, bt, βp, rx1, zx1, rx2, zx2, drsep, rin, rout])\n\n def predict(self, post=True):\n self.y = np.mean([m.predict(np.array([self.x]))[0] for m in self.models[:self.nmodels]], axis=0)\n rbdry, zbdry = self.y[:self.ntheta], self.y[self.ntheta:]\n if post:\n if self.xpt_correction:\n rgeo, amin = 0.5 * (max(rbdry) + min(rbdry)), 0.5 * (max(rbdry) - min(rbdry))\n if self.x[7] <= 0: # LSN\n rbdry[np.argmin(zbdry)] = self.x[3]\n zbdry[np.argmin(zbdry)] = self.x[4]\n else: # USN\n rbdry[np.argmax(zbdry)] = self.x[5]\n zbdry[np.argmax(zbdry)] = self.x[6]\n\n if self.closed_surface:\n rbdry, zbdry = np.append(rbdry, rbdry[0]), np.append(zbdry, zbdry[0])\n\n return rbdry, zbdry\n\ndef load_custom_model(input_shape, lstms, denses, model_path):\n model = models.Sequential()\n model.add(layers.BatchNormalization(input_shape = input_shape))\n for i, n in enumerate(lstms):\n rs = False if i == len(lstms) - 1 else True\n model.add(layers.LSTM(n, return_sequences = rs))\n model.add(layers.BatchNormalization())\n for n in denses[:-1]:\n model.add(layers.Dense(n, activation = 'sigmoid'))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(denses[-1], activation = 'linear'))\n model.load_weights(model_path)\n return model\n\nclass kstar_lstm():\n def __init__(self, model_path, n_models=1, ymean=None, ystd=None):\n self.nmodels = n_models\n if ymean is None:\n self.ymean = [1.30934765, 5.20082444, 1.47538417, 1.14439883]\n self.ystd = [0.74135689, 1.44731883, 0.56747578, 0.23018484]\n else:\n self.ymean, self.ystd = ymean, ystd\n self.models = [load_custom_model((10, 21), [200, 200], [200, 4], model_path + f'/best_model{i}') for i in range(self.nmodels)]\n\n 
def set_inputs(self, x):\n self.x = np.array(x) if len(np.shape(x)) == 3 else np.array([x])\n\n def predict(self, x=None):\n if type(x) == type(np.zeros(1)):\n self.set_inputs(x)\n self.y = np.mean([m.predict(self.x)[0] * self.ystd + self.ymean for m in self.models[:self.nmodels]], axis=0)\n return self.y\n\nclass kstar_v220505():\n def __init__(self, model_path, n_models=1, ymean=None, ystd=None, length=10):\n if ymean is None or ystd is None:\n self.ymean = [1.4361666, 5.275876, 1.534538, 1.1268075]\n self.ystd = [0.7294007, 1.5010427, 0.6472052, 0.2331879]\n else:\n self.ymean, self.ystd = ymean, ystd\n self.nmodels = n_models\n self.models = [load_custom_model((length, 18), [100, 100], [50, 4], model_path + f'/best_model{i}') for i in range(self.nmodels)]\n\n def set_inputs(self, x):\n self.x = np.array(x) if len(np.shape(x)) == 3 else np.array([x])\n\n def predict(self, x=None):\n if type(x) == type(np.zeros(1)):\n self.set_inputs(x)\n self.y = np.mean([m.predict(self.x)[0] * self.ystd + self.ymean for m in self.models[:self.nmodels]], axis=0)\n return self.y\n\nclass kstar_nn():\n def __init__(self, model_path, n_models=1, ymean=None, ystd=None):\n self.nmodels = n_models\n if ymean is None:\n self.ymean = [1.22379703, 5.2361062, 1.64438005, 1.12040048]\n self.ystd = [0.72255576, 1.5622809, 0.96563557, 0.23868018]\n else:\n self.ymean, self.ystd = ymean, ystd\n self.models = [models.load_model(model_path + f'/best_model{i}', compile=False) for i in range(self.nmodels)]\n\n def set_inputs(self, x):\n self.x = np.array(x) if len(np.shape(x)) == 2 else np.array([x])\n\n def predict(self, x=None):\n if type(x) == type(np.zeros(1)):\n self.set_inputs(x)\n self.y = np.mean([m.predict(self.x)[0] * self.ystd + self.ymean for m in self.models[:self.nmodels]], axis=0)\n return self.y\n\nclass bpw_nn():\n def __init__(self, model_path, n_models=1):\n self.nmodels = n_models\n self.ymean = np.array([1.02158800e+00, 1.87408512e+05])\n self.ystd = np.array([6.43390272e-01, 1.22543529e+05])\n self.models = [models.load_model(model_path + f'/best_model{i}', compile=False) for i in range(self.nmodels)]\n\n def set_inputs(self, x):\n self.x = np.array(x) if len(np.shape(x)) == 2 else np.array([x])\n\n def predict(self, x=None):\n if type(x) == type(np.zeros(1)):\n self.set_inputs(x)\n self.y = np.mean([m.predict(self.x)[0] * self.ystd + self.ymean for m in self.models[:self.nmodels]], axis=0)\n return self.y\n\nclass tf_dense_model():\n def __init__(self, model_path, n_models=1, ymean=0, ystd=1):\n self.nmodels = n_models\n self.ymean, self.ystd = ymean, ystd\n self.models = [models.load_model(model_path + f'/best_model{i}', compile=False) for i in range(n_models)]\n\n def set_inputs(self, x):\n self.x = np.array(x) if len(np.shape(x)) == 2 else np.array([x])\n\n def predict(self, x):\n self.set_inputs(x)\n self.y = np.mean([m.predict(self.x)[0] * self.ystd + self.ymean for m in self.models[:self.nmodels]], axis=0)\n return self.y\n\ndef actv(x, method):\n if method == 'relu':\n return np.max([np.zeros_like(x), x], axis=0)\n elif method == 'tanh':\n return np.tanh(x)\n elif method == 'sigmoid':\n return 1 / (1 + np.exp(-x))\n elif method == 'linear':\n return x\n\nclass SB2_model():\n def __init__(self, model_path, low_state, high_state, low_action, high_action, activation='relu', last_actv='tanh', norm=True, bavg=0.):\n zf = zipfile.ZipFile(model_path)\n data = json.loads(zf.read('data').decode(\"utf-8\"))\n self.parameter_list = json.loads(zf.read('parameter_list').decode(\"utf-8\"))\n 
self.parameters = np.load(zf.open('parameters'))\n self.layers = data['policy_kwargs']['layers'] if 'layers' in data['policy_kwargs'].keys() else [64, 64]\n self.low_state, self.high_state = low_state, high_state\n self.low_action, self.high_action = low_action, high_action\n self.activation, self.last_actv = activation, last_actv\n self.norm = norm\n self.bavg = bavg\n\n def predict(self, x, yold=None):\n xnorm = 2 * (x - self.low_state) / np.subtract(self.high_state, self.low_state) - 1 if self.norm else x\n ynorm = xnorm\n for i, layer in enumerate(self.layers):\n w, b = self.parameters[f'model/pi/fc{i}/kernel:0'], self.parameters[f'model/pi/fc{i}/bias:0']\n ynorm = actv(np.matmul(ynorm, w) + b, self.activation)\n w, b = self.parameters[f'model/pi/dense/kernel:0'], self.parameters[f'model/pi/dense/bias:0']\n ynorm = actv(np.matmul(ynorm, w) + b, self.last_actv)\n\n y = 0.5 * np.subtract(self.high_action, self.low_action) * (ynorm + 1) + self.low_action if self.norm else ynorm\n if yold is None:\n yold = x[:len(y)]\n y = self.bavg * yold + (1 - self.bavg) * y\n return y\n\nclass SB2_ensemble():\n def __init__(self, model_list, low_state, high_state, low_action, high_action, activation='relu', last_actv='tanh', norm=True, bavg=0.):\n self.models = [SB2_model(model_path, low_state, high_state, low_action, high_action, activation, last_actv, norm, bavg) for model_path in model_list]\n\n def predict(self, x):\n ys = [m.predict(x) for m in self.models]\n return np.mean(ys, axis=0)\n\n\n" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.matmul", "numpy.zeros", "numpy.argmin", "numpy.exp", "numpy.mean", "tensorflow.keras.layers.Dense", "numpy.shape", "tensorflow.keras.models.load_model", "numpy.tanh", "numpy.subtract", "numpy.argmax", "tensorflow.keras.models.Sequential", "numpy.append", "tensorflow.keras.layers.LSTM", "tensorflow.keras.layers.BatchNormalization" ] ]
arturbeg/tensor2tensor
[ "43b70752311d3b8dc5f11f63d0dea3efdf8ee25b" ]
[ "tensor2tensor/utils/dgmm.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom tensor2tensor.utils.dgmm_estimator import Estimator\nimport math\n\n\ndef dgmm(z, is_training, mixtures=3, lambda_1=0.1, lambda_2=0.005):\n \"\"\"\n :param is_training: a tensorflow placeholder to indicate whether it is in the training phase or not\n :param mixtures:\n :param lambda_1:\n :param lambda_2:\n :return:\n \"\"\"\n\n with tf.name_scope('n_count'):\n n_count = tf.shape(z)[0]\n n_count = tf.cast(n_count, tf.float32)\n\n # skipping the autoencoder step\n estimator = Estimator(mixtures, z, is_training=is_training)\n gammas = estimator.output_tensor\n\n with tf.variable_scope('gmm_parameters'):\n # TODO: trainable=False TF\n phis = tf.get_variable('phis', shape=[mixtures], initializer=tf.ones_initializer(), dtype=tf.float32, trainable=False)\n mus = tf.get_variable('mus', shape=[mixtures, z.get_shape()[1]], initializer=tf.ones_initializer(), dtype=tf.float32, trainable=False)\n\n init_sigmas = 0.5 * np.expand_dims(np.identity(z.get_shape()[1]), axis=0)\n init_sigmas = np.tile(init_sigmas, [mixtures, 1, 1])\n init_sigmas = tf.constant_initializer(init_sigmas)\n sigmas = tf.get_variable('sigmas', shape=[mixtures, z.get_shape()[1], z.get_shape()[1]], initializer=init_sigmas, dtype=tf.float32, trainable=False)\n\n sums = tf.reduce_sum(gammas, axis=0)\n sums_exp_dims = tf.expand_dims(sums, axis=-1)\n\n phis_ = sums / n_count\n mus_ = tf.matmul(gammas, z, transpose_a=True) / sums_exp_dims\n\n def assign_training_phis_mus():\n with tf.control_dependencies([phis.assign(phis_), mus.assign(mus_)]):\n return [tf.identity(phis), tf.identity(mus)]\n\n phis, mus = tf.cond(is_training, assign_training_phis_mus, lambda: [phis, mus])\n\n phis_exp_dims = tf.expand_dims(phis, axis=0)\n phis_exp_dims = tf.expand_dims(phis_exp_dims, axis=-1)\n phis_exp_dims = tf.expand_dims(phis_exp_dims, axis=-1)\n\n zs_exp_dims = tf.expand_dims(z, 1)\n zs_exp_dims = tf.expand_dims(zs_exp_dims, -1)\n mus_exp_dims = tf.expand_dims(mus, 0)\n mus_exp_dims = tf.expand_dims(mus_exp_dims, -1)\n\n zs_minus_mus = zs_exp_dims - mus_exp_dims\n\n sigmas_ = tf.matmul(zs_minus_mus, zs_minus_mus, transpose_b=True)\n broadcast_gammas = tf.expand_dims(gammas, axis=-1)\n broadcast_gammas = tf.expand_dims(broadcast_gammas, axis=-1)\n sigmas_ = broadcast_gammas * sigmas_\n sigmas_ = tf.reduce_sum(sigmas_, axis=0)\n sigmas_ = sigmas_ / tf.expand_dims(sums_exp_dims, axis=-1)\n sigmas_ = add_noise(sigmas_)\n\n def assign_training_sigmas():\n with tf.control_dependencies([sigmas.assign(sigmas_)]):\n return tf.identity(sigmas)\n\n sigmas = tf.cond(is_training, assign_training_sigmas, lambda: sigmas)\n\n with tf.name_scope('loss'):\n #loss_reconstruction = tf.reduce_mean(squared_euclidean, name='loss_reconstruction')\n inversed_sigmas = tf.expand_dims(tf.matrix_inverse(sigmas), axis=0)\n inversed_sigmas = tf.tile(inversed_sigmas, [tf.shape(zs_minus_mus)[0], 1, 1, 1])\n energy = tf.matmul(zs_minus_mus, inversed_sigmas, transpose_a=True)\n energy = tf.matmul(energy, zs_minus_mus)\n energy = tf.squeeze(phis_exp_dims * tf.exp(-0.5 * energy), axis=[2, 3])\n energy_divided_by = tf.expand_dims(tf.sqrt(2.0 * math.pi * tf.matrix_determinant(sigmas)), axis=0) + 1e-12\n energy = tf.reduce_sum(energy / energy_divided_by, axis=1) + 1e-12\n energy = -1.0 * tf.log(energy)\n energy_mean = tf.reduce_sum(energy) / n_count\n loss_sigmas_diag = 1.0 / tf.matrix_diag_part(sigmas)\n loss_sigmas_diag = tf.reduce_sum(loss_sigmas_diag)\n #loss = loss_reconstruction + lambda_1 * energy_mean + lambda_2 * 
loss_sigmas_diag\n loss = lambda_1 * energy_mean + lambda_2 * loss_sigmas_diag\n\n return energy, z, loss, energy_mean, loss_sigmas_diag\n\n\n# TODO: make sure only done at train time\ndef add_noise(mat, stdev=0.001):\n \"\"\"\n :param mat: should be of shape(k, d, d)\n :param stdev: the standard deviation of noise\n :return: a matrix with little noises\n \"\"\"\n with tf.name_scope('gaussian_noise'):\n dims = mat.get_shape().as_list()[1]\n noise = stdev + tf.random_normal([dims], 0, stdev * 1e-1)\n noise = tf.diag(noise)\n noise = tf.expand_dims(noise, axis=0)\n noise = tf.tile(noise, (mat.get_shape()[0], 1, 1))\n return mat + noise" ]
[ [ "tensorflow.exp", "tensorflow.constant_initializer", "tensorflow.diag", "tensorflow.ones_initializer", "tensorflow.matmul", "numpy.tile", "tensorflow.identity", "tensorflow.random_normal", "tensorflow.cast", "tensorflow.shape", "tensorflow.matrix_diag_part", "tensorflow.variable_scope", "tensorflow.expand_dims", "tensorflow.log", "tensorflow.name_scope", "tensorflow.reduce_sum", "tensorflow.matrix_determinant", "tensorflow.cond", "tensorflow.matrix_inverse" ] ]
AaltoML/Newt-test
[ "e3a725124eb63e9994653ed756be7ae8632f52b2" ]
[ "experiments/binary/binary.py" ]
[ "import sys\nimport bayesnewton\nimport objax\nimport numpy as np\nimport time\nimport pickle\n\nprint('generating some data ...')\nnp.random.seed(99)\nN = 10000 # number of points\nx = np.sort(70 * np.random.rand(N))\nsn = 0.01\nf = lambda x_: 12. * np.sin(4 * np.pi * x_) / (0.25 * np.pi * x_ + 1)\ny_ = f(x) + np.math.sqrt(sn)*np.random.randn(x.shape[0])\ny = np.sign(y_)\ny[y == -1] = 0\n\nind_shuffled = np.random.permutation(N)\nind_split = np.stack(np.split(ind_shuffled, 10)) # 10 random batches of data indices\n\nif len(sys.argv) > 1:\n method = int(sys.argv[1])\n fold = int(sys.argv[2])\nelse:\n method = 4\n fold = 0\n\nif len(sys.argv) > 3:\n baseline = bool(int(sys.argv[3]))\nelse:\n baseline = True\n\nif len(sys.argv) > 4:\n parallel = bool(int(sys.argv[4]))\nelse:\n parallel = None\n\nprint('method number:', method)\nprint('batch number:', fold)\nprint('baseline:', baseline)\nprint('parallel:', parallel)\n\n# Get training and test indices\nind_test = ind_split[fold] # np.sort(ind_shuffled[:N//10])\nind_train = np.concatenate(ind_split[np.arange(10) != fold])\n\nx *= 100\n\nx_train = x[ind_train] # 90/10 train/test split\nx_test = x[ind_test]\ny_train = y[ind_train]\ny_test = y[ind_test]\nN = x_train.shape[0] # number of points\nbatch_size = N # 2000\nM = 1000\nz = np.linspace(x[0], x[-1], M)\n\nif len(sys.argv) > 3:\n baseline = int(sys.argv[3])\nelse:\n baseline = 0\n\n# if baseline:\n# batch_size = N\n\nvar_f = 1. # GP variance\nlen_f = 25. # GP lengthscale\n\nkern = bayesnewton.kernels.Matern72(variance=var_f, lengthscale=len_f)\nlik = bayesnewton.likelihoods.Bernoulli(link='logit')\n\nif method == 0:\n inf = bayesnewton.inference.Taylor\nelif method == 1:\n inf = bayesnewton.inference.PosteriorLinearisation\nelif method in [2, 3, 4]:\n inf = bayesnewton.inference.ExpectationPropagation\nelif method == 5:\n inf = bayesnewton.inference.VariationalInference\n\nif baseline:\n mod = bayesnewton.basemodels.MarkovGP\n Mod = bayesnewton.build_model(mod, inf)\n model = Mod(kernel=kern, likelihood=lik, X=x_train, Y=y_train, parallel=parallel)\nelse:\n mod = bayesnewton.basemodels.SparseMarkovGaussianProcess\n Mod = bayesnewton.build_model(mod, inf)\n model = Mod(kernel=kern, likelihood=lik, X=x_train, Y=y_train, Z=z, parallel=parallel)\n\nif method == 2:\n inf_args = {\"power\": 1.}\nelif method == 3:\n inf_args = {\"power\": 0.5}\nelif method == 4:\n inf_args = {\"power\": 0.01}\nelse:\n inf_args = {}\n\n\nlr_adam = 0.1\nlr_newton = 0.5\niters = 500\nopt_hypers = objax.optimizer.Adam(model.vars())\nenergy = objax.GradValues(model.energy, model.vars())\n\n\n@objax.Function.with_vars(model.vars() + opt_hypers.vars())\ndef train_op():\n batch = np.random.permutation(N)[:batch_size]\n model.inference(lr=lr_newton, batch_ind=batch, **inf_args) # perform inference and update variational params\n dE, E = energy(batch_ind=batch, **inf_args) # compute energy and its gradients w.r.t. 
hypers\n opt_hypers(lr_adam, dE)\n return E\n\n\ntrain_op = objax.Jit(train_op)\n\n\nt0 = time.time()\nfor i in range(1, iters + 1):\n loss = train_op()\n print('iter %2d, energy: %1.4f' % (i, loss[0]))\nt1 = time.time()\nprint('optimisation time: %2.2f secs' % (t1-t0))\n\nprint('calculating the posterior predictive distribution ...')\nt0 = time.time()\nnlpd = model.negative_log_predictive_density(X=x_test, Y=y_test)\nt1 = time.time()\nprint('nlpd: %2.3f' % nlpd)\n\nif baseline:\n with open(\"output/baseline_\" + str(method) + \"_\" + str(fold) + \"_nlpd.txt\", \"wb\") as fp:\n pickle.dump(nlpd, fp)\nelse:\n with open(\"output/\" + str(method) + \"_\" + str(fold) + \"_nlpd.txt\", \"wb\") as fp:\n pickle.dump(nlpd, fp)\n" ]
[ [ "numpy.sin", "numpy.random.rand", "numpy.random.seed", "numpy.random.permutation", "numpy.math.sqrt", "numpy.random.randn", "numpy.split", "numpy.sign", "numpy.arange", "numpy.linspace" ] ]
TUIlmenauAMS/rl_singing_voice
[ "60204c698d48f27b44588c9d6c8dd2c66a13fcd5" ]
[ "nn_modules/cls_basic_conv1ds.py" ]
[ "# -*- coding: utf-8 -*-\n__author__ = 'S.I. Mimilakis'\n__copyright__ = 'MacSeNet'\n\n# imports\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass ConvEncoder(nn.Module):\n \"\"\"\n Class for building the analysis part\n of the Front-End ('Fe'), with randomly\n initialized dictionaries.\n \"\"\"\n def __init__(self, in_size=1024, out_size=1024, hop_size=384, exp_settings={}):\n super(ConvEncoder, self).__init__()\n\n # Analysis Parameters\n self.fully_modulated = exp_settings['fully_modulated']\n self.batch_size = None\n self.time_domain_samples = None\n self.sz_in = in_size\n self.sz_out = out_size\n self.hop = hop_size\n self.f_matrix = np.zeros((self.sz_out, self.sz_in), dtype=np.float32)\n self.input_size = exp_settings['fs'] * exp_settings['d_p_length']\n self.output_size = np.ceil(self.input_size/self.hop)\n self.pad = np.int(-self.input_size/2 + self.sz_in/2 - hop_size/2 + self.output_size*hop_size/2)\n\n self.relu = torch.nn.ReLU()\n\n # Model parameters to be optimized\n self.conv_a1 = torch.nn.Conv1d(in_channels=1, out_channels=self.sz_out,\n kernel_size=self.sz_in, stride=self.hop, padding=self.pad, bias=False)\n self.conv_a2 = torch.nn.Conv1d(in_channels=self.sz_out, out_channels=self.sz_out,\n kernel_size=5, dilation=10, padding=20, bias=False)\n\n # Initialize model parameters\n self.initialize()\n\n def initialize(self):\n torch.nn.init.kaiming_uniform_(self.conv_a1.weight)\n torch.nn.init.kaiming_uniform_(self.conv_a2.weight)\n\n def forward(self, wave_form):\n\n # Resize waveform\n batch_size = wave_form.size(0)\n time_domain_samples = wave_form.size(1)\n # Reshaping\n wave_form = wave_form.view(batch_size, 1, time_domain_samples)\n # Cosine part\n x_coeff = self.conv_a1.forward(wave_form)\n x_c_coeff = self.relu(self.conv_a2(x_coeff) + x_coeff)\n\n return x_c_coeff\n\n\nclass ConvDecoder(nn.Module):\n \"\"\"\n Class for building the synthesis part\n of the Front-End ('Fe'), with randomly\n initialized dictionaries.\n \"\"\"\n def __init__(self, ft_size=1024, kernel_size=1024, hop_size=384, exp_settings={}):\n super(ConvDecoder, self).__init__()\n\n # Synthesis Parameters\n self.fully_modulated = exp_settings['fully_modulated']\n self.batch_size = None\n self.time_domain_samples = None\n self.sz_in = ft_size\n self.kernel_sz = kernel_size\n self.hop = hop_size\n self.output_size = exp_settings['fs'] * exp_settings['d_p_length']\n self.input_size = np.ceil(self.output_size / self.hop)\n self.pad = np.int(((self.input_size-1)*self.hop + self.kernel_sz - self.output_size)/2)\n self.f_matrix = np.zeros((self.sz_in, self.kernel_sz), dtype=np.float32)\n self.tanh = torch.nn.Tanh()\n\n self.conv_dec = torch.nn.ConvTranspose1d(in_channels=self.sz_in, out_channels=1,\n kernel_size=self.kernel_sz, bias=None, stride=self.hop,\n padding=self.pad,\n dilation=1, groups=1)\n\n def forward(self, x_coeff, use_sorting):\n # Reshaping\n wave_form = self.tanh(self.conv_dec.forward(x_coeff))\n\n return wave_form[:, 0, :]\n\n# EOF\n" ]
[ [ "numpy.ceil", "numpy.int", "torch.nn.init.kaiming_uniform_", "torch.nn.ConvTranspose1d", "numpy.zeros", "torch.nn.Conv1d", "torch.nn.Tanh", "torch.nn.ReLU" ] ]
NathanDai5287/AMC-10-Answer-Checker
[ "8b4226f1bc8e84be07a84da1087d293aa648c406" ]
[ "answer.py" ]
[ "from typing import Tuple\nfrom selectorlib import Extractor\nfrom pprint import pformat\nimport json\nimport requests\nimport pandas as pd\n\ndef summarize(score: int, cutoff: int, correct: dict, incorrect: dict, skipped: dict) -> str:\n\t\"\"\"formats information\n\n\tArgs:\n\t\tscore (int): score on test\n\t\tcutoff (int): cutoff of test\n\t\tcorrect (dict): key == question number, value == correct answer\n\t\tincorrect (dict): key == question number, value == correct answer\n\t\tskipped (dict): key == question number, value == correct answer\n\n\tReturns:\n\t\tstr: writable and formatted string containing all the necessary information\n\t\"\"\"\n\n\tsummary = ''\n\n\tsummary += f'Score: {score}\\n'\n\tsummary += f'Cutoff: {cutoff}\\n\\n'\n\n\tsummary += f'Correct: {len(correct)}\\n'\n\tsummary += f'\\t{list(correct)}\\n\\n'\n\tsummary += f'Incorrect: {len(incorrect)}\\n'\n\tsummary += f'\\t{list(incorrect)}\\n\\n'\n\tsummary += f'Skipped: {len(skipped)}\\n'\n\tsummary += f'\\t{list(skipped)}\\n'\n\n\tif (score != 150):\n\t\tsummary += '\\nCorrect Answers: \\n'\n\n\t\tmissed = {**incorrect, **skipped}\n\t\tsummary += pformat(missed, indent=4, width=1).upper().replace('{', ' ').replace('}', ' ').replace(',', '').replace(\"'\", ' ') + '\\n'\n\n\treturn summary\n\ndef grade(answers: list, key: list) -> Tuple[dict]:\n\t\"\"\"calculates the score on the test\n\n\tArgs:\n\t\tanswers (list): my answers\n\t\tkey (list): the actual answers\n\n\tReturns:\n\t\ttuple[dict]: score, correct, incorrect, skipped\n\t\"\"\"\n\n\tscore = 0\n\tcorrect = {}\n\tincorrect = {}\n\tskipped = {}\n\n\ti = 0\n\tfor wrong, right in zip(answers, key):\n\t\ti += 1\n\t\tif (wrong == right):\n\t\t\tscore += 6\n\t\t\tcorrect[i] = right\n\t\telif (wrong == 's'):\n\t\t\tscore += 1.5\n\t\t\tskipped[i] = right\n\t\telse:\n\t\t\tincorrect[i] = right\n\n\treturn score, correct, incorrect, skipped\n\ndef cutoff(year: int, test: str) -> int:\n\t\"\"\"AIME cutoff for a specific year and test\n\n\tArgs:\n\t\tyear (int): 2003 - 2020\n\t\ttest (str): A, B\n\n\tReturns:\n\t\tint: cutoff\n\t\"\"\"\n\n\tcutoffs = pd.read_html('https://ivyleaguecenter.org/2016/01/23/1487/')[0]\n\tcutoffs = cutoffs.rename(columns=cutoffs.iloc[0])[1:]\n\n\tscore = cutoffs.loc[cutoffs['Year'] == str(year), 'AMC 10' + test.upper()]\n\n\treturn float(score)\n\nwith open('answers.txt') as f:\n\tanswers = [i.strip().lower() for i in f.readlines()]\n\nwith open('info.json') as f:\n\tdata = json.load(f)\n\tyear = data['year']\n\ttest = data['test'].upper()\n\tlink = f'https://artofproblemsolving.com/wiki/index.php/{year}_AMC_10{test}_Answer_Key'\n\npage = requests.get(link).text\nextractor = Extractor.from_yaml_file('selector.yml')\nkey = [i.lower() for i in extractor.extract(page)['Answers']]\n\nwith open('score.txt', 'w') as f:\n\tscore, correct, incorrect, skipped = grade(answers, key)\n\tf.write(summarize(score, cutoff(year, test), correct, incorrect, skipped))\n" ]
[ [ "pandas.read_html" ] ]
jzhanson/alfred
[ "d5b540e7c9b53d3f70cc2907503935fecff00018" ]
[ "models/nn/resnet.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torchvision import models, transforms\n\n\nclass Resnet18(object):\n '''\n pretrained Resnet18 from torchvision\n '''\n\n def __init__(self, args, eval=True, share_memory=False, use_conv_feat=True):\n self.model = models.resnet18(pretrained=True)\n\n if args.gpu:\n try:\n self.model = self.model.to(torch.device('cuda'))\n except:\n self.model = self.model.to(torch.device('cuda'))\n\n if eval:\n self.model = self.model.eval()\n\n if share_memory:\n self.model.share_memory()\n\n if use_conv_feat:\n self.model = nn.Sequential(*list(self.model.children())[:-2])\n\n def extract(self, x):\n return self.model(x)\n\n\nclass MaskRCNN(object):\n '''\n pretrained MaskRCNN from torchvision\n '''\n\n def __init__(self, args, eval=True, share_memory=False, min_size=224):\n self.model = models.detection.maskrcnn_resnet50_fpn(pretrained=True, min_size=min_size)\n self.model = self.model.backbone.body\n self.feat_layer = 3\n\n if args.gpu:\n try:\n self.model = self.model.to(torch.device('cuda'))\n except:\n self.model = self.model.to(torch.device('cuda'))\n\n if eval:\n self.model = self.model.eval()\n\n if share_memory:\n self.model.share_memory()\n\n\n def extract(self, x):\n features = self.model(x)\n return features[self.feat_layer]\n\n\nclass Resnet(object):\n\n def __init__(self, args, eval=True, share_memory=False, use_conv_feat=True):\n self.model_type = args.visual_model\n self.gpu = args.gpu\n\n # choose model type\n if self.model_type == \"maskrcnn\":\n self.resnet_model = MaskRCNN(args, eval, share_memory)\n else:\n self.resnet_model = Resnet18(args, eval, share_memory, use_conv_feat=use_conv_feat)\n\n # normalization transform\n self.transform = self.get_default_transform()\n\n\n @staticmethod\n def get_default_transform():\n return transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )\n ])\n\n def featurize(self, images, batch=32):\n images_normalized = torch.stack([self.transform(i) for i in images], dim=0)\n if self.gpu:\n images_normalized = images_normalized.to(torch.device('cuda'))\n\n out = []\n with torch.set_grad_enabled(False):\n for i in range(0, images_normalized.size(0), batch):\n b = images_normalized[i:i+batch]\n out.append(self.resnet_model.extract(b))\n return torch.cat(out, dim=0)\n" ]
[ [ "torch.device", "torch.cat", "torch.set_grad_enabled" ] ]
KColdrick/pvtrace
[ "b4b99905fae0f8b16358ca4e229379b6566f6020", "b4b99905fae0f8b16358ca4e229379b6566f6020" ]
[ "pvtrace/scene/renderer.py", "pvtrace/geometry/utils.py" ]
[ "import numpy as np\nimport os\nimport time\nimport io\nfrom typing import Tuple\nfrom contextlib import contextmanager\nfrom collections import deque\nfrom anytree import LevelOrderIter, PostOrderIter\nfrom pvtrace.geometry.sphere import Sphere\nfrom pvtrace.geometry.cylinder import Cylinder\nfrom pvtrace.geometry.mesh import Mesh\nfrom pvtrace.light.ray import Ray\nfrom pvtrace.light.utils import wavelength_to_rgb, rgb_to_hex_int, wavelength_to_hex_int\nfrom pvtrace.light.event import Event\nimport trimesh\nimport meshcat\nimport meshcat.geometry as g\nimport meshcat.transformations as tf\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass MeshcatRenderer(object):\n \"\"\"Renders a scene nodes structure.\"\"\"\n\n def __init__(\n self,\n zmq_url=None,\n max_histories=10000,\n open_browser=False,\n wireframe=False,\n transparency=True,\n opacity=0.5,\n reflectivity=1.0,\n ):\n super(MeshcatRenderer, self).__init__()\n self.vis = meshcat.Visualizer(zmq_url=zmq_url)\n if open_browser:\n self.vis.open()\n self.ray_histories = deque(maxlen=max_histories)\n self.max_histories = max_histories\n self.added_index = 0\n self.wireframe = wireframe\n self.transparency = transparency\n self.opacity = opacity\n self.reflectivity = reflectivity\n\n def render(self, scene, show_root=False):\n \"\"\"\n \"\"\"\n vis = self.vis\n for node in LevelOrderIter(scene.root):\n if node == scene.root:\n continue\n self.add_node(node)\n\n def add_node(self, node):\n # Using a dot here as a quick fix to _avoid_ Meshcat automatically\n # transforming the parent coordinate system. If you use `\"/\"` then\n # you would get that behaviour.\n pathname = \" | \".join([x.name for x in node.path])\n if node.geometry is not None:\n # Transforming everything to global\n self.add_geometry(\n node.geometry, pathname, node.transformation_to(node.root)\n )\n\n def add_geometry(self, geometry, pathname, transform):\n vis = self.vis\n material = g.MeshBasicMaterial(\n reflectivity=self.reflectivity, sides=0, wireframe=self.wireframe\n )\n material.transparency = self.transparency\n material.opacity = self.opacity\n\n if isinstance(geometry, Sphere):\n sphere = geometry\n vis[pathname].set_object(g.Sphere(sphere.radius), material)\n vis[pathname].set_transform(transform)\n\n elif isinstance(geometry, Cylinder):\n cyl = geometry\n vis[pathname].set_object(g.Cylinder(cyl.length, cyl.radius), material)\n # meshcat cylinder is aligned along y-axis. 
Align along z then apply the\n # node's transform as normal.\n transform = np.copy(transform)\n # Change basic XYZ -> XZY\n transform[:, [1, 2]] = transform[:, [2, 1]]\n vis[pathname].set_transform(transform)\n\n elif isinstance(geometry, Mesh):\n obj = meshcat.geometry.StlMeshGeometry.from_stream(\n io.BytesIO(trimesh.exchange.stl.export_stl(geometry.trimesh))\n )\n vis[pathname].set_object(obj, material)\n vis[pathname].set_transform(transform)\n else:\n raise NotImplementedError(\n \"Cannot yet add {} to visualiser\".format(type(geometry))\n )\n\n def remove(self, scene):\n vis = self.vis\n vis.delete()\n\n def get_next_identifer(self):\n self.added_index += 1\n return \"rays/{}\".format(str(self.added_index))\n\n def add_line_segment(\n self,\n start: Tuple[float, float, float],\n end: Tuple[float, float, float],\n colour=0xFFFFFF,\n ) -> str:\n \"\"\" Add a line segment to the scene and return the identifier.\n \n Parameters\n ----------\n start : tuple\n The starting point of the line as (x, y, z) coordinates.\n end : tuple\n The ending point of the line as (x, y, z) coordinates.\n colour : int (optional)\n An optional colour specified as a hex integer. The default colour is\n white.\n\n Returns\n -------\n identifier : str\n The string identifier used to add the line to the scene.\n \"\"\"\n vis = self.vis\n line = (start, end)\n self._will_add_expendable_to_scene(line)\n vertices = np.column_stack(line)\n assert vertices.shape[0] == 3 # easy to get this wrong\n identifier = self.get_next_identifer()\n vis[identifier].set_object(\n g.Line(\n g.PointsGeometry(vertices),\n g.MeshBasicMaterial(color=colour, transparency=False, opacity=1),\n )\n )\n self._did_add_expendable_to_scene(identifier)\n return identifier\n\n def add_path(\n self, vertices: Tuple[Tuple[float, float, float]], colour=0xFFFFFF\n ) -> str:\n \"\"\" Add a line to the scene and return the identifier. The line is made from \n multiple line segments. The line will be drawn with a single colour.\n \n Parameters\n ----------\n vertices : tuple of tuple of float\n The starting point of the line as (x, y, z) coordinates.\n colour : int (optional)\n An optional colour specified as a hex integer. The default colour is\n white.\n\n See also\n --------\n add_ray_path : Draws the line using individual line segments. Use this \n method when each line segment needs to be drawn with a different colour.\n \n Returns\n -------\n identifier : str\n The string identifier used to add the line to the scene.\n \"\"\"\n vis = self.vis\n self._will_add_expendable_to_scene(vertices)\n vertices = np.array(vertices)\n assert vertices.shape[0] == 3 # easy to get this wrong\n identifier = self.get_next_identifer()\n vis[identifier].set_object(\n g.Line(\n g.PointsGeometry(vertices),\n g.MeshBasicMaterial(color=colour, transparency=False, opacity=1.0),\n )\n )\n self._did_add_expendable_to_scene(identifier)\n return identifier\n\n def add_ray(self, ray: Ray, length: float) -> str:\n \"\"\" Add the ray path as a single connected line and return an identifier. \n \n Parameters\n ----------\n ray : Ray\n The ray to add to the scene.\n\n Notes\n -----\n Internally the line is drawn using `add_line_segment` because the colour of\n each segment could be unique. 
If this proves too inefficiency use \n `add_path`.\n\n See also\n --------\n add_ray_path : Adds multiple rays to the scene.\n\n Returns\n -------\n identifier : str\n The string identifier used to add the object to the scene.\n \"\"\"\n nanometers = ray.wavelength\n start = ray.position\n end = np.array(start) + np.array(ray.direction) * length\n colour = wavelength_to_hex_int(nanometers)\n identifier = self.add_line_segment(start, end, colour=colour)\n return identifier\n\n def add_ray_path(self, rays: [Ray]) -> str:\n \"\"\" Add the ray path as a single connected line and return an identifier. \n \n Parameters\n ----------\n rays : list of Ray\n List of ray objects.\n length : float\n The length of the line to render. Default to 1000.\n\n See also\n --------\n add_path : Draws the line in more efficient way than `add_ray_path` but\n limits the line to be a single colour.\n\n Returns\n -------\n identifier : str\n The string identifier used to add the line to the scene.\n \"\"\"\n vis = self.vis\n if len(rays) < 2:\n raise AppError(\"Need at least two points to render a line.\")\n ids = []\n for (start_ray, end_ray) in zip(rays[:-1], rays[1:]):\n nanometers = start_ray.wavelength\n start = start_ray.position\n end = end_ray.position\n colour = wavelength_to_hex_int(nanometers)\n ids.append(self.add_line_segment(start, end, colour=colour))\n return ids\n\n def add_history(\n self,\n history: Tuple,\n baubles: bool = True,\n world_segment: str = \"short\",\n short_length: float = 1.0,\n bauble_radius: float = 0.01,\n ):\n \"\"\" Similar to `add_ray_path` but with improved visualisation options.\n \n Parameters\n ----------\n history: tuple\n Tuple of rays and events as returned from `photon_tracer.follow`\n baubles: bool (optional)\n Default is True. 
Draws baubles at exit location.\n world_segment: str (optional)\n Opt-out (`'exclude'`) or draw short (`'short`) path segments to the\n world node.\n short_length: float\n The length of the final path segment when `world_segment='short'`.\n bauble_radius: float\n The bauble radius when `baubles=True`.\n \"\"\"\n vis = self.vis\n if not world_segment in {\"exclude\", \"short\"}:\n raise ValueError(\n \"`world_segment` should be either `'exclude'` or `'short'`.\"\n )\n\n if world_segment == \"exclude\":\n rays, events = zip(*history)\n try:\n idx = events.index(Event.EXIT)\n history = history[0:idx]\n if len(history) < 2:\n # nothing left to render\n return\n except ValueError:\n pass\n\n if len(history) < 2:\n raise AppError(\"Need at least two points to render a line.\")\n\n ids = []\n rays, events = zip(*history)\n for (start_part, end_part) in zip(history[:-1], history[1:]):\n start_ray, end_ray = start_part[0], end_part[0]\n nanometers = start_ray.wavelength\n start = start_ray.position\n end = end_ray.position\n if world_segment == \"short\":\n if end_ray == history[-1][0]:\n end = (\n np.array(start_ray.position)\n + np.array(start_ray.direction) * short_length\n )\n colour = wavelength_to_hex_int(nanometers)\n ids.append(self.add_line_segment(start, end, colour=colour))\n\n if baubles:\n event = start_part[1]\n if event in {Event.TRANSMIT}:\n baubid = self.get_next_identifer()\n vis[f\"exit/{baubid}\"].set_object(\n g.Sphere(bauble_radius),\n g.MeshBasicMaterial(\n color=colour, transparency=False, opacity=1\n ),\n )\n vis[f\"exit/{baubid}\"].set_transform(tf.translation_matrix(start))\n\n ids.append(baubid)\n return ids\n\n def remove_object(self, identifier):\n \"\"\" Remove object by its identifier.\n \"\"\"\n vis = self.vis\n vis[identifier].delete()\n\n def _will_add_expendable_to_scene(self, item):\n \"\"\" Private method used to notify buffer that a line or ray object will be\n added to the scene.\n \n Notes\n -----\n This is used to manage the buffer size and will remove the oldest object\n to keep the scene size constant.\n \"\"\"\n if len(self.ray_histories) == self.max_histories:\n self.remove_object(self.ray_histories.popleft())\n\n def _did_add_expendable_to_scene(self, identifier):\n \"\"\" Private method use to notify the buffer that an expendable object has been\n added to the scene. \n \n Notes\n -----\n The identifier is used to remove the object when it is becomes the oldest\n item in the buffer.\n \"\"\"\n self.ray_histories.append(identifier)\n", "import numpy as np\nimport numpy\nimport math\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n# Set reasonable precision for comparing floats to zero. 
Originally the multiplier was\n# 10, but I needed to set this to 1000 because some of the trimesh distance methods\n# do not see as accurate as with primitive shapes.\nEPS_ZERO = np.finfo(float).eps * 1000\n\n\ndef on_aabb_surface(size, point, centre=(0.0, 0.0, 0.0), atol=EPS_ZERO):\n \"\"\" Surface test for axis-aligned bounding box with absolute distance \n tolerance along surface normal direction.\n \n >>> size = (1.0, 1.0, 1.0)\n >>> centre = (0.0, 0.0, 0.0)\n >>> pt = np.array([0.5, np.random.uniform(-0.5*size[1], 0.5*size[1]), np.random.uniform(-0.5*size[2], 0.5*size[2])])\n >>> atol = 1e-8\n >>> on_aabb_surface(size, pt, centre=centre, atol=1e-8)\n True\n >>> on_aabb_surface(size, pt + np.array([atol, 0.0, 0.0]), centre=centre, atol=1e-8)\n False\n >>> on_aabb_surface(size, pt + np.array([atol, 0.0, 0.0]), centre=centre, atol=1e-8)\n False\n \"\"\"\n origin = np.array(centre) - 0.5 * np.array(size)\n extent = np.array(centre) + 0.5 * np.array(size)\n # xmin\n xmin_point = np.array(point)\n xmin_point[0] = origin[0]\n # print(\"point: {}, xmin_point: {}\".format(point, xmin_point))\n xmin_dist = distance_between(point, xmin_point)\n # xmax\n xmax_point = np.array(point)\n xmax_point[0] = extent[0]\n # print(\"point: {}, xmax_point: {}\".format(point, xmax_point))\n xmax_dist = distance_between(point, xmax_point)\n # ymin\n ymin_point = np.array(point)\n ymin_point[1] = origin[1]\n ymin_dist = distance_between(point, ymin_point)\n # ymax\n ymax_point = np.array(point)\n ymax_point[1] = extent[1]\n ymax_dist = distance_between(point, ymax_point)\n # ymin\n zmin_point = np.array(point)\n zmin_point[2] = origin[2]\n zmin_dist = distance_between(point, zmin_point)\n # ymax\n zmax_point = np.array(point)\n zmax_point[2] = extent[2]\n zmax_dist = distance_between(point, zmax_point)\n\n dists = (xmin_dist, xmax_dist, ymin_dist, ymax_dist, zmin_dist, zmax_dist)\n tests = [np.abs(dist) < (atol / 2) for dist in dists]\n surfaces = np.where(np.array(tests) == True)[0].tolist()\n return np.any(tests), surfaces\n\n\ndef aabb_intersection(min_point, max_point, ray_position, ray_direction):\n \"\"\"\n Returns an array intersection points with the ray and box using the method of \n Williams [1]. If no intersection occurs return `None`.\n \n Arguments\n ---------\n min_point: tuple like (x0, y0, z0) which is the minimum corner.\n box_size: tuple like (x1, y1, z1) which is the maximum corner.\n ray_position: tuple like (x, y, z), the ray origin.\n ray_direction: tuple like (i, j, k), the ray direction.\n \n Returns\n -------\n intersections: tuple of (x, y, z) tuples or empty list.\n \n References\n ----------\n [1] Amy Williams, Steve Barrus, R. 
Keith Morley, and \n Peter Shirley, \"An Efficient and Robust Ray-Box Intersection Algorithm\" \n Journal of graphics tools, 10(1):49-54, 2005\n \"\"\"\n rpos = np.array(ray_position)\n rdir = np.array(ray_direction)\n origin = np.array(min_point)\n extent = np.array(max_point)\n pts = (origin, extent)\n\n rinvd = 1.0 / rdir\n rsgn = 1.0 / (rinvd < 0.0)\n tmin = (origin[rsgn[0]] - rpos[0]) * rinvd[0]\n tmax = (origin[1 - rsgn[0]] - rpos[0]) * rinvd[0]\n tymin = (extent[rsgn[1]] - rpos[1]) * rinvd[1]\n tymax = (extent[1 - rsgn[1]] - rpos[1]) * rinvd[1]\n\n if (tmin > tymax) or (tymin > tmax):\n return None\n\n if tymin > tmin:\n tmin = tymin\n if tymax < tmax:\n tmax = tymax\n\n tzmin = (extent[rsgn[2]] - rpos[2]) * rinvd[2]\n tzmax = (extent[1 - rsgn[2]] - rpos[2]) * rinvd[2]\n\n if (tmin > tzmax) or (tzmin > tmax):\n return None\n if tzmin > tmin:\n tmin = tzmin\n if tzmax < tmax:\n tmax = tzmax\n\n # Calculate the hit coordinates then if the solution is in\n # the forward direction append to the hit list.\n hit_coordinates = []\n pt1 = tuple(rpos + tmin * rdir)\n pt2 = tuple(rpos + tmax * rdir)\n\n if tmin >= 0.0:\n hit_coordinates.append(pt1)\n if tmax >= 0.0:\n hit_coordinates.append(pt2)\n return tuple(hit_coordinates)\n\n\ndef ray_z_cylinder(length, radius, ray_origin, ray_direction):\n \"\"\" Returns ray-cylinder intersection points for a cylinder aligned\n along the z-axis with centre at (0, 0, 0).\n \n Parameters\n ----------\n length : float\n The length of the cylinder\n radius : float\n The radius of the cylinder\n ray_origin : tuple of float\n The origin of the ray like, e.g. :math:`\\left(0.0, 1.0, 2.0 \\\\right)`\n ray_direction : tuple of float\n The direction **unit** vector of the ray like, e.g. :math:`(n_x, n_y, n_z)`.\n \n Returns\n -------\n points: tuple of points\n Returns a tuple of tuple like ((0.0, 1.0, 2.0), ...) where each item is an \n intersection point. The tuple is sorted by distance from the ray origin.\n \n Notes\n -----\n \n Equation of ray is [1],\n\n :math:`P(t) = E + t`\n\n where :math:`E` is the origin or \"eye\" point and :math:`D` is the direction vector. \n In component form,\n\n .. math::\n\n \\\\begin{bmatrix}\n x(t) \\\\\n y(t) \\\\\n z(t) \\\\ \n \\end{bmatrix} = \n \\\\begin{bmatrix}\n x_E + t x_D \\\\\n y_E + t y_D \\\\\n z_E + t z_D\\\\ \n \\end{bmatrix}\n\n The equation of cylinder aligned along the z direction is,\n\n .. math::\n\n x^2 + y^2 = R^2\n \n\n where :math`R` is the radius of the cylinder.\n\n Substituting the equation of the ray into the equation of the cylinder,\n\n .. math::\n \n (x_E + t x_D)^2 + (y_E + t y_D)^2 = R^2\n\n and after grouping the :math:`t^2` and :math:`t` terms,\n\n .. math::\n \n t^2\\left(x_D^2 + y_D^2\\\\right) + \n t \\left(2 x_E x_D + 2 y_E y _D \\\\right) + \n \\left( x_E^2 + y_E^2 - R^2 \\\\right) = 0\n\n which is a standard quadratic equation,\n\n .. math::\n \n at^2 + bt + c = 0\n\n Solution of this equation give two values :math:`\\left( t_1, t_2 \\\\right)` which \n give the ray's distance to intersection points. To be ahead on the ray's path \n :math:`\\left( t_1, t_2 \\\\right) >= 0` and to be real intersection points the \n values must be finite and have imaginary component of zero. \n\n The intersection with the cylinder caps is found by intersecting the ray with \n two infinite planes at :math:`z=0` and :math:`z=L`, where :math:`L` is the \n length of the cylinder. The ray-plane intersection is given by [2],\n\n .. 
math::\n \n t = \\\\frac{(Q - P) \\cdot n}{D \\cdot n}\n\n where :math:`t` is the distance from the ray origin to the intersection point, \n :math:`Q` is a point on the plane and :math:`n` the **outward** facing surface \n normal at that point. As before :math:`P` is the origin of the ray and :math:`D`\n is the ray's direction unit vector.\n\n For the bottom cap at :math:`z=0`,\n\n .. math::\n\n t_{\\\\text{bot}} = \n \\\\frac{\n \\left(\n \\\\begin{bmatrix}\n 0 \\\\\n 0 \\\\\n -0.5 L \\\\ \n \\end{bmatrix} - \n \\\\begin{bmatrix}\n x_E \\\\\n y_E \\\\\n z_E \\\\ \n \\end{bmatrix}\n \\\\right) \\cdot \n \\\\begin{bmatrix}\n 0 \\\\\n 0 \\\\\n -1 \\\\ \n \\end{bmatrix}\n }{\n \\\\begin{bmatrix}\n x_D \\\\\n y_D \\\\\n z_D \\\\ \n \\end{bmatrix} \\cdot\n \\\\begin{bmatrix}\n 0 \\\\\n 0 \\\\\n -1 \\\\ \n \\end{bmatrix}\n }\n\n and for the top cap at :math:`z=L`,\n\n .. math::\n t_{\\\\text{bot}} = \n \\\\frac{\n \\left(\n \\\\begin{bmatrix}\n 0 \\\\\n 0 \\\\\n 0.5 L \\\\ \n \\end{bmatrix} - \n \\\\begin{bmatrix}\n x_E \\\\\n y_E \\\\\n z_E \\\\ \n \\end{bmatrix}\n \\\\right) \\cdot \n \\\\begin{bmatrix}\n 0 \\\\\n 0 \\\\\n 1 \\\\ \n \\end{bmatrix}\n }{\n \\\\begin{bmatrix}\n x_D \\\\\n y_D \\\\\n z_D \\\\ \n \\end{bmatrix} \\cdot\n \\\\begin{bmatrix}\n 0 \\\\\n 0 \\\\\n 1 \\\\ \n \\end{bmatrix}\n }\n \n\n The intersection points with :math:`t<0` and points not contained inside the circle\n of the end cap are rejected using :math:`(x^2 + y^2) < R`, where :math:`x` and \n :math:`y` are the components of the candidate intersection point.\n \n References\n ----------\n [1] https://www.cl.cam.ac.uk/teaching/1999/AGraphHCI/\n\n [2] https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-plane-and-ray-disk-intersection\n \n \"\"\"\n p0 = np.array(ray_origin)\n n0 = np.array(ray_direction)\n xe, ye, ze = p0\n xd, yd, zd = n0\n\n # Look for intersections on the cylinder surface\n a = xd ** 2 + yd ** 2\n b = 2 * (xe * xd + ye * yd)\n c = xe ** 2 + ye ** 2 - radius ** 2\n tcyl = [\n t for t in np.roots([a, b, c]) if np.isfinite(t) and np.isreal(t) and t >= 0\n ]\n\n # Look for intersections on the cap surfaces\n with np.errstate(divide=\"ignore\"):\n # top cap\n point = np.array([0.0, 0.0, 0.5 * length])\n normal = np.array([0.0, 0.0, 1.0]) # outward facing at z = length\n ttopcap = (point - p0).dot(normal) / n0.dot(normal)\n # bottom cap\n point = np.array([0.0, 0.0, -0.5 * length])\n normal = np.array([0.0, 0.0, -1.0]) # outward facing at z = 0\n tbotcap = (point - p0).dot(normal) / n0.dot(normal)\n tcap = [t for t in (tbotcap, ttopcap) if np.isfinite(t) and t >= 0.0]\n\n # Reject point cap points which are not in the cap's circle radius\n # and cylinder points which outside the length.\n cap_candidates = [(p0 + t * n0, t) for t in tcap]\n cap_candidates = [\n (point, t)\n for (point, t) in cap_candidates\n if np.sqrt(point[0] ** 2 + point[1] ** 2) < radius\n ]\n cyl_candidates = [(p0 + t * n0, t) for t in tcyl]\n cyl_candidates = [\n (point, t)\n for (point, t) in cyl_candidates\n if point[2] > -0.5 * length and point[2] < 0.5 * length\n ]\n intersection_info = tuple(cyl_candidates) + tuple(cap_candidates)\n intersection_info = sorted(intersection_info, key=lambda pair: pair[1])\n if len(intersection_info) == 0:\n return ([], [])\n points = tuple([tuple(p.tolist()) for p in list(zip(*intersection_info))[0]])\n distances = tuple([float(d) for d in list(zip(*intersection_info))[1]])\n return points, distances\n\n\n# Equality tests\n\n\ndef 
close_to_zero(value) -> bool:\n return np.all(np.absolute(value) < EPS_ZERO)\n\n\ndef points_equal(point1: tuple, point2: tuple) -> bool:\n return close_to_zero(distance_between(point1, point2))\n\n\ndef floats_close(a, b):\n return close_to_zero(a - b)\n\n\ndef allinrange(x, x_range):\n \"\"\" Returns True if all elements of x are inside x_range, inclusive of the \n edge values.\n \n Parameters\n ----------\n x : array-like\n A numpy array of values.\n x_range : tuple of float\n A tuple defining a range like (xmin, xmax)\n \"\"\"\n if isinstance(x, (int, float, np.float, np.int)):\n x = np.array([x])\n return np.where(np.logical_or(x < x_range[0], x > x_range[1]))[0].size == 0\n\n\n# Vector helpers\n\n\ndef flip(vector):\n return -np.array(vector)\n\n\ndef magnitude(vector):\n return np.sqrt(np.dot(np.array(vector), np.array(vector)))\n\n\ndef norm(vector):\n return np.array(vector) / np.linalg.norm(vector)\n\n\ndef angle_between(normal, vector):\n normal = np.array(normal)\n vector = np.array(vector)\n if np.allclose(normal, vector):\n return 0.0\n elif np.allclose(-normal, vector):\n return np.pi\n dot = np.dot(normal, vector)\n return np.arccos(dot)\n\n\ndef is_ahead(position, direction, point):\n \"\"\" Tests whether point is ahead of the current position.\n \"\"\"\n if points_equal(position, point):\n return False\n d1 = np.dot(self.direction, np.array(point))\n d2 = np.dot(self.direction, self.position)\n return (d1 - d2) > 0\n\n\ndef smallest_angle_between(normal, vector):\n rads = angle_between(normal, vector)\n return np.arctan2(np.sin(rads), np.cos(rads))\n\n\ndef distance_between(point1: tuple, point2: tuple) -> float:\n v = np.array(point1) - np.array(point2)\n d = np.linalg.norm(v)\n return d\n\n\ndef intersection_point_is_ahead(ray_position, ray_direction, intersection_point):\n \"\"\" Returns true if the intersection point is ahead of the rays trajectory.\n \n Notes\n -----\n The intersection point must be a point on the line, p(a) = p0 + a * n.\n \"\"\"\n return (\n np.dot(ray_direction, intersection_point) - np.dot(ray_direction, ray_position)\n ) > EPS_ZERO\n" ]
[ [ "numpy.array", "numpy.column_stack", "numpy.copy" ], [ "numpy.logical_or", "numpy.array", "numpy.arccos", "numpy.dot", "numpy.linalg.norm", "numpy.errstate", "numpy.sin", "numpy.absolute", "numpy.roots", "numpy.allclose", "numpy.finfo", "numpy.any", "numpy.isreal", "numpy.cos", "numpy.abs", "numpy.isfinite", "numpy.sqrt" ] ]
aoranwu/grace
[ "1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e" ]
[ "grace_dl/dist/compressor/qsgd.py" ]
[ "import torch\nfrom grace_dl.dist import Compressor\n\n\nclass QSGDCompressor(Compressor):\n\n def __init__(self, quantum_num, bucket_size=128):\n super().__init__()\n self.quantum_num = quantum_num\n self.bucket_size = bucket_size\n\n def compress(self, tensor, name):\n shape = tensor.size()\n tensor = tensor.flatten()\n abs_gradient = tensor.abs()\n\n if tensor.numel() % self.bucket_size != 0:\n pad_size = self.bucket_size - tensor.numel() % self.bucket_size\n pad_tensor = torch.cat([tensor, torch.zeros(pad_size, dtype=tensor.dtype, device=tensor.device)])\n else:\n pad_tensor = tensor\n pad_tensor = pad_tensor.view([-1, self.bucket_size])\n pad_tensor_sqsum = torch.sum(pad_tensor ** 2, dim=1)\n bucket_norm = torch.sqrt(pad_tensor_sqsum)\n b = torch.ones([1, self.bucket_size], device=tensor.device)\n expand_norm = torch.matmul(bucket_norm.view([-1, 1]), b)\n norm = expand_norm.flatten()[:tensor.numel()]\n\n level_float = self.quantum_num / norm * abs_gradient\n previous_level = level_float.floor()\n prob = torch.empty_like(tensor).uniform_()\n is_next_level = (prob < (level_float - previous_level)).type(torch.float32)\n new_level = (previous_level + is_next_level)\n\n sign = tensor.sign()\n tensor_compressed = (new_level * sign).type(torch.int16)\n tensor_compressed = tensor_compressed.type(torch.int8 if self.quantum_num < 128 else torch.half)\n\n return (tensor_compressed, bucket_norm), shape\n\n def decompress(self, tensor_compressed, ctx):\n tensor_compressed, bucket_norm = tensor_compressed\n shape = ctx\n b = torch.ones([1, self.bucket_size], device=tensor_compressed.device)\n expand_norm = torch.matmul(bucket_norm.view([-1, 1]), b)\n norm = expand_norm.flatten()[:shape.numel()]\n decode_output = tensor_compressed.type(torch.float32)\n tensor_decompressed = norm / self.quantum_num * decode_output\n tensor_decompressed = tensor_decompressed.view(shape)\n\n return tensor_decompressed\n\n\nclass QSGDCompressor_CUDA(Compressor):\n\n def __init__(self, quantum_num, bucket_size=128):\n super().__init__()\n self.quantum_num = quantum_num\n self.bucket_size = bucket_size\n\n def compress(self, tensor, name):\n import qsgd_cuda\n shape = tensor.size()\n tensor = tensor.flatten()\n\n tensor_compressed, bucket_norm = qsgd_cuda.compress(tensor, self.quantum_num, self.bucket_size)\n tensor_compressed = tensor_compressed, bucket_norm.float()\n\n return tensor_compressed, shape\n\n def decompress(self, tensor_compressed, shape):\n import qsgd_cuda\n tensor_compressed, bucket_norm = tensor_compressed\n\n tensor_decompressed = qsgd_cuda.decompress(tensor_compressed, bucket_norm.double(), self.quantum_num, self.bucket_size)\n\n return tensor_decompressed.view(shape)\n\n\n" ]
[ [ "torch.zeros", "torch.sqrt", "torch.ones", "torch.sum", "torch.empty_like" ] ]
xinzheshen/WaveRNN
[ "f6cb1a3d6d6b58dbbba5301a88d05c1beb9230af" ]
[ "train_wavernn.py" ]
[ "import os, time\nimport numpy as np\nfrom torch import optim\nimport torch.nn.functional as F\nfrom utils.display import stream, simple_table\nfrom utils.dataset import get_vocoder_datasets\nfrom utils.distribution import discretized_mix_logistic_loss\nimport hparams as hp\nfrom models.fatchord_version import WaveRNN\nfrom gen_wavernn import gen_testset\nfrom utils.paths import Paths\nimport argparse\n\n\ndef voc_train_loop(model, loss_func, optimizer, train_set, test_set, init_lr, total_steps):\n\n for p in optimizer.param_groups: p['lr'] = init_lr\n\n total_iters = len(train_set)\n epochs = (total_steps - model.get_step()) // total_iters + 1\n\n for e in range(1, epochs + 1):\n\n lr = init_lr * (0.5 ** (model.get_step() // 250_000))\n for p in optimizer.param_groups: p['lr'] = lr\n\n start = time.time()\n running_loss = 0.\n\n for i, (x, y, m) in enumerate(train_set, 1):\n x, m, y = x.cuda(), m.cuda(), y.cuda()\n\n y_hat = model(x, m)\n\n if model.mode == 'RAW' :\n y_hat = y_hat.transpose(1, 2).unsqueeze(-1)\n\n elif model.mode == 'MOL' :\n y = y.float()\n\n y = y.unsqueeze(-1)\n\n loss = loss_func(y_hat, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n\n speed = i / (time.time() - start)\n avg_loss = running_loss / i\n\n step = model.get_step()\n k = step // 1000\n\n if step % hp.voc_checkpoint_every == 0 :\n gen_testset(model, test_set, hp.voc_gen_at_checkpoint, hp.voc_gen_batched,\n hp.voc_target, hp.voc_overlap, paths.voc_output)\n model.checkpoint(paths.voc_checkpoints)\n\n msg = f'| Epoch: {e}/{epochs} ({i}/{total_iters}) | Loss: {avg_loss:.4f} | {speed:.1f} steps/s | Step: {k}k | '\n stream(msg)\n\n model.save(paths.voc_latest_weights)\n model.log(paths.voc_log, msg)\n print(' ')\n\n\nif __name__ == \"__main__\" :\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n # Parse Arguments\n parser = argparse.ArgumentParser(description='Train WaveRNN Vocoder')\n parser.add_argument('--lr', '-l', type=float, help='[float] override hparams.py learning rate')\n parser.add_argument('--batch_size', '-b', type=int, help='[int] override hparams.py batch size')\n parser.add_argument('--force_train', '-f', action='store_true', help='Forces the model to train past total steps')\n parser.add_argument('--gta', '-g', action='store_true', help='train wavernn on GTA features')\n parser.set_defaults(lr=hp.voc_lr)\n parser.set_defaults(batch_size=hp.voc_batch_size)\n args = parser.parse_args()\n\n batch_size = args.batch_size\n force_train = args.force_train\n train_gta = args.gta\n lr = args.lr\n\n print('\\nInitialising Model...\\n')\n\n # Instantiate WaveRNN Model\n voc_model = WaveRNN(rnn_dims=hp.voc_rnn_dims,\n fc_dims=hp.voc_fc_dims,\n bits=hp.bits,\n pad=hp.voc_pad,\n upsample_factors=hp.voc_upsample_factors,\n feat_dims=hp.num_mels,\n compute_dims=hp.voc_compute_dims,\n res_out_dims=hp.voc_res_out_dims,\n res_blocks=hp.voc_res_blocks,\n hop_length=hp.hop_length,\n sample_rate=hp.sample_rate,\n pad_val=hp.voc_pad_val,\n mode=hp.voc_mode).cuda()\n\n # Check to make sure the hop length is correctly factorised\n assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length\n\n paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)\n\n voc_model.restore(paths.voc_latest_weights)\n\n optimizer = optim.Adam(voc_model.parameters())\n\n train_set, test_set = get_vocoder_datasets(paths.data, batch_size, train_gta)\n\n total_steps = 10_000_000 if force_train else hp.voc_total_steps\n\n simple_table([('Remaining', str((total_steps - 
voc_model.get_step())//1000) + 'k Steps'),\n ('Batch Size', batch_size),\n ('LR', lr),\n ('Sequence Len', hp.voc_seq_len),\n ('GTA Train', train_gta)])\n\n loss_func = F.cross_entropy if voc_model.mode == 'RAW' else discretized_mix_logistic_loss\n\n voc_train_loop(voc_model, loss_func, optimizer, train_set, test_set, lr, total_steps)\n\n print('Training Complete.')\n print('To continue training increase voc_total_steps in hparams.py or use --force_train')\n" ]
[ [ "numpy.cumprod" ] ]
grzegorznowak/tensorflow-rrn-server
[ "1011ea465c298263fa177ba34ba0db0897985d8f" ]
[ "src/rnn_time_series_server_tests.py" ]
[ "import unittest\r\nimport requests\r\nimport rnn_time_series_server as rnn\r\nimport os\r\nimport numpy as np\r\nfrom numpy.testing import assert_array_equal\r\n\r\nclass RNNTimeSeriesServerTestRequests(unittest.TestCase):\r\n\r\n\r\n def test_predict(self):\r\n response = requests.get('http://localhost:5000/prediction?observation=10,-10,0,100,9999,9999,1')\r\n self.assertIs(type(response.json()), int)\r\n\r\n\r\n def test_parsing_input(self):\r\n response = requests.get('http://localhost:5000/echo?observation=10,-10,0,100,9999,0.9999,1')\r\n self.assertEqual(response.json(), [10, -10, 0, 100, 9999, 0.9999,1])\r\n\r\n\r\n def test_failsafe_when_next_observation_isnt_actually_next(self):\r\n requests.get('http://localhost:5000/prediction?observation=10,-10,0,100,9999,0.9999,1')\r\n response1 = requests.get('http://localhost:5000/prediction?observation=10,-10,0,100,9999,0.9999,3') # this should be 2, 3 means we skipped a data\r\n self.assertEqual(response1.json(), \"Observation data skipped an entry\")\r\n\r\n requests.get('http://localhost:5000/prediction?observation=10,-10,0,100,9999,0.9999,4')\r\n response2 = requests.get('http://localhost:5000/prediction?observation=10,-10,0,100,9999,0.9999,1') # this should be 5\r\n self.assertEqual(response2.json(), \"Wrong ordering of observation data\")\r\n\r\n requests.get('http://localhost:5000/prediction?observation=10,-10,0,100,9999,0.9999,2')\r\n response3 = requests.get('http://localhost:5000/prediction?observation=10,-10,0,100,9999,0.9999,0') # this is actually OK, since new day\r\n self.assertNotEqual(response3.json(), \"error: control check didn't pass\")\r\n self.assertIs(type(response3.json()), int)\r\n\r\n\r\n def test_random_response(self):\r\n response = requests.get('http://localhost:5000/random')\r\n self.assertIs(type(response.json()), int)\r\n\r\n\r\nclass RNNTimeSeriesServerTestCore(unittest.TestCase):\r\n\r\n def test_model_importing(self):\r\n stubResponse = rnn.load_module_method_from_path(os.path.dirname(os.path.abspath(__file__))+'/stub_module.py', 'stub_module', 'justAStubFunctionForATest')\r\n self.assertEqual(stubResponse(), 1234)\r\n\r\n def test_observation_parsing(self):\r\n result = rnn.raw_observation_to_list(\"10,-10,0,100,9999,0.9999,1\")\r\n self.assertEqual(result, [10, -10, 0, 100, 9999, 0.9999,1])\r\n\r\n def test_unpacking_observation_data(self):\r\n unpacked_labels = rnn.unpack_labels([rnn.ObservationData([1,1,1,1,1,0.1, 1]), rnn.ObservationData([1,1,1,1,1,0.1, 2])])\r\n assert_array_equal(unpacked_labels, np.array([[1,1,1,1,1,0.1], [1,1,1,1,1,0.1]]))\r\n\r\n def test_filling_batch(self):\r\n output1 = rnn.maybe_fill_batch_with_sparse_vectors(np.array([1,1,1,1,1,0.1]), 5, 6)\r\n assert_array_equal(output1, np.array([[1,1,1,1,1,0.1], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0]]))\r\n\r\n output2 = rnn.maybe_fill_batch_with_sparse_vectors(np.array([[1,1,1,1,1,0.1], [1,1,1,1,1,0.1]]), 5, 6)\r\n assert_array_equal(output2, np.array([[1,1,1,1,1,0.1], [1,1,1,1,1,0.1], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0]]))\r\n\r\n output3 = rnn.maybe_fill_batch_with_sparse_vectors(np.array([[1,1,1,1,1,0.1], [1,1,1,1,1,0.1], [1,1,1,1,1,0.1], [1,1,1,1,1,0.1], [1,1,1,1,1,0.1]]), 5, 6)\r\n assert_array_equal(output3, np.array([[1,1,1,1,1,0.1], [1,1,1,1,1,0.1], [1,1,1,1,1,0.1], [1,1,1,1,1,0.1], [1,1,1,1,1,0.1]]))\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n\r\n" ]
[ [ "numpy.array" ] ]
BeeGass/Agents
[ "7785b010625e3a9409849a293badd00500647807" ]
[ "agents/MC/mc.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport random\nfrom collections import defaultdict\n#-------------------------------------------------------------------------\n'''\n Monte-Carlo\n In this problem, you will implememnt an AI player for Blackjack.\n The main goal of this problem is to get familar with Monte-Carlo algorithm.\n You could test the correctness of your code\n by typing 'nosetests -v mc_test.py' in the terminal.\n\n You don't have to follow the comments to write your code. They are provided\n as hints in case you need.\n'''\n#-------------------------------------------------------------------------\n\ndef initial_policy(observation):\n \"\"\"A policy that sticks if the player score is >= 20 and his otherwise\n\n Parameters:\n -----------\n observation\n\n Returns:\n --------\n action: 0 or 1\n 0: STICK\n 1: HIT\n \"\"\"\n ############################\n # YOUR IMPLEMENTATION HERE #\n # get parameters from observation\n score, dealer_score, usable_ace = observation\n action = 1 \n if score >= 20:\n action = 0 \n\n ############################\n return action\n\ndef generate_trajectory(policy, env):\n state = env.reset()\n episode_buffer = []\n\n # loop until episode generation is done\n while True:\n # select an action \n action = policy(state)\n \n # return a reward and new state \n next_state, reward, done, _ = env.step(action)\n\n # append state, action, reward to episode\n episode_buffer.insert(0, (state, action, reward)) # format episode to start at timestep 0 and end at timestep T \n\n if done:\n break\n state = next_state\n # update state to new state\n\n return episode_buffer\n\n\ndef mc_prediction(policy, env, n_episodes, gamma = 1.0):\n \"\"\"Given policy using sampling to calculate the value function\n by using Monte Carlo first visit algorithm.\n\n Parameters:\n -----------\n policy: function\n A function that maps an obversation to action probabilities\n env: function\n OpenAI gym environment\n n_episodes: int\n Number of episodes to sample\n gamma: float\n Gamma discount factor\n Returns:\n --------\n V: defaultdict(float)\n A dictionary that maps from state to value\n\n Note: at the begining of each episode, you need initialize the environment using env.reset()\n \"\"\"\n # initialize empty dictionaries\n returns_sum = defaultdict(float)\n returns_count = defaultdict(float)\n # a nested dictionary that maps state -> value\n V = defaultdict(float)\n\n ############################\n # YOUR IMPLEMENTATION HERE #\n for e in range(n_episodes):\n state = env.reset()\n\n # initialize the episode\n episode_buffer = [] # list that will contain the samples of the state, action and reward of the episode\n\n # generate empty episode list\n state_visited = [] \n\n # loop until episode generation is done\n while True:\n # select an action \n action = policy(state)\n \n # return a reward and new state \n next_state, reward, done, _ = env.step(action)\n\n # append state, action, reward to episode\n episode_buffer.insert(0, (state, action, reward)) # format episode to start at timestep 0 and end at timestep T \n\n if done:\n break\n state = next_state\n # update state to new state \n\n\n G = 0 # average reward \n # loop for each step of episode, t = T-1, T-2,...,0\n for step in episode_buffer:\n (state, _, reward) = step\n # compute G \n G = (gamma * G) + reward \n\n # unless state_t appears in states \n if state not in state_visited:\n state_visited.append(state)\n\n # update return_count \n returns_count[state] += 1 \n\n # update return_sum\n 
returns_sum[state] += G \n\n # calculate average return for this state over all sampled episodes\n V[state] = returns_sum[state] / returns_count[state] \n\n\n\n ############################\n\n return V\n\ndef epsilon_greedy(Q, state, nA, epsilon = 0.1):\n \"\"\"Selects epsilon-greedy action for supplied state.\n\n Parameters:\n -----------\n Q: dict()\n A dictionary that maps from state -> action-values,\n where Q[s][a] is the estimated action value corresponding to state s and action a.\n state: int\n current state\n nA: int\n Number of actions in the environment\n epsilon: float\n The probability to select a random action, range between 0 and 1\n\n Returns:\n --------\n action: int\n action based current state\n Hints:\n ------\n With probability (1 − epsilon) choose the greedy action.\n With probability epsilon choose an action at random.\n \"\"\"\n ############################\n # YOUR IMPLEMENTATION HERE #\n random_val = random.random()\n action = random.randrange(nA)\n if random_val < 1 - epsilon:\n action = np.argmax(Q[state])\n \n ############################\n return action\n\ndef mc_control_epsilon_greedy(env, n_episodes, gamma = 1.0, epsilon = 0.1):\n \"\"\"Monte Carlo control with exploring starts.\n Find an optimal epsilon-greedy policy.\n\n Parameters:\n -----------\n env: function\n OpenAI gym environment\n n_episodes: int\n Number of episodes to sample\n gamma: float\n Gamma discount factor\n epsilon: float\n The probability to select a random action, range between 0 and 1\n Returns:\n --------\n Q: dict()\n A dictionary that maps from state -> action-values,\n where Q[s][a] is the estimated action value corresponding to state s and action a.\n Hint:\n -----\n You could consider decaying epsilon, i.e. epsilon = epsilon-(0.1/n_episodes) during each episode\n and episode must > 0.\n \"\"\"\n\n returns_sum = defaultdict(float)\n returns_count = defaultdict(float)\n # a nested dictionary that maps state -> (action -> action-value)\n # e.g. 
Q[state] = np.darrary(nA)\n nA = env.action_space.n\n Q = defaultdict(lambda: np.zeros(nA))\n version = 'a'\n ############################\n # YOUR IMPLEMENTATION HERE #\n for e in range(1, n_episodes + 1):\n state = env.reset()\n\n # initialize the episode \n episode_buffer = []\n\n # generate empty episode list\n state_action_visited = []\n\n # loop until one episode generation is done\n while True:\n\n # get an action from epsilon greedy policy \n action = epsilon_greedy(Q, state, nA, epsilon)\n\n # return a reward and new state\n next_state, reward, done, _ = env.step(action)\n\n # append state, action, reward to episode\n episode_buffer.insert(0, ((state, action), reward)) \n\n # update state to new state \n if done:\n break \n state = next_state \n\n\n G = 0 \n # loop for each step of episode, t = T-1, T-2, ...,0 \n for (state_action, reward) in episode_buffer:\n\n # compute G\n G = (gamma * G) + reward \n\n # unless the pair state_t, action_t appears in <state action> pair list\n if state_action not in state_action_visited:\n state_action_visited.append(state_action)\n\n # update return_count\n returns_count[state_action] += 1\n\n # update return_sum\n returns_sum[state_action] += G\n\n (state, action) = state_action\n\n # calculate average return for this state over all sampled episodes \n if version == 'a':\n Q[state][action] = returns_sum[state_action] / returns_count[state_action]\n else:\n Q[state][action] = Q[state][action] + ((1 / returns_count[state_action]) * (G - Q[state][action])) \n \n epsilon = epsilon - (0.1 / n_episodes) \n\n return Q\n" ]
[ [ "numpy.argmax", "numpy.zeros" ] ]
ioshchepkov/gmeterpy
[ "594cf7c15193ae86b98c9474259843eeadc04f5b" ]
[ "gmeterpy/meters/tsoft.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"TSoft format reader.\n\n\"\"\"\n\nimport re\nimport numpy as np\nimport pandas as pd\n\n# possible tags in TSoft format\n_TAGS = ['TSF-file', 'TIMEFORMAT', 'COUNTINFO', 'INCREMENT', 'CHANNELS',\n 'UNITS', 'UNDETVAL', 'COMMENT', 'DATA', 'LABEL',\n 'LININTERPOL', 'CUBINTERPOL', 'GAP', 'STEP']\n\n\ndef read_tsf(filename, encoding='utf-8', channels=None):\n \"\"\"Read TSoft file and return pandas DataFrame.\n\n \"\"\"\n blocks = {}\n with open(filename, 'r', encoding=encoding) as file_object:\n for line in file_object:\n _block = re.search('|'.join(_TAGS), line)\n if _block:\n tag = _block[0]\n blocks[tag] = []\n line = line.replace('[' + tag + ']', '')\n\n line = line.strip()\n\n if not line:\n continue\n\n blocks[tag].append(line)\n\n blocks['UNDETVAL'] = float(blocks['UNDETVAL'][0])\n blocks['TIMEFORMAT'] = str(blocks['TIMEFORMAT'][0])\n\n datetime_columns = ['year', 'month', 'day', 'hour', 'minute', 'second']\n if blocks['TIMEFORMAT'] == 'DATETIMEFRAC':\n datetime_columns += ['ms']\n elif blocks['TIMEFORMAT'] == 'DATETIME':\n pass\n else:\n raise ValueError\n\n for idx, channel in enumerate(blocks['CHANNELS']):\n blocks['CHANNELS'][idx] = channel.strip().split(':')\n\n data_columns = np.asarray(blocks['CHANNELS'])[:, 2]\n columns = datetime_columns + list(data_columns)\n\n data = np.asarray([line.split() for line in blocks['DATA']])\n df = pd.DataFrame(data, columns=columns, dtype=float).replace(\n blocks['UNDETVAL'], np.NaN)\n time = pd.to_datetime(df[datetime_columns])\n df.drop(datetime_columns, axis='columns', inplace=True)\n df.set_index(time, inplace=True)\n\n if channels is not None:\n df = df[channels]\n\n return df\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "numpy.asarray" ] ]
tphanson/tf-agent-labs
[ "c6cb79be5f0f06d9669a32439b56b4d287faeb69" ]
[ "run.py" ]
[ "import os\nimport numpy as np\nimport tensorflow as tf\n\nfrom env import CartPole\nfrom agent.dqn import DQN\n\n# Compulsory config for tf_agents\ntf.compat.v1.enable_v2_behavior()\n\n# Saving dir\nPOLICY_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n './models/policy')\nCHECKPOINT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n './models/checkpoints')\n\n# Environment\nenv = CartPole.env()\n\n# Agent\ndqn = DQN(env, CHECKPOINT_DIR)\ndqn.load_checkpoint()\n\ncounter = 0\nwhile counter < 10000:\n counter += 1\n time_step = env.current_time_step()\n action_step = dqn.agent.policy.action(time_step)\n print('Action:', np.squeeze(action_step.action.numpy()))\n env.step(action_step.action)\n env.render()\n" ]
[ [ "tensorflow.compat.v1.enable_v2_behavior" ] ]
TL-Rubick/tensorflow
[ "6cf1ccf6060a95aad3ccc84544d0aa166990ec72", "be084bd7a4dd241eb781fc704f57bcacc5c9b6dd", "6cf1ccf6060a95aad3ccc84544d0aa166990ec72", "6cf1ccf6060a95aad3ccc84544d0aa166990ec72" ]
[ "tensorflow/python/keras/optimizer_v2/adadelta.py", "tensorflow/python/saved_model/load_v1_in_v2.py", "tensorflow/python/data/experimental/kernel_tests/optimize_dataset_test.py", "tensorflow/python/distribute/parameter_server_strategy_v2_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Adadelta optimizer implementation.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend_config\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.training import gen_training_ops\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.optimizers.Adadelta')\nclass Adadelta(optimizer_v2.OptimizerV2):\n r\"\"\"Optimizer that implements the Adadelta algorithm.\n\n Adadelta optimization is a stochastic gradient descent method that is based on\n adaptive learning rate per dimension to address two drawbacks:\n\n - The continual decay of learning rates throughout training\n - The need for a manually selected global learning rate\n\n Adadelta is a more robust extension of Adagrad that adapts learning rates\n based on a moving window of gradient updates, instead of accumulating all\n past gradients. This way, Adadelta continues learning even when many updates\n have been done. Compared to Adagrad, in the original version of Adadelta you\n don't have to set an initial learning rate. In this version, initial\n learning rate can be set, as in most other Keras optimizers.\n\n According to section 4.3 (\"Effective Learning rates\"), near the end of\n training step sizes converge to 1 which is effectively a high learning\n rate which would cause divergence. This occurs only near the end of the\n training as gradients and step sizes are small, and the epsilon constant\n in the numerator and denominator dominate past gradients and parameter\n updates which converge the learning rate to 1.\n\n According to section 4.4(\"Speech Data\"),where a large neural network with\n 4 hidden layers was trained on a corpus of US English data, ADADELTA was\n used with 100 network replicas.The epsilon used is 1e-6 with rho=0.95\n which converged faster than ADAGRAD, by the following construction:\n def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, decay=0., **kwargs):\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.\n To match the exact form in the original paper use 1.0.\n rho: A `Tensor` or a floating point value. The decay rate.\n epsilon: A `Tensor` or a floating point value. A constant epsilon used\n to better conditioning the grad update.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to `\"Adadelta\"`.\n **kwargs: Keyword arguments. 
Allowed to be one of\n `\"clipnorm\"` or `\"clipvalue\"`.\n `\"clipnorm\"` (float) clips gradients by norm; `\"clipvalue\"` (float) clips\n gradients by value.\n\n Reference:\n - [Zeiler, 2012](http://arxiv.org/abs/1212.5701)\n \"\"\"\n\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self,\n learning_rate=0.001,\n rho=0.95,\n epsilon=1e-7,\n name='Adadelta',\n **kwargs):\n super(Adadelta, self).__init__(name, **kwargs)\n self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))\n self._set_hyper('decay', self._initial_decay)\n self._set_hyper('rho', rho)\n self.epsilon = epsilon or backend_config.epsilon()\n\n def _create_slots(self, var_list):\n # Separate for-loops to respect the ordering of slot variables from v1.\n for v in var_list:\n self.add_slot(v, 'accum_grad')\n for v in var_list:\n self.add_slot(v, 'accum_var')\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(Adadelta, self)._prepare_local(var_device, var_dtype, apply_state)\n apply_state[(var_device, var_dtype)].update(\n dict(\n epsilon=ops.convert_to_tensor_v2_with_dispatch(\n self.epsilon, var_dtype),\n rho=array_ops.identity(self._get_hyper('rho', var_dtype))))\n\n def set_weights(self, weights):\n params = self.weights\n # Override set_weights for backward compatibility of Keras V1 optimizer\n # since it does not include iteration at head of the weight list. Set\n # iteration to 0.\n if len(params) == len(weights) + 1:\n weights = [np.array(0)] + weights\n super(Adadelta, self).set_weights(weights)\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n accum_grad = self.get_slot(var, 'accum_grad')\n accum_var = self.get_slot(var, 'accum_var')\n return gen_training_ops.ResourceApplyAdadelta(\n var=var.handle,\n accum=accum_grad.handle,\n accum_update=accum_var.handle,\n lr=coefficients['lr_t'],\n rho=coefficients['rho'],\n epsilon=coefficients['epsilon'],\n grad=grad,\n use_locking=self._use_locking)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n accum_grad = self.get_slot(var, 'accum_grad')\n accum_var = self.get_slot(var, 'accum_var')\n return gen_training_ops.ResourceSparseApplyAdadelta(\n var=var.handle,\n accum=accum_grad.handle,\n accum_update=accum_var.handle,\n lr=coefficients['lr_t'],\n rho=coefficients['rho'],\n epsilon=coefficients['epsilon'],\n grad=grad,\n indices=indices,\n use_locking=self._use_locking)\n\n def get_config(self):\n config = super(Adadelta, self).get_config()\n config.update({\n 'learning_rate': self._serialize_hyperparameter('learning_rate'),\n 'decay': self._initial_decay,\n 'rho': self._serialize_hyperparameter('rho'),\n 'epsilon': self.epsilon,\n })\n return config\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Import a TF v1-style SavedModel when executing eagerly.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import lift_to_graph\nfrom tensorflow.python.eager import wrap_function\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import function_deserialization\nfrom tensorflow.python.saved_model import loader_impl\nfrom tensorflow.python.saved_model import signature_serialization\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import saver as tf_saver\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import nest\n\n\nclass _Initializer(tracking.CapturableResource):\n \"\"\"Represents an initialization operation restored from a SavedModel.\n\n Without this object re-export of imported 1.x SavedModels would omit the\n original SavedModel's initialization procedure.\n\n Created when `tf.saved_model.load` loads a TF 1.x-style SavedModel with an\n initialization op. This object holds a function that runs the\n initialization. It does not require any manual user intervention;\n `tf.saved_model.save` will see this object and automatically add it to the\n exported SavedModel, and `tf.saved_model.load` runs the initialization\n function automatically.\n \"\"\"\n\n def __init__(self, init_fn, asset_paths):\n super(_Initializer, self).__init__()\n self._asset_paths = asset_paths\n self._init_fn = init_fn\n\n def _create_resource(self):\n return array_ops.placeholder(\n dtype=dtypes.resource, shape=[], name=\"unused_resource\")\n\n def _initialize(self):\n return self._init_fn(*[path.asset_path for path in self._asset_paths])\n\n\nclass _EagerSavedModelLoader(loader_impl.SavedModelLoader):\n \"\"\"Loads a SavedModel without using Sessions.\"\"\"\n\n def get_meta_graph_def_from_tags(self, tags):\n \"\"\"Override to support implicit one-MetaGraph loading with tags=None.\"\"\"\n if tags is None:\n if len(self._saved_model.meta_graphs) != 1:\n tag_sets = [mg.meta_info_def.tags\n for mg in self._saved_model.meta_graphs]\n raise ValueError(\n (\"Importing a SavedModel with tf.saved_model.load requires a \"\n \"'tags=' argument if there is more than one MetaGraph. Got \"\n \"'tags=None', but there are {} MetaGraphs in the SavedModel with \"\n \"tag sets {}. 
Pass a 'tags=' argument to load this SavedModel.\")\n .format(len(self._saved_model.meta_graphs), tag_sets))\n return self._saved_model.meta_graphs[0]\n return super(_EagerSavedModelLoader, self).get_meta_graph_def_from_tags(\n tags)\n\n def load_graph(self, returns, meta_graph_def):\n \"\"\"Called from wrap_function to import `meta_graph_def`.\"\"\"\n # pylint: disable=protected-access\n saver, _ = tf_saver._import_meta_graph_with_return_elements(\n meta_graph_def)\n # pylint: enable=protected-access\n returns[0] = saver\n\n def _extract_saver_restore(self, wrapped, saver):\n if saver is None:\n return None\n saver_def = saver.saver_def\n filename_tensor = wrapped.graph.as_graph_element(\n saver_def.filename_tensor_name)\n # We both feed and fetch filename_tensor so we have an operation to use to\n # feed into variable initializers (only relevant for v1 graph building).\n return wrapped.prune(\n feeds=[filename_tensor],\n fetches=[filename_tensor,\n wrapped.graph.as_graph_element(saver_def.restore_op_name)])\n\n def restore_variables(self, wrapped, restore_from_saver):\n \"\"\"Restores variables from the checkpoint.\"\"\"\n if restore_from_saver is not None:\n initializer, _ = restore_from_saver(\n constant_op.constant(self._variables_path))\n if not ops.executing_eagerly_outside_functions():\n # Add the initialization operation to the table initializers collection\n # in case we don't have any lifted variables to attach it to. There\n # isn't another great place to put it.\n ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, initializer)\n one_unlifted = False\n for variable in wrapped.graph.get_collection_ref(\n ops.GraphKeys.GLOBAL_VARIABLES):\n if variable.graph is wrapped.graph:\n one_unlifted = True\n # pylint: disable=protected-access\n variable._initializer_op = initializer\n # pylint: enable=protected-access\n if one_unlifted:\n logging.warning(\n \"Some variables could not be lifted out of a loaded function. 
\"\n \"Run the tf.initializers.tables_initializer() operation to \"\n \"restore these variables.\")\n\n def _extract_signatures(self, wrapped, meta_graph_def):\n \"\"\"Creates ConcreteFunctions for signatures in `meta_graph_def`.\"\"\"\n signature_functions = {}\n for signature_key, signature_def in meta_graph_def.signature_def.items():\n if signature_def.inputs:\n original_input_names, input_specs = zip(*signature_def.inputs.items())\n else:\n original_input_names = []\n input_specs = []\n # TODO(allenl): Support optional arguments\n feeds = [\n wrap_function._get_element_from_tensor_info(input_spec, wrapped.graph) # pylint: disable=protected-access\n for input_spec in input_specs\n ]\n input_names = []\n input_tensors = []\n for original_input_name, feed in zip(original_input_names, feeds):\n if isinstance(feed, sparse_tensor.SparseTensor):\n # We have to give explicit name for SparseTensor arguments, because\n # these are not present in the TensorInfo.\n indices_name = \"%s_indices\" % original_input_name\n values_name = \"%s_values\" % original_input_name\n dense_shape_name = \"%s_dense_shape\" % original_input_name\n input_names.extend([indices_name, values_name, dense_shape_name])\n input_tensors.extend([feed.indices, feed.values, feed.dense_shape])\n elif isinstance(feed, composite_tensor.CompositeTensor):\n component_tensors = nest.flatten(feed, expand_composites=True)\n input_names.extend(\"%s_component_%d\" % (original_input_name, n)\n for n in range(len(component_tensors)))\n input_tensors.extend(component_tensors)\n else:\n input_names.append(original_input_name)\n input_tensors.append(feed)\n fetches = {name: out for name, out in signature_def.outputs.items()}\n try:\n signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)\n except lift_to_graph.UnliftableError as ex:\n # Mutate the exception to add a bit more detail.\n args = ex.args\n if not args:\n message = \"\"\n else:\n message = args[0]\n message = (\n (\"A SavedModel signature needs an input for each placeholder the \"\n \"signature's outputs use. An output for signature '{}' depends on \"\n \"a placeholder which is not an input (i.e. the placeholder is not \"\n \"fed a value).\\n\\n\").format(signature_key)\n + message)\n ex.args = (message,) + args[1:]\n raise\n # pylint: disable=protected-access\n signature_fn._arg_keywords = input_names\n signature_fn._func_graph.structured_input_signature = (\n (),\n func_graph.convert_structure_to_signature(\n dict(zip(input_names, input_tensors))))\n\n if len(input_names) == 1:\n # Allowing positional arguments does not create any ambiguity if there's\n # only one.\n signature_fn._num_positional_args = 1\n else:\n signature_fn._num_positional_args = 0\n # pylint: enable=protected-access\n signature_functions[signature_key] = signature_fn\n return signature_functions\n\n def load(self, tags):\n \"\"\"Creates an object from the MetaGraph identified by `tags`.\"\"\"\n meta_graph_def = self.get_meta_graph_def_from_tags(tags)\n load_shared_name_suffix = \"_load_{}\".format(ops.uid())\n functions = function_deserialization.load_function_def_library(\n meta_graph_def.graph_def.library,\n load_shared_name_suffix=load_shared_name_suffix)\n # Replace existing functions in the MetaGraphDef with renamed functions so\n # we don't have duplicates or name collisions.\n meta_graph_def.graph_def.library.Clear()\n for function in functions.values():\n meta_graph_def.graph_def.library.function.add().CopyFrom(\n function.function_def)\n # We've renamed functions and shared names. 
We need the same operation on\n # the GraphDef itself for consistency.\n for node_def in meta_graph_def.graph_def.node:\n function_deserialization.fix_node_def(node_def, functions,\n load_shared_name_suffix)\n\n load_graph_returns = [None]\n wrapped = wrap_function.wrap_function(\n functools.partial(self.load_graph, load_graph_returns, meta_graph_def),\n signature=[])\n saver, = load_graph_returns\n restore_from_saver = self._extract_saver_restore(wrapped, saver)\n self.restore_variables(wrapped, restore_from_saver)\n with wrapped.graph.as_default():\n init_op = loader_impl.get_init_op(\n meta_graph_def) or monitored_session.Scaffold.default_local_init_op()\n # Add a dummy Tensor we know we can fetch to add control dependencies to.\n init_anchor = constant_op.constant(0., name=\"dummy_fetch\")\n\n root = tracking.AutoTrackable()\n if restore_from_saver is not None:\n root.restore = (\n lambda path: restore_from_saver(constant_op.constant(path)))\n asset_feed_tensors = []\n asset_paths = []\n for tensor_name, value in loader_impl.get_asset_tensors(\n self._export_dir, meta_graph_def).items():\n asset_feed_tensors.append(wrapped.graph.as_graph_element(tensor_name))\n asset_paths.append(tracking.Asset(value))\n init_fn = wrapped.prune(\n feeds=asset_feed_tensors,\n fetches=[init_anchor, wrapped.graph.as_graph_element(init_op)])\n initializer = _Initializer(init_fn, asset_paths)\n # pylint: disable=protected-access\n local_init_op, _ = initializer._initialize()\n # pylint: enable=protected-access\n with ops.init_scope():\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, local_init_op)\n for variable in wrapped.graph.get_collection_ref(\n ops.GraphKeys.LOCAL_VARIABLES):\n # pylint: disable=protected-access\n variable._initializer_op = local_init_op\n # pylint: enable=protected-access\n root.initializer = initializer\n root.asset_paths = asset_paths\n signature_functions = self._extract_signatures(wrapped, meta_graph_def)\n\n root.signatures = signature_serialization.create_signature_map(\n signature_functions)\n root.variables = list(wrapped.graph.variables)\n root.tensorflow_version = (\n meta_graph_def.meta_info_def.tensorflow_version)\n root.tensorflow_git_version = (\n meta_graph_def.meta_info_def.tensorflow_git_version)\n root.graph = wrapped.graph\n root.prune = wrapped.prune\n return root\n\n\ndef load(export_dir, tags):\n \"\"\"Load a v1-style SavedModel as an object.\"\"\"\n loader = _EagerSavedModelLoader(export_dir)\n return loader.load(tags=tags)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the private `_OptimizeDataset` transformation.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\nimport warnings\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.ops import batching\nfrom tensorflow.python.data.experimental.ops import grouping\nfrom tensorflow.python.data.experimental.ops import optimization_options\nfrom tensorflow.python.data.experimental.ops import scan_ops\nfrom tensorflow.python.data.experimental.ops import testing\nfrom tensorflow.python.data.experimental.ops import threadpool\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.platform import test\n\n\ndef _captured_refvar_test_combinations():\n\n def make_map_dataset(var):\n return dataset_ops.Dataset.from_tensors(0).map(lambda x: x + var)\n\n def make_flat_map_dataset(var):\n return dataset_ops.Dataset.from_tensors(\n 0).flat_map(lambda _: dataset_ops.Dataset.from_tensors(var))\n\n def make_filter_dataset(var):\n return dataset_ops.Dataset.from_tensors(0).filter(lambda x: x < var)\n\n def make_map_and_batch_dataset(var):\n\n def map_fn(x):\n return x + var\n\n return dataset_ops.Dataset.from_tensors(0).apply(\n batching.map_and_batch(map_fn, 1))\n\n def make_group_by_reducer_dataset(var):\n reducer = grouping.Reducer(\n init_func=lambda _: 0,\n reduce_func=lambda x, y: x,\n finalize_func=lambda _: var)\n return dataset_ops.Dataset.range(5).apply(\n grouping.group_by_reducer(lambda x: x % 2, reducer))\n\n def make_group_by_window_dataset(var):\n\n def reduce_fn(key, bucket):\n del key, bucket\n return dataset_ops.Dataset.from_tensors(var)\n\n return dataset_ops.Dataset.from_tensors(0).repeat(10).apply(\n grouping.group_by_window(lambda _: 0, reduce_fn, 10))\n\n def make_scan_dataset(var):\n return dataset_ops.Dataset.from_tensors(0).apply(\n scan_ops.scan(\n 0, lambda old_state, elem: (old_state + 1, elem + old_state + var)))\n\n cases = [\n # Core datasets\n (\"Map\", make_map_dataset),\n (\"FlatMap\", make_flat_map_dataset),\n (\"Filter\", make_filter_dataset),\n # Experimental datasets\n (\"MapAndBatch\", make_map_and_batch_dataset),\n (\"GroupByReducer\", make_group_by_reducer_dataset),\n (\"GroupByWindow\", make_group_by_window_dataset),\n (\"Scan\", make_scan_dataset)\n ]\n\n def reduce_fn(x, y):\n name, dataset_fn = y\n return x + combinations.combine(\n 
dataset_fn=combinations.NamedObject(name, dataset_fn))\n\n return functools.reduce(reduce_fn, cases, [])\n\n\ndef _disable_intra_op_parallelism_test_combinations():\n\n def make_tensor_dataset():\n return dataset_ops.Dataset.from_tensors(42)\n\n def make_map_dataset():\n return dataset_ops.Dataset.from_tensors(42).map(lambda x: x + 1)\n\n cases = [\n (\"FromTensors\", make_tensor_dataset, [42]),\n (\"Map\", make_map_dataset, [43]),\n ]\n\n def reduce_fn(x, y):\n name, dataset_fn, expected_output = y\n return x + combinations.combine(\n dataset_fn=combinations.NamedObject(name, dataset_fn),\n expected_output=[expected_output])\n\n return functools.reduce(reduce_fn, cases, [])\n\n\nclass OptimizeDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationStatefulFunction(self):\n dataset = dataset_ops.Dataset.range(\n 10).map(lambda _: random_ops.random_uniform([])).batch(10)\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n dataset = dataset.with_options(options)\n get_next = self.getNext(dataset)\n self.evaluate(get_next())\n\n # TODO(b/123902160)\n @combinations.generate(test_base.graph_only_combinations())\n def testOptimizationLargeInputFromTensor(self):\n input_t = array_ops.placeholder(dtypes.int32, (None, None, None))\n dataset = dataset_ops.Dataset.from_tensors(input_t)\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n dataset = dataset.with_options(options)\n iterator = dataset_ops.make_initializable_iterator(dataset)\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.cached_session() as sess:\n sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)})\n self.evaluate(get_next)\n\n # TODO(b/123902160)\n @combinations.generate(test_base.graph_only_combinations())\n def testOptimizationLargeInputFromTensorSlices(self):\n input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))\n dataset = dataset_ops.Dataset.from_tensor_slices(input_t)\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n dataset = dataset.with_options(options)\n iterator = dataset_ops.make_initializable_iterator(dataset)\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.cached_session() as sess:\n sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)})\n self.evaluate(get_next)\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationNestedDataset(self):\n\n def flat_map_fn(_):\n dataset = dataset_ops.Dataset.from_tensors(0)\n dataset = dataset.apply(testing.assert_next([\"MemoryCacheImpl\"]))\n dataset = dataset.skip(0) # Should be removed by noop elimination\n dataset = dataset.cache()\n return dataset\n\n dataset = dataset_ops.Dataset.range(1)\n dataset = dataset.flat_map(flat_map_fn)\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n options.experimental_optimization.noop_elimination = True\n dataset = dataset.with_options(options)\n self.assertDatasetProduces(dataset, expected_output=[0])\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationNestedDatasetWithModifiedRetval(self):\n\n def flat_map_fn(_):\n dataset = dataset_ops.Dataset.from_tensors(0)\n dataset = dataset.apply(testing.assert_next([\"MapAndBatch\"]))\n # Should be fused by 
map and batch fusion\n dataset = dataset.map(lambda x: x)\n dataset = dataset.batch(1)\n return dataset\n\n dataset = dataset_ops.Dataset.range(1)\n dataset = dataset.flat_map(flat_map_fn)\n\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n options.experimental_optimization.map_and_batch_fusion = True\n dataset = dataset.with_options(options)\n self.assertDatasetProduces(dataset, expected_output=[[0]])\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationDoubleOptimizeDatasetNested(self):\n def flat_map_fn(_):\n dataset = dataset_ops.Dataset.from_tensors(0)\n dataset = dataset.apply(testing.assert_next([\"MapAndBatch\"]))\n dataset = dataset.skip(0)\n # Should be fused by map and batch fusion\n dataset = dataset.map(lambda x: x)\n dataset = dataset.batch(1)\n return dataset\n\n dataset = dataset_ops.Dataset.from_tensors(0)\n dataset = dataset.flat_map(flat_map_fn)\n dataset = dataset_ops._OptimizeDataset(dataset, [\"map_and_batch_fusion\"],\n [], [])\n dataset = dataset_ops._OptimizeDataset(dataset, [\"noop_elimination\"], [],\n [])\n\n self.assertDatasetProduces(dataset, expected_output=[[0]])\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationDifferentOrderOptionsCompareEqual(self):\n with ops.Graph().as_default() as first_graph:\n dataset = dataset_ops.Dataset.from_tensors(0)\n dataset_ops._OptimizeDataset(dataset,\n [\"map_and_batch_fusion\", \"noop_elimination\"],\n [], [])\n\n with ops.Graph().as_default() as second_graph:\n dataset = dataset_ops.Dataset.from_tensors(0)\n dataset_ops._OptimizeDataset(dataset,\n [\"noop_elimination\", \"map_and_batch_fusion\"],\n [], [])\n\n self.assertEqual(first_graph.as_graph_def(), second_graph.as_graph_def())\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n _disable_intra_op_parallelism_test_combinations(),\n combinations.combine(apply_autotune=[None, True, False])))\n def testOptimizationDisableIntraOpParallelism(self, dataset_fn,\n expected_output,\n apply_autotune):\n dataset = dataset_fn()\n dataset = dataset.apply(testing.assert_next([\"MaxIntraOpParallelism\"]))\n if apply_autotune is not None:\n options = dataset_ops.Options()\n options.experimental_optimization.autotune = apply_autotune\n dataset = dataset.with_options(options)\n\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(autotune=False, autotune_buffers=False) +\n combinations.combine(autotune=True, autotune_buffers=False) +\n combinations.combine(autotune=True, autotune_buffers=True),\n combinations.combine(set_env=[False, True])))\n def testOptimizationEnableGradientDescent(self, autotune, autotune_buffers,\n set_env):\n if set_env:\n os.environ[\"TF_DATA_EXPERIMENT_OPT_IN\"] = \"enable_gradient_descent\"\n os.environ[\"TF_JOB_NAME\"] = \"test_job\"\n\n dataset = dataset_ops.Dataset.range(5)\n dataset = dataset.prefetch(buffer_size=-1)\n dataset = dataset.map(lambda x: x + 1, num_parallel_calls=2)\n dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)\n dataset = dataset.prefetch(buffer_size=3)\n dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)\n dataset = dataset.prefetch(buffer_size=1)\n\n options = dataset_ops.Options()\n options.experimental_optimization.autotune = autotune\n options.experimental_optimization.autotune_buffers = autotune_buffers\n 
dataset = dataset.with_options(options)\n\n self.assertDatasetProduces(dataset, expected_output=list(range(3, 8)))\n\n if set_env:\n del os.environ[\"TF_DATA_EXPERIMENT_OPT_IN\"]\n del os.environ[\"TF_JOB_NAME\"]\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(autotune=[True, False]),\n combinations.combine(set_env=[True, False])))\n def testOptimizationMapParallelization(self, autotune, set_env):\n if set_env:\n os.environ[\"TF_DATA_EXPERIMENT_OPT_IN\"] = \"map_parallelization\"\n os.environ[\"TF_JOB_NAME\"] = \"test_job\"\n\n dataset = dataset_ops.Dataset.range(5)\n if autotune and set_env:\n dataset = dataset.apply(testing.assert_next([\"ParallelMap\"]))\n else:\n dataset = dataset.apply(testing.assert_next([\"Map\"]))\n dataset = dataset.map(lambda x: x + 1)\n\n options = dataset_ops.Options()\n options.experimental_optimization.autotune = autotune\n dataset = dataset.with_options(options)\n\n self.assertDatasetProduces(dataset, expected_output=list(range(1, 6)))\n\n if set_env:\n del os.environ[\"TF_DATA_EXPERIMENT_OPT_IN\"]\n del os.environ[\"TF_JOB_NAME\"]\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(autotune=False, autotune_buffers=False) +\n combinations.combine(autotune=True, autotune_buffers=False) +\n combinations.combine(autotune=True, autotune_buffers=True),\n combinations.combine(first_buffer_sizes=[(1, -1, -1, 4),\n (2, -1, 3, -1),\n (2, 1, -1, -1)]),\n combinations.combine(second_buffer_sizes=[(1, -1, -1, 4),\n (2, -1, 3, -1),\n (2, 1, -1, -1)]))\n )\n def testOptimizationAutotuneBuffers(self, autotune, autotune_buffers,\n first_buffer_sizes, second_buffer_sizes):\n dataset = dataset_ops.Dataset.range(10)\n for buffer_size in first_buffer_sizes:\n dataset = dataset.prefetch(buffer_size=buffer_size)\n dataset = dataset.map(lambda x: x + 1)\n for buffer_size in second_buffer_sizes:\n dataset = dataset.prefetch(buffer_size=buffer_size)\n options = dataset_ops.Options()\n options.experimental_optimization.autotune = autotune\n options.experimental_optimization.autotune_buffers = autotune_buffers\n dataset = dataset.with_options(options)\n self.assertDatasetProduces(dataset, expected_output=list(range(1, 11)))\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationThreadPoolDataset(self):\n dataset = dataset_ops.Dataset.range(10).batch(10)\n\n dataset = threadpool.override_threadpool(\n dataset,\n threadpool.PrivateThreadPool(\n 2, display_name=\"private_thread_pool_%d\" % 2))\n\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n dataset = dataset.with_options(options)\n self.assertDatasetProduces(\n dataset,\n expected_output=[list(range(10))],\n requires_initialization=True)\n\n # Reference variables are not supported in eager mode.\n @combinations.generate(\n combinations.times(test_base.graph_only_combinations(),\n _captured_refvar_test_combinations()))\n def testOptimizationWithCapturedRefVar(self, dataset_fn):\n \"\"\"Tests that default optimizations are disabled with ref variables.\"\"\"\n variable = variable_scope.get_variable(\n \"v\", initializer=0, use_resource=False)\n assign_op = variable.assign_add(1)\n\n # Check that warning is logged.\n warnings.simplefilter(\"always\")\n with warnings.catch_warnings(record=True) as w:\n unoptimized_dataset = dataset_fn(variable)\n\n options = dataset_ops.Options()\n 
options.experimental_optimization.apply_default_optimizations = False\n options.experimental_optimization.noop_elimination = True\n options.experimental_optimization.map_and_batch_fusion = True\n optimized_dataset = unoptimized_dataset.with_options(options)\n optimized_it = dataset_ops.make_initializable_iterator(optimized_dataset)\n\n self.assertGreaterEqual(len(w), 1)\n graph_rewrites = options._graph_rewrites()\n expected = (\n \"tf.data graph rewrites are not compatible with \"\n \"tf.Variable. The following rewrites will be disabled: %s.\"\n \" To enable rewrites, use resource variables instead by \"\n \"calling `tf.enable_resource_variables()` at the start of the \"\n \"program.\" %\n (\", \".join(graph_rewrites.enabled + graph_rewrites.default)))\n self.assertTrue(any(expected in str(warning) for warning in w))\n\n # Check that outputs are the same in the optimized and unoptimized cases,\n # when the variable value is changing.\n unoptimized_it = dataset_ops.make_initializable_iterator(\n unoptimized_dataset)\n with ops.control_dependencies([assign_op]):\n unoptimized_output = unoptimized_it.get_next()\n optimized_output = optimized_it.get_next()\n\n self.evaluate(variable.initializer)\n self.evaluate((unoptimized_it.initializer, optimized_it.initializer))\n while True:\n try:\n unoptimized, optimized = self.evaluate((unoptimized_output,\n optimized_output))\n self.assertEqual(unoptimized, optimized)\n except errors.OutOfRangeError:\n break\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationDefault(self):\n \"\"\"Tests the optimization settings by default.\"\"\"\n options = dataset_ops.Options()\n expected_optimizations_enabled = []\n expected_optimizations_disabled = []\n expected_optimizations_default = [\n \"map_and_batch_fusion\",\n \"noop_elimination\",\n \"shuffle_and_repeat_fusion\",\n ]\n graph_rewrites = options._graph_rewrites()\n self.assertEqual(set(graph_rewrites.enabled),\n set(expected_optimizations_enabled))\n self.assertEqual(set(graph_rewrites.disabled),\n set(expected_optimizations_disabled))\n self.assertEqual(set(graph_rewrites.default),\n set(expected_optimizations_default))\n\n options.experimental_optimization.apply_default_optimizations = True\n graph_rewrites = options._graph_rewrites()\n self.assertEqual(set(graph_rewrites.enabled),\n set(expected_optimizations_enabled))\n self.assertEqual(set(graph_rewrites.disabled),\n set(expected_optimizations_disabled))\n self.assertEqual(set(graph_rewrites.default),\n set(expected_optimizations_default))\n\n options.experimental_optimization.apply_default_optimizations = False\n expected_optimizations_default = []\n graph_rewrites = options._graph_rewrites()\n self.assertEqual(set(graph_rewrites.enabled),\n set(expected_optimizations_enabled))\n self.assertEqual(set(graph_rewrites.disabled),\n set(expected_optimizations_disabled))\n self.assertEqual(set(graph_rewrites.default),\n set(expected_optimizations_default))\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationEnabled(self):\n \"\"\"Tests the optimization settings by enabling all.\"\"\"\n options = dataset_ops.Options()\n options.experimental_optimization.filter_fusion = True\n options.experimental_optimization.filter_with_random_uniform_fusion = True\n options.experimental_optimization.hoist_random_uniform = True\n options.experimental_optimization.map_and_batch_fusion = True\n options.experimental_optimization.map_and_filter_fusion = True\n 
options.experimental_optimization.map_parallelization = True\n options.experimental_optimization.map_fusion = True\n options.experimental_optimization.noop_elimination = True\n options.experimental_optimization.parallel_batch = True\n options.experimental_optimization.shuffle_and_repeat_fusion = True\n options.experimental_optimization.map_vectorization.enabled = True\n options.experimental_optimization.autotune_buffers = True\n options.experimental_deterministic = False\n options.experimental_stats.latency_all_edges = True\n options.experimental_slack = True\n\n expected_optimizations_enabled = [\n \"filter_fusion\",\n \"filter_with_random_uniform_fusion\",\n \"hoist_random_uniform\",\n \"map_and_batch_fusion\",\n \"map_and_filter_fusion\",\n \"map_parallelization\",\n \"map_fusion\",\n \"noop_elimination\",\n \"parallel_batch\",\n \"shuffle_and_repeat_fusion\",\n \"map_vectorization\",\n \"autotune_buffer_sizes\",\n \"make_sloppy\",\n \"latency_all_edges\",\n \"slack\",\n \"disable_prefetch_legacy_autotune\",\n ]\n expected_optimizations_disabled = []\n expected_optimizations_default = []\n graph_rewrites = options._graph_rewrites()\n self.assertEqual(set(graph_rewrites.enabled),\n set(expected_optimizations_enabled))\n self.assertEqual(set(graph_rewrites.disabled),\n set(expected_optimizations_disabled))\n self.assertEqual(set(graph_rewrites.default),\n set(expected_optimizations_default))\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptimizationDisabled(self):\n \"\"\"Tests the optimization settings by disabling all.\"\"\"\n options = dataset_ops.Options()\n options.experimental_optimization.filter_fusion = False\n options.experimental_optimization.filter_with_random_uniform_fusion = False\n options.experimental_optimization.hoist_random_uniform = False\n options.experimental_optimization.map_and_batch_fusion = False\n options.experimental_optimization.map_and_filter_fusion = False\n options.experimental_optimization.map_parallelization = False\n options.experimental_optimization.map_fusion = False\n options.experimental_optimization.noop_elimination = False\n options.experimental_optimization.parallel_batch = False\n options.experimental_optimization.shuffle_and_repeat_fusion = False\n options.experimental_optimization.map_vectorization.enabled = False\n options.experimental_optimization.autotune = False\n options.experimental_deterministic = True\n options.experimental_stats.latency_all_edges = False\n options.experimental_slack = False\n\n expected_optimizations_enabled = []\n expected_optimizations_disabled = [\n \"filter_fusion\",\n \"filter_with_random_uniform_fusion\",\n \"hoist_random_uniform\",\n \"map_and_batch_fusion\",\n \"map_and_filter_fusion\",\n \"map_parallelization\",\n \"map_fusion\",\n \"noop_elimination\",\n \"parallel_batch\",\n \"shuffle_and_repeat_fusion\",\n \"map_vectorization\",\n \"autotune_buffer_sizes\",\n \"make_sloppy\",\n \"latency_all_edges\",\n \"slack\",\n \"disable_prefetch_legacy_autotune\",\n ]\n expected_optimizations_default = []\n graph_rewrites = options._graph_rewrites()\n self.assertEqual(set(graph_rewrites.enabled),\n set(expected_optimizations_enabled))\n self.assertEqual(set(graph_rewrites.disabled),\n set(expected_optimizations_disabled))\n self.assertEqual(set(graph_rewrites.default),\n set(expected_optimizations_default))\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(autotune=[True, False, None]),\n 
combinations.combine(autotune_buffers=[True, False, None])))\n def testAutotuningSettings(self, autotune, autotune_buffers):\n options = dataset_ops.Options()\n if autotune is not None:\n options.experimental_optimization.autotune = autotune\n if autotune_buffers is not None:\n options.experimental_optimization.autotune_buffers = autotune_buffers\n\n # Check defaults\n autotune_settings = options._autotune_settings()\n autotune_val = autotune_settings[0]\n autotune_buffers_val = options.experimental_optimization._autotune_buffers()\n\n if autotune is not False: # pylint: disable=g-bool-id-comparison\n self.assertTrue(autotune_val)\n else:\n self.assertFalse(autotune_val)\n if autotune_buffers is True: # pylint: disable=g-bool-id-comparison\n self.assertTrue(autotune_buffers_val)\n else:\n self.assertFalse(autotune_buffers_val)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(autotune_buffers=[True, False, None])))\n def testAutotuneBuffersSettings(self, autotune_buffers):\n options = dataset_ops.Options()\n if autotune_buffers is not None:\n options.experimental_optimization.autotune_buffers = autotune_buffers\n\n graph_rewrites = options._graph_rewrites()\n autotune_settings = options._autotune_settings()\n algorithm = autotune_settings[1]\n\n if autotune_buffers is True: # pylint: disable=g-bool-id-comparison\n self.assertIn(\"autotune_buffer_sizes\", graph_rewrites.enabled)\n self.assertIn(\"disable_prefetch_legacy_autotune\", graph_rewrites.enabled)\n self.assertEqual(algorithm,\n optimization_options._AutotuneAlgorithm.GRADIENT_DESCENT)\n else:\n self.assertNotIn(\"autotune_buffer_sizes\", graph_rewrites.enabled)\n self.assertNotIn(\"disable_prefetch_legacy_autotune\",\n graph_rewrites.enabled)\n self.assertEqual(algorithm,\n optimization_options._AutotuneAlgorithm.HILL_CLIMB)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(set_budget=[True, False]),\n ))\n def testResourceBudgets(self, set_budget):\n options = dataset_ops.Options()\n if set_budget:\n options.experimental_optimization.autotune_cpu_budget = 1000\n options.experimental_optimization.autotune_ram_budget = 999999999\n\n autotune_settings = options._autotune_settings()\n cpu_budget = autotune_settings[2]\n ram_budget = autotune_settings[3]\n\n if set_budget:\n self.assertEqual(cpu_budget, 1000)\n self.assertEqual(ram_budget, 999999999)\n else:\n self.assertEqual(cpu_budget, 0)\n self.assertEqual(ram_budget, 0)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for parameter_server_strategy_v2.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport functools\nimport os\n\nfrom absl.testing import parameterized\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.distribute import parameter_server_strategy_v2\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops_v2\nfrom tensorflow.python.ops import linalg_ops_impl\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.server_lib import ClusterSpec\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.training.tracking import util as tracking_util\n\n\nclass ParameterServerStrategyV2Test(test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(ParameterServerStrategyV2Test, cls).setUpClass()\n cluster_def = multi_worker_test_base.create_in_process_cluster(\n num_workers=2, num_ps=3)\n cls.cluster_resolver = SimpleClusterResolver(ClusterSpec(cluster_def))\n\n def tearDown(self):\n super().tearDown()\n # reset context to disconnect from the cluster.\n context._reset_context()\n\n def testVariablePlacement(self):\n\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver)\n v1 = variables.Variable(initial_value=0.0)\n with strategy.scope():\n v2 = variables.Variable(initial_value=1.0)\n v3 = variables.Variable(initial_value=2.0)\n v4 = variables.Variable(initial_value=3.0)\n v5 = variables.Variable(initial_value=4.0)\n # v1 was created outside scope so should be on client.\n self.assertEqual(v1.device, \"/job:chief/replica:0/task:0/device:CPU:0\")\n # v2 through v5 are created in scope and in a round-robin manner.\n self.assertEqual(v2.device, \"/job:ps/replica:0/task:0/device:CPU:0\")\n self.assertEqual(v3.device, \"/job:ps/replica:0/task:1/device:CPU:0\")\n self.assertEqual(v4.device, \"/job:ps/replica:0/task:2/device:CPU:0\")\n self.assertEqual(v5.device, \"/job:ps/replica:0/task:0/device:CPU:0\")\n\n @contextlib.contextmanager\n def _assertRaisesUsageError(self):\n with self.assertRaisesRegexp(\n NotImplementedError,\n \"`tf.distribute.experimental.ParameterServerStrategy` must be used \"\n \"with `tf.distribute.experimental.coordinator.ClusterCoordinator`.\"):\n yield\n\n @contextlib.contextmanager\n def _assertRaisesUsageErrorWithSchedule(self):\n with self.assertRaisesRegexp(\n NotImplementedError,\n 
\"`tf.distribute.experimental.ParameterServerStrategy`'s `run` or \"\n \"`reduce` must be used within a function passed to `\"\n \"tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`.\"):\n yield\n\n def testRunNotUsedWithClusterCoordinator(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver)\n dataset = dataset_ops.DatasetV2.range(3)\n with strategy.scope():\n v = variables.Variable(1, dtype=dtypes.int64)\n\n def step_fn(iterator):\n return next(iterator) + v\n\n with self._assertRaisesUsageErrorWithSchedule():\n strategy.run(step_fn, args=(iter(dataset),))\n\n def testReduceNotUsedWithClusterCoordinator(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver)\n with self._assertRaisesUsageErrorWithSchedule():\n strategy.reduce(\"SUM\", None, axis=None)\n\n def testDistributeDatasetNotUsedWithClusterCoordinator(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver)\n dataset = dataset_ops.DatasetV2.range(3)\n with self._assertRaisesUsageError():\n def_function.function(\n lambda: strategy.experimental_distribute_dataset(dataset))()\n\n def testDistributeDatasetFromFunctionNotUsedWithClusterCoordinator(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver)\n\n def dataset_fn(_):\n return dataset_ops.DatasetV2.range(3)\n\n with self._assertRaisesUsageError():\n def_function.function(\n lambda: strategy.distribute_datasets_from_function(dataset_fn))()\n\n\nclass PartitionAwareIdentity(object):\n\n def __call__(self, shape, dtype, **kwargs):\n value = linalg_ops_impl.eye(*shape, dtype=dtype)\n if \"partition_shape\" in kwargs and \"partition_offset\" in kwargs:\n return array_ops.slice(value, kwargs[\"partition_offset\"],\n kwargs[\"partition_shape\"])\n raise AssertionError(\"PartitionAwareIdentity do not support \"\n \"non-partitioned initialization\")\n\n\nclass VariablePartitioningTest(test.TestCase, parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(VariablePartitioningTest, cls).setUpClass()\n cluster_def = multi_worker_test_base.create_in_process_cluster(\n num_workers=2, num_ps=2)\n cls.cluster_resolver = SimpleClusterResolver(ClusterSpec(cluster_def))\n\n def tearDown(self):\n super().tearDown()\n # reset context to disconnect from the cluster.\n context._reset_context()\n\n def testDefaultNoPartition(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver)\n with strategy.scope():\n v = variables.Variable([0, 1, 2, 3])\n\n self.assertIsInstance(v, variables.Variable)\n\n def testBasic(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))\n with strategy.scope():\n init1 = init_ops_v2.Constant([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n v1 = variables.Variable(\n initial_value=lambda: init1(shape=(5, 2), dtype=dtypes.int64),\n shape=(5, 2),\n dtype=dtypes.int64)\n\n init2 = init_ops_v2.Constant([0, 1, 2, 3, 4, 5])\n v2 = variables.Variable(\n initial_value=lambda: init2(shape=(6, 1), dtype=dtypes.int64),\n shape=(6, 1),\n dtype=dtypes.int64)\n\n self.assertIsInstance(v1, sharded_variable.ShardedVariable)\n self.assertLen(v1.variables, 2)\n self.assertRegex(v1.variables[0].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v1.variables[1].device, \"/job:ps/replica:0/task:1\")\n self.assertAllEqual(v1.variables[0], [[0, 1], [2, 3], [4, 
5]])\n self.assertAllEqual(v1.variables[1], [[6, 7], [8, 9]])\n\n self.assertIsInstance(v2, sharded_variable.ShardedVariable)\n self.assertLen(v2.variables, 2)\n self.assertRegex(v2.variables[0].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v2.variables[1].device, \"/job:ps/replica:0/task:1\")\n self.assertAllEqual(v2.variables[0], [[0], [1], [2]])\n self.assertAllEqual(v2.variables[1], [[3], [4], [5]])\n\n def testNonCallableInitialValue(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(4))\n with strategy.scope():\n v = variables.Variable([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n self.assertIsInstance(v, sharded_variable.ShardedVariable)\n self.assertLen(v.variables, 4)\n self.assertRegex(v.variables[0].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v.variables[1].device, \"/job:ps/replica:0/task:1\")\n self.assertRegex(v.variables[2].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v.variables[3].device, \"/job:ps/replica:0/task:1\")\n self.assertAllEqual(v.variables[0], [0, 1, 2])\n self.assertAllEqual(v.variables[1], [3, 4, 5])\n self.assertAllEqual(v.variables[2], [6, 7])\n self.assertAllEqual(v.variables[3], [8, 9])\n\n def testNumPartitionsLargerThanSize(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(4))\n with strategy.scope():\n v = variables.Variable([0, 1, 2])\n\n self.assertIsInstance(v, sharded_variable.ShardedVariable)\n self.assertLen(v.variables, 3)\n self.assertRegex(v.variables[0].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v.variables[1].device, \"/job:ps/replica:0/task:1\")\n self.assertRegex(v.variables[2].device, \"/job:ps/replica:0/task:0\")\n self.assertAllEqual(v.variables[0], [0])\n self.assertAllEqual(v.variables[1], [1])\n self.assertAllEqual(v.variables[2], [2])\n\n def testPartitionToOne(self):\n # For small variables there is only one partition.\n variable_partitioner = sharded_variable.MinSizePartitioner(\n min_shard_bytes=64 << 20, max_shards=2)\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, variable_partitioner)\n with strategy.scope():\n initializer = init_ops_v2.Constant([0] * 10)\n v1 = variables.Variable(\n initial_value=lambda: initializer(shape=(10,), dtype=dtypes.int64),\n shape=(10,),\n dtype=dtypes.int64)\n\n v2 = variables.Variable([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n self.assertIsInstance(v1, variables.Variable)\n self.assertNotIsInstance(v1, sharded_variable.ShardedVariable)\n self.assertRegex(v1.device, \"/job:ps/replica:0/task:0\")\n self.assertAllEqual(v1, [0] * 10)\n\n self.assertIsInstance(v2, variables.Variable)\n self.assertNotIsInstance(v2, sharded_variable.ShardedVariable)\n self.assertRegex(v2.device, \"/job:ps/replica:0/task:1\")\n self.assertAllEqual(v2, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n def testColocateWith(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))\n with strategy.scope():\n v1 = variables.Variable([0, 1, 2, 3])\n\n with strategy.extended.colocate_vars_with(v1.variables[0]):\n v2 = variables.Variable([4, 5])\n\n self.assertIsInstance(v1, sharded_variable.ShardedVariable)\n\n self.assertIsInstance(v2, variables.Variable)\n self.assertNotIsInstance(v2, sharded_variable.ShardedVariable)\n self.assertEqual(v2.device, v1.variables[0].device)\n self.assertAllEqual(v2, [4, 
5])\n\n def testCustomPartitionAwareInitializer(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))\n with strategy.scope():\n initializer = PartitionAwareIdentity()\n initial_value = functools.partial(\n initializer, shape=(4, 4), dtype=dtypes.int64)\n v = variables.Variable(\n initial_value=initial_value, shape=(4, 4), dtype=dtypes.int64)\n\n self.assertIsInstance(v, sharded_variable.ShardedVariable)\n self.assertLen(v.variables, 2)\n self.assertRegex(v.variables[0].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v.variables[1].device, \"/job:ps/replica:0/task:1\")\n self.assertAllEqual(v.variables[0], [[1, 0, 0, 0], [0, 1, 0, 0]])\n self.assertAllEqual(v.variables[1], [[0, 0, 1, 0], [0, 0, 0, 1]])\n\n def testPartitionWhenLackOfInfo(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))\n with strategy.scope():\n initializer = init_ops_v2.Constant([0, 1, 2, 3])\n # Shape is not explicitly specified.\n v1 = variables.Variable(\n initial_value=lambda: initializer(shape=(4,), dtype=dtypes.int64),\n dtype=dtypes.int64)\n # Dtype is not explicitly specified.\n v2 = variables.Variable(\n initial_value=lambda: initializer(shape=(4,), dtype=dtypes.int64),\n shape=(4,))\n # Neither shape nor dtype is explicitly specified.\n v3 = variables.Variable(\n initial_value=lambda: initializer(shape=(4,), dtype=dtypes.int64))\n\n for v in [v1, v2, v3]:\n self.assertIsInstance(v, sharded_variable.ShardedVariable)\n self.assertLen(v.variables, 2)\n self.assertRegex(v.variables[0].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v.variables[1].device, \"/job:ps/replica:0/task:1\")\n self.assertAllEqual(v.variables[0], [0, 1])\n self.assertAllEqual(v.variables[1], [2, 3])\n\n def testInvalidPartitioner(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, lambda shape, dtype: None)\n with self.assertRaisesRegex(ValueError, \"variable_partitioner\"):\n with strategy.scope():\n variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])\n\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, lambda shape, dtype: [])\n with self.assertRaisesRegex(ValueError, \"variable_partitioner\"):\n with strategy.scope():\n variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])\n\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, lambda shape, dtype: [0, 1, 1])\n with self.assertRaisesRegex(ValueError, \"variable_partitioner\"):\n with strategy.scope():\n variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])\n\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, lambda shape, dtype: [2, 2, 1])\n with self.assertRaisesRegex(ValueError, \"variable_partitioner\"):\n with strategy.scope():\n variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])\n\n def testCreateInsideTFFunction(self):\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))\n\n collection = []\n\n @def_function.function\n def create_vars():\n if not collection:\n identity = init_ops_v2.Identity()\n v1 = variables.Variable([[1., 0.], [0., 1.]], dtype=dtypes.float32)\n v2 = variables.Variable(lambda: identity((2, 2), dtypes.float32))\n v3 = variables.Variable(\n lambda: identity((2, 2), dtypes.float32),\n 
dtype=dtypes.float32,\n shape=(2, 2))\n collection.extend([v1, v2, v3])\n\n with strategy.scope():\n create_vars()\n for v in collection:\n self.assertIsInstance(v, sharded_variable.ShardedVariable)\n self.assertLen(v.variables, 2)\n self.assertRegex(v.variables[0].device, \"/job:ps/replica:0/task:0\")\n self.assertRegex(v.variables[1].device, \"/job:ps/replica:0/task:1\")\n self.assertAllEqual(v.variables[0], [[1., 0.]])\n self.assertAllEqual(v.variables[1], [[0., 1.]])\n\n @parameterized.named_parameters(\n (\"Restore\", False, 2),\n (\"RestoreDiffShards\", False, 4),\n (\"DelayedRestore\", True, 2),\n (\"DelayedRestoreDiffShards\", True, 4),\n )\n def testCheckpoint(self, delayed, restore_shards):\n\n def make_variable(name, shape, dtype, initializer):\n initial_value = functools.partial(initializer, shape, dtype=dtype)\n return variables.Variable(\n name=name, initial_value=initial_value, shape=shape, dtype=dtype)\n\n class Model(tracking.AutoTrackable):\n\n def build(self):\n self.w = self._add_variable_with_custom_getter(\n \"w\",\n shape=(4,),\n initializer=init_ops_v2.Ones(),\n getter=make_variable)\n\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))\n ckpt_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n\n with strategy.scope():\n model1 = Model()\n model1.build()\n self.assertIsInstance(model1.w, sharded_variable.ShardedVariable)\n self.assertLen(model1.w.variables, 2)\n model1.w.assign([1., 2., 3., 4.])\n\n cp1 = tracking_util.Checkpoint(model=model1)\n cp1.write(ckpt_dir)\n\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver,\n sharded_variable.FixedShardsPartitioner(restore_shards))\n\n with strategy.scope():\n model2 = Model()\n cp2 = tracking_util.Checkpoint(model=model2)\n if delayed:\n cp2.restore(ckpt_dir)\n model2.build()\n else:\n model2.build()\n cp2.restore(ckpt_dir)\n self.assertIsInstance(model2.w, sharded_variable.ShardedVariable)\n self.assertLen(model2.w.variables, restore_shards)\n if restore_shards == 2:\n self.assertAllEqual(model2.w.variables[0], [1., 2.])\n self.assertAllEqual(model2.w.variables[1], [3., 4.])\n elif restore_shards == 4:\n self.assertAllEqual(model2.w.variables[0], [1.])\n self.assertAllEqual(model2.w.variables[1], [2.])\n self.assertAllEqual(model2.w.variables[2], [3.])\n self.assertAllEqual(model2.w.variables[3], [4.])\n\n\nclass ClusterTypeNameTest(test.TestCase):\n\n def testArbitraryChiefName(self):\n cluster_def = multi_worker_test_base._create_cluster(\n num_workers=1,\n num_ps=1,\n has_chief=True,\n chief_name=\"some_arbitrary_name\")\n cluster_def[\"chief\"] = [\n \"localhost:%d\" % multi_worker_test_base.pick_unused_port()\n ]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def), rpc_layer=\"grpc\")\n with self.assertRaisesRegexp(ValueError, \"Disallowed task type found in\"):\n parameter_server_strategy_v2.ParameterServerStrategyV2(cluster_resolver)\n\n def testArbitraryWorkerName(self):\n cluster_def = multi_worker_test_base._create_cluster(\n num_workers=1, num_ps=1, worker_name=\"some_arbitrary_name\")\n cluster_def[\"chief\"] = [\n \"localhost:%d\" % multi_worker_test_base.pick_unused_port()\n ]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def), rpc_layer=\"grpc\")\n with self.assertRaisesRegexp(ValueError, \"Disallowed task type found in\"):\n parameter_server_strategy_v2.ParameterServerStrategyV2(cluster_resolver)\n\n def testArbitraryPsName(self):\n 
cluster_def = multi_worker_test_base._create_cluster(\n num_workers=1, num_ps=1, ps_name=\"some_arbitrary_name\")\n cluster_def[\"chief\"] = [\n \"localhost:%d\" % multi_worker_test_base.pick_unused_port()\n ]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def), rpc_layer=\"grpc\")\n with self.assertRaisesRegexp(ValueError, \"Disallowed task type found in\"):\n parameter_server_strategy_v2.ParameterServerStrategyV2(cluster_resolver)\n\n def testArbitraryCurrentTaskType(self):\n cluster_def = multi_worker_test_base._create_cluster(\n num_workers=1, num_ps=1)\n cluster_def[\"chief\"] = [\n \"localhost:%d\" % multi_worker_test_base.pick_unused_port()\n ]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def), rpc_layer=\"grpc\", task_type=\"foobar\")\n with self.assertRaisesRegexp(ValueError, \"Unrecognized task_type: foobar\"):\n parameter_server_strategy_v2.ParameterServerStrategyV2(cluster_resolver)\n\n def testMoreThanOneChief(self):\n cluster_def = multi_worker_test_base._create_cluster(\n num_workers=1, num_ps=1)\n chief_ports = [multi_worker_test_base.pick_unused_port() for _ in range(3)]\n cluster_def[\"chief\"] = [\"localhost:%s\" % port for port in chief_ports]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def),\n rpc_layer=\"grpc\",\n task_type=\"chief\",\n task_id=1)\n with self.assertRaisesRegexp(ValueError,\n \"There must be at most one 'chief' job.\"):\n parameter_server_strategy_v2.ParameterServerStrategyV2(cluster_resolver)\n\n def testLessThanOneWorker(self):\n cluster_def = multi_worker_test_base._create_cluster(\n num_workers=0, num_ps=1)\n cluster_def[\"chief\"] = [\n \"localhost:%d\" % multi_worker_test_base.pick_unused_port()\n ]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def), rpc_layer=\"grpc\", task_type=\"ps\", task_id=0)\n with self.assertRaisesRegexp(ValueError,\n \"There must be at least one worker.\"):\n parameter_server_strategy_v2.ParameterServerStrategyV2(cluster_resolver)\n\n def testLessThanOnePs(self):\n cluster_def = multi_worker_test_base._create_cluster(\n num_workers=1, num_ps=0)\n cluster_def[\"chief\"] = [\n \"localhost:%d\" % multi_worker_test_base.pick_unused_port()\n ]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def),\n rpc_layer=\"grpc\",\n task_type=\"worker\",\n task_id=0)\n with self.assertRaisesRegexp(ValueError, \"There must be at least one ps.\"):\n parameter_server_strategy_v2.ParameterServerStrategyV2(cluster_resolver)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "numpy.array", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.backend_config.epsilon", "tensorflow.python.training.gen_training_ops.ResourceSparseApplyAdadelta", "tensorflow.python.framework.ops.convert_to_tensor_v2_with_dispatch", "tensorflow.python.training.gen_training_ops.ResourceApplyAdadelta" ], [ "tensorflow.python.training.tracking.tracking.AutoTrackable", "tensorflow.python.saved_model.function_deserialization.fix_node_def", "tensorflow.python.saved_model.loader_impl.get_asset_tensors", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.util.nest.flatten", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.training.monitored_session.Scaffold.default_local_init_op", "tensorflow.python.training.saver._import_meta_graph_with_return_elements", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.saved_model.function_deserialization.load_function_def_library", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.eager.wrap_function._get_element_from_tensor_info", "tensorflow.python.saved_model.loader_impl.get_init_op", "tensorflow.python.training.tracking.tracking.Asset", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.framework.ops.uid", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.saved_model.signature_serialization.create_signature_map" ], [ "tensorflow.python.framework.combinations.NamedObject", "tensorflow.python.data.experimental.ops.threadpool.PrivateThreadPool", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.data.experimental.ops.grouping.group_by_window", "tensorflow.python.data.kernel_tests.test_base.graph_only_combinations", "tensorflow.python.data.experimental.ops.grouping.Reducer", "tensorflow.python.platform.test.main", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.python.data.kernel_tests.test_base.default_test_combinations", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.data.ops.dataset_ops.make_initializable_iterator", "tensorflow.python.framework.ops.Graph", "tensorflow.python.data.ops.dataset_ops.Options", "tensorflow.python.data.ops.dataset_ops._OptimizeDataset", "tensorflow.python.framework.combinations.combine", "tensorflow.python.data.experimental.ops.testing.assert_next", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.data.experimental.ops.grouping.group_by_reducer", "numpy.ones", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.data.experimental.ops.batching.map_and_batch", "tensorflow.python.data.experimental.ops.scan_ops.scan" ], [ "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.init_ops_v2.Identity", "tensorflow.python.eager.context._reset_context", "tensorflow.python.training.tracking.util.Checkpoint", "tensorflow.python.ops.linalg_ops_impl.eye", "tensorflow.python.distribute.multi_worker_test_base.pick_unused_port", "tensorflow.python.distribute.multi_worker_test_base.create_in_process_cluster", "tensorflow.python.ops.init_ops_v2.Ones", "tensorflow.python.distribute.sharded_variable.FixedShardsPartitioner", "tensorflow.python.distribute.parameter_server_strategy_v2.ParameterServerStrategyV2", "tensorflow.python.distribute.sharded_variable.MinSizePartitioner", 
"tensorflow.python.ops.init_ops_v2.Constant", "tensorflow.python.eager.test.main", "tensorflow.python.data.ops.dataset_ops.DatasetV2.range", "tensorflow.python.distribute.multi_worker_test_base._create_cluster", "tensorflow.python.training.server_lib.ClusterSpec", "tensorflow.python.ops.array_ops.slice" ] ]
ruaruaruabick/waveglow
[ "636d2ba2bda4f4efd5f13f8e46aef23d8b7881bd" ]
[ "train.py" ]
[ "# -*- coding: utf-8 -*\n# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\nimport copy\nimport argparse\nimport json\nimport os\nimport torch\nimport numpy as np\n#=====START: ADDED FOR DISTRIBUTED======\nfrom distributed import init_distributed, apply_gradient_allreduce, reduce_tensor\nfrom torch.utils.data.distributed import DistributedSampler\n#=====END: ADDED FOR DISTRIBUTED======\n\nfrom torch.utils.data import DataLoader\nfrom glow import WaveGlow, WaveGlowLoss\nfrom mel2samp import Mel2Samp\n\ndef load_checkpoint(checkpoint_path, model, optimizer):\n assert os.path.isfile(checkpoint_path)\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n iteration = checkpoint_dict['iteration']\n optimizer.load_state_dict(checkpoint_dict['optimizer'])\n model_for_loading = checkpoint_dict['model']\n model.load_state_dict(model_for_loading.state_dict())\n print(\"Loaded checkpoint '{}' (iteration {})\" .format(\n checkpoint_path, iteration))\n return model, optimizer, iteration\n\ndef save_checkpoint(model, optimizer, schedular,learning_rate, iteration, filepath):\n print(\"Saving model and optimizer state at iteration {} to {}\".format(\n iteration, filepath))\n model_for_saving = WaveGlow(**waveglow_config).cuda()\n model_for_saving.load_state_dict(model.state_dict())\n torch.save({'model': model_for_saving,\n 'iteration': iteration,\n 'optimizer': optimizer.state_dict(),\n 'learning_rate': learning_rate,\n 'schedular':schedular\n }, filepath)\ndef validate(model,criterion,valset,epoch,batch_size,n_gpus,rank,output_directory,logger):\n model.eval()\n with torch.no_grad():\n test_sampler = DistributedSampler(valset) if n_gpus > 1 else None\n test_loader = DataLoader(valset, num_workers=1, shuffle=False,\n sampler=test_sampler,\n batch_size=batch_size,\n pin_memory=False,\n drop_last=True)\n val_loss =[]\n for i,batch in enumerate(test_loader):\n 
model.zero_grad()\n #mel=batch*80*63,batch*16000\n mel, audio = batch\n #封装数据\n mel = torch.autograd.Variable(mel.cuda())\n audio = torch.autograd.Variable(audio.cuda())\n outputs = model((mel, audio))\n #计算loss\n loss = criterion(outputs)\n if num_gpus > 1:\n reduced_loss = reduce_tensor(loss.data, num_gpus).item()\n else:\n reduced_loss = loss.item()\n val_loss.append(reduced_loss)\n logger.add_scalar('test_loss', np.mean(val_loss), epoch)\n\ndef train(num_gpus, rank, group_name,tnum, output_directory, epochs, learning_rate,\n sigma, iters_per_checkpoint, batch_size, seed, fp16_run,\n checkpoint_path, with_tensorboard):\n #设定随机数以便复现\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n #=====START: ADDED FOR DISTRIBUTED======\n if num_gpus > 1:\n init_distributed(rank, num_gpus, group_name, **dist_config)\n #=====END: ADDED FOR DISTRIBUTED======\n #计算Loss\n criterion = WaveGlowLoss(sigma)\n #构建waveglow模型\n model = WaveGlow(**waveglow_config).cuda()\n pytorch_total_params = sum(p.numel() for p in model.parameters())\n pytorch_total_params_train = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"param\", pytorch_total_params)\n print(\"param trainable\", pytorch_total_params_train)\n #=====START: ADDED FOR DISTRIBUTED======\n if num_gpus > 1:\n model = apply_gradient_allreduce(model)\n #=====END: ADDED FOR DISTRIBUTED======\n #优化器\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n #apex加速\n if fp16_run:\n from apex import amp\n model, optimizer = amp.initialize(model, optimizer, opt_level='O1')\n\n # Load checkpoint if one exists\n iteration = 0\n if checkpoint_path != \"\":\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n #iteration = checkpoint_dict['iteration']\n #optimizer.load_state_dict(checkpoint_dict['optimizer'])\n model_for_loading = checkpoint_dict['model']\n model.load_state_dict(model_for_loading.state_dict())\n print(\"Loaded checkpoint '{}' (iteration {})\".format(\n checkpoint_path, iteration))\n #model, optimizer, iteration = load_checkpoint(checkpoint_path, model,\n # optimizer)\n iteration += 1 # next iteration is iteration + 1\n temp_config = copy.deepcopy(data_config)\n temp_config['training_files'] = data_config['training_files'].replace('1',str(tnum))\n trainset = Mel2Samp(**data_config)\n testconfig = copy.deepcopy(data_config)\n testconfig[\"training_files\"] = \"traintestset_eng/test_files_eng.txt\"\n testset = Mel2Samp(**testconfig)\n # =====START: ADDED FOR DISTRIBUTED======\n train_sampler = DistributedSampler(trainset) if num_gpus > 1 else None\n # =====END: ADDED FOR DISTRIBUTED======\n train_loader = DataLoader(trainset, num_workers=1, shuffle=False,\n sampler=train_sampler,\n batch_size=batch_size,\n pin_memory=False,\n drop_last=True)\n\n # Get shared output_directory ready\n if rank == 0:\n if not os.path.isdir(output_directory):\n os.makedirs(output_directory)\n os.chmod(output_directory, 0o775)\n print(\"output directory\", output_directory)\n #用不到\n if with_tensorboard and rank == 0:\n from tensorboardX import SummaryWriter\n logger = SummaryWriter(os.path.join(output_directory, 'logs'))\n\n model.train()\n epoch_offset = max(0, int(iteration / len(train_loader)))\n # for param_group in optimizer.param_groups:\n # param_group['lr'] = 5e-5\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer,step_size=200,gamma=0.25)\n # ================ MAIN TRAINNIG LOOP! 
===================\n for epoch in range(epoch_offset, epochs):\n print(\"Epoch: {}\".format(epoch))\n for i, batch in enumerate(train_loader):\n #梯度置0,z符合高斯0分布\n model.zero_grad()\n #mel=batch*80*63,batch*16000\n mel, audio = batch\n #封装数据\n mel = torch.autograd.Variable(mel.cuda())\n audio = torch.autograd.Variable(audio.cuda())\n outputs = model((mel, audio))\n #计算loss\n loss = criterion(outputs)\n if num_gpus > 1:\n reduced_loss = reduce_tensor(loss.data, num_gpus).item()\n else:\n reduced_loss = loss.item()\n #apex加速还原\n if fp16_run:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n optimizer.step()\n \n if not reduced_loss < 0:\n print(\"no\")\n print(\"{}:\\t{:.9f}\".format(iteration, reduced_loss))\n if with_tensorboard and rank == 0:\n logger.add_scalar('training_loss', reduced_loss, i + len(train_loader) * epoch)\n\n if (iteration % iters_per_checkpoint == 0):\n if rank == 0:\n checkpoint_path = \"{}/waveglow_{}\".format(\n output_directory, iteration)\n save_checkpoint(model, optimizer, scheduler,learning_rate, iteration,\n checkpoint_path)\n\n iteration += 1\n # num_p = 0\n # for param in model.parameters():\n # num_p += param.numel()\n # print(num_p)\n #scheduler.step()\n # validate\n if rank == 0:\n validate(model,criterion,testset,epoch,batch_size,num_gpus,rank,output_directory,logger)\n model.train()\n checkpoint_path = \"{}/test{}_eng_model\".format(\n output_directory, tnum)\n save_checkpoint(model, optimizer, scheduler, learning_rate, iteration,\n checkpoint_path)\nif __name__ == \"__main__\":\n #解析参数\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', type=str,\n help='JSON file for configuration')\n parser.add_argument('-r', '--rank', type=int, default=0,\n help='rank of process for distributed')\n parser.add_argument('-g', '--group_name', type=str, default='',\n help='name of group for distributed')\n args = parser.parse_args()\n\n # Parse configs. Globals nicer in this case\n with open(args.config) as f:\n data = f.read()\n config = json.loads(data)\n train_config = config[\"train_config\"]\n global data_config\n data_config = config[\"data_config\"]\n global dist_config\n dist_config = config[\"dist_config\"]\n global waveglow_config\n waveglow_config = config[\"waveglow_config\"]\n\n num_gpus = torch.cuda.device_count()\n if num_gpus > 1:\n if args.group_name == '':\n print(\"WARNING: Multiple GPUs detected but no distributed group set\")\n print(\"Only running 1 GPU. Use distributed.py for multiple GPUs\")\n num_gpus = 1\n\n if num_gpus == 1 and args.rank != 0:\n raise Exception(\"Doing single GPU training on rank > 0\")\n #自动使用高效算法\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n for i in range(1,2):\n tnum=i\n train(num_gpus, args.rank, args.group_name,tnum, **train_config)\n" ]
[ [ "torch.cuda.manual_seed", "torch.optim.lr_scheduler.StepLR", "torch.no_grad", "numpy.mean", "torch.cuda.device_count", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.load", "torch.utils.data.distributed.DistributedSampler" ] ]
HanxunH/MDAttack
[ "fd4107c857f11385685b6daf0de7a455749528d5" ]
[ "defense/Overfitting.py" ]
[ "'''\nBased on code from https://github.com/locuslab/robust_overfitting\n'''\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os\nimport torchvision.transforms as tf\nfrom models.wideresnet import WideResNet\nfrom . import utils\n\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n\n\ndef _cifar_meanstd_normalize(x):\n f = tf.Normalize([0.5071, 0.4865, 0.4409],\n [0.2673, 0.2564, 0.2762])\n x = f(x)\n return x\n\n\nclass DefenseOverfitting(torch.nn.Module):\n file_id = '12jPK-KXQc7-AF3w1zCbQxhBQAxm4KHzZ'\n destination = 'checkpoints/Overfitting/'\n\n def __init__(self):\n super(DefenseOverfitting, self).__init__()\n # Download pretrained weights\n filename = os.path.join(\n self.destination, 'cifar10_wide10_linf_eps8.pth')\n if not os.path.exists(filename):\n if not os.path.exists(self.destination):\n os.makedirs(self.destination, exist_ok=True)\n utils.download_file_from_google_drive(self.file_id, filename)\n checkpoint = torch.load(filename)\n\n def strip_data_parallel(s):\n if s.startswith('module'):\n return s[len('module.'):]\n else:\n return s\n checkpoint = {strip_data_parallel(k): v for k, v in checkpoint.items()}\n\n # Load Weights\n self.base_model = WideResNet(depth=34, widen_factor=10, num_classes=10)\n self.base_model = self.base_model.to(device)\n self.base_model.load_state_dict(checkpoint, strict=False)\n self.base_model.eval()\n\n def forward(self, x):\n x = _cifar_meanstd_normalize(x)\n return self.base_model(x)\n" ]
[ [ "torch.device", "torch.cuda.is_available", "torch.load" ] ]
vishalbelsare/zvt
[ "d55051147274c0a4157f08ec60908c781a323c8f", "d55051147274c0a4157f08ec60908c781a323c8f" ]
[ "src/zvt/recorders/joinquant/misc/jq_hk_holder_recorder.py", "src/zvt/contract/api.py" ]
[ "import pandas as pd\nfrom jqdatapy.api import run_query\n\nfrom zvt.contract.api import df_to_db, get_data\nfrom zvt.contract.recorder import TimestampsDataRecorder\nfrom zvt.domain import Index\nfrom zvt.domain.misc.holder import HkHolder\nfrom zvt.recorders.joinquant.common import to_entity_id\nfrom zvt.utils.pd_utils import pd_is_not_null\nfrom zvt.utils.time_utils import to_time_str, TIME_FORMAT_DAY, to_pd_timestamp\n\n\n# 这里选择继承TimestampsDataRecorder是因为\n# 1)时间上就是交易日的列表,这个是可知的,可以以此为增量计算点\n# 2)HkHolder数据结构的设计:\n# 沪股通/深股通 每日 持有 标的(股票)的情况\n# 抓取的角度是entity从Index中获取 沪股通/深股通,然后按 每日 去获取\n\n\nclass JoinquantHkHolderRecorder(TimestampsDataRecorder):\n entity_provider = \"exchange\"\n entity_schema = Index\n\n provider = \"joinquant\"\n data_schema = HkHolder\n\n def __init__(\n self,\n day_data=False,\n force_update=False,\n sleeping_time=5,\n real_time=False,\n start_timestamp=None,\n end_timestamp=None,\n ) -> None:\n # 聚宽编码\n # 市场通编码\t市场通名称\n # 310001\t沪股通\n # 310002\t深股通\n # 310003\t港股通(沪)\n # 310004\t港股通(深)\n codes = [\"310001\", \"310002\"]\n\n super().__init__(\n force_update,\n sleeping_time,\n [\"cn\"],\n None,\n codes,\n day_data,\n real_time=real_time,\n fix_duplicate_way=\"ignore\",\n start_timestamp=start_timestamp,\n end_timestamp=end_timestamp,\n )\n\n def init_timestamps(self, entity):\n # 聚宽数据从2017年3月17开始\n return pd.date_range(start=to_pd_timestamp(\"2017-3-17\"), end=pd.Timestamp.now(), freq=\"B\").tolist()\n\n # 覆盖这个方式是因为,HkHolder里面entity其实是股票,而recorder中entity是 Index类型(沪股通/深股通)\n def get_latest_saved_record(self, entity):\n order = eval(\"self.data_schema.{}.desc()\".format(self.get_evaluated_time_field()))\n\n records = get_data(\n filters=[HkHolder.holder_code == entity.code],\n provider=self.provider,\n data_schema=self.data_schema,\n order=order,\n limit=1,\n return_type=\"domain\",\n session=self.session,\n )\n if records:\n return records[0]\n return None\n\n def record(self, entity, start, end, size, timestamps):\n for timestamp in timestamps:\n df = run_query(\n table=\"finance.STK_HK_HOLD_INFO\", conditions=f\"link_id#=#{entity.code}&day#=#{to_time_str(timestamp)}\"\n )\n print(df)\n\n if pd_is_not_null(df):\n df.rename(\n columns={\"day\": \"timestamp\", \"link_id\": \"holder_code\", \"link_name\": \"holder_name\"}, inplace=True\n )\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"])\n\n df[\"entity_id\"] = df[\"code\"].apply(lambda x: to_entity_id(entity_type=\"stock\", jq_code=x))\n df[\"code\"] = df[\"code\"].apply(lambda x: x.split(\".\")[0])\n\n # id格式为:{holder_name}_{entity_id}_{timestamp}\n df[\"id\"] = df[[\"holder_name\", \"entity_id\", \"timestamp\"]].apply(\n lambda se: \"{}_{}_{}\".format(\n se[\"holder_name\"], se[\"entity_id\"], to_time_str(se[\"timestamp\"], fmt=TIME_FORMAT_DAY)\n ),\n axis=1,\n )\n\n df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)\n\n\nif __name__ == \"__main__\":\n JoinquantHkHolderRecorder(sleeping_time=10).run()\n# the __all__ is generated\n__all__ = [\"JoinquantHkHolderRecorder\"]\n", "# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport platform\nfrom typing import List, Union, Type\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import func, exists, and_\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\nfrom sqlalchemy.orm import Query\nfrom sqlalchemy.orm import sessionmaker, Session\n\nfrom zvt import zvt_env\nfrom zvt.contract import IntervalLevel\nfrom zvt.contract import 
zvt_context\nfrom zvt.contract.schema import Mixin, TradableEntity\nfrom zvt.utils.pd_utils import pd_is_not_null, index_df\nfrom zvt.utils.time_utils import to_pd_timestamp\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_db_name(data_schema: DeclarativeMeta) -> str:\n \"\"\"\n get db name of the domain schema\n\n :param data_schema:\n :type data_schema:\n :return:\n :rtype:\n \"\"\"\n for db_name, base in zvt_context.dbname_map_base.items():\n if issubclass(data_schema, base):\n return db_name\n\n\ndef get_db_engine(\n provider: str, db_name: str = None, data_schema: object = None, data_path: str = zvt_env[\"data_path\"]\n) -> Engine:\n \"\"\"\n get db engine of the (provider,db_name) or (provider,data_schema)\n\n\n :param data_path:\n :param provider:\n :type provider:\n :param db_name:\n :type db_name:\n :param data_schema:\n :type data_schema:\n :return:\n :rtype:\n \"\"\"\n if data_schema:\n db_name = get_db_name(data_schema=data_schema)\n\n db_path = os.path.join(data_path, \"{}_{}.db?check_same_thread=False\".format(provider, db_name))\n\n engine_key = \"{}_{}\".format(provider, db_name)\n db_engine = zvt_context.db_engine_map.get(engine_key)\n if not db_engine:\n db_engine = create_engine(\"sqlite:///\" + db_path, echo=False)\n zvt_context.db_engine_map[engine_key] = db_engine\n return db_engine\n\n\ndef get_schemas(provider: str) -> List[DeclarativeMeta]:\n \"\"\"\n get domain schemas supported by the provider\n\n :param provider:\n :type provider:\n :return:\n :rtype:\n \"\"\"\n schemas = []\n for provider1, dbs in zvt_context.provider_map_dbnames.items():\n if provider == provider1:\n for dbname in dbs:\n schemas1 = zvt_context.dbname_map_schemas.get(dbname)\n if schemas1:\n schemas += schemas1\n return schemas\n\n\ndef get_db_session(provider: str, db_name: str = None, data_schema: object = None, force_new: bool = False) -> Session:\n \"\"\"\n get db session of the (provider,db_name) or (provider,data_schema)\n\n :param provider:\n :type provider:\n :param db_name:\n :type db_name:\n :param data_schema:\n :type data_schema:\n :param force_new:\n :type force_new:\n\n :return:\n :rtype:\n \"\"\"\n if data_schema:\n db_name = get_db_name(data_schema=data_schema)\n\n session_key = \"{}_{}\".format(provider, db_name)\n\n if force_new:\n return get_db_session_factory(provider, db_name, data_schema)()\n\n session = zvt_context.sessions.get(session_key)\n if not session:\n session = get_db_session_factory(provider, db_name, data_schema)()\n zvt_context.sessions[session_key] = session\n return session\n\n\ndef get_db_session_factory(provider: str, db_name: str = None, data_schema: object = None):\n \"\"\"\n get db session factory of the (provider,db_name) or (provider,data_schema)\n\n :param provider:\n :type provider:\n :param db_name:\n :type db_name:\n :param data_schema:\n :type data_schema:\n :return:\n :rtype:\n \"\"\"\n if data_schema:\n db_name = get_db_name(data_schema=data_schema)\n\n session_key = \"{}_{}\".format(provider, db_name)\n session = zvt_context.db_session_map.get(session_key)\n if not session:\n session = sessionmaker()\n zvt_context.db_session_map[session_key] = session\n return session\n\n\ndef get_entity_schema(entity_type: str) -> Type[TradableEntity]:\n \"\"\"\n get entity schema from name\n\n :param entity_type:\n :type entity_type:\n :return:\n :rtype:\n \"\"\"\n return zvt_context.tradable_schema_map[entity_type]\n\n\ndef get_schema_by_name(name: str) -> DeclarativeMeta:\n \"\"\"\n get domain schema by the name\n\n :param name:\n :type name:\n 
:return:\n :rtype:\n \"\"\"\n for schema in zvt_context.schemas:\n if schema.__name__ == name:\n return schema\n\n\ndef get_schema_columns(schema: DeclarativeMeta) -> List[str]:\n \"\"\"\n get all columns of the domain schema\n\n :param schema:\n :type schema:\n :return:\n :rtype:\n \"\"\"\n return schema.__table__.columns.keys()\n\n\ndef common_filter(\n query: Query,\n data_schema,\n start_timestamp=None,\n end_timestamp=None,\n filters=None,\n order=None,\n limit=None,\n time_field=\"timestamp\",\n):\n assert data_schema is not None\n time_col = eval(\"data_schema.{}\".format(time_field))\n\n if start_timestamp:\n query = query.filter(time_col >= to_pd_timestamp(start_timestamp))\n if end_timestamp:\n query = query.filter(time_col <= to_pd_timestamp(end_timestamp))\n\n if filters:\n for filter in filters:\n query = query.filter(filter)\n if order is not None:\n query = query.order_by(order)\n else:\n query = query.order_by(time_col.asc())\n if limit:\n query = query.limit(limit)\n\n return query\n\n\ndef del_data(data_schema: Type[Mixin], filters: List = None, provider=None):\n if not provider:\n provider = data_schema.providers[0]\n\n session = get_db_session(provider=provider, data_schema=data_schema)\n query = session.query(data_schema)\n if filters:\n for f in filters:\n query = query.filter(f)\n query.delete()\n session.commit()\n\n\ndef get_one(data_schema, id: str, provider: str = None, session: Session = None):\n if \"providers\" not in data_schema.__dict__:\n logger.error(\"no provider registered for: {}\", data_schema)\n if not provider:\n provider = data_schema.providers[0]\n\n if not session:\n session = get_db_session(provider=provider, data_schema=data_schema)\n\n return session.query(data_schema).get(id)\n\n\ndef get_data(\n data_schema: Type[Mixin],\n ids: List[str] = None,\n entity_ids: List[str] = None,\n entity_id: str = None,\n codes: List[str] = None,\n code: str = None,\n level: Union[IntervalLevel, str] = None,\n provider: str = None,\n columns: List = None,\n col_label: dict = None,\n return_type: str = \"df\",\n start_timestamp: Union[pd.Timestamp, str] = None,\n end_timestamp: Union[pd.Timestamp, str] = None,\n filters: List = None,\n session: Session = None,\n order=None,\n limit: int = None,\n index: Union[str, list] = None,\n drop_index_col=False,\n time_field: str = \"timestamp\",\n):\n if \"providers\" not in data_schema.__dict__:\n logger.error(\"no provider registered for: {}\", data_schema)\n if not provider:\n provider = data_schema.providers[0]\n\n if not session:\n session = get_db_session(provider=provider, data_schema=data_schema)\n\n time_col = eval(\"data_schema.{}\".format(time_field))\n\n if columns:\n # support str\n if type(columns[0]) == str:\n columns_ = []\n for col in columns:\n if isinstance(col, str):\n columns_.append(eval(\"data_schema.{}\".format(col)))\n else:\n columns_.append(col)\n columns = columns_\n\n # make sure get timestamp\n if time_col not in columns:\n columns.append(time_col)\n\n if col_label:\n columns_ = []\n for col in columns:\n if col.name in col_label:\n columns_.append(col.label(col_label.get(col.name)))\n else:\n columns_.append(col)\n columns = columns_\n\n query = session.query(*columns)\n else:\n query = session.query(data_schema)\n\n if entity_id:\n query = query.filter(data_schema.entity_id == entity_id)\n if entity_ids:\n query = query.filter(data_schema.entity_id.in_(entity_ids))\n if code:\n query = query.filter(data_schema.code == code)\n if codes:\n query = query.filter(data_schema.code.in_(codes))\n 
if ids:\n query = query.filter(data_schema.id.in_(ids))\n\n # we always store different level in different schema,the level param is not useful now\n if level:\n try:\n # some schema has no level,just ignore it\n data_schema.level\n if type(level) == IntervalLevel:\n level = level.value\n query = query.filter(data_schema.level == level)\n except Exception as e:\n pass\n\n query = common_filter(\n query,\n data_schema=data_schema,\n start_timestamp=start_timestamp,\n end_timestamp=end_timestamp,\n filters=filters,\n order=order,\n limit=limit,\n time_field=time_field,\n )\n\n if return_type == \"df\":\n df = pd.read_sql(query.statement, query.session.bind)\n if pd_is_not_null(df):\n if index:\n df = index_df(df, index=index, drop=drop_index_col, time_field=time_field)\n return df\n elif return_type == \"domain\":\n return query.all()\n elif return_type == \"dict\":\n return [item.__dict__ for item in query.all()]\n\n\ndef data_exist(session, schema, id):\n return session.query(exists().where(and_(schema.id == id))).scalar()\n\n\ndef get_data_count(data_schema, filters=None, session=None):\n query = session.query(data_schema)\n if filters:\n for filter in filters:\n query = query.filter(filter)\n\n count_q = query.statement.with_only_columns([func.count(data_schema.id)]).order_by(None)\n count = session.execute(count_q).scalar()\n return count\n\n\ndef get_group(provider, data_schema, column, group_func=func.count, session=None):\n if not session:\n session = get_db_session(provider=provider, data_schema=data_schema)\n if group_func:\n query = session.query(column, group_func(column)).group_by(column)\n else:\n query = session.query(column).group_by(column)\n df = pd.read_sql(query.statement, query.session.bind)\n return df\n\n\ndef decode_entity_id(entity_id: str):\n result = entity_id.split(\"_\")\n entity_type = result[0]\n exchange = result[1]\n code = \"\".join(result[2:])\n return entity_type, exchange, code\n\n\ndef get_entity_type(entity_id: str):\n entity_type, _, _ = decode_entity_id(entity_id)\n return entity_type\n\n\ndef get_entity_exchange(entity_id: str):\n _, exchange, _ = decode_entity_id(entity_id)\n return exchange\n\n\ndef get_entity_code(entity_id: str):\n _, _, code = decode_entity_id(entity_id)\n return code\n\n\ndef df_to_db(\n df: pd.DataFrame,\n data_schema: DeclarativeMeta,\n provider: str,\n force_update: bool = False,\n sub_size: int = 5000,\n drop_duplicates: bool = True,\n) -> object:\n \"\"\"\n FIXME:improve\n store the df to db\n\n :param df:\n :param data_schema:\n :param provider:\n :param force_update:\n :param sub_size:\n :param drop_duplicates:\n :return:\n \"\"\"\n if not pd_is_not_null(df):\n return 0\n\n if drop_duplicates and df.duplicated(subset=\"id\").any():\n logger.warning(f\"remove duplicated:{df[df.duplicated()]}\")\n df = df.drop_duplicates(subset=\"id\", keep=\"last\")\n\n db_engine = get_db_engine(provider, data_schema=data_schema)\n\n schema_cols = get_schema_columns(data_schema)\n cols = set(df.columns.tolist()) & set(schema_cols)\n\n if not cols:\n print(\"wrong cols\")\n return 0\n\n df = df[cols]\n\n size = len(df)\n\n if platform.system() == \"Windows\":\n sub_size = 900\n\n if size >= sub_size:\n step_size = int(size / sub_size)\n if size % sub_size:\n step_size = step_size + 1\n else:\n step_size = 1\n\n saved = 0\n\n for step in range(step_size):\n df_current = df.iloc[sub_size * step : sub_size * (step + 1)]\n if force_update:\n session = get_db_session(provider=provider, data_schema=data_schema)\n ids = 
df_current[\"id\"].tolist()\n if len(ids) == 1:\n sql = f'delete from `{data_schema.__tablename__}` where id = \"{ids[0]}\"'\n else:\n sql = f\"delete from `{data_schema.__tablename__}` where id in {tuple(ids)}\"\n\n session.execute(sql)\n session.commit()\n\n else:\n current = get_data(\n data_schema=data_schema, columns=[data_schema.id], provider=provider, ids=df_current[\"id\"].tolist()\n )\n if pd_is_not_null(current):\n df_current = df_current[~df_current[\"id\"].isin(current[\"id\"])]\n\n if pd_is_not_null(df_current):\n saved = saved + len(df_current)\n df_current.to_sql(data_schema.__tablename__, db_engine, index=False, if_exists=\"append\")\n\n return saved\n\n\ndef get_entities(\n entity_schema: Type[TradableEntity] = None,\n entity_type: str = None,\n exchanges: List[str] = None,\n ids: List[str] = None,\n entity_ids: List[str] = None,\n entity_id: str = None,\n codes: List[str] = None,\n code: str = None,\n provider: str = None,\n columns: List = None,\n col_label: dict = None,\n return_type: str = \"df\",\n start_timestamp: Union[pd.Timestamp, str] = None,\n end_timestamp: Union[pd.Timestamp, str] = None,\n filters: List = None,\n session: Session = None,\n order=None,\n limit: int = None,\n index: Union[str, list] = \"code\",\n) -> List:\n if not entity_schema:\n entity_schema = zvt_context.tradable_schema_map[entity_type]\n\n if not provider:\n provider = entity_schema.providers[0]\n\n if not order:\n order = entity_schema.code.asc()\n\n if exchanges:\n if filters:\n filters.append(entity_schema.exchange.in_(exchanges))\n else:\n filters = [entity_schema.exchange.in_(exchanges)]\n\n return get_data(\n data_schema=entity_schema,\n ids=ids,\n entity_ids=entity_ids,\n entity_id=entity_id,\n codes=codes,\n code=code,\n level=None,\n provider=provider,\n columns=columns,\n col_label=col_label,\n return_type=return_type,\n start_timestamp=start_timestamp,\n end_timestamp=end_timestamp,\n filters=filters,\n session=session,\n order=order,\n limit=limit,\n index=index,\n )\n\n\ndef get_entity_ids(\n entity_type=\"stock\", entity_schema: TradableEntity = None, exchanges=None, codes=None, provider=None, filters=None\n):\n df = get_entities(\n entity_type=entity_type,\n entity_schema=entity_schema,\n exchanges=exchanges,\n codes=codes,\n provider=provider,\n filters=filters,\n )\n if pd_is_not_null(df):\n return df[\"entity_id\"].to_list()\n return None\n\n\n# the __all__ is generated\n__all__ = [\n \"get_db_name\",\n \"get_db_engine\",\n \"get_schemas\",\n \"get_db_session\",\n \"get_db_session_factory\",\n \"get_entity_schema\",\n \"get_schema_by_name\",\n \"get_schema_columns\",\n \"common_filter\",\n \"del_data\",\n \"get_one\",\n \"get_data\",\n \"data_exist\",\n \"get_data_count\",\n \"get_group\",\n \"decode_entity_id\",\n \"get_entity_type\",\n \"get_entity_exchange\",\n \"get_entity_code\",\n \"df_to_db\",\n \"get_entities\",\n \"get_entity_ids\",\n]\n" ]
[ [ "pandas.to_datetime", "pandas.Timestamp.now" ], [ "pandas.read_sql" ] ]
robbierobinette/rcv-tensorflow
[ "984852902f465bb6f61ba863e4b76092249911d0" ]
[ "BallotTest.py" ]
[ "import matplotlib.pyplot as plt\n\nfrom Ballot import Ballot\nfrom DefaultConfigOptions import *\nfrom PartyPrimaryElection import PartyPrimaryElection\n\n\ndef main():\n ideology = []\n for i in range(1000):\n print(\".\")\n if (i + 1) % 100 == 0:\n print(\"\")\n\n ideology.append(run_election())\n\n plt.hist([ideology],\n stacked=True,\n density=True,\n bins=30,\n color=[\"blue\"],\n label=[\"representatives\"],\n )\n plt.xlabel('ideology')\n plt.ylabel('count')\n plt.show()\n\n\ndef gen_candidates(population: PopulationGroup) -> List[Candidate]:\n cc = []\n for i in range(0, 3):\n v = population.partisan_sample_voter()\n cc.append(Candidate(\"%s-%d\" % (population.party.short_name, i + 1), population.party, v.ideology, 0))\n return cc\n\n\ndef run_election() -> float:\n pop = combined_population\n voters = pop.generate_voters(1000)\n candidates = gen_candidates(pop.republicans) + gen_candidates(pop.democrats)\n ballots = list(map(lambda v: Ballot(v, candidates, default_election_config), voters))\n election = PartyPrimaryElection(ballots, set(candidates), pop, default_election_config)\n return election.result().winner().ideology.vec[0]\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
mretegan/silx
[ "2c8b05ff1c8c1fc00e3d4a08331c76ff5b44996b" ]
[ "silx/gui/plot/items/curve.py" ]
[ "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module provides the :class:`Curve` item of the :class:`Plot`.\n\"\"\"\n\n__authors__ = [\"T. Vincent\"]\n__license__ = \"MIT\"\n__date__ = \"24/04/2018\"\n\n\nimport logging\n\nimport numpy\nimport six\n\nfrom ....utils.deprecation import deprecated\nfrom ... import colors\nfrom .core import (PointsBase, LabelsMixIn, ColorMixIn, YAxisMixIn,\n FillMixIn, LineMixIn, SymbolMixIn, ItemChangedType,\n BaselineMixIn, HighlightedMixIn, _Style)\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass CurveStyle(_Style):\n \"\"\"Object storing the style of a curve.\n\n Set a value to None to use the default\n\n :param color: Color\n :param Union[str,None] linestyle: Style of the line\n :param Union[float,None] linewidth: Width of the line\n :param Union[str,None] symbol: Symbol for markers\n :param Union[float,None] symbolsize: Size of the markers\n \"\"\"\n\n def __init__(self, color=None, linestyle=None, linewidth=None,\n symbol=None, symbolsize=None):\n if color is None:\n self._color = None\n else:\n if isinstance(color, six.string_types):\n color = colors.rgba(color)\n else: # array-like expected\n color = numpy.array(color, copy=False)\n if color.ndim == 1: # Array is 1D, this is a single color\n color = colors.rgba(color)\n self._color = color\n\n if linestyle is not None:\n assert linestyle in LineMixIn.getSupportedLineStyles()\n self._linestyle = linestyle\n\n self._linewidth = None if linewidth is None else float(linewidth)\n\n if symbol is not None:\n assert symbol in SymbolMixIn.getSupportedSymbols()\n self._symbol = symbol\n\n self._symbolsize = None if symbolsize is None else float(symbolsize)\n\n def getColor(self, copy=True):\n \"\"\"Returns the color or None if not set.\n\n :param bool copy: True to get a copy (default),\n False to get internal representation (do not modify!)\n\n :rtype: Union[List[float],None]\n \"\"\"\n if isinstance(self._color, numpy.ndarray):\n return numpy.array(self._color, copy=copy)\n else:\n return self._color\n\n def getLineStyle(self):\n \"\"\"Return the type of the line or None if not set.\n\n Type of line::\n\n - ' ' no line\n - '-' solid line\n - '--' dashed line\n - '-.' 
dash-dot line\n - ':' dotted line\n\n :rtype: Union[str,None]\n \"\"\"\n return self._linestyle\n\n def getLineWidth(self):\n \"\"\"Return the curve line width in pixels or None if not set.\n\n :rtype: Union[float,None]\n \"\"\"\n return self._linewidth\n\n def getSymbol(self):\n \"\"\"Return the point marker type.\n\n Marker type::\n\n - 'o' circle\n - '.' point\n - ',' pixel\n - '+' cross\n - 'x' x-cross\n - 'd' diamond\n - 's' square\n\n :rtype: Union[str,None]\n \"\"\"\n return self._symbol\n\n def getSymbolSize(self):\n \"\"\"Return the point marker size in points.\n\n :rtype: Union[float,None]\n \"\"\"\n return self._symbolsize\n\n def __eq__(self, other):\n if isinstance(other, CurveStyle):\n return (numpy.array_equal(self.getColor(), other.getColor()) and\n self.getLineStyle() == other.getLineStyle() and\n self.getLineWidth() == other.getLineWidth() and\n self.getSymbol() == other.getSymbol() and\n self.getSymbolSize() == other.getSymbolSize())\n else:\n return False\n\n\nclass Curve(PointsBase, ColorMixIn, YAxisMixIn, FillMixIn, LabelsMixIn,\n LineMixIn, BaselineMixIn, HighlightedMixIn):\n \"\"\"Description of a curve\"\"\"\n\n _DEFAULT_Z_LAYER = 1\n \"\"\"Default overlay layer for curves\"\"\"\n\n _DEFAULT_SELECTABLE = True\n \"\"\"Default selectable state for curves\"\"\"\n\n _DEFAULT_LINEWIDTH = 1.\n \"\"\"Default line width of the curve\"\"\"\n\n _DEFAULT_LINESTYLE = '-'\n \"\"\"Default line style of the curve\"\"\"\n\n _DEFAULT_HIGHLIGHT_STYLE = CurveStyle(color='black')\n \"\"\"Default highlight style of the item\"\"\"\n\n _DEFAULT_BASELINE = None\n\n def __init__(self):\n PointsBase.__init__(self)\n ColorMixIn.__init__(self)\n YAxisMixIn.__init__(self)\n FillMixIn.__init__(self)\n LabelsMixIn.__init__(self)\n LineMixIn.__init__(self)\n BaselineMixIn.__init__(self)\n HighlightedMixIn.__init__(self)\n\n self._setBaseline(Curve._DEFAULT_BASELINE)\n\n self.sigItemChanged.connect(self.__itemChanged)\n\n def __itemChanged(self, event):\n if event == ItemChangedType.YAXIS:\n # TODO hackish data range implementation\n plot = self.getPlot()\n if plot is not None:\n plot._invalidateDataRange()\n\n def _addBackendRenderer(self, backend):\n \"\"\"Update backend renderer\"\"\"\n # Filter-out values <= 0\n xFiltered, yFiltered, xerror, yerror = self.getData(\n copy=False, displayed=True)\n\n if len(xFiltered) == 0 or not numpy.any(numpy.isfinite(xFiltered)):\n return None # No data to display, do not add renderer to backend\n\n style = self.getCurrentStyle()\n\n return backend.addCurve(xFiltered, yFiltered,\n color=style.getColor(),\n symbol=style.getSymbol(),\n linestyle=style.getLineStyle(),\n linewidth=style.getLineWidth(),\n yaxis=self.getYAxis(),\n xerror=xerror,\n yerror=yerror,\n fill=self.isFill(),\n alpha=self.getAlpha(),\n symbolsize=style.getSymbolSize(),\n baseline=self.getBaseline(copy=False))\n\n def __getitem__(self, item):\n \"\"\"Compatibility with PyMca and silx <= 0.4.0\"\"\"\n if isinstance(item, slice):\n return [self[index] for index in range(*item.indices(5))]\n elif item == 0:\n return self.getXData(copy=False)\n elif item == 1:\n return self.getYData(copy=False)\n elif item == 2:\n return self.getName()\n elif item == 3:\n info = self.getInfo(copy=False)\n return {} if info is None else info\n elif item == 4:\n params = {\n 'info': self.getInfo(),\n 'color': self.getColor(),\n 'symbol': self.getSymbol(),\n 'linewidth': self.getLineWidth(),\n 'linestyle': self.getLineStyle(),\n 'xlabel': self.getXLabel(),\n 'ylabel': self.getYLabel(),\n 'yaxis': self.getYAxis(),\n 
'xerror': self.getXErrorData(copy=False),\n 'yerror': self.getYErrorData(copy=False),\n 'z': self.getZValue(),\n 'selectable': self.isSelectable(),\n 'fill': self.isFill(),\n }\n return params\n else:\n raise IndexError(\"Index out of range: %s\", str(item))\n\n def setVisible(self, visible):\n \"\"\"Set visibility of item.\n\n :param bool visible: True to display it, False otherwise\n \"\"\"\n visible = bool(visible)\n # TODO hackish data range implementation\n if self.isVisible() != visible:\n plot = self.getPlot()\n if plot is not None:\n plot._invalidateDataRange()\n\n super(Curve, self).setVisible(visible)\n\n @deprecated(replacement='Curve.getHighlightedStyle().getColor()',\n since_version='0.9.0')\n def getHighlightedColor(self):\n \"\"\"Returns the RGBA highlight color of the item\n\n :rtype: 4-tuple of float in [0, 1]\n \"\"\"\n return self.getHighlightedStyle().getColor()\n\n @deprecated(replacement='Curve.setHighlightedStyle()',\n since_version='0.9.0')\n def setHighlightedColor(self, color):\n \"\"\"Set the color to use when highlighted\n\n :param color: color(s) to be used for highlight\n :type color: str (\"#RRGGBB\") or (npoints, 4) unsigned byte array or\n one of the predefined color names defined in colors.py\n \"\"\"\n self.setHighlightedStyle(CurveStyle(color))\n\n def getCurrentStyle(self):\n \"\"\"Returns the current curve style.\n\n Curve style depends on curve highlighting\n\n :rtype: CurveStyle\n \"\"\"\n if self.isHighlighted():\n style = self.getHighlightedStyle()\n color = style.getColor()\n linestyle = style.getLineStyle()\n linewidth = style.getLineWidth()\n symbol = style.getSymbol()\n symbolsize = style.getSymbolSize()\n\n return CurveStyle(\n color=self.getColor() if color is None else color,\n linestyle=self.getLineStyle() if linestyle is None else linestyle,\n linewidth=self.getLineWidth() if linewidth is None else linewidth,\n symbol=self.getSymbol() if symbol is None else symbol,\n symbolsize=self.getSymbolSize() if symbolsize is None else symbolsize)\n\n else:\n return CurveStyle(color=self.getColor(),\n linestyle=self.getLineStyle(),\n linewidth=self.getLineWidth(),\n symbol=self.getSymbol(),\n symbolsize=self.getSymbolSize())\n\n @deprecated(replacement='Curve.getCurrentStyle()',\n since_version='0.9.0')\n def getCurrentColor(self):\n \"\"\"Returns the current color of the curve.\n\n This color is either the color of the curve or the highlighted color,\n depending on the highlight state.\n\n :rtype: 4-tuple of float in [0, 1]\n \"\"\"\n return self.getCurrentStyle().getColor()\n\n def setData(self, x, y, xerror=None, yerror=None, baseline=None, copy=True):\n \"\"\"Set the data of the curve.\n\n :param numpy.ndarray x: The data corresponding to the x coordinates.\n :param numpy.ndarray y: The data corresponding to the y coordinates.\n :param xerror: Values with the uncertainties on the x values\n :type xerror: A float, or a numpy.ndarray of float32.\n If it is an array, it can either be a 1D array of\n same length as the data or a 2D array with 2 rows\n of same length as the data: row 0 for positive errors,\n row 1 for negative errors.\n :param yerror: Values with the uncertainties on the y values.\n :type yerror: A float, or a numpy.ndarray of float32. 
See xerror.\n :param baseline: curve baseline\n :type baseline: Union[None,float,numpy.ndarray]\n :param bool copy: True make a copy of the data (default),\n False to use provided arrays.\n \"\"\"\n PointsBase.setData(self, x=x, y=y, xerror=xerror, yerror=yerror,\n copy=copy)\n self._setBaseline(baseline=baseline)\n" ]
[ [ "numpy.array", "numpy.isfinite" ] ]
nayyarv/bayesnets
[ "090abd1a0a91c2b9d6d57a182ee5be1f65a22e11" ]
[ "tests/test_metrics.py" ]
[ "import numpy as np\nfrom swarm import metrics\nimport pytest\n\n# Example y with 11 points from -1.5 to 1.5.\ny = np.array(\n [\n -0.997495,\n -0.9320391,\n -0.78332686,\n -0.5646425,\n -0.29552022,\n 0.0,\n 0.29552022,\n 0.5646425,\n 0.78332686,\n 0.9320391,\n 0.997495,\n ]\n)\n\nlosses = np.array([[0.82777214, 0.82301313], [0.35649812, 0.35499558], [0.82012618, 0.81833321]])\n\n# Example predictions for first two epochs of a swarm of three bees.\nypreds = np.array(\n [\n [\n [\n -0.75819135,\n -0.6721624,\n -0.5914593,\n -0.5263963,\n -0.4742774,\n -0.42794737,\n -0.4386463,\n -0.45942548,\n -0.5183165,\n -0.6156955,\n -0.7488868,\n ],\n [\n -0.75616974,\n -0.6701199,\n -0.5893732,\n -0.5242175,\n -0.4719131,\n -0.42543185,\n -0.43560237,\n -0.45590907,\n -0.51438874,\n -0.61130494,\n -0.74402857,\n ],\n ],\n [\n [\n -0.18297303,\n -0.21213517,\n -0.18341143,\n -0.15066521,\n -0.11950047,\n -0.09036797,\n -0.0256229,\n 0.0269562,\n 0.06986493,\n 0.1414077,\n 0.19563401,\n ],\n [\n -0.18315202,\n -0.21226275,\n -0.18336335,\n -0.15038337,\n -0.11897573,\n -0.08946133,\n -0.0242492,\n 0.02882081,\n 0.07219976,\n 0.14433557,\n 0.19909364,\n ],\n ],\n [\n [\n 0.36912787,\n 0.34506714,\n 0.32219756,\n 0.3202601,\n 0.30032292,\n 0.259299,\n 0.21430482,\n 0.14271711,\n 0.05134173,\n -0.063667,\n -0.17867568,\n ],\n [\n 0.36715215,\n 0.34335977,\n 0.32078195,\n 0.3192455,\n 0.2996201,\n 0.2587561,\n 0.21395013,\n 0.14270164,\n 0.05165949,\n -0.06302758,\n -0.1777146,\n ],\n ],\n ]\n)\n\n# An example of scores obtained for a swarm that bounce around on the way down.\nepoch_scores = [\n 0.51727545,\n 0.4584964,\n 0.3589881,\n 0.2524824,\n 0.20734829,\n 0.2482427,\n 0.30246153,\n 0.3388226,\n 0.34041768,\n 0.3064342,\n 0.26800793,\n 0.2686419,\n 0.24010916,\n 0.18522426,\n 0.22644123,\n 0.26727045,\n 0.28942722,\n 0.28332102,\n 0.25410518,\n 0.22259913,\n 0.25512502,\n 0.28029743,\n 0.29604492,\n 0.30136263,\n 0.29408443,\n 0.27543014,\n 0.24885914,\n 0.21919054,\n 0.22593765,\n 0.2305434,\n 0.22474495,\n 0.21082267,\n 0.19170743,\n 0.17090012,\n 0.1521816,\n 0.13839552,\n 0.1299243,\n 0.12569669,\n 0.12456866,\n 0.12922356,\n 0.14023647,\n 0.15060309,\n 0.15662336,\n 0.15730526,\n 0.15512368,\n 0.15510257,\n 0.16903949,\n 0.1815229,\n 0.20310307,\n 0.21428823,\n 0.21110815,\n 0.19391632,\n 0.16897929,\n 0.15510854,\n 0.1513776,\n 0.15778454,\n 0.15062831,\n 0.1423014,\n 0.1533089,\n 0.16309854,\n]\n\n\ndef test_summarise_across_bees_ypreds():\n \"\"\"This shows how to get a summary feature for each point x in a swarm. 
Eg, the average of the swarms ypreds\"\"\"\n for summ_metric in [np.min, np.max, np.mean, np.median, np.std, np.ptp]:\n out = summ_metric(ypreds, axis=0)\n assert type(out) == np.ndarray\n assert out.shape == (2, 11)\n\n\ndef test_summarise_across_bees_losses():\n \"\"\"This shows how to get the average loss across a swarm\"\"\"\n for summ_metric in [np.min, np.max, np.mean, np.median, np.std, np.ptp]:\n out = summ_metric(losses, axis=0)\n assert type(out) == np.ndarray\n assert out.shape == (2,)\n\n\ndef test_rmse_2d():\n b0_preds = ypreds[0]\n out = metrics.mse_loss(b0_preds, y)\n assert len(out.shape) == len(b0_preds.shape) - 1\n assert (\n np.max(np.abs(out - losses[0])) < 0.000001\n ) # I dont' know why this isn't exactly 0, have tried pytest.approx\n\n b2_preds = ypreds[2]\n out = metrics.mse_loss(b2_preds, y)\n assert len(out.shape) == len(b2_preds.shape) - 1\n assert np.max(np.abs(out - losses[2])) < 0.000001 # I dont' know why this isn't exactly 0\n\n\ndef test_rmse_3d():\n out = metrics.mse_loss(ypreds, y)\n assert len(out.shape) == len(ypreds.shape) - 1\n assert np.max(np.abs(out - losses)) < 0.000001 # I don't know why this isn't exactly 0\n\n\ndef test_loss_mean_point_pred():\n \"\"\"\n This is an example of interest, since it is plausible (and of interest) if the averaged prediction of many bees\n in a swarm, at a given point x, might tend to be better than any given one.\n \"\"\"\n mean_point_preds = np.mean(ypreds, axis=0)\n loss_mean_preds = metrics.mse_loss(mean_point_preds, y)\n assert loss_mean_preds.shape == (2,)\n\n\ndef test_if_nom_first_below():\n epoch = metrics.iteration_threshold(epoch_scores, 0.25, \"first\", \"below\")\n assert epoch_scores[epoch] <= 0.25\n assert np.all(np.array(epoch_scores[:epoch]) > 0.25)\n assert metrics.iteration_threshold(epoch_scores, 0.001, \"first\", \"below\") is None\n\n\ndef test_if_nom_always_below():\n epoch = metrics.iteration_threshold(epoch_scores, 0.25, \"always\", \"below\")\n assert np.max(epoch_scores[epoch:]) <= 0.25\n assert epoch_scores[epoch - 1] > 0.25\n assert metrics.iteration_threshold(epoch_scores, 0.001, \"always\", \"below\") is None\n\n\ndef test_if_nom_first_above():\n reverse_scores = 1 - np.array(epoch_scores)\n epoch = metrics.iteration_threshold(reverse_scores, 0.75, \"first\", \"above\")\n assert reverse_scores[epoch] >= 0.75\n assert np.all(reverse_scores[:epoch] < 0.75)\n assert metrics.iteration_threshold(reverse_scores, 0.999, \"first\", \"above\") is None\n\n\ndef test_if_nom_always_above():\n reverse_scores = 1 - np.array(epoch_scores)\n epoch = metrics.iteration_threshold(reverse_scores, 0.75, \"always\", \"above\")\n assert np.min(reverse_scores[epoch:]) >= 0.75\n assert reverse_scores[epoch - 1] < 0.75\n assert metrics.iteration_threshold(reverse_scores, 0.999, \"always\", \"above\") is None\n\n\ndef test_if_ratio_first_below():\n epoch = metrics.iteration_threshold_ratio(epoch_scores, 0.5, \"first\", \"below\")\n epoch_ratios = np.array(epoch_scores) / epoch_scores[0]\n assert epoch_ratios[epoch] <= 0.5\n assert np.all(epoch_ratios[:epoch] > 0.5)\n assert metrics.iteration_threshold_ratio(epoch_scores, 0.001, \"first\", \"below\") is None\n\n\ndef test_if_ratio_always_below():\n epoch = metrics.iteration_threshold_ratio(epoch_scores, 0.5, \"always\", \"below\")\n epoch_ratios = np.array(epoch_scores) / epoch_scores[0]\n assert np.max(epoch_ratios[epoch:]) <= 0.5\n assert epoch_ratios[epoch - 1] > 0.5\n assert metrics.iteration_threshold_ratio(epoch_scores, 0.001, \"always\", \"below\") is 
None\n\n\ndef test_if_ratio_first_above():\n reverse_scores = 1 / np.array(epoch_scores)\n epoch = metrics.iteration_threshold_ratio(reverse_scores, 1.5, \"first\", \"above\", 3)\n reverse_ratios = reverse_scores / reverse_scores[3]\n assert reverse_ratios[epoch] >= 1.5\n assert np.all(reverse_ratios[:epoch] < 1.5)\n assert metrics.iteration_threshold_ratio(reverse_scores, 200, \"first\", \"above\") is None\n\n\ndef test_if_ratio_always_above():\n reverse_scores = 1 / np.array(epoch_scores)\n epoch = metrics.iteration_threshold_ratio(reverse_scores, 1.1, \"always\", \"above\", 3)\n reverse_ratios = reverse_scores / reverse_scores[3]\n assert np.min(reverse_ratios[epoch:]) >= 1.1\n assert reverse_ratios[epoch - 1] < 1.1\n assert metrics.iteration_threshold_ratio(reverse_scores, 200, \"always\", \"above\") is None\n\n\ndef test_if_ratio_error():\n \"\"\"Should fail due to the score crossing zero\"\"\"\n with pytest.raises(ValueError):\n metrics.iteration_threshold_ratio(np.array([-0.1, 0, 0.1, 1]), 0.1)\n" ]
[ [ "numpy.max", "numpy.array", "numpy.min", "numpy.mean", "numpy.abs", "numpy.all" ] ]
robbisg/mvpa_itab_wu
[ "e3cdb198a21349672f601cd34381e0895fa6484c", "e3cdb198a21349672f601cd34381e0895fa6484c" ]
[ "mvpa_itab/script/misc/colors.py", "mvpa_itab/script/mambo/c2b/simulations/results-20200907.py" ]
[ "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\n\ncolors_ = list(six.iteritems(colors.cnames))\n\n# Add the single letter colors.\nfor name, rgb in six.iteritems(colors.ColorConverter.colors):\n hex_ = colors.rgb2hex(rgb)\n colors_.append((name, hex_))\n\n# Transform to hex color values.\nhex_ = [color[1] for color in colors_]\n# Get the rgb equivalent.\nrgb = [colors.hex2color(color) for color in hex_]\n# Get the hsv equivalent.\nhsv = [colors.rgb_to_hsv(color) for color in rgb]\n\n# Split the hsv values to sort.\nhue = [color[0] for color in hsv]\nsat = [color[1] for color in hsv]\nval = [color[2] for color in hsv]\n\n# Sort by hue, saturation and value.\nind = np.lexsort((val, sat, hue))\nsorted_colors = [colors_[i] for i in ind]\n\nn = len(sorted_colors)\nncols = 1\nnrows = 4\n\nfig, ax = plt.subplots(facecolor='black')\nax.set_axis_bgcolor('black')\nX, Y = fig.get_dpi() * fig.get_size_inches()\n\n# row height\nh = Y / (nrows + 1)\n# col width\nw = X / ncols\n\nfor i, (name, color) in enumerate(dict_.iteritems()):\n col = i % ncols\n row = int(i / ncols)\n y = Y - (row * h) - h\n\n xi_line = w * (col + 0.05)\n xf_line = w * (col + 0.25)\n xi_text = w * (col + 0.2)\n\n ax.text(xi_text, y+0.5, name, fontsize=20, color='white',\n horizontalalignment='left',\n verticalalignment='center')\n\n # Add extra black line a little bit thicker to make\n # clear colors more visible.\n #ax.hlines(y, xi_line, xf_line, color='black', linewidth=(h * 0.7))\n ax.plot(xi_line, y, markersize=30, color=color, linewidth=2, marker='o')\n\nax.set_xlim(0, X)\nax.set_ylim(0, Y)\nax.set_axis_off()\n\nfig.subplots_adjust(left=0, right=1,\n top=1, bottom=0,\n hspace=0, wspace=0)\nfig.savefig('/home/robbis/figure1-lr.png', facecolor='black')\nplt.show()", "from pyitab.results.simulations import get_results, purge_dataframe, \\\n calculate_metrics, find_best_k, calculate_centroids, state_errors, \\\n dynamics_errors\nfrom pyitab.results.base import filter_dataframe\nfrom pyitab.results.dataframe import apply_function\nfrom pyitab.utils import make_dict_product\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\n\npath = \"/media/robbis/DATA/fmri/c2b/derivatives/\"\npipeline = \"c2b+chieti\"\n\n\ndata = get_results(path, \n pipeline=pipeline, \n field_list=['sample_slicer', \n 'n_clusters', \n 'n_components', \n 'ds.a.snr',\n 'ds.a.time',\n 'ds.a.states',\n 'fetch', \n 'algorithm'],\n \n #filter={'algorithm':['KMeans']}\n )\n\ndf = purge_dataframe(data)\n\nconditions = {\n\n 'time': [1.5, 2., 2.5, 3.],\n #'num': [str(j) for j in np.arange(1, 480)],\n 'snr': [3, 5, 10],\n 'algorithm': ['GaussianMixture',\n 'KMeans',\n 'AgglomerativeClustering', \n 'SpectralClustering',\n 'MiniBatchKMeans'],\n 'subject': [str(i) for i in np.arange(1, 26)]\n \n}\n\ncombinations = make_dict_product(**conditions)\n\nmetrics = []\nbest_k = []\nfor options in combinations:\n df_ = filter_dataframe(df, **options)\n options = {k: v[0] for k, v in options.items()}\n df_metric = calculate_metrics(df_, fixed_variables=options)\n df_metric = df_metric.sort_values('k')\n df_k = find_best_k(df_metric)\n metrics.append(df_metric)\n best_k.append(df_k)\ndf_metrics = pd.concat(metrics)\ndf_guess = pd.concat(best_k)\n\ndf_guess['hit'] = np.int_(df_guess['guess'].values == 6)\ndf_guess['abshit'] = np.abs(df_guess['guess'].values - 6)\ndf_great_mean = apply_function(df_guess, keys=['name', 
'algorithm'], attr='abshit', fx=np.mean)\ndf_great_mean = apply_function(df_guess, keys=['name', 'algorithm'], attr='hit', fx=np.mean)\n\n# Plot of metrics\ndf_mean = apply_function(df_guess, keys=['name'], attr='hit', fx=np.mean)\narg_sort = np.argsort(df_mean['hit'].values)[::-1]\n\nfor alg in np.unique(df_great_mean['algorithm']):\n df_a = filter_dataframe(df_great_mean, algorithm=[alg])\n values = df_a['hit'].values[arg_sort]\n pl.plot(values, '-o')\npl.xticks(np.arange(len(values)), df_a['name'].values[arg_sort])\n \n# State similarity\ndf = calculate_centroids(df)\ndf = state_errors(df)\ndf = dynamics_errors(df)\n\n\n#################################\n\n\n##### Plot of hits by algorithm #####\n#df_guess = pd.read_csv(\"/home/robbis/Dropbox/simulation_guess.csv\")\n_, maskg = filter_dataframe(df_guess, return_mask=True, algorithm=['GaussianMixture'])\n_, maski = filter_dataframe(df_guess, return_mask=True, name=['Index I'])\nmask = np.logical_or(maskg, maski)\n\ndf_guess = df_guess.loc[np.logical_not(mask)]\ndf_great_mean = apply_function(df_guess, keys=['name', 'algorithm', 'snr', 'time'], attr='hit', fx=np.mean)\n\ndf_sort = apply_function(df_guess, keys=['name'], attr='hit', fx=np.mean).sort_values(by='hit')\nencoding = dict(zip(df_sort['name'].values, np.arange(7)[::-1]))\ndf_great_mean['metric'] = [encoding[name] for name in df_great_mean['name'].values]\ndf_guess['metric'] = [encoding[name] for name in df_guess['name'].values]\n\nxlabels = list(encoding.keys())[::-1]\nxlabels = ['SIL', 'GEV', 'WGSS', \"CV\", \"EV\", \"KL\"]\n#### Totale #####\npalette = sns.color_palette(\"magma\", 6)[::-1][::2]\n\nf = sns.relplot(x=\"metric\", y=\"hit\", row=\"time\", col=\"algorithm\", hue=\"snr\", data=df_great_mean, \n kind='line', height=6, aspect=.75, palette=palette,\n legend=\"full\", marker='o', lw=3.5, markersize=15, markeredgecolor='none')\nfor ax in f.axes[-1]:\n ax.set_xticks(np.arange(len(xlabels)))\n ax.set_xticklabels(xlabels)\n\n### Best metric ###\nf = sns.relplot(x=\"metric\", y=\"hit\", hue=\"algorithm\", data=df_guess,\n kind='line', marker='o', \n lw=3.5, markersize=15, markeredgecolor='none')\nfor ax in f.axes[-1]:\n ax.set_xticks(1+np.arange(len(xlabels)))\n ax.set_xticklabels(xlabels)\n\n### Metric vs snr ###\npalette = sns.color_palette(\"magma\", 6)[::-1][::2]\nf = sns.relplot(x=\"metric\", y=\"hit\", hue=\"snr\", data=df_guess, palette=palette,\n kind='line', marker='o', \n lw=3.5, markersize=15, markeredgecolor='none')\nfor ax in f.axes[-1]:\n ax.set_xticks(1+np.arange(len(xlabels)))\n ax.set_xticklabels(xlabels)\n\n### Metric vs time ###\ndf_sort = apply_function(df_guess, keys=['name', 'snr'], attr='hit', fx=np.mean).sort_values(by='hit')\ndf_sort['metric'] = [encoding[name] for name in df_sort['name'].values]\npalette = sns.color_palette(\"magma\", 8)[::-1][::2]\nf = sns.relplot(x=\"metric\", y=\"hit\", hue=\"time\", data=df_guess, palette=palette,\n kind='line', marker='o', lw=3.5, markersize=15, markeredgecolor='none')\nfor ax in f.axes[-1]:\n ax.set_xticks(1+np.arange(len(xlabels)))\n ax.set_xticklabels(xlabels) \n\n############################\nfontsize = 15\nparams = {'axes.labelsize': fontsize-3,\n 'axes.titlesize': fontsize-2, \n 'font.size': fontsize, \n 'legend.fontsize':fontsize-3 , \n 'xtick.labelsize':fontsize-2 , \n 'ytick.labelsize':fontsize-2}\n\npl.rcParams.update(params)\n\nfull_df = df\n\npalette = sns.color_palette(\"magma\", 6)[::-1]\nf = sns.relplot(x=\"time\", y=\"dynamics_errors\", hue=\"snr\", col=\"algorithm\",\n height=5, aspect=.75, 
facet_kws=dict(sharex=False), \n kind=\"line\", legend=\"full\", data=full_df, palette=palette[::2], \n marker='o', lw=3.5, markersize=15, markeredgecolor='none')\n\nf = sns.relplot(x=\"time\", y=\"centroid_similarity\", hue=\"snr\", col=\"algorithm\",\n height=5, aspect=.75, facet_kws=dict(sharex=False), \n kind=\"line\", legend=\"full\", data=full_df, palette=palette[::2], \n marker='o', lw=3.5, markersize=15, markeredgecolor='none')\n\nflatui = [\"#9b59b6\", \"#3498db\", \"#e74c3c\", \"#34495e\", \"#2ecc71\"]\npalette = sns.color_palette(flatui)\nf = sns.relplot(x=\"time\", y=\"dynamics_errors\", col=\"snr\", hue=\"algorithm\",\n height=5, aspect=.75, facet_kws=dict(sharex=False), \n kind=\"line\", legend=\"full\", data=full_df, palette=palette, \n marker='o', lw=3.5, markersize=15, markeredgecolor='none')\n\nf = sns.relplot(x=\"time\", y=\"centroid_similarity\", col=\"snr\", hue=\"algorithm\",\n height=5, aspect=.75, facet_kws=dict(sharex=False), \n kind=\"line\", legend=\"full\", data=full_df, palette=palette, \n marker='o', lw=3.5, markersize=15, markeredgecolor='none')\n\n##############################################à\n\nf = sns.relplot(x=\"algorithm\", y=\"dynamics_errors\",\n height=5, aspect=.95, facet_kws=dict(sharex=False), \n kind=\"line\", data=full_df, \n marker='o', lw=3.5, markersize=15, markeredgecolor='none')\n\nf = sns.relplot(x=\"algorithm\", y=\"centroid_similarity\",\n height=5, aspect=.95, facet_kws=dict(sharex=False), \n kind=\"line\", data=full_df, \n marker='o', lw=3.5, markersize=15, markeredgecolor='none')\n\n\n\n\n\n\n\n\n\n\n########################################\n\nfrom pyitab.plot.connectivity import plot_connectivity_lines\nfrom matplotlib import animation\nfrom mvpa2.base.hdf5 import h5load\n\npath = \"/home/robbis/mount/aalto-work/data/simulations/meg/ds-min_time_1.5-snr_10000.gzip\"\nds = h5load(path)\n\nsamples = ds.samples\nmatrices = np.array([copy_matrix(array_to_matrix(m)) for m in samples[::50]])\n\n\n\nnames = [\"node_%02d\"%(i+1) for i in range(10)]\n\ndef animate(i, fig):\n names = [\"node_%s\" % (str(j+1)) for j in range(10)]\n #pl.imshow(matrix[i*100])\n pl.clf()\n plot_connectivity_lines(matrices[i], facecolor='white',\n node_names=names, con_thresh=0., \n kind='circle', fig=fig)\n\n\nfig = pl.figure(figsize=(8, 8))\n\nanim = animation.FuncAnimation(fig, animate, fargs=[fig],\n frames=45, interval=20)\nanim.save('/home/robbis/animation.gif', writer='imagemagick', fps=10)" ]
[ [ "numpy.lexsort", "matplotlib.colors.hex2color", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.colors.rgb2hex", "matplotlib.colors.rgb_to_hsv" ], [ "numpy.logical_not", "numpy.logical_or", "matplotlib.animation.FuncAnimation", "pandas.concat", "numpy.arange", "numpy.abs", "numpy.int_", "numpy.argsort", "numpy.unique" ] ]
gregunz/ml2017
[ "6235003ef849a13b1da95e4842b9cabd30b70fd3" ]
[ "project01/src/helpers.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"some helper functions for project 1.\"\"\"\r\nimport csv\r\nimport numpy as np\r\n\r\ndef load_csv_data(data_path, sub_sample=False):\r\n \"\"\"Loads data and returns y (class labels), tX (features) and ids (event ids)\"\"\"\r\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\r\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\r\n ids = x[:, 0].astype(np.int)\r\n input_data = x[:, 2:]\r\n\r\n # convert class labels from strings to binary (-1,1)\r\n yb = np.ones(len(y))\r\n yb[np.where(y=='b')] = -1\r\n \r\n # sub-sample\r\n if sub_sample:\r\n yb = yb[::50]\r\n input_data = input_data[::50]\r\n ids = ids[::50]\r\n\r\n return yb, input_data, ids\r\n\r\ndef create_csv_submission(ids, y_pred, name):\r\n \"\"\"\r\n Creates an output file in csv format for submission to kaggle\r\n Arguments: ids (event ids associated with each prediction)\r\n y_pred (predicted class labels)\r\n name (string name of .csv output file to be created)\r\n \"\"\"\r\n with open(name, 'w') as csvfile:\r\n fieldnames = ['Id', 'Prediction']\r\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\r\n writer.writeheader()\r\n for r1, r2 in zip(ids, y_pred):\r\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})\r\n\r\ndef batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):\r\n \"\"\"\r\n Generate a minibatch iterator for a dataset.\r\n Takes as input two iterables (here the output desired values 'y' and the input data 'tx')\r\n Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.\r\n Data can be randomly shuffled to avoid ordering in the original data messing with the randomness of the minibatches.\r\n Example of use :\r\n for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):\r\n <DO-SOMETHING>\r\n \"\"\"\r\n data_size = len(y)\r\n\r\n if shuffle:\r\n shuffle_indices = np.random.permutation(np.arange(data_size))\r\n shuffled_y = y[shuffle_indices]\r\n shuffled_tx = tx[shuffle_indices]\r\n else:\r\n shuffled_y = y\r\n shuffled_tx = tx\r\n for batch_num in range(num_batches):\r\n start_index = batch_num * batch_size\r\n end_index = min((batch_num + 1) * batch_size, data_size)\r\n if start_index != end_index:\r\n yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]\r\n\r\ndef range_mask(length, seq):\r\n return np.array([i in seq for i in range(length)])\r\n\r\ndef create_pairs(n, m, with_repetition=False, with_itself=False):\r\n return [(i, j) for i in range(n) for j in range(m) if (with_repetition or j >= i) and (with_itself or j != i)]\r\n\r\ndef all_combinations_of(xs, fn, combs):\r\n combinations = []\r\n for i, pairs in enumerate(combs):\r\n combinations.append(combinations_of(xs[i], fn, pairs))\r\n return combinations\r\n\r\ndef combinations_of(x, fn, pairs):\r\n if len(pairs) > 0:\r\n combinations = [fn(x[:, a], x[:, b]).reshape((x.shape[0], 1)) for a, b in pairs]\r\n return np.concatenate(combinations, axis=1)\r\n return np.array([])\r\n\r\ndef separate_train(xs, train_size, data_masks):\r\n test_size = np.sum([m.sum() for m in data_masks]) - train_size\r\n train_mask = np.r_[[True] * train_size, [False] * test_size]\r\n xs_train_size = [(mask & train_mask).sum() for mask in data_masks]\r\n\r\n xs_train = [f[:size] for f, size in zip(xs, xs_train_size)]\r\n xs_test = [f[size:] for f, size in zip(xs, xs_train_size)]\r\n\r\n return xs_train, xs_test\r\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.genfromtxt", "numpy.where", "numpy.arange" ] ]
nonconvexopt/jax
[ "8b489134c818364577f630ada6aa63beefd7376a" ]
[ "tests/lax_numpy_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections\nimport functools\nfrom functools import partial\nimport inspect\nimport itertools\nimport operator\nfrom typing import cast, Iterator, Optional, List, Tuple\nimport unittest\nfrom unittest import SkipTest\nimport warnings\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as np\ntry:\n import numpy_dispatch\nexcept ImportError:\n numpy_dispatch = None\n\nimport jax\nimport jax.ops\nfrom jax import lax\nfrom jax import numpy as jnp\nfrom jax import test_util as jtu\nfrom jax._src import dtypes\nfrom jax import tree_util\nfrom jax.interpreters import xla\nfrom jax.test_util import check_grads\nfrom jax._src.util import prod\nfrom jax._src.numpy.util import _parse_numpydoc, ParsedDoc\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\nnumpy_version = tuple(map(int, np.__version__.split('.')[:3]))\n\nnonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]\nnonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes\none_dim_array_shapes = [(1,), (6,), (12,)]\nempty_array_shapes = [(0,), (0, 4), (3, 0),]\n\nscalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]\narray_shapes = nonempty_array_shapes + empty_array_shapes\nnonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes\nnonempty_shapes = scalar_shapes + nonempty_array_shapes\nall_shapes = scalar_shapes + array_shapes\n\nfloat_dtypes = jtu.dtypes.all_floating\ncomplex_dtypes = jtu.dtypes.complex\nint_dtypes = jtu.dtypes.all_integer\nunsigned_dtypes = jtu.dtypes.all_unsigned\nbool_dtypes = jtu.dtypes.boolean\ndefault_dtypes = float_dtypes + int_dtypes\ninexact_dtypes = float_dtypes + complex_dtypes\nnumber_dtypes = float_dtypes + complex_dtypes + int_dtypes\nall_dtypes = number_dtypes + bool_dtypes\n\n\npython_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]\n\n# uint64 is problematic because with any uint type it promotes to float:\nint_dtypes_no_uint64 = [d for d in int_dtypes + unsigned_dtypes if d != np.uint64]\n\ndef _valid_dtypes_for_shape(shape, dtypes):\n # Not all (shape, dtype) pairs are valid. 
In particular, Python scalars only\n # have one type in each category (float, bool, etc.)\n if shape is jtu.PYTHON_SCALAR_SHAPE:\n return [t for t in dtypes if t in python_scalar_dtypes]\n return dtypes\n\ndef _shape_and_dtypes(shapes, dtypes):\n for shape in shapes:\n for dtype in _valid_dtypes_for_shape(shape, dtypes):\n yield (shape, dtype)\n\ndef _compatible_shapes(shape):\n if shape in scalar_shapes or np.ndim(shape) == 0:\n return [shape]\n return (shape[n:] for n in range(len(shape) + 1))\n\ndef _get_y_shapes(y_dtype, shape, rowvar):\n # Helper function for testCov.\n if y_dtype is None:\n return [None]\n if len(shape) == 1:\n return [shape]\n elif rowvar or shape[0] == 1:\n return [(1, shape[-1]), (2, shape[-1]), (5, shape[-1])]\n return [(shape[0], 1), (shape[0], 2), (shape[0], 5)]\n\nOpRecord = collections.namedtuple(\n \"OpRecord\",\n [\"name\", \"nargs\", \"dtypes\", \"shapes\", \"rng_factory\", \"diff_modes\",\n \"test_name\", \"check_dtypes\", \"tolerance\", \"inexact\"])\n\ndef op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,\n test_name=None, check_dtypes=True,\n tolerance=None, inexact=False):\n test_name = test_name or name\n return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,\n test_name, check_dtypes, tolerance, inexact)\n\nJAX_ONE_TO_ONE_OP_RECORDS = [\n op_record(\"abs\", 1, number_dtypes + unsigned_dtypes + bool_dtypes,\n all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"add\", 2, all_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"ceil\", 1, float_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"ceil\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_default, [], check_dtypes=False),\n op_record(\"conj\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"equal\", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),\n op_record(\"exp\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n inexact=True),\n op_record(\"fabs\", 1, float_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"float_power\", 2, inexact_dtypes, all_shapes,\n partial(jtu.rand_default, scale=1), [\"rev\"],\n tolerance={jnp.bfloat16: 1e-2, np.float32: 1e-3,\n np.float64: 1e-12, np.complex64: 2e-4,\n np.complex128: 1e-12}, check_dtypes=False),\n op_record(\"floor\", 1, float_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"floor\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_default, [], check_dtypes=False),\n op_record(\"greater\", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),\n op_record(\"greater_equal\", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),\n op_record(\"i0\", 1, float_dtypes, all_shapes, jtu.rand_default, [],\n check_dtypes=False),\n op_record(\"ldexp\", 2, int_dtypes, all_shapes, jtu.rand_default, [], check_dtypes=False),\n op_record(\"less\", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),\n op_record(\"less_equal\", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),\n op_record(\"log\", 1, number_dtypes, all_shapes, jtu.rand_positive, [\"rev\"],\n inexact=True),\n op_record(\"logical_and\", 2, all_dtypes, all_shapes, jtu.rand_bool, []),\n op_record(\"logical_not\", 1, all_dtypes, all_shapes, jtu.rand_bool, []),\n op_record(\"logical_or\", 2, all_dtypes, all_shapes, jtu.rand_bool, []),\n op_record(\"logical_xor\", 2, all_dtypes, all_shapes, jtu.rand_bool, []),\n op_record(\"maximum\", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),\n op_record(\"minimum\", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),\n 
op_record(\"multiply\", 2, all_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"negative\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"nextafter\", 2, [f for f in float_dtypes if f != jnp.bfloat16],\n all_shapes, jtu.rand_default, [\"rev\"], inexact=True, tolerance=0),\n op_record(\"not_equal\", 2, all_dtypes, all_shapes, jtu.rand_some_equal, [\"rev\"]),\n op_record(\"array_equal\", 2, number_dtypes, all_shapes, jtu.rand_some_equal, [\"rev\"]),\n op_record(\"array_equiv\", 2, number_dtypes, all_shapes, jtu.rand_some_equal, [\"rev\"]),\n op_record(\"reciprocal\", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"subtract\", 2, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"signbit\", 1, default_dtypes + bool_dtypes, all_shapes,\n jtu.rand_some_inf_and_nan, [\"rev\"]),\n op_record(\"trunc\", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),\n op_record(\"trunc\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_some_inf_and_nan, [], check_dtypes=False),\n op_record(\"sin\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n inexact=True),\n op_record(\"cos\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n inexact=True),\n op_record(\"tan\", 1, number_dtypes, all_shapes,\n partial(jtu.rand_uniform, low=-1.5, high=1.5), [\"rev\"],\n inexact=True),\n op_record(\"sinh\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n inexact=True),\n op_record(\"cosh\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n inexact=True),\n # TODO(b/142975473): on CPU, tanh for complex128 is only accurate to\n # ~float32 precision.\n # TODO(b/143135720): on GPU, tanh has only ~float32 precision.\n op_record(\"tanh\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n tolerance={np.float64: 1e-7, np.complex128: 1e-7},\n inexact=True),\n op_record(\"arcsin\", 1, number_dtypes, all_shapes, jtu.rand_small, [\"rev\"],\n inexact=True),\n op_record(\"arccos\", 1, number_dtypes, all_shapes, jtu.rand_small, [\"rev\"],\n inexact=True),\n op_record(\"arctan\", 1, number_dtypes, all_shapes, jtu.rand_small, [\"rev\"],\n inexact=True),\n op_record(\"arctan2\", 2, float_dtypes, all_shapes, jtu.rand_small, [\"rev\"],\n inexact=True),\n op_record(\"arcsinh\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n inexact=True, tolerance={np.complex64: 2E-4, np.complex128: 2E-14}),\n op_record(\"arccosh\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n inexact=True, tolerance={np.complex64: 2E-2, np.complex128: 2E-12}),\n op_record(\"arctanh\", 1, number_dtypes, all_shapes, jtu.rand_small, [\"rev\"],\n inexact=True, tolerance={np.float64: 1e-9}),\n]\n\nJAX_COMPOUND_OP_RECORDS = [\n # angle has inconsistent 32/64-bit return types across numpy versions.\n op_record(\"angle\", 1, number_dtypes, all_shapes, jtu.rand_default, [],\n check_dtypes=False, inexact=True),\n op_record(\"atleast_1d\", 1, default_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"atleast_2d\", 1, default_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"atleast_3d\", 1, default_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"cbrt\", 1, default_dtypes, all_shapes, jtu.rand_some_inf, [\"rev\"],\n inexact=True),\n op_record(\"conjugate\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"deg2rad\", 1, float_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"divide\", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [\"rev\"],\n 
inexact=True),\n op_record(\"divmod\", 2, int_dtypes + float_dtypes, all_shapes,\n jtu.rand_nonzero, []),\n op_record(\"exp2\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n tolerance={jnp.bfloat16: 4e-2, np.float16: 1e-2}, inexact=True),\n # TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32\n # precision.\n op_record(\"expm1\", 1, number_dtypes, all_shapes, jtu.rand_positive, [],\n test_name=\"expm1_large\", tolerance={np.float64: 1e-8}, inexact=True),\n op_record(\"expm1\", 1, number_dtypes, all_shapes, jtu.rand_small_positive,\n [], tolerance={np.float64: 1e-8}, inexact=True),\n op_record(\"fix\", 1, float_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"fix\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_default, [], check_dtypes=False),\n op_record(\"floor_divide\", 2, number_dtypes, all_shapes,\n jtu.rand_nonzero, [\"rev\"]),\n op_record(\"floor_divide\", 2, unsigned_dtypes, all_shapes,\n jtu.rand_nonzero, [\"rev\"]),\n op_record(\"fmin\", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),\n op_record(\"fmax\", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),\n op_record(\"fmod\", 2, default_dtypes, all_shapes, jtu.rand_some_nan, []),\n op_record(\"heaviside\", 2, default_dtypes, all_shapes, jtu.rand_default, [],\n inexact=True),\n op_record(\"hypot\", 2, default_dtypes, all_shapes, jtu.rand_default, [],\n inexact=True),\n op_record(\"kron\", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),\n op_record(\"outer\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"imag\", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),\n op_record(\"iscomplex\", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),\n op_record(\"isfinite\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),\n op_record(\"isinf\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),\n op_record(\"isnan\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),\n op_record(\"isneginf\", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),\n op_record(\"isposinf\", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),\n op_record(\"isreal\", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),\n op_record(\"isrealobj\", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),\n op_record(\"log2\", 1, number_dtypes, all_shapes, jtu.rand_positive, [\"rev\"],\n inexact=True),\n op_record(\"log10\", 1, number_dtypes, all_shapes, jtu.rand_positive, [\"rev\"],\n inexact=True),\n op_record(\"log1p\", 1, number_dtypes, all_shapes, jtu.rand_positive, [],\n test_name=\"log1p_large\", tolerance={np.float64: 1e-12},\n inexact=True),\n op_record(\"log1p\", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],\n tolerance={np.float64: 1e-12}, inexact=True),\n op_record(\"logaddexp\", 2, float_dtypes, all_shapes,\n jtu.rand_some_inf_and_nan, [\"rev\"],\n tolerance={np.float64: 1e-12}, inexact=True),\n op_record(\"logaddexp2\", 2, float_dtypes, all_shapes,\n jtu.rand_some_inf_and_nan, [\"rev\"],\n tolerance={np.float16: 1e-2, np.float64: 2e-14}, inexact=True),\n op_record(\"polyval\", 2, number_dtypes, nonempty_nonscalar_array_shapes,\n jtu.rand_default, [], check_dtypes=False,\n tolerance={dtypes.bfloat16: 4e-2, np.float16: 1e-2,\n np.float64: 1e-12}),\n op_record(\"positive\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"power\", 2, number_dtypes, all_shapes, jtu.rand_positive, [\"rev\"],\n tolerance={np.complex128: 1e-14}, check_dtypes=False),\n 
op_record(\"rad2deg\", 1, float_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"ravel\", 1, all_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"real\", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),\n op_record(\"remainder\", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],\n tolerance={np.float16: 1e-2}),\n op_record(\"mod\", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),\n op_record(\"modf\", 1, float_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"modf\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_default, [], check_dtypes=False),\n op_record(\"rint\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan,\n []),\n op_record(\"rint\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_default, [], check_dtypes=False),\n op_record(\"sign\", 1, number_dtypes + unsigned_dtypes,\n all_shapes, jtu.rand_some_inf_and_nan, []),\n # numpy 1.16 has trouble mixing uint and bfloat16, so we test these separately.\n op_record(\"copysign\", 2, default_dtypes,\n all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),\n op_record(\"copysign\", 2, unsigned_dtypes,\n all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),\n op_record(\"sinc\", 1, [t for t in number_dtypes if t != jnp.bfloat16],\n all_shapes, jtu.rand_default, [\"rev\"],\n tolerance={np.complex64: 1e-5}, inexact=True,\n check_dtypes=False),\n op_record(\"square\", 1, number_dtypes, all_shapes, jtu.rand_default, [\"rev\"]),\n op_record(\"sqrt\", 1, number_dtypes, all_shapes, jtu.rand_positive, [\"rev\"],\n inexact=True),\n op_record(\"transpose\", 1, all_dtypes, all_shapes, jtu.rand_default, [\"rev\"],\n check_dtypes=False),\n op_record(\"true_divide\", 2, all_dtypes, all_shapes, jtu.rand_nonzero,\n [\"rev\"], inexact=True),\n op_record(\"ediff1d\", 3, [np.int32], all_shapes, jtu.rand_default, []),\n # TODO(phawkins): np.unwrap does not correctly promote its default period\n # argument under NumPy 1.21 for bfloat16 inputs. It works fine if we\n # explicitly pass a bfloat16 value that does not need promition. 
We should\n # probably add a custom test harness for unwrap that tests the period\n # argument anyway.\n op_record(\"unwrap\", 1, [t for t in float_dtypes if t != dtypes.bfloat16],\n nonempty_nonscalar_array_shapes,\n jtu.rand_default, [\"rev\"],\n # numpy.unwrap always returns float64\n check_dtypes=False,\n # numpy cumsum is inaccurate, see issue #3517\n tolerance={dtypes.bfloat16: 1e-1, np.float16: 1e-1}),\n op_record(\"isclose\", 2, [t for t in all_dtypes if t != jnp.bfloat16],\n all_shapes, jtu.rand_small_positive, []),\n op_record(\"gcd\", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),\n op_record(\"lcm\", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),\n]\n\nJAX_BITWISE_OP_RECORDS = [\n op_record(\"bitwise_and\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool, []),\n op_record(\"bitwise_not\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool, []),\n op_record(\"invert\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool, []),\n op_record(\"bitwise_or\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool, []),\n op_record(\"bitwise_xor\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool, []),\n]\n\nJAX_REDUCER_RECORDS = [\n op_record(\"mean\", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],\n inexact=True),\n op_record(\"prod\", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),\n op_record(\"sum\", 1, all_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"nanmean\", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,\n [], inexact=True),\n op_record(\"nanprod\", 1, all_dtypes, all_shapes, jtu.rand_some_nan, []),\n op_record(\"nansum\", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),\n]\n\nJAX_REDUCER_INITIAL_RECORDS = [\n op_record(\"prod\", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),\n op_record(\"sum\", 1, all_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"max\", 1, all_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"min\", 1, all_dtypes, all_shapes, jtu.rand_default, []),\n]\n\nJAX_REDUCER_WHERE_NO_INITIAL_RECORDS = [\n op_record(\"all\", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),\n op_record(\"any\", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),\n op_record(\"mean\", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],\n inexact=True),\n op_record(\"var\", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],\n inexact=True),\n op_record(\"std\", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],\n inexact=True),\n]\n\nJAX_REDUCER_NO_DTYPE_RECORDS = [\n op_record(\"all\", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),\n op_record(\"any\", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),\n op_record(\"max\", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),\n op_record(\"min\", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),\n op_record(\"var\", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],\n inexact=True),\n op_record(\"std\", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],\n inexact=True),\n op_record(\"nanmax\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),\n op_record(\"nanmin\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),\n op_record(\"nanvar\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,\n [], inexact=True),\n op_record(\"nanstd\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,\n [], inexact=True),\n op_record(\"ptp\", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),\n]\n\nJAX_ARGMINMAX_RECORDS = [\n op_record(\"argmin\", 1, 
default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),\n op_record(\"argmax\", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),\n op_record(\"nanargmin\", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),\n op_record(\"nanargmax\", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),\n]\n\nJAX_OPERATOR_OVERLOADS = [\n op_record(\"__add__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__sub__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__mul__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__eq__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__ne__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__lt__\", 2, default_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__le__\", 2, default_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__gt__\", 2, default_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__ge__\", 2, default_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__pos__\", 1, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__neg__\", 1, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__pow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],\n tolerance={np.float32: 2e-4, np.complex64: 2e-4, np.complex128: 1e-14}),\n op_record(\"__mod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],\n tolerance={np.float16: 1e-1}),\n op_record(\"__floordiv__\", 2, default_dtypes, all_shapes,\n jtu.rand_nonzero, []),\n op_record(\"__truediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],\n inexact=True),\n op_record(\"__abs__\", 1, number_dtypes, all_shapes, jtu.rand_default, []),\n # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2\n op_record(\"__invert__\", 1, int_dtypes, all_shapes, jtu.rand_default, []),\n # TODO(mattjj): investigate these failures\n # op_record(\"__or__\", 2, number_dtypes, all_shapes, jtu.rand_bool, []),\n # op_record(\"__and__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n # op_record(\"__xor__\", 2, number_dtypes, all_shapes, jtu.rand_bool, []),\n # op_record(\"__divmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),\n op_record(\"__lshift__\", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),\n op_record(\"__rshift__\", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),\n]\n\nJAX_RIGHT_OPERATOR_OVERLOADS = [\n op_record(\"__radd__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__rsub__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__rmul__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n op_record(\"__rpow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],\n tolerance={np.float32: 2e-4, np.complex64: 1e-3}),\n op_record(\"__rmod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],\n tolerance={np.float16: 1e-1}),\n op_record(\"__rfloordiv__\", 2, default_dtypes, all_shapes,\n jtu.rand_nonzero, []),\n op_record(\"__rtruediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],\n inexact=True),\n # op_record(\"__ror__\", 2, number_dtypes, all_shapes, jtu.rand_bool, []),\n # op_record(\"__rand__\", 2, number_dtypes, all_shapes, jtu.rand_default, []),\n # op_record(\"__rxor__\", 2, number_dtypes, all_shapes, jtu.rand_bool, []),\n # op_record(\"__rdivmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),\n op_record(\"__rlshift__\", 2, int_dtypes_no_uint64, all_shapes, 
partial(jtu.rand_int, high=8), []),\n op_record(\"__rrshift__\", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), [])\n]\n\nclass _OverrideEverything(object):\n pass\n\nfor rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:\n if rec.nargs == 2:\n setattr(_OverrideEverything, rec.name, lambda self, other: self)\n\nclass _OverrideNothing(object):\n pass\n\nfor rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:\n if rec.nargs == 2:\n setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)\n\n\ndef _dtypes_are_compatible_for_bitwise_ops(args):\n if len(args) <= 1:\n return True\n is_signed = lambda dtype: jnp.issubdtype(dtype, np.signedinteger)\n width = lambda dtype: jnp.iinfo(dtype).bits\n x, y = args\n if width(x) > width(y):\n x, y = y, x\n # The following condition seems a little ad hoc, but seems to capture what\n # numpy actually implements.\n return (\n is_signed(x) == is_signed(y)\n or (width(x) == 32 and width(y) == 32)\n or (width(x) == 32 and width(y) == 64 and is_signed(y)))\n\ndef _shapes_are_broadcast_compatible(shapes):\n accumulator = np.zeros([])\n for shape in shapes:\n try:\n accumulator = accumulator + np.zeros(shape)\n except ValueError:\n return False\n return True\n\ndef _shapes_are_equal_length(shapes):\n return all(len(shape) == len(shapes[0]) for shape in shapes[1:])\n\n\ndef _promote_like_jnp(fun, inexact=False):\n \"\"\"Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.\n\n jnp and np have different type promotion semantics; this decorator allows\n tests make an np reference implementation act more like an jnp\n implementation.\n \"\"\"\n def wrapper(*args, **kw):\n flat_args = tree_util.tree_leaves(args)\n if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)\n for x in flat_args):\n dtype = jnp.result_type(jnp.float_, *flat_args)\n else:\n dtype = jnp.result_type(*flat_args)\n args = tree_util.tree_map(lambda a: np.asarray(a, dtype), args)\n return fun(*args, **kw)\n return wrapper\n\n\n@jtu.with_config(jax_numpy_rank_promotion=\"raise\")\nclass LaxBackedNumpyTests(jtu.JaxTestCase):\n \"\"\"Tests for LAX-backed Numpy implementation.\"\"\"\n\n def _GetArgsMaker(self, rng, shapes, dtypes, np_arrays=True):\n def f():\n out = [rng(shape, dtype or jnp.float_)\n for shape, dtype in zip(shapes, dtypes)]\n if np_arrays:\n return out\n return [jnp.asarray(a) if isinstance(a, (np.ndarray, np.generic)) else a\n for a in out]\n return f\n\n def testNotImplemented(self):\n for name in jnp._NOT_IMPLEMENTED:\n func = getattr(jnp, name)\n with self.assertRaises(NotImplementedError):\n func()\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng_factory\": rec.rng_factory, \"shapes\": shapes, \"dtypes\": dtypes,\n \"np_op\": getattr(np, rec.name), \"jnp_op\": getattr(jnp, rec.name),\n \"check_dtypes\": rec.check_dtypes, \"tolerance\": rec.tolerance,\n \"inexact\": rec.inexact}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(rec.shapes, rec.nargs))\n for dtypes in itertools.product(\n *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))\n for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,\n JAX_COMPOUND_OP_RECORDS)))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testOp(self, np_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,\n 
tolerance, inexact):\n np_op = jtu.ignore_warning(category=RuntimeWarning,\n message=\"invalid value.*\")(np_op)\n np_op = jtu.ignore_warning(category=RuntimeWarning,\n message=\"divide by zero.*\")(np_op)\n\n rng = rng_factory(self.rng())\n args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)\n tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)\n tol = functools.reduce(jtu.join_tolerance,\n [tolerance, tol, jtu.default_tolerance()])\n self._CheckAgainstNumpy(_promote_like_jnp(np_op, inexact), jnp_op,\n args_maker, check_dtypes=check_dtypes, tol=tol)\n self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,\n atol=tol, rtol=tol)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng_factory\": rec.rng_factory, \"shapes\": shapes, \"dtypes\": dtypes, \"name\": rec.name,\n \"tol\": rec.tolerance}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(rec.shapes, rec.nargs))\n for dtypes in itertools.product(\n *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))\n for rec in JAX_OPERATOR_OVERLOADS))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):\n rng = rng_factory(self.rng())\n # np and jnp arrays have different type promotion rules; force the use of\n # jnp arrays.\n args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)\n fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)\n self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng_factory\": rec.rng_factory, \"shapes\": shapes, \"dtypes\": dtypes, \"name\": rec.name,\n \"op_tolerance\": rec.tolerance}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(rec.shapes, rec.nargs))\n for dtypes in itertools.product(\n *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))\n for rec in JAX_RIGHT_OPERATOR_OVERLOADS))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,\n op_tolerance):\n if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:\n raise SkipTest(\"scalars not implemented\") # TODO(mattjj): clean up\n rng = rng_factory(self.rng())\n args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)\n fun = lambda fst, snd: getattr(snd, name)(fst)\n tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)\n self._CompileAndCheck( fun, args_maker, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": rec.test_name + \"_{}\".format(dtype),\n \"rng_factory\": rec.rng_factory,\n \"op_name\": rec.name, \"dtype\": dtype}\n for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2\n for dtype in rec.dtypes))\n def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):\n rng = rng_factory(self.rng())\n arg = jax.device_put(rng((), dtype))\n op = getattr(operator, op_name)\n\n other = _OverrideEverything()\n assert op(other, arg) is other\n assert op(arg, other) is other\n\n other = _OverrideNothing()\n if op_name == \"__eq__\":\n assert op(other, arg) is False\n assert op(arg, other) is False\n 
elif op_name == \"__ne__\":\n assert op(other, arg) is True\n assert op(arg, other) is True\n else:\n with self.assertRaises(TypeError):\n op(other, arg)\n with self.assertRaises(TypeError):\n op(arg, other)\n\n def testArrayEqualExamples(self):\n # examples from the array_equal() docstring.\n self.assertTrue(jnp.array_equal([1, 2], [1, 2]))\n self.assertTrue(jnp.array_equal(np.array([1, 2]), np.array([1, 2])))\n self.assertFalse(jnp.array_equal([1, 2], [1, 2, 3]))\n self.assertFalse(jnp.array_equal([1, 2], [1, 4]))\n\n a = np.array([1, np.nan])\n self.assertFalse(jnp.array_equal(a, a))\n self.assertTrue(jnp.array_equal(a, a, equal_nan=True))\n\n a = np.array([1 + 1j])\n b = a.copy()\n a.real = np.nan\n b.imag = np.nan\n self.assertTrue(jnp.array_equal(a, b, equal_nan=True))\n\n def testArrayEquivExamples(self):\n # examples from the array_equiv() docstring.\n self.assertTrue(jnp.array_equiv([1, 2], [1, 2]))\n self.assertFalse(jnp.array_equiv([1, 2], [1, 3]))\n with jax.numpy_rank_promotion('allow'):\n self.assertTrue(jnp.array_equiv([1, 2], [[1, 2], [1, 2]]))\n self.assertFalse(jnp.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]))\n self.assertFalse(jnp.array_equiv([1, 2], [[1, 2], [1, 3]]))\n\n def testArrayModule(self):\n if numpy_dispatch is None:\n raise SkipTest('requires https://github.com/seberg/numpy-dispatch')\n\n jnp_array = jnp.array(1.0)\n np_array = np.array(1.0)\n\n module = numpy_dispatch.get_array_module(jnp_array)\n self.assertIs(module, jnp)\n\n module = numpy_dispatch.get_array_module(jnp_array, np_array)\n self.assertIs(module, jnp)\n\n def f(x):\n module = numpy_dispatch.get_array_module(x)\n self.assertIs(module, jnp)\n return x\n jax.jit(f)(jnp_array)\n jax.grad(f)(jnp_array)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.test_name, shapes, dtypes),\n \"rng_factory\": rec.rng_factory, \"shapes\": shapes, \"dtypes\": dtypes,\n \"np_op\": getattr(np, rec.name), \"jnp_op\": getattr(jnp, rec.name)}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(rec.shapes, rec.nargs))\n for dtypes in filter(\n _dtypes_are_compatible_for_bitwise_ops,\n itertools.combinations_with_replacement(rec.dtypes, rec.nargs)))\n for rec in JAX_BITWISE_OP_RECORDS))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testBitwiseOp(self, np_op, jnp_op, rng_factory, shapes, dtypes):\n rng = rng_factory(self.rng())\n if not config.x64_enabled and any(\n jnp.iinfo(dtype).bits == 64 for dtype in dtypes):\n self.skipTest(\"x64 types are disabled by jax_enable_x64\")\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker,\n check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(op.__name__, shapes, dtypes),\n \"op\": op, \"dtypes\": dtypes, \"shapes\": shapes}\n for op in [jnp.left_shift, jnp.right_shift]\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n # TODO numpy always promotes to shift dtype for zero-dim shapes:\n itertools.combinations_with_replacement(nonzerodim_shapes, 2))\n for dtypes in itertools.product(\n *(_valid_dtypes_for_shape(s, int_dtypes_no_uint64) for s in shapes))))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def 
testShiftOpAgainstNumpy(self, op, dtypes, shapes):
    dtype, shift_dtype = dtypes
    signed_mix = np.issubdtype(dtype, np.signedinteger) != \
        np.issubdtype(shift_dtype, np.signedinteger)
    has_32 = any(np.iinfo(d).bits == 32 for d in dtypes)
    promoting_to_64 = has_32 and signed_mix
    if promoting_to_64 and not config.x64_enabled:
      self.skipTest("np.right_shift/left_shift promoting to int64 "
                    "differs from jnp in 32 bit mode.")

    info, shift_info = map(np.iinfo, dtypes)
    x_rng = jtu.rand_int(self.rng(), low=info.min, high=info.max + 1)
    # NumPy requires shifts to be non-negative and below the bit width:
    shift_rng = jtu.rand_int(self.rng(), high=max(info.bits, shift_info.bits))
    args_maker = lambda: (x_rng(shapes[0], dtype), shift_rng(shapes[1], shift_dtype))
    self._CompileAndCheck(op, args_maker)
    np_op = getattr(np, op.__name__)
    self._CheckAgainstNumpy(np_op, op, args_maker)

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis,
          "None" if out_dtype is None else np.dtype(out_dtype).name, keepdims),
         "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
         "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
        for shape in rec.shapes for dtype in rec.dtypes
        for out_dtype in [None] + rec.dtypes
        for axis in list(range(-len(shape), len(shape))) + [None]
        for keepdims in [False, True])
      for rec in JAX_REDUCER_RECORDS))
  def testReducer(self, np_op, jnp_op, rng_factory, shape, dtype, out_dtype,
                  axis, keepdims, inexact):
    rng = rng_factory(self.rng())
    @jtu.ignore_warning(category=np.ComplexWarning)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="mean of empty slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="overflow encountered.*")
    def np_fun(x):
      x_cast = x if dtype != jnp.bfloat16 else x.astype(np.float32)
      t = out_dtype if out_dtype != jnp.bfloat16 else np.float32
      return np_op(x_cast, axis, dtype=t, keepdims=keepdims)
    np_fun = _promote_like_jnp(np_fun, inexact)
    jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    tol_spec = {np.float16: 1e-2, np.int32: 1E-3, np.float32: 1e-3,
                np.complex64: 1e-3, np.float64: 1e-5, np.complex128: 1e-5}
    tol = jtu.tolerance(dtype, tol_spec)
    tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                          rtol=tol)

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
         "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
         "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
        for shape in rec.shapes for dtype in rec.dtypes
        for axis in list(range(-len(shape), len(shape))) + [None]
        for keepdims in [False, True])
      for rec in JAX_REDUCER_NO_DTYPE_RECORDS))
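  # Compares JAX reducers that take no dtype argument against their NumPy
  # counterparts; bfloat16 NaN cases are computed in float32 and cast back.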
def testReducerNoDtype(self, np_op, jnp_op, rng_factory, shape, dtype, axis,\n keepdims, inexact):\n rng = rng_factory(self.rng())\n is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"Degrees of freedom <= 0 for slice.*\")\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"All-NaN slice encountered.*\")\n def np_fun(x):\n x_cast = x if not is_bf16_nan_test else x.astype(np.float32)\n res = np_op(x_cast, axis, keepdims=keepdims)\n res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)\n return res\n np_fun = _promote_like_jnp(np_fun, inexact)\n jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)\n args_maker = lambda: [rng(shape, dtype)]\n tol = {np.float16: 0.002}\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, rtol=tol, atol=tol)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}_keepdims={}_initial={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial),\n \"rng_factory\": rec.rng_factory, \"shape\": shape, \"dtype\": dtype,\n \"np_op\": getattr(np, rec.name), \"jnp_op\": getattr(jnp, rec.name),\n \"initial\": initial, \"axis\": axis, \"keepdims\": keepdims, \"inexact\": rec.inexact}\n for shape in rec.shapes for dtype in rec.dtypes\n for axis in list(range(-len(shape), len(shape))) + [None]\n for initial in [0, 1] for keepdims in [False, True])\n for rec in JAX_REDUCER_INITIAL_RECORDS))\n def testReducerInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,\n keepdims, initial, inexact):\n rng = rng_factory(self.rng())\n is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"Degrees of freedom <= 0 for slice.*\")\n def np_fun(x):\n x_cast = x if not is_bf16_nan_test else x.astype(np.float32)\n res = np_op(x_cast, axis, keepdims=keepdims, initial=initial)\n res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)\n return res\n np_fun = _promote_like_jnp(np_fun, inexact)\n np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)\n jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial)\n jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}_keepdims={}_initial={}_whereshape={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial,\n jtu.format_shape_dtype_string(whereshape, bool)),\n \"rng_factory\": rec.rng_factory, \"shape\": shape, \"dtype\": dtype,\n \"np_op\": getattr(np, rec.name), \"jnp_op\": getattr(jnp, rec.name), \"whereshape\": whereshape,\n \"initial\": initial, \"axis\": axis, \"keepdims\": keepdims, \"inexact\": rec.inexact}\n for shape in rec.shapes for dtype in rec.dtypes\n for whereshape in _compatible_shapes(shape)\n for axis in list(range(-len(shape), len(shape))) + [None]\n for initial in [0, 1] for keepdims in [False, True])\n for rec in JAX_REDUCER_INITIAL_RECORDS))\n def testReducerWhere(self, np_op, jnp_op, rng_factory, shape, dtype, axis,\n keepdims, initial, inexact, whereshape):\n if 
(shape in [()] + scalar_shapes and\n dtype in [jnp.int16, jnp.uint16] and\n jnp_op in [jnp.min, jnp.max]):\n self.skipTest(\"Known XLA failure; see https://github.com/google/jax/issues/4971.\")\n rng = rng_factory(self.rng())\n is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'\n # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.\n where = jtu.rand_bool(self.rng())(whereshape, np.bool_)\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"Degrees of freedom <= 0 for slice.*\")\n def np_fun(x):\n x_cast = x if not is_bf16_nan_test else x.astype(np.float32)\n res = np_op(x_cast, axis, keepdims=keepdims, initial=initial, where=where)\n res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)\n return res\n np_fun = _promote_like_jnp(np_fun, inexact)\n np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)\n jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial, where=where)\n jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @unittest.skipIf(numpy_version < (1, 20), \"where parameter not supported in older numpy\")\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}_keepdims={}_whereshape={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis, keepdims,\n jtu.format_shape_dtype_string(whereshape, bool)),\n \"rng_factory\": rec.rng_factory, \"shape\": shape, \"dtype\": dtype,\n \"np_op\": getattr(np, rec.name), \"jnp_op\": getattr(jnp, rec.name), \"whereshape\": whereshape,\n \"axis\": axis, \"keepdims\": keepdims, \"inexact\": rec.inexact}\n for shape in rec.shapes for dtype in rec.dtypes\n for whereshape in _compatible_shapes(shape)\n for axis in list(range(-len(shape), len(shape))) + [None]\n for keepdims in [False, True])\n for rec in JAX_REDUCER_WHERE_NO_INITIAL_RECORDS))\n def testReducerWhereNoInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,\n keepdims, inexact, whereshape):\n rng = rng_factory(self.rng())\n is_bf16_nan_test = dtype == jnp.bfloat16\n # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.\n where = jtu.rand_bool(self.rng())(whereshape, np.bool_)\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"Degrees of freedom <= 0 for slice.*\")\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"Mean of empty slice.*\")\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"invalid value encountered in true_divide*\")\n def np_fun(x):\n x_cast = x if not is_bf16_nan_test else x.astype(np.float32)\n res = np_op(x_cast, axis, keepdims=keepdims, where=where)\n res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)\n return res\n\n np_fun = _promote_like_jnp(np_fun, inexact)\n np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)\n jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, where=where)\n jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)\n args_maker = lambda: [rng(shape, dtype)]\n if numpy_version >= (1, 20, 2) or np_op.__name__ in (\"all\", \"any\"):\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, 
dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in all_shapes for dtype in all_dtypes\n for axis in list(range(-len(shape), len(shape))) + [None]))\n def testCountNonzero(self, shape, dtype, axis):\n rng = jtu.rand_some_zero(self.rng())\n np_fun = lambda x: np.count_nonzero(x, axis)\n jnp_fun = lambda x: jnp.count_nonzero(x, axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in all_shapes for dtype in all_dtypes))\n def testNonzero(self, shape, dtype):\n rng = jtu.rand_some_zero(self.rng())\n np_fun = lambda x: np.nonzero(x)\n np_fun = jtu.ignore_warning(\n category=DeprecationWarning,\n message=\"Calling nonzero on 0d arrays.*\")(np_fun)\n jnp_fun = lambda x: jnp.nonzero(x)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_size={}_fill_value={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), size, fill_value),\n \"shape\": shape, \"dtype\": dtype, \"size\": size, \"fill_value\": fill_value}\n for shape in nonempty_array_shapes\n for dtype in all_dtypes\n for fill_value in [None, -1]\n for size in [1, 5, 10]))\n def testNonzeroSize(self, shape, dtype, size, fill_value):\n rng = jtu.rand_some_zero(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n @jtu.ignore_warning(category=DeprecationWarning, message=\"Calling nonzero on 0d arrays.*\")\n def np_fun(x):\n result = np.nonzero(x)\n if size <= len(result[0]):\n return tuple(arg[:size] for arg in result)\n else:\n return tuple(np.concatenate([arg, np.full(size - len(arg), fill_value or 0, arg.dtype)])\n for arg in result)\n jnp_fun = lambda x: jnp.nonzero(x, size=size, fill_value=fill_value)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in all_shapes for dtype in all_dtypes))\n def testFlatNonzero(self, shape, dtype):\n rng = jtu.rand_some_zero(self.rng())\n np_fun = jtu.ignore_warning(\n category=DeprecationWarning,\n message=\"Calling nonzero on 0d arrays.*\")(np.flatnonzero)\n jnp_fun = jnp.flatnonzero\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n\n # JIT compilation requires specifying the size statically:\n jnp_fun = lambda x: jnp.flatnonzero(x, size=np.size(x) // 2)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in all_shapes for dtype in all_dtypes))\n def testArgWhere(self, shape, dtype):\n rng = jtu.rand_some_zero(self.rng())\n np_fun = jtu.ignore_warning(\n category=DeprecationWarning,\n message=\"Calling nonzero on 0d arrays.*\")(np.argwhere)\n jnp_fun = jnp.argwhere\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, 
check_dtypes=False)\n\n # JIT compilation requires specifying a size statically. Full test of this\n # behavior is in testNonzeroSize().\n jnp_fun = lambda x: jnp.argwhere(x, size=np.size(x) // 2)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng_factory\": rec.rng_factory, \"shape\": shape, \"dtype\": dtype,\n \"np_op\": getattr(np, rec.name), \"jnp_op\": getattr(jnp, rec.name),\n \"axis\": axis}\n for rec in JAX_ARGMINMAX_RECORDS\n for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)\n for axis in range(-len(shape), len(shape))))\n def testArgMinMax(self, np_op, jnp_op, rng_factory, shape, dtype, axis):\n rng = rng_factory(self.rng())\n if dtype == np.complex128 and jtu.device_under_test() == \"gpu\":\n raise unittest.SkipTest(\"complex128 reductions not supported on GPU\")\n if \"nan\" in np_op.__name__ and dtype == jnp.bfloat16:\n raise unittest.SkipTest(\"NumPy doesn't correctly handle bfloat16 arrays\")\n\n def np_fun(array_to_reduce):\n return np_op(array_to_reduce, axis).astype(jnp.int_)\n\n def jnp_fun(array_to_reduce):\n return jnp_op(array_to_reduce, axis)\n\n args_maker = lambda: [rng(shape, dtype)]\n try:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n except ValueError as e:\n if str(e) == \"All-NaN slice encountered\":\n self.skipTest(\"JAX doesn't support checking for all-NaN slices\")\n else:\n raise\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": rec.test_name.capitalize(), \"name\": rec.name,\n \"np_op\": getattr(np, rec.name), \"jnp_op\": getattr(jnp, rec.name)}\n for rec in JAX_ARGMINMAX_RECORDS))\n def testArgMinMaxEmpty(self, name, np_op, jnp_op):\n name = name[3:] if name.startswith(\"nan\") else name\n msg = \"attempt to get {} of an empty sequence\".format(name)\n with self.assertRaises(ValueError, msg=msg):\n jnp_op(np.array([]))\n with self.assertRaises(ValueError, msg=msg):\n jnp_op(np.zeros((2, 0)), axis=1)\n np_fun = partial(np_op, axis=0)\n jnp_fun = partial(jnp_op, axis=0)\n args_maker = lambda: [np.zeros((2, 0))]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n axes),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"axes\": axes}\n for lhs_shape, rhs_shape, axes in [\n [(2,), (2,), (-1, -1, -1, None)], # scalar output\n [(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors\n [(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors\n [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting\n [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes\n [(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting\n [(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors\n [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting\n [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing\n [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before\n ]\n for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank 
promotion.\n def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n axisa, axisb, axisc, axis = axes\n jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)\n def np_fun(a, b):\n a = a.astype(np.float32) if lhs_dtype == jnp.bfloat16 else a\n b = b.astype(np.float32) if rhs_dtype == jnp.bfloat16 else b\n out = np.cross(a, b, axisa, axisb, axisc, axis)\n return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))\n tol_spec = {dtypes.bfloat16: 3e-1, np.float16: 0.15}\n tol = max(jtu.tolerance(lhs_dtype, tol_spec),\n jtu.tolerance(rhs_dtype, tol_spec))\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, atol=tol,\n rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n name,\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype}\n for name, lhs_shape, rhs_shape in [\n (\"matrix-scalar\", (3, 3), ()),\n (\"scalar-matrix\", (), (3, 3)),\n (\"matrix-vector\", (4, 5), (5,)),\n (\"vector-matrix\", (6,), (6, 4)),\n (\"matrix-matrix\", (3, 4), (4, 5)),\n (\"tensor-vector\", (4, 3, 2), (2,)),\n (\"vector-tensor\", (2,), (3, 2, 4)),\n (\"tensor-matrix\", (4, 3, 2), (2, 5)),\n (\"matrix-tensor\", (5, 2), (3, 2, 4)),\n (\"tensor-tensor\", (2, 3, 4), (5, 4, 1))]\n for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))\n def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n tol = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-14,\n np.complex128: 1e-14}\n if jtu.device_under_test() == \"tpu\":\n tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1\n def np_dot(x, y):\n x = x.astype(np.float32) if lhs_dtype == jnp.bfloat16 else x\n y = y.astype(np.float32) if rhs_dtype == jnp.bfloat16 else y\n return np.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))\n self._CheckAgainstNumpy(np_dot, jnp.dot, args_maker,\n tol=tol)\n self._CompileAndCheck(jnp.dot, args_maker, atol=tol,\n rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n name,\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype}\n for name, lhs_shape, rhs_shape in [\n (\"vector-vector\", (3,), (3,)),\n (\"matrix-vector\", (3, 3), (3,)),\n (\"vector-matrix\", (3,), (3, 3)),\n (\"matrix-matrix\", (3, 3), (3, 3)),\n (\"vector-tensor\", (3,), (5, 3, 2)),\n (\"tensor-vector\", (5, 3, 2), (2,)),\n (\"matrix-tensor\", (5, 2), (3, 2, 4)),\n (\"tensor-matrix\", (5, 2, 3), (3, 2)),\n (\"tensor-tensor\", (5, 3, 4), (5, 4, 1)),\n (\"tensor-tensor-broadcast\", (3, 1, 3, 4), (5, 4, 1))]\n for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))\n def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):\n rng = jtu.rand_default(self.rng())\n def np_fun(x, y):\n dtype = jnp.promote_types(lhs_dtype, rhs_dtype)\n return np.matmul(x, y).astype(dtype)\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, 
rhs_dtype)]\n tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,\n np.complex128: 1e-12}\n if jtu.device_under_test() == \"tpu\":\n tol[np.float16] = tol[np.float32] = tol[np.complex64] = 4e-2\n self._CheckAgainstNumpy(np_fun, jnp.matmul, args_maker, tol=tol)\n self._CompileAndCheck(jnp.matmul, args_maker, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n axes),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"axes\": axes}\n for lhs_shape, rhs_shape, axes in [\n [(3,), (), 0],\n [(2, 3, 4), (5, 6, 7), 0], # from issue #740\n [(2, 3, 4), (3, 4, 5, 6), 2],\n [(2, 3, 4), (5, 4, 3, 6), [1, 2]],\n [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],\n [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],\n ]\n for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))\n def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)\n def np_fun(a, b):\n a = a if lhs_dtype != jnp.bfloat16 else a.astype(np.float32)\n b = b if rhs_dtype != jnp.bfloat16 else b.astype(np.float32)\n dtype = jnp.promote_types(lhs_dtype, rhs_dtype)\n return np.tensordot(a, b, axes).astype(dtype)\n tol = {np.float16: 1e-1, np.float32: 1e-3, np.float64: 1e-12,\n np.complex64: 1e-3, np.complex128: 1e-12}\n if jtu.device_under_test() == \"tpu\":\n tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testTensordotErrors(self):\n a = np.random.random((3, 2, 2))\n b = np.random.random((2,))\n self.assertRaisesRegex(\n TypeError, \"Number of tensordot axes.*exceeds input ranks.*\",\n lambda: jnp.tensordot(a, b, axes=2))\n\n self.assertRaisesRegex(\n TypeError, \"tensordot requires axes lists to have equal length.*\",\n lambda: jnp.tensordot(a, b, axes=([0], [0, 1])))\n\n self.assertRaisesRegex(\n TypeError, \"tensordot requires both axes lists to be either ints, tuples or lists.*\",\n lambda: jnp.tensordot(a, b, axes=('bad', 'axes')))\n\n self.assertRaisesRegex(\n TypeError, \"tensordot axes argument must be an int, a pair of ints, or a pair of lists.*\",\n lambda: jnp.tensordot(a, b, axes='badaxes'))\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_invert={}\".format(\n jtu.format_shape_dtype_string(element_shape, dtype),\n jtu.format_shape_dtype_string(test_shape, dtype), invert),\n \"element_shape\": element_shape, \"test_shape\": test_shape,\n \"dtype\": dtype, \"invert\": invert}\n for element_shape in all_shapes\n for test_shape in all_shapes\n for dtype in default_dtypes\n for invert in [True, False]))\n def testIsin(self, element_shape, test_shape, dtype, invert):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]\n jnp_fun = lambda e, t: jnp.isin(e, t, invert=invert)\n np_fun = lambda e, t: np.isin(e, t, invert=invert)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_invert={}\".format(\n 
jtu.format_shape_dtype_string(element_shape, dtype),\n jtu.format_shape_dtype_string(test_shape, dtype), invert),\n \"element_shape\": element_shape, \"test_shape\": test_shape,\n \"dtype\": dtype, \"invert\": invert}\n for element_shape in all_shapes\n for test_shape in all_shapes\n for dtype in default_dtypes\n for invert in [True, False]))\n def testIn1d(self, element_shape, test_shape, dtype, invert):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]\n jnp_fun = lambda e, t: jnp.in1d(e, t, invert=invert)\n np_fun = lambda e, t: np.in1d(e, t, invert=invert)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(shape1, dtype1),\n jtu.format_shape_dtype_string(shape2, dtype2)),\n \"shape1\": shape1, \"shape2\": shape2, \"dtype1\": dtype1, \"dtype2\": dtype2}\n for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]\n for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]\n for shape1 in all_shapes\n for shape2 in all_shapes))\n def testSetdiff1d(self, shape1, shape2, dtype1, dtype2):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]\n self._CheckAgainstNumpy(np.setdiff1d, jnp.setdiff1d, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(shape1, dtype1),\n jtu.format_shape_dtype_string(shape2, dtype2)),\n \"shape1\": shape1, \"shape2\": shape2, \"dtype1\": dtype1, \"dtype2\": dtype2}\n for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]\n for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]\n for shape1 in nonempty_nonscalar_array_shapes\n for shape2 in nonempty_nonscalar_array_shapes))\n def testUnion1d(self, shape1, shape2, dtype1, dtype2):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]\n def np_fun(arg1, arg2):\n dtype = jnp.promote_types(arg1.dtype, arg2.dtype)\n return np.union1d(arg1, arg2).astype(dtype)\n self._CheckAgainstNumpy(np_fun, jnp.union1d, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_size={}\".format(\n jtu.format_shape_dtype_string(shape1, dtype1),\n jtu.format_shape_dtype_string(shape2, dtype2), size),\n \"shape1\": shape1, \"shape2\": shape2, \"dtype1\": dtype1, \"dtype2\": dtype2, \"size\": size}\n for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]\n for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]\n for shape1 in nonempty_nonscalar_array_shapes\n for shape2 in nonempty_nonscalar_array_shapes\n for size in [1, 5, 10]))\n def testUnion1dSize(self, shape1, shape2, dtype1, dtype2, size):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]\n def np_fun(arg1, arg2):\n dtype = jnp.promote_types(arg1.dtype, arg2.dtype)\n result = np.union1d(arg1, arg2).astype(dtype)\n if size <= len(result):\n return result[:size]\n else:\n return np.concatenate([result, np.full(size - len(result), result[0], result.dtype)])\n def jnp_fun(arg1, arg2):\n return jnp.union1d(arg1, arg2, size=size)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": 
\"_{}_{}_assume_unique={}\".format(\n jtu.format_shape_dtype_string(shape1, dtype1),\n jtu.format_shape_dtype_string(shape2, dtype2),\n assume_unique),\n \"shape1\": shape1, \"dtype1\": dtype1, \"shape2\": shape2, \"dtype2\": dtype2,\n \"assume_unique\": assume_unique}\n for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]\n for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]\n for shape1 in all_shapes\n for shape2 in all_shapes\n for assume_unique in [False, True]))\n def testSetxor1d(self, shape1, dtype1, shape2, dtype2, assume_unique):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]\n jnp_fun = lambda ar1, ar2: jnp.setxor1d(ar1, ar2, assume_unique=assume_unique)\n def np_fun(ar1, ar2):\n if assume_unique:\n # pre-flatten the arrays to match with jax implementation\n ar1 = np.ravel(ar1)\n ar2 = np.ravel(ar2)\n return np.setxor1d(ar1, ar2, assume_unique)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_assume_unique={}_return_indices={}\".format(\n jtu.format_shape_dtype_string(shape1, dtype1),\n jtu.format_shape_dtype_string(shape2, dtype2),\n assume_unique,\n return_indices),\n \"shape1\": shape1, \"dtype1\": dtype1, \"shape2\": shape2, \"dtype2\": dtype2,\n \"assume_unique\": assume_unique, \"return_indices\": return_indices}\n for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]\n for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]\n for shape1 in all_shapes\n for shape2 in all_shapes\n for assume_unique in [False, True]\n for return_indices in [False, True]))\n def testIntersect1d(self, shape1, dtype1, shape2, dtype2, assume_unique, return_indices):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]\n jnp_fun = lambda ar1, ar2: jnp.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)\n np_fun = lambda ar1, ar2: np.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype}\n # TODO(phawkins): support integer dtypes too.\n for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)\n for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)\n if len(jtu._dims_of_shape(lhs_shape)) == 0\n or len(jtu._dims_of_shape(rhs_shape)) == 0\n or lhs_shape[-1] == rhs_shape[-1]))\n def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n def np_fun(lhs, rhs):\n lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(np.float32)\n rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(np.float32)\n dtype = jnp.promote_types(lhs_dtype, rhs_dtype)\n return np.inner(lhs, rhs).astype(dtype)\n jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)\n tol_spec = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-13,\n np.complex64: 1e-5}\n if jtu.device_under_test() == \"tpu\":\n tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1\n tol = max(jtu.tolerance(lhs_dtype, 
tol_spec),\n jtu.tolerance(rhs_dtype, tol_spec))\n # TODO(phawkins): there are float32/float64 disagreements for some inputs.\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol, rtol=tol)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_deg={}_rcond={}_full={}_w={}_cov={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n deg,\n rcond,\n full,\n w,\n cov),\n \"shape\": shape, \"dtype\": dtype, \"deg\": deg,\n \"rcond\": rcond, \"full\": full, \"w\":w, \"cov\":cov}\n for dtype in [dt for dt in float_dtypes if dt not in [jnp.float16, jnp.bfloat16]]\n for shape in [shape for shape in one_dim_array_shapes if shape != (1,)]\n for deg in [1, 2, 3]\n for rcond in [None, -1, 10e-3, 10e-5, 10e-10]\n for full in [False, True]\n for w in [False, True]\n for cov in [False, True, \"unscaled\"]))\n def testPolyfit(self, shape, dtype, deg, rcond, full, w, cov):\n rng = jtu.rand_default(self.rng())\n tol_spec = {np.float32: 1e-3, np.float64: 1e-13, np.complex64: 1e-5}\n if jtu.device_under_test() == \"tpu\":\n tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1\n tol = jtu.tolerance(dtype, tol_spec)\n _w = lambda a: abs(a) if w else None\n args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]\n jnp_fun = lambda x, y, a: jnp.polyfit(x, y, deg=deg, rcond=rcond, full=full, w=_w(a), cov=cov)\n np_fun = jtu.ignore_warning(\n message=\"Polyfit may be poorly conditioned*\")(lambda x, y, a: np.polyfit(x, y, deg=deg, rcond=rcond, full=full, w=_w(a), cov=cov))\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol, rtol=tol)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_amin={}_amax={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),\n \"shape\": shape, \"dtype\": dtype, \"a_min\": a_min, \"a_max\": a_max}\n for shape in all_shapes for dtype in number_dtypes\n for a_min, a_max in [(-1, None), (None, 1), (-0.9, 1),\n (-np.ones(1), None),\n (None, np.ones(1)),\n (np.full(1, -0.9), np.ones(1))]))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testClipStaticBounds(self, shape, dtype, a_min, a_max):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.clip(x, a_min=a_min, a_max=a_max)\n jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testClipError(self):\n with self.assertRaisesRegex(ValueError, \"At most one of a_min and a_max.*\"):\n jnp.clip(jnp.zeros((3,)))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_decimals={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), decimals),\n \"shape\": shape, \"dtype\": dtype, \"decimals\": decimals}\n for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)\n for decimals in [0, 1, -2]))\n def testRoundStaticDecimals(self, shape, dtype, decimals):\n rng = jtu.rand_default(self.rng())\n if jnp.issubdtype(dtype, np.integer) and decimals < 0:\n self.skipTest(\"Integer rounding with decimals < 0 not implemented\")\n np_fun = lambda x: np.round(x, decimals=decimals)\n jnp_fun = lambda x: jnp.round(x, decimals=decimals)\n args_maker = lambda: [rng(shape, 
dtype)]\n tol = {jnp.bfloat16: 5e-2, np.float16: 1e-2}\n check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=check_dtypes, tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,\n atol=tol, rtol=tol)\n\n def testOperatorRound(self):\n self.assertAllClose(round(np.float32(7.532), 1),\n round(jnp.float32(7.5), 1))\n self.assertAllClose(round(np.float32(1.234), 2),\n round(jnp.float32(1.234), 2))\n self.assertAllClose(round(np.float32(1.234)),\n round(jnp.float32(1.234)), check_dtypes=False)\n self.assertAllClose(round(np.float32(7.532), 1),\n round(jnp.array(7.5, jnp.float32), 1))\n self.assertAllClose(round(np.float32(1.234), 2),\n round(jnp.array(1.234, jnp.float32), 2))\n self.assertAllClose(round(np.float32(1.234)),\n round(jnp.array(1.234, jnp.float32)),\n check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_mode={}_padwidth={}_constantvalues={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), mode, pad_width,\n constant_values),\n \"shape\": shape, \"dtype\": dtype, \"mode\": mode,\n \"pad_width\": pad_width, \"constant_values\": constant_values}\n for mode, shapes in [\n ('constant', all_shapes),\n ('wrap', nonempty_shapes),\n ('edge', nonempty_shapes),\n ]\n for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)\n for constant_values in [\n # None is used for modes other than 'constant'\n None,\n # constant\n 0, 1,\n # (constant,)\n (0,), (2.718,),\n # ((before_const, after_const),)\n ((0, 2),), ((-1, 3.14),),\n # ((before_1, after_1), ..., (before_N, after_N))\n tuple((i / 2, -3.14 * i) for i in range(len(shape))),\n ]\n for pad_width in [\n # ((before_1, after_1), ..., (before_N, after_N))\n tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),\n # ((before, after),)\n ((1, 2),), ((2, 0),),\n # (before, after) (not in the docstring but works in numpy)\n (2, 0), (0, 0),\n # (pad,)\n (1,), (2,),\n # pad\n 0, 1,\n ]\n if (pad_width != () and constant_values != () and\n ((mode == 'constant' and constant_values is not None) or\n (mode != 'constant' and constant_values is None)))))\n def testPad(self, shape, dtype, mode, pad_width, constant_values):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n if constant_values is None:\n np_fun = partial(np.pad, pad_width=pad_width, mode=mode)\n jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode)\n else:\n np_fun = partial(np.pad, pad_width=pad_width, mode=mode,\n constant_values=constant_values)\n jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode,\n constant_values=constant_values)\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_mode={}_pad_width={}_stat_length={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, stat_length),\n \"shape\": shape, \"dtype\": dtype, \"mode\": mode, \"pad_width\": pad_width,\n \"stat_length\": stat_length}\n for mode in ['maximum', 'minimum', 'mean', 'median']\n for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)\n for pad_width in [\n # ((before_1, after_1), ..., (before_N, after_N))\n tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),\n # ((before, after),)\n ((1, 2),), ((2, 0),),\n # (before, after) (not in the docstring but works in numpy)\n (2, 0), (0, 0),\n # 
(pad,)\n (1,), (2,),\n # pad\n 0, 1,\n ]\n for stat_length in [\n None,\n # ((before_1, after_1), ..., (before_N, after_N))\n tuple(((i % 3 + 1), ((i + 1) % 3) + 1) for i in range(len(shape))),\n # ((before, after),)\n ((1, 2),), ((2, 2),),\n # (before, after) (not in the docstring but works in numpy)\n (1, 1), (3, 4),\n # (pad,)\n (1,), (2,),\n # pad\n 1, 2\n ]\n if (pad_width != () and stat_length != () and\n not (dtype in bool_dtypes and mode == 'mean'))))\n def testPadStatValues(self, shape, dtype, mode, pad_width, stat_length):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n\n np_fun = partial(np.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)\n jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_mode={}_pad_width={}_reflect_type={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, reflect_type),\n \"shape\": shape, \"dtype\": dtype, \"mode\": mode, \"pad_width\": pad_width,\n \"reflect_type\": reflect_type}\n for mode in ['symmetric', 'reflect']\n for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)\n for pad_width in [\n # ((before_1, after_1), ..., (before_N, after_N))\n tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),\n # ((before, after),)\n ((1, 2),), ((2, 3),),\n # (before, after) (not in the docstring but works in numpy)\n (2, 1), (1, 2),\n # (pad,)\n (1,), (2,), (3,),\n # pad\n 0, 5, 7, 10\n ]\n for reflect_type in ['even', 'odd']\n if (pad_width != () and\n # following types lack precision when calculating odd values\n (reflect_type != 'odd' or dtype not in [np.bool_, np.float16, jnp.bfloat16]))))\n def testPadSymmetricAndReflect(self, shape, dtype, mode, pad_width, reflect_type):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n\n np_fun = partial(np.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)\n jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE,\n tol={np.float32: 1e-3, np.complex64: 1e-3})\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_mode={}_pad_width={}_end_values={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), \"linear_ramp\", pad_width, end_values),\n \"shape\": shape, \"dtype\": dtype, \"pad_width\": pad_width,\n \"end_values\": end_values}\n for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)\n for pad_width in [\n # ((before_1, after_1), ..., (before_N, after_N))\n tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),\n # ((before, after),)\n ((1, 2),), ((2, 0),),\n # (before, after) (not in the docstring but works in numpy)\n (2, 0), (0, 0),\n # (pad,)\n (1,), (2,),\n # pad\n 0, 1,\n ]\n for end_values in [\n # ((before_1, after_1), ..., (before_N, after_N))\n tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),\n # ((before, after),)\n ((1, 2),), ((2.0, 3.14),),\n # (before, after) (not in the docstring but works in numpy)\n (0, 0), (-8.0, 2.0),\n # (end_values,)\n (1,), (2,),\n # end_values\n 0, 1, 100, 10.0, 3.5, 4.2, -5, -3\n ]\n if (pad_width != () and end_values != () and\n # 
following types lack precision\n dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16])))\n def testPadLinearRamp(self, shape, dtype, pad_width, end_values):\n if numpy_version < (1, 20) and np.issubdtype(dtype, np.integer):\n raise unittest.SkipTest(\"NumPy 1.20 changed the semantics of np.linspace\")\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n\n np_fun = partial(np.pad, pad_width=pad_width, mode=\"linear_ramp\",\n end_values=end_values)\n jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=\"linear_ramp\",\n end_values=end_values)\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testPadEmpty(self):\n arr = np.arange(6).reshape(2, 3)\n\n pad_width = ((2, 3), (3, 1))\n np_res = np.pad(arr, pad_width=pad_width, mode=\"empty\")\n jnp_res = jnp.pad(arr, pad_width=pad_width, mode=\"empty\")\n\n np.testing.assert_equal(np_res.shape, jnp_res.shape)\n np.testing.assert_equal(arr, np_res[2:-3, 3:-1])\n np.testing.assert_equal(arr, jnp_res[2:-3, 3:-1])\n np.testing.assert_equal(np_res[2:-3, 3:-1], jnp_res[2:-3, 3:-1])\n\n def testPadKwargs(self):\n modes = {\n 'constant': {'constant_values': 0},\n 'edge': {},\n 'linear_ramp': {'end_values': 0},\n 'maximum': {'stat_length': None},\n 'mean': {'stat_length': None},\n 'median': {'stat_length': None},\n 'minimum': {'stat_length': None},\n 'reflect': {'reflect_type': 'even'},\n 'symmetric': {'reflect_type': 'even'},\n 'wrap': {},\n 'empty': {}\n }\n arr = jnp.array([1, 2, 3])\n pad_width = 1\n\n for mode in modes.keys():\n allowed = modes[mode]\n not_allowed = {}\n for kwargs in modes.values():\n if kwargs != allowed:\n not_allowed.update(kwargs)\n\n # Test if allowed keyword arguments pass\n jnp.pad(arr, pad_width, mode, **allowed)\n # Test if prohibited keyword arguments of other modes raise an error\n match = \"unsupported keyword arguments for mode '{}'\".format(mode)\n for key, value in not_allowed.items():\n with self.assertRaisesRegex(ValueError, match):\n jnp.pad(arr, pad_width, mode, **{key: value})\n\n # Test if unsupported mode raise error.\n unsupported_modes = [1, None, \"foo\"]\n for mode in unsupported_modes:\n match = \"Unimplemented padding mode '{}' for np.pad.\".format(mode)\n with self.assertRaisesRegex(NotImplementedError, match):\n jnp.pad(arr, pad_width, mode)\n\n def testPadFunction(self):\n def np_pad_with(vector, pad_width, iaxis, kwargs):\n pad_value = kwargs.get('padder', 10)\n vector[:pad_width[0]] = pad_value\n vector[-pad_width[1]:] = pad_value\n\n def jnp_pad_with(vector, pad_width, iaxis, kwargs):\n pad_value = kwargs.get('padder', 10)\n vector = vector.at[:pad_width[0]].set(pad_value)\n vector = vector.at[-pad_width[1]:].set(pad_value)\n return vector\n\n arr = np.arange(6).reshape(2, 3)\n np_res = np.pad(arr, 2, np_pad_with)\n jnp_res = jnp.pad(arr, 2, jnp_pad_with)\n np.testing.assert_equal(np_res, jnp_res)\n\n arr = np.arange(24).reshape(2, 3, 4)\n np_res = np.pad(arr, 1, np_pad_with, padder=100)\n jnp_res = jnp.pad(arr, 1, jnp_pad_with, padder=100)\n np.testing.assert_equal(np_res, jnp_res)\n\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(arr.shape, arr.dtype)]\n jnp_fun = partial(jnp.pad, pad_width=1, mode=jnp_pad_with)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testPadWithNumpyPadWidth(self):\n a = jnp.array([1, 2, 3, 4, 5])\n f = jax.jit(\n partial(\n jnp.pad,\n pad_width=np.asarray((2, 3)),\n mode=\"constant\",\n 
constant_values=(4, 6)))\n\n np.testing.assert_array_equal(\n f(a),\n np.pad(\n a,\n pad_width=np.asarray((2, 3)),\n mode=\"constant\",\n constant_values=(4, 6)))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape=[{}]_reps={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), reps),\n \"shape\": shape, \"dtype\": dtype, \"reps\": reps}\n for reps in [(), (2,), (3, 4), (2, 3, 4), (1, 0, 2)]\n for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)\n ))\n def testTile(self, shape, dtype, reps):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg: np.tile(arg, reps)\n jnp_fun = lambda arg: jnp.tile(arg, reps)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in all_shapes\n for dtype in all_dtypes))\n def testExtract(self, shape, dtype):\n rng = jtu.rand_some_zero(self.rng())\n args_maker = lambda: [rng(shape, jnp.float32), rng(shape, dtype)]\n self._CheckAgainstNumpy(np.extract, jnp.extract, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_ncond={}_nfunc={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), ncond, nfunc),\n \"shape\": shape, \"dtype\": dtype, \"ncond\": ncond, \"nfunc\": nfunc}\n for ncond in [1, 2, 3]\n for nfunc in [ncond, ncond + 1]\n for shape in all_shapes\n for dtype in all_dtypes))\n def testPiecewise(self, shape, dtype, ncond, nfunc):\n rng = jtu.rand_default(self.rng())\n rng_bool = jtu.rand_int(self.rng(), 0, 2)\n funclist = [lambda x: x - 1, 1, lambda x: x, 0][:nfunc]\n args_maker = lambda: (rng(shape, dtype), [rng_bool(shape, bool) for i in range(ncond)])\n np_fun = partial(np.piecewise, funclist=funclist)\n jnp_fun = partial(jnp.piecewise, funclist=funclist)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)\n # This is a higher-order function, so the cache miss check will fail.\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, check_cache_misses=False)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_perm={}_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype), perm, arg_type),\n \"dtype\": dtype, \"shape\": shape, \"perm\": perm, \"arg_type\": arg_type}\n for dtype in default_dtypes\n for shape in array_shapes\n for arg_type in [\"splat\", \"value\"]\n for perm in [None, tuple(np.random.RandomState(0).permutation(np.zeros(shape).ndim))]))\n def testTransposeTuple(self, shape, dtype, perm, arg_type):\n rng = jtu.rand_some_zero(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n if arg_type == \"value\":\n np_fun = lambda x: x.transpose(perm)\n jnp_fun = lambda x: jnp.array(x).transpose(perm)\n else:\n np_fun = lambda x: x.transpose(*(perm or ()))\n jnp_fun = lambda x: jnp.array(x).transpose(*(perm or ()))\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_trim={}\".format(\n jtu.format_shape_dtype_string(a_shape, dtype), trim),\n \"dtype\": dtype, \"a_shape\": a_shape, \"trim\": trim}\n for dtype in default_dtypes\n for a_shape in 
one_dim_array_shapes\n for trim in [\"f\", \"b\", \"fb\"]))\n def testTrimZeros(self, a_shape, dtype, trim):\n rng = jtu.rand_some_zero(self.rng())\n args_maker = lambda: [rng(a_shape, dtype)]\n np_fun = lambda arg1: np.trim_zeros(arg1, trim)\n jnp_fun = lambda arg1: jnp.trim_zeros(arg1, trim)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_rank{}\".format(\n jtu.format_shape_dtype_string(a_shape, dtype), rank),\n \"dtype\": dtype, \"a_shape\": a_shape, \"rank\": rank}\n for rank in (1, 2)\n for dtype in default_dtypes\n for a_shape in one_dim_array_shapes))\n def testPoly(self, a_shape, dtype, rank):\n if dtype in (np.float16, jnp.bfloat16, np.int16):\n self.skipTest(f\"{dtype} gets promoted to {np.float16}, which is not supported.\")\n elif rank == 2 and jtu.device_under_test() in (\"tpu\", \"gpu\"):\n self.skipTest(\"Nonsymmetric eigendecomposition is only implemented on the CPU backend.\")\n rng = jtu.rand_default(self.rng())\n tol = { np.int8: 1e-3, np.int32: 1e-3, np.float32: 1e-3, np.float64: 1e-6 }\n if jtu.device_under_test() == \"tpu\":\n tol[np.int32] = tol[np.float32] = 1e-1\n tol = jtu.tolerance(dtype, tol)\n args_maker = lambda: [rng(a_shape * rank, dtype)]\n self._CheckAgainstNumpy(np.poly, jnp.poly, args_maker, check_dtypes=False, tol=tol)\n self._CompileAndCheck(jnp.poly, args_maker, check_dtypes=True, rtol=tol, atol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"a_shape={} , b_shape={}\".format(\n jtu.format_shape_dtype_string(a_shape, dtype),\n jtu.format_shape_dtype_string(b_shape, dtype)),\n \"dtype\": dtype, \"a_shape\": a_shape, \"b_shape\" : b_shape}\n for dtype in default_dtypes\n for a_shape in one_dim_array_shapes\n for b_shape in one_dim_array_shapes))\n def testPolyAdd(self, a_shape, b_shape, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg1, arg2: np.polyadd(arg1, arg2)\n jnp_fun = lambda arg1, arg2: jnp.polyadd(arg1, arg2)\n args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"a_shape={} , b_shape={}\".format(\n jtu.format_shape_dtype_string(a_shape, dtype),\n jtu.format_shape_dtype_string(b_shape, dtype)),\n \"dtype\": dtype, \"a_shape\": a_shape, \"b_shape\" : b_shape}\n for dtype in default_dtypes\n for a_shape in one_dim_array_shapes\n for b_shape in one_dim_array_shapes))\n def testPolySub(self, a_shape, b_shape, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg1, arg2: np.polysub(arg1, arg2)\n jnp_fun = lambda arg1, arg2: jnp.polysub(arg1, arg2)\n args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_order={}_k={}\".format(\n jtu.format_shape_dtype_string(a_shape, dtype),\n order, k),\n \"dtype\": dtype, \"a_shape\": a_shape, \"order\" : order, \"k\": k}\n for dtype in default_dtypes\n for a_shape in one_dim_array_shapes\n for order in range(5)\n for k in [np.arange(order, dtype=dtype), np.ones(1, dtype), None]))\n def testPolyInt(self, a_shape, order, k, dtype):\n rng = 
jtu.rand_default(self.rng())\n np_fun = lambda arg1: np.polyint(arg1, m=order, k=k)\n jnp_fun = lambda arg1: jnp.polyint(arg1, m=order, k=k)\n args_maker = lambda: [rng(a_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_order={}\".format(\n jtu.format_shape_dtype_string(a_shape, dtype),\n order),\n \"dtype\": dtype, \"a_shape\": a_shape, \"order\" : order}\n for dtype in default_dtypes\n for a_shape in one_dim_array_shapes\n for order in range(5)))\n def testPolyDer(self, a_shape, order, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg1: np.polyder(arg1, m=order)\n jnp_fun = lambda arg1: jnp.polyder(arg1, m=order)\n args_maker = lambda: [rng(a_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_ptype={}\".format(ptype), \"ptype\": ptype}\n for ptype in ['int', 'np.int', 'jnp.int']))\n def testIntegerPower(self, ptype):\n p = {'int': 2, 'np.int': np.int32(2), 'jnp.int': jnp.int32(2)}[ptype]\n jaxpr = jax.make_jaxpr(partial(jnp.power, x2=p))(1)\n eqns = jaxpr.jaxpr.eqns\n self.assertLen(eqns, 1)\n self.assertEqual(eqns[0].primitive, lax.integer_pow_p)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_x={}_y={}\".format(x, y), \"x\": x, \"y\": y}\n for x in [-1, 0, 1]\n for y in [0, 32, 64, 128]))\n def testIntegerPowerOverflow(self, x, y):\n # Regression test for https://github.com/google/jax/issues/5987\n args_maker = lambda: [x, y]\n self._CheckAgainstNumpy(np.power, jnp.power, args_maker)\n self._CompileAndCheck(jnp.power, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in all_shapes\n for dtype in all_dtypes\n for axis in [None] + list(range(len(shape)))))\n def testCompress(self, shape, dtype, axis):\n rng = jtu.rand_some_zero(self.rng())\n if shape in scalar_shapes or len(shape) == 0:\n cond_shape = (0,)\n elif axis is None:\n cond_shape = (prod(shape),)\n else:\n cond_shape = (shape[axis],)\n\n args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]\n\n np_fun = partial(np.compress, axis=axis)\n jnp_fun = partial(jnp.compress, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_condition=array[{}]_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), len(condition), axis),\n \"shape\": shape, \"dtype\": dtype, \"condition\": condition, \"axis\": axis}\n for shape in [(2, 3)]\n for dtype in int_dtypes\n # condition entries beyond axis size must be zero.\n for condition in [[1], [1, 0, 0, 0, 0, 0, 0]]\n for axis in [None, 0, 1]))\n def testCompressMismatchedShapes(self, shape, dtype, condition, axis):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [np.array(condition), rng(shape, dtype)]\n np_fun = partial(np.compress, axis=axis)\n jnp_fun = partial(jnp.compress, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": 
\"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in array_shapes\n for dtype in all_dtypes\n for axis in [None] + list(range(len(shape)))))\n def testCompressMethod(self, shape, dtype, axis):\n rng = jtu.rand_some_zero(self.rng())\n if shape in scalar_shapes or len(shape) == 0:\n cond_shape = (0,)\n elif axis is None:\n cond_shape = (prod(shape),)\n else:\n cond_shape = (shape[axis],)\n\n args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]\n\n np_fun = lambda condition, x: np.compress(condition, x, axis=axis)\n jnp_fun = lambda condition, x: x.compress(condition, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_axis={}_baseshape=[{}]_dtypes=[{}]\".format(\n axis, \",\".join(str(d) for d in base_shape),\n \",\".join(np.dtype(dtype).name for dtype in arg_dtypes)),\n \"axis\": axis, \"base_shape\": base_shape, \"arg_dtypes\": arg_dtypes}\n for num_arrs in [3]\n for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, num_arrs)\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for axis in range(-len(base_shape)+1, len(base_shape))))\n def testConcatenate(self, axis, base_shape, arg_dtypes):\n rng = jtu.rand_default(self.rng())\n wrapped_axis = axis % len(base_shape)\n shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]\n def np_fun(*args):\n args = [x if x.dtype != jnp.bfloat16 else x.astype(np.float32)\n for x in args]\n dtype = functools.reduce(jnp.promote_types, arg_dtypes)\n return np.concatenate(args, axis=axis).astype(dtype)\n jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)\n\n def args_maker():\n return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in [(4, 1), (4, 3), (4, 5, 6)]\n for dtype in all_dtypes\n for axis in [None] + list(range(1 - len(shape), len(shape) - 1))))\n def testConcatenateArray(self, shape, dtype, axis):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda x: np.concatenate(x, axis=axis)\n jnp_fun = lambda x: jnp.concatenate(x, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testConcatenateAxisNone(self):\n # https://github.com/google/jax/issues/3419\n a = jnp.array([[1, 2], [3, 4]])\n b = jnp.array([[5]])\n jnp.concatenate((a, b), axis=None)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_axis={}_baseshape=[{}]_dtypes=[{}]\".format(\n axis, \",\".join(str(d) for d in base_shape),\n \",\".join(np.dtype(dtype).name for dtype in arg_dtypes)),\n \"axis\": axis, \"base_shape\": base_shape, \"arg_dtypes\": arg_dtypes}\n for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, 2)\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for axis in range(-len(base_shape)+1, len(base_shape))))\n def testAppend(self, axis, base_shape, arg_dtypes):\n rng = jtu.rand_default(self.rng())\n wrapped_axis = axis % len(base_shape)\n shapes = [base_shape[:wrapped_axis] 
+ (size,) + base_shape[wrapped_axis+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]\n def np_fun(arr, values):\n arr = arr.astype(np.float32) if arr.dtype == jnp.bfloat16 else arr\n values = (values.astype(np.float32) if values.dtype == jnp.bfloat16\n else values)\n out = np.append(arr, values, axis=axis)\n return out.astype(jnp.promote_types(*arg_dtypes))\n jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)\n\n def args_maker():\n return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_idx={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, idx),\n \"dtype\": dtype, \"shape\": shape, \"axis\": axis, \"idx\": idx}\n for shape in nonempty_nonscalar_array_shapes\n for dtype in all_dtypes\n for axis in [None] + list(range(-len(shape), len(shape)))\n for idx in (range(-prod(shape), prod(shape))\n if axis is None else\n range(-shape[axis], shape[axis]))))\n def testDeleteInteger(self, shape, dtype, idx, axis):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda arg: np.delete(arg, idx, axis=axis)\n jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_slc={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, slc),\n \"dtype\": dtype, \"shape\": shape, \"axis\": axis, \"slc\": slc}\n for shape in nonempty_nonscalar_array_shapes\n for dtype in all_dtypes\n for axis in [None] + list(range(-len(shape), len(shape)))\n for slc in [slice(None), slice(1, 3), slice(1, 5, 2)]))\n def testDeleteSlice(self, shape, dtype, axis, slc):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda arg: np.delete(arg, slc, axis=axis)\n jnp_fun = lambda arg: jnp.delete(arg, slc, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_idx={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis,\n jtu.format_shape_dtype_string(idx_shape, int)),\n \"dtype\": dtype, \"shape\": shape, \"axis\": axis, \"idx_shape\": idx_shape}\n for shape in nonempty_nonscalar_array_shapes\n for dtype in all_dtypes\n for axis in [None] + list(range(-len(shape), len(shape)))\n for idx_shape in all_shapes))\n def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):\n rng = jtu.rand_default(self.rng())\n max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]\n # Previous to numpy 1.19, negative indices were ignored so we don't test this.\n low = 0 if numpy_version < (1, 19, 0) else -max_idx\n idx = jtu.rand_int(self.rng(), low=low, high=max_idx)(idx_shape, int)\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda arg: np.delete(arg, idx, axis=axis)\n jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @unittest.skipIf(numpy_version < (1, 19), \"boolean mask not supported in numpy < 1.19.0\")\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n 
jtu.format_shape_dtype_string(shape, dtype), axis),\n \"dtype\": dtype, \"shape\": shape, \"axis\": axis}\n for shape in nonempty_nonscalar_array_shapes\n for dtype in all_dtypes\n for axis in [None] + list(range(-len(shape), len(shape)))))\n def testDeleteMaskArray(self, shape, dtype, axis):\n rng = jtu.rand_default(self.rng())\n mask_size = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]\n mask = jtu.rand_int(self.rng(), low=0, high=2)(mask_size, bool)\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda arg: np.delete(arg, mask, axis=axis)\n jnp_fun = lambda arg: jnp.delete(arg, mask, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_out_dims={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n axis, out_dims),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis, \"out_dims\": out_dims}\n for shape in nonempty_array_shapes\n for dtype in default_dtypes\n for axis in range(-len(shape), len(shape))\n for out_dims in [0, 1, 2]))\n def testApplyAlongAxis(self, shape, dtype, axis, out_dims):\n def func(x, out_dims):\n if out_dims == 0:\n return x.sum()\n elif out_dims == 1:\n return x * x[0]\n elif out_dims == 2:\n return x[:, None] + x[None, :]\n else:\n raise NotImplementedError(f\"out_dims={out_dims}\")\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda arr: np.apply_along_axis(func, axis, arr, out_dims=out_dims)\n jnp_fun = lambda arr: jnp.apply_along_axis(func, axis, arr, out_dims=out_dims)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_func={}_keepdims={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n func, keepdims, axes),\n \"shape\": shape, \"dtype\": dtype, \"func\": func, \"keepdims\": keepdims, \"axes\": axes}\n for shape in nonempty_shapes\n for func in [\"sum\"]\n for keepdims in [True, False]\n for axes in itertools.combinations(range(len(shape)), 2)\n # Avoid low-precision types in sum()\n for dtype in default_dtypes if dtype not in [np.float16, jnp.bfloat16]))\n def testApplyOverAxes(self, shape, dtype, func, keepdims, axes):\n f = lambda x, axis: getattr(x, func)(axis=axis, keepdims=keepdims)\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: (rng(shape, dtype),)\n np_fun = lambda a: np.apply_over_axes(f, a, axes)\n jnp_fun = lambda a: jnp.apply_over_axes(f, a, axes)\n self._CompileAndCheck(jnp_fun, args_maker)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape=[{}]_axis={}_repeats={}_fixed_size={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n axis, repeats, fixed_size),\n \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"repeats\": repeats,\n 'fixed_size': fixed_size}\n for repeats in [0, 1, 2]\n for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)\n for axis in [None] + list(range(-len(shape), max(1, len(shape))))\n for fixed_size in [True, False]))\n def testRepeat(self, axis, shape, dtype, repeats, fixed_size):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg: np.repeat(arg, repeats=repeats, axis=axis)\n np_fun = _promote_like_jnp(np_fun)\n if fixed_size:\n total_repeat_length = np.repeat(np.zeros(shape), repeats, axis).shape[axis or 0]\n 
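# Supplying total_repeat_length pins the size of the repeated axis statically,\n # which is what lets jnp.repeat take `repeats` as a traced argument under jit\n # below: the output shape no longer depends on the traced value.\n 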
jnp_fun = lambda arg, rep: jnp.repeat(arg, repeats=rep, axis=axis,\n total_repeat_length=total_repeat_length)\n jnp_args_maker = lambda: [rng(shape, dtype), repeats]\n clo_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis,\n total_repeat_length=total_repeat_length)\n clo_fun_args_maker = lambda: [rng(shape, dtype)]\n self._CompileAndCheck(jnp_fun, jnp_args_maker)\n self._CheckAgainstNumpy(np_fun, clo_fun, clo_fun_args_maker)\n else:\n # Now repeats is in a closure, so a constant.\n jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testRepeatScalarFastPath(self):\n a = jnp.array([1,2,3,4])\n f = lambda a: jnp.repeat(a, repeats=2)\n jaxpr = jax.make_jaxpr(f)(a)\n self.assertLessEqual(len(jaxpr.jaxpr.eqns), 6)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_ind={}_inv={}_count={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis,\n return_index, return_inverse, return_counts),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis,\n \"return_index\": return_index, \"return_inverse\": return_inverse,\n \"return_counts\": return_counts}\n for dtype in number_dtypes\n for shape in all_shapes\n for axis in [None] + list(range(len(shape)))\n for return_index in [False, True]\n for return_inverse in [False, True]\n for return_counts in [False, True]))\n def testUnique(self, shape, dtype, axis, return_index, return_inverse, return_counts):\n if axis is not None and numpy_version < (1, 19) and np.empty(shape).size == 0:\n self.skipTest(\"zero-sized axis in unique leads to error in older numpy.\")\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda x: np.unique(x, return_index, return_inverse, return_counts, axis=axis)\n jnp_fun = lambda x: jnp.unique(x, return_index, return_inverse, return_counts, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_size={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), size),\n \"shape\": shape, \"dtype\": dtype, \"size\": size}\n for dtype in number_dtypes\n for size in [1, 5, 10]\n for shape in nonempty_array_shapes))\n def testUniqueSize(self, shape, dtype, size):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n kwds = dict(return_index=True, return_inverse=True, return_counts=True)\n\n def np_fun(x):\n u, ind, inv, counts = jnp.unique(x, **kwds)\n if size <= len(u):\n u, ind, counts = u[:size], ind[:size], counts[:size]\n else:\n extra = size - len(u)\n u = np.concatenate([u, np.full(extra, u[0], u.dtype)])\n ind = np.concatenate([ind, np.full(extra, ind[0], ind.dtype)])\n counts = np.concatenate([counts, np.zeros(extra, counts.dtype)])\n return u, ind, inv, counts\n\n jnp_fun = lambda x: jnp.unique(x, size=size, **kwds)\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_fixed_size={}\".format(fixed_size),\n \"fixed_size\": fixed_size}\n for fixed_size in [True, False]))\n def testNonScalarRepeats(self, fixed_size):\n '''\n Following numpy test suite from `test_repeat` at\n https://github.com/numpy/numpy/blob/main/numpy/core/tests/test_multiarray.py\n '''\n tol = 1e-5\n\n def test_single(m, args_maker, repeats, 
axis):\n lax_ans = jnp.repeat(m, repeats, axis)\n numpy_ans = np.repeat(m, repeats, axis)\n\n self.assertAllClose(lax_ans, numpy_ans, rtol=tol, atol=tol)\n if fixed_size:\n\n # Calculate expected size of the repeated axis.\n rep_length = np.repeat(np.zeros_like(m), repeats, axis).shape[axis or 0]\n jnp_fun = lambda arg, rep: jnp.repeat(\n arg, repeats=rep, axis=axis, total_repeat_length=rep_length)\n else:\n jnp_fun = lambda arg: jnp.repeat(arg, repeats = repeats, axis=axis)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n m = jnp.array([1,2,3,4,5,6])\n if fixed_size:\n args_maker = lambda: [m, repeats]\n else:\n args_maker = lambda: [m]\n\n for repeats in [2, jnp.array([1,3,0,1,1,2]), jnp.array([1,3,2,1,1,2]), jnp.array([2])]:\n test_single(m, args_maker, repeats, axis=None)\n test_single(m, args_maker, repeats, axis=0)\n\n m_rect = m.reshape((2,3))\n if fixed_size:\n args_maker = lambda: [m_rect, repeats]\n else:\n args_maker = lambda: [m_rect]\n\n for repeats in [2, jnp.array([2,1]), jnp.array([2])]:\n test_single(m_rect, args_maker, repeats, axis=0)\n\n for repeats in [2, jnp.array([1,3,2]), jnp.array([2])]:\n test_single(m_rect, args_maker, repeats, axis=1)\n\n def testIssue2330(self):\n '''\n Make sure return value of jnp.concatenate is a jax.ndarray and is side-effect save\n '''\n def attempt_sideeffect(x):\n x = [x]\n x = jnp.concatenate(x)\n x -= 1.\n return x\n\n np_input = np.ones((1))\n jnp_input = jnp.ones((1))\n expected_np_input_after_call = np.ones((1))\n expected_jnp_input_after_call = jnp.ones((1))\n\n self.assertTrue(xla.type_is_device_array(jnp.concatenate([np_input])))\n\n attempt_sideeffect(np_input)\n attempt_sideeffect(jnp_input)\n\n self.assertAllClose(np_input, expected_np_input_after_call)\n self.assertAllClose(jnp_input, expected_jnp_input_after_call)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"op={}_xshape=[{}]_yshape=[{}]_mode={}\".format(\n op,\n jtu.format_shape_dtype_string(xshape, dtype),\n jtu.format_shape_dtype_string(yshape, dtype),\n mode),\n \"xshape\": xshape, \"yshape\": yshape, \"dtype\": dtype, \"mode\": mode,\n \"jnp_op\": getattr(jnp, op),\n \"np_op\": getattr(np, op)}\n for mode in ['full', 'same', 'valid']\n for op in ['convolve', 'correlate']\n for dtype in number_dtypes\n for xshape in one_dim_array_shapes\n for yshape in one_dim_array_shapes))\n def testConvolutions(self, xshape, yshape, dtype, mode, jnp_op, np_op):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]\n precision = lax.Precision.HIGHEST if jtu.device_under_test() == \"tpu\" else None\n np_fun = partial(np_op, mode=mode)\n jnp_fun = partial(jnp_op, mode=mode, precision=precision)\n tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,\n np.complex128: 1e-14}\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"op={}_shape=[{}]_axis={}_out_dtype={}\".format(\n op, jtu.format_shape_dtype_string(shape, dtype), axis,\n out_dtype.__name__),\n \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype,\n \"jnp_op\": getattr(jnp, op), \"np_op\": getattr(np, op)}\n for op in [\"cumsum\", \"cumprod\"]\n for dtype in all_dtypes\n for out_dtype in default_dtypes\n for shape in all_shapes\n for axis in [None] + list(range(-len(shape), len(shape)))))\n def testCumSumProd(self, axis, shape, dtype, out_dtype, np_op, 
jnp_op):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg: np_op(arg, axis=axis, dtype=out_dtype)\n np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)\n jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)\n jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n tol_thresholds = {dtypes.bfloat16: 4e-2}\n tol = max(jtu.tolerance(dtype, tol_thresholds),\n jtu.tolerance(out_dtype, tol_thresholds))\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"op={}_shape=[{}]_axis={}_out_dtype={}\".format(\n op, jtu.format_shape_dtype_string(shape, dtype), axis,\n out_dtype.__name__),\n \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype,\n \"jnp_op\": getattr(jnp, op), \"np_op\": getattr(np, op)}\n for op in [\"nancumsum\", \"nancumprod\"]\n for dtype in all_dtypes\n for out_dtype in default_dtypes\n for shape in all_shapes\n for axis in [None] + list(range(-len(shape), len(shape)))))\n def testNanCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):\n rng = jtu.rand_some_nan(self.rng())\n np_fun = partial(np_op, axis=axis, dtype=out_dtype)\n np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)\n jnp_fun = partial(jnp_op, axis=axis, dtype=out_dtype)\n jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n tol_thresholds = {dtypes.bfloat16: 4e-2}\n tol = max(jtu.tolerance(dtype, tol_thresholds),\n jtu.tolerance(out_dtype, tol_thresholds))\n if dtype != jnp.bfloat16:\n # numpy functions do not properly handle bfloat16\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_yshape={}_xshape={}_dx={}_axis={}\".format(\n jtu.format_shape_dtype_string(yshape, dtype),\n jtu.format_shape_dtype_string(xshape, dtype) if xshape is not None else None,\n dx, axis),\n \"yshape\": yshape, \"xshape\": xshape, \"dtype\": dtype, \"dx\": dx, \"axis\": axis}\n for dtype in default_dtypes\n for yshape, xshape, dx, axis in [\n ((10,), None, 1.0, -1),\n ((3, 10), None, 2.0, -1),\n ((3, 10), None, 3.0, -0),\n ((10, 3), (10,), 1.0, -2),\n ((3, 10), (10,), 1.0, -1),\n ((3, 10), (3, 10), 1.0, -1),\n ((2, 3, 10), (3, 10), 1.0, -2),\n ]))\n @jtu.skip_on_devices(\"tpu\") # TODO(jakevdp): fix and reenable this test.\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testTrapz(self, yshape, xshape, dtype, dx, axis):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]\n np_fun = partial(np.trapz, dx=dx, axis=axis)\n jnp_fun = partial(jnp.trapz, dx=dx, axis=axis)\n tol = jtu.tolerance(dtype, {np.float64: 1e-12,\n dtypes.bfloat16: 4e-2})\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,\n check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,\n check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dtype={}_m={}_n={}_k={}\".format(\n np.dtype(dtype).name, m, n, k),\n \"m\": m, \"n\": n, \"k\": k, \"dtype\": dtype}\n for dtype in default_dtypes\n for n in [0, 4]\n for m in [None, 0, 1, 3, 4]\n for k in 
list(range(-4, 4))))\n def testTri(self, m, n, k, dtype):\n np_fun = lambda: np.tri(n, M=m, k=k, dtype=dtype)\n jnp_fun = lambda: jnp.tri(n, M=m, k=k, dtype=dtype)\n args_maker = lambda: []\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_shape={}_k={}\".format(\n op, jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"op\": op, \"k\": k}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for op in [\"tril\", \"triu\"]\n for k in list(range(-3, 3))))\n def testTriLU(self, dtype, shape, op, k):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg: getattr(np, op)(arg, k=k)\n jnp_fun = lambda arg: getattr(jnp, op)(arg, k=k)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"n={}_k={}_m={}\".format(n, k, m),\n \"n\": n, \"k\": k, \"m\": m}\n for n in range(1, 5)\n for k in [-1, 0, 1]\n for m in range(1, 5)))\n def testTrilIndices(self, n, k, m):\n np_fun = lambda n, k, m: np.tril_indices(n, k=k, m=m)\n jnp_fun = lambda n, k, m: jnp.tril_indices(n, k=k, m=m)\n args_maker = lambda: [n, k, m]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"n={}_k={}_m={}\".format(n, k, m),\n \"n\": n, \"k\": k, \"m\": m}\n for n in range(1, 5)\n for k in [-1, 0, 1]\n for m in range(1, 5)))\n def testTriuIndices(self, n, k, m):\n np_fun = lambda n, k, m: np.triu_indices(n, k=k, m=m)\n jnp_fun = lambda n, k, m: jnp.triu_indices(n, k=k, m=m)\n args_maker = lambda: [n, k, m]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_k={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"k\": k}\n for dtype in default_dtypes\n for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]\n for k in [-1, 0, 1]))\n def testTriuIndicesFrom(self, shape, dtype, k):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arr, k: np.triu_indices_from(arr, k=k)\n jnp_fun = lambda arr, k: jnp.triu_indices_from(arr, k=k)\n args_maker = lambda: [rng(shape, dtype), k]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_k={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"k\": k}\n for dtype in default_dtypes\n for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]\n for k in [-1, 0, 1]))\n def testTrilIndicesFrom(self, shape, dtype, k):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arr, k: np.tril_indices_from(arr, k=k)\n jnp_fun = lambda arr, k: jnp.tril_indices_from(arr, k=k)\n args_maker = lambda: [rng(shape, dtype), k]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_ndim={}_n={}\".format(ndim, n),\n \"ndim\": ndim, \"n\": n}\n for ndim in [0, 1, 4]\n for n in [0, 1, 7]))\n def testDiagIndices(self, ndim, n):\n np.testing.assert_equal(np.diag_indices(n, ndim),\n jnp.diag_indices(n, ndim))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n 
{\"testcase_name\": \"arr_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype)\n ),\n \"dtype\": dtype, \"shape\": shape}\n for dtype in default_dtypes\n for shape in [(1,1), (2,2), (3,3), (4,4), (5,5)]))\n def testDiagIndicesFrom(self, dtype, shape):\n rng = jtu.rand_default(self.rng())\n np_fun = np.diag_indices_from\n jnp_fun = jnp.diag_indices_from\n args_maker = lambda : [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_k={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"k\": k}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]\n for k in list(range(-4, 4))))\n def testDiag(self, shape, dtype, k):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg: np.diag(arg, k)\n jnp_fun = lambda arg: jnp.diag(arg, k)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_k={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"k\": k}\n for dtype in default_dtypes\n for shape in all_shapes\n for k in range(-4, 4)))\n def testDiagFlat(self, shape, dtype, k):\n rng = jtu.rand_default(self.rng())\n # numpy has inconsistencies for scalar values\n # https://github.com/numpy/numpy/issues/16477\n # jax differs in that it treats scalars values as length-1 arrays\n np_fun = lambda arg: np.diagflat(np.atleast_1d(arg), k)\n jnp_fun = lambda arg: jnp.diagflat(arg, k)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_a1_shape={}_a2_shape2={}\".format(\n jtu.format_shape_dtype_string(a1_shape, dtype),\n jtu.format_shape_dtype_string(a2_shape, dtype)),\n \"dtype\": dtype, \"a1_shape\": a1_shape, \"a2_shape\": a2_shape}\n for dtype in default_dtypes\n for a1_shape in one_dim_array_shapes\n for a2_shape in one_dim_array_shapes))\n def testPolyMul(self, a1_shape, a2_shape, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg1, arg2: np.polymul(arg1, arg2)\n jnp_fun_np = lambda arg1, arg2: jnp.polymul(arg1, arg2, trim_leading_zeros=True)\n jnp_fun_co = lambda arg1, arg2: jnp.polymul(arg1, arg2)\n args_maker = lambda: [rng(a1_shape, dtype), rng(a2_shape, dtype)]\n tol = {np.float16: 2e-1, np.float32: 5e-2, np.float64: 1e-13}\n self._CheckAgainstNumpy(np_fun, jnp_fun_np, args_maker, check_dtypes=False, tol=tol)\n self._CompileAndCheck(jnp_fun_co, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_offset={}_axis1={}_axis2={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),\n \"dtype\": dtype, \"shape\": shape, \"offset\": offset, \"axis1\": axis1,\n \"axis2\": axis2}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for axis1 in range(-len(shape), len(shape))\n for axis2 in [a for a in range(-len(shape), len(shape))\n if a % len(shape) != axis1 % len(shape)]\n for offset in list(range(-4, 4))))\n def testDiagonal(self, shape, dtype, offset, 
axis1, axis2):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda arg: np.diagonal(arg, offset, axis1, axis2)\n jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_n={}\".format(np.dtype(dtype).name, n),\n \"dtype\": dtype, \"n\": n}\n for dtype in default_dtypes\n for n in list(range(4))))\n def testIdentity(self, n, dtype):\n np_fun = lambda: np.identity(n, dtype)\n jnp_fun = lambda: jnp.identity(n, dtype)\n args_maker = lambda: []\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_period={}_left={}_right={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), period, left, right),\n \"shape\": shape, \"dtype\": dtype,\n \"period\": period, \"left\": left, \"right\": right}\n for shape in nonempty_shapes\n for period in [None, 0.59]\n for left in [None, 0]\n for right in [None, 1]\n for dtype in default_dtypes\n # following types lack precision for meaningful tests\n if dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16]\n ))\n def testInterp(self, shape, dtype, period, left, right):\n rng = jtu.rand_default(self.rng(), scale=10)\n kwds = dict(period=period, left=left, right=right)\n np_fun = partial(np.interp, **kwds)\n jnp_fun = partial(jnp.interp, **kwds)\n args_maker = lambda: [rng(shape, dtype), np.sort(rng((20,), dtype)), np.linspace(0, 1, 20)]\n\n # skip numpy comparison for integer types with period specified, because numpy\n # uses an unstable sort and so results differ for duplicate values.\n if not (period and np.issubdtype(dtype, np.integer)):\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol={np.float32: 2E-4})\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_x1={}_x2={}_x1_rng={}\".format(\n jtu.format_shape_dtype_string(x1_shape, x1_dtype),\n jtu.format_shape_dtype_string(x2_shape, np.int32),\n x1_rng_factory_id),\n \"x1_shape\": x1_shape, \"x1_dtype\": x1_dtype,\n \"x2_shape\": x2_shape, \"x1_rng_factory\": x1_rng_factory,\n \"x2_rng_factory\": x2_rng_factory}\n for x1_rng_factory_id, x1_rng_factory in\n enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])\n for x2_rng_factory in [partial(jtu.rand_int, low=-1075, high=1024)]\n for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(array_shapes, 2))\n for x1_dtype in default_dtypes))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):\n # integer types are converted to float64 in numpy's implementation\n if (x1_dtype not in [jnp.bfloat16, np.float16, np.float32]\n and not config.x64_enabled):\n self.skipTest(\"Only run float64 testcase when float64 is enabled.\")\n x1_rng = x1_rng_factory(self.rng())\n x2_rng = x2_rng_factory(self.rng())\n np_fun = lambda x1, x2: np.ldexp(x1, x2)\n np_fun = jtu.ignore_warning(category=RuntimeWarning,\n message=\"overflow.*\")(np_fun)\n jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)\n args_maker = lambda: [x1_rng(x1_shape, x1_dtype),\n x2_rng(x2_shape, np.int32)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n 
self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_x={}_rng_factory={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),\n \"shape\": shape, \"dtype\": dtype, \"rng_factory\": rng_factory}\n for rng_factory_id, rng_factory in enumerate([\n jtu.rand_some_inf_and_nan,\n jtu.rand_some_zero,\n partial(jtu.rand_not_small, offset=1e8),\n ])\n for shape in all_shapes\n for dtype in default_dtypes))\n def testFrexp(self, shape, dtype, rng_factory):\n # integer types are converted to float64 in numpy's implementation\n if (dtype not in [jnp.bfloat16, np.float16, np.float32]\n and not config.x64_enabled):\n self.skipTest(\"Only run float64 testcase when float64 is enabled.\")\n rng = rng_factory(self.rng())\n np_fun = lambda x: np.frexp(x)\n jnp_fun = lambda x: jnp.frexp(x)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=np.issubdtype(dtype, np.inexact))\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_dtype_{}_offset={}_axis1={}_axis2={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n out_dtype, offset, axis1, axis2),\n \"dtype\": dtype, \"out_dtype\": out_dtype, \"shape\": shape, \"offset\": offset,\n \"axis1\": axis1, \"axis2\": axis2}\n for dtype in default_dtypes\n for out_dtype in [None] + number_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for axis1 in range(-len(shape), len(shape))\n for axis2 in range(-len(shape), len(shape))\n if (axis1 % len(shape)) != (axis2 % len(shape))\n for offset in list(range(-4, 4))))\n def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2):\n rng = jtu.rand_default(self.rng())\n def np_fun(arg):\n if out_dtype == jnp.bfloat16:\n return np.trace(arg, offset, axis1, axis2, np.float32).astype(jnp.bfloat16)\n else:\n return np.trace(arg, offset, axis1, axis2, out_dtype)\n jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_a={}_v={}_side={}\".format(\n jtu.format_shape_dtype_string(ashape, dtype),\n jtu.format_shape_dtype_string(vshape, dtype),\n side), \"ashape\": ashape, \"vshape\": vshape, \"side\": side,\n \"dtype\": dtype}\n for ashape in [(15,), (16,), (17,)]\n for vshape in [(), (5,), (5, 5)]\n for side in ['left', 'right']\n for dtype in default_dtypes\n ))\n def testSearchsorted(self, ashape, vshape, side, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [np.sort(rng(ashape, dtype)), rng(vshape, dtype)]\n np_fun = lambda a, v: np.searchsorted(a, v, side=side)\n jnp_fun = lambda a, v: jnp.searchsorted(a, v, side=side)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_x={}_bins={}_right={}_reverse={}\".format(\n jtu.format_shape_dtype_string(xshape, dtype),\n jtu.format_shape_dtype_string(binshape, dtype),\n right, reverse), \"xshape\": xshape, \"binshape\": binshape,\n \"right\": right, \"reverse\": reverse, \"dtype\": dtype}\n for xshape in [(20,), (5, 4)]\n for binshape in [(1,), (5,)]\n for right in [True, False]\n for reverse in [True, False]\n for dtype in 
default_dtypes\n ))\n def testDigitize(self, xshape, binshape, right, reverse, dtype):\n order = jax.ops.index[::-1] if reverse else jax.ops.index[:]\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(xshape, dtype), jnp.sort(rng(binshape, dtype))[order]]\n np_fun = lambda x, bins: np.digitize(x, bins, right=right)\n jnp_fun = lambda x, bins: jnp.digitize(x, bins, right=right)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_array={}\".format(\n jtu.format_test_name_suffix(\"\", [shape] * len(dtypes), dtypes), array_input),\n \"shape\": shape, \"dtypes\": dtypes, \"array_input\": array_input}\n for dtypes in [\n [np.float32],\n [np.float32, np.float32],\n [np.float32, np.int32, np.float32],\n [np.float32, np.int64, np.float32],\n [np.float32, np.int32, np.float64],\n ]\n for shape in [(), (2,), (3, 4), (1, 5)]\n for array_input in [True, False]))\n def testColumnStack(self, shape, dtypes, array_input):\n rng = jtu.rand_default(self.rng())\n if array_input:\n args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]\n else:\n args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]\n np_fun = _promote_like_jnp(np.column_stack)\n jnp_fun = jnp.column_stack\n self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_array={}\".format(\n jtu.format_test_name_suffix(\"\", [shape] * len(dtypes), dtypes), axis, array_input),\n \"shape\": shape, \"axis\": axis, \"dtypes\": dtypes, \"array_input\": array_input}\n for dtypes in [\n [np.float32],\n [np.float32, np.float32],\n [np.float32, np.int32, np.float32],\n [np.float32, np.int64, np.float32],\n [np.float32, np.int32, np.float64],\n ]\n for shape in [(), (2,), (3, 4), (1, 100)]\n for axis in range(-len(shape), len(shape) + 1)\n for array_input in [True, False]))\n def testStack(self, shape, axis, dtypes, array_input):\n rng = jtu.rand_default(self.rng())\n if array_input:\n args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]\n else:\n args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]\n np_fun = _promote_like_jnp(partial(np.stack, axis=axis))\n jnp_fun = partial(jnp.stack, axis=axis)\n self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_{}_array={}\".format(\n op, jtu.format_test_name_suffix(\"\", [shape] * len(dtypes), dtypes), array_input),\n \"shape\": shape, \"op\": op, \"dtypes\": dtypes, \"array_input\": array_input}\n for op in [\"hstack\", \"vstack\", \"dstack\"]\n for dtypes in [\n [np.float32],\n [np.float32, np.float32],\n [np.float32, np.int32, np.float32],\n [np.float32, np.int64, np.float32],\n [np.float32, np.int32, np.float64],\n ]\n for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]\n for array_input in [True, False]))\n def testHVDStack(self, shape, op, dtypes, array_input):\n rng = jtu.rand_default(self.rng())\n if array_input:\n args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]\n else:\n args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]\n np_fun = _promote_like_jnp(getattr(np, op))\n jnp_fun = getattr(jnp, op)\n self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n 
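# jnp.full, like np.full, accepts non-scalar fill values so long as they\n # broadcast to the requested shape; the fill_value_shape parameterization in\n # testFull below exercises that path (shapes drawn from _compatible_shapes,\n # which appears to yield shapes broadcast-compatible with `shape`).\n 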
@parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outdtype={}_fillshape={}\".format(\n jtu.format_shape_dtype_string(shape, fill_value_dtype),\n np.dtype(out_dtype).name if out_dtype else \"None\",\n fill_value_shape),\n \"fill_value_dtype\": fill_value_dtype, \"fill_value_shape\": fill_value_shape,\n \"shape\": shape, \"out_dtype\": out_dtype}\n for shape in array_shapes + [3, np.array(7, dtype=np.int32)]\n for fill_value_dtype in default_dtypes\n for fill_value_shape in _compatible_shapes(shape)\n for out_dtype in [None] + default_dtypes))\n def testFull(self, shape, fill_value_dtype, fill_value_shape, out_dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda fill_value: np.full(shape, fill_value, dtype=out_dtype)\n jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)\n args_maker = lambda: [rng(fill_value_shape, fill_value_dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"_shape={}_n={}_axis={}_prepend={}_append={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n n, axis, prepend, append),\n \"shape\": shape, \"dtype\": dtype, \"n\": n, \"axis\": axis,\n \"prepend\": prepend, \"append\": append\n } for shape, dtype in s(_shape_and_dtypes(nonempty_nonscalar_array_shapes, default_dtypes))\n for n in s([0, 1, 2])\n for axis in s(list(range(-len(shape), max(1, len(shape)))))\n for prepend in s([None, 1, np.zeros(shape, dtype=dtype)])\n for append in s([None, 1, np.zeros(shape, dtype=dtype)])\n )))\n def testDiff(self, shape, dtype, n, axis, prepend, append):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n\n def np_fun(x, n=n, axis=axis, prepend=prepend, append=append):\n if prepend is None:\n prepend = np._NoValue\n elif not np.isscalar(prepend) and prepend.dtype == jnp.bfloat16:\n prepend = prepend.astype(np.float32)\n\n if append is None:\n append = np._NoValue\n elif not np.isscalar(append) and append.dtype == jnp.bfloat16:\n append = append.astype(np.float32)\n\n if x.dtype == jnp.bfloat16:\n return np.diff(x.astype(np.float32), n=n, axis=axis, prepend=prepend, append=append).astype(jnp.bfloat16)\n else:\n return np.diff(x, n=n, axis=axis, prepend=prepend, append=append)\n\n jnp_fun = lambda x: jnp.diff(x, n=n, axis=axis, prepend=prepend, append=append)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": (\"_op={}_shape={}_dtype={}\").format(op, shape, dtype),\n \"np_op\": getattr(np, op), \"jnp_op\": getattr(jnp, op),\n \"shape\": shape, \"dtype\": dtype}\n for op in [\"zeros\", \"ones\"]\n for shape in [2, (), (2,), (3, 0), np.array((4, 5, 6), dtype=np.int32),\n np.array(4, dtype=np.int32)]\n for dtype in all_dtypes))\n def testZerosOnes(self, np_op, jnp_op, shape, dtype):\n args_maker = lambda: []\n np_op = partial(np_op, shape, dtype)\n jnp_op = partial(jnp_op, shape, dtype)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n def testOnesWithInvalidShape(self):\n with self.assertRaises(TypeError):\n jnp.ones((-1, 1))\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"_inshape={}_filldtype={}_fillshape={}_outdtype={}_outshape={}\".format(\n 
jtu.format_shape_dtype_string(shape, in_dtype),\n np.dtype(fill_value_dtype).name, fill_value_shape,\n np.dtype(out_dtype).name, out_shape),\n \"shape\": shape, \"in_dtype\": in_dtype,\n \"fill_value_dtype\": fill_value_dtype, \"fill_value_shape\": fill_value_shape,\n \"out_dtype\": out_dtype, \"out_shape\": out_shape\n } for shape in s(array_shapes)\n for out_shape in s([None] + array_shapes)\n for in_dtype in s(default_dtypes)\n for fill_value_dtype in s(default_dtypes)\n for fill_value_shape in s(_compatible_shapes(shape if out_shape is None else out_shape))\n for out_dtype in s(default_dtypes))))\n def testFullLike(self, shape, in_dtype, fill_value_dtype, fill_value_shape, out_dtype, out_shape):\n if numpy_version < (1, 19) and out_shape == ():\n raise SkipTest(\"Numpy < 1.19 treats out_shape=() like out_shape=None\")\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x, fill_value: np.full_like(\n x, fill_value, dtype=out_dtype, shape=out_shape)\n jnp_fun = lambda x, fill_value: jnp.full_like(\n x, fill_value, dtype=out_dtype, shape=out_shape)\n args_maker = lambda: [rng(shape, in_dtype), rng(fill_value_shape, fill_value_dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_func={}_inshape={}_outshape={}_outdtype={}\".format(\n func, jtu.format_shape_dtype_string(shape, in_dtype),\n out_shape, out_dtype),\n \"func\": func, \"shape\": shape, \"in_dtype\": in_dtype,\n \"out_shape\": out_shape, \"out_dtype\": out_dtype}\n for shape in array_shapes\n for out_shape in [None] + array_shapes\n for in_dtype in default_dtypes\n for func in [\"ones_like\", \"zeros_like\"]\n for out_dtype in default_dtypes))\n def testZerosOnesLike(self, func, shape, in_dtype, out_shape, out_dtype):\n if numpy_version < (1, 19) and out_shape == ():\n raise SkipTest(\"Numpy < 1.19 treats out_shape=() like out_shape=None\")\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: getattr(np, func)(x, dtype=out_dtype, shape=out_shape)\n jnp_fun = lambda x: getattr(jnp, func)(x, dtype=out_dtype, shape=out_shape)\n args_maker = lambda: [rng(shape, in_dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_func={}_inshape={}_weak_type={}_outshape={}_outdtype={}\".format(\n func, jtu.format_shape_dtype_string(shape, in_dtype),\n weak_type, out_shape, out_dtype),\n \"func\": func, \"args\": args,\n \"shape\": shape, \"in_dtype\": in_dtype, \"weak_type\": weak_type,\n \"out_shape\": out_shape, \"out_dtype\": out_dtype}\n for shape in array_shapes\n for in_dtype in [np.int32, np.float32, np.complex64]\n for weak_type in [True, False]\n for out_shape in [None, (), (10,)]\n for func, args in [(\"full_like\", (-100,)), (\"ones_like\", ()), (\"zeros_like\", ())]\n for out_dtype in [None, float]))\n def testZerosOnesFullLikeWeakType(self, func, args, shape, in_dtype, weak_type, out_shape, out_dtype):\n if numpy_version < (1, 19) and out_shape == ():\n raise SkipTest(\"Numpy < 1.19 treats out_shape=() like out_shape=None\")\n rng = jtu.rand_default(self.rng())\n x = lax._convert_element_type(rng(shape, in_dtype), weak_type=weak_type)\n fun = lambda x: getattr(jnp, func)(x, *args, dtype=out_dtype, shape=out_shape)\n expected_weak_type = weak_type and (out_dtype is None)\n self.assertEqual(dtypes.is_weakly_typed(fun(x)), expected_weak_type)\n 
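# The jit-compiled path should report the same weak-type flag as eager mode.\n 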
self.assertEqual(dtypes.is_weakly_typed(jax.jit(fun)(x)), expected_weak_type)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_funcname={}_input_type={}_val={}_dtype={}\".format(\n funcname, input_type, val, dtype),\n \"funcname\": funcname, \"input_type\": input_type, \"val\": val, \"dtype\": dtype}\n for funcname in [\"array\", \"asarray\"]\n for dtype in [int, float, None]\n for val in [0, 1]\n for input_type in [int, float, np.int32, np.float32]))\n def testArrayWeakType(self, funcname, input_type, val, dtype):\n func = lambda x: getattr(jnp, funcname)(x, dtype=dtype)\n fjit = jax.jit(func)\n val = input_type(val)\n expected_weak_type = dtype is None and input_type in set(dtypes._weak_types)\n self.assertEqual(dtypes.is_weakly_typed(func(val)), expected_weak_type)\n self.assertEqual(dtypes.is_weakly_typed(fjit(val)), expected_weak_type)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_weak_type={}_slc={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), weak_type, slc),\n \"shape\": shape, \"dtype\": dtype, \"weak_type\": weak_type, \"slc\": slc}\n for shape in nonempty_nonscalar_array_shapes\n for dtype in [int, float, complex]\n for weak_type in [True, False]\n for slc in [slice(None), slice(0), slice(3), 0, ...]))\n def testSliceWeakTypes(self, shape, dtype, weak_type, slc):\n rng = jtu.rand_default(self.rng())\n x = lax._convert_element_type(rng(shape, dtype), weak_type=weak_type)\n op = lambda x: x[slc]\n self.assertEqual(op(x).aval.weak_type, weak_type)\n self.assertEqual(jax.jit(op)(x).aval.weak_type, weak_type)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_{}sections\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),\n \"shape\": shape, \"num_sections\": num_sections, \"axis\": axis,\n \"dtype\": dtype}\n for shape, axis, num_sections in [\n ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),\n ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]\n for dtype in default_dtypes))\n def testSplitStaticInt(self, shape, num_sections, axis, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.split(x, num_sections, axis=axis)\n jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_{}sections\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),\n \"shape\": shape, \"num_sections\": num_sections, \"axis\": axis, \"dtype\": dtype}\n # All testcases split the specified axis unequally\n for shape, axis, num_sections in [\n ((3,), 0, 2), ((12,), 0, 5), ((12, 4), 0, 7), ((12, 4), 1, 3),\n ((2, 3, 5), -1, 2), ((2, 4, 4), -2, 3), ((7, 2, 2), 0, 3)]\n for dtype in default_dtypes))\n def testArraySplitStaticInt(self, shape, num_sections, axis, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.array_split(x, num_sections, axis=axis)\n jnp_fun = lambda x: jnp.array_split(x, num_sections, axis=axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testSplitTypeError(self):\n # If we pass an ndarray for indices_or_sections -> no error\n self.assertEqual(3, len(jnp.split(jnp.zeros(3), jnp.array([1, 2]))))\n\n CONCRETIZATION_MSG = \"Abstract tracer value 
encountered where concrete value is expected.\"\n with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):\n # An abstract tracer for idx\n jax.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), idx))(2.)\n with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):\n # A list including an abstract tracer\n jax.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), [2, idx]))(2.)\n\n # A concrete tracer -> no error\n jax.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), idx),\n (2.,), (1.,))\n # A tuple including a concrete tracer -> no error\n jax.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), (1, idx)),\n (2.,), (1.,))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_bins={}_range={}_weights={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), bins, range, weights),\n \"shape\": shape,\n \"dtype\": dtype,\n \"bins\": bins,\n \"range\": range,\n \"weights\": weights,\n }\n for shape in [(5,), (5, 5)]\n for dtype in number_dtypes\n for bins in [10, np.arange(-5, 6), np.array([-5, 0, 3])]\n for range in [None, (0, 0), (0, 10)]\n for weights in [True, False]\n ))\n def testHistogramBinEdges(self, shape, dtype, bins, range, weights):\n rng = jtu.rand_default(self.rng())\n _weights = lambda w: abs(w) if weights else None\n np_fun = lambda a, w, r: np.histogram_bin_edges(a, bins=bins, range=r,\n weights=_weights(w))\n jnp_fun = lambda a, w, r: jnp.histogram_bin_edges(a, bins=bins, range=r,\n weights=_weights(w))\n args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), range]\n tol = {jnp.bfloat16: 2E-2, np.float16: 1E-2}\n # linspace() compares poorly to numpy when using bfloat16\n if dtype != jnp.bfloat16:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker,\n atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_bins={}_density={}_weights={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), bins, density, weights),\n \"shape\": shape,\n \"dtype\": dtype,\n \"bins\": bins,\n \"density\": density,\n \"weights\": weights,\n }\n for shape in [(5,), (5, 5)]\n for dtype in default_dtypes\n # We only test explicit integer-valued bin edges because in other cases\n # rounding errors lead to flaky tests.\n for bins in [np.arange(-5, 6), np.array([-5, 0, 3])]\n for density in [True, False]\n for weights in [True, False]\n ))\n def testHistogram(self, shape, dtype, bins, density, weights):\n rng = jtu.rand_default(self.rng())\n _weights = lambda w: abs(w) if weights else None\n np_fun = lambda a, w: np.histogram(a, bins=bins, density=density,\n weights=_weights(w))\n jnp_fun = lambda a, w: jnp.histogram(a, bins=bins, density=density,\n weights=_weights(w))\n args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]\n tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}\n # np.searchsorted errors on bfloat16 with\n # \"TypeError: invalid type promotion with custom data type\"\n if dtype != jnp.bfloat16:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_bins={}_weights={}_density={}_range={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),\n \"shape\": shape, \"dtype\": dtype, \"bins\": bins, \"weights\": weights, \"density\": density, \"range\": range,\n }\n for shape in [(5,), (12,)]\n for dtype in int_dtypes\n for bins in [2, [2, 2], 
[np.array([0, 1, 3, 5]), np.array([0, 2, 3, 4, 6])]]\n for weights in [False, True]\n for density in [False, True]\n for range in [None, [(-1, 1), None], [(-1, 1), (-2, 2)]]\n ))\n def testHistogram2d(self, shape, dtype, bins, weights, density, range):\n rng = jtu.rand_default(self.rng())\n _weights = lambda w: abs(w) if weights else None\n np_fun = jtu.ignore_warning(category=RuntimeWarning, message=\"invalid value.*\")(\n lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range))\n jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range)\n args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]\n tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}\n # np.searchsorted errors on bfloat16 with\n # \"TypeError: invalid type promotion with custom data type\"\n with np.errstate(divide='ignore', invalid='ignore'):\n if dtype != jnp.bfloat16:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_bins={}_weights={}_density={}_range={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),\n \"shape\": shape, \"dtype\": dtype, \"bins\": bins, \"weights\": weights, \"density\": density, \"range\": range,\n }\n for shape in [(5, 3), (10, 3)]\n for dtype in int_dtypes\n for bins in [(2, 2, 2), [np.array([-5, 0, 4]), np.array([-4, -1, 2]), np.array([-6, -1, 4])]]\n for weights in [False, True]\n for density in [False, True]\n for range in [None, [(-1, 1), None, None], [(-1, 1), (-2, 2), (-3, 3)]]\n ))\n def testHistogramdd(self, shape, dtype, bins, weights, density, range):\n rng = jtu.rand_default(self.rng())\n _weights = lambda w: abs(w) if weights else None\n np_fun = jtu.ignore_warning(category=RuntimeWarning, message=\"invalid value.*\")(\n lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range))\n jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range)\n args_maker = lambda: [rng(shape, dtype), rng((shape[0],), dtype)]\n tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}\n # np.searchsorted errors on bfloat16 with\n # \"TypeError: invalid type promotion with custom data type\"\n if dtype != jnp.bfloat16:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_{}sections\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),\n \"shape\": shape, \"num_sections\": num_sections, \"axis\": axis,\n \"dtype\": dtype}\n for shape, axis, num_sections in [\n ((12, 4), 0, 4), ((12, 4), 1, 2),\n ((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]\n for dtype in default_dtypes))\n def testHVDSplit(self, shape, num_sections, axis, dtype):\n rng = jtu.rand_default(self.rng())\n def fn(module, axis):\n if axis == 0:\n return module.vsplit\n elif axis == 1:\n return module.hsplit\n else:\n assert axis == 2\n return module.dsplit\n\n np_fun = lambda x: fn(np, axis)(x, num_sections)\n jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": 
\"_inshape={}_outshape={}_order={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype),\n order),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"order\": order}\n for dtype in default_dtypes\n for order in [\"C\", \"F\"]\n for arg_shape, out_shape in [\n (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),\n ((), (1, 1, 1)),\n ((7, 0), (0, 42, 101)),\n ((3, 4), 12),\n ((3, 4), (12,)),\n ((3, 4), -1),\n ((2, 1, 4), (-1,)),\n ((2, 2, 4), (2, 8))\n ]))\n def testReshape(self, arg_shape, out_shape, dtype, order):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.reshape(x, out_shape, order=order)\n jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype}\n for dtype in default_dtypes\n for arg_shape, out_shape in [\n ((7, 0), (0, 42, 101)),\n ((2, 1, 4), (-1,)),\n ((2, 2, 4), (2, 8))\n ]))\n def testReshapeMethod(self, arg_shape, out_shape, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.reshape(x, out_shape)\n jnp_fun = lambda x: x.reshape(*out_shape)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype}\n for dtype in default_dtypes\n for arg_shape, out_shape in itertools.product(all_shapes, array_shapes)))\n def testResize(self, arg_shape, out_shape, dtype):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.resize(x, out_shape)\n jnp_fun = lambda x: jnp.resize(x, out_shape)\n args_maker = lambda: [rng(arg_shape, dtype)]\n if len(out_shape) > 0 or numpy_version >= (1, 20, 0):\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_expanddim={!r}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), dim),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"dim\": dim}\n for arg_shape in [(), (3,), (3, 4)]\n for dtype in default_dtypes\n for dim in (list(range(-len(arg_shape)+1, len(arg_shape)))\n + [np.array(0), np.array(-1), (0,), [np.array(0)],\n (len(arg_shape), len(arg_shape) + 1)])))\n def testExpandDimsStaticDim(self, arg_shape, dtype, dim):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.expand_dims(x, dim)\n jnp_fun = lambda x: jnp.expand_dims(x, dim)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CompileAndCheck(jnp_fun, args_maker)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_axes=({},{})\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"ax1\": ax1, \"ax2\": ax2}\n for arg_shape, ax1, ax2 in [\n ((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 
5), 1, 2),\n ((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]\n for dtype in default_dtypes))\n def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.swapaxes(x, ax1, ax2)\n jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_axis={!r}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), ax),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"ax\": ax}\n for arg_shape, ax in [\n ((3, 1), None),\n ((3, 1), 1),\n ((3, 1), -1),\n ((3, 1), np.array(1)),\n ((1, 3, 1), (0, 2)),\n ((1, 3, 1), (0,)),\n ((1, 4, 1), (np.array(0),))]\n for dtype in default_dtypes))\n def testSqueeze(self, arg_shape, dtype, ax):\n rng = jtu.rand_default(self.rng())\n np_fun = lambda x: np.squeeze(x, ax)\n jnp_fun = lambda x: jnp.squeeze(x, ax)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}_weights={}_returned={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n axis,\n (None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),\n returned),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis,\n \"weights_shape\": weights_shape, \"returned\": returned}\n for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)\n for axis in list(range(-len(shape), len(shape))) + [None]\n # `weights_shape` is either `None`, same as the averaged axis, or same as\n # that of the input\n for weights_shape in ([None, shape] if axis is None or len(shape) == 1\n else [None, (shape[axis],), shape])\n for returned in [False, True]))\n def testAverage(self, shape, dtype, axis, weights_shape, returned):\n rng = jtu.rand_default(self.rng())\n if weights_shape is None:\n np_fun = lambda x: np.average(x, axis, returned=returned)\n jnp_fun = lambda x: jnp.average(x, axis, returned=returned)\n args_maker = lambda: [rng(shape, dtype)]\n else:\n np_fun = lambda x, weights: np.average(x, axis, weights, returned)\n jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)\n args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]\n np_fun = _promote_like_jnp(np_fun, inexact=True)\n tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5,\n np.float64: 1e-12, np.complex64: 1e-5}\n check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE\n try:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n check_dtypes=check_dtypes, tol=tol)\n except ZeroDivisionError:\n self.skipTest(\"don't support checking for ZeroDivisionError\")\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,\n rtol=tol, atol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n f\"_arg{i}_ndmin={ndmin}_dtype={np.dtype(dtype) if dtype else None}\",\n \"arg\": arg, \"ndmin\": ndmin, \"dtype\": dtype}\n for i, (arg, dtypes) in enumerate([\n ([True, False, True], all_dtypes),\n (3., all_dtypes),\n ([1, 2, 3], all_dtypes),\n (np.array([1, 2, 3], dtype=np.int64), all_dtypes),\n ([1., 2., 3.], all_dtypes),\n ([[1, 2], [3, 4], [5, 6]], all_dtypes),\n ([[1, 2.], [3, 4], [5, 6]], all_dtypes),\n ([[1., 2j], [3., 4.], [5., 6.]], complex_dtypes),\n ([[3, np.array(2, 
dtype=jnp.float_), 1],\n np.arange(3., dtype=jnp.float_)], all_dtypes),\n ])\n for dtype in [None] + dtypes\n for ndmin in [None, np.ndim(arg), np.ndim(arg) + 1, np.ndim(arg) + 2]))\n def testArray(self, arg, ndmin, dtype):\n args_maker = lambda: [arg]\n canonical_dtype = dtypes.canonicalize_dtype(dtype or np.array(arg).dtype)\n if ndmin is not None:\n np_fun = partial(np.array, ndmin=ndmin, dtype=canonical_dtype)\n jnp_fun = partial(jnp.array, ndmin=ndmin, dtype=dtype)\n else:\n np_fun = partial(np.array, dtype=canonical_dtype)\n jnp_fun = partial(jnp.array, dtype=dtype)\n\n # We are testing correct canonicalization behavior here, so we turn off the\n # permissive canonicalization logic in the test harness.\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n canonicalize_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testArrayUnsupportedDtypeError(self):\n with self.assertRaisesRegex(TypeError,\n \"JAX only supports number and bool dtypes.*\"):\n jnp.array(3, [('a','<i4'),('b','<i4')])\n\n def testArrayFromInteger(self):\n int_dtype = dtypes.canonicalize_dtype(jnp.int64)\n int_max = jnp.iinfo(int_dtype).max\n int_min = jnp.iinfo(int_dtype).min\n\n # Values at extremes are converted correctly.\n for val in [int_min, 0, int_max]:\n self.assertEqual(jnp.array(val).dtype, int_dtype)\n\n # out of bounds leads to an OverflowError\n val = int_max + 1\n with self.assertRaisesRegex(OverflowError, f\"Python int {val} too large to convert to {int_dtype.name}\"):\n jnp.array(val)\n\n # explicit uint64 should work\n if config.x64_enabled:\n self.assertEqual(np.uint64(val), jnp.array(val, dtype='uint64'))\n\n # TODO(jakevdp): fix list inputs to jnp.array and enable the following test\n # def testArrayFromList(self):\n # int_max = jnp.iinfo(jnp.int64).max\n # int_min = jnp.iinfo(jnp.int64).min\n #\n # # Values at extremes are converted correctly.\n # for val in [int_min, 0, int_max]:\n # self.assertEqual(jnp.array([val]).dtype, dtypes.canonicalize_dtype('int64'))\n #\n # # list of values results in promoted type.\n # self.assertEqual(jnp.array([0, np.float16(1)]).dtype, jnp.result_type('int64', 'float16'))\n #\n # # out of bounds leads to an OverflowError\n # val = int_min - 1\n # with self.assertRaisesRegex(OverflowError, f\"Python int {val} too large to convert to int64\"):\n # jnp.array([0, val])\n\n def testIssue121(self):\n assert not np.isscalar(jnp.array(3))\n\n def testArrayOutputsDeviceArrays(self):\n assert xla.type_is_device_array(jnp.array([]))\n assert xla.type_is_device_array(jnp.array(np.array([])))\n\n class NDArrayLike:\n def __array__(self, dtype=None):\n return np.array([], dtype=dtype)\n assert xla.type_is_device_array(jnp.array(NDArrayLike()))\n\n # NOTE(mattjj): disabled b/c __array__ must produce ndarrays\n # class DeviceArrayLike:\n # def __array__(self, dtype=None):\n # return jnp.array([], dtype=dtype)\n # assert xla.type_is_device_array(jnp.array(DeviceArrayLike()))\n\n def testArrayMethod(self):\n class arraylike(object):\n dtype = np.float32\n def __array__(self, dtype=None):\n return np.array(3., dtype=dtype)\n a = arraylike()\n ans = jnp.array(a)\n assert ans == 3.\n\n def testMemoryView(self):\n ans = jnp.array(bytearray(b'\\x2a'))\n self.assertAllClose(\n ans,\n np.array([0x2a], dtype=np.uint8))\n\n def testIsClose(self):\n c_isclose = jax.jit(jnp.isclose)\n c_isclose_nan = jax.jit(partial(jnp.isclose, equal_nan=True))\n n = 2\n\n rng = np.random.RandomState(0)\n x = rng.randn(n, 1)\n y = rng.randn(n, 1)\n inf = np.asarray(n * 
[np.inf]).reshape([n, 1])\n nan = np.asarray(n * [np.nan]).reshape([n, 1])\n args = [x, y, inf, -inf, nan]\n\n for arg0 in args:\n for arg1 in args:\n result_np = np.isclose(arg0, arg1)\n result_jax = jnp.isclose(arg0, arg1)\n result_jit = c_isclose(arg0, arg1)\n self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))\n self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))\n result_np = np.isclose(arg0, arg1, equal_nan=True)\n result_jax = jnp.isclose(arg0, arg1, equal_nan=True)\n result_jit = c_isclose_nan(arg0, arg1)\n self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))\n self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_x={}_y={}_equal_nan={}\".format(x, y, equal_nan),\n \"x\": x, \"y\": y, \"equal_nan\": equal_nan}\n for x, y in itertools.product([\n 1, [1], [1, 1 + 1E-4], [1, np.nan]], repeat=2)\n for equal_nan in [True, False]))\n def testAllClose(self, x, y, equal_nan):\n jnp_fun = partial(jnp.allclose, equal_nan=equal_nan, rtol=1E-3)\n np_fun = partial(np.allclose, equal_nan=equal_nan, rtol=1E-3)\n args_maker = lambda: [np.array(x), np.array(y)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testZeroStridesConstantHandler(self):\n raw_const = np.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)\n const = np.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))\n\n def fun(x):\n return x * const\n\n fun = jax.jit(fun)\n out_val = fun(3.)\n self.assertAllClose(out_val, 3. * const, check_dtypes=False)\n\n def testIsInstanceNdarrayDuringTracing(self):\n arr = np.ones(3)\n\n @jax.jit\n def f(x):\n self.assertIsInstance(x, jnp.ndarray)\n return jnp.sum(x)\n\n f(arr)\n\n def testNonArrayErrorMessage(self):\n x = [1., 2.]\n y = np.array([3., 4.])\n\n def g(x, y):\n return jnp.add(x, y)\n\n def f(x, y):\n return jnp.dot(x, y)\n\n self.assertRaises(TypeError, lambda: g(x, y))\n self.assertRaises(TypeError, lambda: f(x, y))\n self.assertRaises(TypeError, lambda: jax.jit(g)(x, y))\n self.assertRaises(TypeError, lambda: jax.jit(f)(x, y))\n\n def testAbstractionErrorMessage(self):\n\n @jax.jit\n def f(x, n):\n for _ in range(n):\n x = x * x\n return x\n\n self.assertRaises(jax.errors.TracerIntegerConversionError, lambda: f(3., 3))\n\n @jax.jit\n def g(x):\n if x > 0.:\n return x * 2\n else:\n return x + 2\n\n self.assertRaises(jax.errors.ConcretizationTypeError, lambda: g(3.))\n\n def testTracingPrimitiveWithNoTranslationErrorMessage(self):\n # TODO(mattjj): update this for jax3\n self.skipTest(\"test needs jax3 update\")\n foo = jnp._not_implemented(lambda x: x)\n\n # No error if there's no tracing.\n foo(np.arange(3))\n\n cfoo = jax.jit(foo)\n self.assertRaises(NotImplementedError, lambda: cfoo(np.arange(3)))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in [(3,), (2, 3)]\n for dtype in default_dtypes\n for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))] # Test negative axes and tuples\n ))\n def testFlip(self, shape, dtype, axis):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n jnp_op = lambda x: jnp.flip(x, axis)\n np_op = lambda x: np.flip(x, axis)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n 
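# flipud and fliplr are the axis-0 and axis-1 conveniences for flip, so the two\n  # tests below mirror testFlip for those wrappers.\n\n  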
@parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(3,), (2, 3), (3, 2, 4)]\n for dtype in default_dtypes))\n def testFlipud(self, shape, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n jnp_op = lambda x: jnp.flipud(x)\n np_op = lambda x: np.flipud(x)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(3, 2), (2, 3), (3, 2, 4)]\n for dtype in default_dtypes))\n def testFliplr(self, shape, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n jnp_op = lambda x: jnp.fliplr(x)\n np_op = lambda x: np.fliplr(x)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_k={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k, axes),\n \"shape\": shape, \"dtype\": dtype, \"k\": k, \"axes\": axes}\n for shape, axes in [\n [(2, 3), (0, 1)],\n [(2, 3), (1, 0)],\n [(4, 3, 2), (0, 2)],\n [(4, 3, 2), (2, 1)],\n ]\n for k in range(-3, 4)\n for dtype in default_dtypes))\n def testRot90(self, shape, dtype, k, axes):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n jnp_op = lambda x: jnp.rot90(x, k, axes)\n np_op = lambda x: np.rot90(x, k, axes)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n # TODO(mattjj): test infix operator overrides\n\n def testRavel(self):\n rng = np.random.RandomState(0)\n args_maker = lambda: [rng.randn(3, 4).astype(\"float32\")]\n self._CompileAndCheck(lambda x: x.ravel(), args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_order={}_mode={}\".format(\n shape, order, mode),\n \"shape\": shape, \"order\": order, \"mode\": mode}\n for shape in nonempty_nonscalar_array_shapes\n for order in ['C', 'F']\n for mode in ['wrap', 'clip', 'raise']))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testRavelMultiIndex(self, shape, order, mode):\n # generate indices in each dimension with a few out of bounds.\n rngs = [jtu.rand_int(self.rng(), low=-1, high=dim + 1)\n for dim in shape]\n # generate multi_indices of different dimensions that broadcast.\n args_maker = lambda: [tuple(rng(ndim * (3,), jnp.int_)\n for ndim, rng in enumerate(rngs))]\n def np_fun(x):\n try:\n return np.ravel_multi_index(x, shape, order=order, mode=mode)\n except ValueError as err:\n if str(err).startswith('invalid entry'):\n # sentinel indicating expected error.\n return -999\n else:\n raise\n def jnp_fun(x):\n try:\n return jnp.ravel_multi_index(x, shape, order=order, mode=mode)\n except ValueError as err:\n if str(err).startswith('invalid entry'):\n # sentinel indicating expected error.\n return -999\n else:\n raise\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n if mode == 'raise':\n msg = (\"The error occurred because ravel_multi_index was jit-compiled \"\n \"with mode='raise'. 
Use mode='wrap' or mode='clip' instead.\")\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):\n jax.jit(jnp_fun)(*args_maker())\n else:\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_ashape={}{}_cshapes={}{}_mode={}\".format(\n adtype.__name__, ashape, cdtype.__name__, cshapes, mode),\n \"ashape\": ashape, \"adtype\": adtype, \"cshapes\": cshapes, \"cdtype\": cdtype, \"mode\": mode}\n for ashape in ((), (4,), (3, 4))\n for cshapes in [\n [(), (4,)],\n [(3, 4), (4,), (3, 1)]\n ]\n for adtype in int_dtypes\n for cdtype in default_dtypes\n for mode in ['wrap', 'clip', 'raise']))\n def testChoose(self, ashape, adtype, cshapes, cdtype, mode):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(ashape, adtype), [rng(s, cdtype) for s in cshapes]]\n def np_fun(a, c):\n try:\n return np.choose(a, c, mode=mode)\n except ValueError as err:\n if mode == 'raise' and str(err).startswith('invalid entry'):\n return -999 # sentinel indicating expected error.\n else:\n raise\n def jnp_fun(a, c):\n try:\n return jnp.choose(a, c, mode=mode)\n except ValueError as err:\n if mode == 'raise' and str(err).startswith('invalid entry'):\n return -999 # sentinel indicating expected error.\n else:\n raise\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n if mode == 'raise':\n msg = (\"The error occurred because jnp.choose was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):\n jax.jit(jnp_fun)(*args_maker())\n else:\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.parameters(\n (0, (2, 1, 3)),\n (5, (2, 1, 3)),\n (0, ()),\n (np.array([0, 1, 2]), (2, 2)),\n (np.array([[[0, 1], [2, 3]]]), (2, 2)))\n def testUnravelIndex(self, flat_index, shape):\n args_maker = lambda: (flat_index, shape)\n self._CheckAgainstNumpy(np.unravel_index, jnp.unravel_index,\n args_maker)\n self._CompileAndCheck(jnp.unravel_index, args_maker)\n\n def testUnravelIndexOOB(self):\n self.assertEqual(jnp.unravel_index(2, (2,)), (1,))\n self.assertEqual(jnp.unravel_index(-2, (2, 1, 3,)), (1, 0, 1))\n self.assertEqual(jnp.unravel_index(-3, (2,)), (0,))\n\n def testAstype(self):\n rng = np.random.RandomState(0)\n args_maker = lambda: [rng.randn(3, 4).astype(\"float32\")]\n np_op = lambda x: np.asarray(x).astype(jnp.int32)\n jnp_op = lambda x: jnp.asarray(x).astype(jnp.int32)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in array_shapes\n for dtype in all_dtypes))\n def testNbytes(self, shape, dtype):\n rng = jtu.rand_default(self.rng())\n np_op = lambda x: np.asarray(x).nbytes\n jnp_op = lambda x: jnp.asarray(x).nbytes\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_dtype={}\".format(\n jtu.format_shape_dtype_string(shape, a_dtype), dtype),\n \"shape\": shape, \"a_dtype\": a_dtype, \"dtype\": dtype}\n for shape in [(8,), (3, 8)] # last dim = 8 to ensure shape compatibility\n for a_dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)\n for dtype in (default_dtypes + unsigned_dtypes + 
bool_dtypes)))\n def testView(self, shape, a_dtype, dtype):\n if jtu.device_under_test() == 'tpu':\n if jnp.dtype(a_dtype).itemsize in [1, 2] or jnp.dtype(dtype).itemsize in [1, 2]:\n self.skipTest(\"arr.view() not supported on TPU for 8- or 16-bit types.\")\n if not config.x64_enabled:\n if jnp.dtype(a_dtype).itemsize == 8 or jnp.dtype(dtype).itemsize == 8:\n self.skipTest(\"x64 types are disabled by jax_enable_x64\")\n rng = jtu.rand_fullrange(self.rng())\n args_maker = lambda: [rng(shape, a_dtype)]\n np_op = lambda x: np.asarray(x).view(dtype)\n jnp_op = lambda x: jnp.asarray(x).view(dtype)\n # Above may produce signaling nans; ignore warnings from invalid values.\n with np.errstate(invalid='ignore'):\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n def testPathologicalFloats(self):\n args_maker = lambda: [np.array([\n 0b_0111_1111_1000_0000_0000_0000_0000_0000, # inf\n 0b_1111_1111_1000_0000_0000_0000_0000_0000, # -inf\n 0b_0111_1111_1100_0000_0000_0000_0000_0000, # qnan\n 0b_1111_1111_1100_0000_0000_0000_0000_0000, # -qnan\n 0b_0111_1111_1000_0000_0000_0000_0000_0001, # snan\n 0b_1111_1111_1000_0000_0000_0000_0000_0001, # -snan\n 0b_0111_1111_1000_0000_0000_1100_0000_0000, # nonstandard nan\n 0b_1111_1111_1000_0000_0000_1100_0000_0000, # -nonstandard nan\n 0b_0000_0000_0000_0000_0000_0000_0000_0000, # zero\n 0b_1000_0000_0000_0000_0000_0000_0000_0000, # -zero\n ], dtype='uint32')]\n\n np_op = lambda x: np.asarray(x).view('float32').view('uint32')\n jnp_op = lambda x: jnp.asarray(x).view('float32').view('uint32')\n\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n # TODO(mattjj): test other ndarray-like method overrides\n\n def testNpMean(self):\n # from https://github.com/google/jax/issues/125\n x = lax.add(jnp.eye(3, dtype=float), 0.)\n ans = np.mean(x)\n self.assertAllClose(ans, np.array(1./3), check_dtypes=False)\n\n def testArangeOnFloats(self):\n # from https://github.com/google/jax/issues/145\n self.assertAllClose(np.arange(0.0, 1.0, 0.1, dtype=jnp.float_),\n jnp.arange(0.0, 1.0, 0.1))\n # from https://github.com/google/jax/issues/3450\n self.assertAllClose(np.arange(2.5, dtype=jnp.float_),\n jnp.arange(2.5))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for dtype in all_dtypes\n for shape in nonzerodim_shapes\n for axis in (None, *range(len(shape)))))\n def testSort(self, dtype, shape, axis):\n rng = jtu.rand_some_equal(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n jnp_fun = jnp.sort\n np_fun = np.sort\n if axis is not None:\n jnp_fun = partial(jnp_fun, axis=axis)\n np_fun = partial(np_fun, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for dtype in all_dtypes\n for shape in one_dim_array_shapes\n for axis in [None]))\n def testSortComplex(self, dtype, shape, axis):\n rng = jtu.rand_some_equal(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np.sort_complex, jnp.sort_complex, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp.sort_complex, args_maker)\n\n 
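# lexsort treats the last key as the primary sort key; the test below compares\n  # jnp.lexsort against np.lexsort for both np.array and tuple key inputs.\n\n  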
@parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_input_type={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n input_type.__name__, axis),\n \"shape\": shape, \"dtype\": dtype, \"input_type\": input_type, \"axis\": axis}\n for dtype in all_dtypes\n for shape in nonempty_nonscalar_array_shapes\n for input_type in [np.array, tuple]\n for axis in (-1, *range(len(shape) - 1))))\n def testLexsort(self, dtype, shape, input_type, axis):\n rng = jtu.rand_some_equal(self.rng())\n args_maker = lambda: [input_type(rng(shape, dtype))]\n jnp_op = lambda x: jnp.lexsort(x, axis=axis)\n np_op = lambda x: np.lexsort(x, axis=axis)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for dtype in all_dtypes\n for shape in nonzerodim_shapes\n for axis in (None, *range(len(shape)))))\n def testArgsort(self, dtype, shape, axis):\n rng = jtu.rand_some_equal(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n jnp_fun = jnp.argsort\n np_fun = np.argsort\n if axis is not None:\n jnp_fun = partial(jnp_fun, axis=axis)\n np_fun = partial(np_fun, axis=axis)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for dtype in all_dtypes\n for shape in nonzerodim_shapes))\n def testMsort(self, dtype, shape):\n rng = jtu.rand_some_equal(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np.msort, jnp.msort, args_maker)\n self._CompileAndCheck(jnp.msort, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_shifts={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n shifts, axis),\n \"shape\": shape, \"dtype\": dtype, \"shifts\": shifts, \"axis\": axis}\n for dtype in all_dtypes\n for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]\n for shifts, axis in [\n (3, None),\n (1, 1),\n ((3,), (0,)),\n ((-2,), (-2,)),\n ((1, 2), (0, -1)),\n ((4, 2, 5, 5, 2, 4), None),\n (100, None),\n ]))\n def testRoll(self, shape, dtype, shifts, axis):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), np.array(shifts)]\n jnp_op = partial(jnp.roll, axis=axis)\n np_op = partial(np.roll, axis=axis)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_start={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n axis, start),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis,\n \"start\": start}\n for dtype in all_dtypes\n for shape in [(1, 2, 3, 4)]\n for axis in [-3, 0, 2, 3]\n for start in [-4, -1, 2, 4]))\n def testRollaxis(self, shape, dtype, start, axis):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n jnp_op = partial(jnp.rollaxis, axis=axis, start=start)\n np_op = partial(np.rollaxis, axis=axis, start=start)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_bitorder={}\".format(\n 
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis,\n \"bitorder\": bitorder}\n for dtype in [np.uint8, np.bool_]\n for bitorder in ['big', 'little']\n for shape in [(1, 2, 3, 4)]\n for axis in [None, 0, 1, -2, -1]))\n def testPackbits(self, shape, dtype, axis, bitorder):\n rng = jtu.rand_some_zero(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)\n np_op = partial(np.packbits, axis=axis, bitorder=bitorder)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_bitorder={}_count={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis, \"bitorder\": bitorder,\n \"count\": count}\n for dtype in [np.uint8]\n for bitorder in ['big', 'little']\n for shape in [(1, 2, 3, 4)]\n for axis in [None, 0, 1, -2, -1]\n for count in [None, 20]))\n def testUnpackbits(self, shape, dtype, axis, bitorder, count):\n rng = jtu.rand_int(self.rng(), 0, 256)\n args_maker = lambda: [rng(shape, dtype)]\n jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)\n np_op = partial(np.unpackbits, axis=axis, bitorder=bitorder)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_index={}_axis={}_mode={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n jtu.format_shape_dtype_string(index_shape, index_dtype),\n axis, mode),\n \"shape\": shape, \"index_shape\": index_shape, \"dtype\": dtype,\n \"index_dtype\": index_dtype, \"axis\": axis, \"mode\": mode}\n for shape in [(3,), (3, 4), (3, 4, 5)]\n for index_shape in scalar_shapes + [(3,), (2, 1, 3)]\n for axis in itertools.chain(range(-len(shape), len(shape)),\n [cast(Optional[int], None)])\n for dtype in all_dtypes\n for index_dtype in int_dtypes\n for mode in [None, 'wrap', 'clip']))\n def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode):\n def args_maker():\n x = rng(shape, dtype)\n i = rng_indices(index_shape, index_dtype)\n return x, i\n\n rng = jtu.rand_default(self.rng())\n if mode is None:\n rng_indices = jtu.rand_int(self.rng(), -shape[axis or 0], shape[axis or 0])\n else:\n rng_indices = jtu.rand_int(self.rng(), -5, 5)\n jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)\n np_op = lambda x, i: np.take(x, i, axis=axis, mode=mode)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n def testTakeEmpty(self):\n np.testing.assert_array_equal(\n jnp.array([], dtype=jnp.float32),\n jnp.take(jnp.array([], jnp.float32), jnp.array([], jnp.int32)))\n\n np.testing.assert_array_equal(\n jnp.ones((2, 0, 4), dtype=jnp.float32),\n jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32), jnp.array([], jnp.int32),\n axis=1))\n\n with self.assertRaisesRegex(IndexError, \"non-empty jnp.take\"):\n jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32),\n jnp.array([0], jnp.int32), axis=1)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_index={}_axis={}\".format(\n jtu.format_shape_dtype_string(x_shape, dtype),\n jtu.format_shape_dtype_string(i_shape, index_dtype), axis),\n \"x_shape\": x_shape, \"i_shape\": i_shape, \"dtype\": dtype,\n \"index_dtype\": index_dtype, \"axis\": axis}\n for x_shape, i_shape in 
filter(\n _shapes_are_equal_length,\n filter(_shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(nonempty_nonscalar_array_shapes, 2)))\n for axis in itertools.chain(range(len(x_shape)), [-1],\n [cast(Optional[int], None)])\n for dtype in default_dtypes\n for index_dtype in int_dtypes))\n def testTakeAlongAxis(self, x_shape, i_shape, dtype, index_dtype, axis):\n rng = jtu.rand_default(self.rng())\n\n i_shape = np.array(i_shape)\n if axis is None:\n i_shape = [np.prod(i_shape, dtype=np.int64)]\n else:\n # Test the case where the size of the axis doesn't necessarily broadcast.\n i_shape[axis] *= 3\n i_shape = list(i_shape)\n def args_maker():\n x = rng(x_shape, dtype)\n n = np.prod(x_shape, dtype=np.int32) if axis is None else x_shape[axis]\n if np.issubdtype(index_dtype, np.unsignedinteger):\n index_rng = jtu.rand_int(self.rng(), 0, n)\n else:\n index_rng = jtu.rand_int(self.rng(), -n, n)\n i = index_rng(i_shape, index_dtype)\n return x, i\n\n jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)\n\n if hasattr(np, \"take_along_axis\"):\n np_op = lambda x, i: np.take_along_axis(x, i, axis=axis)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n def testTakeAlongAxisWithUint8IndicesDoesNotOverflow(self):\n # https://github.com/google/jax/issues/5088\n h = jtu.rand_default(self.rng())((256, 256, 100), np.float32)\n g = jtu.rand_int(self.rng(), 0, 100)((256, 256, 1), np.uint8)\n q0 = jnp.take_along_axis(h, g, axis=-1)\n q1 = np.take_along_axis( h, g, axis=-1)\n np.testing.assert_equal(q0, q1)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_n={}_increasing={}\".format(\n jtu.format_shape_dtype_string([shape], dtype),\n n, increasing),\n \"dtype\": dtype, \"shape\": shape, \"n\": n, \"increasing\": increasing}\n for dtype in inexact_dtypes\n for shape in [0, 5]\n for n in [2, 4]\n for increasing in [False, True]))\n def testVander(self, shape, dtype, n, increasing):\n rng = jtu.rand_default(self.rng())\n def np_fun(arg):\n arg = arg.astype(np.float32) if dtype == jnp.bfloat16 else arg\n return np.vander(arg, N=n, increasing=increasing)\n jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)\n args_maker = lambda: [rng([shape], dtype)]\n # np.vander seems to return float64 for all floating types. 
We could obey\n # those semantics, but they seem like a bug.\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol={np.float32: 1e-3})\n self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n \"nan_to_num\", [shape], [dtype]),\n \"shape\": shape, \"dtype\": dtype}\n for shape in array_shapes\n for dtype in inexact_dtypes))\n def testNanToNum(self, shape, dtype):\n rng = jtu.rand_some_inf_and_nan(self.rng())\n dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type\n def np_fun(x):\n if dtype == jnp.bfloat16:\n x = np.where(np.isnan(x), dtype(0), x)\n x = np.where(np.isposinf(x), jnp.finfo(dtype).max, x)\n x = np.where(np.isneginf(x), jnp.finfo(dtype).min, x)\n return x\n else:\n return np.nan_to_num(x).astype(dtype)\n\n args_maker = lambda: [rng(shape, dtype)]\n check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE\n self._CheckAgainstNumpy(np_fun, jnp.nan_to_num, args_maker,\n check_dtypes=check_dtypes)\n self._CompileAndCheck(jnp.nan_to_num, args_maker,\n check_dtypes=check_dtypes)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"ix_\", shapes, dtypes),\n \"shapes\": shapes, \"dtypes\": dtypes}\n for shapes, dtypes in (\n ((), ()),\n (((7,),), (np.int32,)),\n (((3,), (4,)), (np.int32, np.int32)),\n (((3,), (1,), (4,)), (np.int32, np.int32, np.int32)),\n )))\n def testIx_(self, shapes, dtypes):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)\n for shape, dtype in zip(shapes, dtypes)]\n self._CheckAgainstNumpy(np.ix_, jnp.ix_, args_maker)\n self._CompileAndCheck(jnp.ix_, args_maker)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": \"_dimensions={}_dtype={}_sparse={}\".format(\n dimensions, dtype, sparse),\n \"dimensions\": dimensions, \"dtype\": dtype, \"sparse\": sparse}\n for dimensions in [(), (2,), (3, 0), (4, 5, 6)]\n for dtype in number_dtypes\n for sparse in [True, False]))\n def testIndices(self, dimensions, dtype, sparse):\n def args_maker(): return []\n np_fun = partial(np.indices, dimensions=dimensions,\n dtype=dtype, sparse=sparse)\n jnp_fun = partial(jnp.indices, dimensions=dimensions,\n dtype=dtype, sparse=sparse)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}\".format(\n op,\n jtu.format_shape_dtype_string(a_shape, a_dtype),\n jtu.format_shape_dtype_string(q_shape, q_dtype),\n axis, keepdims, interpolation),\n \"a_rng\": jtu.rand_some_nan,\n \"q_rng\": q_rng, \"op\": op,\n \"a_shape\": a_shape, \"a_dtype\": a_dtype,\n \"q_shape\": q_shape, \"q_dtype\": q_dtype, \"axis\": axis,\n \"keepdims\": keepdims,\n \"interpolation\": interpolation}\n for (op, q_rng) in (\n (\"percentile\", partial(jtu.rand_uniform, low=0., high=100.)),\n (\"quantile\", partial(jtu.rand_uniform, low=0., high=1.)),\n (\"nanpercentile\", partial(jtu.rand_uniform, low=0., high=100.)),\n (\"nanquantile\", partial(jtu.rand_uniform, low=0., high=1.)),\n )\n for a_dtype in default_dtypes\n for a_shape, axis in (\n ((7,), None),\n ((47, 7), 0),\n ((4, 101), 1),\n )\n for q_dtype in [np.float32]\n for q_shape in scalar_shapes + [(4,)]\n for keepdims in [False, True]\n for interpolation in ['linear', 'lower', 'higher', 'nearest',\n 'midpoint']))\n def 
testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,\n axis, keepdims, interpolation):\n a_rng = a_rng(self.rng())\n q_rng = q_rng(self.rng())\n if \"median\" in op:\n args_maker = lambda: [a_rng(a_shape, a_dtype)]\n else:\n args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]\n\n def np_fun(*args):\n args = [x if jnp.result_type(x) != jnp.bfloat16 else\n np.asarray(x, np.float32) for x in args]\n return getattr(np, op)(*args, axis=axis, keepdims=keepdims,\n interpolation=interpolation)\n jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,\n interpolation=interpolation)\n\n # TODO(phawkins): we currently set dtype=False because we aren't as\n # aggressive about promoting to float64. It's not clear we want to mimic\n # Numpy here.\n tol_spec = {np.float32: 2e-4, np.float64: 5e-6}\n tol = max(jtu.tolerance(a_dtype, tol_spec),\n jtu.tolerance(q_dtype, tol_spec))\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_{}_a_shape={}_axis={}_keepdims={}\".format(\n op, jtu.format_shape_dtype_string(a_shape, a_dtype),\n axis, keepdims),\n \"op\": op, \"a_shape\": a_shape, \"a_dtype\": a_dtype,\n \"axis\": axis,\n \"keepdims\": keepdims}\n for a_dtype in default_dtypes\n for a_shape, axis in (\n ((7,), None),\n ((47, 7), 0),\n ((4, 101), 1),\n )\n for keepdims in [False, True]\n for op in [\"median\", \"nanmedian\"]))\n def testMedian(self, op, a_shape, a_dtype, axis, keepdims):\n if op == \"median\":\n a_rng = jtu.rand_default(self.rng())\n else:\n a_rng = jtu.rand_some_nan(self.rng())\n args_maker = lambda: [a_rng(a_shape, a_dtype)]\n def np_fun(*args):\n args = [x if jnp.result_type(x) != jnp.bfloat16 else\n np.asarray(x, np.float32) for x in args]\n return getattr(np, op)(*args, axis=axis, keepdims=keepdims)\n jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims)\n # TODO(phawkins): we currently set dtype=False because we aren't as\n # aggressive about promoting to float64. It's not clear we want to mimic\n # Numpy here.\n tol_spec = {np.float32: 2e-4, np.float64: 5e-6}\n tol = jtu.tolerance(a_dtype, tol_spec)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in all_shapes for dtype in all_dtypes))\n def testWhereOneArgument(self, shape, dtype):\n rng = jtu.rand_some_zero(self.rng())\n np_fun = lambda x: np.where(x)\n np_fun = jtu.ignore_warning(\n category=DeprecationWarning,\n message=\"Calling nonzero on 0d arrays.*\")(np_fun)\n jnp_fun = lambda x: jnp.where(x)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n\n # JIT compilation requires specifying a size statically. 
Full test of\n # this behavior is in testNonzeroSize().\n jnp_fun = lambda x: jnp.where(x, size=np.size(x) // 2)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"_{}\".format(\"_\".join(\n jtu.format_shape_dtype_string(shape, dtype)\n for shape, dtype in zip(shapes, dtypes))),\n \"shapes\": shapes, \"dtypes\": dtypes\n } for shapes in s(filter(_shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(all_shapes, 3)))\n for dtypes in s(itertools.combinations_with_replacement(all_dtypes, 3)))))\n def testWhereThreeArgument(self, shapes, dtypes):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n def np_fun(cond, x, y):\n return _promote_like_jnp(partial(np.where, cond))(x, y)\n self._CheckAgainstNumpy(np_fun, jnp.where, args_maker)\n self._CompileAndCheck(jnp.where, args_maker)\n\n def testWhereScalarPromotion(self):\n x = jnp.where(jnp.array([True, False]), 3,\n jnp.ones((2,), dtype=jnp.float32))\n self.assertEqual(x.dtype, np.dtype(np.float32))\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": jtu.format_test_name_suffix(\"\", shapes, (np.bool_,) * n + dtypes),\n \"shapes\": shapes, \"dtypes\": dtypes\n } for n in s(range(1, 3))\n for shapes in s(filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(all_shapes, 2 * n + 1)))\n for dtypes in s(itertools.combinations_with_replacement(all_dtypes, n + 1)))))\n def testSelect(self, shapes, dtypes):\n rng = jtu.rand_default(self.rng())\n n = len(dtypes) - 1\n def args_maker():\n condlist = [rng(shape, np.bool_) for shape in shapes[:n]]\n choicelist = [rng(shape, dtype)\n for shape, dtype in zip(shapes[n:-1], dtypes[:n])]\n default = rng(shapes[-1], dtypes[-1])\n return condlist, choicelist, default\n # TODO(phawkins): float32/float64 type mismatches\n def np_fun(condlist, choicelist, default):\n choicelist = [x if jnp.result_type(x) != jnp.bfloat16\n else x.astype(np.float32) for x in choicelist]\n dtype = jnp.result_type(default, *choicelist)\n return np.select(condlist,\n [np.asarray(x, dtype=dtype) for x in choicelist],\n np.asarray(default, dtype=dtype))\n self._CheckAgainstNumpy(np_fun, jnp.select, args_maker,\n check_dtypes=False)\n self._CompileAndCheck(jnp.select, args_maker,\n rtol={np.float64: 1e-7, np.complex128: 1e-7})\n\n\n def testIssue330(self):\n x = jnp.full((1, 1), jnp.array([1])[0]) # doesn't crash\n self.assertEqual(x[0, 0], 1)\n\n def testScalarDtypePromotion(self):\n orig_numpy_result = (1 + np.eye(1, dtype=np.float32)).dtype\n jax_numpy_result = (1 + jnp.eye(1, dtype=jnp.float32)).dtype\n self.assertEqual(orig_numpy_result, jax_numpy_result)\n\n def testSymmetrizeDtypePromotion(self):\n x = np.eye(3, dtype=np.float32)\n orig_numpy_result = ((x + x.T) / 2).dtype\n\n x = jnp.eye(3, dtype=jnp.float32)\n jax_numpy_result = ((x + x.T) / 2).dtype\n self.assertEqual(orig_numpy_result, jax_numpy_result)\n\n # NOTE(mattjj): I disabled this test when removing lax._safe_mul because\n # introducing the convention 0 * inf = 0 leads to silently wrong results in\n # some cases. 
See this comment for details:\n # https://github.com/google/jax/issues/1052#issuecomment-514083352\n # def testIssue347(self):\n # # https://github.com/google/jax/issues/347\n # def test_fail(x):\n # x = jnp.sqrt(jnp.sum(x ** 2, axis=1))\n # ones = jnp.ones_like(x)\n # x = jnp.where(x > 0.5, x, ones)\n # return jnp.sum(x)\n # x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)\n # result = jax.grad(test_fail)(x)\n # assert not np.any(np.isnan(result))\n\n def testIssue453(self):\n # https://github.com/google/jax/issues/453\n a = np.arange(6) + 1\n ans = jnp.reshape(a, (3, 2), order='F')\n expected = np.reshape(a, (3, 2), order='F')\n self.assertAllClose(ans, expected)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_dtype={}\".format(op, pytype.__name__),\n \"pytype\": pytype, \"dtype\": dtype, \"op\": op}\n for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),\n (bool, jnp.bool_), (complex, jnp.complex_)]\n for op in [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"]))\n def testAtLeastNdLiterals(self, pytype, dtype, op):\n # Fixes: https://github.com/google/jax/issues/634\n np_fun = lambda arg: getattr(np, op)(arg).astype(dtype)\n jnp_fun = lambda arg: getattr(jnp, op)(arg)\n args_maker = lambda: [pytype(2)]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\n \"testcase_name\": \"_shape={}_dtype={}_weights={}_minlength={}_length={}\".format(\n shape, dtype, weights, minlength, length\n ),\n \"shape\": shape,\n \"dtype\": dtype,\n \"weights\": weights,\n \"minlength\": minlength,\n \"length\": length}\n for shape in [(0,), (5,), (10,)]\n for dtype in int_dtypes\n for weights in [True, False]\n for minlength in [0, 20]\n for length in [None, 10]\n ))\n def testBincount(self, shape, dtype, weights, minlength, length):\n rng = jtu.rand_positive(self.rng())\n args_maker = lambda: (rng(shape, dtype), (rng(shape, 'float32') if weights else None))\n\n np_fun = partial(np.bincount, minlength=minlength)\n jnp_fun = partial(jnp.bincount, minlength=minlength, length=length)\n\n if length is not None:\n self._CompileAndCheck(jnp_fun, args_maker)\n if length is None:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)\n\n def testBincountNegative(self):\n # Test that jnp.bincount ignores negative values.\n x_rng = jtu.rand_int(self.rng(), -100, 100)\n w_rng = jtu.rand_uniform(self.rng())\n shape = (1000,)\n x = x_rng(shape, 'int32')\n w = w_rng(shape, 'float32')\n\n xn = np.array(x)\n xn[xn < 0] = 0\n wn = np.array(w)\n np_result = np.bincount(xn[xn >= 0], wn[xn >= 0])\n jnp_result = jnp.bincount(x, w)\n self.assertAllClose(np_result, jnp_result, check_dtypes=False)\n\n\n @parameterized.named_parameters(*jtu.cases_from_list(\n {\"testcase_name\": \"_case={}\".format(i),\n \"input\": input}\n for i, input in enumerate([\n 3,\n [3],\n [np.array(3)],\n [np.array([3])],\n [[np.array(3)]],\n [[np.array([3])]],\n [3, 4, 5],\n [\n [np.eye(2, dtype=np.int32) * 2, np.zeros((2, 3), dtype=np.int32)],\n [np.ones((3, 2), dtype=np.int32), np.eye(3, dtype=np.int32) * 3],\n ],\n [np.array([1, 2, 3]), np.array([2, 3, 4]), 10],\n [np.ones((2, 2), dtype=np.int32), np.zeros((2, 2), dtype=np.int32)],\n [[np.array([1, 2, 3])], [np.array([2, 3, 4])]],\n ])))\n def testBlock(self, input):\n args_maker = lambda: [input]\n self._CheckAgainstNumpy(np.block, jnp.block, args_maker)\n self._CompileAndCheck(jnp.block, args_maker)\n\n def 
testLongLong(self):\n self.assertAllClose(np.int64(7), jax.jit(lambda x: x)(np.longlong(7)))\n\n @jtu.ignore_warning(category=UserWarning,\n message=\"Explicitly requested dtype.*\")\n def testArange(self):\n # test cases inspired by dask tests at\n # https://github.com/dask/dask/blob/main/dask/array/tests/test_creation.py#L92\n self.assertAllClose(jnp.arange(77),\n np.arange(77, dtype=jnp.int_))\n self.assertAllClose(jnp.arange(2, 13),\n np.arange(2, 13, dtype=jnp.int_))\n self.assertAllClose(jnp.arange(4, 21, 9),\n np.arange(4, 21, 9, dtype=jnp.int_))\n self.assertAllClose(jnp.arange(53, 5, -3),\n np.arange(53, 5, -3, dtype=jnp.int_))\n self.assertAllClose(jnp.arange(77, dtype=float),\n np.arange(77, dtype=float))\n self.assertAllClose(jnp.arange(2, 13, dtype=int),\n np.arange(2, 13, dtype=int))\n self.assertAllClose(jnp.arange(0, 1, -0.5),\n np.arange(0, 1, -0.5, dtype=jnp.float_))\n\n self.assertRaises(TypeError, lambda: jnp.arange())\n\n # test that jnp.arange(N) doesn't instantiate an ndarray\n self.assertNotEqual(type(jnp.arange(77)), type(np.arange(77)))\n self.assertEqual(type(jnp.arange(77)), type(lax.iota(np.int32, 77)))\n\n # test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray\n self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),\n type(np.arange(77, dtype=np.int32)))\n self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),\n type(lax.iota(np.int32, 77)))\n\n def testArangeJit(self):\n ans = jax.jit(lambda: jnp.arange(5))()\n expected = np.arange(5)\n self.assertAllClose(ans, expected)\n\n def testIssue830(self):\n a = jnp.arange(4, dtype=jnp.complex64)\n self.assertEqual(a.dtype, jnp.complex64)\n\n def testIssue728(self):\n assert jnp.allclose(jnp.eye(5000), np.eye(5000))\n self.assertEqual(0, np.sum(jnp.eye(1050) - np.eye(1050)))\n\n def testIssue746(self):\n jnp.arange(12).reshape(3, 4) # doesn't crash\n\n def testIssue764(self):\n x = jnp.linspace(190, 200, 4)\n f = jax.grad(lambda x: jnp.sum(jnp.tanh(x)))\n # Expected values computed with autograd in float64 precision.\n expected = np.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,\n 7.66067839e-174], np.float64)\n self.assertAllClose(f(x), expected, check_dtypes=False)\n\n def testIssue776(self):\n \"\"\"Tests that the scatter-add transpose rule instantiates symbolic zeros.\"\"\"\n def f(u):\n y = jnp.ones(10).at[np.array([2, 4, 5])].add(u)\n # The transpose rule for lax.tie_in returns a symbolic zero for its first\n # argument.\n return lax.tie_in(y, 7.)\n\n self.assertAllClose(np.zeros(3,), jax.grad(f)(np.ones(3,)))\n\n # NOTE(mattjj): I disabled this test when removing lax._safe_mul because this\n # is a numerical stability issue that should be solved with a custom jvp rule\n # of the sigmoid function being differentiated here, not by safe_mul.\n # def testIssue777(self):\n # x = jnp.linspace(-200, 0, 4, dtype=np.float32)\n # f = jax.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))\n # self.assertAllClose(f(x), np.array([0., 0., 0., 0.25], dtype=np.float32))\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(op, [()], [dtype]),\n \"dtype\": dtype, \"op\": op}\n for dtype in float_dtypes\n for op in (\"sqrt\", \"arccos\", \"arcsin\", \"arctan\", \"sin\", \"cos\", \"tan\",\n \"sinh\", \"cosh\", \"tanh\", \"arccosh\", \"arcsinh\", \"arctanh\", \"exp\",\n \"log\", \"expm1\", \"log1p\")))\n def testMathSpecialFloatValues(self, op, dtype):\n np_op = getattr(np, op)\n np_op = jtu.ignore_warning(category=RuntimeWarning,\n 
message=\"invalid value.*\")(np_op)\n np_op = jtu.ignore_warning(category=RuntimeWarning,\n message=\"divide by zero.*\")(np_op)\n np_op = jtu.ignore_warning(category=RuntimeWarning,\n message=\"overflow.*\")(np_op)\n\n jnp_op = getattr(jnp, op)\n dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type\n for x in (np.nan, -np.inf, -100., -2., -1., 0., 1., 2., 100., np.inf,\n jnp.finfo(dtype).max, np.sqrt(jnp.finfo(dtype).max),\n np.sqrt(jnp.finfo(dtype).max) * 2.):\n if (op in (\"sin\", \"cos\", \"tan\") and\n jtu.device_under_test() == \"tpu\"):\n continue # TODO(b/132196789): fix and reenable.\n x = dtype(x)\n expected = np_op(x)\n actual = jnp_op(x)\n tol = jtu.tolerance(dtype, {np.float32: 1e-3, np.float64: 1e-7})\n self.assertAllClose(expected, actual, atol=tol,\n rtol=tol)\n\n def testIssue883(self):\n # from https://github.com/google/jax/issues/883\n raise SkipTest(\"we decided to disallow arrays as static args\")\n\n @partial(jax.jit, static_argnums=(1,))\n def f(x, v):\n return x\n\n x = jnp.ones((10, 10))\n v = jnp.array([1, 2, 3])\n _ = f(x, v)\n _ = f(x, v) # doesn't crash\n\n def testReductionOfOutOfBoundsAxis(self): # Issue 888\n x = jnp.ones((3, 4))\n self.assertRaises(ValueError, lambda: jnp.sum(x, axis=2))\n\n def testIssue956(self):\n self.assertRaises(TypeError, lambda: jnp.ndarray((1, 1)))\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}\"\n .format(shape, dtype, out_dtype, axis, ddof, keepdims),\n \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype, \"axis\": axis,\n \"ddof\": ddof, \"keepdims\": keepdims}\n for shape in [(5,), (10, 5)]\n for dtype in all_dtypes\n for out_dtype in inexact_dtypes\n for axis in [None, 0, -1]\n for ddof in [0, 1, 2]\n for keepdims in [False, True]))\n def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"Degrees of freedom <= 0 for slice.\")\n def np_fun(x):\n out = np.var(x.astype(jnp.promote_types(np.float32, dtype)),\n axis=axis, ddof=ddof, keepdims=keepdims)\n return out.astype(out_dtype)\n jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)\n tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,\n np.float64: 1e-3, np.complex128: 1e-6})\n if (jnp.issubdtype(dtype, jnp.complexfloating) and\n not jnp.issubdtype(out_dtype, jnp.complexfloating)):\n self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))\n else:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,\n atol=tol)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}\"\n .format(shape, dtype, out_dtype, axis, ddof, keepdims),\n \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype, \"axis\": axis,\n \"ddof\": ddof, \"keepdims\": keepdims}\n for shape in [(5,), (10, 5)]\n for dtype in all_dtypes\n for out_dtype in inexact_dtypes\n for axis in [None, 0, -1]\n for ddof in [0, 1, 2]\n for keepdims in [False, True]))\n def testNanVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):\n rng = jtu.rand_some_nan(self.rng())\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"Degrees of freedom <= 0 for slice.\")\n def np_fun(x):\n out = 
np.nanvar(x.astype(jnp.promote_types(np.float32, dtype)),\n axis=axis, ddof=ddof, keepdims=keepdims)\n return out.astype(out_dtype)\n jnp_fun = partial(jnp.nanvar, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)\n tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,\n np.float64: 1e-3, np.complex128: 1e-6})\n if (jnp.issubdtype(dtype, jnp.complexfloating) and\n not jnp.issubdtype(out_dtype, jnp.complexfloating)):\n self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))\n else:\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,\n atol=tol)\n\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_dtype={}_y_shape={}_y_dtype={}_rowvar={}_ddof={}_bias={}_fweights={}_aweights={}\".format(\n shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights),\n \"shape\": shape, \"y_shape\": y_shape, \"dtype\": dtype, \"y_dtype\": y_dtype,\"rowvar\": rowvar, \"ddof\": ddof,\n \"bias\": bias, \"fweights\": fweights, \"aweights\": aweights}\n for shape in [(5,), (10, 5), (5, 10)]\n for dtype in all_dtypes\n for y_dtype in [None, dtype]\n for rowvar in [True, False]\n for y_shape in _get_y_shapes(y_dtype, shape, rowvar)\n for bias in [True, False]\n for ddof in [None, 2, 3]\n for fweights in [True, False]\n for aweights in [True, False]))\n def testCov(self, shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights):\n rng = jtu.rand_default(self.rng())\n wrng = jtu.rand_positive(self.rng())\n wdtype = np.real(dtype(0)).dtype\n wshape = shape[-1:] if rowvar or shape[0] == 1 else shape[:1]\n\n args_maker = lambda: [rng(shape, dtype),\n rng(y_shape, y_dtype) if y_dtype else None,\n wrng(wshape, int) if fweights else None,\n wrng(wshape, wdtype) if aweights else None]\n kwargs = dict(rowvar=rowvar, ddof=ddof, bias=bias)\n np_fun = lambda m, y, f, a: np.cov(m, y, fweights=f, aweights=a, **kwargs)\n jnp_fun = lambda m, y, f, a: jnp.cov(m, y, fweights=f, aweights=a, **kwargs)\n tol = {jnp.bfloat16: 5E-2, np.float16: 1E-2, np.float32: 1e-5,\n np.float64: 1e-13, np.complex64: 1e-5, np.complex128: 1e-13}\n tol = 7e-2 if jtu.device_under_test() == \"tpu\" else tol\n tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))\n self._CheckAgainstNumpy(\n np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, atol=tol,\n rtol=tol)\n\n def testIssue967(self):\n self.assertRaises(TypeError, lambda: jnp.zeros(1.5))\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_dtype={}_rowvar={}\".format(\n shape, dtype.__name__, rowvar),\n \"shape\": shape, \"dtype\": dtype, \"rowvar\": rowvar}\n for shape in [(5,), (10, 5), (3, 10)]\n for dtype in number_dtypes\n for rowvar in [True, False]))\n def testCorrCoef(self, shape, dtype, rowvar):\n rng = jtu.rand_default(self.rng())\n def args_maker():\n ok = False\n while not ok:\n x = rng(shape, dtype)\n ok = not np.any(np.isclose(np.std(x), 0.0))\n return (x,)\n np_fun = partial(np.corrcoef, rowvar=rowvar)\n np_fun = jtu.ignore_warning(\n category=RuntimeWarning, message=\"invalid value encountered.*\")(np_fun)\n jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)\n tol = 1e-2 if jtu.device_under_test() == \"tpu\" else None\n self._CheckAgainstNumpy(\n np_fun, jnp_fun, args_maker, check_dtypes=False,\n tol=tol)\n self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n 
{\"testcase_name\": \"_{}_{}_{}\".format(jtu.format_shape_dtype_string(shape, dtype),\n \"None\" if end_dtype is None else jtu.format_shape_dtype_string(end_shape, end_dtype),\n \"None\" if begin_dtype is None else jtu.format_shape_dtype_string(begin_shape, begin_dtype)),\n \"shape\": shape, \"dtype\": dtype, \"end_shape\": end_shape,\n \"end_dtype\": end_dtype, \"begin_shape\": begin_shape,\n \"begin_dtype\": begin_dtype}\n for dtype in number_dtypes\n for end_dtype in [None] + [dtype]\n for begin_dtype in [None] + [dtype]\n for shape in [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE]\n for begin_shape in (\n [None] if begin_dtype is None\n else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])\n for end_shape in (\n [None] if end_dtype is None\n else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])))\n def testEDiff1d(self, shape, dtype, end_shape, end_dtype, begin_shape,\n begin_dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype),\n (None if end_dtype is None else rng(end_shape, end_dtype)),\n (None if begin_dtype is None else rng(begin_shape, begin_dtype))]\n np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)\n jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testEDiff1dWithDtypeCast(self):\n rng = jtu.rand_default(self.rng())\n shape = jtu.NUMPY_SCALAR_SHAPE\n dtype = jnp.float32\n end_dtype = jnp.int32\n args_maker = lambda: [rng(shape, dtype), rng(shape, end_dtype), rng(shape, dtype)]\n np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)\n jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": \"_shapes={}_dtype={}_indexing={}_sparse={}\".format(\n shapes, dtype, indexing, sparse),\n \"shapes\": shapes, \"dtype\": dtype, \"indexing\": indexing,\n \"sparse\": sparse}\n for shapes in [(), (5,), (5, 3)]\n for dtype in number_dtypes\n for indexing in ['xy', 'ij']\n for sparse in [True, False]))\n def testMeshGrid(self, shapes, dtype, indexing, sparse):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],\n [dtype] * len(shapes))\n np_fun = partial(np.meshgrid, indexing=indexing, sparse=sparse)\n jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testMgrid(self):\n assertAllEqual = partial(self.assertAllClose, atol=0, rtol=0)\n assertAllEqual(np.mgrid[:4], jnp.mgrid[:4])\n assertAllEqual(np.mgrid[:4,], jnp.mgrid[:4,])\n assertAllEqual(np.mgrid[:4], jax.jit(lambda: jnp.mgrid[:4])())\n assertAllEqual(np.mgrid[:5, :5], jnp.mgrid[:5, :5])\n assertAllEqual(np.mgrid[:3, :2], jnp.mgrid[:3, :2])\n assertAllEqual(np.mgrid[1:4:2], jnp.mgrid[1:4:2])\n assertAllEqual(np.mgrid[1:5:3, :5], jnp.mgrid[1:5:3, :5])\n assertAllEqual(np.mgrid[:3, :2, :5], jnp.mgrid[:3, :2, :5])\n assertAllEqual(np.mgrid[:3:2, :2, :5], jnp.mgrid[:3:2, :2, :5])\n # Corner cases\n assertAllEqual(np.mgrid[:], jnp.mgrid[:])\n # When the step length is a complex number, because of float calculation,\n # the values between jnp and np might slightly different.\n atol = 1e-6\n rtol = 1e-6\n self.assertAllClose(np.mgrid[-1:1:5j],\n jnp.mgrid[-1:1:5j],\n 
atol=atol,\n rtol=rtol)\n self.assertAllClose(np.mgrid[3:4:7j],\n jnp.mgrid[3:4:7j],\n atol=atol,\n rtol=rtol)\n self.assertAllClose(np.mgrid[1:6:8j, 2:4],\n jnp.mgrid[1:6:8j, 2:4],\n atol=atol,\n rtol=rtol)\n # Non-integer steps\n self.assertAllClose(np.mgrid[0:3.5:0.5],\n jnp.mgrid[0:3.5:0.5],\n atol=atol,\n rtol=rtol)\n self.assertAllClose(np.mgrid[1.3:4.2:0.3],\n jnp.mgrid[1.3:4.2:0.3],\n atol=atol,\n rtol=rtol)\n # abstract tracer value for jnp.mgrid slice\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError,\n \"slice start of jnp.mgrid\"):\n jax.jit(lambda a, b: jnp.mgrid[a:b])(0, 2)\n\n def testOgrid(self):\n def assertListOfArraysEqual(xs, ys):\n self.assertIsInstance(xs, list)\n self.assertIsInstance(ys, list)\n self.assertEqual(len(xs), len(ys))\n for x, y in zip(xs, ys):\n self.assertArraysEqual(x, y)\n\n self.assertArraysEqual(np.ogrid[:5], jnp.ogrid[:5])\n self.assertArraysEqual(np.ogrid[:5], jax.jit(lambda: jnp.ogrid[:5])())\n self.assertArraysEqual(np.ogrid[1:7:2], jnp.ogrid[1:7:2])\n # List of arrays\n assertListOfArraysEqual(np.ogrid[:5,], jnp.ogrid[:5,])\n assertListOfArraysEqual(np.ogrid[0:5, 1:3], jnp.ogrid[0:5, 1:3])\n assertListOfArraysEqual(np.ogrid[1:3:2, 2:9:3], jnp.ogrid[1:3:2, 2:9:3])\n assertListOfArraysEqual(np.ogrid[:5, :9, :11], jnp.ogrid[:5, :9, :11])\n # Corner cases\n self.assertArraysEqual(np.ogrid[:], jnp.ogrid[:])\n # Complex number steps\n atol = 1e-6\n rtol = 1e-6\n self.assertAllClose(np.ogrid[-1:1:5j],\n jnp.ogrid[-1:1:5j],\n atol=atol,\n rtol=rtol)\n # Non-integer steps\n self.assertAllClose(np.ogrid[0:3.5:0.3],\n jnp.ogrid[0:3.5:0.3],\n atol=atol,\n rtol=rtol)\n self.assertAllClose(np.ogrid[1.2:4.8:0.24],\n jnp.ogrid[1.2:4.8:0.24],\n atol=atol,\n rtol=rtol)\n # abstract tracer value for ogrid slice\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError,\n \"slice start of jnp.ogrid\"):\n jax.jit(lambda a, b: jnp.ogrid[a:b])(0, 2)\n\n def testR_(self):\n a = np.arange(6).reshape((2,3))\n self.assertArraysEqual(np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])],\n jnp.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])])\n self.assertArraysEqual(np.r_['-1', a, a], jnp.r_['-1', a, a])\n self.assertArraysEqual(np.r_['0,2', [1,2,3], [4,5,6]], jnp.r_['0,2', [1,2,3], [4,5,6]])\n self.assertArraysEqual(np.r_['0,2,0', [1,2,3], [4,5,6]], jnp.r_['0,2,0', [1,2,3], [4,5,6]])\n self.assertArraysEqual(np.r_['1,2,0', [1,2,3], [4,5,6]], jnp.r_['1,2,0', [1,2,3], [4,5,6]])\n # negative 1d axis start\n self.assertArraysEqual(np.r_['0,4,-1', [1,2,3], [4,5,6]], jnp.r_['0,4,-1', [1,2,3], [4,5,6]])\n self.assertArraysEqual(np.r_['0,4,-2', [1,2,3], [4,5,6]], jnp.r_['0,4,-2', [1,2,3], [4,5,6]])\n\n # matrix directives\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=PendingDeprecationWarning)\n self.assertArraysEqual(np.r_['r',[1,2,3], [4,5,6]], jnp.r_['r',[1,2,3], [4,5,6]])\n self.assertArraysEqual(np.r_['c', [1, 2, 3], [4, 5, 6]], jnp.r_['c', [1, 2, 3], [4, 5, 6]])\n\n # bad directive\n with self.assertRaisesRegex(ValueError, \"could not understand directive.*\"):\n jnp.r_[\"asdfgh\",[1,2,3]]\n # abstract tracer value for r_ slice\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError,\n \"slice start of jnp.r_\"):\n jax.jit(lambda a, b: jnp.r_[a:b])(0, 2)\n\n # Complex number steps\n atol = 1e-6\n rtol = 1e-6\n self.assertAllClose(np.r_[-1:1:6j],\n jnp.r_[-1:1:6j],\n atol=atol,\n rtol=rtol)\n self.assertAllClose(np.r_[-1:1:6j, [0]*3, 5, 6],\n jnp.r_[-1:1:6j, [0]*3, 5, 6],\n atol=atol,\n rtol=rtol)\n # Non-integer 
steps\n self.assertAllClose(np.r_[1.2:4.8:0.24],\n jnp.r_[1.2:4.8:0.24],\n atol=atol,\n rtol=rtol)\n\n def testC_(self):\n a = np.arange(6).reshape((2, 3))\n self.assertArraysEqual(np.c_[np.array([1,2,3]), np.array([4,5,6])],\n jnp.c_[np.array([1,2,3]), np.array([4,5,6])])\n self.assertArraysEqual(np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])],\n jnp.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])])\n self.assertArraysEqual(np.c_['-1', a, a], jnp.c_['-1', a, a])\n self.assertArraysEqual(np.c_['0,2', [1,2,3], [4,5,6]], jnp.c_['0,2', [1,2,3], [4,5,6]])\n self.assertArraysEqual(np.c_['0,2,0', [1,2,3], [4,5,6]], jnp.c_['0,2,0', [1,2,3], [4,5,6]])\n self.assertArraysEqual(np.c_['1,2,0', [1,2,3], [4,5,6]], jnp.c_['1,2,0', [1,2,3], [4,5,6]])\n # negative 1d axis start\n self.assertArraysEqual(np.c_['0,4,-1', [1,2,3], [4,5,6]], jnp.c_['0,4,-1', [1,2,3], [4,5,6]])\n self.assertArraysEqual(np.c_['0,4,-2', [1,2,3], [4,5,6]], jnp.c_['0,4,-2', [1,2,3], [4,5,6]])\n # matrix directives, avoid numpy deprecation warning\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=PendingDeprecationWarning)\n self.assertArraysEqual(np.c_['r',[1,2,3], [4,5,6]], jnp.c_['r',[1,2,3], [4,5,6]])\n self.assertArraysEqual(np.c_['c', [1, 2, 3], [4, 5, 6]], jnp.c_['c', [1, 2, 3], [4, 5, 6]])\n\n # bad directive\n with self.assertRaisesRegex(ValueError, \"could not understand directive.*\"):\n jnp.c_[\"asdfgh\",[1,2,3]]\n # abstract tracer value for c_ slice\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError,\n \"slice start of jnp.c_\"):\n jax.jit(lambda a, b: jnp.c_[a:b])(0, 2)\n\n # Complex number steps\n atol = 1e-6\n rtol = 1e-6\n self.assertAllClose(np.c_[-1:1:6j],\n jnp.c_[-1:1:6j],\n atol=atol,\n rtol=rtol)\n\n # Non-integer steps\n self.assertAllClose(np.c_[1.2:4.8:0.24],\n jnp.c_[1.2:4.8:0.24],\n atol=atol,\n rtol=rtol)\n\n def testS_(self):\n self.assertEqual(np.s_[1:2:20],jnp.s_[1:2:20])\n\n def testIndex_exp(self):\n self.assertEqual(np.index_exp[5:3:2j],jnp.index_exp[5:3:2j])\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": f\"_start_shape={start_shape}_stop_shape={stop_shape}\"\n f\"_num={num}_endpoint={endpoint}_retstep={retstep}\"\n f\"_dtype={dtype.__name__ if dtype else 'None'}\",\n \"start_shape\": start_shape, \"stop_shape\": stop_shape,\n \"num\": num, \"endpoint\": endpoint, \"retstep\": retstep,\n \"dtype\": dtype}\n for start_shape in [(), (2,), (2, 2)]\n for stop_shape in [(), (2,), (2, 2)]\n for num in [0, 1, 2, 5, 20]\n for endpoint in [True, False]\n for retstep in [True, False]\n # floating-point compute between jitted platforms and non-jit + rounding\n # cause unavoidable variation in integer truncation for some inputs, so\n # we currently only test inexact 'dtype' arguments.\n for dtype in inexact_dtypes + [None,]))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testLinspace(self, start_shape, stop_shape, num, endpoint, retstep, dtype):\n rng = jtu.rand_default(self.rng())\n # relax default tolerances slightly\n tol = jtu.tolerance(dtype if dtype else np.float32) * 10\n args_maker = self._GetArgsMaker(rng,\n [start_shape, stop_shape],\n [dtype, dtype])\n start, stop = args_maker()\n ndim = len(np.shape(start + stop))\n for axis in range(-ndim, ndim):\n jnp_op = lambda start, stop: jnp.linspace(\n start, stop, num,\n endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)\n # NumPy 1.20.0 changed the semantics of linspace to floor for integer\n # dtypes.\n 
if numpy_version >= (1, 20) or not np.issubdtype(dtype, np.integer):\n np_op = lambda start, stop: np.linspace(\n start, stop, num,\n endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)\n else:\n def np_op(start, stop):\n out = np.linspace(start, stop, num, endpoint=endpoint,\n retstep=retstep, axis=axis)\n if retstep:\n return np.floor(out[0]).astype(dtype), out[1]\n else:\n return np.floor(out).astype(dtype)\n\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker,\n check_dtypes=False, tol=tol)\n self._CompileAndCheck(jnp_op, args_maker,\n check_dtypes=False, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": f\"_dtype={dtype.__name__}\", \"dtype\": dtype}\n for dtype in number_dtypes))\n def testLinspaceEndpoints(self, dtype):\n \"\"\"Regression test for Issue #3014.\"\"\"\n rng = jtu.rand_default(self.rng())\n endpoints = rng((2,), dtype)\n out = jnp.linspace(*endpoints, 10, dtype=dtype)\n self.assertAllClose(out[np.array([0, -1])], endpoints, rtol=0, atol=0)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": (\"_start_shape={}_stop_shape={}_num={}_endpoint={}\"\n \"_base={}_dtype={}\").format(\n start_shape, stop_shape, num, endpoint, base,\n dtype.__name__ if dtype else \"None\"),\n \"start_shape\": start_shape,\n \"stop_shape\": stop_shape,\n \"num\": num, \"endpoint\": endpoint, \"base\": base,\n \"dtype\": dtype}\n for start_shape in [(), (2,), (2, 2)]\n for stop_shape in [(), (2,), (2, 2)]\n for num in [0, 1, 2, 5, 20]\n for endpoint in [True, False]\n for base in [10.0, 2, np.e]\n for dtype in inexact_dtypes + [None,]))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testLogspace(self, start_shape, stop_shape, num,\n endpoint, base, dtype):\n if (dtype in int_dtypes and\n jtu.device_under_test() in (\"gpu\", \"tpu\") and\n not config.x64_enabled):\n raise unittest.SkipTest(\"GPUx32 truncated exponentiation\"\n \" doesn't exactly match other platforms.\")\n rng = jtu.rand_default(self.rng())\n # relax default tolerances slightly\n tol = {np.float16: 2e-2, np.float32: 1e-2, np.float64: 1e-6,\n np.complex64: 1e-3, np.complex128: 1e-6}\n args_maker = self._GetArgsMaker(rng,\n [start_shape, stop_shape],\n [dtype, dtype])\n start, stop = args_maker()\n ndim = len(np.shape(start + stop))\n for axis in range(-ndim, ndim):\n jnp_op = lambda start, stop: jnp.logspace(\n start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)\n @jtu.ignore_warning(category=RuntimeWarning,\n message=\"overflow encountered in power\")\n def np_op(start, stop):\n return np.logspace(start, stop, num, endpoint=endpoint,\n base=base, dtype=dtype, axis=axis)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker,\n check_dtypes=False, tol=tol)\n if dtype in (inexact_dtypes + [None,]):\n # Why do compiled and op-by-op float16 np.power numbers differ\n # slightly more than expected?\n atol = {np.float16: 1e-2}\n self._CompileAndCheck(jnp_op, args_maker,\n check_dtypes=False, atol=atol, rtol=tol)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": (\"_start_shape={}_stop_shape={}_num={}_endpoint={}\"\n \"_dtype={}_axis={}\").format(\n start_shape, stop_shape, num, endpoint,\n dtype.__name__ if dtype else \"None\", axis),\n \"start_shape\": start_shape,\n \"stop_shape\": stop_shape,\n \"num\": num, \"endpoint\": endpoint,\n \"dtype\": dtype, \"axis\": axis}\n for start_shape in [(), (2,), (2, 2)]\n for stop_shape in [(), (2,), (2, 2)]\n 
for num in [0, 1, 2, 5, 20]\n for endpoint in [True, False]\n # NB: numpy's geomspace gives nonsense results on integer types\n for dtype in inexact_dtypes + [None,]\n for axis in range(-max(len(start_shape), len(stop_shape)),\n max(len(start_shape), len(stop_shape)))))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testGeomspace(self, start_shape, stop_shape, num,\n endpoint, dtype, axis):\n rng = jtu.rand_default(self.rng())\n # relax default tolerances slightly\n tol = {np.float16: 4e-3, np.float32: 2e-3, np.float64: 1e-14,\n np.complex128: 1e-14}\n def args_maker():\n \"\"\"Test the set of inputs np.geomspace is well-defined on.\"\"\"\n start, stop = self._GetArgsMaker(rng,\n [start_shape, stop_shape],\n [dtype, dtype])()\n # np.geomspace can't handle differently ranked tensors\n # w. negative numbers!\n start, stop = jnp.broadcast_arrays(start, stop)\n if dtype in complex_dtypes:\n return start, stop\n # to avoid NaNs, non-complex start and stop cannot\n # differ in sign, elementwise\n start = start * jnp.sign(start) * jnp.sign(stop)\n return start, stop\n start, stop = args_maker()\n def jnp_op(start, stop):\n return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,\n axis=axis)\n def np_op(start, stop):\n start = start.astype(np.float32) if dtype == jnp.bfloat16 else start\n stop = stop.astype(np.float32) if dtype == jnp.bfloat16 else stop\n return np.geomspace(\n start, stop, num, endpoint=endpoint,\n dtype=dtype if dtype != jnp.bfloat16 else np.float32,\n axis=axis).astype(dtype)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker,\n check_dtypes=False, tol=tol)\n if dtype in (inexact_dtypes + [None,]):\n self._CompileAndCheck(jnp_op, args_maker,\n check_dtypes=False, atol=tol, rtol=tol)\n\n def testDisableNumpyRankPromotionBroadcasting(self):\n try:\n prev_flag = config.jax_numpy_rank_promotion\n FLAGS.jax_numpy_rank_promotion = \"allow\"\n jnp.ones(2) + jnp.ones((1, 2)) # works just fine\n finally:\n FLAGS.jax_numpy_rank_promotion = prev_flag\n\n try:\n prev_flag = config.jax_numpy_rank_promotion\n FLAGS.jax_numpy_rank_promotion = \"raise\"\n self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))\n finally:\n FLAGS.jax_numpy_rank_promotion = prev_flag\n\n try:\n prev_flag = config.jax_numpy_rank_promotion\n FLAGS.jax_numpy_rank_promotion = \"warn\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n jnp.ones(2) + jnp.ones((1, 2))\n assert len(w) > 0\n msg = str(w[-1].message)\n expected_msg = (\"Following NumPy automatic rank promotion for add on \"\n \"shapes (2,) (1, 2).\")\n self.assertEqual(msg[:len(expected_msg)], expected_msg)\n\n prev_len = len(w)\n jnp.ones(2) + 3\n self.assertEqual(len(w), prev_len) # don't want to warn for scalars\n finally:\n FLAGS.jax_numpy_rank_promotion = prev_flag\n\n @unittest.skip(\"Test fails on CI, perhaps due to JIT caching\")\n def testDisableNumpyRankPromotionBroadcastingDecorator(self):\n with jax.numpy_rank_promotion(\"allow\"):\n jnp.ones(2) + jnp.ones((1, 2)) # works just fine\n\n with jax.numpy_rank_promotion(\"raise\"):\n self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))\n\n with jax.numpy_rank_promotion(\"warn\"):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n jnp.ones(2) + jnp.ones((1, 2))\n assert len(w) > 0\n msg = str(w[-1].message)\n expected_msg = (\"Following NumPy automatic rank promotion for add on \"\n \"shapes (2,) (1, 2).\")\n 
self.assertEqual(msg[:len(expected_msg)], expected_msg)\n\n prev_len = len(w)\n jnp.ones(2) + 3\n self.assertEqual(len(w), prev_len) # don't want to warn for scalars\n\n def testStackArrayArgument(self):\n # tests https://github.com/google/jax/issues/1271\n @jax.jit\n def foo(x):\n return jnp.stack(x)\n foo(np.zeros(2)) # doesn't crash\n\n @jax.jit\n def foo(x):\n return jnp.concatenate(x)\n foo(np.zeros((2, 2))) # doesn't crash\n\n def testReluGradientConstants(self):\n # This is a regression test that verifies that constants associated with the\n # gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the\n # outermost jaxpr. This was producing some large materialized constants for\n # every relu activation in a model.\n def body(i, xy):\n x, y = xy\n y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)\n return x, y\n\n f = lambda y: lax.fori_loop(0, 5, body, (y, y))\n jaxpr = jax.make_jaxpr(f)(np.zeros((3, 4), np.float32))\n self.assertFalse(\n any(np.array_equal(x, np.full((3, 4), 2., dtype=np.float32))\n for x in jaxpr.consts))\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_from={}_to={}\".format(from_shape, to_shape),\n \"from_shape\": from_shape, \"to_shape\": to_shape}\n for from_shape, to_shape in [\n [(1, 3), (4, 3)],\n [(3,), (2, 1, 3)],\n [(3,), (3, 3)],\n [(1,), (3,)],\n [(1,), 3],\n ])\n def testBroadcastTo(self, from_shape, to_shape):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [from_shape], [np.float32])\n np_op = lambda x: np.broadcast_to(x, to_shape)\n jnp_op = lambda x: jnp.broadcast_to(x, to_shape)\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"_{shapes}\", \"shapes\": shapes, \"broadcasted_shape\": broadcasted_shape}\n for shapes, broadcasted_shape in [\n [[], ()],\n [[()], ()],\n [[(1, 3), (4, 3)], (4, 3)],\n [[(3,), (2, 1, 3)], (2, 1, 3)],\n [[(3,), (3, 3)], (3, 3)],\n [[(1,), (3,)], (3,)],\n [[(1,), 3], (3,)],\n [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],\n [[[1], [0, 1]], (0, 1)],\n [[(1,), np.array([0, 1])], (0, 1)],\n ])\n def testBroadcastShapes(self, shapes, broadcasted_shape):\n # Test against np.broadcast_shapes once numpy 1.20 is minimum required version\n np.testing.assert_equal(jnp.broadcast_shapes(*shapes), broadcasted_shape)\n\n def testBroadcastToIssue1522(self):\n self.assertRaisesRegex(\n ValueError, \"Incompatible shapes for broadcasting: .*\",\n lambda: jnp.broadcast_to(np.ones((2, 3)), (1, 3)))\n\n def testBroadcastToIntIssue1548(self):\n self.assertAllClose(jnp.broadcast_to(1, (3, 2)), np.ones((3, 2)),\n check_dtypes=False)\n\n def testBroadcastToOnScalar(self):\n self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)\n self.assertIsInstance(np.broadcast_to(10.0, ()), np.ndarray)\n\n def testPrecision(self):\n\n ones_1d = np.ones((2,))\n ones_2d = np.ones((2, 2))\n ones_3d = np.ones((2, 2, 2))\n HIGHEST = lax.Precision.HIGHEST\n\n jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.dot, precision=HIGHEST),\n ones_1d, ones_1d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.dot, precision=HIGHEST),\n ones_3d, ones_3d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.matmul, precision=HIGHEST),\n ones_2d, ones_2d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.vdot, precision=HIGHEST),\n ones_1d, ones_1d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.tensordot, axes=2, 
precision=HIGHEST),\n ones_2d, ones_2d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),\n ones_1d, ones_1d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),\n ones_1d, ones_1d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.einsum, 'i,i', precision=HIGHEST),\n ones_1d, ones_1d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.einsum, 'ij,ij', precision=HIGHEST),\n ones_2d, ones_2d)\n jtu.assert_dot_precision(\n HIGHEST,\n partial(jnp.inner, precision=HIGHEST),\n ones_1d, ones_1d)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_varargs={} axis={}_dtype={}\".format(\n shape, varargs, axis, dtype),\n \"shape\": shape, \"varargs\": varargs, \"axis\": axis, \"dtype\": dtype}\n for shape in [(10,), (10, 15), (10, 15, 20)]\n for _num_axes in range(len(shape))\n for varargs in itertools.combinations(range(1, len(shape) + 1), _num_axes)\n for axis in itertools.combinations(range(len(shape)), _num_axes)\n for dtype in inexact_dtypes))\n def testGradient(self, shape, varargs, axis, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n jnp_fun = lambda y: jnp.gradient(y, *varargs, axis=axis)\n np_fun = lambda y: np.gradient(y, *varargs, axis=axis)\n self._CheckAgainstNumpy(\n np_fun, jnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testZerosShapeErrors(self):\n # see https://github.com/google/jax/issues/1822\n self.assertRaisesRegex(\n TypeError,\n \"Shapes must be 1D sequences of concrete values of integer type.*\",\n lambda: jnp.zeros(1.))\n self.assertRaisesRegex(\n TypeError,\n r\"Shapes must be 1D sequences of concrete values of integer type.*\\n\"\n \"If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.\",\n lambda: jax.jit(jnp.zeros)(2))\n\n def testTraceMethod(self):\n x = self.rng().randn(3, 4).astype(jnp.float_)\n self.assertAllClose(x.trace(), jnp.array(x).trace())\n self.assertAllClose(x.trace(), jax.jit(lambda y: y.trace())(x))\n\n def testIntegerPowersArePrecise(self):\n # See https://github.com/google/jax/pull/3036\n # Checks if the squares of float32 integers have no numerical errors.\n # It should be satisfied with all integers less than sqrt(2**24).\n x = jnp.arange(-2**12, 2**12, dtype=jnp.int32)\n np.testing.assert_array_equal(jnp.square(x.astype(jnp.float32)), x * x)\n np.testing.assert_array_equal(x.astype(jnp.float32) ** 2, x * x)\n\n # Similarly for cubes.\n x = jnp.arange(-2**8, 2**8, dtype=jnp.int32)\n np.testing.assert_array_equal(x.astype(jnp.float32) ** 3, x * x * x)\n\n x = np.arange(10, dtype=np.float32)\n for i in range(10):\n self.assertAllClose(x.astype(jnp.float32) ** i, x ** i,\n check_dtypes=False)\n\n def testToBytes(self):\n v = np.arange(12, dtype=np.int32).reshape(3, 4)\n for order in ['C', 'F']:\n self.assertEqual(jnp.asarray(v).tobytes(order), v.tobytes(order))\n\n def testToList(self):\n v = np.arange(12, dtype=np.int32).reshape(3, 4)\n self.assertEqual(jnp.asarray(v).tolist(), v.tolist())\n\n def testReductionWithRepeatedAxisError(self):\n with self.assertRaisesRegex(ValueError, r\"duplicate value in 'axis': \\(0, 0\\)\"):\n jnp.sum(jnp.arange(3), (0, 0))\n\n def testArangeConcretizationError(self):\n msg = r\"It arose in jax.numpy.arange argument `{}`\".format\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):\n jax.jit(jnp.arange)(3)\n\n with 
self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('start')):\n jax.jit(lambda start: jnp.arange(start, 3))(0)\n\n with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):\n jax.jit(lambda stop: jnp.arange(0, stop))(3)\n\n def testIssue2347(self):\n # https://github.com/google/jax/issues/2347\n object_list = List[Tuple[jnp.array, float, float, jnp.array, bool]]\n self.assertRaises(TypeError, jnp.array, object_list)\n\n np_object_list = np.array(object_list)\n self.assertRaises(TypeError, jnp.array, np_object_list)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"\", shapes, dtypes),\n \"shapes\": shapes, \"dtypes\": dtypes}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(all_shapes, 2))\n for dtypes in itertools.product(\n *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testLogaddexpComplex(self, shapes, dtypes):\n @jtu.ignore_warning(category=RuntimeWarning, message=\"invalid value.*\")\n def np_op(x1, x2):\n return np.log(np.exp(x1) + np.exp(x2))\n\n rng = jtu.rand_some_nan(self.rng())\n args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))\n if jtu.device_under_test() == 'tpu':\n tol = {np.complex64: 1e-3, np.complex128: 1e-10}\n else:\n tol = {np.complex64: 1e-5, np.complex128: 1e-14}\n self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol)\n self._CompileAndCheck(jnp.logaddexp, args_maker, rtol=tol, atol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"\", shapes, dtypes),\n \"shapes\": shapes, \"dtypes\": dtypes}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(all_shapes, 2))\n for dtypes in itertools.product(\n *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testLogaddexp2Complex(self, shapes, dtypes):\n @jtu.ignore_warning(category=RuntimeWarning, message=\"invalid value.*\")\n def np_op(x1, x2):\n return np.log2(np.exp2(x1) + np.exp2(x2))\n\n rng = jtu.rand_some_nan(self.rng())\n args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))\n if jtu.device_under_test() == 'tpu':\n tol = {np.complex64: 1e-3, np.complex128: 1e-10}\n else:\n tol = {np.complex64: 1e-5, np.complex128: 1e-14}\n self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol)\n self._CompileAndCheck(jnp.logaddexp2, args_maker, rtol=tol, atol=tol)\n\n# Most grad tests are at the lax level (see lax_test.py), but we add some here\n# as needed for e.g. 
particular compound ops of interest.\n\nGradTestSpec = collections.namedtuple(\n \"GradTestSpec\",\n [\"op\", \"nargs\", \"order\", \"rng_factory\", \"dtypes\", \"name\", \"tol\"])\ndef grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):\n return GradTestSpec(\n op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)\n\nGRAD_TEST_RECORDS = [\n grad_test_spec(jnp.arcsinh, nargs=1, order=2,\n rng_factory=jtu.rand_positive,\n dtypes=[np.float64, np.complex64],\n tol={np.complex64: 2e-2}),\n grad_test_spec(jnp.arccosh, nargs=1, order=2,\n rng_factory=jtu.rand_positive,\n dtypes=[np.float64, np.complex64],\n tol={np.complex64: 2e-2}),\n grad_test_spec(jnp.arctanh, nargs=1, order=2,\n rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),\n dtypes=[np.float64, np.complex64],\n tol={np.complex64: 2e-2}),\n grad_test_spec(jnp.logaddexp, nargs=2, order=1,\n rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),\n dtypes=[np.float64], tol=1e-4),\n grad_test_spec(jnp.logaddexp2, nargs=2, order=2,\n rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),\n dtypes=[np.float64], tol=1e-4),\n]\n\nGradSpecialValuesTestSpec = collections.namedtuple(\n \"GradSpecialValuesTestSpec\", [\"op\", \"values\", \"order\"])\n\nGRAD_SPECIAL_VALUE_TEST_RECORDS = [\n GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),\n GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),\n GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),\n GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),\n]\n\n@jtu.with_config(jax_numpy_rank_promotion=\"raise\")\nclass NumpyGradTests(jtu.JaxTestCase):\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.name, shapes, itertools.repeat(dtype)),\n \"op\": rec.op, \"rng_factory\": rec.rng_factory, \"shapes\": shapes, \"dtype\": dtype,\n \"order\": rec.order, \"tol\": rec.tol}\n for shapes in itertools.combinations_with_replacement(nonempty_shapes, rec.nargs)\n for dtype in rec.dtypes)\n for rec in GRAD_TEST_RECORDS))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):\n rng = rng_factory(self.rng())\n tol = jtu.join_tolerance(tol, {np.float32: 1e-1, np.float64: 1e-3,\n np.complex64: 1e-1, np.complex128: 1e-3})\n args = tuple(rng(shape, dtype) for shape in shapes)\n check_grads(op, args, order, [\"fwd\", \"rev\"], tol, tol)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(rec.op.__name__, special_value),\n \"op\": rec.op, \"special_value\": special_value, \"order\": rec.order}\n for special_value in rec.values)\n for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))\n def testOpGradSpecialValue(self, op, special_value, order):\n check_grads(op, (special_value,), order, [\"fwd\", \"rev\"],\n atol={np.float32: 3e-3})\n\n def testSincAtZero(self):\n # Some manual tests for sinc at zero, since it doesn't have well-behaved\n # numerical derivatives at zero\n def deriv(f):\n return lambda x: jax.jvp(f, (x,), (1.,))[1]\n\n def apply_all(fns, x):\n for f in fns:\n x = f(x)\n return x\n\n d1 = 0.\n for ops in itertools.combinations_with_replacement([deriv, jax.grad], 1):\n self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d1)\n\n d2 = -np.pi ** 2 / 3\n for ops in itertools.combinations_with_replacement([deriv, jax.grad], 2):\n self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d2)\n\n d3 = 0.\n for 
ops in itertools.combinations_with_replacement([deriv, jax.grad], 3):\n self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d3)\n\n d4 = np.pi ** 4 / 5\n for ops in itertools.combinations_with_replacement([deriv, jax.grad], 4):\n self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d4)\n\n def testSincGradArrayInput(self):\n # tests for a bug almost introduced in #5077\n jax.grad(lambda x: jnp.sinc(x).sum())(jnp.arange(10.)) # doesn't crash\n\n def testTakeAlongAxisIssue1521(self):\n # https://github.com/google/jax/issues/1521\n idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))\n\n def f(x):\n y = x * jnp.arange(3.).reshape((1, 3))\n return jnp.take_along_axis(y, idx, -1).sum()\n\n check_grads(f, (1.,), order=1)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"\", shapes, itertools.repeat(dtype)),\n \"shapes\": shapes, \"dtype\": dtype}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(nonempty_shapes, 2))\n for dtype in (np.complex128, )))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testGradLogaddexpComplex(self, shapes, dtype):\n rng = jtu.rand_default(self.rng())\n args = tuple(rng(shape, dtype) for shape in shapes)\n if jtu.device_under_test() == \"tpu\":\n tol = 5e-2\n else:\n tol = 3e-2\n check_grads(jnp.logaddexp, args, 1, [\"fwd\", \"rev\"], tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"\", shapes, itertools.repeat(dtype)),\n \"shapes\": shapes, \"dtype\": dtype}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n itertools.combinations_with_replacement(nonempty_shapes, 2))\n for dtype in (np.complex128, )))\n @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.\n def testGradLogaddexp2Complex(self, shapes, dtype):\n rng = jtu.rand_default(self.rng())\n args = tuple(rng(shape, dtype) for shape in shapes)\n if jtu.device_under_test() == \"tpu\":\n tol = 5e-2\n else:\n tol = 3e-2\n check_grads(jnp.logaddexp2, args, 1, [\"fwd\", \"rev\"], tol, tol)\n\n@jtu.with_config(jax_numpy_rank_promotion=\"raise\")\nclass NumpySignaturesTest(jtu.JaxTestCase):\n\n def testWrappedSignaturesMatch(self):\n \"\"\"Test that jax.numpy function signatures match numpy.\"\"\"\n jnp_funcs = {name: getattr(jnp, name) for name in dir(jnp)}\n func_pairs = {name: (fun, fun.__np_wrapped__) for name, fun in jnp_funcs.items()\n if hasattr(fun, '__np_wrapped__')}\n assert len(func_pairs) > 0\n\n # TODO(jakevdp): fix some of the following signatures. 
Some are due to wrong argument names.\n unsupported_params = {\n 'angle': ['deg'],\n 'asarray': ['like'],\n 'broadcast_to': ['subok', 'array'],\n 'clip': ['kwargs'],\n 'corrcoef': ['ddof', 'bias', 'dtype'],\n 'cov': ['dtype'],\n 'empty_like': ['subok', 'order'],\n 'einsum': ['kwargs'],\n 'einsum_path': ['einsum_call'],\n 'eye': ['order', 'like'],\n 'identity': ['like'],\n 'full': ['order', 'like'],\n 'full_like': ['subok', 'order'],\n 'histogram': ['normed'],\n 'histogram2d': ['normed'],\n 'histogramdd': ['normed'],\n 'ones': ['order', 'like'],\n 'ones_like': ['subok', 'order'],\n 'tri': ['like'],\n 'unwrap': ['period'],\n 'zeros_like': ['subok', 'order']\n }\n\n extra_params = {\n 'broadcast_to': ['arr'],\n 'einsum': ['precision'],\n 'einsum_path': ['subscripts'],\n }\n\n mismatches = {}\n\n for name, (jnp_fun, np_fun) in func_pairs.items():\n # broadcast_shapes is not available in numpy < 1.20\n if numpy_version < (1, 20) and name == \"broadcast_shapes\":\n continue\n # Some signatures have changed; skip for older numpy versions.\n if numpy_version < (1, 19) and name in ['einsum_path', 'gradient', 'isscalar']:\n continue\n # Note: can't use inspect.getfullargspec due to numpy issue\n # https://github.com/numpy/numpy/issues/12225\n try:\n np_params = inspect.signature(np_fun).parameters\n except ValueError:\n # Some functions cannot be inspected\n continue\n jnp_params = inspect.signature(jnp_fun).parameters\n extra = set(extra_params.get(name, []))\n unsupported = set(unsupported_params.get(name, []))\n\n # Checks to prevent tests from becoming out-of-date. If these fail,\n # it means that extra_params or unsupported_params need to be updated.\n assert extra.issubset(jnp_params), f\"{name}: extra={extra} is not a subset of jnp_params={set(jnp_params)}.\"\n assert not unsupported.intersection(jnp_params), f\"{name}: unsupported={unsupported} overlaps with jnp_params={set(jnp_params)}.\"\n\n # Skip functions that only have *args and **kwargs; we can't introspect these further.\n var_args = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)\n if all(p.kind in var_args for p in jnp_params.values()):\n continue\n if all(p.kind in var_args for p in np_params.values()):\n continue\n\n # Remove known extra parameters.\n jnp_params = {a: p for a, p in jnp_params.items() if a not in extra}\n\n # Remove known unsupported parameters.\n np_params = {a: p for a, p in np_params.items() if a not in unsupported}\n\n # Older versions of numpy may have fewer parameters; to avoid extraneous errors on older numpy\n # versions, we allow for jnp to have more parameters.\n if list(jnp_params)[:len(np_params)] != list(np_params):\n mismatches[name] = {'np_params': list(np_params), 'jnp_params': list(jnp_params)}\n\n self.assertEqual(mismatches, {})\n\n\n_all_dtypes: List[str] = [\n \"bool_\",\n \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n \"int8\", \"int16\", \"int32\", \"int64\",\n \"float16\", \"float32\", \"float64\",\n \"complex64\", \"complex128\",\n]\n\n\ndef _all_numpy_ufuncs() -> Iterator[str]:\n \"\"\"Generate the names of all ufuncs in the top-level numpy namespace.\"\"\"\n for name in dir(np):\n f = getattr(np, name)\n if isinstance(f, np.ufunc):\n yield name\n\n\ndef _dtypes_for_ufunc(name: str) -> Iterator[Tuple[str, ...]]:\n \"\"\"Generate valid dtypes of inputs to the given numpy ufunc.\"\"\"\n func = getattr(np, name)\n for arg_dtypes in itertools.product(_all_dtypes, repeat=func.nin):\n args = (np.ones(1, dtype=dtype) for dtype in arg_dtypes)\n try:\n with 
warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"divide by zero\", RuntimeWarning)\n _ = func(*args)\n except TypeError:\n pass\n else:\n yield arg_dtypes\n\n\n@jtu.with_config(jax_numpy_rank_promotion=\"raise\")\nclass NumpyUfuncTests(jtu.JaxTestCase):\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"_{name}_{','.join(arg_dtypes)}\",\n \"name\": name, \"arg_dtypes\": arg_dtypes}\n for name in _all_numpy_ufuncs()\n for arg_dtypes in jtu.cases_from_list(_dtypes_for_ufunc(name)))\n def testUfuncInputTypes(self, name, arg_dtypes):\n # TODO(jakevdp): fix following failures and remove from this exception list.\n if (name in ['divmod', 'floor_divide', 'fmod', 'gcd', 'left_shift', 'mod',\n 'power', 'remainder', 'right_shift', 'rint', 'square']\n and 'bool_' in arg_dtypes):\n self.skipTest(f\"jax.numpy does not support {name}{tuple(arg_dtypes)}\")\n if name == 'arctanh' and jnp.issubdtype(arg_dtypes[0], jnp.complexfloating):\n self.skipTest(\"np.arctanh & jnp.arctanh have mismatched NaNs for complex input.\")\n for dtype in arg_dtypes:\n jtu.skip_if_unsupported_type(dtype)\n\n jnp_op = getattr(jnp, name)\n np_op = getattr(np, name)\n np_op = jtu.ignore_warning(category=RuntimeWarning,\n message=\"divide by zero.*\")(np_op)\n args_maker = lambda: tuple(np.ones(1, dtype=dtype) for dtype in arg_dtypes)\n\n try:\n jnp_op(*args_maker())\n except NotImplementedError:\n self.skipTest(f\"jtu.{name} is not yet implemented.\")\n\n # large tol comes from the fact that numpy returns float16 in places\n # that jnp returns float32. e.g. np.cos(np.uint8(0))\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker, check_dtypes=False, tol=1E-2)\n\n@jtu.with_config(jax_numpy_rank_promotion=\"raise\")\nclass NumpyDocTests(jtu.JaxTestCase):\n\n def test_lax_numpy_docstrings(self):\n # Test that docstring wrapping & transformation didn't fail.\n\n # Functions that have their own docstrings & don't wrap numpy.\n known_exceptions = {'broadcast_arrays', 'vectorize'}\n\n for name in dir(jnp):\n if name in known_exceptions or name.startswith('_'):\n continue\n\n # We only check signatures of functions.\n obj = getattr(jnp, name)\n if isinstance(obj, type) or not callable(obj):\n continue\n\n # Some jnp functions are imported from numpy or jax.dtypes directly.\n if any(obj is getattr(mod, obj.__name__, None) for mod in [np, dtypes]):\n continue\n\n wrapped_fun = obj.__np_wrapped__\n\n # If the wrapped function has a docstring, obj should too\n if wrapped_fun.__doc__ and not obj.__doc__:\n raise Exception(f\"jnp.{name} does not contain wrapped docstring.\")\n\n if obj.__doc__ and \"*Original docstring below.*\" not in obj.__doc__:\n raise Exception(f\"jnp.{name} does not have a wrapped docstring.\")\n\n\n def test_parse_numpydoc(self):\n # Unit test ensuring that _parse_numpydoc correctly parses docstrings for all\n # functions in NumPy's top-level namespace.\n section_titles = {'Attributes', 'Examples', 'Notes',\n 'Parameters', 'Raises', 'References',\n 'Returns', 'See also', 'See Also', 'Warnings', 'Warns'}\n headings = [title + '\\n' + '-'*len(title) for title in section_titles]\n\n for name in dir(np):\n if name.startswith('_'):\n continue\n obj = getattr(np, name)\n if isinstance(obj, type):\n continue\n if not callable(obj):\n continue\n if 'built-in function' in repr(obj):\n continue\n parsed = _parse_numpydoc(obj.__doc__)\n\n # Check that no docstring is handled gracefully.\n if not obj.__doc__:\n self.assertEqual(parsed, ParsedDoc(obj.__doc__))\n continue\n\n # Check that no unexpected 
section names are found.\n extra_keys = parsed.sections.keys() - section_titles\n if extra_keys:\n raise ValueError(f\"Extra section headers found in np.{name}: {extra_keys}\")\n\n # Check that every docstring has a summary.\n if not parsed.summary:\n raise ValueError(f\"No summary found for np.{name}\")\n\n # Check that no expected headings are missed.\n for heading in headings:\n assert heading not in parsed.front_matter\n\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n" ]
[ [ "numpy.diag_indices", "numpy.triu_indices_from", "numpy.tile", "numpy.trim_zeros", "numpy.resize", "numpy.tensordot", "numpy.tril_indices", "numpy.unique", "numpy.random.random", "numpy.logspace", "numpy.full_like", "numpy.bincount", "numpy.count_nonzero", "numpy.empty", "numpy.apply_over_axes", "numpy.nonzero", "numpy.prod", "numpy.frexp", "numpy.in1d", "numpy.cross", "numpy.expand_dims", "numpy.setxor1d", "numpy.diff", "numpy.ravel_multi_index", "numpy.iinfo", "numpy.cov", "numpy.union1d", "numpy.errstate", "numpy.random.RandomState", "numpy.isneginf", "numpy.ravel", "numpy.average", "numpy.dot", "numpy.isposinf", "numpy.mean", "numpy.geomspace", "numpy.issubdtype", "numpy.gradient", "numpy.zeros_like", "numpy.eye", "numpy.polymul", "numpy.flipud", "numpy.arange", "numpy.array", "numpy.triu_indices", "numpy.matmul", "numpy.zeros", "numpy.lexsort", "numpy.round", "numpy.exp2", "numpy.identity", "numpy.tril_indices_from", "numpy.fliplr", "numpy.polyder", "numpy.trace", "numpy.polyint", "numpy.ones", "numpy.choose", "numpy.digitize", "numpy.repeat", "numpy.diag", "numpy.isin", "numpy.rot90", "numpy.compress", "numpy.vander", "numpy.polysub", "numpy.where", "numpy.nan_to_num", "numpy.take", "numpy.delete", "numpy.std", "numpy.float32", "numpy.isscalar", "numpy.searchsorted", "numpy.squeeze", "numpy.array_split", "numpy.floor", "numpy.asarray", "numpy.__version__.split", "numpy.ediff1d", "numpy.uint64", "numpy.linspace", "numpy.isclose", "numpy.exp", "numpy.apply_along_axis", "numpy.size", "numpy.inner", "numpy.broadcast_to", "numpy.dtype", "numpy.concatenate", "numpy.full", "numpy.longlong", "numpy.swapaxes", "numpy.ndim", "numpy.append", "numpy.int32", "numpy.pad", "numpy.reshape", "numpy.testing.assert_equal", "numpy.polyadd", "numpy.shape", "numpy.take_along_axis", "numpy.ldexp", "numpy.intersect1d", "numpy.clip", "numpy.tri", "numpy.isnan", "numpy.diagonal", "numpy.split", "numpy.atleast_1d", "numpy.int64", "numpy.flip" ] ]
jlenain/ctapipe
[ "65a6950dded44f81d5c218f4e117e1e38fce8fd4" ]
[ "ctapipe/io/tests/test_astropy_helpers.py" ]
[ "#!/usr/bin/env python3\nimport warnings\nimport numpy as np\nfrom astropy import units as u\nimport tables\nimport pytest\nfrom astropy.time import Time\n\nfrom astropy.io.fits.verify import VerifyWarning\n\nfrom ctapipe.core import Container, Field\nfrom ctapipe.containers import ReconstructedEnergyContainer, TelescopeTriggerContainer\nfrom ctapipe.io import HDF5TableWriter\nfrom ctapipe.io.astropy_helpers import read_table\n\n\ndef test_read_table(tmp_path):\n\n # write a simple hdf5 file using\n\n container = ReconstructedEnergyContainer()\n filename = tmp_path / \"test_astropy_table.h5\"\n\n with HDF5TableWriter(filename) as writer:\n for energy in np.logspace(1, 2, 10) * u.TeV:\n container.energy = energy\n writer.write(\"events\", container)\n\n # try opening the result\n table = read_table(filename, \"/events\")\n\n assert \"energy\" in table.columns\n assert table[\"energy\"].unit == u.TeV\n assert \"CTAPIPE_VERSION\" in table.meta\n assert table[\"energy\"].description is not None\n\n # test using a string\n table = read_table(str(filename), \"/events\")\n\n # test write the table back out to some other format:\n table.write(tmp_path / \"test_output.ecsv\")\n with warnings.catch_warnings():\n # ignore warnings about too long keywords stored using HIERARCH\n warnings.simplefilter(\"ignore\", VerifyWarning)\n table.write(tmp_path / \"test_output.fits.gz\")\n\n # test using a file handle\n with tables.open_file(filename) as handle:\n table = read_table(handle, \"/events\")\n\n # test a bad input\n with pytest.raises(ValueError):\n table = read_table(12345, \"/events\")\n\n\ndef test_read_table_slicing(tmp_path):\n filename = tmp_path / \"test_slicing.h5\"\n\n # write a simple hdf5 file using\n class Data(Container):\n index = Field(0)\n value = Field(0.0)\n\n rng = np.random.default_rng(0)\n values = rng.normal(size=100)\n index = np.arange(len(values))\n\n with HDF5TableWriter(filename) as writer:\n for i, value in zip(index, values):\n container = Data(index=i, value=value)\n writer.write(\"events\", container)\n\n # try opening the result\n table = read_table(filename, \"/events\", start=50)\n assert len(table) == 50\n assert np.all(table[\"index\"] == index[50:])\n assert np.all(table[\"value\"] == values[50:])\n\n table = read_table(filename, \"/events\", stop=50)\n assert len(table) == 50\n assert np.all(table[\"index\"] == index[:50])\n assert np.all(table[\"value\"] == values[:50])\n\n table = read_table(filename, \"/events\", start=10, stop=30)\n assert len(table) == 20\n assert np.all(table[\"index\"] == index[10:30])\n assert np.all(table[\"value\"] == values[10:30])\n\n table = read_table(filename, \"/events\", step=5)\n assert len(table) == 20\n assert np.all(table[\"index\"] == index[::5])\n assert np.all(table[\"value\"] == values[::5])\n\n\ndef test_read_table_time(tmp_path):\n t0 = Time(\"2020-01-01T20:00:00.0\")\n times = t0 + np.arange(10) * u.s\n\n # use table writer to write test file\n filename = tmp_path / \"test_astropy_table.h5\"\n with HDF5TableWriter(filename) as writer:\n for t in times:\n container = TelescopeTriggerContainer(time=t, n_trigger_pixels=10)\n writer.write(\"events\", container)\n\n # check reading in the times works as expected\n table = read_table(filename, \"/events\")\n assert isinstance(table[\"time\"], Time)\n assert np.allclose(times.tai.mjd, table[\"time\"].tai.mjd)\n\n\ndef test_transforms(tmp_path):\n path = tmp_path / \"test_trans.hdf5\"\n\n data = np.array([100, 110], dtype=\"int16\").view([(\"waveform\", \"int16\")])\n 
print(data)\n\n with tables.open_file(path, \"w\") as f:\n f.create_table(\"/data\", \"test\", obj=data, createparents=True)\n f.root.data.test.attrs[\"waveform_TRANSFORM_SCALE\"] = 100.0\n f.root.data.test.attrs[\"waveform_TRANSFORM_OFFSET\"] = 200\n f.root.data.test.attrs[\"waveform_TRANSFORM_DTYPE\"] = \"float64\"\n\n table = read_table(path, \"/data/test\")\n\n assert np.all(table[\"waveform\"] == [-1.0, -0.9])\n\n\ndef test_file_closed(tmp_path):\n \"\"\"Test read_table closes the file even when an exception happens during read\"\"\"\n\n path = tmp_path / \"empty.hdf5\"\n with tables.open_file(path, \"w\"):\n pass\n\n with pytest.raises(tables.NoSuchNodeError):\n read_table(path, \"/foo\")\n\n # test we can open the file for writing, fails if read_table did not close\n # the file\n with tables.open_file(path, \"w\"):\n pass\n\n\ndef test_condition(tmp_path):\n # write a simple hdf5 file using\n\n container = ReconstructedEnergyContainer()\n filename = tmp_path / \"test_astropy_table.h5\"\n\n with HDF5TableWriter(filename) as writer:\n for energy in [np.nan, 100, np.nan, 50, -1.0] * u.TeV:\n container.energy = energy\n writer.write(\"events\", container)\n\n # try opening the result\n table = read_table(filename, \"/events\", condition=\"energy > 0\")\n assert len(table) == 2\n assert np.all(table[\"energy\"] == [100, 50] * u.TeV)\n" ]
[ [ "numpy.array", "numpy.random.default_rng", "numpy.allclose", "numpy.arange", "numpy.all", "numpy.logspace" ] ]
mysteryshen/AICIG
[ "95bd3c711bc70661bf16c88635bd2bf660b61ff5" ]
[ "batch_generator.py" ]
[ "import numpy as np\nfrom dataset import load_data\n\nclass BatchGenerator:\n TRAIN = 1\n TEST = 0\n\n def __init__(self, data_src, seed, batch_size=32, dataset='MNIST'):\n self.batch_size = batch_size\n self.data_src = data_src\n\n # Load data\n ((x, y), (x_test, y_test)) = load_data(dataset,\n seed=seed,\n imbalance=True) # tf.keras.datasets.cifar10.load_data()\n\n if self.data_src == self.TRAIN:\n self.dataset_x = x\n self.dataset_y = y\n else:\n self.dataset_x = x_test\n self.dataset_y = y_test\n\n # Arrange x: channel first\n self.dataset_x = np.transpose(self.dataset_x, axes=(0, 3, 1, 2))\n\n # Normalize between -1 and 1\n # self.dataset_x = self.dataset_x / 255 - 0.5\n\n # Y 1D format\n # self.dataset_y = self.dataset_y[:, 0]\n\n assert (self.dataset_x.shape[0] == self.dataset_y.shape[0])\n\n # Compute per class instance count.\n classes = np.unique(self.dataset_y)\n self.classes = classes\n per_class_count = list()\n for c in classes:\n per_class_count.append(np.sum(np.array(self.dataset_y == c)))\n\n # Recount after pruning\n per_class_count = list()\n for c in classes:\n per_class_count.append(np.sum(np.array(self.dataset_y == c)))\n self.per_class_count = per_class_count\n\n # List of labels\n self.label_table = [str(c) for c in range(len(self.classes))]\n\n # Preload all the labels.\n self.labels = self.dataset_y[:]\n\n # per class ids\n self.per_class_ids = dict()\n ids = np.array(range(len(self.dataset_x)))\n for c in classes:\n self.per_class_ids[c] = ids[self.labels == c]\n\n def get_samples_for_class(self, c, samples=None):\n if samples is None:\n samples = self.batch_size\n\n np.random.shuffle(self.per_class_ids[c])\n to_return = self.per_class_ids[c][0:samples]\n return self.dataset_x[to_return]\n\n def get_label_table(self):\n return self.label_table\n\n def get_num_classes(self):\n return len(self.label_table)\n\n def get_class_probability(self):\n return self.per_class_count / sum(self.per_class_count)\n\n ### ACCESS DATA AND SHAPES ###\n def get_num_samples(self):\n return self.dataset_x.shape[0]\n\n def get_image_shape(self):\n return [self.dataset_x.shape[1], self.dataset_x.shape[2], self.dataset_x.shape[3]]\n\n def next_batch(self):\n dataset_x = self.dataset_x\n labels = self.labels\n\n indices = np.arange(dataset_x.shape[0])\n\n # np.random.shuffle(indices)\n\n for start_idx in range(0, dataset_x.shape[0], self.batch_size):\n access_pattern = indices[start_idx:start_idx + self.batch_size]\n access_pattern = sorted(access_pattern)\n\n yield dataset_x[access_pattern, :, :, :], labels[access_pattern]\n" ]
[ [ "numpy.array", "numpy.random.shuffle", "numpy.arange", "numpy.transpose", "numpy.unique" ] ]
YuHuang42/cogdl
[ "36eafd4a2ced8a513643b99a3e63e9919c38717c", "36eafd4a2ced8a513643b99a3e63e9919c38717c" ]
[ "examples/gnn_models/grand.py", "examples/gnn_models/sgc.py" ]
[ "import torch\n\nfrom utils import print_result, set_random_seed, get_dataset\nfrom cogdl.tasks import build_task\nfrom cogdl.utils import build_args_from_dict\n\nDATASET_REGISTRY = {}\n\n\ndef build_default_args_for_node_classification(dataset):\n cpu = not torch.cuda.is_available()\n args = {\n \"lr\": 0.01,\n \"weight_decay\": 5e-4,\n \"max_epoch\": 1000,\n \"patience\": 100,\n \"cpu\": cpu,\n \"device_id\": [0],\n \"seed\": [100],\n \"input_dropout\": 0.5,\n \"hidden_dropout\": 0.5,\n \"hidden_size\": 32,\n \"dropnode_rate\": 0.5,\n \"order\": 5,\n \"tem\": 0.5,\n \"lam\": 0.5,\n \"sample\": 10,\n \"alpha\": 0.2,\n \"bn\": False,\n \"task\": \"node_classification\",\n \"model\": \"grand\",\n \"dataset\": dataset,\n }\n return build_args_from_dict(args)\n\n\ndef register_func(name):\n def register_func_name(func):\n DATASET_REGISTRY[name] = func\n return func\n\n return register_func_name\n\n\n@register_func(\"cora\")\ndef cora_config(args):\n args.order = 8\n args.sample = 4\n args.lam = 1.0\n args.tem = 0.5\n args.alpha = 0.5\n args.patience = 200\n return args\n\n\n@register_func(\"citeseer\")\ndef citeseer_config(args):\n args.order = 2\n args.sample = 2\n args.lam = 0.7\n args.tem = 0.3\n args.alpha = 0.5\n args.input_dropout = 0.0\n args.hidden_dropout = 0.2\n args.patience = 200\n return args\n\n\n@register_func(\"pubmed\")\ndef pubmed_config(args):\n args.order = 5\n args.sample = 4\n args.lam = 1.0\n args.tem = 0.2\n args.alpha = 0.5\n args.lr = 0.2\n args.bn = True\n args.input_dropout = 0.6\n args.hidden_dropout = 0.8\n return args\n\n\ndef run(dataset_name):\n args = build_default_args_for_node_classification(dataset_name)\n args = DATASET_REGISTRY[dataset_name](args)\n dataset, args = get_dataset(args)\n results = []\n for seed in args.seed:\n set_random_seed(seed)\n task = build_task(args, dataset=dataset)\n result = task.train()\n results.append(result)\n return results\n\n\nif __name__ == \"__main__\":\n datasets = [\"cora\", \"citeseer\", \"pubmed\"]\n results = []\n for x in datasets:\n results += run(x)\n print_result(results, datasets, \"grand\")\n", "import torch\nfrom cogdl.tasks import build_task\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom cogdl.utils import build_args_from_dict\n\n\ndef get_default_args():\n cuda_available = torch.cuda.is_available()\n default_dict = {\n \"hidden_size\": 16,\n \"dropout\": 0.5,\n \"patience\": 100,\n \"max_epoch\": 500,\n \"cpu\": not cuda_available,\n \"lr\": 0.01,\n \"device_id\": [0],\n \"weight_decay\": 5e-4,\n \"missing_rate\": -1,\n }\n return build_args_from_dict(default_dict)\n\n\nif __name__ == \"__main__\":\n args = get_default_args()\n args.task = \"node_classification\"\n args.dataset = \"cora\"\n args.model = \"sgc\"\n dataset = build_dataset(args)\n args.num_features = dataset.num_features\n args.num_classes = dataset.num_classes\n args.num_layers = 1\n\n model = build_model(args)\n task = build_task(args, dataset=dataset, model=model)\n ret = task.train()\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.cuda.is_available" ] ]
y0ast/flax
[ "01afb539c4b91ff9e6c83ad9b5f6b36b3babffa8" ]
[ "tests/core/scope_test.py" ]
[ "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flax import errors\nfrom flax.core import Scope, scope, freeze, init, apply, nn\n\nfrom jax import random\n\nimport numpy as np\n\n\nfrom absl.testing import absltest\n\nclass ScopeTest(absltest.TestCase):\n\n def test_rng(self):\n def f(scope):\n self.assertTrue(scope.has_rng('params'))\n self.assertFalse(scope.has_rng('dropout'))\n rng = scope.make_rng('params')\n self.assertTrue(np.all(rng == random.fold_in(random.PRNGKey(0), 1)))\n init(f)(random.PRNGKey(0))\n\n def test_in_filter(self):\n filter_true = lambda x, y : self.assertTrue(scope.in_filter(x, y))\n filter_false = lambda x, y : self.assertFalse(scope.in_filter(x, y))\n\n filter_true(True, 'any_string1')\n filter_false(False, 'any_string2')\n filter_true('exact_match', 'exact_match')\n filter_false('no_match1', 'no_match2')\n filter_true(['one', 'two'], 'one')\n filter_false(['one', 'two'], 'three')\n filter_false([], 'one')\n filter_false([], None)\n\n def test_union_filter(self):\n def union_check(a, b, ans):\n self.assertEqual(scope.union_filters(a, b), ans)\n self.assertEqual(scope.union_filters(b, a), ans)\n\n union_check(['a', 'b'], ['b', 'c'], set(['a', 'b', 'c']))\n union_check(True, False, True)\n union_check(False, False, set())\n union_check(True, True, True)\n union_check(scope.DenyList(['a', 'b']), scope.DenyList(['b', 'c']), scope.DenyList(set(['b'])))\n union_check(scope.DenyList(['a', 'b']), ['b', 'c'], scope.DenyList(set(['a'])))\n \n def test_intersect_filter(self):\n def intersect_check(a, b, ans):\n self.assertEqual(scope.intersect_filters(a, b), ans)\n self.assertEqual(scope.intersect_filters(b, a), ans)\n\n intersect_check(['a', 'b'], ['b', 'c'], set(['b']))\n intersect_check(True, False, False)\n intersect_check(False, False, set())\n intersect_check(True, True, True)\n intersect_check(scope.DenyList(['a', 'b']), scope.DenyList(['b', 'c']), scope.DenyList(set(['a', 'b', 'c'])))\n intersect_check(scope.DenyList(['a', 'b']), ['b', 'c'], set(['c']))\n \n def test_subtract_filter(self):\n def subtract_check(a, b, ans):\n self.assertEqual(scope.subtract_filters(a, b), ans)\n\n subtract_check(['a', 'b'], ['b', 'c'], set(['a']))\n subtract_check(True, False, scope.DenyList(False))\n subtract_check(False, False, set())\n subtract_check(True, True, False)\n subtract_check(True, 'a', scope.DenyList('a'))\n subtract_check(scope.DenyList(['a', 'b']), scope.DenyList(['b', 'c']), set(['c']))\n subtract_check(scope.DenyList(['a', 'b']), ['b', 'c'], scope.DenyList(set(['a', 'b', 'c'])))\n\n\n def test_group_collections(self):\n params = { 'dense1': { 'x': [10, 20] } }\n batch_stats = { 'dense1': { 'ema': 5 } }\n xs = { 'params': params, 'batch_stats': batch_stats }\n\n # Retrieve all keys only once.\n group = scope.group_collections(xs, ['params', 'params'])\n self.assertEqual(group, ({'params': params}, {}))\n\n # Ignore non-existing keys.\n self.assertEqual(scope.group_collections(xs, ['vars']), ({},))\n\n # False gets nothing 
and True retrieves all keys once.\n self.assertEqual(scope.group_collections(xs, [False, True, True]), \n ({}, xs, {}))\n\n def test_inconsistent_param_shapes(self):\n def f(scope):\n scope.param('test', nn.initializers.ones, (4,))\n \n msg = r'Inconsistent shapes between value and initializer for parameter \"test\" in \"/\": \\(2,\\), \\(4,\\).'\n with self.assertRaisesRegex(errors.ScopeParamShapeError, msg):\n apply(f)(freeze({'params': {'test': np.ones((2,))}}))\n\n def test_mutate_undefined_collection(self):\n def f(scope):\n scope.put_variable('state', 'test', 123)\n\n msg = r'Cannot update variable \"test\" in \"/\" because collection \"state\" is immutable.'\n with self.assertRaisesRegex(errors.ModifyScopeVariableError, msg):\n init(f, mutable='params')(random.PRNGKey(0))\n\n def test_undefined_param(self):\n def f(scope):\n nn.dense(scope.push('dense'), np.ones((1, 2)), 2)\n\n msg = r'No parameter named \"kernel\" exists in \"/dense\".'\n with self.assertRaisesRegex(errors.ScopeParamNotFoundError, msg):\n apply(f)({})\n\n def test_variable_is_mutable(self):\n def f(scope, should_be_mutable):\n test = scope.variable('state', 'test', lambda: 1)\n self.assertEqual(test.is_mutable(), should_be_mutable)\n\n _, variables = apply(f, mutable='state')({}, True)\n apply(f, mutable=False)(variables, False)\n\n def test_rngs_check_w_frozen_dict(self):\n def f(scope, x):\n return x\n _ = apply(f)(\n {}, np.array([0.]), rngs=freeze({'a':random.PRNGKey(0)}))\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.array", "numpy.ones" ] ]
TonySoloProjects/lake_analyses
[ "08a07306b4da3b85e3445732999cb0742ca03e87" ]
[ "old/numpy-examples.py" ]
[ "# fun with numpy\nimport numpy as np\n\ndef f(x,y):\n print(f'x=\\n{x}')\n print(f'y=\\n{y}')\n return x+y\n\nz = np.fromfunction(f,(4,3))\n\nprint(f'z=\\n{z}')\n" ]
[ [ "numpy.fromfunction" ] ]
sharanjeetsinghmago/online_reward_shaping
[ "55ac60f59ea6cc48fc8a788625aa50ff08453c1d" ]
[ "src/terrain.py" ]
[ "from OpenGL.GL import *\nfrom OpenGL.arrays import vbo\nfrom OpenGL.GLU import *\n\nfrom PyQt5.QtGui import QColor, QVector3D, QMatrix4x4\nfrom PyQt5.QtCore import QRect\n\nfrom shader import Shader\nfrom textures import bindHeightMap, ReadTexture, bindRewardMap, createEmptyTexture\n\nimport cv2 as cv\n\nimport numpy as np\nclass Terrain():\n vertexCount = 502\n terrainVertices = []\n terrainIndices = []\n\n\n def __init__(self, position, heightMap):\n self.position = position\n self.heightMap = heightMap\n self.setup()\n\n def draw(self, perspective , view):\n self.shader.use()\n self.shader.setMat4(\"perspective\", perspective)\n self.shader.setMat4(\"view\", view)\n\n glBindVertexArray(self.__vao)\n # glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);\n glDrawElements(GL_TRIANGLES, len(self.terrainIndices), GL_UNSIGNED_INT, None)\n # glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);\n glBindVertexArray(0);\n\n glActiveTexture(GL_TEXTURE0)\n glBindTexture(GL_TEXTURE_2D, self.colors)\n self.shader.setInt(\"terrainTexture\", 0)\n\n glActiveTexture(GL_TEXTURE1)\n glBindTexture(GL_TEXTURE_2D, self.heightMap)\n self.shader.setInt(\"heightMap\", 1)\n\n glActiveTexture(GL_TEXTURE2)\n glBindTexture(GL_TEXTURE_2D, self.rewardMap)\n self.shader.setInt(\"rewardMap\", 2)\n self.shader.stop()\n\n\n def getVerticesCount(self, vertexCount):\n return vertexCount*vertexCount*3\n\n def getIndicesCount(self, vertexCount):\n return 6*(vertexCount-1)*(vertexCount-1)\n\n def getVertices(self, vertexCount):\n vertices = [0.0]*self.getVerticesCount(vertexCount)\n vertexPointer = 0\n for i in range(vertexCount):\n for j in range(vertexCount):\n vertices[vertexPointer*3] = (j/(vertexCount-1))*2.0 - 1.0\n vertices[vertexPointer*3+1] = 0\n vertices[vertexPointer*3+2] = (i/(vertexCount-1))*2.0 - 1.0\n vertexPointer = vertexPointer+1\n return vertices\n\n def getIndices(self, vertexCount):\n indices = [0.0]*self.getIndicesCount(vertexCount)\n pointer = 0\n for gz in range(vertexCount-1):\n for gx in range(vertexCount-1):\n topLeft = (gz*vertexCount)+gx\n topRight = topLeft + 1\n bottomLeft = ((gz+1)*vertexCount)+gx\n bottomRight = bottomLeft + 1\n indices[pointer] = topLeft\n pointer = pointer+1\n indices[pointer] = bottomLeft\n pointer = pointer+1\n indices[pointer] = topRight\n pointer = pointer+1\n indices[pointer] = topRight\n pointer = pointer+1\n indices[pointer] = bottomLeft\n pointer = pointer+1\n indices[pointer] = bottomRight\n pointer = pointer+1\n return indices\n\n\n\n def getObjectCoord(self, windowPos, perspective, view, viewport):\n modelView = QMatrix4x4()\n modelView*=view\n modelView*=self.model\n objectCoord = windowPos.unproject(modelView, perspective, self.np2QRect(viewport))\n return objectCoord\n\n def matrixTypeConversion(self, matrix):\n return QMatrix4x4(matrix.m11, matrix.m12,matrix.m13, matrix.m14,matrix.m21, matrix.m22,matrix.m23, matrix.m24,matrix.m31, matrix.m32,matrix.m33, matrix.m34,matrix.m41, matrix.m42,matrix.m43, matrix.m44)\n\n def np2QRect(self, raw_array):\n return QRect(raw_array[0], raw_array[1], raw_array[2], raw_array[3])\n\n\n def updateRewards(self, rewardMap):\n print(\"updating rewards\")\n rewardColors = self.rewardMapColors(rewardMap)\n bindRewardMap(self.rewardMap, rewardColors)\n\n def rewardMapColors(self, rewardMap):\n colors = np.zeros([1001, 1001, 3], dtype='uint8')\n\n noReward = (rewardMap==0)\n positiveReward = (rewardMap==1)\n negativeReward = (rewardMap==-1)\n colors[..., 0] = 255*positiveReward\n colors[..., 1] = 255*noReward\n colors[..., 2] = 255*negativeReward\n 
cv.imwrite(\"abc.png\",colors)\n return np.array(colors, dtype='uint8')\n\n def setup(self):\n\n # Set up vertices and indices\n self.terrainVertices = np.array(self.getVertices(self.vertexCount), dtype='float32')\n self.terrainIndices = np.array(self.getIndices(self.vertexCount), dtype='uint32')\n\n # Setup shaders\n self.shader = Shader(vertex_source=\"shaders/terrain.vs\", fragment_source=\"shaders/terrain.fs\")\n self.shader.use()\n\n # Set model matrix of terrain\n # self.model = Matrix44.from_translation(np.array(self.position))\n self.model = QMatrix4x4()\n self.model.scale(500.5, 1.0, 500.5)\n #self.model.translate(self.position)\n self.shader.setMat4(\"model\", self.model)\n\n # Create Vertex Array Object\n self.__vao = glGenVertexArrays(1)\n glBindVertexArray(self.__vao)\n\n # Create Buffers and assign data\n bufs = glGenBuffers(2)\n glBindBuffer(GL_ARRAY_BUFFER, bufs[0])\n glBufferData(GL_ARRAY_BUFFER, sizeof(ctypes.c_float) * len(self.terrainVertices), (ctypes.c_float * len(self.terrainVertices))(*self.terrainVertices), GL_STATIC_DRAW)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufs[1])\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ctypes.c_uint) * len(self.terrainIndices), (ctypes.c_uint * len(self.terrainIndices))(*self.terrainIndices), GL_STATIC_DRAW)\n\n # Turn on position attribute and set its size\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3*sizeof(ctypes.c_float), None)\n\n # Unbind buffers and VAO\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n glBindVertexArray(0);\n\n # Setup textures\n self.colors = ReadTexture(\"textures/atacama_rgb3.jpg\")\n self.rewardMap = createEmptyTexture()\n self.heightMap = bindHeightMap(self.heightMap.getHeightMap())\n self.shader.stop()\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
jinyeom/general-bipedal-walker
[ "fd76be55d0b29b55008846d6dfbee572a6ce8ef3" ]
[ "general_bipedal_walker/robot.py" ]
[ "import math\nimport numpy as np\nfrom Box2D.b2 import (\n edgeShape, \n circleShape, \n fixtureDef, \n polygonShape, \n revoluteJointDef, \n contactListener,\n rayCastCallback\n)\nfrom .color import Color\n\nclass Hull:\n VERTICES = [(-30, 9), (6, 9), (34, 1), (34, -8), (-30, -8)]\n\n def __init__(self, config):\n self.color = config.hull_color\n self.body = None\n vertices = [(x / config.scale, y / config.scale) for x, y in Hull.VERTICES]\n self.fixture = fixtureDef(\n shape=polygonShape(vertices=vertices),\n density=5.0,\n friction=0.1,\n categoryBits=0x0020,\n maskBits=0x001, # collide only with ground\n restitution=0.0 # 0.99 bouncy\n )\n\n @property\n def parts(self):\n return [self.body]\n\n def reset(self, world, init_x, init_y, noise):\n self.body = world.CreateDynamicBody(\n position=(init_x, init_y), \n fixtures=self.fixture\n )\n self.body.color1 = self.color\n self.body.color2 = Color.BLACK\n self.body.ApplyForceToCenter(noise, True)\n\nclass Lidar:\n class Callback(rayCastCallback):\n def ReportFixture(self, fixture, point, normal, fraction):\n if (fixture.filterData.categoryBits & 1) == 0:\n return 1\n self.p2 = point\n self.fraction = fraction\n return 0\n\n def __init__(self, config):\n self.scan_range = config.lidar_range\n self.callbacks = None\n\n def reset(self):\n self.callbacks = [Lidar.Callback() for _ in range(10)]\n\n def scan(self, world, pos):\n for i, lidar in enumerate(self.callbacks):\n self.callbacks[i].fraction = 1.0\n self.callbacks[i].p1 = pos\n self.callbacks[i].p2 = (\n pos[0] + math.sin(1.5 * i / 10.0) * self.scan_range,\n pos[1] - math.cos(1.5 * i / 10.0) * self.scan_range\n )\n world.RayCast(lidar, lidar.p1, lidar.p2)\n\nclass Leg:\n def __init__(self, config, left=True):\n self.left = left\n if self.left:\n self.color = Color.lighter(config.leg_color)\n self.top_width = config.leg1_top_width / config.scale\n self.top_height = config.leg1_top_height / config.scale\n self.bot_width = config.leg1_bot_width / config.scale\n self.bot_height = config.leg1_bot_height / config.scale\n else:\n self.color = Color.darker(config.leg_color)\n self.top_width = config.leg2_top_width / config.scale\n self.top_height = config.leg2_top_height / config.scale\n self.bot_width = config.leg2_bot_width / config.scale\n self.bot_height = config.leg2_bot_height / config.scale\n self.leg_down = -8 / config.scale\n\n # configure the top leg part \n self.top_shift = self.top_height / 2 + self.leg_down\n self.top_body = None\n self.top_fixture = fixtureDef(\n shape=polygonShape(box=(self.top_width / 2, self.top_height / 2)),\n density=1.0,\n restitution=0.0,\n categoryBits=0x0020,\n maskBits=0x001\n )\n\n # configure the motor torque\n self.motors_torque = config.motors_torque\n self.joint = None\n\n # configure the bottom leg par\n self.bot_shift = self.top_height + self.bot_height / 2 + self.leg_down\n self.bot_body = None\n self.bot_fixture = fixtureDef(\n shape=polygonShape(box=(self.bot_width / 2, self.bot_height / 2)),\n density=1.0,\n restitution=0.0,\n categoryBits=0x0020,\n maskBits=0x001\n )\n\n @property\n def parts(self):\n return [self.bot_body, self.top_body]\n\n def reset(self, world, init_x, init_y):\n self.top_body = world.CreateDynamicBody(\n position=(init_x, init_y - self.top_shift),\n angle=0.05 if self.left else -0.05,\n fixtures=self.top_fixture\n )\n\n self.bot_body = world.CreateDynamicBody(\n position=(init_x, init_y - self.bot_shift),\n angle=0.05 if self.left else -0.05,\n fixtures=self.bot_fixture\n )\n self.bot_body.ground_contact = False\n\n 
self.joint = world.CreateJoint(\n revoluteJointDef(\n bodyA=self.top_body,\n bodyB=self.bot_body,\n localAnchorA=(0, -self.top_height/2),\n localAnchorB=(0, self.bot_height/2),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=self.motors_torque,\n motorSpeed=1,\n lowerAngle=-1.6,\n upperAngle=-0.1\n )\n )\n\n self.top_body.color1 = self.color\n self.top_body.color2 = Color.BLACK\n self.bot_body.color1 = self.color\n self.bot_body.color2 = Color.BLACK\n\nclass RobotConfig:\n LEG_TOP_WIDTH = 8.0\n LEG_TOP_HEIGHT = 34.0\n LEG_BOT_WIDTH = 6.4\n LEG_BOT_HEIGHT = 34.0\n LIDAR_RANGE = 160.0\n MOTORS_TORQUE = 80.0\n SPEED_HIP = 4.0\n SPEED_KNEE = 6.0\n\n def __init__(self, scale, params=None):\n self.scale = scale\n\n self.hull_color = Color.rand()\n self.leg_color = Color.rand()\n\n self.params = params if params is not None else np.ones(12)\n self.leg1_top_width = self.params[0] * self.LEG_TOP_WIDTH \n self.leg1_top_height = self.params[1] * self.LEG_TOP_HEIGHT\n self.leg1_bot_width = self.params[2] * self.LEG_BOT_WIDTH \n self.leg1_bot_height = self.params[3] * self.LEG_BOT_HEIGHT\n self.leg2_top_width = self.params[4] * self.LEG_TOP_WIDTH \n self.leg2_top_height = self.params[5] * self.LEG_TOP_HEIGHT\n self.leg2_bot_width = self.params[6] * self.LEG_BOT_WIDTH \n self.leg2_bot_height = self.params[7] * self.LEG_BOT_HEIGHT\n self.lidar_range = self.params[8] * self.LIDAR_RANGE\n self.motors_torque = self.params[9] * self.MOTORS_TORQUE\n self.speed_hip = self.params[10] * self.SPEED_HIP\n self.speed_knee = self.params[11] * self.SPEED_KNEE\n\n @classmethod\n def sample(cls, scale, np_random, low=0.5, high=1.5, symmetric=True):\n if symmetric:\n shape_params = np_random.uniform(low, high, size=4)\n shape_params = np.concatenate((shape_params, shape_params))\n dyna_params = np_random.uniform(low, high, size=4)\n params = np.concatenate((shape_params, dyna_params))\n else:\n params = np_random.uniform(low, high, size=12)\n return RobotConfig(scale, params=params)\n\nclass BipedalRobot:\n def __init__(self, config):\n self.config = config\n self.world = None\n\n self.hull = Hull(config)\n self.lidar = Lidar(config)\n\n self.joint1 = None\n self.joint2 = None\n\n self.leg1 = Leg(config, left=True)\n self.leg2 = Leg(config, left=False)\n\n @property\n def parts(self):\n # NOTE: their order is important for rendering.\n return self.leg1.parts + self.leg2.parts + self.hull.parts\n\n @property\n def joints(self):\n # NOTE: their order defines the action space.\n return [self.joint1, self.leg1.joint, self.joint2, self.leg2.joint]\n\n def destroy(self):\n if self.world is not None:\n for part in self.parts:\n if part is not None:\n self.world.DestroyBody(part)\n\n def reset(self, world, init_x, init_y, noise):\n self.world = world\n self.hull.reset(world, init_x, init_y, noise)\n self.leg1.reset(world, init_x, init_y)\n self.leg2.reset(world, init_x, init_y)\n self.lidar.reset()\n self.joint1 = world.CreateJoint(\n revoluteJointDef(\n bodyA=self.hull.body,\n bodyB=self.leg1.top_body,\n localAnchorA=(0, self.leg1.leg_down),\n localAnchorB=(0, self.leg1.top_height / 2),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=self.config.motors_torque,\n motorSpeed=-1.0,\n lowerAngle=-0.8,\n upperAngle=1.1\n )\n )\n self.joint2 = world.CreateJoint(\n revoluteJointDef(\n bodyA=self.hull.body,\n bodyB=self.leg2.top_body,\n localAnchorA=(0, self.leg2.leg_down),\n localAnchorB=(0, self.leg2.top_height / 2),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=self.config.motors_torque,\n 
motorSpeed=1.0,\n lowerAngle=-0.8,\n upperAngle=1.1\n )\n )\n\n def step(self, action):\n joint0, joint1, joint2, joint3 = self.joints\n joint0.motorSpeed = self.config.speed_hip * np.sign(action[0])\n joint0.maxMotorTorque = self.config.motors_torque * np.clip(np.abs(action[0]), 0, 1)\n joint1.motorSpeed = self.config.speed_knee * np.sign(action[1])\n joint1.maxMotorTorque = self.config.motors_torque * np.clip(np.abs(action[1]), 0, 1)\n joint2.motorSpeed = self.config.speed_hip * np.sign(action[2])\n joint2.maxMotorTorque = self.config.motors_torque * np.clip(np.abs(action[2]), 0, 1)\n joint3.motorSpeed = self.config.speed_knee * np.sign(action[3])\n joint3.maxMotorTorque = self.config.motors_torque * np.clip(np.abs(action[3]), 0, 1)\n return self.joints\n\n def scan(self, world, pos):\n self.lidar.scan(world, pos)\n return self.lidar.callbacks" ]
[ [ "numpy.concatenate", "numpy.sign", "numpy.ones", "numpy.abs" ] ]
kinow/bcdp
[ "f4366a307672d84ed7992f3bb68a04303a107c56" ]
[ "examples/scripts/bcdp_ocw_benchmark.py" ]
[ "from contextlib import contextmanager\nfrom datetime import datetime\nimport time\nimport os\nimport glob\nimport numpy as np\nimport matplotlib\nimport pandas as pd\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport bcdp\nimport ocw.data_source.local as local\nimport ocw.dataset_processor as dsp\nfrom ocw.dataset import Bounds as Bounds\n\n\n@contextmanager\ndef time_block(results, name):\n print(name)\n t0 = time.time()\n yield\n t1 = time.time()\n results[name] = t1 - t0\n\ndef dt64_to_datetime(dt64):\n t = dt64.astype(int)*1e-9\n dt = datetime.utcfromtimestamp(t)\n return dt\n \n# Dataset paths\npaths = os.path.join(os.path.expanduser('~'), 'data/CORDEX_Africa/*clt*')\n\n### BCDP SECTION\nprint('BCDP Benchmarks')\nbcdp_results = {}\nwith time_block(bcdp_results, 'Dataset Loading'):\n project = 'CORDEX-Africa'\n template = '*_{model}_*_{variable}.nc'\n bcdp.build_extractor(project, template, name_field='model', index=[1, 6])\n ens = bcdp.load_local(paths=paths, project=project)\n \n# Ouput grid info\ndomain = ens.overlap\noutput_grid = bcdp.utils.grid_from_res((0.88, 0.88), domain)\nnew_lats = output_grid.lat.values\nnew_lons = output_grid.lon.values\nstart_time = dt64_to_datetime(domain.time_bnds.min)\nend_time = dt64_to_datetime(domain.time_bnds.max)\nbnds = Bounds(lat_min=domain.lat_bnds.min, lat_max=domain.lat_bnds.max,\n lon_min=domain.lon_bnds.min, lon_max=domain.lon_bnds.max,\n start=start_time, end=end_time)\n\n\nwith time_block(bcdp_results, 'Domain Subsetting'):\n ens = ens.subset()\n \nwith time_block(bcdp_results, 'Seasonal Subsetting'):\n ens = ens.select_season(season='DJF')\n \nwith time_block(bcdp_results, 'Resampling'):\n ens = ens.resample(freq='Y')\n \nwith time_block(bcdp_results, 'Regridding'):\n ens.regrid(backend='scipy', method='linear', output_grid=output_grid)\n\nprint(f'BCDP Results: {bcdp_results}')\n\n### OCW SECTION\nprint('OCW Benchmarks')\nocw_results = {}\nwith time_block(ocw_results, 'Dataset Loading'):\n datasets = local.load_multiple_files(paths, 'clt')\n\nwith time_block(ocw_results, 'Domain Subsetting'):\n for i, ds in enumerate(datasets):\n datasets[i] = dsp.subset(ds, bnds)\n\nwith time_block(ocw_results, 'Seasonal Subsetting'):\n for i, ds in enumerate(datasets):\n datasets[i] = dsp.temporal_subset(ds, 9, 11)\n \nwith time_block(ocw_results, 'Resampling'):\n for i, ds in enumerate(datasets):\n datasets[i] = dsp.temporal_rebin(ds, 'annual')\n \nwith time_block(ocw_results, 'Regridding'):\n for i, ds in enumerate(datasets):\n datasets[i] = dsp.spatial_regrid(ds, new_lats, new_lons)\n\nprint(f'OCW Results: {ocw_results}')\n\n# Plot results\nmatplotlib.style.use('ggplot')\ndf = pd.DataFrame({'OCW': ocw_results, 'BCDP': bcdp_results})\ndf.plot.bar(logy=True, rot=12)\nfor p in ax.patches:\n val = np.round(p.get_height(), decimals=2)\n ax.annotate(str(val), (p.get_x() + .02, p.get_height()), size=9.5)\n\nplt.ylabel('Running Time [s]')\nplt.savefig('bcdp_ocw_benchmarks.png')\n" ]
[ [ "matplotlib.use", "matplotlib.style.use", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel" ] ]
Complicateddd/Fairmot_Adpoted
[ "576f252496f48c95be882db6dcb001882596eeac" ]
[ "src/lib/models/networks/pose_dla_dcn.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport math\nfrom os.path import join\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom torch import nn\n\nfrom .DCNv2.dcn_v2 import DCN\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\ndef get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):\n return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, planes, stride=1, dilation=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,\n stride=stride, padding=dilation,\n bias=False, dilation=dilation)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=1, padding=dilation,\n bias=False, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.stride = stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, stride=1, dilation=1):\n super(Bottleneck, self).__init__()\n expansion = Bottleneck.expansion\n bottle_planes = planes // expansion\n self.conv1 = nn.Conv2d(inplanes, bottle_planes,\n kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,\n stride=stride, padding=dilation,\n bias=False, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(bottle_planes, planes,\n kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.stride = stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass BottleneckX(nn.Module):\n expansion = 2\n cardinality = 32\n\n def __init__(self, inplanes, planes, stride=1, dilation=1):\n super(BottleneckX, self).__init__()\n cardinality = BottleneckX.cardinality\n # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))\n # bottle_planes = dim * cardinality\n bottle_planes = planes * cardinality // 32\n self.conv1 = nn.Conv2d(inplanes, bottle_planes,\n kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,\n stride=stride, padding=dilation, bias=False,\n dilation=dilation, groups=cardinality)\n self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(bottle_planes, planes,\n kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.stride = 
stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Root(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, residual):\n super(Root, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, 1,\n stride=1, bias=False, padding=(kernel_size - 1) // 2)\n self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.residual = residual\n\n def forward(self, *x):\n children = x\n x = self.conv(torch.cat(x, 1))\n x = self.bn(x)\n if self.residual:\n x += children[0]\n x = self.relu(x)\n\n return x\n\n\nclass Tree(nn.Module):\n def __init__(self, levels, block, in_channels, out_channels, stride=1,\n level_root=False, root_dim=0, root_kernel_size=1,\n dilation=1, root_residual=False):\n super(Tree, self).__init__()\n if root_dim == 0:\n root_dim = 2 * out_channels\n if level_root:\n root_dim += in_channels\n if levels == 1:\n self.tree1 = block(in_channels, out_channels, stride,\n dilation=dilation)\n self.tree2 = block(out_channels, out_channels, 1,\n dilation=dilation)\n else:\n self.tree1 = Tree(levels - 1, block, in_channels, out_channels,\n stride, root_dim=0,\n root_kernel_size=root_kernel_size,\n dilation=dilation, root_residual=root_residual)\n self.tree2 = Tree(levels - 1, block, out_channels, out_channels,\n root_dim=root_dim + out_channels,\n root_kernel_size=root_kernel_size,\n dilation=dilation, root_residual=root_residual)\n if levels == 1:\n self.root = Root(root_dim, out_channels, root_kernel_size,\n root_residual)\n self.level_root = level_root\n self.root_dim = root_dim\n self.downsample = None\n self.project = None\n self.levels = levels\n if stride > 1:\n self.downsample = nn.MaxPool2d(stride, stride=stride)\n if in_channels != out_channels:\n self.project = nn.Sequential(\n nn.Conv2d(in_channels, out_channels,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)\n )\n\n def forward(self, x, residual=None, children=None):\n children = [] if children is None else children\n bottom = self.downsample(x) if self.downsample else x\n residual = self.project(bottom) if self.project else bottom\n if self.level_root:\n children.append(bottom)\n x1 = self.tree1(x, residual)\n if self.levels == 1:\n x2 = self.tree2(x1)\n x = self.root(x2, x1, *children)\n else:\n children.append(x1)\n x = self.tree2(x1, children=children)\n return x\n\n\nclass DLA(nn.Module):\n def __init__(self, levels, channels, num_classes=1000,\n block=BasicBlock, residual_root=False, linear_root=False):\n super(DLA, self).__init__()\n self.channels = channels\n self.num_classes = num_classes\n self.base_layer = nn.Sequential(\n nn.Conv2d(3, channels[0], kernel_size=7, stride=1,\n padding=3, bias=False),\n nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True))\n self.level0 = self._make_conv_level(\n channels[0], channels[0], levels[0])\n self.level1 = self._make_conv_level(\n channels[0], channels[1], levels[1], stride=2)\n self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,\n level_root=False,\n root_residual=residual_root)\n self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,\n level_root=True, root_residual=residual_root)\n self.level4 = Tree(levels[4], block, 
channels[3], channels[4], 2,\n level_root=True, root_residual=residual_root)\n self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,\n level_root=True, root_residual=residual_root)\n\n # for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. / n))\n # elif isinstance(m, nn.BatchNorm2d):\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n\n def _make_level(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes:\n downsample = nn.Sequential(\n nn.MaxPool2d(stride, stride=stride),\n nn.Conv2d(inplanes, planes,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample=downsample))\n for i in range(1, blocks):\n layers.append(block(inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):\n modules = []\n for i in range(convs):\n modules.extend([\n nn.Conv2d(inplanes, planes, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias=False, dilation=dilation),\n nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)])\n inplanes = planes\n return nn.Sequential(*modules)\n\n def forward(self, x):\n y = []\n x = self.base_layer(x)\n for i in range(6):\n x = getattr(self, 'level{}'.format(i))(x)\n y.append(x)\n return y\n\n def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):\n # fc = self.fc\n if name.endswith('.pth'):\n model_weights = torch.load(data + name)\n else:\n model_url = get_model_url(data, name, hash)\n model_weights = model_zoo.load_url(model_url)\n num_classes = len(model_weights[list(model_weights.keys())[-1]])\n self.fc = nn.Conv2d(\n self.channels[-1], num_classes,\n kernel_size=1, stride=1, padding=0, bias=True)\n self.load_state_dict(model_weights)\n # self.fc = fc\n\n\ndef dla34(pretrained=False, **kwargs): # DLA-34\n model = DLA([1, 1, 1, 2, 2, 1],\n [16, 32, 64, 128, 256, 512],\n block=BasicBlock, **kwargs)\n # if pretrained:\n # model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')\n return model\n\nclass Identity(nn.Module):\n\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\ndef fill_fc_weights(layers):\n for m in layers.modules():\n if isinstance(m, nn.Conv2d):\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n\ndef fill_up_weights(up):\n w = up.weight.data\n f = math.ceil(w.size(2) / 2)\n c = (2 * f - 1 - f % 2) / (2. 
* f)\n for i in range(w.size(2)):\n for j in range(w.size(3)):\n w[0, 0, i, j] = \\\n (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))\n for c in range(1, w.size(0)):\n w[c, 0, :, :] = w[0, 0, :, :]\n\n\nclass DeformConv(nn.Module):\n def __init__(self, chi, cho):\n super(DeformConv, self).__init__()\n self.actf = nn.Sequential(\n nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)\n )\n self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.actf(x)\n return x\n\n\nclass IDAUp(nn.Module):\n\n def __init__(self, o, channels, up_f):\n super(IDAUp, self).__init__()\n for i in range(1, len(channels)):\n c = channels[i]\n f = int(up_f[i]) \n proj = DeformConv(c, o)\n node = DeformConv(o, o)\n \n up = nn.ConvTranspose2d(o, o, f * 2, stride=f, \n padding=f // 2, output_padding=0,\n groups=o, bias=False)\n fill_up_weights(up)\n\n setattr(self, 'proj_' + str(i), proj)\n setattr(self, 'up_' + str(i), up)\n setattr(self, 'node_' + str(i), node)\n \n \n def forward(self, layers, startp, endp):\n for i in range(startp + 1, endp):\n upsample = getattr(self, 'up_' + str(i - startp))\n project = getattr(self, 'proj_' + str(i - startp))\n layers[i] = upsample(project(layers[i]))\n node = getattr(self, 'node_' + str(i - startp))\n layers[i] = node(layers[i] + layers[i - 1])\n\n\n\nclass DLAUp(nn.Module):\n def __init__(self, startp, channels, scales, in_channels=None):\n super(DLAUp, self).__init__()\n self.startp = startp\n if in_channels is None:\n in_channels = channels\n self.channels = channels\n channels = list(channels)\n scales = np.array(scales, dtype=int)\n for i in range(len(channels) - 1):\n j = -i - 2\n setattr(self, 'ida_{}'.format(i),\n IDAUp(channels[j], in_channels[j:],\n scales[j:] // scales[j]))\n scales[j + 1:] = scales[j]\n in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]\n\n def forward(self, layers):\n out = [layers[-1]] # start with 32\n for i in range(len(layers) - self.startp - 1):\n ida = getattr(self, 'ida_{}'.format(i))\n ida(layers, len(layers) -i - 2, len(layers))\n out.insert(0, layers[-1])\n return out\n\n\nclass Interpolate(nn.Module):\n def __init__(self, scale, mode):\n super(Interpolate, self).__init__()\n self.scale = scale\n self.mode = mode\n \n def forward(self, x):\n x = F.interpolate(x, scale_factor=self.scale, mode=self.mode, align_corners=False)\n return x\n\n\nclass DLASeg(nn.Module):\n def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,\n last_level, head_conv, out_channel=0):\n super(DLASeg, self).__init__()\n assert down_ratio in [2, 4, 8, 16]\n self.first_level = int(np.log2(down_ratio))\n self.last_level = last_level\n self.base = globals()[base_name](pretrained=pretrained)\n channels = self.base.channels\n scales = [2 ** i for i in range(len(channels[self.first_level:]))]\n self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)\n\n if out_channel == 0:\n out_channel = channels[self.first_level]\n\n self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level], \n [2 ** i for i in range(self.last_level - self.first_level)])\n \n self.heads = heads\n for head in self.heads:\n classes = self.heads[head]\n if head_conv > 0:\n fc = nn.Sequential(\n nn.Conv2d(channels[self.first_level], head_conv,\n kernel_size=3, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(head_conv, classes, \n kernel_size=final_kernel, stride=1, \n padding=final_kernel // 2, bias=True))\n 
if 'hm' in head:\n fc[-1].bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n else:\n fc = nn.Conv2d(channels[self.first_level], classes, \n kernel_size=final_kernel, stride=1, \n padding=final_kernel // 2, bias=True)\n if 'hm' in head:\n fc.bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n self.__setattr__(head, fc)\n\n def forward(self, x):\n x = self.base(x)\n x = self.dla_up(x)\n\n y = []\n for i in range(self.last_level - self.first_level):\n y.append(x[i].clone())\n self.ida_up(y, 0, len(y))\n\n z = {}\n for head in self.heads:\n z[head] = self.__getattr__(head)(y[-1])\n return [z]\n \n\ndef get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):\n model = DLASeg('dla{}'.format(num_layers), heads,\n pretrained=True,\n down_ratio=down_ratio,\n final_kernel=1,\n last_level=5,\n head_conv=head_conv)\n return model\n\n" ]
[ [ "numpy.array", "torch.cat", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.BatchNorm2d", "torch.nn.functional.interpolate", "torch.utils.model_zoo.load_url", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.load", "numpy.log2" ] ]
IshchenkoRoman/pommerman
[ "117824dca6974822d90e8fc3345da32eeb43cb43" ]
[ "cli/run_battle.py" ]
[ "\"\"\"Run a battle among agents.\n\nCall this with a config, a game, and a list of agents. The script will start separate threads to operate the agents\nand then report back the result.\n\nAn example with all four test agents running ffa:\npython run_battle.py --agents=test::agents.SimpleAgent,test::agents.SimpleAgent,test::agents.SimpleAgent,test::agents.SimpleAgent --config=PommeFFACompetition-v0\n\nAn example with one player, two random agents, and one test agent:\npython run_battle.py --agents=player::arrows,test::agents.SimpleAgent,random::null,random::null --config=PommeFFACompetition-v0\n\nAn example with a docker agent:\npython run_battle.py --agents=player::arrows,docker::pommerman/test-agent,random::null,random::null --config=PommeFFACompetition-v0\n\"\"\"\nimport atexit\nfrom datetime import datetime\nimport os\nimport random\nimport sys\nimport time\n\nimport argparse\nimport numpy as np\n\n#from .. import helpers\n#from .. import make\n\nfrom pommerman import helpers, make\nfrom pommerman.agents import TensorForceAgent\n\nfrom pommerman import utility\n\nimport pdb\n\ndef run(args, num_times=1, seed=None):\n '''Wrapper to help start the game'''\n config = args.config\n record_pngs_dir = args.record_pngs_dir\n record_json_dir = args.record_json_dir\n agent_env_vars = args.agent_env_vars\n game_state_file = args.game_state_file\n render_mode = args.render_mode\n do_sleep = args.do_sleep\n\n agents = [\n helpers.make_agent_from_string(agent_string, agent_id)\n for agent_id, agent_string in enumerate(args.agents.split(','))\n ]\n\n #TODO: DELETE\n #pdb.set_trace()\n ###############\n\n env = make(config, agents, game_state_file, render_mode=render_mode)\n\n for i, agent_string in enumerate(args.agents.split(',')):\n if agent_string.split('::')[0] == \"tensorforce\":\n print(\"run_buttle[51] \", type(agents[i]))\n a = agents[i]\n a_env = a.initialize(env)\n a_env.restore_model(directory=\"./pommerman/cli/saved_win/\")\n # agents[i].initialize(env).restore_model(directory=\"./saved_win/\")\n\n def _run(record_pngs_dir=None, record_json_dir=None):\n '''Runs a game'''\n print(\"Starting the Game.\")\n if record_pngs_dir and not os.path.isdir(record_pngs_dir):\n os.makedirs(record_pngs_dir)\n if record_json_dir and not os.path.isdir(record_json_dir):\n os.makedirs(record_json_dir)\n\n obs = env.reset()\n done = False\n\n while not done:\n if args.render:\n env.render(\n record_pngs_dir=record_pngs_dir,\n record_json_dir=record_json_dir,\n do_sleep=do_sleep)\n actions = env.act(obs)\n obs, reward, done, info = env.step(actions)\n\n print(\"Final Result: \", info)\n if args.render:\n env.render(\n record_pngs_dir=record_pngs_dir,\n record_json_dir=record_json_dir,\n do_sleep=do_sleep)\n if do_sleep:\n time.sleep(5)\n env.render(close=True)\n\n if record_json_dir:\n finished_at = datetime.now().isoformat()\n _agents = args.agents.split(',')\n utility.join_json_state(record_json_dir, _agents, finished_at,\n config, info)\n\n return info\n\n if seed is None:\n # Pick a random seed between 0 and 2^31 - 1\n seed = random.randint(0, np.iinfo(np.int32).max)\n np.random.seed(seed)\n random.seed(seed)\n env.seed(seed)\n\n infos = []\n times = []\n for i in range(num_times):\n start = time.time()\n\n record_pngs_dir_ = record_pngs_dir + '/%d' % (i+1) \\\n if record_pngs_dir else None\n record_json_dir_ = record_json_dir + '/%d' % (i+1) \\\n if record_json_dir else None\n infos.append(_run(record_pngs_dir_, record_json_dir_))\n\n times.append(time.time() - start)\n print(\"Game Time: \", 
times[-1])\n\n atexit.register(env.close)\n return infos\n\n\ndef main():\n '''CLI entry pointed used to bootstrap a battle'''\n simple_agent = 'test::agents.SimpleAgent'\n player_agent = 'player::arrows'\n docker_agent = 'docker::pommerman/simple-agent'\n\n parser = argparse.ArgumentParser(description='Playground Flags.')\n parser.add_argument(\n '--config',\n default='PommeFFACompetition-v0',\n help='Configuration to execute. See env_ids in '\n 'configs.py for options.')\n parser.add_argument(\n '--agents',\n default=','.join([simple_agent] * 4),\n # default=','.join([player_agent] + [simple_agent]*3]),\n # default=','.join([docker_agent] + [simple_agent]*3]),\n help='Comma delineated list of agent types and docker '\n 'locations to run the agents.')\n parser.add_argument(\n '--agent_env_vars',\n help='Comma delineated list of agent environment vars '\n 'to pass to Docker. This is only for the Docker Agent.'\n \" An example is '0:foo=bar:baz=lar,3:foo=lam', which \"\n 'would send two arguments to Docker Agent 0 and one '\n 'to Docker Agent 3.',\n default=\"\")\n parser.add_argument(\n '--record_pngs_dir',\n default=None,\n help='Directory to record the PNGs of the game. '\n \"Doesn't record if None.\")\n parser.add_argument(\n '--record_json_dir',\n default=None,\n help='Directory to record the JSON representations of '\n \"the game. Doesn't record if None.\")\n parser.add_argument(\n \"--render\",\n default=False,\n action='store_true',\n help=\"Whether to render or not. Defaults to False.\")\n parser.add_argument(\n '--render_mode',\n default='human',\n help=\"What mode to render. Options are human, rgb_pixel, and rgb_array\")\n parser.add_argument(\n '--game_state_file',\n default=None,\n help=\"File from which to load game state.\")\n parser.add_argument(\n '--do_sleep',\n default=True,\n help=\"Whether we sleep after each rendering.\")\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.random.seed", "numpy.iinfo" ] ]
q2675315436/underwater_sub
[ "334e945f04d6c309285ffdde19384344b8180720" ]
[ "mmdet/core/post_processing/bbox_nms.py" ]
[ "import torch\nfrom mmcv.ops.nms import batched_nms\n\n\ndef multiclass_nms(multi_bboxes,\n multi_scores,\n score_thr,\n nms_cfg,\n max_num=-1,\n score_factors=None):\n \"\"\"NMS for multi-class bboxes.\n\n Args:\n multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)\n multi_scores (Tensor): shape (n, #class), where the last column\n contains scores of the background class, but this will be ignored.\n score_thr (float): bbox threshold, bboxes with scores lower than it\n will not be considered.\n nms_thr (float): NMS IoU threshold\n max_num (int): if there are more than max_num bboxes after NMS,\n only top max_num will be kept.\n score_factors (Tensor): The factors multiplied to scores before\n applying NMS\n\n Returns:\n tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \\\n are 0-based.\n \"\"\"\n num_classes = multi_scores.size(1) - 1\n # exclude background category\n if multi_bboxes.shape[1] > 4:\n bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)\n else:\n bboxes = multi_bboxes[:, None].expand(\n multi_scores.size(0), num_classes, 4)\n scores = multi_scores[:, :-1]\n\n # filter out boxes with low scores\n valid_mask = scores > score_thr\n\n # We use masked_select for ONNX exporting purpose,\n # which is equivalent to bboxes = bboxes[valid_mask]\n # (TODO): as ONNX does not support repeat now,\n # we have to use this ugly code\n bboxes = torch.masked_select(\n bboxes,\n torch.stack((valid_mask, valid_mask, valid_mask, valid_mask),\n -1)).view(-1, 4)\n if score_factors is not None:\n scores = scores * score_factors[:, None]\n scores = torch.masked_select(scores, valid_mask)\n labels = valid_mask.nonzero(as_tuple=False)[:, 1]\n\n if bboxes.numel() == 0:\n bboxes = multi_bboxes.new_zeros((0, 5))\n labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n\n if torch.onnx.is_in_onnx_export():\n raise RuntimeError('[ONNX Error] Can not record NMS '\n 'as it has not been executed this time')\n return bboxes, labels\n\n dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)\n\n if max_num > 0:\n dets = dets[:max_num]\n keep = keep[:max_num]\n\n return dets, labels[keep]\n" ]
[ [ "torch.onnx.is_in_onnx_export", "torch.stack", "torch.masked_select" ] ]
finalelement/MONAI
[ "8e8e1b391fa649d1227087164dba208008d00bc4" ]
[ "monai/apps/mmars/mmars.py" ]
[ "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUtilities for accessing Nvidia MMARs\n\nSee Also:\n - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html\n\"\"\"\n\nimport json\nimport os\nimport warnings\nfrom typing import Mapping, Union\n\nimport torch\n\nimport monai.networks.nets as monai_nets\nfrom monai.apps.utils import download_and_extract, logger\nfrom monai.utils.module import optional_import\n\nfrom .model_desc import MODEL_DESC\nfrom .model_desc import RemoteMMARKeys as Keys\n\n__all__ = [\"get_model_spec\", \"download_mmar\", \"load_from_mmar\"]\n\n\ndef get_model_spec(idx: Union[int, str]):\n \"\"\"get model specification by `idx`. `idx` could be index of the constant tuple of dict or the actual model ID.\"\"\"\n if isinstance(idx, int):\n return MODEL_DESC[idx]\n if isinstance(idx, str):\n key = idx.strip().lower()\n for cand in MODEL_DESC:\n if str(cand[Keys.ID]).strip().lower() == key:\n return cand\n logger.info(f\"Available specs are: {MODEL_DESC}.\")\n raise ValueError(f\"Unknown MODEL_DESC request: {idx}\")\n\n\ndef _get_all_ngc_models(pattern, page_index=0, page_size=50):\n url = \"https://api.ngc.nvidia.com/v2/search/catalog/resources/MODEL\"\n query_dict = {\n \"query\": \"\",\n \"orderBy\": [{\"field\": \"score\", \"value\": \"DESC\"}],\n \"queryFields\": [\"all\", \"description\", \"displayName\", \"name\", \"resourceId\"],\n \"fields\": [\n \"isPublic\",\n \"attributes\",\n \"guestAccess\",\n \"name\",\n \"orgName\",\n \"teamName\",\n \"displayName\",\n \"dateModified\",\n \"labels\",\n \"description\",\n ],\n \"page\": 0,\n }\n\n filter = [dict(field=\"name\", value=f\"*{pattern}*\")]\n query_dict[\"page\"] = page_index\n query_dict[\"pageSize\"] = page_size\n query_dict[\"filters\"] = filter\n query_str = json.dumps(query_dict)\n full_url = f\"{url}?q={query_str}\"\n requests_get, has_requests = optional_import(\"requests\", name=\"get\")\n if has_requests:\n resp = requests_get(full_url)\n else:\n raise ValueError(\"NGC API requires requests package. 
Please install it.\")\n model_list = json.loads(resp.text)\n model_dict = {}\n for result in model_list[\"results\"]:\n for model in result[\"resources\"]:\n current_res_id = model[\"resourceId\"]\n model_dict[current_res_id] = {\"name\": model[\"name\"]}\n for attribute in model[\"attributes\"]:\n if attribute[\"key\"] == \"latestVersionIdStr\":\n model_dict[current_res_id][\"latest\"] = attribute[\"value\"]\n return model_dict\n\n\ndef _get_ngc_url(model_name: str, version: str, model_prefix=\"\"):\n return f\"https://api.ngc.nvidia.com/v2/models/{model_prefix}{model_name}/versions/{version}/zip\"\n\n\ndef _get_ngc_doc_url(model_name: str, model_prefix=\"\"):\n return f\"https://ngc.nvidia.com/catalog/models/{model_prefix}{model_name}\"\n\n\ndef download_mmar(item, mmar_dir=None, progress: bool = True, api: bool = False, version: int = -1):\n \"\"\"\n Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.\n\n See Also:\n - https://docs.nvidia.com/clara/\n - Nvidia NGC Registry CLI\n - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html\n\n Args:\n item: the corresponding model item from `MODEL_DESC`.\n Or when api is True, the substring to query NGC's model name field.\n mmar_dir: target directory to store the MMAR, default is `mmars` subfolder under `torch.hub get_dir()`.\n progress: whether to display a progress bar.\n api: whether to query NGC and download via api\n version: which version of MMAR to download. -1 means the latest from ngc.\n\n Examples::\n >>> from monai.apps import download_mmar\n >>> download_mmar(\"clara_pt_prostate_mri_segmentation_1\", mmar_dir=\".\")\n >>> download_mmar(\"prostate_mri_segmentation\", mmar_dir=\".\", api=True)\n\n\n Returns:\n The local directory of the downloaded model.\n If api is True, a list of local directories of downloaded models.\n \"\"\"\n if not mmar_dir:\n get_dir, has_home = optional_import(\"torch.hub\", name=\"get_dir\")\n if has_home:\n mmar_dir = os.path.join(get_dir(), \"mmars\")\n else:\n raise ValueError(\"mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?\")\n\n if api:\n model_dict = _get_all_ngc_models(item)\n if len(model_dict) == 0:\n raise ValueError(f\"api query returns no item for pattern {item}. 
Please change or shorten it.\")\n model_dir_list = []\n for k, v in model_dict.items():\n ver = v[\"latest\"] if version == -1 else str(version)\n download_url = _get_ngc_url(k, ver)\n model_dir = os.path.join(mmar_dir, v[\"name\"])\n download_and_extract(\n url=download_url,\n filepath=os.path.join(mmar_dir, f'{v[\"name\"]}_{ver}.zip'),\n output_dir=model_dir,\n hash_val=None,\n hash_type=\"md5\",\n file_type=\"zip\",\n has_base=False,\n progress=progress,\n )\n model_dir_list.append(model_dir)\n return model_dir_list\n\n if not isinstance(item, Mapping):\n item = get_model_spec(item)\n\n ver = item.get(Keys.VERSION, 1)\n if version > 0:\n ver = str(version)\n model_fullname = f\"{item[Keys.NAME]}_{ver}\"\n model_dir = os.path.join(mmar_dir, model_fullname)\n model_url = item.get(Keys.URL) or _get_ngc_url(item[Keys.NAME], version=ver, model_prefix=\"nvidia/med/\")\n download_and_extract(\n url=model_url,\n filepath=os.path.join(mmar_dir, f\"{model_fullname}.{item[Keys.FILE_TYPE]}\"),\n output_dir=model_dir,\n hash_val=item[Keys.HASH_VAL],\n hash_type=item[Keys.HASH_TYPE],\n file_type=item[Keys.FILE_TYPE],\n has_base=False,\n progress=progress,\n )\n return model_dir\n\n\ndef load_from_mmar(\n item,\n mmar_dir=None,\n progress: bool = True,\n version: int = -1,\n map_location=None,\n pretrained=True,\n weights_only=False,\n model_key: str = \"model\",\n):\n \"\"\"\n Download and extract Medical Model Archive (MMAR) model weights from Nvidia Clara Train.\n\n Args:\n item: the corresponding model item from `MODEL_DESC`.\n mmar_dir: : target directory to store the MMAR, default is mmars subfolder under `torch.hub get_dir()`.\n progress: whether to display a progress bar when downloading the content.\n version: version number of the MMAR. Set it to `-1` to use `item[Keys.VERSION]`.\n map_location: pytorch API parameter for `torch.load` or `torch.jit.load`.\n pretrained: whether to load the pretrained weights after initializing a network module.\n weights_only: whether to load only the weights instead of initializing the network module and assign weights.\n model_key: a key to search in the model file or config file for the model dictionary.\n Currently this function assumes that the model dictionary has\n `{\"[name|path]\": \"test.module\", \"args\": {'kw': 'test'}}`.\n\n Examples::\n >>> from monai.apps import load_from_mmar\n >>> unet_model = load_from_mmar(\"clara_pt_prostate_mri_segmentation_1\", mmar_dir=\".\", map_location=\"cpu\")\n >>> print(unet_model)\n\n See Also:\n https://docs.nvidia.com/clara/\n \"\"\"\n if not isinstance(item, Mapping):\n item = get_model_spec(item)\n model_dir = download_mmar(item=item, mmar_dir=mmar_dir, progress=progress, version=version)\n model_file = os.path.join(model_dir, item[Keys.MODEL_FILE])\n logger.info(f'\\n*** \"{item[Keys.ID]}\" available at {model_dir}.')\n\n # loading with `torch.jit.load`\n if f\"{model_file}\".endswith(\".ts\"):\n if not pretrained:\n warnings.warn(\"Loading a ScriptModule, 'pretrained' option ignored.\")\n if weights_only:\n warnings.warn(\"Loading a ScriptModule, 'weights_only' option ignored.\")\n return torch.jit.load(model_file, map_location=map_location)\n\n # loading with `torch.load`\n model_dict = torch.load(model_file, map_location=map_location)\n if weights_only:\n return model_dict.get(model_key, model_dict) # model_dict[model_key] or model_dict directly\n\n # 1. 
search `model_dict['train_config]` for model config spec.\n model_config = _get_val(dict(model_dict).get(\"train_conf\", {}), key=model_key, default={})\n if not model_config:\n # 2. search json CONFIG_FILE for model config spec.\n json_path = os.path.join(model_dir, item.get(Keys.CONFIG_FILE, \"config_train.json\"))\n with open(json_path) as f:\n conf_dict = json.load(f)\n conf_dict = dict(conf_dict)\n model_config = _get_val(conf_dict, key=model_key, default={})\n if not model_config:\n # 3. search `model_dict` for model config spec.\n model_config = _get_val(dict(model_dict), key=model_key, default={})\n\n if not (model_config and isinstance(model_config, Mapping)):\n raise ValueError(\n f\"Could not load model config dictionary from config: {item.get(Keys.CONFIG_FILE)}, \"\n f\"or from model file: {item.get(Keys.MODEL_FILE)}.\"\n )\n\n # parse `model_config` for model class and model parameters\n if model_config.get(\"name\"): # model config section is a \"name\"\n model_name = model_config[\"name\"]\n model_cls = monai_nets.__dict__[model_name]\n elif model_config.get(\"path\"): # model config section is a \"path\"\n # https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html\n model_module, model_name = model_config.get(\"path\", \".\").rsplit(\".\", 1)\n model_cls, has_cls = optional_import(module=model_module, name=model_name)\n if not has_cls:\n raise ValueError(\n f\"Could not load MMAR model config {model_config.get('path', '')}, \"\n f\"Please make sure MMAR's sub-folders in '{model_dir}' is on the PYTHONPATH.\"\n \"See also: https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html\"\n )\n else:\n raise ValueError(f\"Could not load model config {model_config}.\")\n\n logger.info(f\"*** Model: {model_cls}\")\n model_kwargs = model_config.get(\"args\", None)\n if model_kwargs:\n model_inst = model_cls(**model_kwargs)\n logger.info(f\"*** Model params: {model_kwargs}\")\n else:\n model_inst = model_cls()\n if pretrained:\n model_inst.load_state_dict(model_dict.get(model_key, model_dict))\n logger.info(\"\\n---\")\n doc_url = item.get(Keys.DOC) or _get_ngc_doc_url(item[Keys.NAME], model_prefix=\"nvidia:med:\")\n logger.info(f\"For more information, please visit {doc_url}\\n\")\n return model_inst\n\n\ndef _get_val(input_dict: Mapping, key=\"model\", default=None):\n \"\"\"\n Search for the item with `key` in `config_dict`.\n Returns: the first occurrence of `key` in a breadth first search.\n \"\"\"\n if key in input_dict:\n return input_dict[key]\n for sub_dict in input_dict:\n val = input_dict[sub_dict]\n if isinstance(val, Mapping):\n found_val = _get_val(val, key=key, default=None)\n if found_val is not None:\n return found_val\n return default\n" ]
[ [ "torch.jit.load", "torch.load" ] ]
zhangdongkun98/rl-lib
[ "50e36c18b130cff40abc6621923becd6cdc48e2b" ]
[ "rllib/utils/tools.py" ]
[ "\nimport torch\nimport torch.nn as nn\n\n\ndef soft_update(target, source, t):\n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_( (1 - t) * target_param.data + t * source_param.data )\n\ndef hard_update(target, source):\n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(source_param.data)\n\n\n\ndef init_weights(m):\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.orthogonal_(m.weight)\n try: nn.init.constant_(m.bias, 0.01)\n except: pass\n if isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if name.startswith('weight'): nn.init.orthogonal_(param)\n return\n\n\n\n'''\nhttps://github.com/MadryLab/implementation-matters.git\n'''\n\ndef init_weights_new(m):\n for p in m.parameters():\n if len(p.data.shape) >= 2:\n orthogonal_init(p.data)\n else:\n p.data.zero_()\n return\n\ndef orthogonal_init(tensor, gain=1):\n '''\n Fills the input `Tensor` using the orthogonal initialization scheme from OpenAI\n Args:\n tensor: an n-dimensional `torch.Tensor`, where :math:`n \\geq 2`\n gain: optional scaling factor\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> orthogonal_init(w)\n '''\n if tensor.ndimension() < 2:\n raise ValueError(\"Only tensors with 2 or more dimensions are supported\")\n\n rows = tensor.size(0)\n cols = tensor[0].numel()\n flattened = tensor.new(rows, cols).normal_(0, 1)\n\n if rows < cols:\n flattened.t_()\n\n # Compute the QR factorization\n u, s, v = torch.svd(flattened, some=True)\n if rows < cols:\n u.t_()\n q = u if tuple(u.shape) == (rows, cols) else v\n with torch.no_grad():\n tensor.view_as(q).copy_(q)\n tensor.mul_(gain)\n return tensor\n\n\n" ]
[ [ "torch.nn.init.orthogonal_", "torch.no_grad", "torch.nn.init.constant_", "torch.svd" ] ]
flycoderRuan/rzq_retinanet
[ "a449398745cea8b5e53c0caecdb8039a89e77379" ]
[ "retinanet/losses.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\n\ndef calc_iou(a, b):\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])\n ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])\n\n iw = torch.clamp(iw, min=0)\n ih = torch.clamp(ih, min=0)\n\n ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih\n\n ua = torch.clamp(ua, min=1e-8)\n\n intersection = iw * ih\n\n IoU = intersection / ua\n\n return IoU\n\nclass FocalLoss(nn.Module):\n #def __init__(self):\n\n def forward(self, classifications, regressions, anchors, annotations):\n alpha = 0.25\n gamma = 2.0\n batch_size = classifications.shape[0]\n classification_losses = []\n regression_losses = []\n\n anchor = anchors[0, :, :]\n\n anchor_widths = anchor[:, 2] - anchor[:, 0]\n anchor_heights = anchor[:, 3] - anchor[:, 1]\n anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths\n anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights\n\n for j in range(batch_size):\n\n classification = classifications[j, :, :]\n regression = regressions[j, :, :]\n\n bbox_annotation = annotations[j, :, :]\n bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]\n\n if bbox_annotation.shape[0] == 0:\n regression_losses.append(torch.tensor(0).float().cuda())\n classification_losses.append(torch.tensor(0).float().cuda())\n\n continue\n\n classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)\n\n IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4]) # num_anchors x num_annotations\n\n IoU_max, IoU_argmax = torch.max(IoU, dim=1) # num_anchors x 1\n\n #import pdb\n #pdb.set_trace()\n\n # compute the loss for classification\n targets = torch.ones(classification.shape) * -1\n targets = targets.cuda()\n\n targets[torch.lt(IoU_max, 0.4), :] = 0\n\n #torch.ge(input, other, out=None) → Tensor\n #逐元素比较input和other,即是否 input>=otherinput>=other。 \n positive_indices = torch.ge(IoU_max, 0.5)\n\n num_positive_anchors = positive_indices.sum()\n\n assigned_annotations = bbox_annotation[IoU_argmax, :]\n\n targets[positive_indices, :] = 0\n targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1\n\n alpha_factor = torch.ones(targets.shape).cuda() * alpha\n\n alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)\n focal_weight = torch.where(torch.eq(targets, 1.), 1. 
- classification, classification)\n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))\n\n # cls_loss = focal_weight * torch.pow(bce, gamma)\n cls_loss = focal_weight * bce\n\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())\n\n classification_losses.append(cls_loss.sum()/torch.clamp(num_positive_anchors.float(), min=1.0))\n\n # compute the loss for regression\n\n if positive_indices.sum() > 0:\n assigned_annotations = assigned_annotations[positive_indices, :]\n\n anchor_widths_pi = anchor_widths[positive_indices]\n anchor_heights_pi = anchor_heights[positive_indices]\n anchor_ctr_x_pi = anchor_ctr_x[positive_indices]\n anchor_ctr_y_pi = anchor_ctr_y[positive_indices]\n\n gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]\n gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]\n gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths\n gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights\n\n # clip widths to 1\n gt_widths = torch.clamp(gt_widths, min=1)\n gt_heights = torch.clamp(gt_heights, min=1)\n\n targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi\n targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi\n targets_dw = torch.log(gt_widths / anchor_widths_pi)\n targets_dh = torch.log(gt_heights / anchor_heights_pi)\n\n targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))\n targets = targets.t()\n\n targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()\n\n negative_indices = 1 + (~positive_indices)\n\n regression_diff = torch.abs(targets - regression[positive_indices, :])\n\n regression_loss = torch.where(\n torch.le(regression_diff, 1.0 / 9.0),\n 0.5 * 9.0 * torch.pow(regression_diff, 2),\n regression_diff - 0.5 / 9.0\n )\n regression_losses.append(regression_loss.mean())\n else:\n regression_losses.append(torch.tensor(0).float().cuda())\n\n return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0, keepdim=True)\n\n \n" ]
[ [ "torch.zeros", "torch.stack", "torch.eq", "torch.max", "torch.ne", "torch.le", "torch.clamp", "torch.ones", "torch.unsqueeze", "torch.abs", "torch.tensor", "torch.lt", "torch.log", "torch.ge", "torch.Tensor", "torch.pow" ] ]
atb00ker/scripts-lab
[ "71a5cc9c7f301c274798686db4a227e84b65926a" ]
[ "scripts/spam-filter/LogisticRegressionModel.py" ]
[ "import pandas as pd\nimport numpy as np\n# scikit-learn\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.feature_extraction.text import (\n CountVectorizer,\n TfidfVectorizer,\n HashingVectorizer\n)\nfrom sklearn.model_selection import train_test_split, cross_val_score\n\n# DataSet\ndata_set = pd.read_csv('SpamCollection', delimiter='\\t', header=None)\nx_train_data, x_test_data, y_train, y_test = train_test_split(\n data_set[1], data_set[0], test_size=0.15)\n\n# Vectorizers\nvectorizer = CountVectorizer()\n\n# Logistic Regression\nx_train = vectorizer.fit_transform(x_train_data)\nclassifier = LogisticRegression()\nclassifier.fit(x_train, y_train)\n\n# Testing Accuracy\nx_test = vectorizer.transform(x_test_data)\npredictions = classifier.predict(x_test)\naccuracy = accuracy_score(y_test, predictions)\nprint('Accuracy score: {}'.format(accuracy))\n\n\ndef predict_spam(input):\n x_test = vectorizer.transform([input])\n prediction = classifier.predict(x_test)\n return prediction[0]\n" ]
[ [ "sklearn.metrics.accuracy_score", "sklearn.linear_model.logistic.LogisticRegression", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
mlund/scipp
[ "26648fdcda49b21a7aacdafd58625fab7ee3403b" ]
[ "tests/plotting/plot_methods_test.py" ]
[ "# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)\n# @file\n# @author Neil Vaytet\n\nimport numpy as np\nimport scipp as sc\nimport matplotlib\n\nmatplotlib.use('Agg')\n\n\ndef test_plot_variable():\n v = sc.arange('x', 10.0, unit='m')\n v.plot().close()\n\n\ndef test_plot_data_array():\n da = sc.DataArray(data=sc.Variable(dims=['x'], values=np.random.random(10)),\n coords={'x': sc.arange('x', 10.0, unit='m')})\n da.plot().close()\n\n\ndef test_plot_dataset():\n N = 100\n ds = sc.Dataset()\n ds['a'] = sc.Variable(dims=['x'], values=np.random.random(N), unit=sc.units.K)\n ds['b'] = sc.Variable(dims=['x'], values=np.random.random(N), unit=sc.units.K)\n ds.coords['x'] = sc.arange('x', float(N), unit='m')\n ds.plot().close()\n\n\ndef test_plot_data_array_with_kwargs():\n da = sc.DataArray(data=sc.Variable(dims=['y', 'x'],\n values=np.random.random([10, 5])),\n coords={\n 'x': sc.arange('x', 5.0, unit='m'),\n 'y': sc.arange('y', 10.0, unit='m')\n })\n da.plot(cmap=\"magma\", norm=\"log\").close()\n" ]
[ [ "matplotlib.use", "numpy.random.random" ] ]
FCeoni/astropop-1
[ "cc7fa7f5e20a7335bf30ee70f18a178222f80cd7" ]
[ "astropop/tests/test_pyutils.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport sys\nimport os\nimport pytest\nfrom astropop.py_utils import mkdir_p, string_fix, process_list, \\\n check_iterable, batch_key_replace, \\\n run_command, IndexedDict\nimport numpy as np\n\n\ndef test_mkdir(tmpdir):\n p = tmpdir.join('level1/level2').strpath\n mkdir_p(p)\n assert os.path.isdir(p)\n # mkdir a existent dir should not raise error\n mkdir_p(p)\n\n\ndef test_mkdir_oserror(tmpdir):\n p = '/bin/bash'\n with pytest.raises(OSError):\n mkdir_p(p)\n\n\ndef test_run_command():\n com = [\"python\", \"-c\", \"print(__import__('sys').version)\"]\n stdout = []\n stderr = []\n res, out, err = run_command(com, stdout=stdout, stderr=stderr,\n stdout_loglevel='WARN')\n assert out is stdout\n assert err is stderr\n assert '\\n'.join(stdout) == sys.version\n\n\ndef test_run_command_string():\n com = \"python -c \\\"print(__import__('sys').version)\\\"\"\n stdout = []\n stderr = []\n res, out, err = run_command(com, stdout=stdout, stderr=stderr,\n stdout_loglevel='WARN')\n assert out is stdout\n assert err is stderr\n assert '\\n'.join(stdout) == sys.version\n\n\ndef test_process_list():\n def dummy_func(i):\n return 1\n a = np.zeros(20)\n b = np.ones(20)\n c = process_list(dummy_func, a)\n assert np.array_equal(b, c)\n assert not np.array_equal(a, c)\n\n\ndef test_process_list_with_args():\n def dummy_func(i, a, b):\n return (i+a)*b\n i_array = np.arange(20)\n a_val = 2\n b_val = 3\n res = process_list(dummy_func, i_array, a_val, b=b_val)\n assert np.array_equal((i_array+a_val)*b_val, res)\n\n\ndef test_check_iterabel_array():\n a = [1, 2, 3, 4, 5]\n assert check_iterable(a)\n\n\ndef test_check_iterabel_string():\n a = '12345'\n assert not check_iterable(a)\n\n\ndef test_check_iterabel_nparray():\n a = np.zeros(20)\n assert check_iterable(a)\n\n\ndef test_check_iterabel_number():\n a = 10\n assert not check_iterable(a)\n\n\ndef test_check_iterabel_range():\n a = range(10)\n assert check_iterable(a)\n\n\ndef test_check_iterabel_dict():\n a = dict(a=1, b=2, c=3, d=4)\n assert check_iterable(a)\n assert check_iterable(a.items())\n assert check_iterable(a.keys())\n assert check_iterable(a.values())\n\n\ndef test_batch_key_replace():\n dic1 = {'a': '{b} value', 'b': '6{c}', 'c': 2}\n res = batch_key_replace(dic1)\n assert dic1['a'] == '62 value'\n\n\ndef test_batch_key_replace_list():\n dic1 = {'a': '{b} value', 'b': ['6{c}', '4{d}'], 'c': 1, 'd': 2}\n res = batch_key_replace(dic1)\n assert dic1['a'] == \"['61', '42'] value\"\n\n\n@pytest.mark.parametrize(\"inp, enc, res\", [(\"a!^1ö~[😀\", None, \"a!^1ö~[😀\"),\n (\"a!^1ö~[😀\", \"utf-8\", \"a!^1ö~[😀\"),\n (\"a!1[\", 'latin-1', \"a!1[\"),\n (b'bytes', None, 'bytes'),\n (42, None, \"42\")])\ndef test_string_fix(inp, enc, res):\n if enc is not None:\n inp = inp.encode(enc)\n assert string_fix(inp, enc) == res\n\n\ndef test_indexeddict_create():\n d = dict(a=1, b=2, c=3)\n i = IndexedDict(a=1, b=2, c=3)\n assert isinstance(i, dict)\n assert len(d) == len(i)\n # Python 3.6 and above ensure items order\n assert list(d.keys()) == list(i.keys())\n assert list(d.values()) == list(i.values())\n assert i == d\n\n\ndef test_indexeddict_insert_at():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_at(2, 'e', 5)\n assert a == {'a': 1, 'b': 2, 'e': 5, 'c': 3, 'd': 4}\n\n\ndef test_indexeddict_insert_at_first():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_at(0, 'e', 5)\n assert a == {'e': 5, 'a': 1, 'b': 2, 'c': 3, 'd': 4}\n\n\ndef test_indexeddict_insert_at_last():\n a = 
IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_at(4, 'e', 5)\n assert a == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n\n\ndef test_indexeddict_insert_at_away():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_at(42, 'e', 5)\n assert a == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n\n\ndef test_indexeddict_insert_at_negative():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_at(-2, 'e', 5)\n assert a == {'a': 1, 'b': 2, 'c': 3, 'e': 5, 'd': 4}\n\n\ndef test_indexeddict_after():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_after('b', 'e', 5)\n assert a == {'a': 1, 'b': 2, 'e': 5, 'c': 3, 'd': 4}\n\n\ndef test_indexeddict_before():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_before('b', 'e', 5)\n assert a == {'a': 1, 'e': 5, 'b': 2, 'c': 3, 'd': 4}\n\n\ndef test_indexeddict_existing_before_before():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_before('b', 'c', 3)\n assert a == {'a': 1, 'c': 3, 'b': 2, 'd': 4}\n\n\ndef test_indexeddict_existing_after_before():\n a = IndexedDict(a=1, b=2, c=3, d=4, e=5)\n a.insert_before('e', 'c', 4)\n assert a == {'a': 1, 'b': 2, 'd': 4, 'c': 4, 'e': 5}\n\n\ndef test_indexeddict_existing_before_after():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_after('b', 'c', 3)\n assert a == {'a': 1, 'c': 3, 'b': 2, 'd': 4}\n\n\ndef test_indexeddict_existing_after_after():\n a = IndexedDict(a=1, b=2, c=3, d=4, e=5)\n a.insert_after('e', 'c', 4)\n assert a == {'a': 1, 'b': 2, 'd': 4, 'c': 4, 'e': 5}\n\n\ndef test_indexeddict_first():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_before('a', 'e', 5)\n assert a == {'e': 5, 'a': 1, 'b': 2, 'c': 3, 'd': 4}\n\n\ndef test_indexeddict_last():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n a.insert_after('d', 'e', 5)\n assert a == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n\n\n@pytest.mark.parametrize('val, res', [('a', 0), ('b', 1), ('c', 2), ('d', 3)])\ndef test_indexeddict_index(val, res):\n a = IndexedDict(a=1, b=2, c=3, d=4)\n assert a.index(val) == res\n\n\ndef test_indexeddict_invalid_key():\n a = IndexedDict(a=1, b=2, c=3, d=4)\n with pytest.raises(KeyError) as exc:\n a.index('e')\n" ]
[ [ "numpy.arange", "numpy.ones", "numpy.array_equal", "numpy.zeros" ] ]
fmi-basel/gzenke-nonlinear-transient-amplification
[ "f3b0c8c89b42c34f1aad740c7026865cf3164f1d" ]
[ "src/Fig_6_supplement_1_Plotting.py" ]
[ "import numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nimport matplotlib.patches as mpatches\nimport scipy.io as sio\n\n# plotting configuration\nratio = 1.5\nfigure_len, figure_width = 15*ratio, 12*ratio\nfont_size_1, font_size_2 = 36*ratio, 36*ratio\nlegend_size = 18*ratio\nline_width, tick_len = 3*ratio, 10*ratio\nmarker_size = 15*ratio\nmarker_edge_width = 3 * ratio\nplot_line_width = 5*ratio\nhfont = {'fontname': 'Arial'}\n\nratio_80, ratio_85, ratio_90, ratio_95, ratio_100, ratio_105, ratio_110, ratio_115, ratio_120, ratio_125, ratio_130, ratio_135, ratio_140 = [], [], [], [], [], [], [], [], [], [], [], [], []\nn_loop = 20\nfor loop_idx in range(n_loop):\n bs_80 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_80_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_80 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_80_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_85 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_85_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_85 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_85_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_90 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_90_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_90 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_90_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_95 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_95_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_95 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_95_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_100 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_100_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_100 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_100_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_105 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_105_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_105 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_105_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_110 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_110_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_110 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_110_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_115 = sio.loadmat(\n 
'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_115_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_115 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_115_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_120 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_120_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_120 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_120_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_125 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_125_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_125 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_125_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_130 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_130_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_130 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_130_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_135 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_135_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_135 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_135_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n bs_140 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_bl_firing_amp_140_' + str(\n loop_idx) + '.mat')['mean_bl_firing_4_2'][0]\n ss_140 = sio.loadmat(\n 'data_sum/spiking_neural_network/Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_ss_firing_amp_140_' + str(\n loop_idx) + '.mat')['mean_ss_firing_4_2'][0]\n\n ratio_80.append(ss_80 / bs_80)\n ratio_85.append(ss_85 / bs_85)\n ratio_90.append(ss_90 / bs_90)\n ratio_95.append(ss_95 / bs_95)\n ratio_100.append(ss_100 / bs_100)\n ratio_105.append(ss_105 / bs_105)\n ratio_110.append(ss_110 / bs_110)\n ratio_115.append(ss_115 / bs_115)\n ratio_120.append(ss_120 / bs_120)\n ratio_125.append(ss_125 / bs_125)\n ratio_130.append(ss_130 / bs_130)\n ratio_135.append(ss_135 / bs_135)\n ratio_140.append(ss_140 / bs_140)\n\n# plotting\nplt.figure(figsize=(figure_len, figure_width))\nax = plt.gca()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(True)\nax.spines['left'].set_visible(True)\nfor axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(line_width)\nplt.tick_params(width=line_width, length=tick_len)\n# plt.yscale('symlog', linthreshy=1)\n\n# sns.boxplot(data=[ratio_80, ratio_85, ratio_90, ratio_95, ratio_100, ratio_105, ratio_110, ratio_115, ratio_120, ratio_125, ratio_130, ratio_135, ratio_140], width=0.4, linewidth=line_width)\nax = sns.boxplot(data=[ratio_80, ratio_90, ratio_100, ratio_110, ratio_120, ratio_130, ratio_140], width=0.45,\n 
linewidth=line_width, color='white') # , showfliers = False)\n\nprint(len(ax.lines))\n# iterate over boxes\nfor m, box in enumerate(ax.artists):\n print(m)\n box.set_edgecolor('black')\n box.set_facecolor('white')\n\n # iterate over whiskers and median lines\n for j in range(6 * m, 6 * (m + 1)):\n # print(j)\n ax.lines[j].set_color('black')\n\n# plot the data points\nfor i in range(len(ratio_80)):\n if i % 2 == 0:\n plt.plot(0 - 0.1, ratio_80[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n else:\n plt.plot(0 + 0.1, ratio_80[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n\n# for i in range(len(ratio_85)):\n# if i%2 == 0:\n# plt.plot(1 - 0.1, ratio_85[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n# else:\n# plt.plot(1 + 0.1, ratio_85[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n\nfor i in range(len(ratio_90)):\n if i % 2 == 0:\n plt.plot(1 - 0.1, ratio_90[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n else:\n plt.plot(1 + 0.1, ratio_90[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n\n# for i in range(len(ratio_95)):\n# if i%2 == 0:\n# plt.plot(3 - 0.1, ratio_95[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n# else:\n# plt.plot(3 + 0.1, ratio_95[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n\n\nfor i in range(len(ratio_100)):\n if i % 2 == 0:\n plt.plot(2 - 0.1, ratio_100[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n else:\n plt.plot(2 + 0.1, ratio_100[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n\n# for i in range(len(ratio_105)):\n# if i%2 == 0:\n# plt.plot(5 - 0.1, ratio_105[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n# else:\n# plt.plot(5 + 0.1, ratio_105[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n\nfor i in range(len(ratio_110)):\n if i % 2 == 0:\n plt.plot(3 - 0.1, ratio_110[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n else:\n plt.plot(3 + 0.1, ratio_110[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n\n# for i in 
range(len(ratio_115)):\n# if i%2 == 0:\n# plt.plot(7 - 0.1, ratio_115[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n# else:\n# plt.plot(7 + 0.1, ratio_115[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n\nfor i in range(len(ratio_120)):\n if i % 2 == 0:\n plt.plot(4 - 0.1, ratio_120[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n else:\n plt.plot(4 + 0.1, ratio_120[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n\n# for i in range(len(ratio_125)):\n# if i%2 == 0:\n# plt.plot(9 - 0.1, ratio_125[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n# else:\n# plt.plot(9 + 0.1, ratio_125[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n\nfor i in range(len(ratio_130)):\n if i % 2 == 0:\n plt.plot(5 - 0.1, ratio_130[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n else:\n plt.plot(5 + 0.1, ratio_130[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n\n# for i in range(len(ratio_135)):\n# if i%2 == 0:\n# plt.plot(11 - 0.1, ratio_135[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n# else:\n# plt.plot(11 + 0.1, ratio_135[i], linestyle='none', marker='o', fillstyle='full',\n# markeredgewidth=marker_edge_width, markersize=marker_size,\n# markeredgecolor='black', markerfacecolor='none')\n\nfor i in range(len(ratio_140)):\n if i % 2 == 0:\n plt.plot(6 - 0.1, ratio_140[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n else:\n plt.plot(6 + 0.1, ratio_140[i], linestyle='none', marker='o', fillstyle='full',\n markeredgewidth=marker_edge_width, markersize=marker_size,\n markeredgecolor='black', markerfacecolor='none')\n\nplt.xticks([0, 2, 4, 6], ['8/30', '10/30', '12/30', '14/30'], fontsize=font_size_1, **hfont)\n# plt.xticks([0, 1, 2, 3], ['8/30', '8.5/30', '9/30', '10/30'], fontsize=font_size_1, **hfont)\nplt.yticks([0, 1, 2, 3, 4, 5], fontsize=font_size_1, **hfont)\nplt.xlabel('Feedforward input', fontsize=font_size_1, **hfont)\nplt.ylabel('Fixed point to baseline ratio', fontsize=font_size_1, **hfont)\nplt.xlim([-0.5, 6.5])\n# plt.xlim([-0.5, 12.5])\nplt.ylim([0, 5])\nplt.hlines(y=1, xmin=-0.5, xmax=6.5, colors='k', linestyles=[(0, (6, 6, 6, 6))], linewidth=line_width)\nplt.savefig('paper_figures/png/Revision_Fig_Point_1_2_Unstimulated_cotuned_neuron_SNN.png')\nplt.savefig('paper_figures/pdf/Revision_Fig_Point_1_2_Unstimulated_cotuned_neuron_SNN.pdf')" ]
[ [ "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.hlines", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gca", "matplotlib.pyplot.xticks" ] ]
StrangeGirlMurph/CodingProjects
[ "8400a610c0a54a2721a73824df7aab4e92ec891d" ]
[ "07-AdventOfCode2021/05/day-05.py" ]
[ "import numpy as np\n\nlines = open(\"input.txt\", \"r\").readlines()\nlines = [line[:-1] for line in lines]\nlines = [line.split(\" -> \") for line in lines]\nlines = [list(([int(num) for num in coordinate.split(\",\")]) for coordinate in line) for line in lines]\n\n# filter all diagonal\ntemplines = []\nfor idx, line in enumerate(lines):\n if (line[0][0] == line[1][0] or line[0][1] == line[1][1]):\n\n # the I don't want to come up with a smart solution so I'll just sort the coordinates part\n difference = np.diff([line[0], line[1]], axis=0)\n if np.sum(difference) < 0:\n templines.append(line[::-1])\n else:\n templines.append(line)\n\nlines = templines\n\n# print(lines)\n\n# find biggest values\nbiggestX = 0\nbiggestY = 0\nfor line in lines:\n for coordinate in line:\n if coordinate[0] > biggestX:\n biggestX = coordinate[0]\n if coordinate[1] > biggestY:\n biggestY = coordinate[1]\ngrid = np.zeros((biggestY, biggestX))\n\n# \"draw\" lines in grid\nfor line in lines:\n # x fixed\n if line[0][0] == line[1][0]:\n for i in range(abs(line[0][1] - line[1][1]) + 1):\n grid[line[0][1] + i-1][line[0][0]-1] += 1 # y,x\n\n # y fixed\n if line[0][1] == line[1][1]:\n for i in range(abs(line[0][0] - line[1][0]) + 1):\n grid[line[0][1]-1][line[0][0] + i-1] += 1 # y,x\n\ntwoOverlaps = 0\n\nfor line in grid:\n for item in line:\n if item >= 2:\n twoOverlaps += 1\n\n# print(grid)\n\nprint(\"At how many points do at least two lines overlap?\", twoOverlaps)\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.diff" ] ]
ardalanghadimi/ATC
[ "cbe7eece9c7b8b316a0503f9e6e805c47f688d77" ]
[ "openmdao/recorders/sqlite_recorder.py" ]
[ "\"\"\"\nClass definition for SqliteRecorder, which provides dictionary backed by SQLite.\n\"\"\"\n\nimport io\nimport os\nimport sqlite3\n\nimport warnings\nimport numpy as np\nfrom six import iteritems\nfrom six.moves import cPickle as pickle\n\nfrom openmdao.recorders.base_recorder import BaseRecorder\nfrom openmdao.utils.mpi import MPI\nfrom openmdao.utils.record_util import values_to_array, check_path\nfrom openmdao.utils.options_dictionary import OptionsDictionary\nfrom openmdao.core.driver import Driver\nfrom openmdao.core.system import System\n\n\ndef array_to_blob(array):\n \"\"\"\n Make numpy array in to BLOB type.\n\n Convert a numpy array to something that can be written\n to a BLOB field in sqlite.\n\n TODO: move this to a util file?\n\n Parameters\n ----------\n array : array\n The array that will be converted to a blob.\n\n Returns\n -------\n blob :\n The blob created from the array.\n\n \"\"\"\n out = io.BytesIO()\n np.save(out, array)\n out.seek(0)\n return sqlite3.Binary(out.read())\n\n\ndef blob_to_array(blob):\n \"\"\"\n Convert sqlite BLOB to numpy array.\n\n TODO: move this to a util file?\n\n Parameters\n ----------\n blob : blob\n The blob that will be converted to an array.\n\n Returns\n -------\n array :\n The array created from the blob.\n \"\"\"\n out = io.BytesIO(blob)\n out.seek(0)\n return np.load(out)\n\n\nformat_version = 1\n\n\nclass SqliteRecorder(BaseRecorder):\n \"\"\"\n Recorder that saves cases in a sqlite db.\n\n Attributes\n ----------\n model_viewer_data : dict\n Dict that holds the data needed to generate N2 diagram.\n connection : sqlite connection object\n Connection to the sqlite3 database.\n _abs2prom : {'input': dict, 'output': dict}\n Dictionary mapping absolute names to promoted names.\n _prom2abs : {'input': dict, 'output': dict}\n Dictionary mapping promoted names to absolute names.\n _abs2meta : {'name': {}}\n Dictionary mapping absolute variable names to their metadata including units,\n bounds, and scaling.\n _pickle_version : int\n The pickle protocol version to use when pickling metadata.\n _filepath : str\n Path to the recorder file.\n _database_initialized : bool\n Flag indicating whether or not the database has been initialized.\n _record_on_proc : bool\n Flag indicating whether to record on this processor when running in parallel.\n \"\"\"\n\n def __init__(self, filepath, append=False, pickle_version=2):\n \"\"\"\n Initialize the SqliteRecorder.\n\n Parameters\n ----------\n filepath : str\n Path to the recorder file.\n append : bool\n Optional. If True, append to an existing case recorder file.\n pickle_version : int\n Optional. The pickle protocol version to use when pickling metadata.\n \"\"\"\n if append:\n raise NotImplementedError(\"Append feature not implemented for SqliteRecorder\")\n\n self.connection = None\n self.model_viewer_data = None\n\n self._abs2prom = {'input': {}, 'output': {}}\n self._prom2abs = {'input': {}, 'output': {}}\n self._abs2meta = {}\n self._pickle_version = pickle_version\n self._filepath = filepath\n self._database_initialized = False\n\n # default to record on all procs when running in parallel\n self._record_on_proc = True\n\n super(SqliteRecorder, self).__init__()\n\n def _initialize_database(self):\n \"\"\"\n Initialize the database.\n \"\"\"\n if MPI:\n rank = MPI.COMM_WORLD.rank\n if self._parallel and self._record_on_proc:\n filepath = '%s_%d' % (self._filepath, rank)\n print(\"Note: SqliteRecorder is running on multiple processors. 
\"\n \"Cases from rank %d are being written to %s.\" %\n (rank, filepath))\n elif rank == 0:\n filepath = self._filepath\n else:\n filepath = None\n else:\n filepath = self._filepath\n\n if filepath:\n try:\n os.remove(filepath)\n except OSError:\n pass\n\n self.connection = sqlite3.connect(filepath)\n with self.connection as c:\n c.execute(\"CREATE TABLE metadata( format_version INT, \"\n \"abs2prom BLOB, prom2abs BLOB, abs2meta BLOB)\")\n c.execute(\"INSERT INTO metadata(format_version, abs2prom, prom2abs) \"\n \"VALUES(?,?,?)\", (format_version, None, None))\n\n # used to keep track of the order of the case records across all three tables\n c.execute(\"CREATE TABLE global_iterations(id INTEGER PRIMARY KEY, \"\n \"record_type TEXT, rowid INT)\")\n c.execute(\"CREATE TABLE driver_iterations(id INTEGER PRIMARY KEY, \"\n \"counter INT,iteration_coordinate TEXT, timestamp REAL, \"\n \"success INT, msg TEXT, inputs BLOB, outputs BLOB)\")\n c.execute(\"CREATE TABLE system_iterations(id INTEGER PRIMARY KEY, \"\n \"counter INT, iteration_coordinate TEXT, timestamp REAL, \"\n \"success INT, msg TEXT, inputs BLOB, outputs BLOB, residuals BLOB)\")\n c.execute(\"CREATE TABLE solver_iterations(id INTEGER PRIMARY KEY, \"\n \"counter INT, iteration_coordinate TEXT, timestamp REAL, \"\n \"success INT, msg TEXT, abs_err REAL, rel_err REAL, \"\n \"solver_inputs BLOB, solver_output BLOB, solver_residuals BLOB)\")\n c.execute(\"CREATE TABLE driver_metadata(id TEXT PRIMARY KEY, \"\n \"model_viewer_data BLOB)\")\n c.execute(\"CREATE TABLE system_metadata(id TEXT PRIMARY KEY, \"\n \"scaling_factors BLOB, component_metadata BLOB)\")\n c.execute(\"CREATE TABLE solver_metadata(id TEXT PRIMARY KEY, \"\n \"solver_options BLOB, solver_class TEXT)\")\n\n self._database_initialized = True\n\n def startup(self, recording_requester):\n \"\"\"\n Prepare for a new run and create/update the abs2prom and prom2abs variables.\n\n Parameters\n ----------\n recording_requester : object\n Object to which this recorder is attached.\n \"\"\"\n super(SqliteRecorder, self).startup(recording_requester)\n\n if not self._database_initialized:\n self._initialize_database()\n\n # grab the system\n if isinstance(recording_requester, Driver):\n system = recording_requester._problem.model\n elif isinstance(recording_requester, System):\n system = recording_requester\n else:\n system = recording_requester._system\n\n # grab all of the units and type (collective calls)\n states = system._list_states_allprocs()\n desvars = system.get_design_vars(True)\n responses = system.get_responses(True)\n objectives = system.get_objectives(True)\n constraints = system.get_constraints(True)\n inputs = system._var_allprocs_abs_names['input']\n outputs = system._var_allprocs_abs_names['output']\n full_var_set = [(inputs, 'input'), (outputs, 'output'),\n (desvars, 'desvar'), (responses, 'response'),\n (objectives, 'objective'), (constraints, 'constraint')]\n\n if self.connection:\n # merge current abs2prom and prom2abs with this system's version\n for io in ['input', 'output']:\n for v in system._var_abs2prom[io]:\n self._abs2prom[io][v] = system._var_abs2prom[io][v]\n for v in system._var_allprocs_prom2abs_list[io]:\n if v not in self._prom2abs[io]:\n self._prom2abs[io][v] = system._var_allprocs_prom2abs_list[io][v]\n else:\n self._prom2abs[io][v] = list(set(self._prom2abs[io][v]) |\n set(system._var_allprocs_prom2abs_list[io][v]))\n\n for var_set, var_type in full_var_set:\n for name in var_set:\n if name not in self._abs2meta:\n self._abs2meta[name] = 
system._var_allprocs_abs2meta[name].copy()\n self._abs2meta[name]['type'] = set()\n if name in states:\n self._abs2meta[name]['explicit'] = False\n\n if var_type not in self._abs2meta[name]['type']:\n self._abs2meta[name]['type'].add(var_type)\n self._abs2meta[name]['explicit'] = True\n\n for name in inputs:\n self._abs2meta[name] = system._var_allprocs_abs2meta[name].copy()\n self._abs2meta[name]['type'] = set()\n self._abs2meta[name]['type'].add('input')\n self._abs2meta[name]['explicit'] = True\n if name in states:\n self._abs2meta[name]['explicit'] = False\n\n # store the updated abs2prom and prom2abs\n abs2prom = pickle.dumps(self._abs2prom)\n prom2abs = pickle.dumps(self._prom2abs)\n abs2meta = pickle.dumps(self._abs2meta)\n\n with self.connection as c:\n c.execute(\"UPDATE metadata SET abs2prom=?, prom2abs=?, abs2meta=?\",\n (abs2prom, prom2abs, abs2meta))\n\n def record_iteration_driver(self, recording_requester, data, metadata):\n \"\"\"\n Record data and metadata from a Driver.\n\n Parameters\n ----------\n recording_requester : object\n Driver in need of recording.\n data : dict\n Dictionary containing desvars, objectives, constraints, responses, and System vars.\n metadata : dict\n Dictionary containing execution metadata.\n \"\"\"\n if self.connection:\n outputs = data['out']\n inputs = data['in']\n\n outputs_array = values_to_array(outputs)\n inputs_array = values_to_array(inputs)\n\n outputs_blob = array_to_blob(outputs_array)\n inputs_blob = array_to_blob(inputs_array)\n\n with self.connection as c:\n c = c.cursor() # need a real cursor for lastrowid\n\n c.execute(\"INSERT INTO driver_iterations(counter, iteration_coordinate, \"\n \"timestamp, success, msg, inputs, outputs) VALUES(?,?,?,?,?,?,?)\",\n (self._counter, self._iteration_coordinate,\n metadata['timestamp'], metadata['success'], metadata['msg'],\n inputs_blob, outputs_blob))\n\n c.execute(\"INSERT INTO global_iterations(record_type, rowid) VALUES(?,?)\",\n ('driver', c.lastrowid))\n\n def record_iteration_system(self, recording_requester, data, metadata):\n \"\"\"\n Record data and metadata from a System.\n\n Parameters\n ----------\n recording_requester : System\n System in need of recording.\n data : dict\n Dictionary containing inputs, outputs, and residuals.\n metadata : dict\n Dictionary containing execution metadata.\n \"\"\"\n if self.connection:\n inputs = data['i']\n outputs = data['o']\n residuals = data['r']\n\n inputs_array = values_to_array(inputs)\n outputs_array = values_to_array(outputs)\n residuals_array = values_to_array(residuals)\n\n inputs_blob = array_to_blob(inputs_array)\n outputs_blob = array_to_blob(outputs_array)\n residuals_blob = array_to_blob(residuals_array)\n\n with self.connection as c:\n c = c.cursor() # need a real cursor for lastrowid\n\n c.execute(\"INSERT INTO system_iterations(counter, iteration_coordinate, \"\n \"timestamp, success, msg, inputs , outputs , residuals ) \"\n \"VALUES(?,?,?,?,?,?,?,?)\",\n (self._counter, self._iteration_coordinate,\n metadata['timestamp'], metadata['success'], metadata['msg'],\n inputs_blob, outputs_blob, residuals_blob))\n\n c.execute(\"INSERT INTO global_iterations(record_type, rowid) VALUES(?,?)\",\n ('system', c.lastrowid))\n\n def record_iteration_solver(self, recording_requester, data, metadata):\n \"\"\"\n Record data and metadata from a Solver.\n\n Parameters\n ----------\n recording_requester : Solver\n Solver in need of recording.\n data : dict\n Dictionary containing outputs, residuals, and errors.\n metadata : dict\n Dictionary 
containing execution metadata.\n \"\"\"\n if self.connection:\n abs = data['abs']\n rel = data['rel']\n inputs = data['i']\n outputs = data['o']\n residuals = data['r']\n\n inputs_array = values_to_array(inputs)\n outputs_array = values_to_array(outputs)\n residuals_array = values_to_array(residuals)\n\n inputs_blob = array_to_blob(inputs_array)\n outputs_blob = array_to_blob(outputs_array)\n residuals_blob = array_to_blob(residuals_array)\n\n with self.connection as c:\n c = c.cursor() # need a real cursor for lastrowid\n\n c.execute(\"INSERT INTO solver_iterations(counter, iteration_coordinate, \"\n \"timestamp, success, msg, abs_err, rel_err, \"\n \"solver_inputs, solver_output, solver_residuals) \"\n \"VALUES(?,?,?,?,?,?,?,?,?,?)\",\n (self._counter, self._iteration_coordinate,\n metadata['timestamp'], metadata['success'], metadata['msg'],\n abs, rel, inputs_blob, outputs_blob, residuals_blob))\n\n c.execute(\"INSERT INTO global_iterations(record_type, rowid) VALUES(?,?)\",\n ('solver', c.lastrowid))\n\n def record_metadata_driver(self, recording_requester):\n \"\"\"\n Record driver metadata.\n\n Parameters\n ----------\n recording_requester : Driver\n The Driver that would like to record its metadata.\n \"\"\"\n if self.connection:\n driver_class = type(recording_requester).__name__\n model_viewer_data = pickle.dumps(recording_requester._model_viewer_data,\n self._pickle_version)\n model_viewer_data = sqlite3.Binary(model_viewer_data)\n\n try:\n with self.connection as c:\n c.execute(\"INSERT INTO driver_metadata(id, model_viewer_data) \"\n \"VALUES(?,?)\", (driver_class, model_viewer_data))\n except sqlite3.IntegrityError:\n print(\"Metadata has already been recorded for %s.\" % driver_class)\n\n def record_metadata_system(self, recording_requester):\n \"\"\"\n Record system metadata.\n\n Parameters\n ----------\n recording_requester : System\n The System that would like to record its metadata.\n \"\"\"\n if self.connection:\n # Cannot handle PETScVector yet\n from openmdao.api import PETScVector\n if PETScVector and isinstance(recording_requester._outputs, PETScVector):\n return # Cannot handle PETScVector yet\n\n # collect scaling arrays\n scaling_vecs = {}\n for kind, odict in iteritems(recording_requester._vectors):\n scaling_vecs[kind] = scaling = {}\n for vecname, vec in iteritems(odict):\n scaling[vecname] = vec._scaling\n scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)\n\n # create a copy of the system's metadata excluding what is in 'options_excludes'\n user_options = OptionsDictionary()\n excludes = recording_requester.recording_options['options_excludes']\n for key in recording_requester.options._dict:\n if check_path(key, [], excludes, True):\n user_options._dict[key] = recording_requester.options._dict[key]\n user_options._read_only = recording_requester.options._read_only\n\n # try to pickle the metadata, report if it failed\n try:\n pickled_metadata = pickle.dumps(user_options, self._pickle_version)\n except Exception:\n pickled_metadata = pickle.dumps(OptionsDictionary(), self._pickle_version)\n warnings.warn(\"Trying to record options which cannot be pickled \"\n \"on system with name: %s. Use the 'options_excludes' \"\n \"recording option on system objects to avoid attempting \"\n \"to record options which cannot be pickled. 
Skipping \"\n \"recording options for this system.\" % recording_requester.name,\n RuntimeWarning)\n\n path = recording_requester.pathname\n if not path:\n path = 'root'\n\n scaling_factors = sqlite3.Binary(scaling_factors)\n pickled_metadata = sqlite3.Binary(pickled_metadata)\n\n with self.connection as c:\n c.execute(\"INSERT INTO system_metadata(id, scaling_factors, component_metadata) \"\n \"VALUES(?,?,?)\", (path, scaling_factors, pickled_metadata))\n\n def record_metadata_solver(self, recording_requester):\n \"\"\"\n Record solver metadata.\n\n Parameters\n ----------\n recording_requester : Solver\n The Solver that would like to record its metadata.\n \"\"\"\n if self.connection:\n path = recording_requester._system.pathname\n solver_class = type(recording_requester).__name__\n if not path:\n path = 'root'\n id = \"{}.{}\".format(path, solver_class)\n\n solver_options = pickle.dumps(recording_requester.options, self._pickle_version)\n\n with self.connection as c:\n c.execute(\"INSERT INTO solver_metadata(id, solver_options, solver_class) \"\n \"VALUES(?,?,?)\", (id, sqlite3.Binary(solver_options), solver_class))\n\n def close(self):\n \"\"\"\n Close `out`.\n \"\"\"\n if self.connection:\n self.connection.close()\n" ]
[ [ "numpy.load", "numpy.save" ] ]
danielmlow/composition
[ "d3de032cfe60f4b73e88b50afac78077b0af8f84" ]
[ "models/cnn41_gs.py" ]
[ "'''\nThis is based on cnn35_64. This is after the first pilot. \nChanges:\n-don't filter out # in the tokenizer, tokenize both together. or save tokenizer https://stackoverflow.com/questions/45735070/keras-text-preprocessing-saving-tokenizer-object-to-file-for-scoring\n-use 'number' w2v as representation for any digit\n-shuffling problem should be check before advancing: plot random selection of conv1 layers. theys should all be 14 or 15.\n-tune hyperparameters. \n'''\nimport datetime\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Input, Dense, Embedding, Conv2D, MaxPool2D\nfrom keras.layers import Reshape, Flatten, Dropout\nfrom keras.models import Model\nimport os\nimport data_helpers\nimport config\nimport pickle\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nfrom keras.utils import np_utils\nfrom numpy.random import seed\nseed(123)\nfrom tensorflow import set_random_seed\nset_random_seed(123)\n\n# Parameters\n# =====================================================================\ncategories = config.categories\nverbose = config.verbose\ntoy = config.toy\nsave_checkpoints = config.save_checkpoints\nplot_RSA = config.plot_RSA\n\nif toy:\n # categories = categories[:4]\n epochs = 1 #it will probably need more.\nelse:\n epochs = config.epochs # it will probably need more.\n\nif config.local_or_cluster:\n categories = categories[:3]\n epochs=1\n verbose=1\n\n# epochs = 6\n#\nsave_checkpoints=False\n\n\nprint('running for '+str(epochs)+' epochs')\n\n\n\n\nif config.local_or_cluster:\n directory_name = '18-06-09-15-46-19'\n # directory_name = datetime.datetime.now().strftime(\"%y-%m-%d-%H-%M-%S\")\n file_name = 'cnn'\nelse:\n directory_name = '18-06-09-15-46-19'\n # directory_name = datetime.datetime.now().strftime(\"%y-%m-%d-%H-%M-%S\")\n file_name = os.path.basename(__file__)\n\nprint('running '+directory_name+' '+file_name)\n\n# Model parameters\nactivation = 'elu'\nconv2_size = 2 #conv1_size=3\npool_size = (3, 1)\nstride_size = (2, 1)\npadding = 'same' #same: classic, you go until end of zero pad, valid: you truncate end if it doesnt fit in filter.\nfilter_sizes = [3] #[3,4,7] #Takes up to three.\nbatch_size = config.batch_size\n# sequence_length = x.shape[1] # 56\nsequence_length = config.sequence_length\nembedding_dim = 300\n\ndef get_output(model, layer_name, batch_size=batch_size, Xvalidation=None, layer_2d_or_1d='2d'):\n intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)\n layer_output = intermediate_layer_model.predict(Xvalidation, batch_size=batch_size, verbose=verbose)\n if layer_2d_or_1d=='2d':\n layer_output = np.reshape(layer_output,(len(Xvalidation), int(layer_output.shape[1])*int(layer_output.shape[3])))\n layer_output = pd.DataFrame(layer_output)\n return layer_output\n\n# layer_name = 'kmaxpool_1'\n# intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output_shape)\n# Load data\n##============================================================================================\n#\n\n# importlib.reload(data_helpers)\n\n# Xtrain, Ytrain = data_helpers.load_all_data(config.train_path,config.validation_path, categories, shuffle=False) # I changed this so it combines train and validation\n# Xvalidation, Yvalidation = data_helpers.load_data(config.validation_path, categories)\n# Xvalidation_raw, Yvalidation_raw = 
data_helpers.load_data_raw(config.validation_path, categories)\n\nXtrain, Ytrain = data_helpers.load_data(config.train_path, categories)\nXvalidation, Yvalidation = data_helpers.load_data(config.validation_path, categories)\nXvalidation_raw, Yvalidation_raw = data_helpers.load_data_raw(config.validation_path, categories)\n\n## Encode Ytrain\n# =====================================================================================\n#one hot encode and integer encode\nYtrain_encoded = np_utils.to_categorical(Ytrain)\nYtrain_integer = np.array(Ytrain)\nYvalidation_encoded = np_utils.to_categorical(Yvalidation)\nYvalidation_integer = np.array(Yvalidation)\n\n# Zero pad (encode) Xtrain and Xvalidation\n# ==================================================================================================\ntokenizer = Tokenizer(filters='!\"$%&()*+,-./:;<=>?@[\\]^_`{|}~') #TODO depending on word embedding, set lower=False.\ntokenizer.fit_on_texts(np.append(np.array(Xtrain), np.array(Xvalidation)))\n# tokenizer.fit_on_texts(Xtrain)\nsequences = tokenizer.texts_to_sequences(Xtrain)\nsequences2 = tokenizer.texts_to_sequences(Xvalidation)\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' % len(word_index))\n\nXtrain_encoded = pad_sequences(sequences, maxlen=sequence_length, padding='post')\nXvalidation_encoded = pad_sequences(sequences2, maxlen=sequence_length, padding='post')\n\n\n#define max length of doc\n# l1 = []\n# for text in Xtrain:\n# l1.append(len(text_to_word_sequence(text)))\n\n# def roundup(x):\n# return int(math.ceil(x / 100.0)) * 100\n#\n# sequence_length = roundup(np.max(l1)) #\n\n\n## embedding layer https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html\n# =====================================================================================\n\n# Creating the model\n#\n# from gensim.models.keyedvectors import KeyedVectors\n# it_model = KeyedVectors.load_word2vec_format(config.word_embeddings_path+'wiki.it.vec')\n#\n# # Getting the tokens\n# words = []\n# for word in it_model.vocab:\n# words.append(word)\n#\n# # Printing out number of tokens available\n# print(\"Number of Tokens: {}\".format(len(words)))\n# # Printing out the dimension of a word vector\n# print(\"Dimension of a word vector: {}\".format(len(it_model[words[0]])))\n# # Print out the vector of a word\n# embeddings_index = {}\n# for i in range(len(words)):\n# embeddings_index[words[i]] = it_model[words[i]]\n#\n# def save_obj(obj, path_and_filename):\n# with open(path_and_filename + '.pkl', 'wb+') as f:\n# pickle.dump(obj, f)\n#\n# save_obj(embeddings_index,'/Users/danielmlow/Dropbox/cnn/data/wiki.it/gensim_it_w2v')\n\n\ndef load_obj(path_and_filename):\n with open(path_and_filename, 'rb') as f:\n return pickle.load(f)\n\n\nembeddings_index = load_obj(config.word_embeddings_path+'/gensim_it_w2v.pkl') #dictionary embeddings_index.get('è') returns word embedding\n\n# from scipy import spatial\n# res = spatial.distance.cosine(embeddings_index.get('uno'), embeddings_index.get('dieci'))\n\n# embeddings_index = {}\n# # with open(os.path.join(config.word_embeddings_path,'GoogleNews-vectors-negative300.bin')) as f:\n# with open(os.path.join(config.word_embeddings_path,'glove.6B.'+str(embedding_dim)+'d.txt')) as f:\n# for line in f:\n# values = line.split()\n# word = values[0]\n# coefs = np.asarray(values[1:], dtype='float32')\n# embeddings_index[word] = coefs\n#\n# print('Found %s word vectors.' 
% len(embeddings_index))\n\n\nembedding_matrix = np.zeros((len(word_index) + 1, embedding_dim)) #this will be all embeddings for my vocabulary\n\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n\n# # leverage our embedding_index dictionary and our word_index to compute our embedding matrix\n# # ============================================================================================================\n# embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))\n#\n# for word, i in word_index.items():\n# embedding_vector = embeddings_index.get(word)\n# if embedding_vector is not None:\n# # words not found in embedding index will be all-zeros.\n# embedding_matrix[i] = embedding_vector\n#\n\n\n\n\n\n\npath_to_dir = os.path.join(config.save_to, directory_name + '/')\ntry: os.makedirs(path_to_dir)\nexcept: pass\nprint('directory_name: '+directory_name)\nprint('path_to_dir: '+path_to_dir)\n\n\n\n# Model\n## ======================================================================================================\nprint(\"Creating Model...\")\n\ndef cnn(drop, batch_size, optimizer, num_filters, activation1, activation2, dense_1_neurons, dense_final_neurons, dense_2_layer):\n inputs = Input(shape=(sequence_length,), dtype='int32')\n embedding = Embedding(input_dim=len(word_index) + 1, weights=[embedding_matrix],output_dim=embedding_dim, input_length=sequence_length,trainable=True)(inputs)\n dropout_1 = Dropout(drop)(embedding)\n reshape1 = Reshape((sequence_length,embedding_dim,1))(dropout_1)\n conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim),use_bias=True, strides=1, padding='valid', activation=activation1, name='conv_1')(reshape1)\n maxpool_1 = MaxPool2D(pool_size=pool_size, strides=stride_size, padding=padding,name='pool_1')(conv_1)\n maxpool_1_reshape = Reshape( (int(maxpool_1.shape[1]),int(maxpool_1.shape[3]),1), name='pool_1_reshaped')(maxpool_1)\n dropout_2 = Dropout(drop)(maxpool_1_reshape)\n conv_2 = Conv2D(num_filters*2, kernel_size=(conv2_size, int(maxpool_1_reshape.shape[2])), padding='valid', kernel_initializer='normal', activation=activation1, name='conv_2')(dropout_2)\n maxpool_2 = MaxPool2D(pool_size=pool_size, strides=stride_size, padding='valid',name='pool_2')(conv_2)\n maxpool_2_reshape = Reshape((int(maxpool_2.shape[1]),int(maxpool_2.shape[3]),1),name='pool_2_reshaped')(maxpool_2)\n\n flatten = Flatten()(maxpool_2_reshape)\n dropout_3 = Dropout(drop)(flatten)\n dense_1 = Dense(units=dense_1_neurons, activation=activation2,name='dense_1')(dropout_3)\n dropout_4 = Dropout(drop)(dense_1)\n if dense_2_layer == 'yes':\n dense_2 = Dense(units=dense_1_neurons, activation=activation2, name='dense_2')(dropout_4)\n dropout_5 = Dropout(drop)(dense_2)\n dense_final = Dense(units=512, activation=activation2, name='dense_final')(dropout_5)\n else:\n dense_final = Dense(units=dense_final_neurons, activation=activation2, name='dense_final')(dropout_4)\n dropout_6 = Dropout(drop)(dense_final)\n softmax_final = Dense(units=len(categories), activation='softmax', name='softmax_final')(dropout_6)\n model = Model(inputs=inputs, outputs=softmax_final)\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n model.fit(Xtrain_encoded, Ytrain_encoded, batch_size=batch_size, epochs=epochs,\n verbose=verbose) # starts training\n return model\n\nwith open(path_to_dir + 
'log.txt', 'a+') as f:\n f.write(file_name + '\\n')\n f.write(directory_name+ '\\n\\n')\n\n\n\n\n# dropout_rates = [0.1, 0.2, 0.3, 0.4, 0.5]\n# batch_sizes = [64,128, 256,512]\n# optimizers = ['RMSprop','Adam']\n# activations2 = ['elu','relu']\n# num_filters = 64\n# dense_1_neurons = 1024\n# dense_final_neurons = 64\n\nnum_filters_all = [16,32,64,128]\ndense_1_neurons_all= [2048,1024]\ndense_2_layer_all = ['yes', 'no']\ndense_final_neurons_all = [128]\ndrop = 0.3\nbatch_size=256\noptimizer = 'Adam'\nactivation1 = 'elu'\nactivation2 = 'elu'\n\n\n# l=[]\n# for drop in dropout_rates:\n# for batch_size in batch_sizes:\n# for optimizer in optimizers:\n# for activation2 in activations2:\n# l.append([drop, batch_size, optimizer,activation2])\n# l[]\nl=[]\nfor num_filters in num_filters_all:\n for dense_1_neurons in dense_1_neurons_all:\n for dense_final_neurons in dense_final_neurons_all:\n for dense_2_layer in dense_2_layer_all:\n l.append([num_filters, dense_1_neurons, dense_final_neurons, dense_2_layer])\n\n\ngs_numb = int(sys.argv[1])\ni = int(sys.argv[1])\nprint(i)\nfor parameters in l[gs_numb:gs_numb+6]:\n print(parameters)\n # drop, batch_size, optimizer, activation2 = parameters\n num_filters, dense_1_neurons, dense_final_neurons, dense_2_layer = parameters\n model = cnn(drop, batch_size, optimizer, num_filters, activation1, activation2, dense_1_neurons, dense_final_neurons, dense_2_layer)\n accuracy = model.evaluate(Xvalidation_encoded, Yvalidation_encoded,verbose=verbose)\n print(i)\n with open(path_to_dir + 'log.txt', 'a+') as f:\n f.write(str(i)+'=================\\n')\n f.write('Parameters: '+str(parameters)+'\\n')\n f.write('Loss and Accuracy: ' +str(accuracy)+'\\n')\n f.write(str(np.round(accuracy[1],4)) + '\\n\\n')\n i += 1\n\n\n\n\n\n# if save_checkpoints:\n# filepath = path_to_dir+\"weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5\" #https://machinelearningmastery.com/check-point-deep-learning-models-keras/\n# checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=verbose, save_best_only=True, mode='auto')\n#\n# print(\"Training Model...\")\n# if save_checkpoints:\n# history = model.fit(Xtrain_encoded, Ytrain_encoded, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=[checkpoint]) # starts training\n# else:\n# history = model.fit(Xtrain_encoded,Ytrain_encoded, batch_size=batch_size, epochs=epochs, verbose=verbose) # starts training\n\n# outputs:\n# ============================================================================================================================\n# SAVE\n\n\n\n\n# model.save(path_to_dir + 'model.h5', overwrite=True) #Save model #TODO: not working nor checkpoint save model\n# model.save_weights(path_to_dir +\"model_weights.h5\", overwrite=True)\n# # plot_model(model, to_file=path_to_dir + 'model.png') # Save plot of model\n# np.save(path_to_dir + 'history_dict.npy', history.history) #Save history\n# plot_outputs.learning_curve(history.history, path_to_dir) #'loss' 'both'\n# accuracy = model.evaluate(Xvalidation_encoded,Yvalidation_encoded,verbose=verbose) # TODO:change to validation set for final model.\n# Ypredict = model.predict(Xvalidation_encoded, batch_size=batch_size, verbose=verbose) # TODO:change to validation set for final model.\n# Ypredict_encoded = np_utils.to_categorical(Ypredict.argmax(axis=-1))\n# Ypredict_integer = Ypredict.argmax(axis=-1)\n# np.save(path_to_dir+'Ypredict_integer',Ypredict_integer)\n# clas_rep = classification_report(Yvalidation_encoded, Ypredict_encoded,target_names=categories) # TODO:change 
to validation set for final model.\n# df_clas_rep, df_clas_rep_latex = plot_outputs.classification_report_df(clas_rep)\n# cm = confusion_matrix(y_true=Yvalidation_integer, y_pred=Ypredict_integer,sample_weight=None) # TODO:change to validation set for final model\n# pd.DataFrame(cm, columns=categories, index=categories).to_csv(path_to_dir+'cm.csv')\n# index_min = df_clas_rep['f1_score'].idxmin()\n# index_max = df_clas_rep['f1_score'].idxmax()\n#\n# a2 = conv_1.shape[:]\n# a2 = str(a2[1])+'*'+str(a2[3])+'='+str(a2[1]*a2[3])\n# a3 = maxpool_1.shape[1:3]\n# a3 = str(a3[0])+'*'+str(a3[1])+'='+str(a3[0]*a3[1])\n# a4 = conv_2.shape[:]\n# a4 = str(a4[1])+'*'+str(a4[3])+'='+str(a4[1]*a4[3])\n# a5 = maxpool_2.shape[1:3]\n# a5 = str(a5[0])+'*'+str(a5[1])+'='+str(a5[0]*a5[1])\n#\n# reporting = [len(categories),sequence_length,embedding.shape[1:],a2,a3,a4,a5,[''], dense_final_neurons,[''],activation, num_filters, filter_sizes,padding, pool_size, stride_size, drop, epochs, [''], df_clas_rep['f1_score'][len(categories)], [df_clas_rep['class'][index_min],df_clas_rep['f1_score'][index_min]], [df_clas_rep['class'][index_max],df_clas_rep['f1_score'][index_max]]]\n# # reporting = [len(categories),sequence_length,embedding.shape[1:], dense_final_neurons,[''],activation, num_filters, filter_sizes,padding, pool_size, stride_size, drop, epochs, [''], df_clas_rep['f1_score'][len(categories)], [df_clas_rep['class'][index_min],df_clas_rep['f1_score'][index_min]], [df_clas_rep['class'][index_max],df_clas_rep['f1_score'][index_max]]]\n#\n# with open(path_to_dir + 'log.txt', 'a+') as f:\n# f.write(file_name+'\\n')\n# f.write(directory_name)\n# f.write('\\n\\n')\n# model.summary(print_fn=lambda x: f.write(x + '\\n'))\n# f.write('\\n\\n')\n# for i, name in enumerate(model.metrics_names):\n# f.write(name+': '+str(np.round(accuracy[i], 2))+'\\n')\n# # f.write('accuracy: %f ' % (accuracy * 100))\n# # f.write('loss: %f ' % (loss))\n# f.write('\\n\\n')\n# f.write('Classification Report: \\n'+df_clas_rep_latex)\n# for i in reporting:\n# f.write(str(i)+'\\n')\n\n# # Save output_layers\n# # loaded_model = load_model(path_to_dir+'model.h5')\n# # model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])\n# conv_1 = get_output(model, 'conv_1', layer_2d_or_1d='2d', Xvalidation=Xvalidation_encoded)\n# # kmaxpool_1 = get_output(model, 'kmaxpool_1', layer_2d_or_1d='2d', Xvalidation=Xvalidation_encoded)\n# pool_1 = get_output(model, 'pool_1', layer_2d_or_1d='2d', Xvalidation=Xvalidation_encoded)\n# conv_2 = get_output(model, 'conv_2', layer_2d_or_1d='2d', Xvalidation=Xvalidation_encoded)\n# pool_2 = get_output(model, 'pool_2', layer_2d_or_1d='2d', Xvalidation=Xvalidation_encoded)\n# dense_1 = get_output(model, 'dense_1', layer_2d_or_1d='1d', Xvalidation=Xvalidation_encoded)\n# dense_final = get_output(model, 'dense_final', layer_2d_or_1d='1d', Xvalidation=Xvalidation_encoded)\n# softmax_final = get_output(model, 'softmax_final', layer_2d_or_1d='1d', Xvalidation=Xvalidation_encoded)\n#\n# # np.savez_compressed(path_to_dir+'output_layers.npz', a=conv_1, b=pool_1,c=dense_general,d=conv_2, e=pool_2, f=dense_final)\n# np.savez_compressed(path_to_dir+'output_layers.npz', a=conv_1, b=pool_1,c=conv_2, d=pool_2, e=dense_1, f=dense_final, g=softmax_final)\n# # np.savez_compressed(path_to_dir+'output_layers.npz', a=conv_1, b=kmaxpool_1,c=conv_2, d=pool_2, e=dense_1, f=dense_final)\n#\n# # import importlib\n# # importlib.reload(corr_between_layers)\n#\n# # Log.txt\n# models = [directory_name]\n# statistic = 
'spearman'\n#\n# # layers= data_helpers.load_output_layers(path_to_dir)\n# layers = [conv_1, pool_1, conv_2, pool_2, dense_1, dense_final, softmax_final]\n#\n# zeros1 = []\n# zeros2 = []\n# zeros3 = []\n#\n# for layer in layers:\n# zero1 = (np.count_nonzero(layer.round(1) == 0)) / (layer.shape[0] * layer.shape[1])\n# zeros1.append(np.round(zero1,3))\n# zero2 = (np.count_nonzero(layer.round(2) == 0)) / (layer.shape[0] * layer.shape[1])\n# zeros2.append(np.round(zero2, 3))\n# zero3 = (np.count_nonzero(layer.round(3) == 0)) / (layer.shape[0] * layer.shape[1])\n# zeros3.append(np.round(zero3, 3))\n#\n# with open(path_to_dir + 'log.txt', 'a+') as f:\n# f.write('zeros round to 1 decimal: '+str(zeros1)+'\\n')\n# f.write('zeros round to 2 decimals: '+str(zeros2)+'\\n')\n# f.write('zeros round to 3 decimals: '+str(zeros3)+'\\n')\n#\n#\n# # corr_between_layers1 = corr_between_layers.corr_between_layers_method(statistic,models,layers, config.save_to,Xvalidation, categories, model_type=file_name)\n# # reporting.append(list(np.round(corr_between_layers1.iloc[-1, :-1], 2)))\n#\n#\n# # ===================================================================================================================\n# import random\n# # Generate random sequence so you always compare same sentences\n# amount_sent = int(len(Xvalidation)/len(categories))\n# # sentences_index = list(range(amount_sent))\n# # random.shuffle(sentences_index)\n# sentences_index = [767, 445, 551, 566, 224, 853, 836, 564, 575, 793, 730, 759, 82, 390, 110, 804, 615, 577, 815, 290, 190, 239, 255, 731, 851, 186, 799, 628, 940, 209, 580, 13, 533, 926, 125, 2, 285, 784, 130, 171, 181, 599, 500, 457, 432, 753, 847, 118, 795, 570, 729, 56, 769, 929, 701, 64, 387, 687, 144, 33, 325, 423, 828, 657, 166, 96, 175, 664, 671, 87, 9, 315, 204, 741, 57, 588, 245, 407, 433, 326, 902, 820, 916, 187, 416, 882, 357, 105, 693, 349, 520, 501, 676, 698, 737, 267, 910, 899, 327, 362, 320, 478, 309, 198, 346, 824, 734, 678, 529, 523, 240, 652, 884, 351, 356, 89, 462, 283, 466, 116, 287, 258, 833, 219, 591, 381, 74, 900, 915, 241, 37, 4, 430, 605, 498, 141, 220, 494, 376, 601, 849, 379, 688, 506, 644, 164, 864, 424, 414, 957, 422, 810, 686, 363, 92, 233, 102, 595, 717, 380, 579, 908, 126, 297, 319, 496, 228, 733, 52, 524, 377, 0, 221, 632, 199, 8, 642, 256, 858, 143, 119, 299, 821, 145, 448, 848, 611, 188, 817, 463, 403, 368, 140, 685, 700, 806, 68, 935, 567, 919, 619, 440, 742, 123, 232, 114, 608, 23, 76, 304, 903, 775, 932, 259, 493, 720, 948, 223, 251, 542, 340, 77, 237, 460, 665, 812, 360, 639, 640, 323, 749, 450, 578, 782, 208, 196, 863, 773, 797, 475, 625, 331, 288, 850, 61, 563, 798, 139, 527, 479, 316, 277, 525, 402, 12, 598, 722, 473, 149, 574, 101, 162, 860, 586, 172, 715, 941, 272, 616, 662, 270, 62, 585, 375, 373, 807, 339, 877, 46, 173, 284, 526, 163, 825, 248, 594, 857, 866, 622, 808, 868, 497, 572, 231, 88, 249, 117, 67, 518, 881, 573, 788, 385, 453, 956, 435, 874, 544, 438, 839, 593, 311, 505, 394, 568, 545, 943, 904, 781, 613, 442, 41, 246, 648, 179, 276, 292, 397, 787, 689, 834, 182, 38, 42, 274, 582, 345, 94, 928, 597, 242, 732, 111, 511, 534, 716, 726, 624, 324, 337, 875, 756, 571, 160, 294, 289, 951, 441, 65, 831, 467, 286, 206, 790, 225, 418, 404, 413, 282, 446, 508, 661, 39, 569, 250, 663, 18, 637, 485, 837, 539, 461, 895, 213, 439, 675, 543, 630, 658, 487, 855, 491, 476, 822, 838, 587, 378, 561, 924, 28, 865, 44, 428, 826, 7, 727, 844, 384, 469, 654, 60, 829, 348, 763, 152, 672, 695, 449, 230, 764, 128, 938, 129, 301, 452, 426, 709, 32, 590, 159, 
631, 918, 192, 528, 34, 897, 468, 612, 19, 40, 547, 516, 482, 408, 194, 382, 623, 330, 106, 419, 168, 703, 207, 437, 724, 465, 431, 776, 271, 536, 75, 170, 934, 670, 744, 275, 85, 15, 358, 841, 81, 515, 254, 553, 667, 842, 679, 750, 71, 765, 43, 655, 712, 805, 819, 761, 811, 191, 614, 310, 937, 36, 723, 950, 21, 883, 161, 747, 169, 321, 263, 214, 336, 714, 830, 659, 617, 552, 51, 954, 406, 10, 234, 474, 183, 146, 405, 603, 6, 484, 581, 458, 873, 604, 522, 770, 328, 636, 861, 156, 548, 707, 499, 746, 813, 354, 257, 222, 11, 393, 708, 540, 684, 79, 618, 401, 133, 713, 803, 789, 22, 876, 489, 927, 153, 682, 268, 70, 791, 920, 556, 681, 303, 786, 513, 921, 792, 706, 913, 507, 852, 260, 280, 54, 645, 412, 420, 100, 361, 53, 415, 115, 308, 127, 514, 886, 565, 471, 896, 892, 800, 859, 740, 490, 939, 584, 945, 226, 592, 949, 823, 26, 550, 338, 131, 602, 626, 350, 878, 854, 953, 470, 495, 748, 777, 367, 210, 683, 641, 562, 158, 492, 55, 486, 344, 421, 802, 785, 372, 136, 107, 73, 690, 151, 455, 725, 669, 879, 643, 780, 90, 366, 719, 621, 923, 84, 147, 371, 177, 306, 692, 589, 610, 867, 779, 743, 383, 634, 893, 31, 554, 137, 503, 649, 718, 174, 193, 391, 5, 334, 755, 898, 946, 197, 298, 216, 176, 739, 370, 930, 472, 889, 751, 609, 78, 291, 558, 124, 318, 754, 48, 521, 512, 705, 809, 157, 396, 653, 203, 447, 300, 178, 801, 880, 451, 132, 47, 353, 925, 399, 417, 646, 436, 342, 794, 890, 481, 333, 738, 796, 165, 955, 244, 647, 952, 409, 905, 721, 80, 771, 121, 313, 189, 517, 870, 596, 103, 329, 185, 425, 656, 3, 359, 560, 894, 519, 374, 99, 541, 480, 355, 269, 1, 135, 783, 205, 235, 843, 835, 58, 25, 247, 530, 108, 236, 215, 252, 410, 386, 633, 17, 50, 483, 958, 557, 936, 607, 832, 914, 389, 93, 760, 818, 735, 150, 872, 218, 906, 211, 459, 650, 942, 752, 546, 532, 454, 699, 762, 278, 745, 72, 138, 279, 704, 14, 109, 364, 766, 200, 395, 69, 302, 535, 758, 83, 184, 907, 846, 917, 862, 365, 434, 660, 97, 728, 95, 845, 606, 901, 668, 576, 531, 774, 217, 265, 314, 477, 195, 600, 909, 305, 444, 694, 322, 227, 86, 559, 549, 29, 398, 510, 91, 266, 45, 167, 814, 201, 295, 154, 388, 509, 347, 816, 651, 538, 148, 691, 180, 261, 343, 638, 155, 400, 122, 456, 891, 49, 443, 293, 711, 736, 944, 464, 933, 772, 113, 24, 212, 59, 243, 332, 666, 710, 312, 768, 66, 273, 238, 856, 696, 296, 335, 427, 134, 635, 778, 63, 502, 317, 112, 35, 98, 827, 307, 20, 429, 697, 947, 488, 341, 702, 680, 264, 871, 620, 911, 352, 583, 202, 757, 262, 885, 673, 504, 887, 677, 411, 16, 912, 30, 922, 392, 369, 27, 229, 120, 281, 840, 142, 537, 555, 931, 629, 627, 959, 253, 888, 869, 104, 674]\n#\n# def corr_layers(n=None, layers=None, layer_names=None, statistic='spearman'):\n# corr_matrices = []\n# a = np.zeros((len(layers),len(layers)))\n# corr_between_layers = pd.DataFrame(a, columns=layer_names, index=layer_names)\n# # obtain N sentences from each category\n# for layer in layers:\n# randomN_sentences = pd.DataFrame()\n# Nsentences = sentences_index[:n]\n# for i in range(len(categories)):\n# category_start_number = i*amount_sent\n# Nsentences_1category= [n+category_start_number for n in Nsentences]\n# randomN_sentences_1category = layer.iloc[Nsentences_1category, :]\n# randomN_sentences = pd.concat([pd.DataFrame(randomN_sentences), randomN_sentences_1category],axis=0)\n# corr_matrix_lX = randomN_sentences.T.corr(method=statistic)\n# corr_matrix_lX_triu = pd.DataFrame(np.triu(corr_matrix_lX, k=1)).replace(0, np.nan)\n# corr_matrices.append(corr_matrix_lX_triu)\n# for j in range(0,len(layers)):\n# for i in range(0,len(layers)):\n# res = 
corr_matrices[j].corrwith(corr_matrices[i]).mean() #TODO: Pearson. correlation between layers. # res = corr_matrices[j].apply(lambda col: col.corr(corr_matrices[i], method=statistic), axis=0)\n# corr_between_layers.iloc[j,i] = res\n# return corr_between_layers\n#\n# statistic='spearman'\n# sentences = 40\n# # layer_names = ['lstm1', 'dense1', 'dense2', 'dense_final', 'softmax_final']\n# layer_names = ['conv_1', 'pool_1', 'conv_2', 'pool_2', 'dense_1', 'dense_final', 'softmax_final']\n# layers = [conv_1, pool_1, conv_2, pool_2, dense_1, dense_final, softmax_final]\n# corr_between_layers1 = corr_layers(n=sentences, layers=layers, layer_names=layer_names)\n# random.shuffle(sentences_index)\n# corr_between_layers2 = corr_layers(n=sentences, layers=layers, layer_names=layer_names)\n#\n# with open(path_to_dir + 'log.txt', 'a+') as f:\n# f.write('\\nCorrelation between layers' +str(sentences)+ 'sentences (spearman)\\n')\n# f.write(str(corr_between_layers1.round(2)) + '\\n')\n# f.write('\\nCorrelation between layers' +str(sentences)+ 'different sentences (spearman)\\n')\n# f.write(str(corr_between_layers2.round(2)) + '\\n')\n# # f.write('\\nCorrelation between layers (kendall)\\n')\n# # f.write(str(corr_between_layers2.round(2)) + '\\n')\n# # f.write('\\nCorrelation between layers (kendall)\\n')\n# # f.write(str(corr_between_layers2.round(2)) + '\\n')\n#\n# corr_between_layers0 = pd.concat([corr_between_layers1.round(2), corr_between_layers2.round(2)])\n# corr_between_layers0.to_csv(path_to_dir+'corr_between_layers0.csv', index=True, header=True)\n#\n#\n#\n#\n# # path_to_dir = '/Users/danielmlow/Dropbox/cnn/thesis/runs_cluster/cnn10/'\n# # RSA_arr_dense_final = np.load(path_to_dir+'RSA_arr_dense_final.npy')\n# # layer_name='dense_finalb'\n# #\n# # from scipy.spatial import distance\n# # from scipy.cluster import hierarchy\n# # import seaborn as sns\n# # # correlations = df.corr()\n# # # correlations_array = np.asarray(df.corr())\n# #\n# # correlations = pd.DataFrame(RSA_arr_dense_final, columns=categories, index=categories)\n# # correlations_array = RSA_arr_dense_final[:]\n# #\n# # row_linkage = hierarchy.linkage(\n# # distance.pdist(correlations_array, metric='euclidean'), method='ward', optimal_ordering=True)\n# #\n# # col_linkage = hierarchy.linkage(\n# # distance.pdist(correlations_array.T, metric='euclidean'), method='ward', optimal_ordering=True)\n# # #\n# # # sns.clustermap(correlations, row_linkage=row_linkage, col_linkage=col_linkage, row_colors=network_colors, method=\"average\",\n# # # col_colors=network_colors, figsize=(13, 13), cmap=cmap)\n# # sns.set(font_scale=0.5)\n# # cg = sns.clustermap(correlations, row_linkage=row_linkage, col_linkage=col_linkage,cmap=\"RdBu_r\", vmin = -0.8, vmax=0.8, cbar_kws={\"ticks\":[-0.8,-0.4,0.0, 0.4, 0.8]})\n# # plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)\n# # plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\n# # cg.savefig(path_to_dir + 'RSA_ward_'+ layer_name + '.eps', format='eps', dpi=100)\n# #\n# # rsa.plot_rsm(path_to_dir, RSA_arr_dense_final, categories, layer_name='dense_finalb')\n#\n# conv_1_shape = sequence_length - filter_sizes[0] + 1\n# pool_1_shape = int(conv_1_shape/stride_size[0])\n# #\n# # # TODO: choose short and long sentence from new dataset.\n# #\n# # for i in range(len(Xvalidation)):\n# # length = len(Xvalidation[i].split())\n# # if length == 20:\n# # print(i)\n# # break\n#\n# # 51 has 10 words, 10 has 20 words\n# for sentence_id in [51]:\n# for layer, layer_name in zip(layers[:2], 
layer_names[:2]):\n# plt.clf()\n# sentence_length = len(Xvalidation[sentence_id].split())\n# sentence = np.array(layer.T[sentence_id])\n# sns.set(font_scale=0.5)\n# if layer_name =='conv_1':\n# reshaped = np.reshape(sentence, [conv_1_shape, num_filters])\n# elif layer_name =='pool_1':\n# reshaped = np.reshape(sentence, [pool_1_shape, num_filters])\n# elif layer_name =='conv_2':\n# reshaped = np.reshape(sentence, [pool_1_shape-2, num_filters*2])\n# elif layer_name =='pool_2':\n# reshaped = np.reshape(sentence, [int((pool_1_shape-2)/2), num_filters*2])\n# else:\n# reshaped = np.reshape(sentence, [1,layer.shape[1]])\n# cg = sns.heatmap(reshaped, cmap=\"RdBu_r\", vmin=-1., vmax=1.,\n# cbar_kws={\"ticks\": [-1., -0.5, 0.0, 0.5, 1.0]})\n# # cg = sns.heatmap(reshaped, cmap=\"RdBu_r\")\n# # cg = sns.heatmap(reshaped, cmap=\"Reds\")\n# plt.xticks(rotation=90)\n# plt.ylabel('Words')\n# plt.yticks(rotation=0)\n# plt.xlabel('Filters/Channels')\n# plt.title(layer_name+' - Single Sentence \\n')\n# plt.savefig(path_to_dir+'single_sent_heatmap_'+layer_name+'_'+str(sentence_length)+'words.png')\n#\n# # TODO: careful when using layer[-1] because I want to use dense_final[-2] not softmax_final [-1]\n# df_prototypical, df_prototypical_score, df_prototypical_sentences = corr_between_layers.prototypical_sentences(statistic, Xvalidation,Xvalidation_raw, path_to_dir, layer=layers[-2], validation_size= len(Xvalidation), amount_sent=int(len(Xvalidation)/len(categories)),nan='', categories=categories)\n#\n# if plot_RSA:\n# # rsa.plot_RSA(path_to_dir, categories, layer=conv_1, layer_name='conv_1',\n# # amount_sent=int(conv_1.shape[0] / len(categories)))\n# # rsa.plot_RSA(path_to_dir, categories, layer=pool_1, layer_name='pool_1',\n# # amount_sent=int(conv_1.shape[0] / len(categories)))\n# # rsa.plot_RSA(path_to_dir, categories, layer=conv_2, layer_name='conv_2', amount_sent=int(conv_1.shape[0]/len(categories)))\n# # rsa.plot_RSA(path_to_dir, categories, layer=pool_2, layer_name='pool_2',\n# # amount_sent=int(conv_1.shape[0] / len(categories)))\n# rsa.plot_RSA(path_to_dir, categories, layer=dense_1, layer_name='dense_1', amount_sent=int(conv_1.shape[0]/len(categories)))\n# rsa.plot_RSA(path_to_dir, categories, layer=dense_final, layer_name='dense_final',\n# amount_sent=int(len(Xvalidation) / len(categories)))\n# rsa.plot_RSA(path_to_dir, categories, layer=softmax_final, layer_name='softmax_final',\n# amount_sent=int(len(Xvalidation)/ len(categories)))\n# # rsa.plot_RSA(path_to_dir, categories, layer=dense_general, layer_name='dense_general',amount_sent=int(conv_1.shape[0] / len(categories)))\n# # rsa.plot_RSA(path_to_dir, categories, layer=softmax_final_sigmoid, layer_name='softmax_final_sigmoig')\n# # rsa.plot_RSA(path_to_dir, categories, layer=softmax_final_elu, layer_name='softmax_final_elu')\n#\n#\n# # def Nsentences_a_k_length(n,a, k, layer, amount_sent_per_category, Xvalidation):\n# # layer_sample = pd.DataFrame()\n# # for i in range(0,layer.shape[0], amount_sent_per_category):\n# # n_sentences_same_length = pd.DataFrame()\n# # sentences = 0\n# # # loop through sentences in categories\n# # for j in range(i,i+amount_sent_per_category):\n# # if (len(Xvalidation[j].split())>=a & len(Xvalidation[j].split())<=k):\n# # n_sentences_same_length = n_sentences_same_length.append(layer.T[j])\n# # sentences+=1\n# # if sentences == n:\n# # print('a category does not have enough')\n# # break\n# # if n_sentences_same_length.shape[0] < n:\n# # print('not enough sentences of that length. 
Try again.')\n# # break\n# # else:\n# # layer_sample = layer_sample.append(n_sentences_same_length)\n# # return layer_sample\n#\n#\n# def Nsentences(n, layer, amount_sent_per_category):\n# layer_sample = pd.DataFrame()\n# for i in range(0,layer.shape[0], amount_sent_per_category):\n# n_sentences_1category = layer.iloc[i:i+amount_sent_per_category]\n# n_sentences_1category = n_sentences_1category.sample(frac=1).iloc[:n]\n# layer_sample = layer_sample.append(n_sentences_1category)\n# return layer_sample\n#\n# # RSA single sentences\n# # =====================================================================================================================================\n# amount_sent_per_category = int(len(Xvalidation)/len(categories))\n# n = 5\n#\n# layer_names = ['conv_1', 'pool_1', 'conv_2', 'pool_2','dense_1' ,'dense_final', 'softmax_final']\n# for layer, layer_name in zip(layers, layer_names):\n# # layer_sample = Nsentences_a_k_length(n, 10, 14, layer, amount_sent_per_category, Xvalidation)\n# layer_sample = Nsentences(n, layer, amount_sent_per_category)\n# # Clustermap\n# df = pd.DataFrame(layer_sample)\n# # df[(df >= -0.08) & (df <= 0.09)] = np.nan\n# statistic = 'spearman'\n# df = df.T.corr(method=statistic)\n# columns = [[i] * n for i in categories] # TODO: categories or categories_wrong.\n# # columns = [[i]*n for i in config.categories_wrong]\n# columns = [i for j in columns for i in j]\n# df.columns = columns\n# df.index = columns\n# sns.set(font_scale=0.08)\n# cg = sns.clustermap(df, method='ward', cmap=\"RdBu_r\", vmin=-1., vmax=1.,cbar_kws={\"ticks\": [-1., -0.5, 0.0, 0.5, 1.0]})\n# plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0, )\n# plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\n# # tick_locator = ticker.MaxNLocator(int(df.shape[0]))\n# # plt.setp(cg.ax_heatmap.xaxis.set_major_locator(tick_locator))\n# # plt.setp(cg.ax_heatmap.yaxis.set_major_locator(tick_locator))\n# cg.savefig(path_to_dir+ '/RSA_ward_clustermap_' + layer_name + '_single_sentences_'+statistic+'.eps', format='eps', dpi=100)\n#\n# # With NaNs just first two layers\n# for layer, layer_name in zip(layers[:2], layer_names[:2]):\n# # layer_sample = Nsentences_a_k_length(n, 10, 14, layer, amount_sent_per_category, Xvalidation)\n# layer_sample = Nsentences(n, layer, amount_sent_per_category)\n# # Clustermap\n# df = pd.DataFrame(layer_sample)\n# df[(df >= -0.09) & (df <= 0.09)] = np.nan\n# statistic = 'spearman'\n# df = df.T.corr(method=statistic)\n# columns = [[i] * n for i in categories] # TODO: categories or categories_wrong.\n# # columns = [[i]*n for i in config.categories_wrong]\n# columns = [i for j in columns for i in j]\n# df.columns = columns\n# df.index = columns\n# sns.set(font_scale=0.08)\n# try:\n# cg = sns.clustermap(df, method='ward', cmap=\"RdBu_r\", vmin=-1., vmax=1.,\n# cbar_kws={\"ticks\": [-1., -0.5, 0.0, 0.5, 1.0]})\n# plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0, )\n# plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\n# # tick_locator = ticker.MaxNLocator(int(df.shape[0]))\n# # plt.setp(cg.ax_heatmap.xaxis.set_major_locator(tick_locator))\n# # plt.setp(cg.ax_heatmap.yaxis.set_major_locator(tick_locator))\n# cg.savefig(path_to_dir+ '/RSA_ward_clustermap_' + layer_name + '_single_sentences_'+statistic+'_with_NaNs.eps', format='eps', dpi=100)\n# except:\n# pass\n#\n#\n# layer_names = ['conv_1_nans', 'pool_1_nans', 'conv_2_nans', 'pool_2_nans', 'dense_final_nans']\n# layers_nans=[]\n# for layer, layer_name in zip(layers, 
layer_names):\n# df = pd.DataFrame(layer)\n# df[(df>=-0.09) & (df<=0.09)] = np.nan\n# layers_nans.append(df)\n# with open(path_to_dir + 'log.txt', 'a+') as f:\n# f.write(layer_name+': amount of nans: ')\n# f.write(str(df.isnull().sum().sum())+ ' ')\n# f.write('rel. amount: '+str((df.isnull().sum().sum()/(df.shape[0]*df.shape[1])).round(2)))\n# f.write('\\n')\n# # rsa.plot_RSA(path_to_dir, categories, layer=df, layer_name=layer_name, amount_sent=int(layer.shape[0]/len(categories)))\n#\n# #\n# # corr_between_layers1 = corr_between_layers.corr_between_layers_method(statistic,models,layers_nans,config.save_to,Xvalidation, categories,nan='with_nans_', model_type=file_name)\n# #\n#\n# # corr_between_layers1 = corr_layers(n_random_sentences, df_prototypical,layers, df_prototypical_sentences, layer_names, statistic, categories=categories)\n#\n# def Nsentences_Klength(n, sentence_length, layer, amount_sent_per_category, Xvalidation):\n# layer_sample = pd.DataFrame()\n# for i in range(0,layer.shape[0], amount_sent_per_category):\n# n_sentences_same_length = pd.DataFrame()\n# sentences = 0\n# # loop through sentences in categories\n# for j in range(i,i+amount_sent_per_category):\n# if len(Xvalidation[j].split())==sentence_length:\n# n_sentences_same_length = n_sentences_same_length.append(layer.T[j])\n# sentences+=1\n# if sentences == n:\n# break\n# if n_sentences_same_length.shape[0] < n:\n# print('not enough sentences of that length. Try again.')\n# break\n# else:\n# layer_sample = layer_sample.append(n_sentences_same_length)\n# return layer_sample\n#\n# for layer, layer_name in zip(layers[:2], layer_names[:2]):\n# amount_sent_per_category = int(len(Xvalidation)/len(categories))\n# n = 5\n# k = [8,28]\n# layer_sample_small = Nsentences_Klength(n, k[0], layer, amount_sent_per_category, Xvalidation)\n# layer_sample_large = Nsentences_Klength(n, k[1], layer, amount_sent_per_category, Xvalidation)\n# statistic = 'spearman'\n# sentences_from_each = len(categories)*n\n# layer_sample = pd.concat([layer_sample_small, layer_sample_large])\n# df = pd.DataFrame(layer_sample)\n# df = df.T.corr(method=statistic) #TODO: should I transpose like this?\n# df.index= range(0,sentences_from_each*2)\n# df.columns= range(0,sentences_from_each*2)\n# df_triu = pd.DataFrame(np.triu(df, k=1)).replace(0, np.nan)\n#\n# zeros = pd.DataFrame(np.ones([sentences_from_each,sentences_from_each]))\n# a15 = pd.DataFrame(np.full([sentences_from_each, sentences_from_each], 20))\n# sentence_len1 = pd.concat([zeros, a15])\n# sentence_len2 = pd.concat([a15, zeros])\n# sentence_len = pd.concat([sentence_len1 ,sentence_len2], axis=1)\n# sentence_len.index=range(sentences_from_each*2)\n# sentence_len.columns=range(sentences_from_each*2)\n# sentence_len_triu= pd.DataFrame(np.triu(sentence_len, k=0)).replace(0, np.nan)\n# sentence_len_triu = sentence_len_triu.replace(1,0)\n# # zeros = pd.DataFrame(np.zeros([3,3]))\n# # a15 = pd.DataFrame(np.full([3, 3], 15 ))\n# # sentence_len.to_csv(output_dir+'sentence_len_distance_matrix.csv', header=False, index=False)\n# # sentence_len_triu.to_csv(output_dir+'sentence_len_distance_matrix.csv', header=False, index=False)\n#\n# with open(path_to_dir + 'log.txt', 'a+') as f:\n# f.write('Sentence_len_triu_dist effect '+layer_name+' '+str(k)+': \\n')\n# f.write(str(sentence_len_triu.corrwith(df_triu).mean())+'\\n')\n# f.write(str(categories)+'\\n')\n#\n#\n#\n\n" ]
[ [ "matplotlib.pyplot.switch_backend", "tensorflow.set_random_seed", "numpy.array", "numpy.random.seed", "pandas.DataFrame", "numpy.round" ] ]
paulkogni/backpack
[ "3122de062d5bbcdcba8f8e02d24adb1bd2cdada6" ]
[ "examples/example_indiv_grads.py" ]
[ "\"\"\"Compute the gradient with PyTorch and the variance with BackPACK.\"\"\"\n\nfrom torch.nn import CrossEntropyLoss, Flatten, Linear, Sequential\n\nfrom backpack import backpack, extend, extensions\nfrom backpack.utils.examples import load_mnist_data\n\nB = 4\nX, y = load_mnist_data(B)\n\nprint(\"# Gradient with PyTorch, individual gradients with BackPACK | B =\", B)\n\nmodel = Sequential(Flatten(), Linear(784, 10),)\nlossfunc = CrossEntropyLoss()\n\nmodel = extend(model)\nlossfunc = extend(lossfunc)\n\nloss = lossfunc(model(X), y)\n\nwith backpack(extensions.BatchGrad()):\n loss.backward()\n\nfor name, param in model.named_parameters():\n print(name)\n print(\".grad.shape: \", param.grad.shape)\n print(\".grad_batch.shape: \", param.grad_batch.shape)\n" ]
[ [ "torch.nn.Linear", "torch.nn.CrossEntropyLoss", "torch.nn.Flatten" ] ]
MarouaJaoua/cells-nuclei-segmentation
[ "09d65db104a7297ec6f4c975b668bb7ca93c7372", "09d65db104a7297ec6f4c975b668bb7ca93c7372" ]
[ "source/model/layers/fusion_net_layers.py", "source/train/train.py" ]
[ "\"\"\"Source: https://github.com/marshuang80/cell-segmentation\"\"\"\nimport torch.nn as nn\n\n\nclass ConvLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size,\n padding=1, stride=1, name=None):\n super(ConvLayer, self).__init__()\n\n block = []\n block.append(nn.Conv2d(in_channels, out_channels, kernel_size,\n padding=padding, stride=stride))\n block.append(nn.ReLU())\n block.append(nn.BatchNorm2d(out_channels))\n\n self.conv_layer = nn.Sequential(*block)\n\n def forward(self, x):\n output = self.conv_layer(x)\n\n return output\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, channels, kernel_size, name=None):\n super(ResidualBlock, self).__init__()\n block = [ConvLayer(channels, channels, kernel_size) for _ in range(3)]\n self.residual_block = nn.Sequential(*block)\n\n def forward(self, x):\n output = self.residual_block(x)\n\n output += x\n\n return output\n\n\nclass DownSampling(nn.Module):\n\n def __init__(self, channels, kernel_size, name=None):\n super(DownSampling, self).__init__()\n\n self.conv = ConvLayer(channels, channels, kernel_size)\n self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n def forward(self, x):\n conv_out = self.conv(x)\n output = self.max_pool(conv_out)\n\n return output, conv_out\n\n\nclass UpSampling(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, name=None):\n super(UpSampling, self).__init__()\n\n self.conv = ConvLayer(in_channels, out_channels, kernel_size)\n self.conv_t = nn.ConvTranspose2d(out_channels, out_channels,\n kernel_size, padding=1, stride=2,\n output_padding=1)\n\n def forward(self, x, skip):\n conv_out = self.conv(x)\n output = self.conv_t(conv_out)\n\n output += skip\n\n return output\n", "import os\nimport time\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport cv2\nfrom torchinfo import summary\nfrom torch.utils.data import DataLoader\nimport source.logger as logger\nfrom source.model import FusionNet, UNet\nfrom source.dataset.dataset import NucleiCellDataset\nimport source.utils as utils\nimport source.arguments as arguments\n\n\ndef main(m_args):\n # Name model\n model_name = utils.get_model_name(m_args)\n\n # Tensorboard\n logger_tb = logger.Logger(log_dir=model_name)\n\n # Get dataset\n train_dataset = NucleiCellDataset(m_args.train_data,\n phase=\"train\",\n transform=m_args.transform,\n image_size=m_args.image_size)\n validation_dataset = NucleiCellDataset(m_args.train_data,\n phase=\"validation\",\n transform=m_args.transform,\n image_size=m_args.image_size)\n\n # Create dataloader\n train_dataloader = DataLoader(train_dataset,\n batch_size=m_args.batch_size,\n shuffle=True,\n num_workers=m_args.num_workers,\n pin_memory=True)\n\n val_dataloader = DataLoader(validation_dataset,\n batch_size=m_args.batch_size,\n shuffle=False,\n num_workers=m_args.num_workers,\n pin_memory=True)\n\n # Device\n device = torch.device(\"cuda:\" + m_args.gpu_ids) \\\n if torch.cuda.is_available() else \"cpu\"\n\n # Model\n if m_args.model == \"fusion\":\n model = FusionNet(m_args, train_dataset.dim)\n else:\n model = UNet(m_args.num_kernel, m_args.kernel_size, train_dataset.dim,\n train_dataset.target_dim)\n\n summary(model)\n print(list(model.parameters())[0].shape)\n print(\"total number of training examples\", str(len(train_dataset)))\n print(\"total number of validation examples\", str(len(validation_dataset)))\n print(\"length of train data loader\", str(len(train_dataloader)))\n print(\"length of validation data loader\", str(len(val_dataloader)))\n model = 
model.to(device)\n dataiter = iter(train_dataloader)\n imgs, _, _ = dataiter.next()\n imgs = imgs.float().to(device)\n print(imgs.shape)\n # logger_tb.update_graph(model, imgs)\n\n # Optimizer\n parameters = model.parameters()\n if m_args.optimizer == \"adam\":\n optimizer = torch.optim.Adam(parameters, m_args.lr)\n else:\n optimizer = torch.optim.SGD(parameters, m_args.lr)\n\n # Loss\n criterion = nn.CrossEntropyLoss()\n criterion = criterion.to(device)\n\n count = 0\n try:\n cp_p = os.path.join(\"output/\", m_args.experiment_name,\n model_name + \".pth.tar\")\n count = utils.load_checkpoint(torch.load(cp_p), model, optimizer)\n print(\"Train from a previous checkpoint...\")\n except FileNotFoundError:\n print(\"No checkpoint found, start training from step 0...\")\n pass\n # Train model\n model.train()\n best_valid_loss = float(\"Inf\")\n total_time_min, total_time_sec = 0.0, 0.0\n global_steps_list = []\n for epoch in range(m_args.epoch):\n start_time = time.time()\n total_loss = []\n for i, (x_train, y_nuclei, y_cell) in enumerate(train_dataloader):\n\n optimizer.zero_grad()\n\n if m_args.target_type == \"nuclei\":\n y_train = y_nuclei\n else:\n y_train = y_cell\n\n # Send data and label to device\n x = x_train.to(device)\n # Input should be between 0 and 1\n x = torch.div(x, 255)\n\n y = y_train.to(device)\n\n # Predict segmentation\n pred = model(x).squeeze(1)\n\n # Calculate loss\n loss = criterion(pred, y.long())\n total_loss.append(loss.item())\n\n # Get the class with the highest probability\n _, pred = torch.max(pred, dim=1)\n\n # Back prop\n loss.backward()\n optimizer.step()\n\n # Log loss, dice and iou\n avg_loss = np.mean(total_loss)\n count += 1\n logger_tb.update_value(\"steps vs train loss\", avg_loss, count)\n global_steps_list.append(count)\n\n # Display segmentation on tensorboard\n if i == 0:\n original = x_train[i].detach().cpu().numpy()\n truth = y[i].squeeze().detach().cpu().numpy()\n seg = pred[i].squeeze().detach().cpu().numpy()\n\n logger_tb.update_image(\"original\", original, count)\n\n seg = cv2.normalize(seg, None, alpha=0, beta=255,\n norm_type=cv2.NORM_MINMAX)\n seg = np.expand_dims(seg, axis=0)\n seg = seg.astype(np.uint8)\n logger_tb.update_image(\"segmentation\", seg, count)\n\n truth = cv2.normalize(truth, None, alpha=0, beta=255,\n norm_type=cv2.NORM_MINMAX)\n truth = np.expand_dims(truth, axis=0)\n truth = truth.astype(np.uint8)\n logger_tb.update_image(\"truth\", truth, count)\n\n if count % 5 == 0:\n avg_loss_val, dice, valid_loss = validate(m_args, criterion,\n device, model,\n val_dataloader)\n print(\"Epoch [{}/{}], Step [{}/{}] || \"\n \"Train Loss: {:.4f}, Valid Loss: {:.4f}\"\n .format(epoch + 1, m_args.epoch, count,\n m_args.epoch * len(train_dataloader),\n avg_loss, avg_loss_val))\n logger_tb.update_value(\"steps vs validation loss\",\n avg_loss_val,\n count)\n logger_tb.update_value(\"steps vs validation dice\",\n dice,\n count)\n\n if best_valid_loss > avg_loss_val:\n best_valid_loss = avg_loss_val\n utils.create_checkpoint(model_name,\n count,\n global_steps_list,\n model,\n optimizer,\n total_loss,\n valid_loss,\n m_args.experiment_name)\n model.train()\n\n ep_loss_val, epoch_dice, epoch_val_loss = validate(m_args,\n criterion,\n device,\n model,\n val_dataloader)\n end_time = time.time()\n epoch_mins, epoch_secs = utils.epoch_time(start_time, end_time)\n total_time_min += epoch_mins\n total_time_sec += epoch_secs\n\n logger_tb.update_value(\"epoch vs validation loss\", ep_loss_val, epoch)\n logger_tb.update_value(\"epoch vs validation 
dice\", epoch_dice, epoch)\n logger_tb.update_value(\"epoch vs time\", total_time_min, epoch)\n logger_tb.update_value(\"steps vs time\", total_time_min, count)\n\n\ndef validate(v_args, criterion, device, model, validation_dataloader):\n model.eval()\n valid_loss = []\n intersections, totals = 0, 0\n with torch.no_grad():\n for i_val, (x_val, y_nuclei_val, y_cell_val) in enumerate(\n validation_dataloader):\n if v_args.target_type == \"nuclei\":\n y_train = y_nuclei_val\n else:\n y_train = y_cell_val\n\n # Send data and label to device\n x = x_val.to(device)\n # Input should be between 0 and 1\n x = torch.div(x, 255)\n y = y_train.to(device)\n\n # Predict segmentation\n pred = model(x).squeeze(1)\n\n # Calculate loss\n loss = criterion(pred, y.long())\n\n # Get the class with the highest probability\n _, pred = torch.max(pred, dim=1)\n\n inputs = pred.view(-1)\n targets = y.view(-1)\n intersection = (inputs * targets).sum()\n total = inputs.sum() + targets.sum()\n\n # intersection is equivalent to True Positive count\n intersections += intersection\n # union is the mutually inclusive area of all labels & predictions\n totals += total\n valid_loss.append(loss.item())\n dice = (2. * intersections) / totals\n avg_loss_val = np.mean(valid_loss)\n return avg_loss_val, dice, valid_loss\n\n\nif __name__ == \"__main__\":\n args = arguments.get_arguments()\n main(args)\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Conv2d" ], [ "torch.device", "torch.max", "torch.no_grad", "torch.optim.Adam", "torch.optim.SGD", "numpy.mean", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "torch.div", "torch.nn.CrossEntropyLoss", "numpy.expand_dims" ] ]
carterjgreen/undergrad-thesis
[ "b6cd0270ab06eb889bd409f585f44953b1994887" ]
[ "AN24_05.py" ]
[ "# AN77_05 -- Mimo processing\nimport Class.Adf24Tx2Rx4 as Adf24Tx2Rx4\nimport Class.RadarProc as RadarProc\nimport time as time\nimport matplotlib.pyplot as plt\nfrom numpy import *\n\n# (1) Connect to DemoRad\n# (2) Enable Supply\n# (3) Configure RX\n# (4) Configure TX\n# (5) Start Measurements\n# (6) Configure signal processing\n# (7) Calculate DBF algorithm\n\nc0 = 3e8\n\n#--------------------------------------------------------------------------\n# Setup Connection\n#--------------------------------------------------------------------------\nBrd = Adf24Tx2Rx4.Adf24Tx2Rx4()\n\nBrd.BrdRst()\n\nProc = RadarProc.RadarProc()\n#--------------------------------------------------------------------------\n# Load Calibration Data\n#--------------------------------------------------------------------------\n#dCalData = Brd.BrdGetCalDat()\n#CalData = dCalData['Dat']\n\n#--------------------------------------------------------------------------\n# Configure RF Transceivers\n#--------------------------------------------------------------------------\nBrd.RfRxEna()\nBrd.RfTxEna(1, 63)\n\nplt.ion()\nplt.show()\n\n#--------------------------------------------------------------------------\n# Configure Up-Chirp\n#--------------------------------------------------------------------------\ndCfg = {\n \"fs\" : 1.0e6,\n \"fStrt\" : 24.0e9,\n \"fStop\" : 24.25e9,\n \"TRampUp\" : 260/1.0e6,\n \"Tp\" : 280/1.0e6,\n \"N\" : 256,\n \"StrtIdx\" : 1,\n \"StopIdx\" : 2,\n \"MimoEna\" : 0,\n \"NrFrms\" : 100\n }\n\nBrd.RfMeas('Adi', dCfg)\n#--------------------------------------------------------------------------\n# Read actual configuration\n#--------------------------------------------------------------------------\nNrChn = Brd.Get('NrChn')\nN = Brd.Get('N')\nfs = Brd.RfGet('fs')\nprint(\"fsss: \", fs)\n#--------------------------------------------------------------------------\n# Configure Signal Processing\n#--------------------------------------------------------------------------\ndProcCfg = {\n \"fs\" : Brd.RfGet('fs'),\n \"kf\" : Brd.RfGet('kf'),\n \"NFFT\" : 2**12,\n \"Abs\" : 1,\n \"Ext\" : 1,\n \"RMin\" : 1,\n \"RMax\" : 10\n }\nProc.CfgRangeProfile(dProcCfg)\n\n\ndUlaCfg = {\n \"fs\" : Brd.RfGet('fs'),\n \"kf\" : Brd.RfGet('kf'),\n \"RangeFFT\" : 2**10,\n \"AngFFT\" : 2**7,\n \"Abs\" : 1,\n \"Ext\" : 1,\n \"RMin\" : 1,\n \"RMax\" : 10,\n \"CalData\" : CalData\n }\n\nprint(\"fs: \", Brd.RfGet(\"fs\")/1e6, \" MHz\")\nprint(\"kf: \", Brd.RfGet(\"kf\")/1e12,\" GHz/ms\")\n\nProc.CfgBeamformingUla(dUlaCfg)\nRange = Proc.GetRangeProfile('Range')\n\n#--------------------------------------------------------------------------\n# Measure and calculate DBF\n#--------------------------------------------------------------------------\nfor MeasIdx in range(0,int(dCfg[\"NrFrms\"])):\n Data = Brd.BrdGetData()\n RP = Proc.RangeProfile(Data)\n\n JOpt = Proc.BeamformingUla(Data)\n #print(\"Siz \", JOpt.shape)\n #print(JOpt)\n JMax = amax(JOpt)\n JNorm = JOpt - JMax\n JNorm[JNorm < -18] = -18\n\n plt.clf()\n plt.imshow(JNorm, origin='lower', extent=[-1,1,-1,1])\n plt.draw()\n\n plt.pause(0.001)\n\n\n" ]
[ [ "matplotlib.pyplot.ion", "matplotlib.pyplot.show", "matplotlib.pyplot.draw", "matplotlib.pyplot.pause", "matplotlib.pyplot.clf", "matplotlib.pyplot.imshow" ] ]
soddencarpenter/dataviz
[ "289ac890b04820acf1c0fc516e0cb502570626e4" ]
[ "ExplorePy/one.py" ]
[ "import pandas as pd\nimport numpy as np\n\ndata = np.array(['python','php','java'])\nseries = pd.Series(data)\nprint (series)\n\n# Create a Dict from a input\ndata = {'Courses' :\"pandas\", 'Fees' : 20000, 'Duration' : \"30days\"}\ns2 = pd.Series(data)\nprint (s2)\n\n\n# read the chicago temperature csv into a data frame\n# in python, starts with column 0\nchitemp = pd.read_csv('/mnt/d/DivvyDatasets/ChicagoTemperature.csv')\nprint(chitemp)\n\n# some info on the chitemp dataframe\nprint(chitemp.shape) # rows, cols as a tuple, e.g. (731, 9)\nprint(chitemp.columns.values) # the column names as a list\nprint(chitemp['avg_temperature_fahrenheit']) # the values of the column\n\n# create a new dataframe for when temperature >. 80\nchihot = chitemp[chitemp['avg_temperature_fahrenheit'] >= 80.0]\nprint(chihot)\n\n\ndef is_hot(temp):\n if temp >= 80.0:\n return True\n else:\n return False\n\n# add a new column to the chitemp indicating if it is hot\n# column is added by using a function\nchitemp['ishot'] = chitemp['avg_temperature_fahrenheit'].apply(is_hot)\nprint(chitemp)" ]
[ [ "numpy.array", "pandas.read_csv", "pandas.Series" ] ]
0xflotus/rembg
[ "7fb6683169d588f653281d53c3c258838194c950" ]
[ "src/rembg/u2net/detect.py" ]
[ "import errno\nimport os\nimport time\nimport urllib.request\nimport sys\n\nimport numpy as np\nimport pkg_resources\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nfrom PIL import Image\nfrom skimage import transform\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\nfrom . import data_loader, u2net\n\n\nclass DownloadProgressBar(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\ndef download_url(url, model_name, output_path):\n if os.path.exists(output_path):\n return\n\n print(\n f\"Downloading model to {output_path}\".format(output_path=output_path),\n file=sys.stderr,\n )\n\n with DownloadProgressBar(\n unit=\"B\", unit_scale=True, miniters=1, desc=url.split(\"/\")[-1]\n ) as t:\n urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)\n\n\ndef load_model(model_name: str = \"u2net\"):\n if model_name == \"u2netp\":\n net = u2net.U2NETP(3, 1)\n path = os.path.expanduser(\"~/.u2net/u2netp.pth\")\n download_url(\n \"https://www.dropbox.com/s/usb1fyiuh8as5gi/u2netp.pth?dl=1\",\n \"u2netp.pth\",\n path,\n )\n elif model_name == \"u2net\":\n net = u2net.U2NET(3, 1)\n path = os.path.expanduser(\"~/.u2net/u2net.pth\")\n download_url(\n \"https://www.dropbox.com/s/kdu5mhose1clds0/u2net.pth?dl=1\",\n \"u2net.pth\",\n path,\n )\n else:\n print(\"Choose between u2net or u2netp\", file=sys.stderr)\n\n try:\n if torch.cuda.is_available():\n net.load_state_dict(torch.load(path))\n net.to(torch.device(\"cuda\"))\n else:\n net.load_state_dict(torch.load(path, map_location=\"cpu\",))\n except FileNotFoundError:\n raise FileNotFoundError(\n errno.ENOENT, os.strerror(errno.ENOENT), model_name + \".pth\"\n )\n\n net.eval()\n\n return net\n\n\ndef norm_pred(d):\n ma = torch.max(d)\n mi = torch.min(d)\n dn = (d - mi) / (ma - mi)\n\n return dn\n\n\ndef preprocess(image):\n label_3 = np.zeros(image.shape)\n label = np.zeros(label_3.shape[0:2])\n\n if 3 == len(label_3.shape):\n label = label_3[:, :, 0]\n elif 2 == len(label_3.shape):\n label = label_3\n\n if 3 == len(image.shape) and 2 == len(label.shape):\n label = label[:, :, np.newaxis]\n elif 2 == len(image.shape) and 2 == len(label.shape):\n image = image[:, :, np.newaxis]\n label = label[:, :, np.newaxis]\n\n transform = transforms.Compose(\n [data_loader.RescaleT(320), data_loader.ToTensorLab(flag=0)]\n )\n sample = transform({\"imidx\": np.array([0]), \"image\": image, \"label\": label})\n\n return sample\n\n\ndef predict(net, item):\n\n sample = preprocess(item)\n\n with torch.no_grad():\n\n if torch.cuda.is_available():\n inputs_test = torch.cuda.FloatTensor(sample[\"image\"].unsqueeze(0).float())\n else:\n inputs_test = torch.FloatTensor(sample[\"image\"].unsqueeze(0).float())\n\n d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)\n\n pred = d1[:, 0, :, :]\n predict = norm_pred(pred)\n\n predict = predict.squeeze()\n predict_np = predict.cpu().detach().numpy()\n img = Image.fromarray(predict_np * 255).convert(\"RGB\")\n\n del d1, d2, d3, d4, d5, d6, d7, pred, predict, predict_np, inputs_test, sample\n\n return img\n" ]
[ [ "torch.device", "numpy.array", "torch.min", "numpy.zeros", "torch.max", "torch.no_grad", "torch.cuda.is_available", "torch.load" ] ]
orestmy/Facial-Similarity-with-Siamese-Networks-in-Pytorch
[ "96570ffece22d23f82e8218147d94d49ec125722" ]
[ "src/main.py" ]
[ "import torchvision.datasets as dset\nfrom torch.utils.data import DataLoader\nimport torch\nfrom torchvision.transforms import transforms\nfrom data import SiameseNetworkDataset\nfrom helpers import Config, show_plot\nfrom models import SiameseNetwork, SoftMaxLoss\n\nclass Trainer():\n def __init__(self):\n\n train_folder = dset.ImageFolder(root=Config.training_dir)\n test_folder = dset.ImageFolder(root=Config.testing_dir)\n\n resize_to_tensor = transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()])\n\n train_dataset = SiameseNetworkDataset(imageFolderDataset=train_folder, transform=resize_to_tensor, should_invert=False)\n test_dataset = SiameseNetworkDataset(imageFolderDataset=test_folder, transform=resize_to_tensor, should_invert=False)\n\n self.train_dataloader = DataLoader(train_dataset, shuffle=True, num_workers=0, batch_size=32)\n self.test_dataloader = DataLoader(test_dataset, shuffle=False, num_workers=0, batch_size=32)\n\n self.model = SiameseNetwork().cuda()\n self.criterion = SoftMaxLoss().cuda()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.0005)\n\n self._init_metrics()\n\n def _init_metrics(self):\n self.counter, self.test_counter = [], []\n self.loss_history, self.test_loss_history = [], []\n self.acc_history, self.test_acc_history = [], []\n self.iteration_number, self.test_iteration_number = 0, 0\n\n def train(self):\n for epoch in range(0, Config.train_number_epochs):\n self.train_epoch(epoch)\n self.test_epoch()\n self.finalise()\n\n def eval(self, checkpoint_path):\n state_dict = torch.load(checkpoint_path)\n\n self.model.load_state_dict(state_dict['model'])\n self.criterion.load_state_dict(state_dict['criterion'])\n\n self.test_epoch()\n\n def train_epoch(self, epochNumber):\n # set to training mode\n self.model.train()\n self.criterion.train()\n\n for i, data in enumerate(self.train_dataloader, 0):\n img0, img1, label, _, _ = data\n img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()\n self.optimizer.zero_grad()\n output1, output2 = self.model(img0, img1)\n loss_contrastive = self.criterion(output1, output2, label)\n loss_contrastive.backward()\n self.optimizer.step()\n if i % 10 == 0:\n acc = self.criterion.get_accuracy(self.criterion.pred, label)\n print(\"Epoch number={}, Current loss={}, accuracy={}\".format(epochNumber, loss_contrastive.item(), acc.item()))\n self.iteration_number += 10\n self.counter.append(self.iteration_number)\n self.loss_history.append(loss_contrastive.item())\n self.acc_history.append(acc)\n\n def test_epoch(self):\n self.model.eval()\n self.criterion.eval()\n\n for i, data in enumerate(self.test_dataloader, 0):\n img0, img1, label, _, _ = data\n img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()\n output1, output2 = self.model(img0, img1)\n loss_contrastive = self.criterion(output1, output2, label)\n acc = self.criterion.get_accuracy(self.criterion.pred, label)\n\n self.test_iteration_number += 1\n self.test_counter.append(self.test_iteration_number)\n self.test_loss_history.append(loss_contrastive.item())\n self.test_acc_history.append(acc)\n\n def _save_model(self, path):\n torch.save({'model': self.model.state_dict(),\n 'criterion': self.criterion.state_dict()}, path)\n\n def finalise(self, test_only=False):\n if test_only:\n show_plot(self.test_counter, self.test_acc_history)\n return\n\n self._save_model(Config.checkpoint_path)\n show_plot(self.counter, self.loss_history)\n show_plot(self.counter, self.acc_history)\n show_plot(self.test_counter, self.test_acc_history)\n\n def 
_dump_embeddings(self, feat1, q_id, s_id, same, posfix=''):\n # create embeddings for t-SNE tensorboard projector visualisation.\n # https://projector.tensorflow.org/ - append Q_id and Same as header for labels.tsv\n filename = '../viz/features_{}.tsv'.format(posfix)\n filename_label = '../viz/labels_{}.tsv'.format(posfix)\n with open(filename, 'a+') as embed_file, open(filename_label, 'a+') as label_file:\n for i in range(feat1.shape[0]):\n embedding = feat1[i].squeeze().cpu().detach().numpy()\n embedding_str = ''.join([\"{:.1f}\".format(num) + '\\t' for num in embedding])\n embed_file.write(embedding_str + '\\n')\n label_file.write(\n str(q_id[i].cpu().detach().numpy()) + '\\t' + str(s_id[i].cpu().detach().numpy()) + '\\t' + str(\n same[i].cpu().detach().numpy()[0]) + '\\n')\n\n def save_embeddings(self, postfix=''):\n\n for i, data in enumerate(self.train_dataloader, 0):\n img0, img1, label, q_id, s_id = data\n img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()\n output1, output2 = self.model(img0, img1)\n self._dump_embeddings(output1, q_id + 10, s_id + 10, label, postfix)\n\n for i, data in enumerate(self.test_dataloader, 0):\n img0, img1, label, q_id, s_id = data\n img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()\n output1, output2 = self.model(img0, img1)\n self._dump_embeddings(output1, q_id, s_id, label, postfix)\n\n\ndef train():\n trainer = Trainer()\n trainer.train()\n trainer.finalise()\n # trainer.save_embeddings(postfix='softmax')\n\ndef eval():\n trainer = Trainer()\n trainer.eval(Config.checkpoint_path)\n trainer.finalise(test_only=True)\n\nif __name__ == \"__main__\":\n train()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load" ] ]
VincentLa/pandas
[ "67112b813af6c367f604366f2352c9a1bb1fedf3" ]
[ "pandas/core/indexes/interval.py" ]
[ "\"\"\" define the IntervalIndex \"\"\"\nimport textwrap\nimport warnings\n\nimport numpy as np\n\nfrom pandas.compat import add_metaclass\nfrom pandas.core.dtypes.missing import isna\nfrom pandas.core.dtypes.cast import (\n find_common_type, maybe_downcast_to_dtype, infer_dtype_from_scalar)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_list_like,\n is_datetime_or_timedelta_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_integer_dtype,\n is_float_dtype,\n is_interval_dtype,\n is_object_dtype,\n is_scalar,\n is_float,\n is_number,\n is_integer)\nfrom pandas.core.indexes.base import (\n Index, ensure_index,\n default_pprint, _index_shared_docs)\nfrom pandas.core.ops import get_op_result_name\n\nfrom pandas._libs import Timestamp, Timedelta\nfrom pandas._libs.interval import (\n Interval, IntervalMixin, IntervalTree,\n)\n\nfrom pandas.core.indexes.datetimes import date_range, DatetimeIndex\nfrom pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex\nfrom pandas.core.indexes.multi import MultiIndex\nimport pandas.core.common as com\nfrom pandas.util._decorators import cache_readonly, Appender\nfrom pandas.util._doctools import _WritableDoc\nfrom pandas.util._exceptions import rewrite_exception\nfrom pandas.core.config import get_option\nfrom pandas.tseries.frequencies import to_offset\nfrom pandas.tseries.offsets import DateOffset\n\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.arrays.interval import (IntervalArray,\n _interval_shared_docs)\n\n_VALID_CLOSED = {'left', 'right', 'both', 'neither'}\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n# TODO(jschendel) remove constructor key when IntervalArray is public (GH22860)\n_index_doc_kwargs.update(\n dict(klass='IntervalIndex',\n constructor='pd.IntervalIndex',\n target_klass='IntervalIndex or list of Intervals',\n name=textwrap.dedent(\"\"\"\\\n name : object, optional\n Name to be stored in the index.\n \"\"\"),\n ))\n\n\ndef _get_next_label(label):\n dtype = getattr(label, 'dtype', type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = 'datetime64'\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label + np.timedelta64(1, 'ns')\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.infty)\n else:\n raise TypeError('cannot determine next label for type {typ!r}'\n .format(typ=type(label)))\n\n\ndef _get_prev_label(label):\n dtype = getattr(label, 'dtype', type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = 'datetime64'\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label - np.timedelta64(1, 'ns')\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.infty)\n else:\n raise TypeError('cannot determine next label for type {typ!r}'\n .format(typ=type(label)))\n\n\ndef _get_interval_closed_bounds(interval):\n \"\"\"\n Given an Interval or IntervalIndex, return the corresponding interval with\n closed bounds.\n \"\"\"\n left, right = interval.left, interval.right\n if interval.open_left:\n left = _get_next_label(left)\n if interval.open_right:\n right = _get_prev_label(right)\n return left, right\n\n\ndef _new_IntervalIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't have\n arguments and breaks __new__\n \"\"\"\n return cls.from_arrays(**d)\n\n\n@Appender(_interval_shared_docs['class'] % dict(\n 
klass=\"IntervalIndex\",\n summary=\"Immutable index of intervals that are closed on the same side.\",\n name=_index_doc_kwargs['name'],\n versionadded=\"0.20.0\",\n extra_methods=\"contains\\n\",\n examples=textwrap.dedent(\"\"\"\\\n Examples\n --------\n A new ``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n closed='right',\n dtype='interval[int64]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n \"\"\"),\n\n))\n@add_metaclass(_WritableDoc)\nclass IntervalIndex(IntervalMixin, Index):\n _typ = 'intervalindex'\n _comparables = ['name']\n _attributes = ['name', 'closed']\n\n # we would like our indexing holder to defer to us\n _defer_to_indexing = True\n\n # Immutable, so we are able to cache computations like isna in '_mask'\n _mask = None\n\n def __new__(cls, data, closed=None, dtype=None, copy=False,\n name=None, verify_integrity=True):\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,\n verify_integrity=verify_integrity)\n\n return cls._simple_new(array, name)\n\n @classmethod\n def _simple_new(cls, array, name, closed=None):\n \"\"\"\n Construct from an IntervalArray\n\n Parameters\n ----------\n array : IntervalArray\n name : str\n Attached as result.name\n closed : Any\n Ignored.\n \"\"\"\n result = IntervalMixin.__new__(cls)\n result._data = array\n result.name = name\n result._reset_identity()\n return result\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, left=None, right=None, **kwargs):\n result = self._data._shallow_copy(left=left, right=right)\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n return self._simple_new(result, **attributes)\n\n @cache_readonly\n def _isnan(self):\n \"\"\"Return a mask indicating if each value is NA\"\"\"\n if self._mask is None:\n self._mask = isna(self.left)\n return self._mask\n\n @cache_readonly\n def _engine(self):\n left = self._maybe_convert_i8(self.left)\n right = self._maybe_convert_i8(self.right)\n return IntervalTree(left, right, closed=self.closed)\n\n def __contains__(self, key):\n \"\"\"\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n boolean\n \"\"\"\n if not isinstance(key, Interval):\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n def contains(self, key):\n \"\"\"\n Return a boolean indicating if the key is IN the index\n\n We accept / allow keys to be not *just* actual\n objects.\n\n Parameters\n ----------\n key : int, float, Interval\n\n Returns\n -------\n boolean\n \"\"\"\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @classmethod\n @Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)\n def from_breaks(cls, breaks, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(array, name=name)\n\n @classmethod\n 
@Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)\n def from_arrays(cls, left, right, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_arrays(left, right, closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)\n def from_intervals(cls, data, closed=None, name=None, copy=False,\n dtype=None):\n msg = ('IntervalIndex.from_intervals is deprecated and will be '\n 'removed in a future version; Use IntervalIndex(...) instead')\n warnings.warn(msg, FutureWarning, stacklevel=2)\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)\n\n if name is None and isinstance(data, cls):\n name = data.name\n\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)\n def from_tuples(cls, data, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n @Appender(_interval_shared_docs['to_tuples'] % dict(\n return_type=\"Index\",\n examples=\"\"\"\n Examples\n --------\n >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])\n >>> idx.to_tuples()\n Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')\n >>> idx.to_tuples(na_tuple=False)\n Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')\"\"\",\n ))\n def to_tuples(self, na_tuple=True):\n tuples = self._data.to_tuples(na_tuple=na_tuple)\n return Index(tuples)\n\n @cache_readonly\n def _multiindex(self):\n return MultiIndex.from_arrays([self.left, self.right],\n names=['left', 'right'])\n\n @property\n def left(self):\n \"\"\"\n Return the left endpoints of each Interval in the IntervalIndex as\n an Index\n \"\"\"\n return self._data._left\n\n @property\n def right(self):\n \"\"\"\n Return the right endpoints of each Interval in the IntervalIndex as\n an Index\n \"\"\"\n return self._data._right\n\n @property\n def closed(self):\n \"\"\"\n Whether the intervals are closed on the left-side, right-side, both or\n neither\n \"\"\"\n return self._data._closed\n\n @Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)\n def set_closed(self, closed):\n if closed not in _VALID_CLOSED:\n msg = \"invalid option for 'closed': {closed}\"\n raise ValueError(msg.format(closed=closed))\n\n # return self._shallow_copy(closed=closed)\n array = self._data.set_closed(closed)\n return self._simple_new(array, self.name)\n\n @property\n def length(self):\n \"\"\"\n Return an Index with entries denoting the length of each Interval in\n the IntervalIndex\n \"\"\"\n return self._data.length\n\n @property\n def size(self):\n # Avoid materializing ndarray[Interval]\n return self._data.size\n\n @property\n def shape(self):\n # Avoid materializing ndarray[Interval]\n return self._data.shape\n\n @property\n def itemsize(self):\n msg = ('IntervalIndex.itemsize is deprecated and will be removed in '\n 'a future version')\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n # supress the warning from the underlying left/right itemsize\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n return self.left.itemsize + self.right.itemsize\n\n def __len__(self):\n return len(self.left)\n\n 
@cache_readonly\n def values(self):\n \"\"\"\n Return the IntervalIndex's data as an IntervalArray.\n \"\"\"\n return self._data\n\n @cache_readonly\n def _values(self):\n return self._data\n\n @cache_readonly\n def _ndarray_values(self):\n return np.array(self._data)\n\n def __array__(self, result=None):\n \"\"\" the array interface, return my values \"\"\"\n return self._ndarray_values\n\n def __array_wrap__(self, result, context=None):\n # we don't want the superclass implementation\n return result\n\n def __reduce__(self):\n d = dict(left=self.left,\n right=self.right)\n d.update(self._get_attributes_dict())\n return _new_IntervalIndex, (self.__class__, d), None\n\n @Appender(_index_shared_docs['copy'])\n def copy(self, deep=False, name=None):\n array = self._data.copy(deep=deep)\n attributes = self._get_attributes_dict()\n if name is not None:\n attributes.update(name=name)\n\n return self._simple_new(array, **attributes)\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n with rewrite_exception('IntervalArray', self.__class__.__name__):\n new_values = self.values.astype(dtype, copy=copy)\n if is_interval_dtype(new_values):\n return self._shallow_copy(new_values.left, new_values.right)\n return super(IntervalIndex, self).astype(dtype, copy=copy)\n\n @cache_readonly\n def dtype(self):\n \"\"\"Return the dtype object of the underlying data\"\"\"\n return self._data.dtype\n\n @property\n def inferred_type(self):\n \"\"\"Return a string of the type inferred from the values\"\"\"\n return 'interval'\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n # we don't use an explicit engine\n # so return the bytes here\n return (self.left.memory_usage(deep=deep) +\n self.right.memory_usage(deep=deep))\n\n @cache_readonly\n def mid(self):\n \"\"\"\n Return the midpoint of each Interval in the IntervalIndex as an Index\n \"\"\"\n return self._data.mid\n\n @cache_readonly\n def is_monotonic(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic\n\n @cache_readonly\n def is_monotonic_increasing(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic_increasing\n\n @cache_readonly\n def is_monotonic_decreasing(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic_decreasing\n\n @cache_readonly\n def is_unique(self):\n \"\"\"\n Return True if the IntervalIndex contains unique elements, else False\n \"\"\"\n return self._multiindex.is_unique\n\n @cache_readonly\n def is_non_overlapping_monotonic(self):\n return self._data.is_non_overlapping_monotonic\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n if kind == 'iloc':\n return super(IntervalIndex, self)._convert_scalar_indexer(\n key, kind=kind)\n return key\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)\n\n @Appender(_index_shared_docs['_convert_list_indexer'])\n def _convert_list_indexer(self, keyarr, kind=None):\n \"\"\"\n we are passed a list-like indexer. 
Return the\n indexer for matching intervals.\n \"\"\"\n locs = self.get_indexer_for(keyarr)\n\n # we have missing values\n if (locs == -1).any():\n raise KeyError\n\n return locs\n\n def _maybe_cast_indexed(self, key):\n \"\"\"\n we need to cast the key, which could be a scalar\n or an array-like to the type of our subtype\n \"\"\"\n if isinstance(key, IntervalIndex):\n return key\n\n subtype = self.dtype.subtype\n if is_float_dtype(subtype):\n if is_integer(key):\n key = float(key)\n elif isinstance(key, (np.ndarray, Index)):\n key = key.astype('float64')\n elif is_integer_dtype(subtype):\n if is_integer(key):\n key = int(key)\n\n return key\n\n def _needs_i8_conversion(self, key):\n \"\"\"\n Check if a given key needs i8 conversion. Conversion is necessary for\n Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An\n Interval-like requires conversion if it's endpoints are one of the\n aforementioned types.\n\n Assumes that any list-like data has already been cast to an Index.\n\n Parameters\n ----------\n key : scalar or Index-like\n The key that should be checked for i8 conversion\n\n Returns\n -------\n boolean\n \"\"\"\n if is_interval_dtype(key) or isinstance(key, Interval):\n return self._needs_i8_conversion(key.left)\n\n i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)\n return isinstance(key, i8_types)\n\n def _maybe_convert_i8(self, key):\n \"\"\"\n Maybe convert a given key to it's equivalent i8 value(s). Used as a\n preprocessing step prior to IntervalTree queries (self._engine), which\n expects numeric data.\n\n Parameters\n ----------\n key : scalar or list-like\n The key that should maybe be converted to i8.\n\n Returns\n -------\n key: scalar or list-like\n The original key if no conversion occured, int if converted scalar,\n Int64Index if converted list-like.\n \"\"\"\n original = key\n if is_list_like(key):\n key = ensure_index(key)\n\n if not self._needs_i8_conversion(key):\n return original\n\n scalar = is_scalar(key)\n if is_interval_dtype(key) or isinstance(key, Interval):\n # convert left/right and reconstruct\n left = self._maybe_convert_i8(key.left)\n right = self._maybe_convert_i8(key.right)\n constructor = Interval if scalar else IntervalIndex.from_arrays\n return constructor(left, right, closed=self.closed)\n\n if scalar:\n # Timestamp/Timedelta\n key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)\n else:\n # DatetimeIndex/TimedeltaIndex\n key_dtype, key_i8 = key.dtype, Index(key.asi8)\n\n # ensure consistency with IntervalIndex subtype\n subtype = self.dtype.subtype\n msg = ('Cannot index an IntervalIndex of subtype {subtype} with '\n 'values of dtype {other}')\n if not is_dtype_equal(subtype, key_dtype):\n raise ValueError(msg.format(subtype=subtype, other=key_dtype))\n\n return key_i8\n\n def _check_method(self, method):\n if method is None:\n return\n\n if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:\n msg = 'method {method} not yet implemented for IntervalIndex'\n raise NotImplementedError(msg.format(method=method))\n\n raise ValueError(\"Invalid fill method\")\n\n def _searchsorted_monotonic(self, label, side, exclude_label=False):\n if not self.is_non_overlapping_monotonic:\n raise KeyError('can only get slices from an IntervalIndex if '\n 'bounds are non-overlapping and all monotonic '\n 'increasing or decreasing')\n\n if isinstance(label, IntervalMixin):\n raise NotImplementedError\n\n # GH 20921: \"not is_monotonic_increasing\" for the second condition\n # instead of \"is_monotonic_decreasing\" 
to account for single element\n # indexes being both increasing and decreasing\n if ((side == 'left' and self.left.is_monotonic_increasing) or\n (side == 'right' and not self.left.is_monotonic_increasing)):\n sub_idx = self.right\n if self.open_right or exclude_label:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left or exclude_label:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n def _get_loc_only_exact_matches(self, key):\n if isinstance(key, Interval):\n\n if not self.is_unique:\n raise ValueError(\"cannot index with a slice Interval\"\n \" and a non-unique index\")\n\n # TODO: this expands to a tuple index, see if we can\n # do better\n return Index(self._multiindex.values).get_loc(key)\n raise KeyError\n\n def _find_non_overlapping_monotonic_bounds(self, key):\n if isinstance(key, IntervalMixin):\n start = self._searchsorted_monotonic(\n key.left, 'left', exclude_label=key.open_left)\n stop = self._searchsorted_monotonic(\n key.right, 'right', exclude_label=key.open_right)\n elif isinstance(key, slice):\n # slice\n start, stop = key.start, key.stop\n if (key.step or 1) != 1:\n raise NotImplementedError(\"cannot slice with a slice step\")\n if start is None:\n start = 0\n else:\n start = self._searchsorted_monotonic(start, 'left')\n if stop is None:\n stop = len(self)\n else:\n stop = self._searchsorted_monotonic(stop, 'right')\n else:\n # scalar or index-like\n\n start = self._searchsorted_monotonic(key, 'left')\n stop = self._searchsorted_monotonic(key, 'right')\n return start, stop\n\n def get_loc(self, key, method=None):\n \"\"\"Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None}, optional\n * default: matches where the label is within an interval only.\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n ---------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply an interval or an location for a point inside an\n interval.\n\n >>> index.get_loc(pd.Interval(0, 2))\n array([0, 1], dtype=int64)\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i2, i3])\n >>> overlapping_index.get_loc(1.5)\n array([0, 1], dtype=int64)\n \"\"\"\n self._check_method(method)\n\n original_key = key\n key = self._maybe_cast_indexed(key)\n\n if self.is_non_overlapping_monotonic:\n if isinstance(key, Interval):\n left = self._maybe_cast_slice_bound(key.left, 'left', None)\n right = self._maybe_cast_slice_bound(key.right, 'right', None)\n key = Interval(left, right, key.closed)\n else:\n key = self._maybe_cast_slice_bound(key, 'left', None)\n\n start, stop = self._find_non_overlapping_monotonic_bounds(key)\n\n if start is None or stop is None:\n return slice(start, stop)\n elif start + 1 == stop:\n return start\n elif start < stop:\n return slice(start, stop)\n else:\n raise KeyError(original_key)\n\n else:\n # use the interval tree\n key = self._maybe_convert_i8(key)\n if isinstance(key, Interval):\n left, right = _get_interval_closed_bounds(key)\n return self._engine.get_loc_interval(left, right)\n else:\n return self._engine.get_loc(key)\n\n def get_value(self, series, key):\n if com.is_bool_indexer(key):\n loc = key\n elif is_list_like(key):\n loc = 
self.get_indexer(key)\n elif isinstance(key, slice):\n\n if not (key.step is None or key.step == 1):\n raise ValueError(\"cannot support not-default step in a slice\")\n\n try:\n loc = self.get_loc(key)\n except TypeError:\n # we didn't find exact intervals or are non-unique\n msg = \"unable to slice with this key: {key}\".format(key=key)\n raise ValueError(msg)\n\n else:\n loc = self.get_loc(key)\n return series.iloc[loc]\n\n @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n\n self._check_method(method)\n target = ensure_index(target)\n target = self._maybe_cast_indexed(target)\n\n if self.equals(target):\n return np.arange(len(self), dtype='intp')\n\n if self.is_non_overlapping_monotonic:\n start, stop = self._find_non_overlapping_monotonic_bounds(target)\n\n start_plus_one = start + 1\n if not ((start_plus_one < stop).any()):\n return np.where(start_plus_one == stop, start, -1)\n\n if not self.is_unique:\n raise ValueError(\"cannot handle non-unique indices\")\n\n # IntervalIndex\n if isinstance(target, IntervalIndex):\n indexer = self._get_reindexer(target)\n\n # non IntervalIndex\n else:\n indexer = np.concatenate([self.get_loc(i) for i in target])\n\n return ensure_platform_int(indexer)\n\n def _get_reindexer(self, target):\n \"\"\"\n Return an indexer for a target IntervalIndex with self\n \"\"\"\n\n # find the left and right indexers\n left = self._maybe_convert_i8(target.left)\n right = self._maybe_convert_i8(target.right)\n lindexer = self._engine.get_indexer(left.values)\n rindexer = self._engine.get_indexer(right.values)\n\n # we want to return an indexer on the intervals\n # however, our keys could provide overlapping of multiple\n # intervals, so we iterate thru the indexers and construct\n # a set of indexers\n\n indexer = []\n n = len(self)\n\n for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):\n\n target_value = target[i]\n\n # matching on the lhs bound\n if (lhs != -1 and\n self.closed == 'right' and\n target_value.left == self[lhs].right):\n lhs += 1\n\n # matching on the lhs bound\n if (rhs != -1 and\n self.closed == 'left' and\n target_value.right == self[rhs].left):\n rhs -= 1\n\n # not found\n if lhs == -1 and rhs == -1:\n indexer.append(np.array([-1]))\n\n elif rhs == -1:\n\n indexer.append(np.arange(lhs, n))\n\n elif lhs == -1:\n\n # care about left/right closed here\n value = self[i]\n\n # target.closed same as self.closed\n if self.closed == target.closed:\n if target_value.left < value.left:\n indexer.append(np.array([-1]))\n continue\n\n # target.closed == 'left'\n elif self.closed == 'right':\n if target_value.left <= value.left:\n indexer.append(np.array([-1]))\n continue\n\n # target.closed == 'right'\n elif self.closed == 'left':\n if target_value.left <= value.left:\n indexer.append(np.array([-1]))\n continue\n\n indexer.append(np.arange(0, rhs + 1))\n\n else:\n indexer.append(np.arange(lhs, rhs + 1))\n\n return np.concatenate(indexer)\n\n @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n target = self._maybe_cast_indexed(ensure_index(target))\n return super(IntervalIndex, self).get_indexer_non_unique(target)\n\n @Appender(_index_shared_docs['where'])\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n values = np.where(cond, self.values, other)\n return self._shallow_copy(values)\n\n def delete(self, loc):\n \"\"\"\n Return a new IntervalIndex with passed 
location(-s) deleted\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n new_left = self.left.delete(loc)\n new_right = self.right.delete(loc)\n return self._shallow_copy(new_left, new_right)\n\n def insert(self, loc, item):\n \"\"\"\n Return a new IntervalIndex inserting new item at location. Follows\n Python list.append semantics for negative values. Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n if isinstance(item, Interval):\n if item.closed != self.closed:\n raise ValueError('inserted item must be closed on the same '\n 'side as the index')\n left_insert = item.left\n right_insert = item.right\n elif is_scalar(item) and isna(item):\n # GH 18295\n left_insert = right_insert = item\n else:\n raise ValueError('can only insert Interval objects and NA into '\n 'an IntervalIndex')\n\n new_left = self.left.insert(loc, left_insert)\n new_right = self.right.insert(loc, right_insert)\n return self._shallow_copy(new_left, new_right)\n\n def _as_like_interval_index(self, other):\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n if not isinstance(other, IntervalIndex):\n msg = ('the other index needs to be an IntervalIndex too, but '\n 'was type {}').format(other.__class__.__name__)\n raise TypeError(msg)\n elif self.closed != other.closed:\n msg = ('can only do set operations between two IntervalIndex '\n 'objects that are closed on the same side')\n raise ValueError(msg)\n return other\n\n def _concat_same_dtype(self, to_concat, name):\n \"\"\"\n assert that we all have the same .closed\n we allow a 0-len index here as well\n \"\"\"\n if not len({i.closed for i in to_concat if len(i)}) == 1:\n msg = ('can only append two IntervalIndex objects '\n 'that are closed on the same side')\n raise ValueError(msg)\n return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)\n\n @Appender(_index_shared_docs['take'] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True,\n fill_value=None, **kwargs):\n result = self._data.take(indices, axis=axis, allow_fill=allow_fill,\n fill_value=fill_value, **kwargs)\n attributes = self._get_attributes_dict()\n return self._simple_new(result, **attributes)\n\n def __getitem__(self, value):\n result = self._data[value]\n if isinstance(result, IntervalArray):\n return self._shallow_copy(result)\n else:\n # scalar\n return result\n\n # __repr__ associated methods are based on MultiIndex\n\n def _format_with_header(self, header, **kwargs):\n return header + list(self._format_native_types(**kwargs))\n\n def _format_native_types(self, na_rep='', quoting=None, **kwargs):\n \"\"\" actually format my specific types \"\"\"\n from pandas.io.formats.format import IntervalArrayFormatter\n return IntervalArrayFormatter(values=self,\n na_rep=na_rep,\n justify='all').get_result()\n\n def _format_data(self, name=None):\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\n 'display.max_seq_items') or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = '[]'\n elif n == 1:\n first = formatter(self[0])\n summary = '[{first}]'.format(first=first)\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = '[{first}, {last}]'.format(first=first, last=last)\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = 
[formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n summary = '[{head} ... {tail}]'.format(\n head=', '.join(head), tail=', '.join(tail))\n else:\n tail = [formatter(x) for x in self]\n summary = '[{tail}]'.format(tail=', '.join(tail))\n\n return summary + ',' + self._format_space()\n\n def _format_attrs(self):\n attrs = [('closed', repr(self.closed))]\n if self.name is not None:\n attrs.append(('name', default_pprint(self.name)))\n attrs.append(('dtype', \"'{dtype}'\".format(dtype=self.dtype)))\n return attrs\n\n def _format_space(self):\n space = ' ' * (len(self.__class__.__name__) + 1)\n return \"\\n{space}\".format(space=space)\n\n def argsort(self, *args, **kwargs):\n return np.lexsort((self.right, self.left))\n\n def equals(self, other):\n \"\"\"\n Determines if two IntervalIndex objects contain the same elements\n \"\"\"\n if self.is_(other):\n return True\n\n # if we can coerce to an II\n # then we can compare\n if not isinstance(other, IntervalIndex):\n if not is_interval_dtype(other):\n return False\n other = Index(getattr(other, '.values', other))\n\n return (self.left.equals(other.left) and\n self.right.equals(other.right) and\n self.closed == other.closed)\n\n @Appender(_interval_shared_docs['overlaps'] % _index_doc_kwargs)\n def overlaps(self, other):\n return self._data.overlaps(other)\n\n def _setop(op_name):\n def func(self, other):\n other = self._as_like_interval_index(other)\n\n # GH 19016: ensure set op will not return a prohibited dtype\n subtypes = [self.dtype.subtype, other.dtype.subtype]\n common_subtype = find_common_type(subtypes)\n if is_object_dtype(common_subtype):\n msg = ('can only do {op} between two IntervalIndex '\n 'objects that have compatible dtypes')\n raise TypeError(msg.format(op=op_name))\n\n result = getattr(self._multiindex, op_name)(other._multiindex)\n result_name = get_op_result_name(self, other)\n\n # GH 19101: ensure empty results have correct dtype\n if result.empty:\n result = result.values.astype(self.dtype.subtype)\n else:\n result = result.values\n\n return type(self).from_tuples(result, closed=self.closed,\n name=result_name)\n return func\n\n union = _setop('union')\n intersection = _setop('intersection')\n difference = _setop('difference')\n symmetric_difference = _setop('symmetric_difference')\n\n # TODO: arithmetic operations\n\n\nIntervalIndex._add_logical_methods_disabled()\n\n\ndef _is_valid_endpoint(endpoint):\n \"\"\"helper for interval_range to check if start/end are valid types\"\"\"\n return any([is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None])\n\n\ndef _is_type_compatible(a, b):\n \"\"\"helper for interval_range to check type compat of start/end/freq\"\"\"\n is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))\n return ((is_number(a) and is_number(b)) or\n (is_ts_compat(a) and is_ts_compat(b)) or\n (is_td_compat(a) and is_td_compat(b)) or\n com._any_none(a, b))\n\n\ndef interval_range(start=None, end=None, periods=None, freq=None,\n name=None, closed='right'):\n \"\"\"\n Return a fixed frequency IntervalIndex\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals\n end : numeric or datetime-like, default None\n Right bound for generating intervals\n periods : integer, default None\n Number of periods to generate\n freq : numeric, string, or DateOffset, default None\n The length of each interval. 
Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : string, default None\n Name of the resulting IntervalIndex\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Returns\n -------\n rng : IntervalIndex\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]\n closed='right', dtype='interval[int64]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],\n (2017-01-03, 2017-01-04]]\n closed='right', dtype='interval[datetime64[ns]]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]\n closed='right', dtype='interval[float64]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n periods=3, freq='MS')\n IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],\n (2017-03-01, 2017-04-01]]\n closed='right', dtype='interval[datetime64[ns]]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]\n closed='right',\n dtype='interval[float64]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]\n closed='both', dtype='interval[int64]')\n\n See Also\n --------\n IntervalIndex : an Index of intervals that are all closed on the same side.\n \"\"\"\n start = com.maybe_box_datetimelike(start)\n end = com.maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com._any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else 'D'\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError('Of the four parameters: start, end, periods, and '\n 'freq, exactly three must be specified')\n\n if not _is_valid_endpoint(start):\n msg = 'start must be numeric or datetime-like, got {start}'\n raise ValueError(msg.format(start=start))\n elif not _is_valid_endpoint(end):\n msg = 'end must be numeric or datetime-like, got {end}'\n raise ValueError(msg.format(end=end))\n\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods) and periods is not None:\n 
msg = 'periods must be a number, got {periods}'\n raise TypeError(msg.format(periods=periods))\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError:\n raise ValueError('freq must be numeric or convertible to '\n 'DateOffset, got {freq}'.format(freq=freq))\n\n # verify type compatibility\n if not all([_is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq)]):\n raise TypeError(\"start, end, freq need to be type compatible\")\n\n # +1 to convert interval count to breaks count (n breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n if is_number(endpoint):\n # force consistency between start/end/freq (lower end if freq skips it)\n if com._all_not_none(start, end, freq):\n end -= (end - start) % freq\n\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com._not_none(start, end, freq)):\n # np.linspace always produces float output\n breaks = maybe_downcast_to_dtype(breaks, 'int64')\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n range_func = date_range\n else:\n range_func = timedelta_range\n\n breaks = range_func(start=start, end=end, periods=periods, freq=freq)\n\n return IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n" ]
[ [ "numpy.nextafter", "pandas.core.arrays.interval.IntervalArray.from_tuples", "pandas.core.common._all_not_none", "pandas._libs.interval.IntervalMixin.__new__", "pandas.core.dtypes.cast.maybe_downcast_to_dtype", "numpy.where", "pandas.core.dtypes.cast.find_common_type", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.ops.get_op_result_name", "pandas.io.formats.format.IntervalArrayFormatter", "pandas.util._exceptions.rewrite_exception", "pandas.core.dtypes.common.ensure_platform_int", "numpy.concatenate", "pandas.core.dtypes.missing.isna", "pandas.core.dtypes.common.is_number", "pandas.core.indexes.multi.MultiIndex.from_arrays", "pandas.core.dtypes.common.is_interval_dtype", "pandas.core.dtypes.common.is_datetime64tz_dtype", "numpy.arange", "pandas.core.common.is_bool_indexer", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_object_dtype", "pandas.util._decorators.Appender", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.config.get_option", "numpy.array", "pandas.core.indexes.base.ensure_index", "numpy.lexsort", "pandas.compat.add_metaclass", "pandas.core.dtypes.common.is_scalar", "pandas.core.arrays.interval.IntervalArray", "pandas.core.dtypes.common.is_integer", "pandas.core.indexes.base.default_pprint", "pandas.core.common.count_not_none", "pandas.core.arrays.interval.IntervalArray.from_arrays", "pandas.core.common._not_none", "pandas._libs.interval.IntervalTree", "pandas.core.common.maybe_box_datetimelike", "numpy.timedelta64", "pandas._libs.interval.Interval", "pandas.core.dtypes.common.is_list_like", "pandas.core.indexes.base.Index", "pandas.core.dtypes.cast.infer_dtype_from_scalar", "pandas.tseries.frequencies.to_offset", "pandas.core.common._any_none", "pandas.core.dtypes.common.is_datetime_or_timedelta_dtype", "pandas.core.arrays.interval.IntervalArray.from_breaks", "numpy.linspace", "pandas.core.dtypes.common.is_float" ] ]
leandroaquinopereira/cnn-comparison
[ "65f45c4a44a364f97a500b38d9dced43c9f83f91" ]
[ "experiments/googlenet.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"GoogLeNet.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1LMqTT0uVAZgFbN6wyy5NlctyS9pb0AYo\n\n# References\n\nhttps://medium.com/mlearning-ai/implementation-of-googlenet-on-keras-d9873aeed83c\n\nhttps://www.analyticsvidhya.com/blog/2018/10/understanding-inception-network-from-scratch/\n\nhttps://gist.github.com/joelouismarino/a2ede9ab3928f999575423b9887abd14\n\nhttps://ai.plainenglish.io/googlenet-inceptionv1-with-tensorflow-9e7f3a161e87\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\n# Load the TensorBoard notebook extension\n# %load_ext tensorboard\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, Model\nimport datetime\nimport os\nfrom tensorflow.keras.layers import Convolution2D as Conv2D\nfrom tensorflow.keras.layers import Input, Dense, Concatenate\nfrom tensorflow.keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D\nfrom tensorflow.keras.layers import Flatten, Dropout\n\ntf.test.gpu_device_name() # GPU Test\n\nimage_size = (224, 224)\nbatch_size = 64\n\ntrain_ds = tf.keras.preprocessing.image_dataset_from_directory(\n \"/content/drive/MyDrive/dataset/train\",\n label_mode = \"categorical\",\n validation_split=0.20,\n subset=\"training\",\n seed=1337,\n image_size=image_size,\n batch_size=batch_size,\n)\nval_ds = tf.keras.preprocessing.image_dataset_from_directory(\n \"/content/drive/MyDrive/dataset/train\",\n label_mode = \"categorical\",\n validation_split=0.20,\n subset=\"validation\",\n seed=1337,\n image_size=image_size,\n batch_size=batch_size,\n)\ntest_ds = tf.keras.preprocessing.image_dataset_from_directory(\n \"/content/drive/MyDrive/dataset/test\",\n label_mode = \"categorical\",\n# validation_split=0.25,\n# subset=\"validation\",\n seed=1337,\n image_size=image_size,\n batch_size=batch_size,\n)\n\ndata_augmentation = keras.Sequential(\n [\n layers.RandomFlip(\"horizontal\"),\n layers.RandomRotation(0.1),\n ]\n)\n\ntrain_ds = train_ds.prefetch(buffer_size=32)\nval_ds = val_ds.prefetch(buffer_size=32)\n\naugmented_train_ds = train_ds.map(\n lambda x, y: (data_augmentation(x, training=True), y))\n\ndef Inception_block(input_layer, f1, f2_conv1, f2_conv3, f3_conv1, f3_conv5, f4): \n # Input: \n # - f1: number of filters of the 1x1 convolutional layer in the first path\n # - f2_conv1, f2_conv3 are number of filters corresponding to the 1x1 and 3x3 convolutional layers in the second path\n # - f3_conv1, f3_conv5 are the number of filters corresponding to the 1x1 and 5x5 convolutional layer in the third path\n # - f4: number of filters of the 1x1 convolutional layer in the fourth path\n\n # 1st path:\n path1 = Conv2D(filters=f1, kernel_size = (1,1), padding = 'same', activation = 'relu')(input_layer)\n\n # 2nd path\n path2 = Conv2D(filters = f2_conv1, kernel_size = (1,1), padding = 'same', activation = 'relu')(input_layer)\n path2 = Conv2D(filters = f2_conv3, kernel_size = (3,3), padding = 'same', activation = 'relu')(path2)\n\n # 3rd path\n path3 = Conv2D(filters = f3_conv1, kernel_size = (1,1), padding = 'same', activation = 'relu')(input_layer)\n path3 = Conv2D(filters = f3_conv5, kernel_size = (5,5), padding = 'same', activation = 'relu')(path3)\n\n # 4th path\n path4 = MaxPooling2D((3,3), strides= (1,1), padding = 'same')(input_layer)\n path4 = Conv2D(filters = f4, kernel_size = (1,1), padding = 'same', activation = 'relu')(path4)\n\n output_layer = Concatenate(axis = 
-1)([path1, path2, path3, path4])\n\n return output_layer\n\ndef GoogLeNet():\n # input layer \n input_layer = Input(shape = (224, 224, 3))\n\n # convolutional layer: filters = 64, kernel_size = (7,7), strides = 2\n X = Conv2D(filters = 64, kernel_size = (7,7), strides = 2, padding = 'valid', activation = 'relu')(input_layer)\n\n # max-pooling layer: pool_size = (3,3), strides = 2\n X = MaxPooling2D(pool_size = (3,3), strides = 2)(X)\n\n # convolutional layer: filters = 64, strides = 1\n X = Conv2D(filters = 64, kernel_size = (1,1), strides = 1, padding = 'same', activation = 'relu')(X)\n\n # convolutional layer: filters = 192, kernel_size = (3,3)\n X = Conv2D(filters = 192, kernel_size = (3,3), padding = 'same', activation = 'relu')(X)\n\n # max-pooling layer: pool_size = (3,3), strides = 2\n X = MaxPooling2D(pool_size= (3,3), strides = 2)(X)\n\n # 1st Inception block\n X = Inception_block(X, f1 = 64, f2_conv1 = 96, f2_conv3 = 128, f3_conv1 = 16, f3_conv5 = 32, f4 = 32)\n\n # 2nd Inception block\n X = Inception_block(X, f1 = 128, f2_conv1 = 128, f2_conv3 = 192, f3_conv1 = 32, f3_conv5 = 96, f4 = 64)\n\n # max-pooling layer: pool_size = (3,3), strides = 2\n X = MaxPooling2D(pool_size= (3,3), strides = 2)(X)\n\n # 3rd Inception block\n X = Inception_block(X, f1 = 192, f2_conv1 = 96, f2_conv3 = 208, f3_conv1 = 16, f3_conv5 = 48, f4 = 64)\n\n # Extra network 1:\n X1 = AveragePooling2D(pool_size = (5,5), strides = 3)(X)\n X1 = Conv2D(filters = 128, kernel_size = (1,1), padding = 'same', activation = 'relu')(X1)\n X1 = Flatten()(X1)\n X1 = Dense(1024, activation = 'relu')(X1)\n X1 = Dropout(0.7)(X1)\n X1 = Dense(4, activation = 'softmax')(X1) # 5\n\n \n # 4th Inception block\n X = Inception_block(X, f1 = 160, f2_conv1 = 112, f2_conv3 = 224, f3_conv1 = 24, f3_conv5 = 64, f4 = 64)\n\n # 5th Inception block\n X = Inception_block(X, f1 = 128, f2_conv1 = 128, f2_conv3 = 256, f3_conv1 = 24, f3_conv5 = 64, f4 = 64)\n\n # 6th Inception block\n X = Inception_block(X, f1 = 112, f2_conv1 = 144, f2_conv3 = 288, f3_conv1 = 32, f3_conv5 = 64, f4 = 64)\n\n # Extra network 2:\n X2 = AveragePooling2D(pool_size = (5,5), strides = 3)(X)\n X2 = Conv2D(filters = 128, kernel_size = (1,1), padding = 'same', activation = 'relu')(X2)\n X2 = Flatten()(X2)\n X2 = Dense(1024, activation = 'relu')(X2)\n X2 = Dropout(0.7)(X2)\n X2 = Dense(4, activation = 'softmax')(X2)\n \n \n # 7th Inception block\n X = Inception_block(X, f1 = 256, f2_conv1 = 160, f2_conv3 = 320, f3_conv1 = 32, \n f3_conv5 = 128, f4 = 128)\n\n # max-pooling layer: pool_size = (3,3), strides = 2\n X = MaxPooling2D(pool_size = (3,3), strides = 2)(X)\n\n # 8th Inception block\n X = Inception_block(X, f1 = 256, f2_conv1 = 160, f2_conv3 = 320, f3_conv1 = 32, f3_conv5 = 128, f4 = 128)\n\n # 9th Inception block\n X = Inception_block(X, f1 = 384, f2_conv1 = 192, f2_conv3 = 384, f3_conv1 = 48, f3_conv5 = 128, f4 = 128)\n\n # Global Average pooling layer \n X = GlobalAveragePooling2D(name = 'GAPL')(X)\n\n # Dropoutlayer \n X = Dropout(0.4)(X)\n\n # output layer \n X = Dense(4, activation = 'softmax')(X)\n \n # model\n model = Model(input_layer, [X, X1, X2], name = 'GoogLeNet') #\n\n return model\n\n# Commented out IPython magic to ensure Python compatibility.\n# %tensorboard --logdir logs\n\nepochs = 50\n\nlogdir = os.path.join(\"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n\ncallbacks = [\n keras.callbacks.ModelCheckpoint(\"save_at_{epoch}.h5\"),\n tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1),\n 
tf.keras.callbacks.EarlyStopping(patience=3),\n]\n\nmodel = GoogLeNet()\n\nmodel.compile(\n optimizer=keras.optimizers.Adam(1e-3),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy', 'binary_accuracy', 'categorical_accuracy', \n 'binary_crossentropy', 'categorical_crossentropy', \n 'kullback_leibler_divergence', 'poisson'],\n)\nmodel.fit(\n augmented_train_ds, \n epochs=epochs, \n callbacks=callbacks, \n validation_data=val_ds, \n)\n\nmodel.evaluate(test_ds)" ]
[ [ "tensorflow.keras.callbacks.TensorBoard", "tensorflow.test.gpu_device_name", "tensorflow.keras.layers.Input", "tensorflow.keras.preprocessing.image_dataset_from_directory", "tensorflow.keras.layers.AveragePooling2D", "tensorflow.keras.layers.Flatten", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.keras.Model", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.RandomRotation", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.layers.Convolution2D", "tensorflow.keras.layers.RandomFlip", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Concatenate", "tensorflow.keras.callbacks.EarlyStopping" ] ]
Babdus/Protolanguage
[ "050aeed5e7ac5905515a887dcbab434457ae2f47" ]
[ "Code/IPA/parse_IPA_single_symbols.py" ]
[ "import sys\nimport pandas as pd\nfrom collections import Counter\n\ndef main(argv):\n df = pd.io.parsers.read_csv(argv[0],index_col=0)\n # print(df)\n #\n # IPA_dict = {}\n # for i, row in df.iterrows():\n # temp_dict = {}\n # for col_name in row.index:\n # print(i, col_name, row[col_name])\n # if not pd.isnull(row[col_name]):\n # temp_dict[col_name] = int(row[col_name])\n # IPA_dict[i] = temp_dict\n # print(IPA_dict)\n print(df.columns.values.tolist())\n\n # df_dict_of_dicts = df.to_dict('index')\n # print(df_dict_of_dicts)\n # IPA_dict = {}\n # for df_dict in df_dict_of_dicts:\n # IPA_dict[df_dict['Symbol']] =\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" ]
[ [ "pandas.io.parsers.read_csv" ] ]
ashoknar/TensorNetwork
[ "82636b75a0c53b5447c84d9a4e85226fe0e6f43a" ]
[ "tensornetwork/tests/split_node_symmetric_test.py" ]
[ "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensornetwork as tn\nimport pytest\nimport numpy as np\nfrom tensornetwork.block_sparse import BlockSparseTensor, Index\nfrom tensornetwork.block_sparse.charge import charge_equal, BaseCharge, U1Charge\nfrom tensornetwork.block_sparse.utils import _find_diagonal_sparse_blocks\n\n\ndef get_random(shape, num_charges, dtype=np.float64):\n R = len(shape)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, shape[n])),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = list(np.full(R, fill_value=False, dtype=np.bool))\n indices = [Index(charges[n], flows[n]) for n in range(R)]\n return BlockSparseTensor.randn(indices=indices, dtype=dtype)\n\n\ndef get_zeros(shape, num_charges, dtype=np.float64):\n R = len(shape)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, shape[n])),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = list(np.full(R, fill_value=False, dtype=np.bool))\n indices = [Index(charges[n], flows[n]) for n in range(R)]\n return BlockSparseTensor.zeros(indices=indices, dtype=dtype)\n\n\n@pytest.mark.parametrize(\"dtype\", [np.float64, np.complex128])\n@pytest.mark.parametrize(\"num_charges\", [1, 2, 3])\ndef test_split_node(dtype, num_charges):\n np.random.seed(111)\n a = tn.Node(\n get_zeros((2, 3, 4, 5, 6), num_charges, dtype), backend='symmetric')\n\n left_edges = []\n for i in range(3):\n left_edges.append(a[i])\n right_edges = []\n for i in range(3, 5):\n right_edges.append(a[i])\n left, right, _ = tn.split_node(a, left_edges, right_edges)\n tn.check_correct({left, right})\n actual = left @ right\n np.testing.assert_allclose(actual.tensor.shape, (2, 3, 4, 5, 6))\n np.testing.assert_allclose(a.tensor.shape, (2, 3, 4, 5, 6))\n np.testing.assert_allclose(left.tensor.data, 0)\n np.testing.assert_allclose(right.tensor.data, 0)\n assert np.all([\n charge_equal(a.tensor._charges[n], actual.tensor._charges[n])\n for n in range(len(a.tensor._charges))\n ])\n\n\n@pytest.mark.parametrize(\"dtype\", [np.float64, np.complex128])\n@pytest.mark.parametrize(\"num_charges\", [1, 2, 3])\ndef test_split_node_mixed_order(dtype, num_charges):\n np.random.seed(111)\n a = tn.Node(\n get_zeros((2, 3, 4, 5, 6), num_charges, dtype), backend='symmetric')\n\n left_edges = []\n for i in [0, 2, 4]:\n left_edges.append(a[i])\n right_edges = []\n for i in [1, 3]:\n right_edges.append(a[i])\n left, right, _ = tn.split_node(a, left_edges, right_edges)\n\n tn.check_correct({left, right})\n actual = left @ right\n np.testing.assert_allclose(actual.tensor.shape, (2, 4, 6, 3, 5))\n np.testing.assert_allclose(a.tensor.shape, (2, 3, 4, 5, 6))\n\n np.testing.assert_allclose(left.tensor.data, 0)\n np.testing.assert_allclose(right.tensor.data, 0)\n np.testing.assert_allclose(left.tensor.shape[0:3], (2, 4, 6))\n np.testing.assert_allclose(right.tensor.shape[1:], (3, 5))\n new_order = [0, 2, 4, 1, 3]\n assert np.all([\n 
charge_equal(a.tensor.charges[new_order[n]][0],\n actual.tensor.charges[n][0])\n for n in range(len(a.tensor._charges))\n ])\n\n\n@pytest.mark.parametrize(\"dtype\", [np.float64, np.complex128])\n@pytest.mark.parametrize(\"num_charges\", [1, 2, 3])\ndef test_svd_consistency(dtype, num_charges):\n np.random.seed(111)\n original_tensor = get_random((20, 20), num_charges, dtype)\n node = tn.Node(original_tensor, backend='symmetric')\n u, vh, _ = tn.split_node(node, [node[0]], [node[1]])\n final_node = tn.contract_between(u, vh)\n np.testing.assert_allclose(\n final_node.tensor.data, original_tensor.data, rtol=1e-6)\n assert np.all([\n charge_equal(final_node.tensor._charges[n], original_tensor._charges[n])\n for n in range(len(original_tensor._charges))\n ])\n" ]
[ [ "numpy.random.seed", "numpy.testing.assert_allclose", "numpy.full", "numpy.random.randint" ] ]
sandeep-krishnamurthy/keras-mxnet-tests
[ "94772497798b98231202c26ebd49027128e41ca5" ]
[ "keras1.2/nightly_test/test_variational_autoencoder.py" ]
[ "'''\nThis code is forked from https://github.com/fchollet/keras/blob/master/examples/\nand modified to use as MXNet-Keras integration testing for functionality and sanity performance\nbenchmarking.\n\nThis script demonstrates how to build a variational autoencoder with Keras.\n\nReference: \"Auto-Encoding Variational Bayes\" https://arxiv.org/abs/1312.6114\n'''\nimport numpy as np\nimport os\nfrom scipy.stats import norm\n\nfrom keras.layers import Input, Dense, Lambda\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import objectives\nfrom keras.datasets import mnist\n\n# Imports for benchmarking\nfrom utils.profiler import profile\nfrom utils.model_util import make_model\n\n# Imports for assertions\nfrom utils.assertion_util import assert_results\n\nbatch_size = 100\noriginal_dim = 784\nlatent_dim = 2\nintermediate_dim = 256\n# Ideal number of epochs is 50\nnb_epoch = 10\nepsilon_std = 1.0\n\nx = Input(batch_shape=(batch_size/int(os.environ['GPU_NUM']), original_dim))\nh = Dense(intermediate_dim, activation='relu')(x)\nz_mean = Dense(latent_dim)(h)\nz_log_var = Dense(latent_dim)(h)\n\n\ndef sampling(args):\n z_mean, z_log_var = args\n epsilon = K.random_normal(shape=(batch_size/int(os.environ['GPU_NUM']), latent_dim), mean=0.,\n std=epsilon_std)\n return z_mean + K.exp(z_log_var / 2) * epsilon\n\n# note that \"output_shape\" isn't necessary with the TensorFlow backend\nz = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])\n\n# we instantiate these layers separately so as to reuse them later\ndecoder_h = Dense(intermediate_dim, activation='relu')\ndecoder_mean = Dense(original_dim, activation='sigmoid')\nh_decoded = decoder_h(z)\nx_decoded_mean = decoder_mean(h_decoded)\n\n#Result dictionary\nglobal ret_dict\nret_dict = dict()\n\ndef vae_loss(x, x_decoded_mean):\n xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)\n kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n return xent_loss + kl_loss\n\nvae = Model(x, x_decoded_mean)\nvae = make_model(vae, optimizer='rmsprop', loss=vae_loss)\n\n# train the VAE on MNIST digits\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\nx_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\n\ndef train_func():\n history = vae.fit(x_train, x_train,\n shuffle=True,\n nb_epoch=nb_epoch,\n batch_size=batch_size,\n validation_data=(x_test, x_test))\n ret_dict[\"training_accuracy\"] = history.history['loss'][-1]\n ret_dict[\"test_accuracy\"] = history.history['val_loss'][-1]\n\ndef test_variational_autoencoder():\n ret = profile(train_func)\n\n ret_dict[\"training_time\"] = str(ret[0]) + ' sec'\n ret_dict[\"max_memory\"] = str(ret[1]) + ' MB'\n\n print(\"Test variational autoencoder\")\n print(ret_dict)\n # TODO: ASSERT results. Above tests whether it is functional. Assert statements will confirm accuracy/memory/speed.\n" ]
[ [ "numpy.prod" ] ]
jlpalomino/matplotcheck
[ "b225e51d645f6a6b3b7a6db139350c9ff4e22451" ]
[ "matplotcheck/timeseries.py" ]
[ "import numpy as np\nimport matplotlib.dates as mdates\nfrom dateutil.relativedelta import relativedelta\nimport math\n\nfrom .base import PlotTester\n\n\nclass TimeSeriesTester(PlotTester):\n \"\"\"A PlotTester for 2 dimensional time series plots.\n\n Parameters\n ----------\n ax: ```matplotlib.axes.Axes``` object\n The plot to be tested.\n\n \"\"\"\n\n def __init__(self, ax):\n \"\"\"Initialize the time series tester\"\"\"\n super(TimeSeriesTester, self).__init__(ax)\n\n def assert_xticks_reformatted(\n self,\n tick_size=\"large\",\n loc_exp=None,\n m=\"x ticks have not been reformatted properly\",\n ):\n \"\"\"Asserts that Axes ax xtick have been reformatted as denoted by\n tick_size and loc_exp, with error message m\n\n Parameters\n ----------\n tick_size: must be one of the following ['large','small']\n 'large': if testing large ticks\n 'small': if testing small ticks\n loc_exp: string ['decade','year', 'month', 'week', 'day']\n 'decade': if tick should be shown every ten years\n 'year': if tick should be shown every new year\n 'month': if tick should be shown every new month\n 'week': if tick should be shown every new week\n 'day': if tick should be shown every new day\n None: if no tick format has been specified. This will automatically assert True\n m: string error message if assertion is not met\n \"\"\"\n if loc_exp:\n if tick_size == \"large\":\n test_date = (\n self.ax.xaxis.get_major_formatter()\n .format_data(735141)\n .replace(\" \", \"\")\n .lower()\n ) # September 30, 2013\n elif tick_size == \"small\":\n test_date = (\n self.ax.xaxis.get_minor_formatter()\n .format_data(735141)\n .replace(\" \", \"\")\n .lower()\n ) # September 30, 2013\n else:\n raise ValueError(\n \"tick_size must be on of the following string ['large', 'small']\"\n )\n if loc_exp == \"decade\" or loc_exp == \"year\":\n accepted_responses = [\"2013\"]\n elif loc_exp == \"month\":\n accepted_responses = [\"sep\", \"september\"]\n elif loc_exp == \"week\" or loc_exp == \"day\":\n accepted_responses = [\"sep30\", \"september30\"]\n else:\n raise ValueError(\n \"\"\"loc_exp must be one of the following strings ['decade', \n 'year', 'month', 'week', 'day', None]\"\"\"\n )\n assert test_date in accepted_responses, m\n\n def assert_xticks_locs(\n self,\n tick_size=\"large\",\n loc_exp=None,\n m=\"Incorrect X axis tick locations\",\n ):\n \"\"\"Asserts that Axes ax has xaxis ticks as noted by tick_size and loc_exp\n\n Parameters\n ----------\n tick_size: str, opts: ['large','small']\n 'large': if testing large ticks\n 'small': if testing small ticks\n loc_exp: string ['decade','year', 'month', 'week', 'day']\n 'decade': if tick should be shown every ten years\n 'year': if tick should be shown every new year\n 'month': if tick should be shown every new month\n 'week': if tick should be shown every new week\n 'day': if tick should be shown every new day\n None: if no tick location has been specified. 
This will\n automatically assert True\n m: error message if assertion is not met\n \"\"\"\n\n if loc_exp:\n xlims = [mdates.num2date(l) for l in self.ax.get_xlim()]\n if tick_size == \"large\":\n ticks = self.ax.xaxis.get_majorticklocs()\n elif tick_size == \"small\":\n ticks = self.ax.xaxis.get_minorticklocs()\n else:\n raise ValueError(\n \"\"\"\"Tick_size must be one of the following strings \n ['large', 'small']\"\"\"\n )\n\n if loc_exp == \"decade\":\n inc = relativedelta(years=10)\n elif loc_exp == \"year\":\n inc = relativedelta(years=1)\n elif loc_exp == \"month\":\n inc = relativedelta(months=1)\n elif loc_exp == \"week\":\n inc = relativedelta(days=7)\n elif loc_exp == \"day\":\n inc = relativedelta(days=1)\n else:\n raise ValueError(\n \"\"\"\"loc_exp must be one of the following strings ['decade', \n 'year', 'month', 'week', 'day'] or None\"\"\"\n )\n\n start, end = mdates.num2date(ticks[0]), mdates.num2date(ticks[-1])\n assert start < xlims[0] + inc, \"Tick locators do not cover x axis\"\n assert end > xlims[1] - inc, \"Tick locators do not cover x axis\"\n ticks_exp = [\n d.toordinal() for d in self._my_range(start, end, inc)\n ]\n np.testing.assert_equal(ticks, ticks_exp, m)\n\n def _my_range(self, start, end, step):\n \"\"\"helper function for assert_xticks_locs\n alternative to range to use in a for loop. my_range allows for dataype\n other than ints. both start and end are included in the loop.\n\n Parameters\n ----------\n start: value to start while loop at\n end: last value to run in while loop\n step: about to increase between cycles in loop\n start, end, and step must be comparable datatypes.\n \"\"\"\n while start <= end:\n yield start\n start += step\n\n def assert_no_data_value(self, nodata=999.99):\n \"\"\"Asserts nodata values have been removed from the data, when x is a\n datetime with error message m\n\n Parameters\n ----------\n nodata: float or int\n a nodata value that will be searched for in dataset\n xtime: boolean\n does the x-axis contains datetime values?\n \"\"\"\n if nodata != None:\n xy = self.get_xy(xtime=False)\n assert ~np.isin(\n nodata, xy[\"x\"]\n ), \"Values of {0} have been found in data. Be sure to remove no data values\".format(\n nodata\n )\n assert ~np.isin(\n nodata, xy[\"y\"]\n ), \"Values of {0} have been found in data. Be sure to remove no data values\".format(\n nodata\n )\n\n def assert_xdata_date(\n self, x_exp, m=\"X-axis is not in appropriate date format\"\n ):\n \"\"\"Asserts x-axis data has been parsed into datetime objects.\n Matplotlib changes datetime to floats representing number of days since\n day 0. If you are using dates prior to year 270, this assertion will\n fail.\n\n Parameters\n ----------\n x_exp: expected x_axis values, must be in a datetime format\n \"\"\"\n x_data = [math.floor(d) for d in self.get_xy(xtime=False)[\"x\"]]\n x_exp = [d.toordinal() for d in x_exp] # convert to days elapsed\n assert np.array_equal(sorted(x_exp), sorted(x_data)), m\n" ]
[ [ "matplotlib.dates.num2date", "numpy.isin", "numpy.testing.assert_equal" ] ]
wwoods/job_stream
[ "7bed3d9d42b8a08bcc92dfbc632f389d6ecc9b7d" ]
[ "python/job_stream/test/test_baked.py" ]
[ "\nfrom .common import ExecuteError, JobStreamTest\n\nimport pandas as pd\n\n\nclass TestBaked(JobStreamTest):\n OUT_PATH = \"/tmp/js_out.csv\"\n\n @classmethod\n def setUpClass(cls):\n src = \"\"\"\n from job_stream.inline import getCpuCount, Work\n with Work([1]) as w:\n @w.job\n def handle(r):\n print(getCpuCount())\n \"\"\"\n inst = TestBaked()\n cls._cpus = int(inst.executePy(src)[0].strip())\n\n\n def test_sweep(self):\n # Integration test; show that everything in general works\n src = \"\"\"\n from job_stream.baked import sweep\n import numpy as np\n\n with sweep({{ 'a': np.arange(10) }}, -100, output='{}') as w:\n @w.job\n def square(id, trial, a):\n return {{ 'value': a*a + np.random.random()*8 }}\n \"\"\".format(self.OUT_PATH)\n\n # Since this is a thing, ensure 5 failures to fail\n for nfail in range(5, -1, -1):\n try:\n r = self.executePy(src)\n\n df = pd.read_csv(self.OUT_PATH).set_index('id')\n print(df.to_string())\n self.assertEqual(10, len(df))\n self.assertTrue(\n (\n (df['value_dev'] > 0.5)\n & (df['value_dev'] < 5.)\n ).all())\n for i in range(10):\n err = df.loc[i]['value_err']\n self.assertLess(i*i+4 - 3.*err, df.loc[i]['value'])\n self.assertGreater(i*i+4 + 3.*err, df.loc[i]['value'])\n self.assertLess(df.loc[0]['value_err'], df.loc[9]['value_err'])\n except:\n if not nfail:\n raise\n else:\n break\n\n\n def test_sweep_list(self):\n # Ensure that a list of param dictionaries works\n src = \"\"\"\n from job_stream.baked import sweep\n from job_stream import inline\n inline.getCpuCount = lambda: 8\n with sweep([ {{ 'a': 8 }}, {{ 'a': 9 }} ], output='{}') as s:\n @s.job\n def handle(id, trial, a):\n return {{ 'value': a }}\n \"\"\".format(self.OUT_PATH)\n self.executePy(src)\n df = pd.read_csv(self.OUT_PATH).set_index('id')\n self.assertEqual(2, len(df))\n self.assertEqual(4, df.loc[0]['trials'])\n self.assertEqual(8, df.loc[0]['value'])\n self.assertEqual(9, df.loc[1]['value'])\n\n\n def test_sweep_list_mismatch(self):\n # Mismatched parameters should fail\n src = \"\"\"\n from job_stream.baked import sweep\n with sweep([ { 'a': 8 }, { 'b': 9 } ]) as s:\n @s.job\n def handle(trial, id, a=8, b=9):\n return { 'val': a }\n \"\"\"\n try:\n self.executePy(src)\n except Exception as e:\n self.assertTrue('must have same keys; found ' in e.stderr)\n self.assertTrue(' against ' in e.stderr)\n self.assertTrue(\"{'a'}\" in e.stderr)\n self.assertTrue(\"{'b'}\" in e.stderr)\n else:\n self.fail(\"Nothing raised\")\n\n\n def test_sweep_multiple(self):\n # Ensure that emitting Multiple() from a sweep pipeline fails\n src = \"\"\"\n from job_stream.baked import sweep\n from job_stream.inline import Multiple\n with sweep() as s:\n @s.job\n def handle(id, trial):\n return Multiple([ { 'a': 0 }, { 'a': 1 } ])\n \"\"\"\n try:\n self.executePy(src)\n except Exception as e:\n self.assertTrue('cannot emit Multiple' in str(e.stderr))\n else:\n self.fail(\"Nothing raised\")\n\n\n def test_sweep_minTrials(self):\n # Test minTrials behavior\n src = \"\"\"\n # By using something that is NOT random, the minimum number of\n # trials is guaranteed to be sufficient.\n from job_stream.baked import sweep\n from job_stream import inline\n inline.getCpuCount = lambda: {cpus}\n with sweep({{ 'c': range({combos1}), 'c2': range({combos2}) }},\n output='{out}', trials={trials}) as s:\n @s.job\n def handle(id, trial, c, c2):\n return {{ 'value': 0. 
}}\n \"\"\"\n def getTrials(trials, cpus, combos1=1, combos2=1):\n self.executePy(src.format(trials=trials, cpus=cpus,\n out=self.OUT_PATH, combos1=combos1, combos2=combos2))\n df = pd.read_csv(self.OUT_PATH).set_index('id')\n self.assertEqual(combos1 * combos2, len(df))\n return df.loc[0]['trials']\n\n self.assertEqual(3, getTrials(0, 1))\n self.assertEqual(8, getTrials(0, 8))\n self.assertEqual(3, getTrials(-3, 8))\n self.assertEqual(8, getTrials(-10, 8))\n self.assertEqual(1, getTrials(1, 100))\n\n # With multiple parameter combinations\n self.assertEqual(3, getTrials(0, 1, 2, 2))\n self.assertEqual(8, getTrials(0, 32, 2, 2))\n self.assertEqual(2, getTrials(-2, 32, 2, 2))\n\n\n def test_sweep_trialsCount(self):\n # Ensure min/max work\n src = \"\"\"\n from job_stream.baked import sweep\n from job_stream import inline\n inline.getCpuCount = lambda: 1\n trialsParms = {{}}\n if {min} > 0:\n trialsParms['min'] = {min}\n if {max} > 0:\n trialsParms['max'] = {max}\n with sweep(trials={trials}, trialsParms=trialsParms,\n output='{out}') as s:\n @s.job\n def handle(id, trial):\n return {{ 'v': trial if {dev} else 1. }}\n \"\"\"\n def getTrials(min, max, testMax, trials=0):\n self.executePy(src.format(min=min, max=max, dev=testMax,\n trials=trials, out=self.OUT_PATH))\n df = pd.read_csv(self.OUT_PATH).set_index('id')\n return df.loc[0]['trials']\n\n self.assertEqual(3, getTrials(3, 8, False))\n self.assertEqual(8, getTrials(3, 8, True))\n self.assertEqual(8, getTrials(8, 8, False))\n # One more to make sure a negative trials specification also is max\n self.assertEqual(8, getTrials(-1, -1, True, -8))\n\n # min cannot be > max\n with self.assertRaises(ExecuteError):\n getTrials(9, 8, False)\n # if min specified, trials cannot be positive\n with self.assertRaises(ExecuteError):\n getTrials(1, -1, True, 3)\n # but can be negative\n self.assertEqual(2, getTrials(2, -1, False, -8))\n\n # max cannot be specified with trials != 0\n with self.assertRaises(ExecuteError):\n getTrials(-1, 1, True, 3)\n with self.assertRaises(ExecuteError):\n getTrials(-1, 1, True, -3)\n\n" ]
[ [ "pandas.read_csv" ] ]
hillarypan/plato
[ "181ad0e0e00b0b7486fa364200a8187d879a450e" ]
[ "plato/draw/blender/Scene.py" ]
[ "from ... import draw\nimport numpy as np\nimport bpy\n\nclass Scene(draw.Scene):\n __doc__ = draw.Scene.__doc__\n\n RENDER_COUNT = 0\n\n def render(self):\n new_scene = bpy.data.scenes.new('plato_{}'.format(self.RENDER_COUNT))\n (width, height) = self.size_pixels\n new_scene.render.resolution_x = width\n new_scene.render.resolution_y = height\n new_scene.render.alpha_mode = 'TRANSPARENT'\n\n kwargs = dict(scene=new_scene, translation=self.translation,\n rotation=self.rotation)\n\n self.render_camera(**kwargs)\n self.render_lights(**kwargs)\n\n for (i, prim) in enumerate(self._primitives):\n prim.render(suffix=str(i), **kwargs)\n\n self.RENDER_COUNT += 1\n return new_scene\n\n def render_camera(self, scene, rotation=(1, 0, 0, 0), **kwargs):\n dz = np.sqrt(np.sum(self.size**2))\n rotation = np.asarray(rotation)\n\n camera_params = bpy.data.cameras.new('plato_camera')\n camera_params.type = 'ORTHO'\n camera_params.ortho_scale = np.max(self.size/self.zoom)\n camera_object = bpy.data.objects.new('plato_camera', object_data=camera_params)\n camera_object.location = (0, 0, dz)\n scene.objects.link(camera_object)\n scene.camera = camera_object\n\n def render_lights(self, scene, **kwargs):\n if 'ambient_light' in self.enabled_features:\n pass\n\n if 'directional_light' in self.enabled_features:\n config = self.get_feature_config('directional_light')\n lights = config.get('value', (.25, .5, -1))\n lights = np.atleast_2d(lights).astype(np.float32)\n\n for (i, light) in enumerate(lights):\n name = 'light_{}'.format(i)\n\n magnitude = np.linalg.norm(light)\n direction = light/magnitude\n\n light_params = bpy.data.lamps.new(name=name, type='SUN')\n light_params.color = (magnitude, magnitude, magnitude)\n light_object = bpy.data.objects.new(name, object_data=light_params)\n scene.objects.link(light_object)\n\n def show(self):\n blender_scene = self.render()\n bpy.context.screen.scene = blender_scene\n return blender_scene\n\n def save(self, filename):\n blender_scene = self.show()\n if filename.endswith('.png'):\n blender_scene.render.filepath = filename\n bpy.ops.render.render('INVOKE_DEFAULT', write_still=True)\n else:\n bpy.ops.wm.save_as_mainfile(filepath=filename)\n return blender_scene\n" ]
[ [ "numpy.max", "numpy.linalg.norm", "numpy.asarray", "numpy.sum", "numpy.atleast_2d" ] ]
asparsh/cuddly-spoon
[ "63d0a481c7274cb572ad5340d6dd2218a01d1816" ]
[ "train_neural_network.py" ]
[ "import torch\nimport torch.nn as nn\n\ndef training_routine(net,train_loader,n_iters,gpu):\n \n optimizer = torch.optim.SGD(net.parameters(),lr=0.0001,momentum=0.8)\n criterion = nn.BCELoss()\n loss_list = []\n index_list = []\n \n for j in range(n_iters): \n running_loss = 0\n full_accuracy = 0\n \n for index, (data, labels) in enumerate(train_loader):\n \n net.train()\n optimizer.zero_grad()\n \n if gpu:\n data,labels = data.float().cuda(),labels.long().cuda()\n net = net.cuda()\n else:\n data,labels = data.float(),labels.float()\n \n train_output = net(data)\n train_loss = criterion(train_output,labels.unsqueeze(1))\n running_loss = running_loss + train_loss.cpu().detach().numpy()\n train_loss.backward()\n optimizer.step()\n \n net.eval()\n train_prediction = train_output.cpu().detach().argmax(dim=1)\n train_accuracy = (train_prediction.numpy()==labels.cpu().numpy()).mean() \n full_accuracy += train_accuracy\n \n loss_list.append(float(running_loss)/index+1)\n index_list.append(index+1)\n print(\"---------------------------------------------------------------------\")\n print(\"Epoch: \", j+1)\n print(\"---------------------------------------------------------------------\")\n print(\"Running Loss: \", float(running_loss)/index+1)\n \n net = net.cpu()\n return net, loss_list, index_list" ]
[ [ "torch.nn.BCELoss" ] ]
clowdr/clowdr
[ "346263ee806ae7c992a37dfc9bd9b87db3fa48c3" ]
[ "clowdr/task.py" ]
[ "#!/usr/bin/env python\n#\n# This software is distributed with the MIT license:\n# https://github.com/gkiar/clowdr/blob/master/LICENSE\n#\n# clowdr/task.py\n# Created by Greg Kiar on 2018-02-28.\n# Email: gkiar@mcin.ca\n\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom time import mktime, localtime\nfrom subprocess import PIPE\nimport multiprocessing as mp\nimport numpy as np\nimport os.path as op\nimport subprocess\nimport psutil\nimport time\nimport json\nimport csv\nimport os\nimport re\nimport warnings\n\n\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n\nimport pandas as pd\n\nimport boutiques as bosh\nfrom clowdr import utils\n\n\nclass TaskHandler:\n def __init__(self, taskfile, **kwargs):\n self.manageTask(taskfile, **kwargs)\n\n def manageTask(self, taskfile, provdir=None, verbose=False, **kwargs):\n # Get metadata\n if provdir is None:\n self.localtaskdir = \"/clowtask/\"\n else:\n self.localtaskdir = provdir\n\n # The below grabs an ID from the form: /some/path/to/fname-#.ext\n self.task_id = taskfile.split('.')[0].split('-')[-1]\n\n self.localtaskdir = op.join(self.localtaskdir, \"clowtask_\"+self.task_id)\n if not op.exists(self.localtaskdir):\n os.makedirs(self.localtaskdir)\n\n if(verbose):\n print(\"Fetching metadata...\", flush=True)\n remotetaskdir = op.dirname(taskfile)\n taskfile = utils.get(taskfile, self.localtaskdir)[0]\n\n # Parse metadata\n taskinfo = json.load(open(taskfile))\n descriptor = taskinfo['tool']\n invocation = taskinfo['invocation']\n input_data = taskinfo['dataloc']\n output_loc = utils.truepath(taskinfo['taskloc'])\n\n if(verbose):\n print(\"Fetching descriptor and invocation...\", flush=True)\n # Get descriptor and invocation\n desc_local = utils.get(descriptor, self.localtaskdir)[0]\n invo_local = utils.get(invocation, self.localtaskdir)[0]\n\n # Get input data, if running remotely\n if not kwargs.get(\"local\") and \\\n any([dl.startswith(\"s3://\") for dl in input_data]):\n if(verbose):\n print(\"Fetching input data...\", flush=True)\n localdatadir = op.join(\"/data\")\n local_input_data = []\n for dataloc in input_data:\n local_input_data += utils.get(dataloc, localdatadir)\n # Move to correct location\n os.chdir(localdatadir)\n else:\n if(verbose):\n print(\"Skipping data fetch (local execution)...\", flush=True)\n if kwargs.get(\"workdir\") and op.exists(kwargs.get(\"workdir\")):\n os.chdir(kwargs[\"workdir\"])\n\n if(verbose):\n print(\"Beginning execution...\", flush=True)\n # Launch task\n copts = ['launch', desc_local, invo_local]\n if kwargs.get(\"volumes\"):\n copts += ['-v'] + kwargs.get(\"volumes\")\n if kwargs.get(\"user\"):\n copts += ['-u']\n\n start_time = time.time()\n self.provLaunch(copts, verbose=verbose, **kwargs)\n if(verbose):\n print(self.output, flush=True)\n duration = time.time() - start_time\n\n # Get list of bosh exec outputs\n with open(desc_local) as fhandle:\n outputs_all = json.load(fhandle)[\"output-files\"]\n\n outputs_present = []\n outputs_all = bosh.evaluate(desc_local, invo_local, 'output-files/')\n for outfile in outputs_all.values():\n outputs_present += [outfile] if op.exists(outfile) else []\n\n # Write memory/cpu stats to file\n usagef = \"task-{}-usage.csv\".format(self.task_id)\n self.cpu_ram_usage.to_csv(op.join(self.localtaskdir, usagef),\n sep=',', index=False)\n utils.post(op.join(self.localtaskdir, usagef), remotetaskdir)\n\n # Write stdout to file\n stdoutf = \"task-{}-stdout.txt\".format(self.task_id)\n with open(op.join(self.localtaskdir, 
stdoutf), \"w\") as fhandle:\n fhandle.write(self.output.stdout)\n utils.post(op.join(self.localtaskdir, stdoutf), remotetaskdir)\n\n # Write sterr to file\n stderrf = \"task-{}-stderr.txt\".format(self.task_id)\n with open(op.join(self.localtaskdir, stderrf), \"w\") as fhandle:\n fhandle.write(self.output.stderr)\n utils.post(op.join(self.localtaskdir, stderrf), remotetaskdir)\n\n start_time = datetime.fromtimestamp(mktime(localtime(start_time)))\n summary = {\"duration\": duration,\n \"launchtime\": str(start_time),\n \"exitcode\": self.output.exit_code,\n \"outputs\": [],\n \"usage\": op.join(remotetaskdir, usagef),\n \"stdout\": op.join(remotetaskdir, stdoutf),\n \"stderr\": op.join(remotetaskdir, stderrf)}\n\n if not kwargs.get(\"local\"):\n if(verbose):\n print(\"Uploading outputs...\", flush=True)\n # Push outputs\n for local_output in outputs_present:\n if(verbose):\n print(\"{} --> {}\".format(local_output, output_loc),\n flush=True)\n tmpouts = utils.post(local_output, output_loc)\n print(tmpouts)\n summary[\"outputs\"] += tmpouts\n else:\n if(verbose):\n print(\"Skipping uploading outputs (local execution)...\",\n flush=True)\n summary[\"outputs\"] = outputs_present\n\n summarf = \"task-{}-summary.json\".format(self.task_id)\n with open(op.join(self.localtaskdir, summarf), \"w\") as fhandle:\n fhandle.write(json.dumps(summary, indent=4, sort_keys=True) + \"\\n\")\n utils.post(op.join(self.localtaskdir, summarf), remotetaskdir)\n\n # If not local, delete all: inputs, outputs, and summaries\n if not kwargs.get(\"local\"):\n for local_output in outputs_present:\n utils.remove(local_output)\n utils.remove(self.localtaskdir)\n for local_input in local_input_data:\n utils.remove(local_input)\n\n def execWrapper(self, sender):\n # if reprozip: use it\n if not subprocess.Popen(\"type reprozip 2>/dev/null\", shell=True).wait():\n if self.runner_kwargs.get(\"verbose\"):\n print(\"Reprozip found; will use to record provenance!\",\n flush=True)\n cmd = 'reprozip usage_report --disable'\n p = subprocess.Popen(cmd, shell=True).wait()\n\n cmd = 'reprozip trace -w --dir={}/task-{}-reprozip/ bosh exec {}'\n p = subprocess.Popen(cmd.format(self.localtaskdir,\n self.task_id,\n \" \".join(self.runner_args)),\n shell=True).wait()\n\n cmd = ('reprozip pack --dir={0}/task-{1}-reprozip/ '\n '{0}/task-{1}-reprozip'.format(self.localtaskdir,\n self.task_id))\n p = subprocess.Popen(cmd, shell=True).wait()\n else:\n if self.runner_kwargs.get(\"verbose\"):\n print(\"Reprozip not found; install to record more provenance!\",\n flush=True)\n sender.send(bosh.execute(*self.runner_args))\n\n def provLaunch(self, options, **kwargs):\n self.runner_args = options\n self.runner_kwargs = kwargs\n timing, cpu, ram = self.monitor(self.execWrapper, **kwargs)\n\n basetime = timing[0]\n\n total_df = pd.DataFrame(columns=['time', 'cpu', 'ram'])\n for ttime, tcpu, tram in zip(timing, cpu, ram):\n total_df.loc[len(total_df)] = (ttime-basetime, tcpu, tram)\n\n self.cpu_ram_usage = total_df\n\n def monitor(self, target, **kwargs):\n ram_lut = {'B': 1/1024/1024,\n 'KiB': 1/1024,\n 'MiB': 1,\n 'GiB': 1024}\n self.output, sender = mp.Pipe(False)\n worker_process = mp.Process(target=target, args=(sender,))\n worker_process.start()\n p = psutil.Process(worker_process.pid)\n\n log_time = []\n log_cpu = []\n log_mem = []\n while worker_process.is_alive():\n try:\n cpu = p.cpu_percent()\n ram = p.memory_info()[0]*ram_lut['B']\n\n for subproc in p.children(recursive=True):\n if not subproc.is_running():\n continue\n\n subproc_dict = 
subproc.as_dict(attrs=['pid',\n 'name',\n 'cmdline',\n 'memory_info'])\n if subproc_dict['name'] == 'docker':\n call = subproc_dict['cmdline'][-1]\n tcmd = psutil.Popen([\"docker\", \"ps\", \"-q\"], stdout=PIPE)\n running = tcmd.communicate()[0].decode('utf-8')\n running = running.split('\\n')\n tcmd = psutil.Popen([\"docker\", \"inspect\"] + running,\n stdout=PIPE, stderr=PIPE)\n tinf = json.loads(tcmd.communicate()[0].decode('utf-8'))\n for tcon in tinf:\n if (tcon.get(\"Config\") and\n tcon.get(\"Config\").get(\"Cmd\") and\n call in tcon['Config']['Cmd']):\n tid = tcon['Id']\n tcmd = psutil.Popen([\n \"docker\",\n \"stats\",\n tid,\n \"--no-stream\",\n \"--format\",\n \"'{{.MemUsage}} \" +\n \"{{.CPUPerc}}'\"\n ],\n stdout=PIPE)\n tout = tcmd.communicate()[0].decode('utf-8')\n tout = tout.strip('\\n').replace(\"'\", \"\")\n\n _ram, _, _, _cpu = tout.split(' ')\n _ram, ending = re.match('([0-9.]+)([MGK]?i?B)',\n _ram).groups()\n ram += float(_ram) * ram_lut[ending]\n cpu += float(_cpu.strip('%'))\n\n else:\n cpu += subproc.cpu_percent(interval=1)\n ram += subproc_dict['memory_info'][0] * ram_lut['B']\n\n if kwargs.get('verbose'):\n print(cpu, ram)\n\n tim = time.time()\n log_time.append(tim)\n log_cpu.append(cpu)\n log_mem.append(ram)\n time.sleep(1)\n\n except (psutil._exceptions.AccessDenied,\n psutil._exceptions.NoSuchProcess,\n TypeError, ValueError, AttributeError) as e:\n if kwargs.get('verbose'):\n print(\"Logging failed: {0}\".format(e))\n continue\n\n worker_process.join()\n self.output = self.output.recv()\n return log_time, log_cpu, log_mem\n" ]
[ [ "pandas.DataFrame" ] ]
DeShrike/C_python_ipc
[ "14380f6b82003585580ce452d5d5581bdf54ad07" ]
[ "sender.py" ]
[ "# http://weifan-tmm.blogspot.kr/2015/07/a-simple-turorial-for-python-c-inter.html\nimport sysv_ipc\nimport numpy as np\nimport struct\n\nBUFF_SIZE = 16\n\nfrom type_definitions import *\n\nif __name__ == \"__main__\":\n msg_string = \"sample string\\0\"\n msg_double1 = 1234.56789\n msg_double2 = 9876.12345\n msg_integer = 20212021\n msg_npy = np.arange(BUFF_SIZE, dtype=np.uint8).reshape((2,BUFF_SIZE//2))\n msg_npy_half = np.arange(BUFF_SIZE//2, dtype=np.uint8).reshape((2,BUFF_SIZE//4))\n try:\n mq = sysv_ipc.MessageQueue(1234, sysv_ipc.IPC_CREAT)\n\n print(\"Ready\")\n\n # string transmission\n mq.send(msg_string, True, type=TYPE_STRING)\n print(f\"string sent: {msg_string}\")\n\n # Two double transmission\n bytearray1 = struct.pack(\"d\", msg_double1)\n bytearray2 = struct.pack(\"d\", msg_double2)\n mq.send(bytearray1 + bytearray2, True, type=TYPE_TWODOUBLES)\n print(f\"two doubles sent: {msg_double1}, {msg_double2}\")\n\n # numpy array transmission\n mq.send(msg_npy.tobytes(order='C'), True, type=TYPE_NUMPY)\n print(f\"numpy array sent: {msg_npy}\")\n\n # one double one numpy transmission\n bytearray1 = struct.pack(\"d\", msg_double1)\n mq.send(bytearray1 + msg_npy_half.tobytes(order='C'), True, type=TYPE_DOUBLEANDNUMPY)\n print(f\"one double and numpy array sent: {msg_double1}, {msg_npy_half}\")\n\n # integer transmission\n bytearray1 = struct.pack(\"i\", msg_integer)\n mq.send(bytearray1, True, type=TYPE_INTEGER)\n print(f\"integer sent: {msg_integer}\")\n\n except sysv_ipc.ExistentialError:\n print(\"ERROR: message queue creation failed\")\n\n\n" ]
[ [ "numpy.arange" ] ]
kakun45/MTADashVisualization
[ "7edac4de650c54671356a11ac91d1c1f477d33bc" ]
[ "history_of_changes/callback2-for-MTA-01-works.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nimport pandas as pd\n\n# get help from installed module:\n# in terminal\n# import dash_html_components as html\n# print(help(html.Div))\n\n# Create a file\ndf = pd.read_csv(\"_59THST_NQR456W_diff.csv\")\n\n# launch app\napp = dash.Dash()\n\n# from https://dash.plot.ly/dash-core-components/dropdown\n# Crate a dash layout that con tains a Graph component\nhour_options = []\nfor hour in df[\"TIME\"].unique():\n hour_options.append({\"label\": str(hour), \"value\": hour})\n\napp.layout = html.Div(\n [\n dcc.Graph(id=\"graph\"),\n dcc.Dropdown(id=\"hour-picker\", options=hour_options, value=df[\"TIME\"].min()),\n ]\n)\n\n\n@app.callback(Output(\"graph\", \"figure\"), [Input(\"hour-picker\", \"value\")])\ndef update_figure(selected_time):\n filtered_df = df[df[\"TIME\"] == selected_time]\n traces = []\n for station_name in filtered_df[\"STATION\"].unique():\n df_by_station = filtered_df[filtered_df[\"STATION\"] == station_name]\n traces.append(\n go.Scatter(\n x=df_by_station[\"entries_diff\"],\n y=df_by_station[\"WEEKDAY\"],\n text=df_by_station[\"TIME\"],\n mode=\"markers\",\n opacity=0.7,\n marker={\"size\": 10},\n name=station_name,\n )\n )\n return {\n \"data\": traces,\n \"layout\": go.Layout(\n xaxis={\"type\": \"log\", \"title\": \"Entries\"},\n yaxis={\"title\": \"Weekday\"},\n hovermode=\"closest\",\n ),\n }\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n" ]
[ [ "pandas.read_csv" ] ]
davidjwilson/pceb
[ "259cf4b18b51b7163d6ce84ab150c5f65f8cfdec" ]
[ "eg_uma/.ipynb_checkpoints/find_line-checkpoint.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nimport glob\nfrom astropy.table import Table\nfrom astropy.io import ascii\nfrom astropy.convolution import convolve, Box1DKernel\nimport astropy.units as u\n\nx1 = []\n\ndef on_key(event):\n global x1\n if event.key == 'w':\n x1.append(event.xdata)\n print('%.3f' %event.xdata)\n plt.close()\n if event.key == 'e':\n x1.append(-1)\n print('%.3f' %event.xdata)\n plt.close()\n\n\n\"\"\"\nPlots each line in EG Uma in turn to identify the line position. If no lines, marks it with -1\n\n\"\"\"\ndata = Table.read('wd_removed_eguma.ecsv')\nw, f, e = data['WAVELENGTH'], data['FLUX'], data['ERROR']\n#f = convolve(f,Box1DKernel(5))\n#e = convolve(e,Box1DKernel(5))/(5**0.5)\n\nlinelist = Table.read('/home/david/work/muscles/FUV_linelist.csv')\nmask = (linelist['Wavelength'] > w[0]) & (linelist['Wavelength'] < w[-1]) & (linelist['Likelihood to measure'] == 'High')\nlinelist = linelist[mask]\n\nfor row in linelist:\n line= row['Wavelength']\n mask = (w > line-2) & (w <line+2)\n fig =plt.figure(row['Ion'])\n plt.step(w[mask], f[mask], where='mid')\n plt.axvline(line, c='C1', ls='--')\n dv= -140*u.km/u.s\n line_shift = dv.to(u.AA, equivalencies=u.doppler_optical(line*u.AA))\n plt.axvline(line_shift.value, c='C3', ls='--')\n [plt.axvline(lines, c='C2', ls='--', zorder=-10, alpha=0.5) for lines in linelist['Wavelength']]\n plt.xlim(line-2, line+2)\n cid = fig.canvas.mpl_connect('key_press_event',on_key)\n plt.show()\n \nsavedat = Table([linelist['Wavelength'], x1], names=['FILENAME', 'Xs'])\nascii.write(savedat, 'lines_to_measure.ecsv', format='ecsv', overwrite=True)\n " ]
[ [ "matplotlib.pyplot.xlim", "matplotlib.pyplot.step", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.axvline" ] ]
christiaanjs/phylo-hacking
[ "b4095995a8c789267cee4268d8e6ba107d1b8428" ]
[ "pymc/eval/variational_analysis.py" ]
[ "import numpy as np\nimport theano.tensor as tt\nfrom pylo.topology import TreeTopology\nimport pylo.transform\nimport newick\nimport pymc3 as pm\nfrom pylo.tree.coalescent import CoalescentTree, ConstantPopulationFunction\nfrom pylo.hky import HKYSubstitutionModel\nfrom pylo.pruning import LeafSequences\nimport sys\nimport json\nimport datetime\nimport pickle\n\ndef construct_model(config, tree, sequence_dict):\n topology = TreeTopology(tree)\n sequence_dict_encoded = pylo.transform.encode_sequences(sequence_dict)\n pattern_dict, pattern_counts = pylo.transform.group_sequences(sequence_dict_encoded)\n pattern_counts = tt.as_tensor_variable(pattern_counts)\n child_patterns = tt.as_tensor_variable(topology.build_sequence_table(pattern_dict))\n\n def get_lognormal_params(var):\n return { 'mu': config['prior_params'][var]['m'], 'sd': config['prior_params'][var]['s'] }\n\n with pm.Model() as model:\n pop_size = pm.Lognormal('pop_size', **get_lognormal_params('pop_size'))\n pop_func = ConstantPopulationFunction(topology, pop_size)\n tree_heights = CoalescentTree('tree', topology, pop_func)\n \n kappa = pm.Lognormal('kappa', **get_lognormal_params('kappa'))\n pi = pm.Dirichlet('pi', a=np.ones(4))\n substitution_model = HKYSubstitutionModel(kappa, pi)\n\n branch_lengths = topology.get_child_branch_lengths(tree_heights)\n sequences = LeafSequences('sequences', topology, substitution_model, branch_lengths, child_patterns, pattern_counts)\n return model\n\nclass SampleTracker(pm.callbacks.Tracker):\n def __init__(self, save_every=1, *args, **kwargs):\n self.save_every = save_every\n super().__init__(*args, **kwargs)\n\n def record(self, approx, hist, i):\n if i % self.save_every == 0:\n super().record(approx, hist, i)\n\n __call__ = record\n\ndef construct_inference(config, model):\n return {\n 'mean_field': pm.ADVI,\n 'full_rank': pm.FullRankADVI\n }[config['inference']](model=model)\n\ndef run_analysis(config, newick_string, sequence_dict, out_file):\n tree = newick.loads(newick_string)[0]\n model = construct_model(config, tree, sequence_dict)\n inference = construct_inference(config, model)\n\n tracker = SampleTracker(\n save_every=config['log_every'],\n i=lambda approx, hist, i: i,\n date_time=datetime.datetime.now,\n **{ key: value.eval for key, value in inference.approx.shared_params.items() }\n )\n\n inference.fit(n=config['n_iter'], callbacks=[tracker])\n \n with open(out_file, 'wb') as f: \n pickle.dump(tracker.hist, f)\n\n return model, inference, tracker.hist\n\nclass TimedTrace(pm.backends.NDArray):\n def setup(self, draws, chain, sampler_vars=None):\n super().setup(draws, chain, sampler_vars=sampler_vars)\n self.times = np.empty(draws, dtype='datetime64[s]')\n \n def record(self, point, sampler_stats=None):\n self.times[self.draw_idx] = np.datetime64(datetime.datetime.now())\n super().record(point, sampler_stats=sampler_stats)\n \n def close(self):\n super().close()\n if self.draw_idx == self.draws:\n return\n self.times = self.times[:self.draw_idx]\n \n\ndef run_mcmc(config, model, out_file):\n with model:\n trace = TimedTrace()\n step = pm.Metropolis()\n pm.sample(chains=1, draws=config['chain_length'], trace=trace, step=[step], tune=0)\n \n with open(out_file, 'wb') as f:\n pickle.dump(trace, f) \n\n return trace\n" ]
[ [ "numpy.ones", "numpy.empty" ] ]
moneygeek/zipline
[ "c90019754d4a02d7118c181535d3932e40430633" ]
[ "zipline/utils/factory.py" ]
[ "#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nFactory functions to prepare useful data.\n\"\"\"\nfrom datetime import timedelta, datetime\nfrom typing import Callable\n\nimport numpy as np\nimport pandas as pd\nfrom trading_calendars import get_calendar\n\nfrom zipline.finance.trading import SimulationParameters\nfrom zipline.protocol import Account, Portfolio\nfrom zipline.sources import SpecificEquityTrades\nfrom zipline.sources.test_source import create_trade\n\n\ndef create_simulation_parameters(year=2006,\n start=None,\n end=None,\n capital_base=float(\"1.0e5\"),\n num_days=None,\n data_frequency='daily',\n emission_rate='daily',\n trading_calendar=None,\n financing_costs: Callable[[Account], float] = None,\n manager_fees: Callable[[Account, Portfolio, pd.Timestamp], float] = None):\n\n if not trading_calendar:\n trading_calendar = get_calendar(\"NYSE\")\n\n if start is None:\n start = pd.Timestamp(\"{0}-01-01\".format(year), tz='UTC')\n elif type(start) == datetime:\n start = pd.Timestamp(start)\n\n if end is None:\n if num_days:\n start_index = trading_calendar.all_sessions.searchsorted(start)\n end = trading_calendar.all_sessions[start_index + num_days - 1]\n else:\n end = pd.Timestamp(\"{0}-12-31\".format(year), tz='UTC')\n elif type(end) == datetime:\n end = pd.Timestamp(end)\n\n sim_params = SimulationParameters(\n start_session=start,\n end_session=end,\n capital_base=capital_base,\n data_frequency=data_frequency,\n emission_rate=emission_rate,\n trading_calendar=trading_calendar,\n financing_costs=financing_costs,\n manager_fees=manager_fees\n )\n\n return sim_params\n\n\ndef get_next_trading_dt(current, interval, trading_calendar):\n next_dt = pd.Timestamp(current).tz_convert(trading_calendar.tz)\n\n while True:\n # Convert timestamp to naive before adding day, otherwise the when\n # stepping over EDT an hour is added.\n next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))\n next_dt = next_dt + interval\n next_dt = pd.Timestamp(next_dt, tz=trading_calendar.tz)\n next_dt_utc = next_dt.tz_convert('UTC')\n if trading_calendar.is_open_on_minute(next_dt_utc):\n break\n next_dt = next_dt_utc.tz_convert(trading_calendar.tz)\n\n return next_dt_utc\n\n\ndef create_trade_history(sid, prices, amounts, interval, sim_params,\n trading_calendar, source_id=\"test_factory\"):\n trades = []\n current = sim_params.first_open\n\n oneday = timedelta(days=1)\n use_midnight = interval >= oneday\n for price, amount in zip(prices, amounts):\n if use_midnight:\n trade_dt = current.replace(hour=0, minute=0)\n else:\n trade_dt = current\n trade = create_trade(sid, price, amount, trade_dt, source_id)\n trades.append(trade)\n current = get_next_trading_dt(current, interval, trading_calendar)\n\n assert len(trades) == len(prices)\n return trades\n\n\ndef create_returns_from_range(sim_params):\n return pd.Series(index=sim_params.sessions,\n data=np.random.rand(len(sim_params.sessions)))\n\n\ndef create_returns_from_list(returns, sim_params):\n return 
pd.Series(index=sim_params.sessions[:len(returns)],\n data=returns)\n\n\ndef create_daily_trade_source(sids,\n sim_params,\n asset_finder,\n trading_calendar):\n \"\"\"\n creates trade_count trades for each sid in sids list.\n first trade will be on sim_params.start_session, and daily\n thereafter for each sid. Thus, two sids should result in two trades per\n day.\n \"\"\"\n return create_trade_source(\n sids,\n timedelta(days=1),\n sim_params,\n asset_finder,\n trading_calendar=trading_calendar,\n )\n\n\ndef create_trade_source(sids,\n trade_time_increment,\n sim_params,\n asset_finder,\n trading_calendar):\n # If the sim_params define an end that is during market hours, that will be\n # used as the end of the data source\n if trading_calendar.is_open_on_minute(sim_params.end_session):\n end = sim_params.end_session\n # Otherwise, the last_close after the end_session is used as the end of the\n # data source\n else:\n end = sim_params.last_close\n\n args = tuple()\n kwargs = {\n 'sids': sids,\n 'start': sim_params.first_open,\n 'end': end,\n 'delta': trade_time_increment,\n 'trading_calendar': trading_calendar,\n 'asset_finder': asset_finder,\n }\n source = SpecificEquityTrades(*args, **kwargs)\n\n return source\n" ]
[ [ "pandas.Timestamp" ] ]
HAL-42/DeepLabV2YQ
[ "96bfcf1055da7adeb4a7c1ed841f6ec29957be59" ]
[ "python/utils/crf.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n#\n# Author: Kazuto Nakashima\n# URL: https://kazuto1011.github.io\n# Date: 09 January 2019\n\n\nimport numpy as np\nimport pydensecrf.densecrf as dcrf\nimport pydensecrf.utils as utils\n\n\nclass DenseCRF(object):\n def __init__(self, iter_max, pos_w, pos_xy_std, bi_w, bi_xy_std, bi_rgb_std):\n self.iter_max = iter_max\n self.pos_w = pos_w\n self.pos_xy_std = pos_xy_std\n self.bi_w = bi_w\n self.bi_xy_std = bi_xy_std\n self.bi_rgb_std = bi_rgb_std\n\n def __call__(self, image, probmap):\n C, H, W = probmap.shape\n\n U = utils.unary_from_softmax(probmap)\n U = np.ascontiguousarray(U)\n\n image = np.ascontiguousarray(image)\n\n d = dcrf.DenseCRF2D(W, H, C)\n d.setUnaryEnergy(U)\n d.addPairwiseGaussian(sxy=self.pos_xy_std, compat=self.pos_w)\n d.addPairwiseBilateral(\n sxy=self.bi_xy_std, srgb=self.bi_rgb_std, rgbim=image, compat=self.bi_w\n )\n\n Q = d.inference(self.iter_max)\n Q = np.array(Q).reshape((C, H, W))\n\n return Q" ]
[ [ "numpy.ascontiguousarray", "numpy.array" ] ]
joeferg425/clarke_park_exploration
[ "9834dfbb1211f477c9dc99499f30cfb02175c302" ]
[ "clarke_park_3d.py" ]
[ "\"\"\"This python script plots the Clarke and Park Transforms.\r\n\r\nThe Transforms of three-phase helixes allow user interaction with a variety\r\nof functions variables.\r\n\r\nAuthor: joe f.\r\nGitHub: https://github.com/joeferg425\r\n\"\"\"\r\nfrom typing import Any\r\nimport numpy as np\r\nimport dash\r\nfrom dash import dcc\r\nfrom dash import html\r\nfrom dash.dependencies import Input, Output, ClientsideFunction\r\nfrom enum import IntEnum, Enum\r\nimport dash_bootstrap_components as dbc\r\nimport dash_daq as daq\r\n\r\n\r\nclass AxisEnum(IntEnum):\r\n \"\"\"Enumeration of axis indices.\r\n\r\n Args:\r\n IntEnum: enumeration value\r\n \"\"\"\r\n\r\n X = 0\r\n Y = 1\r\n Z = 2\r\n\r\n\r\nclass PhaseEnum(IntEnum):\r\n \"\"\"Enumeration of phase indices.\r\n\r\n Args:\r\n IntEnum: enumeration value\r\n \"\"\"\r\n\r\n A = 0\r\n B = 1\r\n C = 2\r\n N = 3\r\n\r\n\r\nclass ClarkeEnum(IntEnum):\r\n \"\"\"Enumeration of Clarke matrix indices.\r\n\r\n Args:\r\n IntEnum: enumeration value\r\n \"\"\"\r\n\r\n A = 0\r\n B = 1\r\n Z = 2\r\n\r\n\r\nclass ParkEnum(IntEnum):\r\n \"\"\"Enumeration of Park matrix indices.\r\n\r\n Args:\r\n IntEnum: enumeration value\r\n \"\"\"\r\n\r\n D = 0\r\n Q = 1\r\n Z = 2\r\n\r\n\r\nclass ColorEnum(Enum):\r\n \"\"\"This is supposed to be a colorblind-friendly palette.\r\n\r\n Args:\r\n Enum: color enum\r\n \"\"\"\r\n\r\n PhaseA = \"#E69F00\"\r\n PhaseB = \"#0072B2\"\r\n PhaseC = \"#CC79A7\"\r\n PhaseN = \"#DDDDDD\"\r\n ClarkeA = \"#D55E00\"\r\n ClarkeB = \"#56B4E9\"\r\n ClarkeZ = \"#22CC22\"\r\n ParkD = \"#F0E442\"\r\n ParkQ = \"#999999\"\r\n\r\n\r\nclass DashEnum(Enum):\r\n \"\"\"Enumeration of line formats.\r\n\r\n Args:\r\n IntEnum: enumeration value\r\n \"\"\"\r\n\r\n Normal = \"solid\"\r\n Clarke = \"dot\"\r\n Park = \"dash\"\r\n\r\n\r\nclass WidthEnum(Enum):\r\n \"\"\"Enumeration of line widths.\r\n\r\n Args:\r\n IntEnum: enumeration value\r\n \"\"\"\r\n\r\n Time = 3\r\n Phasor = 7\r\n Clarke = 9\r\n Park = 7\r\n\r\n\r\nclass FocusAxis(IntEnum):\r\n \"\"\"Enumeration of views.\r\n\r\n Args:\r\n IntEnum: enumeration value\r\n \"\"\"\r\n\r\n XY = 0\r\n XZ = 1\r\n YZ = 2\r\n XYZ = 3\r\n NONE = 4\r\n\r\n\r\nDEBUG = False\r\n\r\napp = dash.Dash(\r\n __name__,\r\n title=\"Clarke & Park Transforms\",\r\n external_stylesheets=[dbc.themes.DARKLY],\r\n external_scripts=[\r\n {\r\n \"type\": \"text/javascript\",\r\n \"id\": \"MathJax-script\",\r\n \"src\": \"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/MathJax.js?config=TeX-MML-AM_CHTML\",\r\n },\r\n {\r\n \"type\": \"text/javascript\",\r\n \"id\": \"MathJax-callback\",\r\n \"src\": \"assets/mathjax.js\",\r\n },\r\n ],\r\n)\r\n\r\nPHASE_COUNT = 3\r\nAXIS_COUNT = 3\r\nTWO_PI = 2 * np.pi\r\n_120 = TWO_PI * (1 / 3)\r\n_240 = TWO_PI * (2 / 3)\r\nmargin = 1\r\nfig = None\r\n\r\n\r\nclass ClarkeParkExploration:\r\n \"\"\"This class defines the controls and graphs of the Clarke and Park transforms.\"\"\"\r\n\r\n INSTANCE: \"ClarkeParkExploration\"\r\n\r\n def __init__(self) -> None:\r\n \"\"\"Create instance of class for use in plots and updates.\"\"\"\r\n self.frequency: float = 1.0\r\n self.sample_count: int = 100\r\n self.slider_count: int = 100\r\n self.three_phase_data: np.ndarray = np.ones((PHASE_COUNT + 1, AXIS_COUNT, self.sample_count))\r\n self.three_phase_data[:, :] *= np.linspace(0, 1, self.sample_count)\r\n self.clarke_data: np.ndarray = np.ones((PHASE_COUNT, self.sample_count))\r\n self.park_data: np.ndarray = np.ones((AXIS_COUNT, self.sample_count))\r\n self.zeros = np.zeros((self.sample_count))\r\n 
self.ones = np.zeros((self.sample_count))\r\n self.zeros3 = np.zeros((3, self.sample_count))\r\n self.ones3 = np.zeros((3, self.sample_count))\r\n self.height = 800\r\n self.width = self.height * 1.25\r\n self.zero_sequence = 0.0\r\n self.projection = \"\"\r\n self.projection_label = \"\"\r\n self.run_mode = \"\"\r\n self.time_offset = 0\r\n self.frequency = 1\r\n self.phaseA_offset = 0\r\n self.phaseB_offset = 0\r\n self.phaseC_offset = 0\r\n self.phaseA_amplitude = 0\r\n self.phaseB_amplitude = 0\r\n self.phaseC_amplitude = 0\r\n self.changed_id: Any = None\r\n self.focus_selection: FocusAxis = FocusAxis.XYZ\r\n self.time = np.linspace(0, -1, self.sample_count)\r\n\r\n self.first = True\r\n\r\n # Clarke transform\r\n self.clarke_matrix = (2 / 3) * np.array(\r\n [\r\n [1, -(1 / 2), -(1 / 2)],\r\n [0, (np.sqrt(3) / 2), -(np.sqrt(3) / 2)],\r\n [(1 / 2), (1 / 2), (1 / 2)],\r\n ]\r\n )\r\n\r\n # Park Transform\r\n self.park_matrix = np.array(\r\n [\r\n [\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Z, :],\r\n -self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :],\r\n self.zeros,\r\n ],\r\n [\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :],\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Z, :],\r\n self.zeros,\r\n ],\r\n [\r\n self.zeros,\r\n self.zeros,\r\n self.ones,\r\n ],\r\n ]\r\n )\r\n\r\n ClarkeParkExploration.INSTANCE = self\r\n\r\n def generate_three_phase_data(self) -> None:\r\n \"\"\"Create three 3D helixes 120 degrees offset from each other.\"\"\"\r\n self.time_plus_offset = self.time + self.time_offset\r\n\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :] = self.phaseA_amplitude * np.cos(\r\n self.frequency * TWO_PI * self.time_plus_offset + (self.phaseA_offset * np.pi)\r\n )\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Z, :] = self.phaseA_amplitude * np.sin(\r\n self.frequency * TWO_PI * self.time_plus_offset + (self.phaseA_offset * np.pi)\r\n )\r\n\r\n self.three_phase_data[PhaseEnum.B, AxisEnum.Y, :] = (\r\n self.phaseB_amplitude\r\n * np.cos(self.frequency * TWO_PI * self.time_plus_offset + (self.phaseB_offset * np.pi) - _120)\r\n - np.cos(_120) * self.zero_sequence\r\n )\r\n self.three_phase_data[PhaseEnum.B, AxisEnum.Z, :] = (\r\n self.phaseB_amplitude\r\n * np.sin(self.frequency * TWO_PI * self.time_plus_offset + (self.phaseB_offset * np.pi) - _120)\r\n - np.sin(_120) * self.zero_sequence\r\n )\r\n\r\n self.three_phase_data[PhaseEnum.C, AxisEnum.Y, :] = (\r\n self.phaseC_amplitude\r\n * np.cos(self.frequency * TWO_PI * self.time_plus_offset + (self.phaseC_offset * np.pi) + _120)\r\n - np.cos(_240) * self.zero_sequence\r\n )\r\n self.three_phase_data[PhaseEnum.C, AxisEnum.Z, :] = (\r\n self.phaseC_amplitude\r\n * np.sin(self.frequency * TWO_PI * self.time_plus_offset + (self.phaseC_offset * np.pi) + _120)\r\n - np.sin(_240) * self.zero_sequence\r\n )\r\n\r\n self.three_phase_data[PhaseEnum.N, AxisEnum.Y, :] = (\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :]\r\n + self.three_phase_data[PhaseEnum.B, AxisEnum.Y, :]\r\n + self.three_phase_data[PhaseEnum.C, AxisEnum.Y, :]\r\n )\r\n self.three_phase_data[PhaseEnum.N, AxisEnum.Z, :] = (\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Z, :]\r\n + self.three_phase_data[PhaseEnum.B, AxisEnum.Z, :]\r\n + self.three_phase_data[PhaseEnum.C, AxisEnum.Z, :]\r\n )\r\n\r\n def do_clarke_transform(self):\r\n \"\"\"Perform Clarke transform function.\r\n\r\n https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_transformation\r\n https://www.mathworks.com/help/physmod/sps/ref/clarketransform.html\r\n \"\"\"\r\n # Clarke transform 
function\r\n self.clarke_data[:, :] = np.dot(\r\n self.clarke_matrix,\r\n np.array(\r\n [\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :],\r\n self.three_phase_data[PhaseEnum.B, AxisEnum.Y, :],\r\n self.three_phase_data[PhaseEnum.C, AxisEnum.Y, :],\r\n ]\r\n ),\r\n )\r\n\r\n def do_park_transform(self) -> None:\r\n \"\"\"Perform Park transform function.\r\n\r\n https://de.wikipedia.org/wiki/D/q-Transformation\r\n https://www.mathworks.com/help/physmod/sps/ref/clarketoparkangletransform.html\r\n \"\"\"\r\n # create Park transformation matrix, with reference based on enum value\r\n self.park_matrix[0, 0, :] = self.three_phase_data[PhaseEnum.A, AxisEnum.Z, :]\r\n self.park_matrix[1, 0, :] = -self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :]\r\n self.park_matrix[0, 1, :] = self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :]\r\n self.park_matrix[1, 1, :] = self.three_phase_data[PhaseEnum.A, AxisEnum.Z, :]\r\n\r\n # perform the matrix math\r\n self.park_data = np.einsum(\r\n \"ijk,ik->jk\",\r\n self.park_matrix,\r\n self.clarke_data,\r\n )\r\n\r\n def generate_figure_data(self) -> None:\r\n \"\"\"Create plotly data structure used to update web page in callback.\"\"\"\r\n self.generate_three_phase_data()\r\n self.do_clarke_transform()\r\n self.do_park_transform()\r\n mmax1 = np.max(self.three_phase_data)\r\n mmax2 = np.max(self.clarke_data)\r\n mmax3 = np.max(self.park_data)\r\n mmin1 = np.min(self.three_phase_data)\r\n mmin2 = np.min(self.clarke_data)\r\n mmin3 = np.min(self.park_data)\r\n mmax = np.max([mmax1, mmax2, mmax3, -mmin1, -mmin2, -mmin3])\r\n ticks = [-mmax, mmax]\r\n self.figure_data = {\r\n \"data\": [\r\n {\r\n \"x\": [0, 1],\r\n \"y\": ticks,\r\n \"z\": ticks,\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"fixed_xyz_range\",\r\n \"line\": {\r\n \"width\": 0,\r\n \"color\": \"rgba(0,0,0,0)\",\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.A, AxisEnum.X, :],\r\n \"y\": self.three_phase_data[PhaseEnum.A, AxisEnum.Y, :],\r\n \"z\": self.three_phase_data[PhaseEnum.A, AxisEnum.Z, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Phase A (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseA.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.B, AxisEnum.X, :],\r\n \"y\": self.three_phase_data[PhaseEnum.B, AxisEnum.Y, :],\r\n \"z\": self.three_phase_data[PhaseEnum.B, AxisEnum.Z, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Phase B (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseB.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.C, AxisEnum.X, :],\r\n \"y\": self.three_phase_data[PhaseEnum.C, AxisEnum.Y, :],\r\n \"z\": self.three_phase_data[PhaseEnum.C, AxisEnum.Z, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Phase C (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseC.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.N, AxisEnum.X, :],\r\n \"y\": self.three_phase_data[PhaseEnum.N, AxisEnum.Y, :],\r\n \"z\": self.three_phase_data[PhaseEnum.N, AxisEnum.Z, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Neutral (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseN.value,\r\n },\r\n 
},\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Y, 0],\r\n ],\r\n \"z\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.A, AxisEnum.Z, 0],\r\n ],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Phase A({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Phasor.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseA.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.B, AxisEnum.Y, 0],\r\n ],\r\n \"z\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.B, AxisEnum.Z, 0],\r\n ],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Phase B({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Phasor.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseB.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.C, AxisEnum.Y, 0],\r\n ],\r\n \"z\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.C, AxisEnum.Z, 0],\r\n ],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Phase C({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Phasor.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseC.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.N, AxisEnum.Y, 0],\r\n ],\r\n \"z\": [\r\n 0,\r\n self.three_phase_data[PhaseEnum.N, AxisEnum.Z, 0],\r\n ],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Neutral ({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Clarke.value,\r\n \"dash\": DashEnum.Normal.value,\r\n \"color\": ColorEnum.PhaseN.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.A, AxisEnum.X, :],\r\n \"y\": self.clarke_data[ClarkeEnum.A, :],\r\n \"z\": self.zeros,\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Clarke α (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Clarke.value,\r\n \"color\": ColorEnum.ClarkeA.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.A, AxisEnum.X, :],\r\n \"y\": self.zeros,\r\n \"z\": self.clarke_data[ClarkeEnum.B, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Clarke β (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Clarke.value,\r\n \"color\": ColorEnum.ClarkeB.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.A, AxisEnum.X, :],\r\n \"y\": self.clarke_data[ClarkeEnum.Z, :],\r\n \"z\": self.clarke_data[ClarkeEnum.Z, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Clarke Zero (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Clarke.value,\r\n \"color\": ColorEnum.ClarkeZ.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [0, self.clarke_data[ClarkeEnum.A, 0]],\r\n \"z\": [0, 0],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Clarke α ({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Clarke.value,\r\n \"dash\": DashEnum.Clarke.value,\r\n \"color\": ColorEnum.ClarkeA.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [0, 0],\r\n \"z\": [0, self.clarke_data[ClarkeEnum.B, 0]],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Clarke β ({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Clarke.value,\r\n \"dash\": DashEnum.Clarke.value,\r\n 
\"color\": ColorEnum.ClarkeB.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [0, self.clarke_data[ClarkeEnum.Z, 0]],\r\n \"z\": [0, self.clarke_data[ClarkeEnum.Z, 0]],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Clarke Zero ({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Time.value,\r\n \"dash\": DashEnum.Clarke.value,\r\n \"color\": ColorEnum.ClarkeZ.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.A, AxisEnum.X, :],\r\n \"y\": np.cos(\r\n self.frequency * TWO_PI * self.time_plus_offset[0]\r\n + (self.phaseA_offset * np.pi)\r\n + (np.pi / 2)\r\n )\r\n * self.park_data[ParkEnum.D, :],\r\n \"z\": np.sin(\r\n self.frequency * TWO_PI * self.time_plus_offset[0]\r\n + (self.phaseA_offset * np.pi)\r\n + (np.pi / 2)\r\n )\r\n * self.park_data[ParkEnum.D, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Park d (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Park.value,\r\n \"dash\": DashEnum.Park.value,\r\n \"color\": ColorEnum.ParkD.value,\r\n },\r\n },\r\n {\r\n \"x\": self.three_phase_data[PhaseEnum.A, AxisEnum.X, :],\r\n \"y\": np.cos(\r\n self.frequency * TWO_PI * self.time_plus_offset[0] + (self.phaseA_offset * np.pi)\r\n )\r\n * self.park_data[ParkEnum.Q, :],\r\n \"z\": np.sin(\r\n self.frequency * TWO_PI * self.time_plus_offset[0] + (self.phaseA_offset * np.pi)\r\n )\r\n * self.park_data[ParkEnum.Q, :],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": \"Park q (t)\",\r\n \"line\": {\r\n \"width\": WidthEnum.Park.value,\r\n \"dash\": DashEnum.Park.value,\r\n \"color\": ColorEnum.ParkQ.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [\r\n 0,\r\n np.cos(\r\n self.frequency * TWO_PI * self.time_plus_offset[0]\r\n + (self.phaseA_offset * np.pi)\r\n + (np.pi / 2)\r\n )\r\n * self.park_data[ParkEnum.D, 0],\r\n ],\r\n \"z\": [\r\n 0,\r\n np.sin(\r\n self.frequency * TWO_PI * self.time_plus_offset[0]\r\n + (self.phaseA_offset * np.pi)\r\n + (np.pi / 2)\r\n )\r\n * self.park_data[ParkEnum.D, 0],\r\n ],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Park d ({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Park.value,\r\n \"dash\": DashEnum.Park.value,\r\n \"color\": ColorEnum.ParkD.value,\r\n },\r\n },\r\n {\r\n \"x\": [0, 0],\r\n \"y\": [\r\n 0,\r\n np.cos(\r\n self.frequency * TWO_PI * self.time_plus_offset[0] + (self.phaseA_offset * np.pi)\r\n )\r\n * self.park_data[ParkEnum.Q, 0],\r\n ],\r\n \"z\": [\r\n 0,\r\n np.sin(\r\n self.frequency * TWO_PI * self.time_plus_offset[0] + (self.phaseA_offset * np.pi)\r\n )\r\n * self.park_data[ParkEnum.Q, 0],\r\n ],\r\n \"type\": \"scatter3d\",\r\n \"mode\": \"lines\",\r\n \"name\": f\"Park q ({self.time_offset:0.2f})\",\r\n \"line\": {\r\n \"width\": WidthEnum.Park.value,\r\n \"dash\": DashEnum.Park.value,\r\n \"color\": ColorEnum.ParkQ.value,\r\n },\r\n },\r\n ],\r\n \"layout\": {\r\n \"scene\": {\r\n \"xaxis\": {\r\n \"title\": \"x (Time)\",\r\n \"tickvals\": [-1, 0, 1],\r\n },\r\n \"yaxis\": {\r\n \"title\": \"y (Real)\",\r\n \"tickvals\": [-1, 0, 1],\r\n },\r\n \"zaxis\": {\r\n \"title\": \"z (Imaginary)\",\r\n \"tickvals\": [-1, 0, 1],\r\n },\r\n },\r\n \"plot_bgcolor\": \"rgba(0, 0, 0, 0)\",\r\n \"paper_bgcolor\": \"rgba(0, 0, 0, 0)\",\r\n },\r\n }\r\n if self.first is False:\r\n self.figure_data[\"layout\"][\"uirevision\"] = 1\r\n self.figure_data[\"layout\"][\"scene\"][\"aspectratio\"] = {\r\n \"x\": 1,\r\n \"y\": 1,\r\n \"z\": 1,\r\n }\r\n 
self.figure_data[\"layout\"][\"height\"] = self.height\r\n self.figure_data[\"layout\"][\"width\"] = self.width\r\n self.figure_data[\"layout\"][\"margin\"] = {\r\n \"l\": margin,\r\n \"r\": margin,\r\n \"t\": margin,\r\n \"b\": margin,\r\n }\r\n\r\n else:\r\n self.first = False\r\n self.figure_data[\"layout\"][\"uirevision\"] = 1\r\n self.figure_data[\"layout\"][\"height\"] = self.height\r\n self.figure_data[\"layout\"][\"width\"] = self.width\r\n self.figure_data[\"layout\"][\"scene_aspectmode\"] = \"cube\"\r\n self.figure_data[\"layout\"][\"autosize\"] = False\r\n self.figure_data[\"layout\"][\"scene\"][\"aspectmode\"] = \"manual\"\r\n self.figure_data[\"layout\"][\"scene\"][\"aspectratio\"] = {\r\n \"x\": 1,\r\n \"y\": 1,\r\n \"z\": 1,\r\n }\r\n self.figure_data[\"layout\"][\"margin\"] = {\r\n \"l\": margin,\r\n \"r\": margin,\r\n \"t\": margin,\r\n \"b\": margin,\r\n }\r\n\r\n if self.focus_selection == FocusAxis.XY:\r\n self.figure_data[\"layout\"][\"scene\"][\"camera\"] = {\r\n \"up\": {\r\n \"x\": 0.0,\r\n \"y\": 0.5,\r\n \"z\": 0.0,\r\n },\r\n \"eye\": {\r\n \"x\": 0.0,\r\n \"y\": 0.0,\r\n \"z\": 2.0,\r\n },\r\n }\r\n elif self.focus_selection == FocusAxis.XZ:\r\n self.figure_data[\"layout\"][\"scene\"][\"camera\"] = {\r\n \"up\": {\r\n \"x\": 0.0,\r\n \"y\": 0.0,\r\n \"z\": 0.5,\r\n },\r\n \"eye\": {\r\n \"x\": 0.0,\r\n \"y\": -2.0,\r\n \"z\": 0.0,\r\n },\r\n }\r\n elif self.focus_selection == FocusAxis.YZ:\r\n self.figure_data[\"layout\"][\"scene\"][\"camera\"] = {\r\n \"up\": {\r\n \"x\": 0.0,\r\n \"y\": 0.5,\r\n \"z\": 0.0,\r\n },\r\n \"eye\": {\r\n \"x\": -2.0,\r\n \"y\": 0.0,\r\n \"z\": 0.0,\r\n },\r\n }\r\n elif self.focus_selection == FocusAxis.XYZ:\r\n self.figure_data[\"layout\"][\"scene\"][\"camera\"] = {\r\n \"up\": {\r\n \"x\": 0.0,\r\n \"y\": 0.5,\r\n \"z\": 0.0,\r\n },\r\n \"eye\": {\r\n \"x\": 1.75,\r\n \"y\": 1.75,\r\n \"z\": 1.75,\r\n },\r\n }\r\n self.figure_data[\"layout\"][\"scene\"][\"camera\"][\"projection\"] = {\r\n \"type\": self.projection,\r\n }\r\n\r\n @staticmethod\r\n @app.callback(\r\n [\r\n Output(\"scatter_plot\", \"figure\"),\r\n Output(\"three_phase_data\", \"children\"),\r\n Output(\"clarke_data\", \"children\"),\r\n Output(\"park_data\", \"children\"),\r\n Output(\"projection\", \"label\"),\r\n Output(\"run-mode\", \"label\"),\r\n Output(\"interval-component\", \"max_intervals\"),\r\n Output(\"time_slider\", \"value\"),\r\n ],\r\n [\r\n Input(\"interval-component\", \"n_intervals\"),\r\n Input(\"time_slider\", \"value\"),\r\n Input(\"frequency_slider\", \"value\"),\r\n Input(\"phaseA_amplitude_slider\", \"value\"),\r\n Input(\"phaseB_amplitude_slider\", \"value\"),\r\n Input(\"phaseC_amplitude_slider\", \"value\"),\r\n Input(\"phaseA_phase_slider\", \"value\"),\r\n Input(\"phaseB_phase_slider\", \"value\"),\r\n Input(\"phaseC_phase_slider\", \"value\"),\r\n Input(\"size_slider\", \"value\"),\r\n Input(\"zerosequence_slider\", \"value\"),\r\n Input(\"focus_xy\", \"n_clicks\"),\r\n Input(\"focus_xz\", \"n_clicks\"),\r\n Input(\"focus_yz\", \"n_clicks\"),\r\n Input(\"focus_corner\", \"n_clicks\"),\r\n Input(\"projection\", \"on\"),\r\n Input(\"run-mode\", \"on\"),\r\n ],\r\n )\r\n def update_graphs(\r\n interval,\r\n time_slider,\r\n frequency_slider,\r\n phaseA_amplitude_slider,\r\n phaseB_amplitude_slider,\r\n phaseC_amplitude_slider,\r\n phaseA_phase_slider,\r\n phaseB_phase_slider,\r\n phaseC_phase_slider,\r\n size_slider,\r\n zero_sequence_slider,\r\n btn1,\r\n btn2,\r\n btn3,\r\n btn4,\r\n projection_isometric,\r\n run_mode,\r\n ):\r\n 
\"\"\"Callback function used by plotly when use interacts with controls.\r\n\r\n Args:\r\n interval: _description_\r\n time_slider: _description_\r\n frequency_slider: _description_\r\n phaseA_amplitude_slider: _description_\r\n phaseB_amplitude_slider: _description_\r\n phaseC_amplitude_slider: _description_\r\n phaseA_phase_slider: _description_\r\n phaseB_phase_slider: _description_\r\n phaseC_phase_slider: _description_\r\n size_slider: _description_\r\n zero_sequence_slider: _description_\r\n btn1: _description_\r\n btn2: _description_\r\n btn3: _description_\r\n btn4: _description_\r\n projection_isometric: _description_\r\n run_mode: _description_\r\n\r\n Returns:\r\n dictionary of objects for plotly's consumption\r\n \"\"\"\r\n self = ClarkeParkExploration.INSTANCE\r\n self.time_offset = time_slider\r\n self.frequency = frequency_slider\r\n self.phaseA_offset = phaseA_phase_slider\r\n self.phaseB_offset = phaseB_phase_slider\r\n self.phaseC_offset = phaseC_phase_slider\r\n self.phaseA_amplitude = phaseA_amplitude_slider\r\n self.phaseB_amplitude = phaseB_amplitude_slider\r\n self.phaseC_amplitude = phaseC_amplitude_slider\r\n self.height = size_slider\r\n self.width = size_slider * 1.25\r\n self.zero_sequence = zero_sequence_slider\r\n if projection_isometric is False:\r\n self.projection = \"isometric\"\r\n self.projection_label = \"Enable Orthographic Projection\"\r\n else:\r\n self.projection = \"orthographic\"\r\n self.projection_label = \"Disable Orthographic Projection\"\r\n if run_mode is True:\r\n self.run_mode = \"Disable Continuous Mode\"\r\n max_intervals = 10000000\r\n self.time_offset += 1.0 / self.slider_count\r\n if self.time_offset > 1:\r\n self.time_offset = 0\r\n else:\r\n self.run_mode = \"Enable Continuous Mode\"\r\n max_intervals = 0\r\n self.changed_id = [p[\"prop_id\"] for p in dash.callback_context.triggered][0]\r\n if \"focus_xy\" in self.changed_id:\r\n self.focus_selection = FocusAxis.XY\r\n elif \"focus_xz\" in self.changed_id:\r\n self.focus_selection = FocusAxis.XZ\r\n elif \"focus_yz\" in self.changed_id:\r\n self.focus_selection = FocusAxis.YZ\r\n elif \"focus_corner\" in self.changed_id:\r\n self.focus_selection = FocusAxis.XYZ\r\n self.generate_figure_data()\r\n return [\r\n self.figure_data,\r\n html.Td(\r\n [\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.A, AxisEnum.X, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.A, AxisEnum.Y, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.A, AxisEnum.Z, 0]:0.2f}\\u00A0\\u00A0\"),\r\n ]\r\n ),\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.B, AxisEnum.X, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.B, AxisEnum.Y, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.B, AxisEnum.Z, 0]:0.2f}\\u00A0\\u00A0\"),\r\n ]\r\n ),\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.C, AxisEnum.X, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.C, AxisEnum.Y, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.C, AxisEnum.Z, 0]:0.2f}\\u00A0\\u00A0\"),\r\n ]\r\n ),\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.N, AxisEnum.X, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.N, AxisEnum.Y, 0]:0.2f}\\u00A0\\u00A0\"),\r\n html.Td(f\"{self.three_phase_data[PhaseEnum.N, AxisEnum.Z, 0]:0.2f}\\u00A0\\u00A0\"),\r\n ]\r\n ),\r\n ]\r\n ),\r\n html.Td(\r\n [\r\n html.Tr(\r\n 
[\r\n html.Td(f\"{self.clarke_data[ClarkeEnum.A, 0]:0.2f}\"),\r\n ]\r\n ),\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.clarke_data[ClarkeEnum.B, 0]:0.2f}\"),\r\n ]\r\n ),\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.clarke_data[ClarkeEnum.Z, 0]:0.2f}\"),\r\n ]\r\n ),\r\n ]\r\n ),\r\n html.Td(\r\n [\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.park_data[ParkEnum.D, 0]:0.2f}\"),\r\n ]\r\n ),\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.park_data[ParkEnum.Q, 0]:0.2f}\"),\r\n ]\r\n ),\r\n html.Tr(\r\n [\r\n html.Td(f\"{self.park_data[ParkEnum.Z, 0]:0.2f}\"),\r\n ]\r\n ),\r\n ]\r\n ),\r\n self.projection_label,\r\n self.run_mode,\r\n max_intervals,\r\n self.time_offset,\r\n ]\r\n\r\n\r\ncpe = ClarkeParkExploration()\r\napp.layout = dbc.Container(\r\n [\r\n html.H1(\"Interactive Clarke & Park Transforms\"),\r\n html.Div(\r\n [\r\n html.H3(\"Overview\"),\r\n html.P(\r\n \"This interactive plot is meant to be used for exploring The interactions of \"\r\n + \"variables in a three-phase system and the Clarke and Park transforms.\"\r\n ),\r\n html.H3(\"Introduction\"),\r\n html.P(\r\n \"The Plotted lines can be turned on or off by clicking on the plot legend. \"\r\n + \"The buttons below can be used to set the view to various fixed perspectives. \"\r\n + \"The graph can be zoomed and panned by interacting with it using the mouse and menu. \"\r\n + \"The sliders under the graph can be used to adjust variables used in the graph. \"\r\n + \"The math below will update along with the graph to help better understant effects \"\r\n + \"that the sliders have on the graph\"\r\n ),\r\n ]\r\n ),\r\n html.H3(\"Equations\"),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n [\r\n html.P(\"Three phase helix data (plus vector sum of the three).\"),\r\n html.P(\r\n \"$$ \\\\begin{bmatrix} \"\r\n + \"A_x(t) & A_y(t) & A_z(t) \\\\\\\\\"\r\n + \"B_x(t) & B_y(t) & B_z(t) \\\\\\\\\"\r\n + \"C_x(t) & C_y(t) & C_z(t) \\\\\\\\\"\r\n + \"N_x(t) & N_y(t) & N_z(t) \\\\end{bmatrix} = \"\r\n + \"\\\\begin{bmatrix} \"\r\n + \"x(t) & sin(t) & cos(t) \\\\\\\\\"\r\n + \"x(t) & sin(t+\\\\frac{2*\\\\pi}{3})) & cos(t+\\\\frac{2*\\\\pi}{3}) \\\\\\\\\"\r\n + \"x(t) & sin(t-\\\\frac{2*\\\\pi}{3}) & cos(t-\\\\frac{2*\\\\pi}{3}) \\\\\\\\\"\r\n + \"x(t) & A_y(t) + B_y(t) + C_y(t) & A_z(t) + B_z(t) + C_z(t) \"\r\n + \"\\\\end{bmatrix} $$\"\r\n ),\r\n ]\r\n ),\r\n html.Td(\"\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\"),\r\n html.Td(\r\n [\r\n html.P(\r\n \"\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0 For t = time slider below.\"\r\n ),\r\n html.P(\r\n \"$$ \\\\begin{bmatrix} A_x(slider) & A_y(slider) & A_z(slider) \\\\\\\\\"\r\n + \"B_x(slider) & B_y(slider) & B_z(slider) \\\\\\\\\"\r\n + \"C_x(slider) & C_y(slider) & C_z(slider) \\\\\\\\\"\r\n + \"N_x(slider) & N_y(slider) & N_z(slider) \\\\end{bmatrix} = $$\"\r\n ),\r\n ]\r\n ),\r\n html.Td(id=\"three_phase_data\"),\r\n ]\r\n )\r\n ),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n [\r\n html.P(\"Clarke transform data.\"),\r\n html.P(\r\n \"$$ \\\\frac{2}{3} \\\\begin{bmatrix} 1 & -\\\\frac{1}{2} & -\\\\frac{1}{2} \"\r\n + \"\\\\\\\\ 0 & \\\\frac{\\\\sqrt{3}}{2} & -\\\\frac{\\\\sqrt{3}}{2} \\\\\\\\ \"\r\n + \"\\\\frac{1}{2} & \\\\frac{1}{2} & \\\\frac{1}{2} \\\\end{bmatrix} \"\r\n + \" \\\\begin{bmatrix} A_z(t) \\\\\\\\ B_z(t) \\\\\\\\ C_z(t) \\\\end{bmatrix} =\"\r\n + \" \\\\begin{bmatrix} \\\\alpha(t) \\\\\\\\ \\\\beta(t) \\\\\\\\ Z_{C}(t) \"\r\n + \"\\\\end{bmatrix} $$\"\r\n ),\r\n ]\r\n ),\r\n html.Td(\"\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\"),\r\n 
html.Td(\r\n [\r\n html.P(\r\n \"\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0 For t = time slider below.\"\r\n ),\r\n html.P(\r\n \"$$ \\\\begin{bmatrix} \\\\alpha(slider) \\\\\\\\ \\\\beta(slider) \"\r\n + \" \\\\\\\\ Z_{C}(slider) \\\\end{bmatrix} = $$\",\r\n ),\r\n ]\r\n ),\r\n html.Td(id=\"clarke_data\"),\r\n ]\r\n )\r\n ),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n [\r\n html.P(\"Park Transform data.\"),\r\n html.P(\r\n \"$$ \\\\begin{bmatrix} \"\r\n + \"sin(\\\\omega t) & -cos(\\\\omega t) & 0 \\\\\\\\\"\r\n + \"cos(\\\\omega t) & sin(\\\\omega t) & 0 \\\\\\\\ \"\r\n + \"0 & 0 & 1 \"\r\n + \"\\\\end{bmatrix} \"\r\n + \"\\\\begin{bmatrix} \"\r\n + \"\\\\alpha(t) \\\\\\\\ \\\\beta(t) \\\\\\\\ Z_{C}(t) \"\r\n + \"\\\\end{bmatrix} = \"\r\n + \"\\\\begin{bmatrix} \"\r\n + \"d(t) \\\\\\\\ q(t) \\\\\\\\ Z_{P}(t) \"\r\n + \"\\\\end{bmatrix} $$\"\r\n ),\r\n ]\r\n ),\r\n html.Td(\"\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\"),\r\n html.Td(\r\n [\r\n html.P(\r\n \"\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0 For t = time slider below.\"\r\n ),\r\n html.P(\r\n \"$$ \\\\begin{bmatrix} d(slider) \\\\\\\\ q(slider) \\\\\\\\ \"\r\n + \"Z_{P}(slider) \\\\end{bmatrix} = $$\"\r\n ),\r\n ]\r\n ),\r\n html.Td(id=\"park_data\"),\r\n ]\r\n )\r\n ),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n [\r\n html.H3(\"View\"),\r\n html.Div(\r\n [\r\n html.P(\r\n \"Use the following controls to change the perspective of the graph.\"\r\n ),\r\n html.P(\r\n \"Sometimes you have to switch views more than once to reset rotation.\"\r\n ),\r\n ]\r\n ),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n daq.BooleanSwitch(\r\n id=\"projection\",\r\n on=False,\r\n label=\"Isometric Projection\",\r\n labelPosition=\"top\",\r\n ),\r\n ),\r\n html.Td(\r\n html.P(\"\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\\u00A0\"),\r\n ),\r\n html.Td(\r\n daq.BooleanSwitch(\r\n id=\"run-mode\",\r\n on=False,\r\n label=\"Enable Continuous Mode\",\r\n labelPosition=\"top\",\r\n ),\r\n ),\r\n ]\r\n )\r\n ),\r\n html.P(),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n html.Button(\r\n \"View X/Y (real / cosine)\", id=\"focus_xy\", n_clicks=0\r\n ),\r\n ),\r\n html.Td(\"\\u00A0\\u00A0\"),\r\n html.Td(\r\n html.Button(\r\n \"View X/Z (imaginary / sine)\", id=\"focus_xz\", n_clicks=0\r\n ),\r\n ),\r\n html.Td(\"\\u00A0\\u00A0\"),\r\n html.Td(\r\n html.Button(\"View Y/Z (polar)\", id=\"focus_yz\", n_clicks=0),\r\n ),\r\n html.Td(\"\\u00A0\\u00A0\"),\r\n html.Td(\r\n html.Button(\"View X/Y/Z\", id=\"focus_corner\", n_clicks=0),\r\n ),\r\n ]\r\n )\r\n ),\r\n html.P(),\r\n html.H3(\"Controls\"),\r\n html.P(\"Time offset\"),\r\n dcc.Slider(\r\n id=\"time_slider\",\r\n min=0,\r\n max=1,\r\n marks={0: \"0\", 1: \"1\"},\r\n step=1 / cpe.slider_count,\r\n value=0,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n html.P(\"Frequency (\\\\( \\\\omega \\\\)).\"),\r\n dcc.Slider(\r\n id=\"frequency_slider\",\r\n min=0.5,\r\n max=5,\r\n marks={0.5: \"0.5\", 5: \"5\"},\r\n step=1 / cpe.slider_count,\r\n value=1,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n [\r\n html.P(\"A amplitude\"),\r\n dcc.Slider(\r\n id=\"phaseA_amplitude_slider\",\r\n min=0.1,\r\n max=2,\r\n marks={0.1: \"0.1\", 2: \"2\"},\r\n step=1 / cpe.slider_count,\r\n value=1,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n 
\"always_visible\": True,\r\n },\r\n ),\r\n ]\r\n ),\r\n html.Td(\r\n [\r\n html.P(\"B amplitude\"),\r\n dcc.Slider(\r\n id=\"phaseB_amplitude_slider\",\r\n min=0.1,\r\n max=2,\r\n marks={0.1: \"0.1\", 2: \"2\"},\r\n step=1 / cpe.slider_count,\r\n value=1,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n ]\r\n ),\r\n html.Td(\r\n [\r\n html.P(\"C amplitude\"),\r\n dcc.Slider(\r\n id=\"phaseC_amplitude_slider\",\r\n min=0.1,\r\n max=2,\r\n marks={0.1: \"0.1\", 2: \"2\"},\r\n step=1 / cpe.slider_count,\r\n value=1,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n ]\r\n ),\r\n ],\r\n ),\r\n style={\"width\": \"100%\"},\r\n ),\r\n html.Table(\r\n html.Tr(\r\n [\r\n html.Td(\r\n [\r\n html.P(\"A phase offset\"),\r\n dcc.Slider(\r\n id=\"phaseA_phase_slider\",\r\n min=-1,\r\n max=1,\r\n marks={-1: \"-1\", 1: \"1\"},\r\n step=1 / cpe.slider_count,\r\n value=0,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n ]\r\n ),\r\n html.Td(\r\n [\r\n html.P(\"B phase offset\"),\r\n dcc.Slider(\r\n id=\"phaseB_phase_slider\",\r\n min=-1,\r\n max=1,\r\n marks={-1: \"-1\", 1: \"1\"},\r\n step=1 / cpe.slider_count,\r\n value=0,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n ]\r\n ),\r\n html.Td(\r\n [\r\n html.P(\"C phase offset\"),\r\n dcc.Slider(\r\n id=\"phaseC_phase_slider\",\r\n min=-1,\r\n max=1,\r\n marks={-1: \"-1\", 1: \"1\"},\r\n step=1 / cpe.slider_count,\r\n value=0,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n ]\r\n ),\r\n ],\r\n ),\r\n style={\"width\": \"100%\"},\r\n ),\r\n html.P(\"DC offset\"),\r\n dcc.Slider(\r\n id=\"zerosequence_slider\",\r\n min=0.0,\r\n max=1.0,\r\n marks={0: \"0\", 1: \"1\"},\r\n step=1.0 / cpe.slider_count,\r\n value=0,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n html.P(\"Graph size\"),\r\n dcc.Slider(\r\n id=\"size_slider\",\r\n min=400,\r\n max=1600,\r\n marks={400: \"400\", 1600: \"1600\"},\r\n step=cpe.slider_count,\r\n value=700,\r\n updatemode=\"drag\",\r\n tooltip={\r\n \"placement\": \"bottom\",\r\n \"always_visible\": True,\r\n },\r\n ),\r\n ],\r\n style={\"width\": \"40%\"},\r\n ),\r\n html.Td(\r\n [\r\n dcc.Graph(\r\n id=\"scatter_plot\",\r\n style={\r\n \"scene_aspectmode\": \"cube\",\r\n },\r\n ),\r\n ]\r\n ),\r\n ]\r\n )\r\n ),\r\n html.P(id=\"ignore\"),\r\n dcc.Interval(id=\"interval-component\", interval=250, n_intervals=0, max_intervals=0),\r\n ],\r\n style={\"width\": \"100%\"},\r\n fluid=True,\r\n)\r\napp.clientside_callback(\r\n ClientsideFunction(namespace=\"clientside\", function_name=\"mathjax_call\"),\r\n Output(\"ignore\", \"children\"),\r\n Input(\"time_slider\", \"value\"),\r\n Input(\"frequency_slider\", \"value\"),\r\n Input(\"phaseA_amplitude_slider\", \"value\"),\r\n Input(\"phaseB_amplitude_slider\", \"value\"),\r\n Input(\"phaseC_amplitude_slider\", \"value\"),\r\n Input(\"phaseA_phase_slider\", \"value\"),\r\n Input(\"phaseB_phase_slider\", \"value\"),\r\n Input(\"phaseC_phase_slider\", \"value\"),\r\n Input(\"size_slider\", \"value\"),\r\n Input(\"zerosequence_slider\", \"value\"),\r\n Input(\"focus_xy\", \"n_clicks\"),\r\n Input(\"focus_xz\", \"n_clicks\"),\r\n Input(\"focus_yz\", \"n_clicks\"),\r\n 
Input(\"focus_corner\", \"n_clicks\"),\r\n Input(\"projection\", \"on\"),\r\n)\r\n\r\n\r\nif DEBUG is True:\r\n app.run_server(debug=True)\r\nelse:\r\n app.run_server(\"0.0.0.0\", 8050)\r\n" ]
[ [ "numpy.max", "numpy.array", "numpy.sin", "numpy.zeros", "numpy.ones", "numpy.min", "numpy.einsum", "numpy.cos", "numpy.sqrt", "numpy.linspace" ] ]
omerlux/Recurrent_Neural_Network_-_Part_2
[ "afaa4f31fcaf1c9fcf97f6757263c1ed6b0fa4eb" ]
[ "mos-pytorch1.1/PTB-20201018-170341-SOTA/scripts/model.py" ]
[ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nfrom embed_regularize import embedded_dropout\nfrom locked_dropout import LockedDropout\nfrom weight_drop import WeightDrop\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nhidlast, nlayers, \n dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, \n tie_weights=False, ldropout=0.5, n_experts=10):\n super(RNNModel, self).__init__()\n self.use_dropout = True\n self.lockdrop = LockedDropout()\n self.encoder = nn.Embedding(ntoken, ninp)\n \n self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else nhidlast, 1, dropout=0) for l in range(nlayers)]\n if wdrop:\n self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop if self.use_dropout else 0) for rnn in self.rnns]\n self.rnns = torch.nn.ModuleList(self.rnns)\n\n self.prior = nn.Linear(nhidlast, n_experts, bias=False)\n self.latent = nn.Sequential(nn.Linear(nhidlast, n_experts*ninp), nn.Tanh())\n self.decoder = nn.Linear(ninp, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n #if nhid != ninp:\n # raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.ninp = ninp\n self.nhid = nhid\n self.nhidlast = nhidlast\n self.nlayers = nlayers\n self.dropout = dropout\n self.dropouti = dropouti\n self.dropouth = dropouth\n self.dropoute = dropoute\n self.ldropout = ldropout\n self.dropoutl = ldropout\n self.n_experts = n_experts\n self.ntoken = ntoken\n\n size = 0\n for p in self.parameters():\n size += p.nelement()\n print('param size: {}'.format(size))\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input, hidden, return_h=False, return_prob=False):\n batch_size = input.size(1)\n\n emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if (self.training and self.use_dropout) else 0)\n #emb = self.idrop(emb)\n\n emb = self.lockdrop(emb, self.dropouti if self.use_dropout else 0)\n\n raw_output = emb\n new_hidden = []\n #raw_output, hidden = self.rnn(emb, hidden)\n raw_outputs = []\n outputs = []\n for l, rnn in enumerate(self.rnns):\n current_input = raw_output\n raw_output, new_h = rnn(raw_output, hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.nlayers - 1:\n #self.hdrop(raw_output)\n raw_output = self.lockdrop(raw_output, self.dropouth if self.use_dropout else 0)\n outputs.append(raw_output)\n hidden = new_hidden\n\n output = self.lockdrop(raw_output, self.dropout if self.use_dropout else 0)\n outputs.append(output)\n\n latent = self.latent(output)\n latent = self.lockdrop(latent, self.dropoutl if self.use_dropout else 0)\n logit = self.decoder(latent.view(-1, self.ninp))\n\n prior_logit = self.prior(output).contiguous().view(-1, self.n_experts)\n prior = nn.functional.softmax(prior_logit, -1)\n\n prob = nn.functional.softmax(logit.view(-1, self.ntoken), -1).view(-1, self.n_experts, 
self.ntoken)\n prob = (prob * prior.unsqueeze(2).expand_as(prob)).sum(1)\n\n if return_prob:\n model_output = prob\n else:\n log_prob = torch.log(prob.add_(1e-8))\n model_output = log_prob\n\n model_output = model_output.view(-1, batch_size, self.ntoken)\n\n if return_h:\n return model_output, hidden, raw_outputs, outputs\n return model_output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return [(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.nhidlast).zero_(),\n weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.nhidlast).zero_())\n for l in range(self.nlayers)]\n\nif __name__ == '__main__':\n model = RNNModel('LSTM', 10, 12, 12, 12, 2)\n input = torch.LongTensor(13, 9).random_(0, 10)\n hidden = model.init_hidden(9)\n model(input, hidden)\n print(model)\n\n # input = Variable(torch.LongTensor(13, 9).random_(0, 10))\n # hidden = model.init_hidden(9)\n # print(model.sample(input, hidden, 5, 6, 1, 2, sample_latent=True).size())\n" ]
[ [ "torch.nn.Linear", "torch.nn.LSTM", "torch.nn.ModuleList", "torch.nn.Tanh", "torch.LongTensor", "torch.nn.functional.softmax", "torch.nn.Embedding" ] ]
yangliu-re/nasbench
[ "bfd4328afc24727d1e7d5e33f8d8839310101830" ]
[ "nasbench/lib/model_builder.py" ]
[ "# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Builds the TensorFlow computational graph.\n\nTensors flowing into a single vertex are added together for all vertices\nexcept the output, which is concatenated instead. Tensors flowing out of input\nare always added.\n\nIf interior edge channels don't match, drop the extra channels (channels are\nguaranteed non-decreasing). Tensors flowing out of the input as always\nprojected instead.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom nasbench.lib import base_ops\nfrom nasbench.lib import training_time\nimport numpy as np\nimport tensorflow as tf\n\n\ndef build_model_fn(spec, config, num_train_images):\n \"\"\"Returns a model function for Estimator.\"\"\"\n if config['data_format'] == 'channels_last':\n channel_axis = 3\n elif config['data_format'] == 'channels_first':\n # Currently this is not well supported\n channel_axis = 1\n else:\n raise ValueError('invalid data_format')\n\n def model_fn(features, labels, mode, params):\n \"\"\"Builds the model from the input features.\"\"\"\n del params # Unused\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n # Store auxiliary activations increasing in depth of network. 
First\n # activation occurs immediately after the stem and the others immediately\n # follow each stack.\n aux_activations = []\n\n # Initial stem convolution\n with tf.compat.v1.variable_scope('stem'):\n net = base_ops.conv_bn_relu(\n features, 3, config['stem_filter_size'],\n is_training, config['data_format'])\n aux_activations.append(net)\n\n for stack_num in range(config['num_stacks']):\n channels = net.get_shape()[channel_axis]#.value\n\n # Downsample at start (except first)\n if stack_num > 0:\n net = tf.compat.v1.layers.max_pooling2d(\n inputs=net,\n pool_size=(2, 2),\n strides=(2, 2),\n padding='same',\n data_format=config['data_format'])\n\n # Double output channels each time we downsample\n channels *= 2\n\n with tf.compat.v1.variable_scope('stack{}'.format(stack_num)):\n for module_num in range(config['num_modules_per_stack']):\n with tf.compat.v1.variable_scope('module{}'.format(module_num)):\n net = build_module(\n spec,\n inputs=net,\n channels=channels,\n is_training=is_training)\n aux_activations.append(net)\n\n # Global average pool\n if config['data_format'] == 'channels_last':\n net = tf.reduce_mean(input_tensor=net, axis=[1, 2])\n elif config['data_format'] == 'channels_first':\n net = tf.reduce_mean(input_tensor=net, axis=[2, 3])\n else:\n raise ValueError('invalid data_format')\n\n # Fully-connected layer to labels\n logits = tf.compat.v1.layers.dense(\n inputs=net,\n units=config['num_labels'])\n\n if mode == tf.estimator.ModeKeys.PREDICT and not config['use_tpu']:\n # It is a known limitation of Estimator that the labels\n # are not passed during PREDICT mode when running on CPU/GPU\n # (https://github.com/tensorflow/tensorflow/issues/17824), thus we cannot\n # compute the loss or anything dependent on it (i.e., the gradients).\n loss = tf.constant(0.0)\n else:\n loss = tf.compat.v1.losses.softmax_cross_entropy(\n onehot_labels=tf.one_hot(labels, config['num_labels']),\n logits=logits)\n\n loss += config['weight_decay'] * tf.add_n(\n [tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()])\n\n # Use inference mode to compute some useful metrics on a fixed sample\n # Due to the batch being sharded on TPU, these metrics should be run on CPU\n # only to ensure that the metrics are computed on the whole batch. 
We add a\n # leading dimension because PREDICT expects batch-shaped tensors.\n if mode == tf.estimator.ModeKeys.PREDICT:\n parameter_norms = {\n 'param:' + tensor.name:\n tf.expand_dims(tf.norm(tensor=tensor, ord=2), 0)\n for tensor in tf.compat.v1.trainable_variables()\n }\n\n # Compute gradients of all parameters and the input simultaneously\n all_params_names = []\n all_params_tensors = []\n for tensor in tf.compat.v1.trainable_variables():\n all_params_names.append('param_grad_norm:' + tensor.name)\n all_params_tensors.append(tensor)\n all_params_names.append('input_grad_norm')\n all_params_tensors.append(features)\n\n grads = tf.gradients(ys=loss, xs=all_params_tensors)\n\n param_gradient_norms = {}\n for name, grad in list(zip(all_params_names, grads))[:-1]:\n if grad is not None:\n param_gradient_norms[name] = (\n tf.expand_dims(tf.norm(tensor=grad, ord=2), 0))\n else:\n param_gradient_norms[name] = (\n tf.expand_dims(tf.constant(0.0), 0))\n\n if grads[-1] is not None:\n input_grad_norm = tf.sqrt(tf.reduce_sum(\n input_tensor=tf.square(grads[-1]), axis=[1, 2, 3]))\n else:\n input_grad_norm = tf.expand_dims(tf.constant(0.0), 0)\n\n covariance_matrices = {\n 'cov_matrix_%d' % i:\n tf.expand_dims(_covariance_matrix(aux), 0)\n for i, aux in enumerate(aux_activations)\n }\n\n predictions = {\n 'logits': logits,\n 'loss': tf.expand_dims(loss, 0),\n 'input_grad_norm': input_grad_norm,\n }\n predictions.update(parameter_norms)\n predictions.update(param_gradient_norms)\n predictions.update(covariance_matrices)\n\n return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.compat.v1.train.get_or_create_global_step()\n base_lr = config['learning_rate']\n if config['use_tpu']:\n base_lr *= config['tpu_num_shards']\n\n if config['lr_decay_method'] == 'COSINE_BY_STEP':\n total_steps = int(config['train_epochs'] * num_train_images /\n config['batch_size'])\n progress_fraction = tf.cast(global_step, tf.float32) / total_steps\n learning_rate = (0.5 * base_lr *\n (1 + tf.cos(np.pi * progress_fraction)))\n\n elif config['lr_decay_method'] == 'COSINE_BY_TIME':\n # Requires training_time.limit hooks to be added to Estimator\n elapsed_time = tf.cast(training_time.get_total_time(), dtype=tf.float32)\n progress_fraction = elapsed_time / config['train_seconds']\n learning_rate = (0.5 * base_lr *\n (1 + tf.cos(np.pi * progress_fraction)))\n\n elif config['lr_decay_method'] == 'STEPWISE':\n # divide LR by 10 at 1/2, 2/3, and 5/6 of total epochs\n total_steps = (config['train_epochs'] * num_train_images /\n config['batch_size'])\n boundaries = [int(0.5 * total_steps),\n int(0.667 * total_steps),\n int(0.833 * total_steps)]\n values = [1.0 * base_lr,\n 0.1 * base_lr,\n 0.01 * base_lr,\n 0.0001 * base_lr]\n learning_rate = tf.compat.v1.train.piecewise_constant(\n global_step, boundaries, values)\n\n else:\n raise ValueError('invalid lr_decay_method')\n\n # Set LR to 0 for step 0 to initialize the weights without training\n learning_rate = tf.compat.v1.where(tf.equal(global_step, 0), 0.0, learning_rate)\n\n optimizer = tf.compat.v1.train.RMSPropOptimizer(\n learning_rate=learning_rate,\n momentum=config['momentum'],\n epsilon=1.0)\n if config['use_tpu']:\n optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)\n\n # Update ops required for batch norm moving variables\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = 
optimizer.minimize(loss, global_step)\n\n return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op)\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(labels, logits):\n predictions = tf.argmax(input=logits, axis=1)\n accuracy = tf.compat.v1.metrics.accuracy(labels, predictions)\n\n return {'accuracy': accuracy}\n\n eval_metrics = (metric_fn, [labels, logits])\n\n return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=eval_metrics)\n\n return model_fn\n\n\ndef build_module(spec, inputs, channels, is_training):\n \"\"\"Build a custom module using a proposed model spec.\n\n Builds the model using the adjacency matrix and op labels specified. Channels\n controls the module output channel count but the interior channels are\n determined via equally splitting the channel count whenever there is a\n concatenation of Tensors.\n\n Args:\n spec: ModelSpec object.\n inputs: input Tensors to this module.\n channels: output channel count.\n is_training: bool for whether this model is training.\n\n Returns:\n output Tensor from built module.\n\n Raises:\n ValueError: invalid spec\n \"\"\"\n num_vertices = np.shape(spec.matrix)[0]\n\n if spec.data_format == 'channels_last':\n channel_axis = 3\n elif spec.data_format == 'channels_first':\n channel_axis = 1\n else:\n raise ValueError('invalid data_format')\n\n input_channels = inputs.get_shape()[channel_axis]#.value\n # vertex_channels[i] = number of output channels of vertex i\n vertex_channels = compute_vertex_channels(\n input_channels, channels, spec.matrix)\n\n # Construct tensors from input forward\n tensors = [tf.identity(inputs, name='input')]\n\n final_concat_in = []\n for t in range(1, num_vertices - 1):\n with tf.compat.v1.variable_scope('vertex_{}'.format(t)):\n # Create interior connections, truncating if necessary\n add_in = [truncate(tensors[src], vertex_channels[t], spec.data_format)\n for src in range(1, t) if spec.matrix[src, t]]\n\n # Create add connection from projected input\n if spec.matrix[0, t]:\n add_in.append(projection(\n tensors[0],\n vertex_channels[t],\n is_training,\n spec.data_format))\n\n if len(add_in) == 1:\n vertex_input = add_in[0]\n else:\n vertex_input = tf.add_n(add_in)\n\n # Perform op at vertex t\n op = base_ops.OP_MAP[spec.ops[t]](\n is_training=is_training,\n data_format=spec.data_format)\n vertex_value = op.build(vertex_input, vertex_channels[t])\n\n tensors.append(vertex_value)\n if spec.matrix[t, num_vertices - 1]:\n final_concat_in.append(tensors[t])\n\n # Construct final output tensor by concating all fan-in and adding input.\n if not final_concat_in:\n # No interior vertices, input directly connected to output\n assert spec.matrix[0, num_vertices - 1]\n with tf.compat.v1.variable_scope('output'):\n outputs = projection(\n tensors[0],\n channels,\n is_training,\n spec.data_format)\n\n else:\n if len(final_concat_in) == 1:\n outputs = final_concat_in[0]\n else:\n outputs = tf.concat(final_concat_in, channel_axis)\n\n if spec.matrix[0, num_vertices - 1]:\n outputs += projection(\n tensors[0],\n channels,\n is_training,\n spec.data_format)\n\n outputs = tf.identity(outputs, name='output')\n return outputs\n\n\ndef projection(inputs, channels, is_training, data_format):\n \"\"\"1x1 projection (as in ResNet) followed by batch normalization and ReLU.\"\"\"\n with tf.compat.v1.variable_scope('projection'):\n net = base_ops.conv_bn_relu(inputs, 1, channels, is_training, data_format)\n\n return net\n\n\ndef truncate(inputs, 
channels, data_format):\n \"\"\"Slice the inputs to channels if necessary.\"\"\"\n if data_format == 'channels_last':\n input_channels = inputs.get_shape()[3]#.value\n else:\n assert data_format == 'channels_first'\n input_channels = inputs.get_shape()[1]#.value\n\n if input_channels < channels:\n raise ValueError('input channel < output channels for truncate')\n elif input_channels == channels:\n return inputs # No truncation necessary\n else:\n # Truncation should only be necessary when channel division leads to\n # vertices with +1 channels. The input vertex should always be projected to\n # the minimum channel count.\n assert input_channels - channels == 1\n if data_format == 'channels_last':\n return tf.slice(inputs, [0, 0, 0, 0], [-1, -1, -1, channels])\n else:\n return tf.slice(inputs, [0, 0, 0, 0], [-1, channels, -1, -1])\n\n\ndef compute_vertex_channels(input_channels, output_channels, matrix):\n \"\"\"Computes the number of channels at every vertex.\n\n Given the input channels and output channels, this calculates the number of\n channels at each interior vertex. Interior vertices have the same number of\n channels as the max of the channels of the vertices it feeds into. The output\n channels are divided amongst the vertices that are directly connected to it.\n When the division is not even, some vertices may receive an extra channel to\n compensate.\n\n Args:\n input_channels: input channel count.\n output_channels: output channel count.\n matrix: adjacency matrix for the module (pruned by model_spec).\n\n Returns:\n list of channel counts, in order of the vertices.\n \"\"\"\n num_vertices = np.shape(matrix)[0]\n\n vertex_channels = [0] * num_vertices\n vertex_channels[0] = input_channels\n vertex_channels[num_vertices - 1] = output_channels\n\n if num_vertices == 2:\n # Edge case where module only has input and output vertices\n return vertex_channels\n\n # Compute the in-degree ignoring input, axis 0 is the src vertex and axis 1 is\n # the dst vertex. Summing over 0 gives the in-degree count of each vertex.\n in_degree = np.sum(matrix[1:], axis=0)\n interior_channels = output_channels // in_degree[num_vertices - 1]\n correction = output_channels % in_degree[num_vertices - 1] # Remainder to add\n\n # Set channels of vertices that flow directly to output\n for v in range(1, num_vertices - 1):\n if matrix[v, num_vertices - 1]:\n vertex_channels[v] = interior_channels\n if correction:\n vertex_channels[v] += 1\n correction -= 1\n\n # Set channels for all other vertices to the max of the out edges, going\n # backwards. 
(num_vertices - 2) index skipped because it only connects to\n # output.\n for v in range(num_vertices - 3, 0, -1):\n if not matrix[v, num_vertices - 1]:\n for dst in range(v + 1, num_vertices - 1):\n if matrix[v, dst]:\n vertex_channels[v] = max(vertex_channels[v], vertex_channels[dst])\n assert vertex_channels[v] > 0\n\n tf.compat.v1.logging.info('vertex_channels: %s', str(vertex_channels))\n\n # Sanity check, verify that channels never increase and final channels add up.\n final_fan_in = 0\n for v in range(1, num_vertices - 1):\n if matrix[v, num_vertices - 1]:\n final_fan_in += vertex_channels[v]\n for dst in range(v + 1, num_vertices - 1):\n if matrix[v, dst]:\n assert vertex_channels[v] >= vertex_channels[dst]\n assert final_fan_in == output_channels or num_vertices == 2\n # num_vertices == 2 means only input/output nodes, so 0 fan-in\n\n return vertex_channels\n\n\ndef _covariance_matrix(activations):\n \"\"\"Computes the unbiased covariance matrix of the samples within the batch.\n\n Computes the sample covariance between the samples in the batch. Specifically,\n\n C(i,j) = (x_i - mean(x_i)) dot (x_j - mean(x_j)) / (N - 1)\n\n Matches the default behavior of np.cov().\n\n Args:\n activations: tensor activations with batch dimension first.\n\n Returns:\n [batch, batch] shape tensor for the covariance matrix.\n \"\"\"\n batch_size = activations.get_shape()[0]#.value\n flattened = tf.reshape(activations, [batch_size, -1])\n means = tf.reduce_mean(input_tensor=flattened, axis=1, keepdims=True)\n\n centered = flattened - means\n squared = tf.matmul(centered, tf.transpose(a=centered))\n cov = squared / (tf.cast(tf.shape(input=flattened)[1], tf.float32) - 1)\n\n return cov\n\n" ]
[ [ "tensorflow.reshape", "tensorflow.gradients", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.train.piecewise_constant", "tensorflow.control_dependencies", "tensorflow.compat.v1.layers.dense", "tensorflow.one_hot", "tensorflow.identity", "tensorflow.cast", "tensorflow.shape", "tensorflow.compat.v1.train.RMSPropOptimizer", "tensorflow.concat", "tensorflow.argmax", "tensorflow.transpose", "tensorflow.add_n", "tensorflow.norm", "tensorflow.constant", "tensorflow.compat.v1.tpu.CrossShardOptimizer", "tensorflow.compat.v1.metrics.accuracy", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.expand_dims", "tensorflow.cos", "tensorflow.compat.v1.variable_scope", "numpy.shape", "tensorflow.compat.v1.layers.max_pooling2d", "tensorflow.nn.l2_loss", "tensorflow.compat.v1.estimator.tpu.TPUEstimatorSpec", "tensorflow.compat.v1.get_collection", "numpy.sum", "tensorflow.equal", "tensorflow.slice", "tensorflow.reduce_mean", "tensorflow.square" ] ]
ecohydro/GlobalUrbanHeat
[ "0590fc16420f32cbe834f1838745d6e7e9704132" ]
[ "src/json_to_csv.py" ]
[ "##################################################################################\n#\n# Trends\n# By Cascade Tuholske July 2021\n#\n# Write json outputs to .csv\n#\n#################################################################################\n\n# Depedencies\nimport pandas as pd\nimport os \nimport glob\n\nif __name__==\"__main__\":\n \n # json files\n data_path = os.path.join('') # file path\n fns = glob.glob(data_path+'*.json')\n \n # write to csvs\n for fn in fns:\n fn_tail = fn.split(data_path)[1].split('.json')[0] + '.csv' # change to csv\n df = pd.read_json(fn, orient = 'split') # open it\n df.to_csv(os.path.join(data_path,fn_tail))\n print(fn_tail)\n print('done!')" ]
[ [ "pandas.read_json" ] ]
Meatplay/steam-vr-wheel
[ "01d02d9036d5a718b570b5a3316d9a3989f0f7f2" ]
[ "steam_vr_wheel/_wheel.py" ]
[ "from collections import deque\r\nfrom math import pi, atan2, sin, cos\r\n\r\nimport numpy as np\r\nimport openvr\r\nimport os\r\nimport copy\r\n\r\nfrom steam_vr_wheel._virtualpad import VirtualPad, RightTrackpadAxisDisablerMixin\r\nfrom steam_vr_wheel.pyvjoy import HID_USAGE_X\r\n\r\n\r\n\r\nFULLTURN = 4\r\n\r\n\r\ndef check_result(result):\r\n if result:\r\n error_name = openvr.VROverlay().getOverlayErrorNameFromEnum(result)\r\n raise Exception(\"OpenVR Error:\", error_name)\r\n\r\ndef print_matrix(matrix):\r\n l = []\r\n for i in range(3):\r\n ll = []\r\n for j in range(4):\r\n ll.append(matrix[j])\r\n l.append(ll)\r\n print(l)\r\n\r\n\r\ndef initRotationMatrix(axis, angle, matrix=None):\r\n # angle in radians\r\n if matrix is None:\r\n matrix = openvr.HmdMatrix34_t()\r\n if axis==0:\r\n matrix.m[0][0] = 1.0\r\n matrix.m[0][1] = 0.0\r\n matrix.m[0][2] = 0.0\r\n matrix.m[0][3] = 0.0\r\n matrix.m[1][0] = 0.0\r\n matrix.m[1][1] = cos(angle)\r\n matrix.m[1][2] = -sin(angle)\r\n matrix.m[1][3] = 0.0\r\n matrix.m[2][0] = 0.0\r\n matrix.m[2][1] = sin(angle)\r\n matrix.m[2][2] = cos(angle)\r\n matrix.m[2][3] = 0.0\r\n elif axis==1:\r\n matrix.m[0][0] = cos(angle)\r\n matrix.m[0][1] = 0.0\r\n matrix.m[0][2] = sin(angle)\r\n matrix.m[0][3] = 0.0\r\n matrix.m[1][0] = 0.0\r\n matrix.m[1][1] = 1.0\r\n matrix.m[1][2] = 0.0\r\n matrix.m[1][3] = 0.0\r\n matrix.m[2][0] = -sin(angle)\r\n matrix.m[2][1] = 0.0\r\n matrix.m[2][2] = cos(angle)\r\n matrix.m[2][3] = 0.0\r\n elif axis == 2:\r\n matrix.m[0][0] = cos(angle)\r\n matrix.m[0][1] = -sin(angle)\r\n matrix.m[0][2] = 0.0\r\n matrix.m[0][3] = 0.0\r\n matrix.m[1][0] = sin(angle)\r\n matrix.m[1][1] = cos(angle)\r\n matrix.m[1][2] = 0.0\r\n matrix.m[1][3] = 0.0\r\n matrix.m[2][0] = 0.0\r\n matrix.m[2][1] = 0.0\r\n matrix.m[2][2] = 1.0\r\n matrix.m[2][3] = 0.0\r\n return matrix\r\n\r\n\r\ndef matMul33(a, b, result=None):\r\n if result is None:\r\n result = openvr.HmdMatrix34_t()\r\n for i in range(3):\r\n for j in range(3):\r\n result.m[i][j] = 0.0\r\n for k in range(3):\r\n result.m[i][j] += a.m[i][k] * b.m[k][j]\r\n result[0][3] = b[0][3]\r\n result[1][3] = b[1][3]\r\n result[2][3] = b[2][3]\r\n return result\r\n\r\n\r\nclass HandsImage:\r\n def __init__(self, left_ctr, right_ctr):\r\n self._handl_closed = False\r\n self._handr_closed = False\r\n self.left_ctr = left_ctr\r\n self.right_ctr = right_ctr\r\n hand_size = 0.15\r\n\r\n self.vrsys = openvr.VRSystem()\r\n self.vroverlay = openvr.IVROverlay()\r\n\r\n result, self.l_ovr = self.vroverlay.createOverlay('left_hand'.encode(), 'left_hand'.encode())\r\n result, self.r_ovr = self.vroverlay.createOverlay('right_hand'.encode(), 'right_hand'.encode())\r\n\r\n check_result(self.vroverlay.setOverlayColor(self.l_ovr, 1, 1, 1))\r\n check_result(self.vroverlay.setOverlayColor(self.r_ovr, 1, 1, 1))\r\n check_result(self.vroverlay.setOverlayAlpha(self.l_ovr, 1))\r\n check_result(self.vroverlay.setOverlayAlpha(self.r_ovr, 1))\r\n check_result(self.vroverlay.setOverlayWidthInMeters(self.l_ovr, hand_size))\r\n check_result(self.vroverlay.setOverlayWidthInMeters(self.r_ovr, hand_size))\r\n\r\n this_dir = os.path.abspath(os.path.dirname(__file__))\r\n\r\n self.l_open_png = os.path.join(this_dir, 'media', 'hand_open_l.png')\r\n self.r_open_png = os.path.join(this_dir, 'media', 'hand_open_r.png')\r\n self.l_close_png = os.path.join(this_dir, 'media', 'hand_closed_l.png')\r\n self.r_close_png = os.path.join(this_dir, 'media', 'hand_closed_r.png')\r\n\r\n check_result(self.vroverlay.setOverlayFromFile(self.l_ovr, 
self.l_open_png.encode()))\r\n check_result(self.vroverlay.setOverlayFromFile(self.r_ovr, self.r_open_png.encode()))\r\n\r\n\r\n\r\n result, transform = self.vroverlay.setOverlayTransformTrackedDeviceRelative(self.l_ovr, self.left_ctr.id)\r\n result, transform = self.vroverlay.setOverlayTransformTrackedDeviceRelative(self.r_ovr, self.right_ctr.id)\r\n\r\n transform[0][0] = 1.0\r\n transform[0][1] = 0.0\r\n transform[0][2] = 0.0\r\n transform[0][3] = 0\r\n\r\n transform[1][0] = 0.0\r\n transform[1][1] = 1.0\r\n transform[1][2] = 0.0\r\n transform[1][3] = 0\r\n\r\n transform[2][0] = 0.0\r\n transform[2][1] = 0.0\r\n transform[2][2] = 1.0\r\n transform[2][3] = 0\r\n\r\n self.transform = transform\r\n\r\n rotate = initRotationMatrix(0, -pi / 2)\r\n self.transform = matMul33(rotate, self.transform)\r\n\r\n fn = self.vroverlay.function_table.setOverlayTransformTrackedDeviceRelative\r\n result = fn(self.l_ovr, self.left_ctr.id, openvr.byref(self.transform))\r\n result = fn(self.r_ovr, self.right_ctr.id, openvr.byref(self.transform))\r\n\r\n check_result(result)\r\n check_result(self.vroverlay.showOverlay(self.l_ovr))\r\n check_result(self.vroverlay.showOverlay(self.r_ovr))\r\n\r\n def left_grab(self):\r\n if not self._handl_closed:\r\n self.vroverlay.setOverlayFromFile(self.l_ovr, self.l_close_png.encode())\r\n self._handl_closed = True\r\n\r\n def left_ungrab(self):\r\n if self._handl_closed:\r\n self.vroverlay.setOverlayFromFile(self.l_ovr, self.l_open_png.encode())\r\n self._handl_closed = False\r\n\r\n def right_grab(self):\r\n if not self._handr_closed:\r\n self.vroverlay.setOverlayFromFile(self.r_ovr, self.r_close_png.encode())\r\n self._handr_closed = True\r\n\r\n def right_ungrab(self):\r\n if self._handr_closed:\r\n self.vroverlay.setOverlayFromFile(self.r_ovr, self.r_open_png.encode())\r\n self._handr_closed = False\r\n\r\n def hide(self):\r\n check_result(self.vroverlay.hideOverlay(self.l_ovr))\r\n check_result(self.vroverlay.hideOverlay(self.r_ovr))\r\n\r\n\r\nclass SteeringWheelImage:\r\n def __init__(self, x=0, y=-0.4, z=-0.35, size=0.55):\r\n self.vrsys = openvr.VRSystem()\r\n self.vroverlay = openvr.IVROverlay()\r\n result, self.wheel = self.vroverlay.createOverlay('keyiiii'.encode(), 'keyiiii'.encode())\r\n check_result(result)\r\n\r\n check_result(self.vroverlay.setOverlayColor(self.wheel, 1, 1, 1))\r\n check_result(self.vroverlay.setOverlayAlpha(self.wheel, 1))\r\n check_result(self.vroverlay.setOverlayWidthInMeters(self.wheel, size))\r\n\r\n this_dir = os.path.abspath(os.path.dirname(__file__))\r\n wheel_img = os.path.join(this_dir, 'media', 'steering_wheel.png')\r\n\r\n check_result(self.vroverlay.setOverlayFromFile(self.wheel, wheel_img.encode()))\r\n\r\n\r\n result, transform = self.vroverlay.setOverlayTransformAbsolute(self.wheel, openvr.TrackingUniverseSeated)\r\n\r\n transform[0][0] = 1.0\r\n transform[0][1] = 0.0\r\n transform[0][2] = 0.0\r\n transform[0][3] = x\r\n\r\n transform[1][0] = 0.0\r\n transform[1][1] = 1.0\r\n transform[1][2] = 0.0\r\n transform[1][3] = y\r\n\r\n transform[2][0] = 0.0\r\n transform[2][1] = 0.0\r\n transform[2][2] = 1.0\r\n transform[2][3] = z\r\n\r\n self.transform = transform\r\n self.size = size\r\n\r\n fn = self.vroverlay.function_table.setOverlayTransformAbsolute\r\n pmatTrackingOriginToOverlayTransform = transform\r\n result = fn(self.wheel, openvr.TrackingUniverseSeated, openvr.byref(pmatTrackingOriginToOverlayTransform))\r\n\r\n check_result(result)\r\n check_result(self.vroverlay.showOverlay(self.wheel))\r\n\r\n def move(self, point, 
size):\r\n self.transform[0][3] = point.x\r\n self.transform[1][3] = point.y\r\n self.transform[2][3] = point.z\r\n print(point.x, point.y, point.z)\r\n self.size = size\r\n fn = self.vroverlay.function_table.setOverlayTransformAbsolute\r\n fn(self.wheel, openvr.TrackingUniverseSeated, openvr.byref(self.transform))\r\n check_result(self.vroverlay.setOverlayWidthInMeters(self.wheel, size))\r\n\r\n def rotate(self, angles, axis=[2,]):\r\n try:\r\n self.rotation_matrix\r\n except AttributeError:\r\n self.rotation_matrix = openvr.HmdMatrix34_t()\r\n if not isinstance(angles, list):\r\n angles = [angles, ]\r\n\r\n if not isinstance(axis, list):\r\n axis = [axis, ]\r\n\r\n result = copy.copy(self.transform)\r\n for angle, ax in zip(angles, axis):\r\n initRotationMatrix(ax, -angle, self.rotation_matrix)\r\n result = matMul33(self.rotation_matrix, result)\r\n\r\n fn = self.vroverlay.function_table.setOverlayTransformAbsolute\r\n fn(self.wheel, openvr.TrackingUniverseSeated, openvr.byref(result))\r\n\r\n def hide(self):\r\n check_result(self.vroverlay.hideOverlay(self.wheel))\r\n\r\n\r\nclass Point:\r\n def __init__(self, x, y, z):\r\n self.x = x\r\n self.y = y\r\n self.z = z\r\n\r\nclass GrabControllerPoint(Point):\r\n def __init__(self, x, y, z, id=0):\r\n super().__init__(x, y, z)\r\n self.id = id\r\n\r\n\r\nclass Wheel(RightTrackpadAxisDisablerMixin, VirtualPad):\r\n def __init__(self, inertia=0.95, center_speed=pi/180):\r\n super().__init__()\r\n self.vrsys = openvr.VRSystem()\r\n self.hands_overlay = None\r\n x, y, z = self.config.wheel_center\r\n size = self.config.wheel_size\r\n self._inertia = inertia\r\n self._center_speed = center_speed # radians per frame, force which returns wheel to center when not grabbed\r\n self._center_speed_coeff = 1 # might be calculated later using game telemetry\r\n self.x = 0 # -1 0 1\r\n self._wheel_angles = deque(maxlen=10)\r\n self._wheel_angles.append(0)\r\n self._wheel_angles.append(0)\r\n self._snapped = False\r\n\r\n # radians per frame last turn speed when wheel was being held, gradually decreases after wheel is released\r\n self._turn_speed = 0\r\n\r\n self.wheel_image = SteeringWheelImage(x=x, y=y, z=z, size=size)\r\n self.center = Point(x, y, z)\r\n self.size = size\r\n self._grab_started_point = None\r\n self._wheel_grab_offset = 0\r\n\r\n # for manual grab:\r\n self._left_controller_grabbed = False\r\n self._right_controller_grabbed = False\r\n\r\n def point_in_holding_bounds(self, point):\r\n width = 0.10\r\n a = self.size/2 + width\r\n b = self.size/2 - width\r\n if self.config.vertical_wheel:\r\n x = point.x - self.center.x\r\n y = point.y - self.center.y\r\n z = point.z - self.center.z\r\n else:\r\n z = point.y - self.center.y\r\n y = point.x - self.center.x\r\n x = point.z - self.center.z\r\n\r\n if abs(z) < width:\r\n distance = (x**2+y**2)**0.5\r\n if distance < b:\r\n return False\r\n if distance < a:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n def unwrap_wheel_angles(self):\r\n period = 2 * pi\r\n angle = np.array(self._wheel_angles, dtype=float)\r\n diff = np.diff(angle)\r\n diff_to_correct = (diff + period / 2.) 
% period - period / 2.\r\n increment = np.cumsum(diff_to_correct - diff)\r\n angle[1:] += increment\r\n self._wheel_angles[-1] = angle[-1]\r\n\r\n def wheel_raw_angle(self, point):\r\n if self.config.vertical_wheel:\r\n a = float(point.y) - self.center.y\r\n b = float(point.x) - self.center.x\r\n else:\r\n a = float(point.x) - self.center.x\r\n b = float(point.z) - self.center.z\r\n angle = atan2(a, b)\r\n return angle\r\n\r\n def wheel_double_raw_angle(self, left_ctr, right_ctr):\r\n if self.config.vertical_wheel:\r\n a = left_ctr.y - right_ctr.y\r\n b = left_ctr.x - right_ctr.x\r\n else:\r\n a = left_ctr.x - right_ctr.x\r\n b = left_ctr.z - right_ctr.z\r\n return atan2(a, b)\r\n\r\n def ready_to_unsnap(self, l, r):\r\n d = (l.x - r.x)**2 + (l.y - r.y)**2 + (l.z - r.z)**2\r\n\r\n if d > self.size**2:\r\n return True\r\n\r\n dc = ((self.center.x - (l.x+r.x)/2)**2\r\n + (self.center.y - (l.y+r.y)/2)**2\r\n + (self.center.z - (l.z+r.z)/2)**2\r\n )\r\n if dc > self.size**2:\r\n return True\r\n\r\n return False\r\n\r\n def set_button_unpress(self, button, hand):\r\n super().set_button_unpress(button, hand)\r\n if self.config.wheel_grabbed_by_grip_toggle:\r\n if button == openvr.k_EButton_Grip and hand == 'left':\r\n self._left_controller_grabbed = False\r\n\r\n if button == openvr.k_EButton_Grip and hand == 'right':\r\n self._right_controller_grabbed = False\r\n\r\n if self._right_controller_grabbed and self._left_controller_grabbed:\r\n pass\r\n else:\r\n self._snapped = False\r\n\r\n def set_button_press(self, button, hand):\r\n super().set_button_press(button, hand)\r\n if button == openvr.k_EButton_Grip and hand == 'left':\r\n if self.config.wheel_grabbed_by_grip_toggle:\r\n self._left_controller_grabbed = True\r\n else:\r\n self._left_controller_grabbed = not self._left_controller_grabbed\r\n\r\n if button == openvr.k_EButton_Grip and hand == 'right':\r\n if self.config.wheel_grabbed_by_grip_toggle:\r\n self._right_controller_grabbed = True\r\n else:\r\n self._right_controller_grabbed = not self._right_controller_grabbed\r\n\r\n if self._right_controller_grabbed and self._left_controller_grabbed:\r\n pass\r\n else:\r\n self._snapped = False\r\n\r\n def _wheel_update(self, left_ctr, right_ctr):\r\n if self.config.wheel_grabbed_by_grip:\r\n left_bound, right_bound = self._left_controller_grabbed, self._right_controller_grabbed\r\n else: # automatic gripping\r\n right_bound = self.point_in_holding_bounds(right_ctr)\r\n left_bound = self.point_in_holding_bounds(left_ctr)\r\n if self.ready_to_unsnap(left_ctr, right_ctr):\r\n self._snapped = False\r\n\r\n if right_bound and left_bound and not self._snapped:\r\n self.is_held([left_ctr, right_ctr])\r\n\r\n if self._snapped:\r\n angle = self.wheel_double_raw_angle(left_ctr, right_ctr) + self._wheel_grab_offset\r\n return angle\r\n\r\n if right_bound:\r\n controller = right_ctr\r\n self.is_held(controller)\r\n elif left_bound:\r\n controller = left_ctr\r\n self.is_held(controller)\r\n else:\r\n self.is_not_held()\r\n return None\r\n angle = self.wheel_raw_angle(controller) + self._wheel_grab_offset\r\n return angle\r\n\r\n def calculate_grab_offset(self, raw_angle=None):\r\n if raw_angle is None:\r\n raw_angle = self.wheel_raw_angle(self._grab_started_point)\r\n self._wheel_grab_offset = self._wheel_angles[-1] - raw_angle\r\n\r\n def is_held(self, controller):\r\n\r\n if isinstance(controller, list):\r\n self._snapped = True\r\n angle = self.wheel_double_raw_angle(controller[0], controller[1])\r\n self.calculate_grab_offset(angle)\r\n 
self._grab_started_point = None\r\n return\r\n\r\n if self._grab_started_point is None or self._grab_started_point.id != controller.id:\r\n self._grab_started_point = GrabControllerPoint(controller.x, controller.y, controller.z, controller.id)\r\n self.calculate_grab_offset()\r\n\r\n def is_not_held(self):\r\n self._grab_started_point = None\r\n\r\n def inertia(self):\r\n if self._grab_started_point:\r\n self._turn_speed = self._wheel_angles[-1] - self._wheel_angles[-2]\r\n else:\r\n self._wheel_angles.append(self._wheel_angles[-1] + self._turn_speed)\r\n self._turn_speed *= self._inertia\r\n\r\n def center_force(self):\r\n angle = self._wheel_angles[-1]\r\n sign = 1\r\n if angle < 0:\r\n sign = -1\r\n if abs(angle) < self._center_speed:\r\n self._wheel_angles[-1] = 0\r\n return\r\n self._wheel_angles[-1] -= self._center_speed * sign\r\n\r\n def send_to_vjoy(self):\r\n wheel_turn = self._wheel_angles[-1] / (2 * pi)\r\n axisX = int((-wheel_turn / FULLTURN + 0.5) * 0x8000)\r\n self.device.set_axis(HID_USAGE_X, axisX)\r\n\r\n def render(self):\r\n wheel_angle = self._wheel_angles[-1]\r\n if self.config.vertical_wheel:\r\n self.wheel_image.rotate(-wheel_angle)\r\n else:\r\n self.wheel_image.rotate([-wheel_angle, np.pi / 2], [2, 0])\r\n\r\n def limiter(self, left_ctr, right_ctr):\r\n if abs(self._wheel_angles[-1])/(2*pi)>FULLTURN/2:\r\n self._wheel_angles[-1] = self._wheel_angles[-2]\r\n openvr.VRSystem().triggerHapticPulse(left_ctr.id, 0, 3000)\r\n openvr.VRSystem().triggerHapticPulse(right_ctr\r\n .id, 0, 3000)\r\n\r\n\r\n def render_hands(self):\r\n if self._snapped:\r\n self.hands_overlay.left_grab()\r\n self.hands_overlay.right_grab()\r\n return\r\n if self._grab_started_point is None:\r\n self.hands_overlay.left_ungrab()\r\n self.hands_overlay.right_ungrab()\r\n return\r\n grab_hand_role = self.vrsys.getControllerRoleForTrackedDeviceIndex(self._grab_started_point.id)\r\n if grab_hand_role == openvr.TrackedControllerRole_RightHand:\r\n self.hands_overlay.right_grab()\r\n self.hands_overlay.left_ungrab()\r\n return\r\n if grab_hand_role == openvr.TrackedControllerRole_LeftHand:\r\n self.hands_overlay.left_grab()\r\n self.hands_overlay.right_ungrab()\r\n return\r\n\r\n\r\n def _wheel_update_common(self, angle, left_ctr, right_ctr):\r\n if angle:\r\n self._wheel_angles.append(angle)\r\n\r\n self.unwrap_wheel_angles()\r\n\r\n self.inertia()\r\n self.center_force()\r\n self.limiter(left_ctr, right_ctr)\r\n self.send_to_vjoy()\r\n\r\n\r\n def update(self, left_ctr, right_ctr):\r\n if self.hands_overlay is None:\r\n self.hands_overlay = HandsImage(left_ctr, right_ctr)\r\n super().update(left_ctr, right_ctr)\r\n\r\n angle = self._wheel_update(left_ctr, right_ctr)\r\n\r\n self._wheel_update_common(angle, left_ctr, right_ctr)\r\n\r\n self.render()\r\n self.render_hands()\r\n\r\n def move_wheel(self, right_ctr, left_ctr):\r\n self.center = Point(right_ctr.x, right_ctr.y, right_ctr.z)\r\n self.config.wheel_center = [self.center.x, self.center.y, self.center.z]\r\n size = ((right_ctr.x-left_ctr.x)**2 +(right_ctr.y-left_ctr.y)**2 + (right_ctr.z-left_ctr.z)**2 )**0.5*2\r\n self.config.wheel_size = size\r\n self.size = size\r\n self.wheel_image.move(self.center, size)\r\n\r\n\r\n\r\n def edit_mode(self, left_ctr, right_ctr):\r\n result, state_r = openvr.VRSystem().getControllerState(right_ctr.id)\r\n if state_r.ulButtonPressed:\r\n if list(reversed(bin(state_r.ulButtonPressed)[2:])).index('1') == openvr.k_EButton_SteamVR_Trigger:\r\n self.move_wheel(right_ctr, left_ctr)\r\n super().edit_mode(left_ctr, 
right_ctr)\r\n" ]
[ [ "numpy.cumsum", "numpy.array", "numpy.diff" ] ]
Swanson-Hysell-Group/2018_Midcontinent_Rift
[ "9dfa585fb43a2305ed284f306801fdda7db5a055" ]
[ "Code/bayesian_inversion/kewee_inversion/apw_path.py" ]
[ "from __future__ import print_function\nimport os, sys\nimport numpy as np\nimport scipy.stats as st\nimport pandas as pd\n\nsys.path.append(os.path.abspath('../mcplates'))\nimport mcplates\n\n# Shift all longitudes by 180 degrees to get around some plotting\n# issues. This is error prone, so it should be fixed eventually\nlon_shift = 180.\n\nslat = 46.8 # Duluth lat\nslon = 360. - 92.1 - lon_shift # Duluth lon\nduluth = mcplates.PlateCentroid(slon, slat)\n\n\ndef create_model(n_euler_rotations, use_tpw):\n if n_euler_rotations < 0:\n raise Exception(\"Number of plate Euler rotations must be greater than or equal to zero\")\n if use_tpw != False and use_tpw != True:\n raise Exception(\"Must enter 'true' or 'false' for whether to use TPW\")\n if n_euler_rotations == 0 and use_tpw == False:\n raise Exception(\"Must use either TPW or plate Euler rotations, or both\")\n\n print(\"Fitting Keweenawan APW track with\"\\\n +(\"out TPW and \" if use_tpw == False else \" TPW and \")\\\n +str(n_euler_rotations)+\" Euler rotation\"\\\n + (\"\" if n_euler_rotations == 1 else \"s\") )\n\n\n data = pd.read_csv(\"pole_means.csv\")\n # Give unnamed column an appropriate name\n data.rename(columns={'Unnamed: 0': 'Name',\\\n 'Unnamed: 14': 'GaussianOrUniform'},\\\n inplace=True)\n data = data[data.Name != 'Osler_N'] #Huge error, does not contribute much to the model\n data = data[data.PoleName != 'Abitibi'] # Standstill at the beginning, not realistic to fit\n data = data[data.PoleName != 'Haliburton'] #Much younger, far away pole, difficutlt to fit\n data.sort_values('AgeNominal', ascending=False, inplace=True)\n\n poles = []\n pole_names = []\n pole_colors = []\n for i, row in data.iterrows():\n pole_lat = row['PLat']\n pole_lon = row['PLon'] - lon_shift\n a95 = row['A95']\n age = row['AgeNominal']\n\n if row['GaussianOrUniform'] == 'gaussian':\n sigma_age = row['Gaussian_2sigma'] / 2.\n elif row['GaussianOrUniform'] == 'uniform':\n sigma_age = (row['AgeLower'], row['AgeUpper'])\n else:\n raise Exception(\"Unrecognized age error type\")\n\n pole = mcplates.PaleomagneticPole(\n pole_lon, pole_lat, angular_error=a95, age=age, sigma_age=sigma_age)\n poles.append(pole)\n pole_names.append(row['PoleName'])\n pole_colors.append(row['color'])\n\n tpw_str = 'true' if use_tpw else 'false'\n prefix = 'keweenawan_'+str(n_euler_rotations)+'_'+tpw_str\n path = mcplates.APWPath(prefix, poles, n_euler_rotations)\n tpw_rate_scale = 2.5 if use_tpw else None\n path.create_model(site_lon_lat=(slon, slat), watson_concentration=0.0,\\\n rate_scale=2.5, tpw_rate_scale=tpw_rate_scale)\n return path, poles, pole_names, pole_colors\n\ndef load_or_sample_model(path):\n if os.path.isfile(path.dbname):\n print(\"Loading MCMC results from disk...\")\n path.load_mcmc()\n print(\"Done\")\n else:\n path.sample_mcmc(2000000)\n\nif __name__ == \"__main__\":\n # Parse input\n #Get number of euler rotations\n if len(sys.argv) < 3:\n raise Exception(\"Please enter the number of Euler rotations to fit, and 'true' or 'false' for whether to include TPW\")\n n_euler_rotations = int(sys.argv[1])\n use_tpw = False if sys.argv[2] == 'false' else True;\n path, poles, pole_names, pole_colors = create_model(n_euler_rotations, use_tpw)\n load_or_sample_model(path)\n" ]
[ [ "pandas.read_csv" ] ]
GekFreeman/SparrowRecSys
[ "4592dd7fa556e9ee30512ca244b81885d045ba02" ]
[ "TFRecModel/src/com/wzhe/sparrowrecsys/offline/tensorflow/DeepFM.py" ]
[ "import tensorflow as tf\n\n# Training samples path, change to your local path\nTRAIN_DATA_URL = \"file:///Users/zhewang/Workspace/SparrowRecSys/src/main/resources/webroot/sampledata/modelSamples.csv\"\nsamples_file_path = tf.keras.utils.get_file(\"modelSamples.csv\", TRAIN_DATA_URL)\n\n\n# load sample as tf dataset\ndef get_dataset(file_path):\n dataset = tf.data.experimental.make_csv_dataset(\n file_path,\n batch_size=12,\n label_name='label',\n na_value=\"0\",\n num_epochs=1,\n ignore_errors=True)\n return dataset\n\n\n# sample dataset size 110830/12(batch_size) = 9235\nraw_samples_data = get_dataset(samples_file_path)\nprint(raw_samples_data)\n\n# split as test dataset and training dataset\ntest_dataset = raw_samples_data.take(1000)\ntrain_dataset = raw_samples_data.skip(1000)\n\n# define input for keras model\ninputs = {\n 'movieAvgRating': tf.keras.layers.Input(name='movieAvgRating', shape=(), dtype='float32'),\n 'movieRatingStddev': tf.keras.layers.Input(name='movieRatingStddev', shape=(), dtype='float32'),\n 'movieRatingCount': tf.keras.layers.Input(name='movieRatingCount', shape=(), dtype='int32'),\n 'userAvgRating': tf.keras.layers.Input(name='userAvgRating', shape=(), dtype='float32'),\n 'userRatingStddev': tf.keras.layers.Input(name='userRatingStddev', shape=(), dtype='float32'),\n 'userRatingCount': tf.keras.layers.Input(name='userRatingCount', shape=(), dtype='int32'),\n 'releaseYear': tf.keras.layers.Input(name='releaseYear', shape=(), dtype='int32'),\n\n 'movieId': tf.keras.layers.Input(name='movieId', shape=(), dtype='int32'),\n 'userId': tf.keras.layers.Input(name='userId', shape=(), dtype='int32'),\n 'userRatedMovie1': tf.keras.layers.Input(name='userRatedMovie1', shape=(), dtype='int32'),\n\n 'userGenre1': tf.keras.layers.Input(name='userGenre1', shape=(), dtype='string'),\n 'userGenre2': tf.keras.layers.Input(name='userGenre2', shape=(), dtype='string'),\n 'userGenre3': tf.keras.layers.Input(name='userGenre3', shape=(), dtype='string'),\n 'userGenre4': tf.keras.layers.Input(name='userGenre4', shape=(), dtype='string'),\n 'userGenre5': tf.keras.layers.Input(name='userGenre5', shape=(), dtype='string'),\n 'movieGenre1': tf.keras.layers.Input(name='movieGenre1', shape=(), dtype='string'),\n 'movieGenre2': tf.keras.layers.Input(name='movieGenre2', shape=(), dtype='string'),\n 'movieGenre3': tf.keras.layers.Input(name='movieGenre3', shape=(), dtype='string'),\n}\n\n# movie id embedding feature\nmovie_col = tf.feature_column.categorical_column_with_identity(key='movieId', num_buckets=1001)\nmovie_emb_col = tf.feature_column.embedding_column(movie_col, 10)\nmovie_ind_col = tf.feature_column.indicator_column(movie_col) # movid id indicator columns\n\n# user id embedding feature\nuser_col = tf.feature_column.categorical_column_with_identity(key='userId', num_buckets=30001)\nuser_emb_col = tf.feature_column.embedding_column(user_col, 10)\nuser_ind_col = tf.feature_column.indicator_column(user_col) # user id indicator columns\n\n# genre features vocabulary\ngenre_vocab = ['Film-Noir', 'Action', 'Adventure', 'Horror', 'Romance', 'War', 'Comedy', 'Western', 'Documentary',\n 'Sci-Fi', 'Drama', 'Thriller',\n 'Crime', 'Fantasy', 'Animation', 'IMAX', 'Mystery', 'Children', 'Musical']\n# user genre embedding feature\nuser_genre_col = tf.feature_column.categorical_column_with_vocabulary_list(key=\"userGenre1\",\n vocabulary_list=genre_vocab)\nuser_genre_emb_col = tf.feature_column.embedding_column(user_genre_col, 10)\nuser_genre_ind_col = 
tf.feature_column.indicator_column(user_genre_col) # user genre indicator columns\n# item genre embedding feature\nitem_genre_col = tf.feature_column.categorical_column_with_vocabulary_list(key=\"movieGenre1\",\n vocabulary_list=genre_vocab)\nitem_genre_emb_col = tf.feature_column.embedding_column(item_genre_col, 10)\nitem_genre_ind_col = tf.feature_column.indicator_column(item_genre_col) # item genre indicator columns\n\n# fm first-order term columns: without embedding and concatenate to the output layer directly\nfm_first_order_columns = [movie_ind_col, user_ind_col, user_genre_ind_col, item_genre_ind_col]\n\ndeep_feature_columns = [tf.feature_column.numeric_column('releaseYear'),\n tf.feature_column.numeric_column('movieRatingCount'),\n tf.feature_column.numeric_column('movieAvgRating'),\n tf.feature_column.numeric_column('movieRatingStddev'),\n tf.feature_column.numeric_column('userRatingCount'),\n tf.feature_column.numeric_column('userAvgRating'),\n tf.feature_column.numeric_column('userRatingStddev'),\n movie_emb_col,\n user_emb_col]\n\nitem_emb_layer = tf.keras.layers.DenseFeatures([movie_emb_col])(inputs)\nuser_emb_layer = tf.keras.layers.DenseFeatures([user_emb_col])(inputs)\nitem_genre_emb_layer = tf.keras.layers.DenseFeatures([item_genre_emb_col])(inputs)\nuser_genre_emb_layer = tf.keras.layers.DenseFeatures([user_genre_emb_col])(inputs)\n\n# The first-order term in the FM layer\nfm_first_order_layer = tf.keras.layers.DenseFeatures(fm_first_order_columns)(inputs)\n\n# FM part, cross different categorical feature embeddings\nproduct_layer_item_user = tf.keras.layers.Dot(axes=1)([item_emb_layer, user_emb_layer])\nproduct_layer_item_genre_user_genre = tf.keras.layers.Dot(axes=1)([item_genre_emb_layer, user_genre_emb_layer])\nproduct_layer_item_genre_user = tf.keras.layers.Dot(axes=1)([item_genre_emb_layer, user_emb_layer])\nproduct_layer_user_genre_item = tf.keras.layers.Dot(axes=1)([item_emb_layer, user_genre_emb_layer])\n\n# deep part, MLP to generalize all input features\ndeep = tf.keras.layers.DenseFeatures(deep_feature_columns)(inputs)\ndeep = tf.keras.layers.Dense(64, activation='relu')(deep)\ndeep = tf.keras.layers.Dense(64, activation='relu')(deep)\n\n# concatenate fm part and deep part\nconcat_layer = tf.keras.layers.concatenate([fm_first_order_layer, product_layer_item_user, product_layer_item_genre_user_genre,\n product_layer_item_genre_user, product_layer_user_genre_item, deep], axis=1)\noutput_layer = tf.keras.layers.Dense(1, activation='sigmoid')(concat_layer)\n\nmodel = tf.keras.Model(inputs, output_layer)\n# compile the model, set loss function, optimizer and evaluation metrics\nmodel.compile(\n loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n# train the model\nmodel.fit(train_dataset, epochs=5)\n\n# evaluate the model\ntest_loss, test_accuracy = model.evaluate(test_dataset)\nprint('\\n\\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))\n\n# print some predict results\npredictions = model.predict(test_dataset)\nfor prediction, goodRating in zip(predictions[:12], list(test_dataset)[0][1][:12]):\n print(\"Predicted good rating: {:.2%}\".format(prediction[0]),\n \" | Actual rating label: \",\n (\"Good Rating\" if bool(goodRating) else \"Bad Rating\"))" ]
[ [ "tensorflow.feature_column.categorical_column_with_identity", "tensorflow.keras.utils.get_file", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.DenseFeatures", "tensorflow.feature_column.categorical_column_with_vocabulary_list", "tensorflow.feature_column.numeric_column", "tensorflow.feature_column.indicator_column", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.layers.Dot", "tensorflow.keras.layers.concatenate", "tensorflow.data.experimental.make_csv_dataset", "tensorflow.feature_column.embedding_column" ] ]
numenic/pyNastran
[ "fd5d3f0bf18db6595d85b9ac152f611e23122a68", "fd5d3f0bf18db6595d85b9ac152f611e23122a68" ]
[ "pyNastran/op2/tables/oes_stressStrain/real/oes_bend.py", "pyNastran/bdf/cards/elements/beam.py" ]
[ "from itertools import cycle\nimport numpy as np\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.op2.tables.oes_stressStrain.real.oes_objects import (\n StressObject, StrainObject, OES_Object)\nfrom pyNastran.f06.f06_formatting import write_floats_13e, write_floats_8p1e\n\n\nclass RealBendArray(OES_Object):\n \"\"\"\n Common class for:\n - RealBendStressArray\n - RealBendStrainArray\n \"\"\"\n def __init__(self, data_code, is_sort1, isubcase, dt):\n OES_Object.__init__(self, data_code, isubcase, apply_data_code=False) ## why???\n self.element_node = None\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n #self.itime = 0\n self.nelements = 0 # result specific\n\n #if is_sort1:\n #pass\n #else:\n #raise NotImplementedError('SORT2')\n\n @property\n def is_real(self):\n return False\n\n @property\n def is_complex(self):\n return True\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n #def get_nnodes(self):\n #return get_nnodes(self)\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the ComplexBendArray\"\"\"\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n self.ntotal = self.nelements * nnodes * 2\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n self.is_built = True\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n self._times = np.zeros(self.ntimes, 'float32')\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((self.ntotal, 2), 'int32')\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.data = np.zeros((self.ntimes, self.ntotal, 9), 'float32')\n\n #def build_dataframe(self):\n #\"\"\"creates a pandas dataframe\"\"\"\n #import pandas as pd\n #print(self.data_code)\n #headers = self.headers\n #column_names, column_values = self._build_dataframe_transient_header()\n #self.data_frame = pd.Panel(self.data, items=column_values,\n #major_axis=self.element_node, minor_axis=headers).to_frame()\n #self.data_frame.columns.names = column_names\n #self.data_frame.index.names = ['ElementID', 'Item']\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n self._eq_header(table)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for ieid, (eid, nid) in enumerate(self.element_node):\n t1 = self.data[itime, ieid, :]\n t2 = table.data[itime, ieid, :]\n (angle1, sc1, sd1, se1, sf1, omax1, omin1, mst1, msc1) = t1\n (angle2, sc2, sd2, se2, sf2, omax2, omin2, mst2, msc2) = t2\n delta = t1 - t2\n if not np.allclose([angle1, sc1, sd1, se1, sf1],\n [angle2, sc2, sd2, se2, sf2], atol=0.0001):\n #if 
not np.array_equal(t1, t2):\n msg += '%-4s (%s, %s, %s, %s)\\n (%s, %s, %s, %s)\\n dt12=(%s, %s, %s, %s)\\n' % (\n eid,\n sc1, sd1, se1, sf1,\n sc2, sd2, se2, sf2,\n delta[0], delta[1], delta[2], delta[3],)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, grid, angle, sc, sd, se, sf, omax, omin, mst, msc):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.data[self.itime, self.itotal] = [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.element_node[self.itotal] = [eid, grid]\n #self.ielement += 1\n self.itotal += 1\n\n def get_stats(self, short=False):\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.nelements\n ntimes = self.ntimes\n nnodes = self.element_node.shape[0]\n #ntotal = self.ntotal\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i nnodes=%i\\n'\n % (self.__class__.__name__, ntimes, nelements, nnodes))\n else:\n msg.append(' type=%s nelements=%i nnodes=%i\\n' % (self.__class__.__name__, nelements, nnodes))\n msg.append(' data: [ntimes, nnodes, 5] where 5=[%s]\\n' % str(', '.join(self._get_headers())))\n msg.append(' element_node.shape = %s\\n' % str(self.element_node.shape).replace('L', ''))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n @property\n def headers(self):\n return self._get_headers()\n\n def get_headers(self):\n return self.headers\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n \"\"\"\n ' S T R A I N S I N B E N D E L E M E N T S ( C B E N D )'\n ' CIRC.'\n ' ELEMENT-ID GRID END ANG. 
SXC SXD SXE SXF S-MAX S-MIN M.S.-T M.S.-C'\n '0 6901 6901 A 0 4.372282E-16 -5.960465E-15 0.0 0.0 4.372282E-16 -5.960465E-15 '\n ' 6902 B 0 -6.533992E-15 5.000001E-07 -5.000000E-13 -5.000000E-13 5.000001E-07 -5.000000E-13 -6.0E-01 6.0E+05'\n \"\"\"\n #raise NotImplementedError('CBEND.stress/strain.real write_f06')\n msg_temp = _get_cbend_msg(self.is_stress, is_mag_phase, is_sort1)\n ntimes = self.data.shape[0]\n eids = self.element_node[:, 0]\n nids = self.element_node[:, 1]\n counter = cycle([0, 1])\n if self.is_sort1:\n if is_sort1:\n for itime in range(ntimes):\n dt = self._times[itime]\n if self.nonlinear_factor in (None, np.nan):\n dt_line = ''\n else:\n\n dt_line = ' %14s = %12.5E\\n' % (self.data_code['name'], dt)\n header[1] = dt_line\n msg = header + msg_temp\n f06_file.write('\\n'.join(msg))\n\n # [angle, sc, sd, se, sf]\n angles = self.data[itime, :, 0]\n scs = self.data[itime, :, 1]\n sds = self.data[itime, :, 2]\n ses = self.data[itime, :, 3]\n sfs = self.data[itime, :, 4]\n\n maxs = self.data[itime, :, 5]\n mins = self.data[itime, :, 6]\n msts = self.data[itime, :, 7]\n mscs = self.data[itime, :, 8]\n assert len(eids) == len(angles)\n assert len(angles) > 0, angles\n for i, eid, nid, anglei, sci, sdi, sei, sfi, maxi, mini, msti, msci in zip(counter, eids, nids, angles, scs, sds, ses, sfs, maxs, mins, msts, mscs):\n assert isinstance(eid, integer_types), 'eid=%s type=%s' % (eid, type(eid))\n [angle, sc, sd, se, sf, omax, omin] = write_floats_13e([anglei, sci, sdi, sei, sfi, maxi, mini])\n [mst, msc] = write_floats_8p1e([msti, msci])\n\n #f.write(' 28 0.0 / 0.0 0.0 / 0.0\\n')\n\n #' ELEMENT-ID = 6901'\n #' C O M P L E X S T R E S S E S I N B E N D E L E M E N T S ( C B E N D ) '\n #' (REAL/IMAGINARY)'\n #' CIRC. LOCATION LOCATION LOCATION LOCATION'\n #' FREQUENCY GRID END ANG. C D E F'\n #'0 0.0 6901 A 0 1.384767E+01 6.258920E-01 -1.217803E+01 1.043753E+00'\n #' -4.615430E-01 -2.086098E-02 4.058937E-01 -3.478828E-02'\n if i == 0:\n f06_file.write(\n '0 %9i%8i A %.2f %-13s %-13s %-13s %-13s %-13s %-13s %s %s\\n'% (\n eid, nid, anglei,\n sc, sd, se, sf, omax, omin, mst, msc,\n ))\n else:\n f06_file.write(\n ' %9s%8i B %.2f %-13s %-13s %-13s %-13s %-13s %-13s %s %s\\n'% (\n '', nid, anglei,\n sc, sd, se, sf, omax, omin, mst, msc,\n ))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n else:\n raise NotImplementedError('RealBendArray-sort2')\n else:\n raise NotImplementedError('RealBendArray-sort2')\n return page_num - 1\n\ndef _get_cbend_msg(is_stress, is_mag_phase, is_sort1):\n \"\"\"get the header for the CBEND result\"\"\"\n if is_mag_phase:\n raise NotImplementedError()\n else:\n realimag_magphase = ' (REAL/IMAGINARY)'\n\n if is_stress:\n stress_strain = ' S T R E S S E S I N B E N D E L E M E N T S ( C B E N D )'\n else:\n stress_strain = ' S T R A I N S I N B E N D E L E M E N T S ( C B E N D )'\n\n assert is_sort1 is True\n sort1 = ' ELEMENT-ID GRID END ANG. SXC SXD SXE SXF S-MAX S-MIN M.S.-T M.S.-C\\n'\n msg = [\n stress_strain,\n ' CIRC.',\n sort1,\n ]\n #'0 6901 6901 A 0 4.372282E-16 -5.960465E-15 0.0 0.0 4.372282E-16 -5.960465E-15 '\n #' 6902 B 0 -6.533992E-15 5.000001E-07 -5.000000E-13 -5.000000E-13 5.000001E-07 -5.000000E-13 -6.0E-01 6.0E+05'\n\n #if is_sort1:\n #msg.append(' ELEMENT-ID GRID END ANG. C D E F\\n')\n #else:\n #msg.append(' FREQUENCY GRID END ANG. 
C D E F\\n')\n return msg\n\nclass RealBendStressArray(RealBendArray, StressObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n RealBendArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StressObject.__init__(self, data_code, isubcase)\n\n def _get_headers(self):\n return ['angle', 'sc', 'sd', 'se', 'sf', 'omax', 'omin', 'mst', 'msc']\n\nclass RealBendStrainArray(RealBendArray, StrainObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n RealBendArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StrainObject.__init__(self, data_code, isubcase)\n assert self.is_strain, self.stress_bits\n\n def _get_headers(self):\n return ['angle', 'sc', 'sd', 'se', 'sf', 'emax', 'emin', 'mst', 'msc']\n", "# pylint: disable=C0103\n\"\"\"\ndefines:\n - CBEAM\n - BEAMOR\n\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import norm # type: ignore\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.bdf.cards.elements.bars import (\n LineElement, init_x_g0, BaseCard, rotate_v_wa_wb, check_offt)\nfrom pyNastran.bdf.bdf_interface.assign_type import (\n integer, integer_or_blank, double_or_blank, integer_double_string_or_blank,\n integer_double_or_blank, integer_string_or_blank,\n)\nfrom pyNastran.bdf.field_writer_8 import set_blank_if_default\nfrom pyNastran.bdf.field_writer_8 import print_card_8\nfrom pyNastran.bdf.field_writer_16 import print_card_16\nfrom pyNastran.utils.mathematics import integrate_positive_unit_line\n\n\nclass CBEAM(LineElement):\n \"\"\"\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+=====+=====+=====+=====+==========+\n | CBEAM | EID | PID | GA | GB | X1 | X2 | X3 | OFFT/BIT |\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n | | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n | | SA | SB | | | | | | |\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n\n or\n\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+=====+=====+=====+=====+==========+\n | CBEAM | EID | PID | GA | GB | G0 | | | OFFT/BIT |\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n | | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n | | SA | SB | | | | | | |\n +-------+-----+-----+-----+-----+-----+-----+-----+----------+\n\n offt/bit are MSC specific fields\n\n \"\"\"\n type = 'CBEAM'\n _field_map = {\n 1: 'eid', 2:'pid', 3:'ga', 4:'gb', #5:'x_g0', 6:'g1', 7:'g2',\n #8:'offt',\n 9:'pa', 10:'pb',\n 17:'sa', 18:'sb',\n }\n\n def _update_field_helper(self, n, value):\n if n == 11:\n self.wa[0] = value\n elif n == 12:\n self.wa[1] = value\n elif n == 13:\n self.wa[2] = value\n\n elif n == 14:\n self.wb[0] = value\n elif n == 15:\n self.wb[1] = value\n elif n == 16:\n self.wb[2] = value\n else:\n if self.g0 is not None:\n if n == 5:\n self.g0 = value\n else: # offt\n msg = 'Field %r=%r is an invalid %s entry or is unsupported.' % (\n n, value, self.type)\n raise KeyError(msg)\n else:\n if n == 5:\n self.x[0] = value\n elif n == 6:\n self.x[1] = value\n elif n == 7:\n self.x[2] = value\n else:\n msg = 'Field %r=%r is an invalid %s entry or is unsupported.' 
% (\n n, value, self.type)\n raise KeyError(msg)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, eids):\n \"\"\"exports the elements in a vectorized way\"\"\"\n encoding = model._encoding\n #comments = []\n pids = []\n nodes = []\n\n x = []\n g0 = []\n offt = []\n bit = []\n\n pa = []\n pb = []\n wa = []\n wb = []\n sa = []\n sb = []\n nan = np.full(3, np.nan)\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if element.g0 is None:\n x.append(element.x)\n g0.append(-1)\n else:\n x.append(nan)\n g0.append(element.g0)\n\n if element.bit is not None:\n bit.append(element.bit)\n offt.append(b'')\n else:\n bit.append(np.nan)\n offti = element.offt\n if isinstance(offti, integer_types):\n offti = str(offti)\n offt.append(offti.encode(encoding))\n\n\n pa.append(element.pa)\n pb.append(element.pb)\n sa.append(element.sa)\n sb.append(element.sb)\n wa.append(element.wa)\n wb.append(element.wb)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('pid', data=pids)\n #print('x =', x)\n #print('g0 =', g0)\n h5_file.create_dataset('x', data=x)\n h5_file.create_dataset('g0', data=g0)\n h5_file.create_dataset('offt', data=offt)\n h5_file.create_dataset('bit', data=bit)\n\n h5_file.create_dataset('pa', data=pa)\n h5_file.create_dataset('pb', data=pb)\n\n h5_file.create_dataset('sa', data=sa)\n h5_file.create_dataset('sb', data=sb)\n\n h5_file.create_dataset('wa', data=wa)\n h5_file.create_dataset('wb', data=wb)\n\n\n def __init__(self, eid, pid, nids, x, g0, offt='GGG', bit=None,\n pa=0, pb=0, wa=None, wb=None, sa=0, sb=0, comment=''):\n \"\"\"\n Adds a CBEAM card\n\n Parameters\n ----------\n pid : int\n property id\n mid : int\n material id\n nids : List[int, int]\n node ids; connected grid points at ends A and B\n x : List[float, float, float]\n Components of orientation vector, from GA, in the displacement\n coordinate system at GA (default), or in the basic coordinate system\n g0 : int\n Alternate method to supply the orientation vector using grid\n point G0. Direction of is from GA to G0. is then transferred\n to End A\n offt : str; default='GGG'\n Offset vector interpretation flag\n None : bit is active\n bit : float; default=None\n Built-in twist of the cross-sectional axes about the beam axis\n at end B relative to end A.\n For beam p-elements ONLY!\n None : offt is active\n pa / pb : int; default=0\n Pin Flag at End A/B. Releases the specified DOFs\n wa / wb : List[float, float, float]\n Components of offset vectors from the grid points to the end\n points of the axis of the shear center\n sa / sb : int; default=0\n Scalar or grid point identification numbers for the ends A and B,\n respectively. The degrees-of-freedom at these points are the\n warping variables . 
SA and SB cannot be specified for\n beam p-elements\n comment : str; default=''\n a comment for the card\n\n offt/bit are MSC specific fields\n\n \"\"\"\n LineElement.__init__(self)\n if comment:\n self.comment = comment\n if wa is None:\n wa = np.zeros(3, dtype='float64')\n else:\n wa = np.asarray(wa)\n if wb is None:\n wb = np.zeros(3, dtype='float64')\n else:\n wb = np.asarray(wb)\n\n if isinstance(offt, str):\n offt = offt.replace('E', 'O')\n offt = int(offt) if offt.isdigit() else offt\n self.eid = eid\n self.pid = pid\n self.ga = nids[0]\n self.gb = nids[1]\n self.x = x\n self.g0 = g0\n self.offt = offt\n self.bit = bit\n self.pa = pa\n self.pb = pb\n self.wa = wa\n self.wb = wb\n self.sa = sa\n self.sb = sb\n self.ga_ref = None\n self.gb_ref = None\n self.pid_ref = None\n self.g0_ref = None\n self.g0_vector = None\n\n def validate(self):\n msg = ''\n if self.x is None:\n if not isinstance(self.g0, integer_types):\n msg += 'CBEAM eid=%s: x is None, so g0=%s must be an integer' % (self.eid, self.g0)\n else:\n if not isinstance(self.x, (list, np.ndarray)):\n msg += 'CBEAM eid=%s: x=%s and g0=%s, so x must be a list; type(x)=%s' % (\n self.eid, self.x, self.g0, type(self.x))\n if msg:\n raise ValueError(msg)\n\n if self.g0 is not None:\n assert isinstance(self.g0, integer_types), 'g0=%s must be an integer' % self.g0\n if self.g0 in [self.ga, self.gb]:\n msg = 'G0=%s cannot be GA=%s or GB=%s' % (self.g0, self.ga, self.gb)\n raise RuntimeError(msg)\n\n if self.bit is None and self.offt is None:\n msg = 'OFFT/BIT must not be None; offt=%r bit=%s' % (self.offt, self.bit)\n raise RuntimeError(msg)\n\n if self.offt is not None:\n if isinstance(self.offt, integer_types):\n assert self.offt in [1, 2, 21, 22, 41, 42], 'invalid offt; offt=%i' % self.offt\n #raise NotImplementedError('invalid offt; offt=%i' % self.offt)\n elif isinstance(self.offt, str):\n check_offt(self)\n else:\n raise TypeError('invalid offt expected a string of length 3 '\n 'offt=%r; Type=%s' % (self.offt, type(self.offt)))\n\n @classmethod\n def add_card(cls, card, beamor=None, comment=''):\n \"\"\"\n Adds a CBEAM card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n beamor : BEAMOR() or None\n defines the defaults\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n eid = integer(card, 1, 'eid')\n\n pid_default = eid\n x1_default, x2_default, x3_default = 0., 0., 0.\n offt_default = 'GGG'\n if beamor is not None:\n if beamor.pid is not None:\n pid_default = beamor.pid\n if beamor.x is None:\n x1_default = beamor.g0\n x2_default = None\n x3_default = None\n else:\n x1_default, x2_default, x3_default = beamor.x\n offt_default = beamor.offt\n\n pid = integer_or_blank(card, 2, 'pid', pid_default)\n ga = integer(card, 3, 'ga')\n gb = integer(card, 4, 'gb')\n\n x, g0 = init_x_g0(card, eid, x1_default, x2_default, x3_default)\n offt, bit = _init_offt_bit(card, eid, offt_default)# offt doesn't exist in NX nastran\n pa = integer_or_blank(card, 9, 'pa', 0)\n pb = integer_or_blank(card, 10, 'pb', 0)\n\n wa = np.array([double_or_blank(card, 11, 'w1a', 0.0),\n double_or_blank(card, 12, 'w2a', 0.0),\n double_or_blank(card, 13, 'w3a', 0.0)], 'float64')\n\n wb = np.array([double_or_blank(card, 14, 'w1b', 0.0),\n double_or_blank(card, 15, 'w2b', 0.0),\n double_or_blank(card, 16, 'w3b', 0.0)], 'float64')\n\n sa = integer_or_blank(card, 17, 'sa', 0)\n sb = integer_or_blank(card, 18, 'sb', 0)\n assert len(card) <= 19, 'len(CBEAM card) = %i\\ncard=%s' % (len(card), card)\n return 
CBEAM(eid, pid, [ga, gb], x, g0, offt, bit,\n pa=pa, pb=pb, wa=wa, wb=wb, sa=sa, sb=sb, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, f, comment=''):\n \"\"\"\n Adds a CBEAM card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n f : int\n beam flag\n 0 : basic\n [x1, x2, x3] is used\n 1 : cid\n [x1, x2, x3] is used\n 2 : grid\n g0 is used instead of [x1, x2, x3]\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n #: .. todo:: verify\n assert len(data) == 2, 'data=%s len(data)=%s' % (data, len(data))\n #data = [[eid,pid,ga,gb,sa,sb, pa,pb,w1a,w2a,w3a,w1b,w2b,w3b],\n # [f,g0]]\n #data = [[eid,pid,ga,gb,sa,sb, pa,pb,w1a,w2a,w3a,w1b,w2b,w3b],\n # [f,x1,x2,x3]]\n\n main, aft = data\n flag = aft[0]\n assert f == flag, 'f=%s flag=%s' % (f, flag)\n if flag == 0:\n # basic cid\n #data_in = [[eid, pid, ga, gb, sa, sb, pa, pb, w1a, w2a, w3a, w1b, w2b, w3b],\n #[f, x1, x2, x3]]\n assert len(aft) == 4, 'f=%s aft=%s len(aft)=%s' % (f, aft, len(aft))\n x1, x2, x3 = aft[1:]\n g0 = None\n x = np.array([x1, x2, x3], dtype='float64')\n elif flag == 1:\n # global cid\n #data_in = [[eid, pid, ga, gb, sa, sb, pa, pb, w1a, w2a, w3a, w1b, w2b, w3b],\n #[f, x1, x2, x3]]\n assert len(aft) == 4, 'f=%s aft=%s len(aft)=%s' % (f, aft, len(aft))\n g0 = None\n x1, x2, x3 = aft[1:]\n x = np.array([x1, x2, x3], dtype='float64')\n elif flag == 2:\n # grid option\n #data_in = [[eid, pid, ga, gb, sa, sb, pa, pb, w1a, w2a, w3a, w1b, w2b, w3b],\n #[f, g0]]\n assert len(aft) == 2, 'f=%s aft=%s len(aft)=%s' % (f, aft, len(aft))\n g0 = data[1][1]\n x = None\n else:\n raise NotImplementedError()\n\n eid = main[0]\n pid = main[1]\n ga = main[2]\n gb = main[3]\n sa = main[4]\n sb = main[5]\n\n #offt = str(data[6]) # GGG\n bit = None # ???\n offt = 'GGG' #: .. 
todo:: is this correct???\n\n pa = main[6]\n pb = main[7]\n\n wa = np.array([main[8], main[9], main[10]], 'float64')\n wb = np.array([main[11], main[12], main[13]], 'float64')\n return CBEAM(eid, pid, [ga, gb], x, g0, offt, bit,\n pa=pa, pb=pb, wa=wa, wb=wb, sa=sa, sb=sb, comment=comment)\n\n def Nodes(self):\n return [self.ga, self.gb]\n\n def Centroid(self):\n \"\"\"\"\"\"\n node1 = self.ga_ref\n node2 = self.gb_ref\n xyz1 = node1.get_position()\n xyz2 = node2.get_position()\n centroid = (xyz1 + xyz2) / 2.\n return centroid\n\n def center_of_mass(self):\n \"\"\"the centroid formuala is way more complicated if you consider the nonstructural mass axis\"\"\"\n elem = self\n prop = self.pid_ref\n node1 = self.ga_ref\n node2 = self.gb_ref\n xyz1 = node1.get_position()\n xyz2 = node2.get_position()\n #centroid = ( + self.gb_ref.get_position()) / 2.\n centroid = (xyz1 + xyz2) / 2.\n #length = norm(xyz2 - xyz1)\n #cda = model.nodes[n1].cid_ref\n #cdb = model.nodes[n2].cid_ref\n\n model = None\n log = None\n is_failed, out = elem.get_axes_by_nodes(model, self.pid_ref, node1, node2, xyz1, xyz2, log)\n if is_failed:\n #model.log.error(out)\n raise RuntimeError(out)\n\n wa, wb, _ihat, jhat, khat = out\n p1 = xyz1 + wa\n p2 = xyz2 + wb\n\n if prop.type == 'PBEAM':\n rho = prop.Rho()\n\n # we don't call the MassPerLength method so we can put the NSM centroid\n # on a different axis (the PBEAM is weird)\n mass_per_lengths = []\n nsm_per_lengths = []\n for (area, nsm) in zip(prop.A, prop.nsm):\n mass_per_lengths.append(area * rho)\n nsm_per_lengths.append(nsm)\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = integrate_positive_unit_line(prop.xxb, nsm_per_lengths)\n nsm_n1 = (p1 + jhat * prop.m1a + khat * prop.m2a)\n nsm_n2 = (p2 + jhat * prop.m1b + khat * prop.m2b)\n #print(\"nsm_per_length=%s\" % nsm_per_length)\n #print(\"nsm_n1=%s\" % nsm_n1)\n #print(\"nsm_n2=%s\" % nsm_n2)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n #if nsm != 0.:\n #p1_nsm = p1 + prop.ma\n #p2_nsm = p2 + prop.mb\n elif prop.type == 'PBEAML':\n mass_per_lengths = prop.get_mass_per_lengths()\n #mass_per_length = prop.MassPerLength() # includes simplified nsm\n\n # m1a, m1b, m2a, m2b=0.\n nsm_centroid = (p1 + p2) / 2.\n\n # mass_per_length already includes nsm\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = 0.\n\n #print('mass_per_lengths=%s nsm_per_lengths=%s' % (\n #mass_per_lengths, nsm_per_lengths))\n #print('mass_per_length=%s nsm_per_length=%s' % (\n #mass_per_length, nsm_per_length))\n\n #nsm_centroid = np.zeros(3) # TODO: what is this...\n #nsm = prop.nsm[0] * length # TODO: simplified\n elif prop.type == 'PBCOMP':\n mass_per_length = prop.MassPerLength()\n nsm_per_length = prop.nsm\n nsm_n1 = (p1 + jhat * prop.m1 + khat * prop.m2)\n nsm_n2 = (p2 + jhat * prop.m1 + khat * prop.m2)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n #elif prop.type == 'PBMSECT':\n #continue\n #mass_per_length = prop.MassPerLength()\n #m = mass_per_length * length\n #nsm = prop.nsm\n elif prop.type == 'PBMSECT':\n mass_per_length = 0. 
## TODO: fix me\n nsm_per_length = prop.nsm\n nsm_centroid = (p1 + p2) / 2.\n else:\n raise NotImplementedError(prop.type)\n\n total_mass = mass_per_length + nsm_per_length\n if total_mass == 0.0:\n return centroid\n centroid2 = (centroid * mass_per_length + nsm_centroid * nsm_per_length) / total_mass\n return centroid2\n\n def center_of_mass_xform(self):\n \"\"\"\n A B\n *----------*\n ^ ^\n | wa | wb\n | |\n 1----------2\n\n 1-2 are the nodes of the bar\n A-B defines the axis of the shear center\n\n\n ^ z\n +--+ |\n | | |\n | | 2---> y\n | +--+\n | |\n +-----+\n\n \"\"\"\n cda = self.ga_ref.cid_ref\n cdb = self.gb_ref.cid_ref\n ga = self.ga_ref.get_position() + cda.transform_node_to_global_assuming_rectangular(self.wa)\n gb = self.gb_ref.get_position() + cdb.transform_node_to_global_assuming_rectangular(self.wb)\n #x = self.get_orientation_vector()\n return (ga + gb) / 2.\n\n def get_axes(self, model):\n \"\"\"\n Gets the axes of a CBAR/CBEAM, while respecting the OFFT flag.\n\n Notes\n -----\n :func:`pyNastran.bdf.cards.elements.bars.rotate_v_wa_wb` for a\n description of the OFFT flag.\n\n \"\"\"\n #TODO: not integrated with CBAR yet...\n if self.bit is not None:\n print(self.get_stats())\n return False, None\n\n check_offt(self)\n is_failed = True\n ihat = None\n yhat = None\n zhat = None\n\n eid = self.eid\n (nid1, nid2) = self.node_ids\n node1 = model.nodes[nid1]\n node2 = model.nodes[nid2]\n xyz1 = node1.get_position()\n xyz2 = node2.get_position()\n\n elem = model.elements[eid]\n pid_ref = elem.pid_ref\n if pid_ref is None:\n pid_ref = model.Property(elem.pid)\n assert not isinstance(pid_ref, integer_types), elem\n\n is_failed, (wa, wb, ihat, yhat, zhat) = self.get_axes_by_nodes(\n model, pid_ref, node1, node2, xyz1, xyz2, model.log)\n return is_failed, (wa, wb, ihat, yhat, zhat)\n\n def get_axes_by_nodes(self, model, pid_ref, node1, node2, xyz1, xyz2, log):\n \"\"\"\n Gets the axes of a CBAR/CBEAM, while respecting the OFFT flag.\n\n Notes\n -----\n :func:`pyNastran.bdf.cards.elements.bars.rotate_v_wa_wb` for a\n description of the OFFT flag.\n\n \"\"\"\n #TODO: not integrated with CBAR yet...\n\n is_failed = True\n eid = self.eid\n #centroid = (n1 + n2) / 2.\n #i = n2 - n1\n #Li = norm(i)\n #ihat = i / Li\n\n elem = self\n #(nid1, nid2) = elem.node_ids\n #node1 = model.nodes[nid1]\n #node2 = model.nodes[nid2]\n #xyz1 = node1.get_position()\n #xyz2 = node2.get_position()\n\n # wa/wb are not considered in i_offset\n # they are considered in ihat\n i = xyz2 - xyz1\n Li = norm(i)\n if Li == 0.:\n msg = 'xyz1=%s xyz2=%s\\n%s' % (xyz1, xyz2, self)\n raise ValueError(msg)\n i_offset = i / Li\n\n unused_v, wa, wb, xform = rotate_v_wa_wb(\n model, elem,\n xyz1, xyz2, node1, node2,\n i_offset, i, eid, Li, log)\n if wb is None:\n # one or more of v, wa, wb are bad\n\n # xform is xform_offset...assuming None\n ihat = None\n yhat = None\n zhat = None\n return is_failed, (wa, wb, ihat, yhat, zhat)\n\n ihat = xform[0, :]\n yhat = xform[1, :]\n zhat = xform[2, :]\n\n is_failed = False\n return is_failed, (wa, wb, ihat, yhat, zhat)\n\n @property\n def node_ids(self):\n return [self.Ga(), self.Gb()]\n\n def get_edge_ids(self):\n return [tuple(sorted(self.node_ids))]\n\n @property\n def nodes(self):\n return [self.ga, self.gb]\n\n @nodes.setter\n def nodes(self, values):\n self.ga = values[0]\n self.gb = values[1]\n\n @property\n def nodes_ref(self):\n return [self.ga_ref, self.gb_ref]\n\n @nodes_ref.setter\n def nodes_ref(self, values):\n assert values is not None, values\n self.ga_ref = 
values[0]\n self.gb_ref = values[1]\n\n def Mid(self):\n if self.pid_ref is None:\n raise RuntimeError('Element eid=%i has not been '\n 'cross referenced.\\n%s' % (self.eid, str(self)))\n return self.pid_ref.Mid()\n\n def Area(self):\n if self.pid_ref is None:\n raise RuntimeError('Element eid=%i has not been '\n 'cross referenced.\\n%s' % (self.eid, str(self)))\n return self.pid_ref.Area()\n\n def Nsm(self):\n if self.pid_ref is None:\n raise RuntimeError('Element eid=%i has not been '\n 'cross referenced.\\n%s' % (self.eid, str(self)))\n return self.pid_ref.Nsm()\n\n @property\n def is_offt(self):\n \"\"\"is the offt flag active?\"\"\"\n if self.bit is not None:\n assert isinstance(self.bit, float), 'bit=%r type=%s' % (self.bit, type(self.bit))\n return False\n #assert isinstance(self.offt, str), 'offt=%r' % self.offt\n return True\n\n @property\n def is_bit(self):\n \"\"\"is the bit flag active?\"\"\"\n return not self.is_offt\n\n def get_offt_bit_defaults(self):\n \"\"\"\n offt doesn't exist in NX nastran\n \"\"\"\n if self.is_offt:\n field8 = set_blank_if_default(self.offt, 'GGG')\n else:\n field8 = set_blank_if_default(self.bit, 0.0)\n return field8\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n\n \"\"\"\n msg = ', which is required by CBEAM eid=%s' % (self.eid)\n self.ga_ref = model.Node(self.ga, msg=msg)\n self.gb_ref = model.Node(self.gb, msg=msg)\n self.nodes_ref = [self.ga_ref, self.gb_ref]\n self.pid_ref = model.Property(self.pid, msg=msg)\n if self.g0:\n self.g0_ref = model.nodes[self.g0]\n self.g0_vector = self.g0_ref.get_position() - self.ga_ref.get_position()\n else:\n self.g0_vector = self.x\n if model.is_nx:\n assert self.offt == 'GGG', 'NX only support offt=GGG; offt=%r' % self.offt\n\n def safe_cross_reference(self, model, xref_errors):\n msg = ', which is required by CBEAM eid=%s' % (self.eid)\n self.ga_ref = model.Node(self.ga, msg=msg)\n self.gb_ref = model.Node(self.gb, msg=msg)\n self.nodes_ref = [self.ga_ref, self.gb_ref]\n self.pid_ref = model.safe_property(self.pid, self.eid, xref_errors, msg=msg)\n\n if self.g0:\n try:\n self.g0_ref = model.nodes[self.g0]\n self.g0_vector = self.g0_ref.get_position() - self.ga_ref.get_position()\n except KeyError:\n model.log.warning('Node=%s%s' % (self.g0, msg))\n else:\n self.g0_vector = self.x\n\n def uncross_reference(self) -> None:\n \"\"\"Removes cross-reference links\"\"\"\n self.pid = self.Pid()\n self.ga = self.Ga()\n self.gb = self.Gb()\n self.g0 = self.G0()\n self.ga_ref = None\n self.gb_ref = None\n self.g0_ref = None\n self.pid_ref = None\n\n def _verify(self, xref):\n eid = self.eid\n unused_pid = self.Pid()\n unused_edges = self.get_edge_ids()\n if xref: # True\n prop = self.pid_ref\n assert prop.type in ['PBEAM', 'PBEAML', 'PBCOMP', 'PBMSECT'], prop\n mid = self.Mid()\n nsm = self.Nsm()\n assert isinstance(mid, int), 'mid=%r' % mid\n assert isinstance(nsm, float), 'nsm=%r' % nsm\n assert self.pid_ref.type in ['PBEAM', 'PBEAML', 'PBCOMP', 'PBMSECT'], '%s%s' % (self, self.pid_ref)\n A = self.Area()\n mpl = self.MassPerLength()\n L = self.Length()\n mass = self.Mass()\n assert isinstance(A, float), 'eid=%s A=%r' % (eid, A)\n assert isinstance(L, float), 'eid=%s L=%r' % (eid, L)\n assert isinstance(mpl, float), 'eid=%s mass_per_length=%r' % (eid, mpl)\n assert isinstance(mass, float), 'eid=%s mass=%r' % (eid, mass)\n assert L > 0.0, 'eid=%s L=%s' % (eid, L)\n\n def Ga(self):\n 
\"\"\"gets Ga/G1\"\"\"\n if self.ga_ref is None:\n return self.ga\n return self.ga_ref.nid\n\n def Gb(self):\n \"\"\"gets Gb/G2\"\"\"\n if self.gb_ref is None:\n return self.gb\n return self.gb_ref.nid\n\n def G0(self):\n \"\"\"gets G0\"\"\"\n if self.g0_ref is None:\n return self.g0\n return self.g0_ref.nid\n\n def get_x_g0_defaults(self):\n \"\"\"\n X and G0 compete for the same fields, so the method exists to\n make it easier to write the card\n\n Returns\n -------\n x_g0 : varies\n g0 : List[int, None, None]\n x : List[float, float, float]\n\n Notes\n -----\n Used by CBAR and CBEAM\n\n \"\"\"\n if self.g0 is not None:\n return (self.G0(), None, None)\n else:\n return list(self.x)\n\n def raw_fields(self):\n (x1, x2, x3) = self.get_x_g0_defaults()\n offt = self.get_offt_bit_defaults()\n ga, gb = self.node_ids\n list_fields = ['CBEAM', self.eid, self.Pid(), ga, gb, x1, x2, x3, offt,\n self.pa, self.pb] + list(self.wa) + list(self.wb) + [self.sa, self.sb]\n return list_fields\n\n def repr_fields(self):\n w1a = set_blank_if_default(self.wa[0], 0.0)\n w2a = set_blank_if_default(self.wa[1], 0.0)\n w3a = set_blank_if_default(self.wa[2], 0.0)\n w1b = set_blank_if_default(self.wb[0], 0.0)\n w2b = set_blank_if_default(self.wb[1], 0.0)\n w3b = set_blank_if_default(self.wb[2], 0.0)\n pa = set_blank_if_default(self.pa, 0)\n pb = set_blank_if_default(self.pb, 0)\n\n sa = set_blank_if_default(self.sa, 0)\n sb = set_blank_if_default(self.sb, 0)\n (x1, x2, x3) = self.get_x_g0_defaults()\n offt = self.get_offt_bit_defaults()\n ga, gb = self.node_ids\n list_fields = ['CBEAM', self.eid, self.Pid(), ga, gb, x1, x2, x3, offt,\n pa, pb, w1a, w2a, w3a,\n w1b, w2b, w3b, sa, sb]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n def write_card_16(self, is_double=False):\n card = self.repr_fields()\n return self.comment + print_card_16(card)\n\ndef _init_offt_bit(card, unused_eid, offt_default):\n \"\"\"\n offt doesn't exist in NX nastran\n \"\"\"\n field8 = integer_double_string_or_blank(card, 8, 'field8', offt_default)\n if isinstance(field8, float):\n offt = None\n bit = field8\n elif field8 is None:\n offt = 'GGG' # default\n bit = None\n elif isinstance(field8, integer_types):\n bit = None\n offt = field8\n elif isinstance(field8, str):\n bit = None\n offt = field8\n msg = 'invalid offt parameter of CBEAM...offt=%s' % offt\n assert offt[0] in ['G', 'B', 'O', 'E'], msg\n assert offt[1] in ['G', 'B', 'O', 'E'], msg\n assert offt[2] in ['G', 'B', 'O', 'E'], msg\n else:\n msg = ('field8 on %s card is not a string(offt) or bit '\n '(float)...field8=%s\\n' % (card.field(0), field8))\n raise SyntaxError(\"Card Instantiation: %s\" % msg)\n return offt, bit\n\n\nclass BEAMOR(BaseCard):\n \"\"\"\n +--------+-----+---+---+---+-------+-----+-------+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+===+===+===+=======+=====+=======+======+\n | BEAMOR | PID | | | | G0/X1 | X2 | X3 | OFFT |\n +--------+-----+---+---+---+-------+-----+-------+------+\n | BEAMOR | 39 | | | | 0.6 | 2.9 | -5.87 | GOG |\n +--------+-----+---+---+---+-------+-----+-------+------+\n\n \"\"\"\n type = 'BEAMOR'\n def __init__(self, pid, is_g0, g0, x, offt='GGG', comment=''):\n BaseCard.__init__(self)\n if comment:\n self.comment = comment\n self.pid = pid\n self.g0 = g0\n self.x = x\n self.offt = offt\n\n @classmethod\n def _init_from_empty(cls):\n pid = 1\n 
is_g0 = True\n g0 = 1\n x = None\n return BEAMOR(pid, is_g0, g0, x, offt='GGG', comment='')\n\n @classmethod\n def add_card(cls, card, comment=''):\n pid = integer_or_blank(card, 2, 'pid')\n\n # x / g0\n field5 = integer_double_or_blank(card, 5, 'g0_x1', 0.0)\n if isinstance(field5, integer_types):\n is_g0 = True\n g0 = field5\n x = [0., 0., 0.]\n elif isinstance(field5, float):\n is_g0 = False\n g0 = None\n x = np.array([field5,\n double_or_blank(card, 6, 'x2', 0.0),\n double_or_blank(card, 7, 'x3', 0.0)],\n dtype='float64')\n else:\n raise NotImplementedError('BEAMOR field5 = %r' % field5)\n offt = integer_string_or_blank(card, 8, 'offt', 'GGG')\n assert len(card) <= 9, 'len(BEAMOR card) = %i\\ncard=%s' % (len(card), card)\n return BEAMOR(pid, is_g0, g0, x, offt=offt, comment=comment)\n\n def raw_fields(self):\n return ['BEAMOR', None, self.pid, None, None] + list(self.x) + [self.offt]\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n" ]
[ [ "numpy.allclose", "numpy.array_equal", "numpy.zeros" ], [ "numpy.full", "numpy.array", "numpy.linalg.norm", "numpy.asarray", "numpy.zeros" ] ]
catalystneuro/brody-lab-to-nwb
[ "bb792591eae988b2dec1a3a608979832da8f884d" ]
[ "brody_lab_to_nwb/interfaces/customsortingextractor.py" ]
[ "import numpy as np\n\nimport spikeextractors as se\n\n\nclass CustomSortingExtractor(se.SortingExtractor):\n extractor_name = \"custom\"\n is_writable = False\n\n def __init__(self):\n super().__init__()\n self._units = {}\n self.is_dumpable = False\n\n def set_sampling_frequency(self, sampling_frequency):\n self._sampling_frequency = sampling_frequency\n\n def add_unit(self, unit_id, times):\n self._units[unit_id] = dict(times=times)\n\n def get_unit_ids(self):\n return list(self._units.keys())\n\n @se.extraction_tools.check_get_unit_spike_train\n def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):\n times = self._units[unit_id][\"times\"]\n inds = np.where((start_frame <= times) & (times < end_frame))[0]\n return np.array(times[inds])\n" ]
[ [ "numpy.where", "numpy.array" ] ]
Inevitable-Marzipan/pandas
[ "ff50b46045886604dd70438f73df7bf9da3da89b" ]
[ "pandas/core/nanops.py" ]
[ "import functools\nimport itertools\nimport operator\nfrom typing import Any, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import iNaT, lib, tslibs\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask\nfrom pandas.core.dtypes.common import (\n _get_dtype, is_any_int_dtype, is_bool_dtype, is_complex, is_complex_dtype,\n is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype,\n is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype,\n is_object_dtype, is_scalar, is_timedelta64_dtype, pandas_dtype)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.missing import isna, na_value_for_dtype, notna\n\nimport pandas.core.common as com\n\nbn = import_optional_dependency(\"bottleneck\",\n raise_on_missing=False,\n on_version=\"warn\")\n_BOTTLENECK_INSTALLED = bn is not None\n_USE_BOTTLENECK = False\n\n\ndef set_use_bottleneck(v=True):\n # set/unset to use bottleneck\n global _USE_BOTTLENECK\n if _BOTTLENECK_INSTALLED:\n _USE_BOTTLENECK = v\n\n\nset_use_bottleneck(get_option('compute.use_bottleneck'))\n\n\nclass disallow:\n\n def __init__(self, *dtypes):\n super().__init__()\n self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes)\n\n def check(self, obj):\n return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,\n self.dtypes)\n\n def __call__(self, f):\n @functools.wraps(f)\n def _f(*args, **kwargs):\n obj_iter = itertools.chain(args, kwargs.values())\n if any(self.check(obj) for obj in obj_iter):\n msg = 'reduction operation {name!r} not allowed for this dtype'\n raise TypeError(msg.format(name=f.__name__.replace('nan', '')))\n try:\n with np.errstate(invalid='ignore'):\n return f(*args, **kwargs)\n except ValueError as e:\n # we want to transform an object array\n # ValueError message to the more typical TypeError\n # e.g. this is normally a disallowed function on\n # object arrays that contain strings\n if is_object_dtype(args[0]):\n raise TypeError(e)\n raise\n\n return _f\n\n\nclass bottleneck_switch:\n\n def __init__(self, **kwargs):\n self.kwargs = kwargs\n\n def __call__(self, alt):\n bn_name = alt.__name__\n\n try:\n bn_func = getattr(bn, bn_name)\n except (AttributeError, NameError): # pragma: no cover\n bn_func = None\n\n @functools.wraps(alt)\n def f(values, axis=None, skipna=True, **kwds):\n if len(self.kwargs) > 0:\n for k, v in self.kwargs.items():\n if k not in kwds:\n kwds[k] = v\n try:\n if values.size == 0 and kwds.get('min_count') is None:\n # We are empty, returning NA for our type\n # Only applies for the default `min_count` of None\n # since that affects how empty arrays are handled.\n # TODO(GH-18976) update all the nanops methods to\n # correctly handle empty inputs and remove this check.\n # It *may* just be `var`\n return _na_for_min_count(values, axis)\n\n if (_USE_BOTTLENECK and skipna and\n _bn_ok_dtype(values.dtype, bn_name)):\n result = bn_func(values, axis=axis, **kwds)\n\n # prefer to treat inf/-inf as NA, but must compute the func\n # twice :(\n if _has_infs(result):\n result = alt(values, axis=axis, skipna=skipna, **kwds)\n else:\n result = alt(values, axis=axis, skipna=skipna, **kwds)\n except Exception:\n try:\n result = alt(values, axis=axis, skipna=skipna, **kwds)\n except ValueError as e:\n # we want to transform an object array\n # ValueError message to the more typical TypeError\n # e.g. 
this is normally a disallowed function on\n # object arrays that contain strings\n\n if is_object_dtype(values):\n raise TypeError(e)\n raise\n\n return result\n\n return f\n\n\ndef _bn_ok_dtype(dt, name):\n # Bottleneck chokes on datetime64\n if (not is_object_dtype(dt) and\n not (is_datetime_or_timedelta_dtype(dt) or\n is_datetime64tz_dtype(dt))):\n\n # GH 15507\n # bottleneck does not properly upcast during the sum\n # so can overflow\n\n # GH 9422\n # further we also want to preserve NaN when all elements\n # are NaN, unlinke bottleneck/numpy which consider this\n # to be 0\n if name in ['nansum', 'nanprod']:\n return False\n\n return True\n return False\n\n\ndef _has_infs(result):\n if isinstance(result, np.ndarray):\n if result.dtype == 'f8':\n return lib.has_infs_f8(result.ravel())\n elif result.dtype == 'f4':\n return lib.has_infs_f4(result.ravel())\n try:\n return np.isinf(result).any()\n except (TypeError, NotImplementedError):\n # if it doesn't support infs, then it can't have infs\n return False\n\n\ndef _get_fill_value(dtype, fill_value=None, fill_value_typ=None):\n \"\"\" return the correct fill value for the dtype of the values \"\"\"\n if fill_value is not None:\n return fill_value\n if _na_ok_dtype(dtype):\n if fill_value_typ is None:\n return np.nan\n else:\n if fill_value_typ == '+inf':\n return np.inf\n else:\n return -np.inf\n else:\n if fill_value_typ is None:\n return tslibs.iNaT\n else:\n if fill_value_typ == '+inf':\n # need the max int here\n return _int64_max\n else:\n return tslibs.iNaT\n\n\ndef _maybe_get_mask(values: np.ndarray, skipna: bool,\n mask: Optional[np.ndarray]) -> Optional[np.ndarray]:\n \"\"\" This function will compute a mask iff it is necessary. Otherwise,\n return the provided mask (potentially None) when a mask does not need to be\n computed.\n\n A mask is never necessary if the values array is of boolean or integer\n dtypes, as these are incapable of storing NaNs. If passing a NaN-capable\n dtype that is interpretable as either boolean or integer data (eg,\n timedelta64), a mask must be provided.\n\n If the skipna parameter is False, a new mask will not be computed.\n\n The mask is computed using isna() by default. 
Setting invert=True selects\n notna() as the masking function.\n\n Parameters\n ----------\n values : ndarray\n input array to potentially compute mask for\n skipna : bool\n boolean for whether NaNs should be skipped\n mask : Optional[ndarray]\n nan-mask if known\n\n Returns\n -------\n Optional[np.ndarray]\n\n \"\"\"\n\n if mask is None:\n if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype):\n # Boolean data cannot contain nulls, so signal via mask being None\n return None\n\n if skipna:\n mask = isna(values)\n\n return mask\n\n\ndef _get_values(values: np.ndarray, skipna: bool, fill_value: Any = None,\n fill_value_typ: str = None, mask: Optional[np.ndarray] = None\n ) -> Tuple[np.ndarray, Optional[np.ndarray], np.dtype,\n np.dtype, Any]:\n \"\"\" Utility to get the values view, mask, dtype, dtype_max, and fill_value.\n\n If both mask and fill_value/fill_value_typ are not None and skipna is True,\n the values array will be copied.\n\n For input arrays of boolean or integer dtypes, copies will only occur if a\n precomputed mask, a fill_value/fill_value_typ, and skipna=True are\n provided.\n\n Parameters\n ----------\n values : ndarray\n input array to potentially compute mask for\n skipna : bool\n boolean for whether NaNs should be skipped\n fill_value : Any\n value to fill NaNs with\n fill_value_typ : str\n Set to '+inf' or '-inf' to handle dtype-specific infinities\n mask : Optional[np.ndarray]\n nan-mask if known\n\n Returns\n -------\n values : ndarray\n Potential copy of input value array\n mask : Optional[ndarray[bool]]\n Mask for values, if deemed necessary to compute\n dtype : dtype\n dtype for values\n dtype_max : dtype\n platform independent dtype\n fill_value : Any\n fill value used\n \"\"\"\n mask = _maybe_get_mask(values, skipna, mask)\n\n if is_datetime64tz_dtype(values):\n # com.values_from_object returns M8[ns] dtype instead of tz-aware,\n # so this case must be handled separately from the rest\n dtype = values.dtype\n values = getattr(values, \"_values\", values)\n else:\n values = com.values_from_object(values)\n dtype = values.dtype\n\n if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values):\n # changing timedelta64/datetime64 to int64 needs to happen after\n # finding `mask` above\n values = getattr(values, \"asi8\", values)\n values = values.view(np.int64)\n\n dtype_ok = _na_ok_dtype(dtype)\n\n # get our fill value (in case we need to provide an alternative\n # dtype for it)\n fill_value = _get_fill_value(dtype, fill_value=fill_value,\n fill_value_typ=fill_value_typ)\n\n copy = (mask is not None) and (fill_value is not None)\n\n if skipna and copy:\n values = values.copy()\n if dtype_ok:\n np.putmask(values, mask, fill_value)\n\n # promote if needed\n else:\n values, changed = maybe_upcast_putmask(values, mask, fill_value)\n\n # return a platform independent precision dtype\n dtype_max = dtype\n if is_integer_dtype(dtype) or is_bool_dtype(dtype):\n dtype_max = np.int64\n elif is_float_dtype(dtype):\n dtype_max = np.float64\n\n return values, mask, dtype, dtype_max, fill_value\n\n\ndef _isfinite(values):\n if is_datetime_or_timedelta_dtype(values):\n return isna(values)\n if (is_complex_dtype(values) or is_float_dtype(values) or\n is_integer_dtype(values) or is_bool_dtype(values)):\n return ~np.isfinite(values)\n return ~np.isfinite(values.astype('float64'))\n\n\ndef _na_ok_dtype(dtype):\n # TODO: what about datetime64tz? 
PeriodDtype?\n return not issubclass(dtype.type,\n (np.integer, np.timedelta64, np.datetime64))\n\n\ndef _wrap_results(result, dtype, fill_value=None):\n \"\"\" wrap our results if needed \"\"\"\n\n if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):\n if fill_value is None:\n # GH#24293\n fill_value = iNaT\n if not isinstance(result, np.ndarray):\n tz = getattr(dtype, 'tz', None)\n assert not isna(fill_value), \"Expected non-null fill_value\"\n if result == fill_value:\n result = np.nan\n result = tslibs.Timestamp(result, tz=tz)\n else:\n result = result.view(dtype)\n elif is_timedelta64_dtype(dtype):\n if not isinstance(result, np.ndarray):\n if result == fill_value:\n result = np.nan\n\n # raise if we have a timedelta64[ns] which is too large\n if np.fabs(result) > _int64_max:\n raise ValueError(\"overflow in timedelta operation\")\n\n result = tslibs.Timedelta(result, unit='ns')\n else:\n result = result.astype('i8').view(dtype)\n\n return result\n\n\ndef _na_for_min_count(values, axis):\n \"\"\"Return the missing value for `values`\n\n Parameters\n ----------\n values : ndarray\n axis : int or None\n axis for the reduction\n\n Returns\n -------\n result : scalar or ndarray\n For 1-D values, returns a scalar of the correct missing type.\n For 2-D values, returns a 1-D array where each element is missing.\n \"\"\"\n # we either return np.nan or pd.NaT\n if is_numeric_dtype(values):\n values = values.astype('float64')\n fill_value = na_value_for_dtype(values.dtype)\n\n if values.ndim == 1:\n return fill_value\n else:\n result_shape = (values.shape[:axis] +\n values.shape[axis + 1:])\n result = np.empty(result_shape, dtype=values.dtype)\n result.fill(fill_value)\n return result\n\n\ndef nanany(values, axis=None, skipna=True, mask=None):\n \"\"\"\n Check if any elements along an axis evaluate to True.\n\n Parameters\n ----------\n values : ndarray\n axis : int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : bool\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2])\n >>> nanops.nanany(s)\n True\n\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([np.nan])\n >>> nanops.nanany(s)\n False\n \"\"\"\n values, _, _, _, _ = _get_values(values, skipna, fill_value=False,\n mask=mask)\n return values.any(axis)\n\n\ndef nanall(values, axis=None, skipna=True, mask=None):\n \"\"\"\n Check if all elements along an axis evaluate to True.\n\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : bool\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2, np.nan])\n >>> nanops.nanall(s)\n True\n\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 0])\n >>> nanops.nanall(s)\n False\n \"\"\"\n values, _, _, _, _ = _get_values(values, skipna, fill_value=True,\n mask=mask)\n return values.all(axis)\n\n\n@disallow('M8')\ndef nansum(values, axis=None, skipna=True, min_count=0, mask=None):\n \"\"\"\n Sum the elements along an axis ignoring NaNs\n\n Parameters\n ----------\n values : ndarray[dtype]\n axis: int, optional\n skipna : bool, default True\n min_count: int, default 0\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : dtype\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2, np.nan])\n >>> nanops.nansum(s)\n 3.0\n 
\"\"\"\n values, mask, dtype, dtype_max, _ = _get_values(values, skipna,\n fill_value=0, mask=mask)\n dtype_sum = dtype_max\n if is_float_dtype(dtype):\n dtype_sum = dtype\n elif is_timedelta64_dtype(dtype):\n dtype_sum = np.float64\n the_sum = values.sum(axis, dtype=dtype_sum)\n the_sum = _maybe_null_out(the_sum, axis, mask, values.shape,\n min_count=min_count)\n\n return _wrap_results(the_sum, dtype)\n\n\n@disallow('M8', DatetimeTZDtype)\n@bottleneck_switch()\ndef nanmean(values, axis=None, skipna=True, mask=None):\n \"\"\"\n Compute the mean of the element along an axis ignoring NaNs\n\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : float\n Unless input is a float array, in which case use the same\n precision as the input array.\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2, np.nan])\n >>> nanops.nanmean(s)\n 1.5\n \"\"\"\n values, mask, dtype, dtype_max, _ = _get_values(values, skipna,\n fill_value=0, mask=mask)\n dtype_sum = dtype_max\n dtype_count = np.float64\n if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or\n is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)):\n dtype_sum = np.float64\n elif is_float_dtype(dtype):\n dtype_sum = dtype\n dtype_count = dtype\n count = _get_counts(values.shape, mask, axis, dtype=dtype_count)\n the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))\n\n if axis is not None and getattr(the_sum, 'ndim', False):\n with np.errstate(all=\"ignore\"):\n # suppress division by zero warnings\n the_mean = the_sum / count\n ct_mask = count == 0\n if ct_mask.any():\n the_mean[ct_mask] = np.nan\n else:\n the_mean = the_sum / count if count > 0 else np.nan\n\n return _wrap_results(the_mean, dtype)\n\n\n@disallow('M8')\n@bottleneck_switch()\ndef nanmedian(values, axis=None, skipna=True, mask=None):\n \"\"\"\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : float\n Unless input is a float array, in which case use the same\n precision as the input array.\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, np.nan, 2, 2])\n >>> nanops.nanmedian(s)\n 2.0\n \"\"\"\n def get_median(x):\n mask = notna(x)\n if not skipna and not mask.all():\n return np.nan\n return np.nanmedian(x[mask])\n\n values, mask, dtype, dtype_max, _ = _get_values(values, skipna, mask=mask)\n if not is_float_dtype(values):\n values = values.astype('f8')\n if mask is not None:\n values[mask] = np.nan\n\n if axis is None:\n values = values.ravel()\n\n notempty = values.size\n\n # an array from a frame\n if values.ndim > 1:\n\n # there's a non-empty array to apply over otherwise numpy raises\n if notempty:\n if not skipna:\n return _wrap_results(\n np.apply_along_axis(get_median, axis, values), dtype)\n\n # fastpath for the skipna case\n return _wrap_results(np.nanmedian(values, axis), dtype)\n\n # must return the correct shape, but median is not defined for the\n # empty set so return nans of shape \"everything but the passed axis\"\n # since \"axis\" is where the reduction would occur if we had a nonempty\n # array\n shp = np.array(values.shape)\n dims = np.arange(values.ndim)\n ret = np.empty(shp[dims != axis])\n ret.fill(np.nan)\n return _wrap_results(ret, dtype)\n\n # otherwise return a scalar value\n return 
_wrap_results(get_median(values) if notempty else np.nan, dtype)\n\n\ndef _get_counts_nanvar(value_counts: Tuple[int], mask: Optional[np.ndarray],\n axis: Optional[int], ddof: int,\n dtype=float) -> Tuple[Union[int, np.ndarray],\n Union[int, np.ndarray]]:\n \"\"\" Get the count of non-null values along an axis, accounting\n for degrees of freedom.\n\n Parameters\n ----------\n values_shape : Tuple[int]\n shape tuple from values ndarray, used if mask is None\n mask : Optional[ndarray[bool]]\n locations in values that should be considered missing\n axis : Optional[int]\n axis to count along\n ddof : int\n degrees of freedom\n dtype : type, optional\n type to use for count\n\n Returns\n -------\n count : scalar or array\n d : scalar or array\n \"\"\"\n dtype = _get_dtype(dtype)\n count = _get_counts(value_counts, mask, axis, dtype=dtype)\n d = count - dtype.type(ddof)\n\n # always return NaN, never inf\n if is_scalar(count):\n if count <= ddof:\n count = np.nan\n d = np.nan\n else:\n mask2 = count <= ddof # type: np.ndarray\n if mask2.any():\n np.putmask(d, mask2, np.nan)\n np.putmask(count, mask2, np.nan)\n return count, d\n\n\n@disallow('M8')\n@bottleneck_switch(ddof=1)\ndef nanstd(values, axis=None, skipna=True, ddof=1, mask=None):\n \"\"\"\n Compute the standard deviation along given axis while ignoring NaNs\n\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : float\n Unless input is a float array, in which case use the same\n precision as the input array.\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, np.nan, 2, 3])\n >>> nanops.nanstd(s)\n 1.0\n \"\"\"\n result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof,\n mask=mask))\n return _wrap_results(result, values.dtype)\n\n\n@disallow('M8')\n@bottleneck_switch(ddof=1)\ndef nanvar(values, axis=None, skipna=True, ddof=1, mask=None):\n \"\"\"\n Compute the variance along given axis while ignoring NaNs\n\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : float\n Unless input is a float array, in which case use the same\n precision as the input array.\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, np.nan, 2, 3])\n >>> nanops.nanvar(s)\n 1.0\n \"\"\"\n values = com.values_from_object(values)\n dtype = values.dtype\n mask = _maybe_get_mask(values, skipna, mask)\n if is_any_int_dtype(values):\n values = values.astype('f8')\n if mask is not None:\n values[mask] = np.nan\n\n if is_float_dtype(values):\n count, d = _get_counts_nanvar(values.shape, mask, axis, ddof,\n values.dtype)\n else:\n count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)\n\n if skipna and mask is not None:\n values = values.copy()\n np.putmask(values, mask, 0)\n\n # xref GH10242\n # Compute variance via two-pass algorithm, which is stable against\n # cancellation errors and relatively accurate for small numbers of\n # observations.\n #\n # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count\n if axis is not None:\n avg = np.expand_dims(avg, axis)\n sqr = _ensure_numeric((avg - values) ** 2)\n if mask is not None:\n np.putmask(sqr, mask, 0)\n result = sqr.sum(axis=axis, dtype=np.float64) / d\n\n # Return variance as np.float64 (the datatype used in the accumulator),\n # unless we were dealing with a float array, in which case use the same\n # precision as the original values array.\n if is_float_dtype(dtype):\n result = result.astype(dtype)\n return _wrap_results(result, values.dtype)\n\n\n@disallow('M8', 'm8')\ndef nansem(values, axis=None, skipna=True, ddof=1, mask=None):\n \"\"\"\n Compute the standard error in the mean along given axis while ignoring NaNs\n\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : float64\n Unless input is a float array, in which case use the same\n precision as the input array.\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, np.nan, 2, 3])\n >>> nanops.nansem(s)\n 0.5773502691896258\n \"\"\"\n\n # This checks if non-numeric-like data is passed with numeric_only=False\n # and raises a TypeError otherwise\n nanvar(values, axis, skipna, ddof=ddof, mask=mask)\n\n mask = _maybe_get_mask(values, skipna, mask)\n if not is_float_dtype(values.dtype):\n values = values.astype('f8')\n\n count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)\n var = nanvar(values, axis, skipna, ddof=ddof)\n\n return np.sqrt(var) / np.sqrt(count)\n\n\ndef _nanminmax(meth, fill_value_typ):\n @bottleneck_switch()\n def reduction(values, axis=None, skipna=True, mask=None):\n\n values, mask, dtype, dtype_max, fill_value = _get_values(\n values, skipna, fill_value_typ=fill_value_typ, mask=mask)\n\n if ((axis is not None and values.shape[axis] == 0) or\n values.size == 0):\n try:\n result = getattr(values, meth)(axis, dtype=dtype_max)\n result.fill(np.nan)\n except (AttributeError, TypeError,\n ValueError, np.core._internal.AxisError):\n result = np.nan\n else:\n result = getattr(values, meth)(axis)\n\n result = _wrap_results(result, dtype, fill_value)\n return _maybe_null_out(result, axis, mask, values.shape)\n\n reduction.__name__ = 'nan' + meth\n return reduction\n\n\nnanmin = _nanminmax('min', fill_value_typ='+inf')\nnanmax = _nanminmax('max', fill_value_typ='-inf')\n\n\n@disallow('O')\ndef nanargmax(values, axis=None, skipna=True, mask=None):\n \"\"\"\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : int\n The index of max value in specified axis or -1 in the NA case\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2, 3, np.nan, 4])\n >>> nanops.nanargmax(s)\n 4\n \"\"\"\n values, mask, dtype, _, _ = _get_values(\n values, True, fill_value_typ='-inf', mask=mask)\n result = values.argmax(axis)\n result = _maybe_arg_null_out(result, axis, mask, skipna)\n return result\n\n\n@disallow('O')\ndef nanargmin(values, axis=None, skipna=True, mask=None):\n \"\"\"\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : int\n The index of min value in specified axis or -1 in the NA case\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2, 3, np.nan, 4])\n >>> nanops.nanargmin(s)\n 0\n \"\"\"\n values, mask, dtype, _, _ = _get_values(\n values, True, fill_value_typ='+inf', mask=mask)\n result = values.argmin(axis)\n result = _maybe_arg_null_out(result, axis, mask, skipna)\n return result\n\n\n@disallow('M8', 'm8')\ndef nanskew(values, axis=None, skipna=True, mask=None):\n \"\"\" Compute the sample skewness.\n\n The statistic computed here is the adjusted Fisher-Pearson standardized\n moment coefficient G1. 
The algorithm computes this coefficient directly\n from the second and third central moment.\n\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : float64\n Unless input is a float array, in which case use the same\n precision as the input array.\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1,np.nan, 1, 2])\n >>> nanops.nanskew(s)\n 1.7320508075688787\n \"\"\"\n values = com.values_from_object(values)\n mask = _maybe_get_mask(values, skipna, mask)\n if not is_float_dtype(values.dtype):\n values = values.astype('f8')\n count = _get_counts(values.shape, mask, axis)\n else:\n count = _get_counts(values.shape, mask, axis, dtype=values.dtype)\n\n if skipna and mask is not None:\n values = values.copy()\n np.putmask(values, mask, 0)\n\n mean = values.sum(axis, dtype=np.float64) / count\n if axis is not None:\n mean = np.expand_dims(mean, axis)\n\n adjusted = values - mean\n if skipna and mask is not None:\n np.putmask(adjusted, mask, 0)\n adjusted2 = adjusted ** 2\n adjusted3 = adjusted2 * adjusted\n m2 = adjusted2.sum(axis, dtype=np.float64)\n m3 = adjusted3.sum(axis, dtype=np.float64)\n\n # floating point error\n #\n # #18044 in _libs/windows.pyx calc_skew follow this behavior\n # to fix the fperr to treat m2 <1e-14 as zero\n m2 = _zero_out_fperr(m2)\n m3 = _zero_out_fperr(m3)\n\n with np.errstate(invalid='ignore', divide='ignore'):\n result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)\n\n dtype = values.dtype\n if is_float_dtype(dtype):\n result = result.astype(dtype)\n\n if isinstance(result, np.ndarray):\n result = np.where(m2 == 0, 0, result)\n result[count < 3] = np.nan\n return result\n else:\n result = 0 if m2 == 0 else result\n if count < 3:\n return np.nan\n return result\n\n\n@disallow('M8', 'm8')\ndef nankurt(values, axis=None, skipna=True, mask=None):\n \"\"\"\n Compute the sample excess kurtosis\n\n The statistic computed here is the adjusted Fisher-Pearson standardized\n moment coefficient G2, computed directly from the second and fourth\n central moment.\n\n Parameters\n ----------\n values : ndarray\n axis: int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : float64\n Unless input is a float array, in which case use the same\n precision as the input array.\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1,np.nan, 1, 3, 2])\n >>> nanops.nankurt(s)\n -1.2892561983471076\n \"\"\"\n values = com.values_from_object(values)\n mask = _maybe_get_mask(values, skipna, mask)\n if not is_float_dtype(values.dtype):\n values = values.astype('f8')\n count = _get_counts(values.shape, mask, axis)\n else:\n count = _get_counts(values.shape, mask, axis, dtype=values.dtype)\n\n if skipna and mask is not None:\n values = values.copy()\n np.putmask(values, mask, 0)\n\n mean = values.sum(axis, dtype=np.float64) / count\n if axis is not None:\n mean = np.expand_dims(mean, axis)\n\n adjusted = values - mean\n if skipna and mask is not None:\n np.putmask(adjusted, mask, 0)\n adjusted2 = adjusted ** 2\n adjusted4 = adjusted2 ** 2\n m2 = adjusted2.sum(axis, dtype=np.float64)\n m4 = adjusted4.sum(axis, dtype=np.float64)\n\n with np.errstate(invalid='ignore', divide='ignore'):\n adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))\n numer = count * (count + 1) * (count - 1) * m4\n denom = (count - 2) 
* (count - 3) * m2 ** 2\n\n # floating point error\n #\n # #18044 in _libs/windows.pyx calc_kurt follow this behavior\n # to fix the fperr to treat denom <1e-14 as zero\n numer = _zero_out_fperr(numer)\n denom = _zero_out_fperr(denom)\n\n if not isinstance(denom, np.ndarray):\n # if ``denom`` is a scalar, check these corner cases first before\n # doing division\n if count < 4:\n return np.nan\n if denom == 0:\n return 0\n\n with np.errstate(invalid='ignore', divide='ignore'):\n result = numer / denom - adj\n\n dtype = values.dtype\n if is_float_dtype(dtype):\n result = result.astype(dtype)\n\n if isinstance(result, np.ndarray):\n result = np.where(denom == 0, 0, result)\n result[count < 4] = np.nan\n\n return result\n\n\n@disallow('M8', 'm8')\ndef nanprod(values, axis=None, skipna=True, min_count=0, mask=None):\n \"\"\"\n Parameters\n ----------\n values : ndarray[dtype]\n axis: int, optional\n skipna : bool, default True\n min_count: int, default 0\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : dtype\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2, 3, np.nan])\n >>> nanops.nanprod(s)\n 6.0\n\n Returns\n -------\n The product of all elements on a given axis. ( NaNs are treated as 1)\n \"\"\"\n mask = _maybe_get_mask(values, skipna, mask)\n\n if skipna and mask is not None:\n values = values.copy()\n values[mask] = 1\n result = values.prod(axis)\n return _maybe_null_out(result, axis, mask, values.shape,\n min_count=min_count)\n\n\ndef _maybe_arg_null_out(result: np.ndarray, axis: Optional[int],\n mask: Optional[np.ndarray],\n skipna: bool) -> Union[np.ndarray, int]:\n # helper function for nanargmin/nanargmax\n if mask is None:\n return result\n\n if axis is None or not getattr(result, 'ndim', False):\n if skipna:\n if mask.all():\n result = -1\n else:\n if mask.any():\n result = -1\n else:\n if skipna:\n na_mask = mask.all(axis)\n else:\n na_mask = mask.any(axis)\n if na_mask.any():\n result[na_mask] = -1\n return result\n\n\ndef _get_counts(values_shape: Tuple[int], mask: Optional[np.ndarray],\n axis: Optional[int], dtype=float) -> Union[int, np.ndarray]:\n \"\"\" Get the count of non-null values along an axis\n\n Parameters\n ----------\n values_shape : Tuple[int]\n shape tuple from values ndarray, used if mask is None\n mask : Optional[ndarray[bool]]\n locations in values that should be considered missing\n axis : Optional[int]\n axis to count along\n dtype : type, optional\n type to use for count\n\n Returns\n -------\n count : scalar or array\n \"\"\"\n dtype = _get_dtype(dtype)\n if axis is None:\n if mask is not None:\n n = mask.size - mask.sum()\n else:\n n = np.prod(values_shape)\n return dtype.type(n)\n\n if mask is not None:\n count = mask.shape[axis] - mask.sum(axis)\n else:\n count = values_shape[axis]\n\n if is_scalar(count):\n return dtype.type(count)\n try:\n return count.astype(dtype)\n except AttributeError:\n return np.array(count, dtype=dtype)\n\n\ndef _maybe_null_out(result: np.ndarray, axis: Optional[int],\n mask: Optional[np.ndarray], shape: Tuple,\n min_count: int = 1) -> np.ndarray:\n if (mask is not None and axis is not None and\n getattr(result, 'ndim', False)):\n null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0\n if np.any(null_mask):\n if is_numeric_dtype(result):\n if np.iscomplexobj(result):\n result = result.astype('c16')\n else:\n result = result.astype('f8')\n result[null_mask] = np.nan\n else:\n # GH12941, use None to auto cast null\n result[null_mask] = 
None\n elif result is not tslibs.NaT:\n if mask is not None:\n null_mask = mask.size - mask.sum()\n else:\n null_mask = np.prod(shape)\n if null_mask < min_count:\n result = np.nan\n\n return result\n\n\ndef _zero_out_fperr(arg):\n # #18044 reference this behavior to fix rolling skew/kurt issue\n if isinstance(arg, np.ndarray):\n with np.errstate(invalid='ignore'):\n return np.where(np.abs(arg) < 1e-14, 0, arg)\n else:\n return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg\n\n\n@disallow('M8', 'm8')\ndef nancorr(a, b, method='pearson', min_periods=None):\n \"\"\"\n a, b: ndarrays\n \"\"\"\n if len(a) != len(b):\n raise AssertionError('Operands to nancorr must have same size')\n\n if min_periods is None:\n min_periods = 1\n\n valid = notna(a) & notna(b)\n if not valid.all():\n a = a[valid]\n b = b[valid]\n\n if len(a) < min_periods:\n return np.nan\n\n f = get_corr_func(method)\n return f(a, b)\n\n\ndef get_corr_func(method):\n if method in ['kendall', 'spearman']:\n from scipy.stats import kendalltau, spearmanr\n elif callable(method):\n return method\n\n def _pearson(a, b):\n return np.corrcoef(a, b)[0, 1]\n\n def _kendall(a, b):\n rs = kendalltau(a, b)\n if isinstance(rs, tuple):\n return rs[0]\n return rs\n\n def _spearman(a, b):\n return spearmanr(a, b)[0]\n\n _cor_methods = {\n 'pearson': _pearson,\n 'kendall': _kendall,\n 'spearman': _spearman\n }\n return _cor_methods[method]\n\n\n@disallow('M8', 'm8')\ndef nancov(a, b, min_periods=None):\n if len(a) != len(b):\n raise AssertionError('Operands to nancov must have same size')\n\n if min_periods is None:\n min_periods = 1\n\n valid = notna(a) & notna(b)\n if not valid.all():\n a = a[valid]\n b = b[valid]\n\n if len(a) < min_periods:\n return np.nan\n\n return np.cov(a, b)[0, 1]\n\n\ndef _ensure_numeric(x):\n if isinstance(x, np.ndarray):\n if is_integer_dtype(x) or is_bool_dtype(x):\n x = x.astype(np.float64)\n elif is_object_dtype(x):\n try:\n x = x.astype(np.complex128)\n except (TypeError, ValueError):\n x = x.astype(np.float64)\n else:\n if not np.any(x.imag):\n x = x.real\n elif not (is_float(x) or is_integer(x) or is_complex(x)):\n try:\n x = float(x)\n except Exception:\n try:\n x = complex(x)\n except Exception:\n raise TypeError('Could not convert {value!s} to numeric'\n .format(value=x))\n return x\n\n# NA-friendly array comparisons\n\n\ndef make_nancomp(op):\n def f(x, y):\n xmask = isna(x)\n ymask = isna(y)\n mask = xmask | ymask\n\n with np.errstate(all='ignore'):\n result = op(x, y)\n\n if mask.any():\n if is_bool_dtype(result):\n result = result.astype('O')\n np.putmask(result, mask, np.nan)\n\n return result\n\n return f\n\n\nnangt = make_nancomp(operator.gt)\nnange = make_nancomp(operator.ge)\nnanlt = make_nancomp(operator.lt)\nnanle = make_nancomp(operator.le)\nnaneq = make_nancomp(operator.eq)\nnanne = make_nancomp(operator.ne)\n\n\ndef _nanpercentile_1d(values, mask, q, na_value, interpolation):\n \"\"\"\n Wraper for np.percentile that skips missing values, specialized to\n 1-dimensional case.\n\n Parameters\n ----------\n values : array over which to find quantiles\n mask : ndarray[bool]\n locations in values that should be considered missing\n q : scalar or array of quantile indices to find\n na_value : scalar\n value to return for empty or all-null values\n interpolation : str\n\n Returns\n -------\n quantiles : scalar or array\n \"\"\"\n # mask is Union[ExtensionArray, ndarray]\n values = values[~mask]\n\n if len(values) == 0:\n if lib.is_scalar(q):\n return na_value\n else:\n return np.array([na_value] * 
len(q),\n dtype=values.dtype)\n\n return np.percentile(values, q, interpolation=interpolation)\n\n\ndef nanpercentile(values, q, axis, na_value, mask, ndim, interpolation):\n \"\"\"\n Wraper for np.percentile that skips missing values.\n\n Parameters\n ----------\n values : array over which to find quantiles\n q : scalar or array of quantile indices to find\n axis : {0, 1}\n na_value : scalar\n value to return for empty or all-null values\n mask : ndarray[bool]\n locations in values that should be considered missing\n ndim : {1, 2}\n interpolation : str\n\n Returns\n -------\n quantiles : scalar or array\n \"\"\"\n if not lib.is_scalar(mask) and mask.any():\n if ndim == 1:\n return _nanpercentile_1d(values, mask, q, na_value,\n interpolation=interpolation)\n else:\n # for nonconsolidatable blocks mask is 1D, but values 2D\n if mask.ndim < values.ndim:\n mask = mask.reshape(values.shape)\n if axis == 0:\n values = values.T\n mask = mask.T\n result = [_nanpercentile_1d(val, m, q, na_value,\n interpolation=interpolation)\n for (val, m) in zip(list(values), list(mask))]\n result = np.array(result, dtype=values.dtype, copy=False).T\n return result\n else:\n return np.percentile(values, q, axis=axis, interpolation=interpolation)\n" ]
[ [ "pandas._libs.tslibs.Timedelta", "numpy.where", "numpy.iscomplexobj", "numpy.apply_along_axis", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_any_int_dtype", "pandas.core.dtypes.cast.maybe_upcast_putmask", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas._libs.lib.is_scalar", "pandas.core.dtypes.missing.isna", "pandas.core.dtypes.missing.na_value_for_dtype", "pandas.core.dtypes.missing.notna", "numpy.empty", "numpy.putmask", "pandas.core.dtypes.common.is_datetime64tz_dtype", "pandas.core.dtypes.common.pandas_dtype", "numpy.fabs", "numpy.prod", "numpy.arange", "numpy.sqrt", "numpy.isfinite", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_object_dtype", "numpy.nanmedian", "numpy.expand_dims", "numpy.array", "pandas.core.dtypes.common.is_scalar", "pandas.core.common.values_from_object", "numpy.percentile", "pandas.core.dtypes.common.is_integer", "pandas.core.dtypes.common._get_dtype", "pandas.core.dtypes.common.is_complex_dtype", "numpy.corrcoef", "numpy.isinf", "scipy.stats.kendalltau", "numpy.cov", "numpy.errstate", "pandas.compat._optional.import_optional_dependency", "scipy.stats.spearmanr", "pandas.core.dtypes.common.is_complex", "pandas._config.get_option", "pandas.core.dtypes.common.is_numeric_dtype", "numpy.any", "pandas.core.dtypes.common.is_datetime_or_timedelta_dtype", "numpy.abs", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.core.dtypes.common.is_float", "pandas.core.dtypes.common.is_bool_dtype", "pandas._libs.tslibs.Timestamp" ] ]
vadim0x60/mimic3-benchmarks
[ "2f6fa1ff32ac8b75b9bb0c900fea14124a6976f2" ]
[ "mimic3models/multitask/main.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom mimic3models.multitask import utils\nfrom mimic3benchmark.readers import MultitaskReader\nfrom mimic3models.preprocessing import Discretizer, Normalizer\nfrom mimic3models import metrics\nfrom mimic3models import keras_utils\nfrom mimic3models import common_utils\nfrom keras.callbacks import ModelCheckpoint, CSVLogger\n\nimport mimic3models.in_hospital_mortality.utils as ihm_utils\nimport mimic3models.decompensation.utils as decomp_utils\nimport mimic3models.length_of_stay.utils as los_utils\nimport mimic3models.phenotyping.utils as pheno_utils\n\nimport numpy as np\nimport argparse\nimport os\nimport imp\nimport re\n\nparser = argparse.ArgumentParser()\ncommon_utils.add_common_arguments(parser)\nparser.add_argument('--target_repl_coef', type=float, default=0.0)\nparser.add_argument('--partition', type=str, default='custom', help=\"log, custom, none\")\nparser.add_argument('--ihm_C', type=float, default=1.0)\nparser.add_argument('--los_C', type=float, default=1.0)\nparser.add_argument('--pheno_C', type=float, default=1.0)\nparser.add_argument('--decomp_C', type=float, default=1.0)\nparser.add_argument('--data', type=str, help='Path to the data of multitasking',\n default=os.path.join(os.path.dirname(__file__), '../../data/multitask/'))\nparser.add_argument('--output_dir', type=str, help='Directory relative which all output files are stored',\n default='.')\nargs = parser.parse_args()\nprint(args)\n\nif args.small_part:\n args.save_every = 2 ** 30\n\ntarget_repl = (args.target_repl_coef > 0.0 and args.mode == 'train')\n\n# Build readers, discretizers, normalizers\ntrain_reader = MultitaskReader(dataset_dir=os.path.join(args.data, 'train'),\n listfile=os.path.join(args.data, 'train_listfile.csv'))\n\nval_reader = MultitaskReader(dataset_dir=os.path.join(args.data, 'train'),\n listfile=os.path.join(args.data, 'val_listfile.csv'))\n\ndiscretizer = Discretizer(timestep=args.timestep,\n store_masks=True,\n impute_strategy='previous',\n start_time='zero')\n\ndiscretizer_header = discretizer.transform(train_reader.read_example(0)[\"X\"])[1].split(',')\ncont_channels = [i for (i, x) in enumerate(discretizer_header) if x.find(\"->\") == -1]\n\nnormalizer = Normalizer(fields=cont_channels) # choose here which columns to standardize\nnormalizer_state = args.normalizer_state\nif normalizer_state is None:\n normalizer_state = 'mult_ts{}.input_str:{}.start_time:zero.normalizer'.format(args.timestep, args.imputation)\n normalizer_state = os.path.join(os.path.dirname(__file__), normalizer_state)\nnormalizer.load_params(normalizer_state)\n\nargs_dict = dict(args._get_kwargs())\nargs_dict['header'] = discretizer_header\nargs_dict['ihm_pos'] = int(48.0 / args.timestep - 1e-6)\nargs_dict['target_repl'] = target_repl\n\n# Build the model\nprint(\"==> using model {}\".format(args.network))\nmodel_module = imp.load_source(os.path.basename(args.network), args.network)\nmodel = model_module.Network(**args_dict)\nsuffix = \".bs{}{}{}.ts{}{}_partition={}_ihm={}_decomp={}_los={}_pheno={}\".format(\n args.batch_size,\n \".L1{}\".format(args.l1) if args.l1 > 0 else \"\",\n \".L2{}\".format(args.l2) if args.l2 > 0 else \"\",\n args.timestep,\n \".trc{}\".format(args.target_repl_coef) if args.target_repl_coef > 0 else \"\",\n args.partition,\n args.ihm_C,\n args.decomp_C,\n args.los_C,\n args.pheno_C)\nmodel.final_name = args.prefix + model.say_name() + suffix\nprint(\"==> model.final_name:\", model.final_name)\n\n# Compile the 
model\nprint(\"==> compiling the model\")\noptimizer_config = {'class_name': args.optimizer,\n 'config': {'lr': args.lr,\n 'beta_1': args.beta_1}}\n\n# Define loss functions\n\nloss_dict = {}\nloss_weights = {}\n\n# ihm\nif target_repl:\n loss_dict['ihm_single'] = 'binary_crossentropy'\n loss_dict['ihm_seq'] = 'binary_crossentropy'\n loss_weights['ihm_single'] = args.ihm_C * (1 - args.target_repl_coef)\n loss_weights['ihm_seq'] = args.ihm_C * args.target_repl_coef\nelse:\n loss_dict['ihm'] = 'binary_crossentropy'\n loss_weights['ihm'] = args.ihm_C\n\n# decomp\nloss_dict['decomp'] = 'binary_crossentropy'\nloss_weights['decomp'] = args.decomp_C\n\n# los\nif args.partition == 'none':\n # other options are: 'mean_squared_error', 'mean_absolute_percentage_error'\n loss_dict['los'] = 'mean_squared_logarithmic_error'\nelse:\n loss_dict['los'] = 'sparse_categorical_crossentropy'\nloss_weights['los'] = args.los_C\n\n# pheno\nif target_repl:\n loss_dict['pheno_single'] = 'binary_crossentropy'\n loss_dict['pheno_seq'] = 'binary_crossentropy'\n loss_weights['pheno_single'] = args.pheno_C * (1 - args.target_repl_coef)\n loss_weights['pheno_seq'] = args.pheno_C * args.target_repl_coef\nelse:\n loss_dict['pheno'] = 'binary_crossentropy'\n loss_weights['pheno'] = args.pheno_C\n\nmodel.compile(optimizer=optimizer_config,\n loss=loss_dict,\n loss_weights=loss_weights)\nmodel.summary()\n\n# Load model weights\nn_trained_chunks = 0\nif args.load_state != \"\":\n model.load_weights(args.load_state)\n n_trained_chunks = int(re.match(\".*epoch([0-9]+).*\", args.load_state).group(1))\n\n# Build data generators\ntrain_data_gen = utils.BatchGen(reader=train_reader,\n discretizer=discretizer,\n normalizer=normalizer,\n ihm_pos=args_dict['ihm_pos'],\n partition=args.partition,\n target_repl=target_repl,\n batch_size=args.batch_size,\n small_part=args.small_part,\n shuffle=True)\nval_data_gen = utils.BatchGen(reader=val_reader,\n discretizer=discretizer,\n normalizer=normalizer,\n ihm_pos=args_dict['ihm_pos'],\n partition=args.partition,\n target_repl=target_repl,\n batch_size=args.batch_size,\n small_part=args.small_part,\n shuffle=False)\n\nif args.mode == 'train':\n # Prepare training\n path = os.path.join(args.output_dir, 'keras_states/' + model.final_name + '.epoch{epoch}.test{val_loss}.state')\n\n metrics_callback = keras_utils.MultitaskMetrics(train_data_gen=train_data_gen,\n val_data_gen=val_data_gen,\n partition=args.partition,\n batch_size=args.batch_size,\n verbose=args.verbose)\n # make sure save directory exists\n dirname = os.path.dirname(path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n saver = ModelCheckpoint(path, verbose=1, period=args.save_every)\n\n keras_logs = os.path.join(args.output_dir, 'keras_logs')\n if not os.path.exists(keras_logs):\n os.makedirs(keras_logs)\n csv_logger = CSVLogger(os.path.join(keras_logs, model.final_name + '.csv'),\n append=True, separator=';')\n\n print(\"==> training\")\n model.fit_generator(generator=train_data_gen,\n steps_per_epoch=train_data_gen.steps,\n validation_data=val_data_gen,\n validation_steps=val_data_gen.steps,\n epochs=n_trained_chunks + args.epochs,\n initial_epoch=n_trained_chunks,\n callbacks=[metrics_callback, saver, csv_logger],\n verbose=args.verbose)\n\nelif args.mode == 'test':\n # ensure that the code uses test_reader\n del train_reader\n del val_reader\n del train_data_gen\n del val_data_gen\n\n test_reader = MultitaskReader(dataset_dir=os.path.join(args.data, 'test'),\n listfile=os.path.join(args.data, 
'test_listfile.csv'))\n\n test_data_gen = utils.BatchGen(reader=test_reader,\n discretizer=discretizer,\n normalizer=normalizer,\n ihm_pos=args_dict['ihm_pos'],\n partition=args.partition,\n target_repl=target_repl,\n batch_size=args.batch_size,\n small_part=args.small_part,\n shuffle=False,\n return_names=True)\n ihm_y_true = []\n decomp_y_true = []\n los_y_true = []\n pheno_y_true = []\n\n ihm_pred = []\n decomp_pred = []\n los_pred = []\n pheno_pred = []\n\n ihm_names = []\n decomp_names = []\n los_names = []\n pheno_names = []\n\n decomp_ts = []\n los_ts = []\n pheno_ts = []\n\n for i in range(test_data_gen.steps):\n print(\"\\tdone {}/{}\".format(i, test_data_gen.steps), end='\\r')\n ret = test_data_gen.next(return_y_true=True)\n (X, y, los_y_reg) = ret[\"data\"]\n outputs = model.predict(X, batch_size=args.batch_size)\n\n names = list(ret[\"names\"])\n names_extended = np.array(names).repeat(X[0].shape[1], axis=-1)\n\n ihm_M = X[1]\n decomp_M = X[2]\n los_M = X[3]\n\n assert len(outputs) == 4 # no target replication\n (ihm_p, decomp_p, los_p, pheno_p) = outputs\n (ihm_t, decomp_t, los_t, pheno_t) = y\n\n los_t = los_y_reg # real value not the label\n\n # ihm\n for (m, t, p, name) in zip(ihm_M.flatten(), ihm_t.flatten(), ihm_p.flatten(), names):\n if np.equal(m, 1):\n ihm_y_true.append(t)\n ihm_pred.append(p)\n ihm_names.append(name)\n\n # decomp\n for x in ret['decomp_ts']:\n decomp_ts += x\n for (name, m, t, p) in zip(names_extended.flatten(), decomp_M.flatten(),\n decomp_t.flatten(), decomp_p.flatten()):\n if np.equal(m, 1):\n decomp_names.append(name)\n decomp_y_true.append(t)\n decomp_pred.append(p)\n\n # los\n for x in ret['los_ts']:\n los_ts += x\n if los_p.shape[-1] == 1: # regression\n for (name, m, t, p) in zip(names_extended.flatten(), los_M.flatten(),\n los_t.flatten(), los_p.flatten()):\n if np.equal(m, 1):\n los_names.append(name)\n los_y_true.append(t)\n los_pred.append(p)\n else: # classification\n for (name, m, t, p) in zip(names_extended.flatten(), los_M.flatten(),\n los_t.flatten(), los_p.reshape((-1, 10))):\n if np.equal(m, 1):\n los_names.append(name)\n los_y_true.append(t)\n los_pred.append(p)\n\n # pheno\n pheno_names += list(names)\n pheno_ts += list(ret[\"pheno_ts\"])\n for (t, p) in zip(pheno_t.reshape((-1, 25)), pheno_p.reshape((-1, 25))):\n pheno_y_true.append(t)\n pheno_pred.append(p)\n print('\\n')\n\n # ihm\n if args.ihm_C > 0:\n print(\"\\n ================= 48h mortality ================\")\n ihm_pred = np.array(ihm_pred)\n ihm_ret = metrics.print_metrics_binary(ihm_y_true, ihm_pred)\n\n # decomp\n if args.decomp_C > 0:\n print(\"\\n ================ decompensation ================\")\n decomp_pred = np.array(decomp_pred)\n decomp_ret = metrics.print_metrics_binary(decomp_y_true, decomp_pred)\n\n # los\n if args.los_C > 0:\n print(\"\\n ================ length of stay ================\")\n if args.partition == 'log':\n los_pred = [metrics.get_estimate_log(x, 10) for x in los_pred]\n los_ret = metrics.print_metrics_log_bins(los_y_true, los_pred)\n if args.partition == 'custom':\n los_pred = [metrics.get_estimate_custom(x, 10) for x in los_pred]\n los_ret = metrics.print_metrics_custom_bins(los_y_true, los_pred)\n if args.partition == 'none':\n los_ret = metrics.print_metrics_regression(los_y_true, los_pred)\n\n # pheno\n if args.pheno_C > 0:\n print(\"\\n =================== phenotype ==================\")\n pheno_pred = np.array(pheno_pred)\n pheno_ret = metrics.print_metrics_multilabel(pheno_y_true, pheno_pred)\n\n print(\"Saving the predictions in 
test_predictions/task directories ...\")\n\n # ihm\n ihm_path = os.path.join(os.path.join(args.output_dir,\n \"test_predictions/ihm\", os.path.basename(args.load_state)) + \".csv\")\n ihm_utils.save_results(ihm_names, ihm_pred, ihm_y_true, ihm_path)\n\n # decomp\n decomp_path = os.path.join(os.path.join(args.output_dir,\n \"test_predictions/decomp\", os.path.basename(args.load_state)) + \".csv\")\n decomp_utils.save_results(decomp_names, decomp_ts, decomp_pred, decomp_y_true, decomp_path)\n\n # los\n los_path = os.path.join(os.path.join(args.output_dir,\n \"test_predictions/los\", os.path.basename(args.load_state)) + \".csv\")\n los_utils.save_results(los_names, los_ts, los_pred, los_y_true, los_path)\n\n # pheno\n pheno_path = os.path.join(os.path.join(args.output_dir,\n \"test_predictions/pheno\", os.path.basename(args.load_state)) + \".csv\")\n pheno_utils.save_results(pheno_names, pheno_ts, pheno_pred, pheno_y_true, pheno_path)\n\nelse:\n raise ValueError(\"Wrong value for args.mode\")\n" ]
[ [ "numpy.equal", "numpy.array" ] ]
liyingben/kaggle-airbus-ship-detection
[ "21d89b2f1273b31a6ffafb4fe5f7e643ffbbc567" ]
[ "src/models/linknet.py" ]
[ "from collections import OrderedDict\nimport torch.nn as nn\nimport torchvision.models as models\n\n\nclass LinkNet(nn.Module):\n def __init__(self, num_classes, resnet_size=18, pretrained_encoder=True):\n super().__init__()\n self.num_classes = num_classes\n\n # The LinkNet encoder is a ResNet18 without the last average pooling layer and\n # the fully connected layer\n if resnet_size == 18:\n resnet = models.resnet18(pretrained=pretrained_encoder)\n elif resnet_size == 34:\n resnet = models.resnet34(pretrained=pretrained_encoder)\n else:\n raise ValueError(\n \"expected 18 or 34 for resnet_size, got {}\".format(resnet_size)\n )\n encoder_list = list(resnet.named_children())[:-2]\n self.encoder = nn.Sequential(OrderedDict([*encoder_list]))\n\n # Construct the decoder\n self.layer4_d = DecoderBlock(512, 256, stride=2, padding=1)\n self.layer3_d = DecoderBlock(256, 128, stride=2, padding=1)\n self.layer2_d = DecoderBlock(128, 64, stride=2, padding=1)\n self.layer1_d = DecoderBlock(64, 64, stride=1, padding=1)\n self.tconv1_d = nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1)\n self.bn1_d = nn.BatchNorm2d(32)\n self.relu1_d = nn.ReLU()\n self.conv1_d = nn.Conv2d(32, 32, 3, padding=1)\n self.bn2_d = nn.BatchNorm2d(32)\n self.relu2_d = nn.ReLU()\n self.tconv2_d = nn.ConvTranspose2d(32, self.num_classes, 3, stride=2, padding=1)\n\n def forward(self, x):\n input_x = x\n\n # Have to access the output of a few layers in the encoder to make the skip\n # connections. For that, iterate over all modules in the encoder, do the\n # forward pass and save the output for the layers that are needed\n skip = {}\n for name, module in self.encoder.named_children():\n x = module(x)\n if name in (\"conv1\", \"maxpool\", \"layer1\", \"layer2\", \"layer3\"):\n skip[name] = x\n\n x = skip[\"layer3\"] + self.layer4_d(x, skip[\"layer3\"].size())\n x = skip[\"layer2\"] + self.layer3_d(x, skip[\"layer2\"].size())\n x = skip[\"layer1\"] + self.layer2_d(x, skip[\"layer1\"].size())\n x = self.layer1_d(x, skip[\"maxpool\"].size())\n x = self.tconv1_d(x, skip[\"conv1\"].size())\n x = self.bn1_d(x)\n x = self.relu1_d(x)\n x = self.conv1_d(x)\n x = self.bn2_d(x)\n x = self.relu2_d(x)\n\n return self.tconv2_d(x, input_x.size())\n\n\nclass DecoderBlock(nn.Module):\n \"\"\"Creates a decoder block.\n\n Decoder block architecture:\n 1. Conv2D\n 2. BatchNormalization\n 3. ReLU\n 4. Conv2DTranspose\n 5. BatchNormalization\n 6. ReLU\n 7. Conv2D\n 8. BatchNormalization\n 9. ReLU\n\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n stride=1,\n padding=0,\n output_padding=0,\n projection_ratio=4,\n bias=False,\n ):\n super().__init__()\n\n proj_channels = in_channels // projection_ratio\n self.conv1 = nn.Conv2d(in_channels, proj_channels, 1)\n self.bn1 = nn.BatchNorm2d(proj_channels)\n self.relu1 = nn.ReLU()\n self.tconv = nn.ConvTranspose2d(\n proj_channels,\n proj_channels,\n 3,\n stride=stride,\n padding=padding,\n output_padding=output_padding,\n )\n self.bn2 = nn.BatchNorm2d(proj_channels)\n self.relu2 = nn.ReLU()\n self.conv2 = nn.Conv2d(proj_channels, out_channels, 1)\n self.bn3 = nn.BatchNorm2d(out_channels)\n self.relu3 = nn.ReLU()\n\n def forward(self, x, output_size=None):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.tconv(x, output_size=output_size)\n x = self.bn2(x)\n x = self.relu2(x)\n x = self.conv2(x)\n x = self.bn3(x)\n return self.relu3(x)\n" ]
[ [ "torch.nn.ReLU", "torch.nn.BatchNorm2d", "torch.nn.ConvTranspose2d", "torch.nn.Conv2d" ] ]
isaacsultan/comp-550
[ "24e7d22a6f998a94ad6eb020f1df13970da4b150" ]
[ "src/models/glove_filter.py" ]
[ "import pickle\r\nimport csv\r\nimport os\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom util.params import Params\r\n\r\n\r\ndef filter_glove(word_indices_path, filtered_output):\r\n\r\n print(\"Read files...\")\r\n iterator = pd.read_csv('data/glove.42B.300d.txt', header=None, index_col=0, \r\n delim_whitespace=True, quoting=csv.QUOTE_NONE, dtype=\"str\", chunksize=100000)\r\n\r\n with open(word_indices_path, 'rb') as f:\r\n word_indices = pickle.load(f)\r\n print(\"Done.\")\r\n\r\n df = pd.DataFrame()\r\n\r\n words = set(word_indices.keys())\r\n\r\n total = 0\r\n in_glove = 0\r\n total_ubuntu = len(words)\r\n\r\n print(\"Iterating through chunks...\")\r\n done = 0\r\n # Iterate chunk by chunk\r\n for i in iterator:\r\n total += i.shape[0]\r\n unique_toks = set(i.index.values)\r\n in_glove += len(unique_toks.intersection(words))\r\n\r\n remain = unique_toks - words\r\n df = df.append(i.drop(remain, axis=0))\r\n done += 1\r\n print(\"Batch {} done\".format(done))\r\n print(\"Done.\")\r\n\r\n # Print compression percentage\r\n filtered = df.shape[0]\r\n print(\"Kept {0:.4f}% of the rows\".format((filtered/total) * 100))\r\n print(\"{0:.4f}% of tokens were in glove\".format(in_glove/total_ubuntu))\r\n\r\n df.to_csv(filtered_output, sep=\" \", header=False, index=True, quoting=csv.QUOTE_NONE)\r\n\r\n\r\ndef main():\r\n params = Params()\r\n indices_path = os.path.join(params.dump_folder, \"word_indices.pkl\")\r\n output_path = os.path.join(params.data_folder, \"glove_filtered.txt\")\r\n\r\n filter_glove(indices_path, output_path)\r\n\r\nif __name__ == \"__main__\":\r\n main()" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
xihuaiwen/chinese_bert
[ "631afbc76c40b0ac033be2186e717885246f446c" ]
[ "code_examples/tensorflow/cosmoflow/models/cosmoflow.py" ]
[ "\"\"\"Configurable model specification for CosmoFlow\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.layers as layers\n\nfrom .layers import scale_1p2\n\n\ndef build_model(input_shape, target_size,\n conv_size=16, kernel_size=2, n_conv_layers=5,\n fc1_size=128, fc2_size=64,\n hidden_activation='LeakyReLU',\n pooling_type='MaxPool3D',\n dropout=0):\n \"\"\"Construct the CosmoFlow 3D CNN model\"\"\"\n\n conv_args = dict(kernel_size=kernel_size, padding='same')\n hidden_activation = getattr(layers, hidden_activation)\n pooling_type = getattr(layers, pooling_type)\n\n model = tf.keras.models.Sequential()\n\n # First convolutional layer\n model.add(layers.Conv3D(conv_size, input_shape=input_shape, **conv_args))\n model.add(hidden_activation())\n model.add(pooling_type(pool_size=2))\n\n # Additional conv layers\n for i in range(1, n_conv_layers):\n # Double conv channels at every layer\n model.add(layers.Conv3D(conv_size*2**i, **conv_args))\n model.add(hidden_activation())\n model.add(pooling_type(pool_size=2))\n model.add(layers.Flatten())\n\n # Fully-connected layers\n model.add(layers.Dense(fc1_size))\n model.add(hidden_activation())\n model.add(layers.Dropout(dropout))\n model.add(layers.Dense(fc2_size))\n model.add(hidden_activation())\n model.add(layers.Dropout(dropout))\n\n # Output layers\n model.add(layers.Dense(target_size, activation='tanh'))\n model.add(layers.Lambda(scale_1p2))\n\n return model\n" ]
[ [ "tensorflow.keras.layers.Lambda", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.keras.models.Sequential" ] ]
JIAQING-XIE/Google_NLP_DL
[ "45f45e8cbca695ad079af58790edd0619783b0c2" ]
[ "9.11/tor/model_lstm/lstm.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom config import *\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\n\nclass LSTM(nn.Module):\n def __init__(self, vocab_size, embedding_size, word_embedding_matrix):\n super(LSTM, self).__init__()\n\n # self.word_emb = nn.Embedding(vocab_size, embedding_size)\n # self.word_emb.weight.data.copy_(torch.from_numpy(word_embedding_matrix))\n # self.word_emb.weight.requires_grad = False\n word_embedding_matrix = torch.from_numpy(word_embedding_matrix)\n self.word_emb = nn.Embedding.from_pretrained(word_embedding_matrix)\n self.word_emb.weight.requires_grad = False\n\n self.lstm = nn.LSTM(input_size=embedding_size, hidden_size=lstm_hidding_dim, bidirectional=True,batch_first=True)\n self.dropout = nn.Dropout(p=dropout)\n\n self.classify_layer = nn.Linear(lstm_hidding_dim * 2, classfy_number)\n\n def forward(self, input_words_ids):\n input_words_ids_embedding= self.word_emb(input_words_ids)\n input_words_ids_embedding = self.dropout(input_words_ids_embedding)\n\n batch_size = input_words_ids_embedding.shape[0]\n\n hidden_state = torch.randn(1 * 2, batch_size,lstm_hidding_dim) # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]\n cell_state = torch.randn(1 * 2, batch_size,lstm_hidding_dim) # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]\n\n hidden_state = torch.nn.init.xavier_uniform_(hidden_state)\n cell_state = torch.nn.init.xavier_uniform_(cell_state)\n\n outputs, (_, _) = self.lstm(input_words_ids_embedding, (hidden_state, cell_state))\n\n scores = self.classify_layer(outputs)\n scores_softmax = F.softmax(scores,dim=-1)\n predict = torch.max(scores_softmax,dim=-1)[1]\n scores_log = F.log_softmax(scores,dim=-1)\n return scores_log,predict\n\n def adjust_learning_rate(self,lr, optimizer, epoch,):\n lr = lr / (1 + epoch * decay_rate)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n" ]
[ [ "torch.nn.Linear", "torch.nn.Embedding.from_pretrained", "torch.nn.Dropout", "torch.nn.LSTM", "torch.max", "torch.nn.init.xavier_uniform_", "torch.from_numpy", "torch.nn.functional.log_softmax", "torch.nn.functional.softmax", "torch.randn" ] ]
acumos/acumos-c-client
[ "717e97e10c04fead31cb116a1dd30342cde3b726" ]
[ "acumos_cpp/tests/test_protogen.py" ]
[ "# -*- coding: utf-8 -*-\n# ===============LICENSE_START=======================================================\n# Acumos Apache-2.0\n# ===================================================================================\n# Copyright (C) 2017-2018 AT&T Intellectual Property & Tech Mahindra. All rights reserved.\n# ===================================================================================\n# This Acumos software file is distributed by AT&T and Tech Mahindra\n# under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# This file is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============LICENSE_END=========================================================\n\"\"\"\nProvides modeling tests\n\"\"\"\nimport tempfile\n\nimport pytest\nimport numpy as np\nimport pandas as pd\n\nfrom acumos.modeling import Model, Enum, List, NamedTuple, create_dataframe\nfrom acumos.protogen import _types_equal, _require_unique, _nt2proto, model2proto, compile_protostr\nfrom acumos.exc import AcumosError\n\n\ndef test_type_equality():\n '''Tests that type equality function works as expected'''\n t1 = NamedTuple('T1', [('x', int), ('y', int)])\n t2 = NamedTuple('T1', [('x', int), ('y', float)])\n t3 = NamedTuple('T2', [('x', int), ('y', t1)])\n t4 = NamedTuple('T2', [('x', int), ('y', t2)])\n t5 = NamedTuple('T3', [('x', int), ('y', t1)])\n t6 = NamedTuple('T2', [('x', int), ('y', t1)])\n t7 = NamedTuple('T2', [('x', int), ('z', t1)])\n\n assert not _types_equal(t1, t2) # type differs\n assert not _types_equal(t3, t4) # type differs\n assert not _types_equal(t3, t5) # name differs\n assert not _types_equal(t3, t7) # field differs\n assert _types_equal(t3, t6)\n\n\ndef test_require_unique():\n '''Tests that unique types are tested for'''\n t1 = NamedTuple('T1', [('x', int), ('y', int)])\n t2 = NamedTuple('T1', [('x', int), ('y', float)])\n t3 = NamedTuple('T1', [('x', int), ('y', int)])\n\n with pytest.raises(AcumosError):\n _require_unique((t1, t2, t3)) # t2 is a different definition of T1\n\n uniq = _require_unique((t1, t3))\n assert len(uniq) == 1\n assert t1 in uniq or t3 in uniq\n\n\ndef test_nt2proto():\n '''Tests the generation of protobuf messages from NamedTuple'''\n Foo = NamedTuple('Foo', [('x', int), ('y', int)])\n Bar = NamedTuple('Bar', [('x', Foo)])\n\n _nt2proto(Foo, set())\n\n # dependence on Foo which has not been declared\n with pytest.raises(AcumosError):\n _nt2proto(Bar, set())\n\n _nt2proto(Bar, {Foo.__name__, })\n\n\ndef test_model2proto():\n '''Tests the generation of protobuf messages from a Model'''\n T1 = NamedTuple('T1', [('x', int), ('y', int)])\n T2 = NamedTuple('T2', [('data', int)])\n\n Thing = Enum('Thing', 'a b c d e')\n\n def f1(x: int, y: int) -> int:\n return x + y\n\n def f2(data: T1) -> T2:\n return T2(data.x + data.y)\n\n def f3(data: List[Thing]) -> Thing:\n return data[0]\n\n def f4(data: List[T1]) -> None:\n pass\n\n def f5(x: List[np.int32]) -> np.int32:\n return np.sum(x)\n\n df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n TestDataFrame = create_dataframe('TestDataFrame', df)\n\n def f6(in_: TestDataFrame) -> None:\n pass\n\n model = Model(f1=f1, f2=f2, f3=f3, f4=f4, f5=f5, f6=f6)\n module = 'model'\n package = 'pkg'\n 
protostr = model2proto(model, package)\n\n # main test is to make sure that compilation doesn't fail\n with tempfile.TemporaryDirectory() as tdir:\n compile_protostr(protostr, package, module, tdir)\n\n\nif __name__ == '__main__':\n '''Test area'''\n pytest.main([__file__, ])\n" ]
[ [ "pandas.DataFrame", "numpy.sum" ] ]
selonsy/MachineLearning
[ "4e1be16aeab6a312511206751e9c168963d31839", "4e1be16aeab6a312511206751e9c168963d31839" ]
[ "asimo/train/Config.py", "asimo/Z_DaSiamRPN/utils.py" ]
[ "\"\"\"\nConfiguration for training SiamFC and tracking evaluation\nWritten by Heng Fan\n\"\"\"\nimport numpy as np\n\nclass Config:\n def __init__(self):\n\n\n self.show_interval = 100 # 用于多久显示一次训练的信息\n self.anchor_scales = np.array([32, 64, 128, 256]) # np.array([8, ]) siameseRPN; siamFPN: 32, 64, 128, 256 ,512\n self.anchor_ratios = np.array([0.5, 1, 2]) # np.array([0.33, 0.5, 1, 2, 3])\n self.anchor_num = len(self.anchor_scales) * len(self.anchor_ratios)\n self.num_pos = 16 # 正样本的数量\n self.num_neg = 48 # 负样本的数量\n self.lamb = 100 # cls和reg的调节比率 \n self.log_dir = 'models//logs'\n # anchor_scales = (32, 64, 128, 256 ,512)\n # context_amount = 0.5 # context amount for the exemplar\n # ratios = [0.5, 1, 2] \n\n # parameters for training\n self.pos_pair_range = 100\n self.num_pairs = 53200 #5.32e4 # z&x 的图片对数\n self.val_ratio = 0.1\n self.num_epoch = 1 # 训练轮次,暂定为1,原始为50(一轮是指将所有的训练数据跑一遍)\n self.batch_size = 8\n self.examplar_size = 127\n # self.instance_size = 255\n self.instance_size = 271\n self.sub_mean = 0\n self.train_num_workers = 4 # 12 # number of threads to load data when training\n self.val_num_workers = 4 # 8\n self.stride = 8\n self.rPos = 16\n self.rNeg = 0\n self.label_weight_method = \"balanced\"\n\n self.lr = 1e-2 # learning rate of SGD\n self.momentum = 0.9 # momentum of SGD\n self.weight_decay = 5e-4 # weight decay of optimizator\n self.step_size = 1 # step size of LR_Schedular\n self.gamma = 0.8685 # decay rate of LR_Schedular\n\n # parameters for tracking (SiamFC-3s by default)\n self.num_scale = 3\n self.scale_step = 1.0375\n self.scale_penalty = 0.9745\n self.scale_LR = 0.59\n self.response_UP = 16\n self.windowing = \"cosine\"\n self.w_influence = 0.176\n\n self.video = \"Lemming\"\n self.visualization = 1\n self.bbox_output = True\n self.bbox_output_path = \"./tracking_result/\"\n\n self.context_amount = 0.5\n self.scale_min = 0.2\n self.scale_max = 5\n self.score_size = int((self.instance_size - self.examplar_size) / 8 + 1) # 255/127=17,271/127=19\n\n # path to your trained model\n # self.net_base_path = \"/home/hfan/Desktop/PyTorch-SiamFC/Train/model/\"\n self.net_base_path = r\"D:\\workspace\\vot\\asimo\\SiamFPN\\SiamFC-PyTorch-master\\Train\\model\"\n # path to your sequences (sequence should be in OTB format)\n # self.seq_base_path = \"/home/hfan/Desktop/demo-sequences/\"\n self.seq_base_path = r\"D:\\workspace\\vot\\tracker_benchmark\\OTB\"\n # which model to use\n self.net = \"SiamFC_50_model.pth\"", "# --------------------------------------------------------\n# DaSiamRPN\n# Licensed under The MIT License\n# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)\n# --------------------------------------------------------\nimport cv2\nimport torch\nimport numpy as np\n\n\ndef to_numpy(tensor):\n if torch.is_tensor(tensor):\n return tensor.cpu().numpy()\n elif type(tensor).__module__ != 'numpy':\n raise ValueError(\"Cannot convert {} to numpy array\"\n .format(type(tensor)))\n return tensor\n\n\ndef to_torch(ndarray):\n if type(ndarray).__module__ == 'numpy':\n return torch.from_numpy(ndarray)\n elif not torch.is_tensor(ndarray):\n raise ValueError(\"Cannot convert {} to torch tensor\"\n .format(type(ndarray)))\n return ndarray\n\n\ndef im_to_numpy(img):\n img = to_numpy(img)\n img = np.transpose(img, (1, 2, 0)) # H*W*C\n return img\n\n\ndef im_to_torch(img):\n img = np.transpose(img, (2, 0, 1)) # C*H*W\n img = to_torch(img).float()\n return img\n\n\ndef torch_to_img(img):\n img = to_numpy(torch.squeeze(img, 0))\n img = np.transpose(img, (1, 2, 0)) # H*W*C\n return 
img\n\n\ndef get_subwindow_tracking(im, pos, model_sz, original_sz, avg_chans, out_mode='torch', new=False):\n \"\"\"\n 获取图像及目标信息\n :param im: \n :param pos: \n :param model_sz: \n :param original_sz: \n :param avg_chans: \n :param out_mode='torch': \n :param new=False: \n \"\"\"\n if isinstance(pos, float):\n pos = [pos, pos]\n sz = original_sz\n im_sz = im.shape\n c = (original_sz+1) / 2\n context_xmin = round(pos[0] - c) # floor(pos(2) - sz(2) / 2);\n context_xmax = context_xmin + sz - 1\n context_ymin = round(pos[1] - c) # floor(pos(1) - sz(1) / 2);\n context_ymax = context_ymin + sz - 1\n left_pad = int(max(0., -context_xmin))\n top_pad = int(max(0., -context_ymin))\n right_pad = int(max(0., context_xmax - im_sz[1] + 1))\n bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))\n\n # 获得背景坐标(考虑到可能出界了,所以加上pad的值,完成截取)\n context_xmin = context_xmin + left_pad\n context_xmax = context_xmax + left_pad\n context_ymin = context_ymin + top_pad\n context_ymax = context_ymax + top_pad\n\n # zzp: a more easy speed version\n # 如果需要填充,首先初始化te_im,再进行对应位置赋值,最后赋给im_patch_original\n r, c, k = im.shape\n if any([top_pad, bottom_pad, left_pad, right_pad]):#如果四边有不在图像im中的,则直接利用im的均值进行填充\n te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8) # 0 is better than 1 initialization\n te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im\n if top_pad:\n te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans\n if bottom_pad:\n te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans\n if left_pad:\n te_im[:, 0:left_pad, :] = avg_chans\n if right_pad:\n te_im[:, c + left_pad:, :] = avg_chans\n im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]\n else: #如果以pos为中心,original_sz为边长的正方形在图像im中,则直接进行截取\n im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]\n\n # 如果原始图像块大小与模型输入不同则调用 OpenCV 函数\n if not np.array_equal(model_sz, original_sz):\n im_patch = cv2.resize(im_patch_original, (model_sz, model_sz)) # zzp: use cv to get a better speed\n else:\n im_patch = im_patch_original\n\n return im_to_torch(im_patch) if out_mode in 'torch' else im_patch\n\n\ndef cxy_wh_2_rect(pos, sz):\n return np.array([pos[0]-sz[0]/2, pos[1]-sz[1]/2, sz[0], sz[1]]) # 0-index\n\n\ndef rect_2_cxy_wh(rect):\n return np.array([rect[0]+rect[2]/2, rect[1]+rect[3]/2]), np.array([rect[2], rect[3]]) # 0-index\n\n\ndef get_axis_aligned_bbox(region):\n try:\n region = np.array([region[0][0][0], region[0][0][1], region[0][1][0], region[0][1][1],\n region[0][2][0], region[0][2][1], region[0][3][0], region[0][3][1]])\n except:\n region = np.array(region)\n cx = np.mean(region[0::2]) # region[0::2]:从下标为0的地方开始,到结束,步长为2 np.mean:求平均值\n cy = np.mean(region[1::2])\n x1 = min(region[0::2])\n x2 = max(region[0::2])\n y1 = min(region[1::2])\n y2 = max(region[1::2])\n # linalg=linear(线性)+algebra(代数)\n # 求范数,默认为L_2范数,即所有值的平方和再开方\n # A1=bbox的实际面积\n A1 = np.linalg.norm(region[0:2] - region[2:4]) * np.linalg.norm(region[2:4] - region[4:6]) #region[0:2]:表示下标从0开始到2截止,不包括2\n A2 = (x2 - x1) * (y2 - y1) # A2:为完全框住region的矩形面积\n s = np.sqrt(A1 / A2)\n w = s * (x2 - x1) + 1\n h = s * (y2 - y1) + 1\n return cx, cy, w, h # region中心点的坐标以及宽高" ]
[ [ "numpy.array" ], [ "numpy.array", "numpy.linalg.norm", "numpy.array_equal", "numpy.zeros", "torch.is_tensor", "numpy.mean", "torch.from_numpy", "torch.squeeze", "numpy.transpose", "numpy.sqrt" ] ]
DiegoOrtegoP/Software
[ "4a07dd2dab29db910ca2e26848fa6b53b7ab00cd", "4a07dd2dab29db910ca2e26848fa6b53b7ab00cd" ]
[ "catkin_ws/src/f23-LED/led_detection/include/led_detection/LEDDetector_forloops.py", "catkin_ws/src/ros_cap/src/line_detector.py" ]
[ "from api import LEDDetector\nfrom duckietown_msgs.msg import Vector2D, LEDDetection, LEDDetectionArray\nfrom led_detection import logger\nfrom math import floor, ceil\nimport numpy as np\nimport rospy\nimport time\n\n# plotting \nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.patches import Rectangle\n\n# image filters\nfrom scipy.ndimage.filters import maximum_filter\nfrom scipy.ndimage.morphology import generate_binary_structure, binary_erosion\n\n# fft\nimport scipy.fftpack\n\n__all__ = ['LEDDetector']\n\nclass LEDDetector_forloops():\n \"\"\" The LEDDetector class \"\"\"\n\n def __init__(self, ploteverything_=False, verbose_=False, plotfinal_=False):\n self.ploteverything = ploteverything_\n self.verbose = verbose_\n self.plotfinal = plotfinal_\n pass\n\n # ~~~~~~~~~~~~~~~~~~~ Downsample ~~~~~~~~~~~~~~~~~~~~~~~~\n\n def downsample(self, channel, cell_width=20, cell_height=20):\n W = channel.shape[2]\n H = channel.shape[1]\n cell_intensity_data = []\n for x in range(0, W, cell_width):\n for y in range(0, H, cell_height):\n #logger.info('Looking at partition %s %s' % (x, y))\n # look at same partition across multiple images\n average_intensities = []\n for image in channel[:15]: # TODO hardcoded value\n intensities = []\n # loop through pixels in each partition\n for x_coord in range(x, x+cell_width):\n for y_coord in range(y,y+cell_height):\n if x_coord >= W or y_coord >= H:\n continue\n pixel = image[y_coord][x_coord]\n # a simple approach from http://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color\n # could also use matplotlib's rgb_to_hsv?\n # TODO: check if pixel order is RGB or BGR (assuming RGB)\n #intensity = 0.299*pixel[0] + 0.587*pixel[1] + 0.114*pixel[2]\n #intensity = pixel[0] # red channel\n #intensity = pixel[1] # green channel\n intensity = pixel # blue channel\n intensities.append(intensity)\n\n\n average_intensities.append(np.amax(intensities))\n cell_intensity_data.append(average_intensities)\n out = np.array(cell_intensity_data)\n print(1.0*W/cell_width)\n print(1.0*H/cell_height)\n print(1.0*W/cell_width*1.0*H/cell_height)\n # HACK1 \n out = np.reshape(out, [ceil(1.0*W/cell_width), ceil(1.0*H/cell_height), out.shape[1]]) # TODO change the code above later\n #HACK2\n out = np.swapaxes(out, 0,2)\n print(out.shape)\n #plt.imshow(out[:,:,0])\n return (out, [0,0])\n\n # ~~~~~~~~~~~~~~~~~~~ Find local maxima ~~~~~~~~~~~~~~~~~~~~\n\n def detect_peaks(self, image):\n \"\"\"\n Takes an image and detect the peaks usingthe local maximum filter.\n Returns a boolean mask of the peaks (i.e. 
1 when\n the pixel's value is the neighborhood maximum, 0 otherwise)\n \"\"\"\n neighborhood = generate_binary_structure(2,2)\n local_max = maximum_filter(image, 5)==image\n background = (image==0)\n eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)\n detected_peaks = local_max - eroded_background\n return detected_peaks\n\n # ~~~~~~~~~~~~~~~~~~~ Select candidates ~~~~~~~~~~~~~~~~~~~~\n\n def get_candidate_cells(self, cell_values, threshold):\n variance = np.var(cell_values, axis=0)\n peaks_mask = self.detect_peaks(variance)\n threshold_mask = variance>threshold\n if(self.ploteverything):\n plt.figure()\n plt.imshow(variance,cmap=cm.gray, interpolation=\"nearest\")\n plt.title('Variance map')\n plt.figure()\n plt.imshow(peaks_mask*threshold_mask)\n plt.title('Peaks')\n plt.show()\n \n return peaks_mask*threshold_mask\n\n # ~~~~~~~~~~~~~~~~~~~ Detect LEDs ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def detect_led(self,\n images,\n mask,\n frequencies_to_detect,\n min_distance_between_LEDs_pixels):\n\n assert len(images.shape) == 1\n n = images.shape[0]\n if n == 0:\n raise ValueError('No images provided')\n\n timestamps = images['timestamp']\n rgb = images['rgb']\n\n rgb0 = rgb[0]\n if not mask.shape == rgb0.shape:\n raise ValueError('Invalid mask')\n\n if not isinstance(frequencies_to_detect, list):\n raise ValueError(frequencies_to_detect)\n\n if not min_distance_between_LEDs_pixels > 0:\n raise ValueError(min_distance_between_LEDs_pixels)\n\n channel = images['rgb'][:,:,:,0] # just using first channel\n\n cell_width = 15\n cell_height = 15\n\n (cell_vals, crop_offset) = self.downsample(channel, cell_width, cell_height)\n\n candidates_mask = self.get_candidate_cells(cell_vals, 100)\n candidate_cells = [(i,j) for (i,j) in np.ndindex(candidates_mask.shape) if candidates_mask[i,j]]\n\n # Create result object\n result = LEDDetectionArray()\n\n # Detect frequencies and discard non-periodic signals\n # ts_tolerance = 0.2 # unnecessary\n f_tolerance = 0.25\n min_num_periods = 3\n\n for (i,j) in candidate_cells:\n signal = cell_vals[:,i,j]\n signal = signal-np.mean(signal)\n\n zero_crossings = np.where(np.diff(np.sign(signal)))[0]\n zero_crossings_t = timestamps[zero_crossings]\n led_img_coords = Vector2D((0.5+j)*cell_width+crop_offset[1], (0.5+i)*cell_height+crop_offset[0]) \n diffs = [b-a for a, b in zip(zero_crossings_t, zero_crossings_t[1:])]\n \n if(self.verbose):\n logger.info('Coords: %s, %s'% (led_img_coords.x,led_img_coords.y))\n logger.info('Zero crossings: %s'%zero_crossings_t)\n logger.info('Diffs: %s'%diffs)\n logger.info('Zero-crossing measured freq %s'% (0.5/np.mean(diffs)))\n\n if(len(zero_crossings)<min_num_periods):\n if(self.verbose):\n logger.info('Not an LED, discarded\\n')\n continue\n\n # Frequency estimation based on zero crossings - quite bad\n #for f in frequencies_to_detect:\n # if(all(d-ts_tolerance <= 0.5/f <= d+ts_tolerance for d in diffs)):\n # if(self.verbose):\n # logger.info('Confirmed LED with frequency %s\\n'%f)\n # recover coordinates of centroid\n # result.detections.append(LEDDetection(timestamps[0], timestamps[-1],\n # led_img_coords, f, '', -1)) # -1...confidence not implemented\n # break\n\n # Frequency estimation based on FFT\n T = 1.0/30 # TODO expecting 30 fps, but RESAMPLE to be sure\n f = np.linspace(0.0, 1.0/(2.0*T), n/2)\n signal_f = scipy.fftpack.fft(signal)\n y_f = 2.0/n * np.abs(signal_f[:n/2])\n fft_peak_freq = 1.0*np.argmax(y_f)/T/n\n if(self.verbose):\n logger.info('FFT peak frequency: %s'% fft_peak_freq)\n\n # Bin 
frequency into the ones to detect\n freq = [x for x in frequencies_to_detect if abs(x-fft_peak_freq)<f_tolerance]\n if(freq):\n result.detections.append(LEDDetection(rospy.Time.from_sec(timestamps[0]),\n rospy.Time.from_sec(timestamps[-1]), led_img_coords, freq[0], '', -1)) # -1...confidence not implemented\n if(self.verbose):\n logger.info('LED confirmed, frequency: %s'% freq)\n else:\n logger.info('Could not associate frequency, discarding')\n\n print(signal.shape)\n print(timestamps[:15].shape)\n # Plot all signals and FFTs\n if(self.ploteverything):\n fig, ax1 = plt.subplots()\n ax1.plot(timestamps[:15], signal)\n fig, ax2 = plt.subplots()\n ax2.plot(f[:15],y_f)\n plt.show()\n\n plt.imshow(rgb0)\n ax = plt.gca()\n\n font = {'family': 'serif',\n 'color': 'red',\n 'weight': 'bold',\n 'size': 16,\n }\n\n # Plot all results\n if(self.plotfinal):\n for r in result.detections:\n pos = r.pixels_normalized\n ax.add_patch(Rectangle((pos.x-0.5*cell_width, pos.y-0.5*cell_height), cell_width, cell_height, edgecolor=\"red\", linewidth=3, facecolor=\"none\"))\n plt.text(pos.x-0.5*cell_width, pos.y-cell_height, str(r.frequency), fontdict=font)\n\n plt.show()\n\n return result\n\n\n", "#!/usr/bin/env python\n\nimport math\nimport rospy\nimport cv2\n\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Point\nfrom std_srvs.srv import Empty, EmptyResponse\n\nfrom cv_bridge import CvBridge, CvBridgeError\n\nimport numpy as np\n\n# define range of blue color in HSV\nlower_blue = np.array([110,50,50])\nupper_blue = np.array([130,255,255])\nlower_red = np.array([0,0,0])\nupper_red = np.array([0,0,0])\nlower_yellow = np.array([20,150,130])\nupper_yellow = np.array([35,255,255])\nlower_white=np.array([180,0,200])\nupper_white=np.array([180,90,255])\n\n\n\nclass BlobColor():\n\n def __init__(self):\n\n\n #Subscribirce al topico \"/duckiebot/camera_node/image/raw\" para recibir imagen\n self.image_subscriber = rospy.Subscriber(\"/duckiebot/camera_node/image/raw\",Image,self._process_image) \n\n #Clase necesaria para transformar el tipo de imagen\n self.bridge = CvBridge()\n\n #Ultima imagen adquirida\n self.cv_image = Image()\n\n #Area minima\n self.min_area = 30\n\n #Publicar a los topicos \"\n self.pub = rospy.Publisher(\"/duckiebot/patofiltrado\",Image,queue_size=1)\n self.publito = rospy.Publisher(\"/duckiebot/punto\",Point,queue_size=1)\n print(\"explotando en 3, 2, 1...\") \n\n def _process_image(self,img):\n\n #Se cambiar mensage tipo ros a imagen opencv\n try:\n self.cv_image = self.bridge.imgmsg_to_cv2(img, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n #Se deja en frame la imagen actual\n frame = self.cv_image\n\n #Cambiar tipo de color de BGR a HSV\n color_space = cv2.COLOR_BGR2HSV\n image_out = cv2.cvtColor(frame, color_space)\n\n # Filtrar colores de la imagen en el rango utilizando \n mask = cv2.inRange(image_out, lower_white, upper_white)\n\n # Bitwise-AND mask and original image\n #segment_image = cv2.bitwise_and(frame,frame, mask= mask)\n #imga= self.bridge.cv2_to_imgmsg(segment_image, \"bgr8\")\n #self.pub.publish(imga)\n\n kernel = np.ones((5,5),np.uint8)\n\n #Operacion morfologica erode\n img_out = cv2.erode(mask, kernel, iterations = 1)\n \n #Operacion morfologica dilate\n img_out = cv2.dilate(img_out, kernel, iterations = 1)\n\n \n\n image, contours, hierarchy = cv2.findContours(img_out,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n x1=0\n x2=0\n y1=0\n y2=0\n for cnt in contours:\n #Obtener rectangulo\n x,y,w,h = cv2.boundingRect(cnt)\n\n #Filtrar por area minima\n if w*h > 
self.min_area:\n\n\n #Dibujar un rectangulo en la imagen\n x1=x\n y1=y\n x2=x+w\n y2=y+h\n frame=cv2.rectangle(frame, (x1,y1), (x2,y2), (80,20,77), 2)\n #Publicar Point center de mayor tamanio\n puntillo=Point()\n puntillo.x=((x1+x2)/2)\n puntillo.y=((y1+y2)/2)\n #Foco respecto a X fx truncado\n puntillo.z=(310.089*3.5/w)\n #foco respecto a Y fy truncado\n #puntillo.z=(309.71*3.5/sqrt(y1^2+y2^2))\n self.publito.publish(puntillo)\n \n #Publicar frame\n #imagesita=cv2.cvtColor(rectangle,cv2.COLOR_GRAY2BGR)\n imgb= self.bridge.cv2_to_imgmsg(frame, \"bgr8\")\n self.pub.publish(imgb)\n \ndef main():\n\n rospy.init_node('BlobColor')\n\n BlobColor()\n\n rospy.spin()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.mean", "numpy.sign", "matplotlib.patches.Rectangle", "numpy.ndindex", "matplotlib.pyplot.subplots", "numpy.swapaxes", "numpy.argmax", "matplotlib.pyplot.gca", "numpy.array", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "scipy.ndimage.filters.maximum_filter", "numpy.amax", "matplotlib.pyplot.show", "scipy.ndimage.morphology.binary_erosion", "scipy.ndimage.morphology.generate_binary_structure", "numpy.abs", "numpy.linspace", "numpy.var", "matplotlib.pyplot.imshow" ], [ "numpy.array", "numpy.ones" ] ]
solapark/frcnn_keras_original
[ "3561d1de18f41868efc9cec927761613d75a5dc3" ]
[ "utils.py" ]
[ "import cv2\nimport numpy as np\n\ndef get_concat_img(img_list, cols=3):\n rows = int(len(img_list)/cols)\n hor_imgs = [np.hstack(img_list[i*cols:(i+1)*cols]) for i in range(rows)]\n ver_imgs = np.vstack(hor_imgs)\n return ver_imgs\n\ndef draw_box(image, box, color = (0, 255, 0)):\n x1, y1, x2, y2 = box\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\n return image \n\ndef intersection(ai, bi):\n x = max(ai[0], bi[0])\n y = max(ai[1], bi[1])\n w = min(ai[2], bi[2]) - x\n h = min(ai[3], bi[3]) - y\n if w < 0 or h < 0:\n return 0\n return w*h\n\ndef iou(a, b):\n # a and b should be (x1,y1,x2,y2)\n if a[0] >= a[2] or a[1] >= a[3] or b[0] >= b[2] or b[1] >= b[3]:\n return 0.0\n\n area_i = intersection(a, b)\n area_u = union(a, b, area_i)\n\n return float(area_i) / float(area_u + 1e-6)\n\ndef union(au, bu, area_intersection):\n area_a = (au[2] - au[0]) * (au[3] - au[1])\n area_b = (bu[2] - bu[0]) * (bu[3] - bu[1])\n area_union = area_a + area_b - area_intersection\n return area_union\n\ndef get_file_list_from_dir(dir_path, exp='', is_full_path = True, file_form = ''):\n #use : get_file_list_from_dir(\"/home/sap\", \"png\")\n file_form = dir_path + '/*' \n if(exp):\n file_form += '.'+exp \n file_list = glob.glob(file_form)\n if not is_full_path :\n new_file_list = [get_name_from_path(cur_file) for cur_file in file_list]\n file_list = new_file_list\n return file_list\n\ndef calc_emb_dist(embs1, embs2) : \n '''\n calc emb dist for last axis\n Args :\n embs1 and embs2 have same shape (ex : (2, 3, 4))\n Return :\n dist for last axis (ex : (2, 3))\n '''\n return np.sqrt(np.sum(np.square(embs1 - embs2), -1)) \n\ndef get_min_emb_dist_idx(emb, embs, thresh = np.zeros(0), is_want_dist = 0): \n '''\n Args :\n emb (shape : m, n)\n embs (shape : m, k, n)\n thresh_dist : lower thersh. throw away too small dist (shape : m, )\n Return :\n min_dist_idx (shape : m, 1)\n '''\n emb_ref = emb[:, np.newaxis, :]\n dist = calc_emb_dist(emb_ref, embs) #(m, k)\n\n if(thresh.size) : \n thresh = thresh[:, np.newaxis] #(m, 1)\n dist[dist<=thresh] = np.inf \n min_dist_idx = np.argmin(dist, 1) #(m, )\n if(is_want_dist):\n min_dist = dist[np.arange(len(dist)), min_dist_idx]\n return min_dist_idx, min_dist\n return min_dist_idx\n\ndef get_new_img_size(width, height, img_min_side=300):\n if width <= height:\n f = float(img_min_side) / width\n resized_height = int(f * height)\n resized_width = img_min_side\n else:\n f = float(img_min_side) / height\n resized_width = int(f * width)\n resized_height = img_min_side\n return resized_width, resized_height\n\n\n" ]
[ [ "numpy.square", "numpy.zeros", "numpy.argmin", "numpy.hstack", "numpy.vstack" ] ]
qxzcode/aoc_2019
[ "5a6ae570d4ec62a1e05456b58562cb05d1c10f71" ]
[ "08/second.py" ]
[ "import sys # argv\nimport numpy as np\n\n\n# load the input file\nwith open(sys.argv[1]) as f:\n arr = np.array([int(d) for d in f.read().strip()])\n\nwidth = int(sys.argv[2])\nheight = int(sys.argv[3])\narr = arr.reshape(-1, height, width)\n\nfirst_non2 = np.vectorize(lambda arr: arr[np.where(arr != 2)[0][0]], signature='(n)->()')\nimage = np.apply_along_axis(first_non2, 0, arr)\nfor row in image:\n print(''.join([' ','##'][v] for v in row))\n" ]
[ [ "numpy.where", "numpy.apply_along_axis" ] ]
mathcbc/nn_robust_attacks
[ "5c80091dcf2b80d6d22af8e5e1b103218c36e889" ]
[ "setup_inception.py" ]
[ "## Modified by Nicholas Carlini to match model structure for attack code.\n## Original copyright license follows.\n\n\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Simple image classification with Inception.\n\nRun image classification with Inception trained on ImageNet 2012 Challenge data\nset.\n\nThis program creates a graph from a saved GraphDef protocol buffer,\nand runs inference on an input JPEG image. It outputs human readable\nstrings of the top 5 predictions along with their probabilities.\n\nChange the --image_file argument to any jpg image to compute a\nclassification of that image.\n\nPlease see the tutorial and website for a detailed description of how\nto use this script to perform image recognition.\n\nhttps://tensorflow.org/tutorials/image_recognition/\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport re\nimport sys\nimport tarfile\nimport scipy.misc\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\n# classify_image_graph_def.pb:\n# Binary representation of the GraphDef protocol buffer.\n# imagenet_synset_to_human_label_map.txt:\n# Map from synset ID to a human readable string.\n# imagenet_2012_challenge_label_map_proto.pbtxt:\n# Text representation of a protocol buffer mapping a label to synset ID.\ntf.app.flags.DEFINE_string(\n 'model_dir', 'tmp/imagenet',\n \"\"\"Path to classify_image_graph_def.pb, \"\"\"\n \"\"\"imagenet_synset_to_human_label_map.txt, and \"\"\"\n \"\"\"imagenet_2012_challenge_label_map_proto.pbtxt.\"\"\")\ntf.app.flags.DEFINE_string('image_file', '',\n \"\"\"Absolute path to image file.\"\"\")\ntf.app.flags.DEFINE_integer('num_top_predictions', 5,\n \"\"\"Display this many predictions.\"\"\")\n\n# pylint: disable=line-too-long\nDATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n# pylint: enable=line-too-long\n\n\nclass NodeLookup(object):\n \"\"\"Converts integer node ID's to human readable labels.\"\"\"\n\n def __init__(self,\n label_lookup_path=None,\n uid_lookup_path=None):\n if not label_lookup_path:\n label_lookup_path = os.path.join(\n FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')\n if not uid_lookup_path:\n uid_lookup_path = os.path.join(\n FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')\n self.node_lookup = self.load(label_lookup_path, uid_lookup_path)\n\n def load(self, label_lookup_path, uid_lookup_path):\n \"\"\"Loads a human readable English name for each softmax node.\n\n Args:\n label_lookup_path: string UID to integer node ID.\n uid_lookup_path: string UID to human-readable string.\n\n Returns:\n dict from integer node ID to human-readable string.\n \"\"\"\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', 
uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name\n\n def id_to_string(self, node_id):\n if node_id not in self.node_lookup:\n return ''\n return self.node_lookup[node_id]\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n #for line in repr(graph_def).split(\"\\n\"):\n # if \"tensor_content\" not in line:\n # print(line)\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef run_inference_on_image(image):\n \"\"\"Runs inference on an image.\n\n Args:\n image: Image file name.\n\n Returns:\n Nothing\n \"\"\"\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n # Creates graph from saved GraphDef.\n create_graph()\n\n with tf.Session() as sess:\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n #softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n img = tf.placeholder(tf.uint8, (299,299,3))\n softmax_tensor = tf.import_graph_def(\n sess.graph.as_graph_def(),\n input_map={'DecodeJpeg:0': tf.reshape(img,((299,299,3)))},\n return_elements=['softmax/logits:0'])\n\n dat = scipy.misc.imresize(scipy.misc.imread(image),(299,299))\n predictions = sess.run(softmax_tensor,\n {img: dat})\n\n predictions = np.squeeze(predictions)\n\n # Creates node ID --> English string lookup.\n node_lookup = NodeLookup()\n\n top_k = predictions.argsort()#[-FLAGS.num_top_predictions:][::-1]\n for node_id in top_k:\n print('id',node_id)\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n print('%s (score = %.5f)' % (human_string, score))\n\nCREATED_GRAPH = False\nclass InceptionModel:\n image_size = 299\n num_labels = 1008\n num_channels = 3\n def __init__(self, sess):\n global CREATED_GRAPH\n self.sess = sess\n if not CREATED_GRAPH:\n create_graph()\n CREATED_GRAPH = True\n\n def 
predict(self, img):\n scaled = (0.5+tf.reshape(img,((299,299,3))))*255\n softmax_tensor = tf.import_graph_def(\n self.sess.graph.as_graph_def(),\n input_map={'Cast:0': scaled},\n return_elements=['softmax/logits:0'])\n return softmax_tensor[0]\n \n\ndef maybe_download_and_extract():\n \"\"\"Download and extract model tar file.\"\"\"\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef main(_):\n maybe_download_and_extract()\n image = (FLAGS.image_file if FLAGS.image_file else\n os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))\n run_inference_on_image(image)\n\n\ndef readimg(ff):\n f = \"../imagenetdata/imgs/\"+ff\n img = np.array(scipy.misc.imresize(scipy.misc.imread(f),(299,299)),dtype=np.float32)/255-.5\n if img.shape != (299, 299, 3):\n return None\n return [img, int(ff.split(\".\")[0])]\n\nclass ImageNet:\n def __init__(self):\n from multiprocessing import Pool\n pool = Pool(8)\n r = pool.map(readimg, os.listdir(\"../imagenetdata/imgs/\")[:200])\n r = [x for x in r if x != None]\n test_data, test_labels = zip(*r)\n self.test_data = np.array(test_data)\n self.test_labels = np.zeros((len(test_labels), 1008))\n self.test_labels[np.arange(len(test_labels)), test_labels] = 1\n\n \n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "numpy.array", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_string", "tensorflow.GraphDef", "tensorflow.gfile.Exists", "tensorflow.import_graph_def", "tensorflow.Session", "tensorflow.reshape", "tensorflow.gfile.GFile", "tensorflow.placeholder", "tensorflow.gfile.FastGFile", "tensorflow.logging.fatal", "tensorflow.app.run", "numpy.squeeze" ] ]
odidev/cmdstanpy
[ "49f00baff21bfd11541b3c98a5f2fb36e6b7d9ce" ]
[ "cmdstanpy/cmdstan_args.py" ]
[ "\"\"\"\nCmdStan arguments\n\"\"\"\nimport logging\nimport os\nfrom enum import Enum, auto\nfrom numbers import Integral, Real\nfrom time import time\nfrom typing import List, Union\n\nfrom numpy.random import RandomState\n\nfrom cmdstanpy.utils import (\n cmdstan_path,\n cmdstan_version_at,\n get_logger,\n read_metric,\n)\n\n\nclass Method(Enum):\n \"\"\"Supported CmdStan method names.\"\"\"\n\n SAMPLE = auto()\n OPTIMIZE = auto()\n GENERATE_QUANTITIES = auto()\n VARIATIONAL = auto()\n\n def __repr__(self):\n return '<%s.%s>' % (self.__class__.__name__, self.name)\n\n\nclass SamplerArgs:\n \"\"\"Arguments for the NUTS adaptive sampler.\"\"\"\n\n def __init__(\n self,\n iter_warmup: int = None,\n iter_sampling: int = None,\n save_warmup: bool = False,\n thin: int = None,\n max_treedepth: int = None,\n metric: Union[str, List[str]] = None,\n step_size: Union[float, List[float]] = None,\n adapt_engaged: bool = True,\n adapt_delta: float = None,\n adapt_init_phase: int = None,\n adapt_metric_window: int = None,\n adapt_step_size: int = None,\n fixed_param: bool = False,\n ) -> None:\n \"\"\"Initialize object.\"\"\"\n self.iter_warmup = iter_warmup\n self.iter_sampling = iter_sampling\n self.save_warmup = save_warmup\n self.thin = thin\n self.max_treedepth = max_treedepth\n self.metric = metric\n self.metric_file = None\n self.step_size = step_size\n self.adapt_engaged = adapt_engaged\n self.adapt_delta = adapt_delta\n self.adapt_init_phase = adapt_init_phase\n self.adapt_metric_window = adapt_metric_window\n self.adapt_step_size = adapt_step_size\n self.fixed_param = fixed_param\n self.diagnostic_file = None\n\n def validate(self, chains: int) -> None:\n \"\"\"\n Check arguments correctness and consistency.\n\n * adaptation and warmup args are consistent\n * if file(s) for metric are supplied, check contents.\n * length of per-chain lists equals specified # of chains\n \"\"\"\n if not isinstance(chains, Integral) or chains < 1:\n raise ValueError(\n 'sampler expects number of chains to be greater than 0'\n )\n if not (\n self.adapt_delta is None\n and self.adapt_init_phase is None\n and self.adapt_metric_window is None\n and self.adapt_step_size is None\n ):\n if self.adapt_engaged is False:\n msg = 'conflicting arguments: adapt_engaged: False'\n if self.adapt_delta is not None:\n msg = '{}, adapt_delta: {}'.format(msg, self.adapt_delta)\n if self.adapt_init_phase is not None:\n msg = '{}, adapt_init_phase: {}'.format(\n msg, self.adapt_init_phase\n )\n if self.adapt_metric_window is not None:\n msg = '{}, adapt_metric_window: {}'.format(\n msg, self.adapt_metric_window\n )\n if self.adapt_step_size is not None:\n msg = '{}, adapt_step_size: {}'.format(\n msg, self.adapt_step_size\n )\n raise ValueError(msg)\n\n if self.iter_warmup is not None:\n if self.iter_warmup < 0 or not isinstance(\n self.iter_warmup, Integral\n ):\n raise ValueError(\n 'iter_warmup must be a non-negative integer,'\n ' found {}'.format(self.iter_warmup)\n )\n if self.iter_warmup > 0 and not self.adapt_engaged:\n raise ValueError(\n 'adapt_engaged is False, cannot specify warmup iterations'\n )\n if self.iter_sampling is not None:\n if self.iter_sampling < 0 or not isinstance(\n self.iter_sampling, Integral\n ):\n raise ValueError(\n 'iter_sampling must be a non-negative integer,'\n ' found {}'.format(self.iter_sampling)\n )\n if self.thin is not None:\n if self.thin < 1 or not isinstance(self.thin, Integral):\n raise ValueError(\n 'thin must be a positive integer,'\n 'found {}'.format(self.thin)\n )\n if 
self.max_treedepth is not None:\n if self.max_treedepth < 1 or not isinstance(\n self.max_treedepth, Integral\n ):\n raise ValueError(\n 'max_treedepth must be a positive integer,'\n ' found {}'.format(self.max_treedepth)\n )\n if self.step_size is not None:\n if isinstance(self.step_size, Real):\n if self.step_size <= 0:\n raise ValueError(\n 'step_size must be > 0, found {}'.format(self.step_size)\n )\n else:\n if len(self.step_size) != chains:\n raise ValueError(\n 'number of step_sizes must match number of chains,'\n ' found {} step_sizes for {} chains'.format(\n len(self.step_size), chains\n )\n )\n for step_size in self.step_size:\n if step_size < 0:\n raise ValueError(\n 'step_size must be > 0, found {}'.format(step_size)\n )\n if self.metric is not None:\n dims = []\n if isinstance(self.metric, str):\n if self.metric in ['diag', 'diag_e']:\n self.metric = 'diag_e'\n elif self.metric in ['dense', 'dense_e']:\n self.metric = 'dense_e'\n else:\n if not os.path.exists(self.metric):\n raise ValueError('no such file {}'.format(self.metric))\n dims = read_metric(self.metric)\n elif isinstance(self.metric, (list, tuple)):\n if len(self.metric) != chains:\n raise ValueError(\n 'number of metric files must match number of chains,'\n ' found {} metric files for {} chains'.format(\n len(self.metric), chains\n )\n )\n names_set = set(self.metric)\n if len(names_set) != len(self.metric):\n raise ValueError(\n 'each chain must have its own metric file,'\n ' found duplicates in metric files list.'\n )\n for i, metric in enumerate(self.metric):\n if not os.path.exists(metric):\n raise ValueError('no such file {}'.format(metric))\n if i == 0:\n dims = read_metric(metric)\n else:\n dims2 = read_metric(metric)\n if len(dims) != len(dims2):\n raise ValueError(\n 'metrics files {}, {},'\n ' inconsistent metrics'.format(\n self.metric[0], metric\n )\n )\n for dim, dim2 in zip(dims, dims2):\n if dim != dim2:\n raise ValueError(\n 'metrics files {}, {},'\n ' inconsistent metrics'.format(\n self.metric[0], metric\n )\n )\n if any(dims):\n if len(dims) > 2 or (len(dims) == 2 and dims[0] != dims[1]):\n raise ValueError('bad metric specifiation')\n self.metric_file = self.metric\n if len(dims) == 1:\n self.metric = 'diag_e'\n elif len(dims) == 2:\n self.metric = 'dense_e'\n if self.adapt_delta is not None:\n if not 0 < self.adapt_delta < 1:\n raise ValueError(\n 'adapt_delta must be between 0 and 1,'\n ' found {}'.format(self.adapt_delta)\n )\n if self.adapt_init_phase is not None:\n if self.adapt_init_phase < 0 or not isinstance(\n self.adapt_init_phase, Integral\n ):\n raise ValueError(\n 'adapt_init_phase must be a non-negative integer,'\n 'found {}'.format(self.adapt_init_phase)\n )\n if self.adapt_metric_window is not None:\n if self.adapt_metric_window < 0 or not isinstance(\n self.adapt_metric_window, Integral\n ):\n raise ValueError(\n 'adapt_metric_window must be a non-negative integer,'\n 'found {}'.format(self.adapt_metric_window)\n )\n if self.adapt_step_size is not None:\n if self.adapt_step_size < 0 or not isinstance(\n self.adapt_step_size, Integral\n ):\n raise ValueError(\n 'adapt_step_size must be a non-negative integer,'\n 'found {}'.format(self.adapt_step_size)\n )\n\n if self.fixed_param and (\n (self.iter_warmup is not None and self.iter_warmup > 0)\n or self.save_warmup\n or self.max_treedepth is not None\n or self.metric is not None\n or self.step_size is not None\n or not (\n self.adapt_delta is None\n and self.adapt_init_phase is None\n and self.adapt_metric_window is None\n and 
self.adapt_step_size is None\n )\n ):\n raise ValueError(\n 'when fixed_param=True, cannot specify warmup'\n ' or adaptation parameters.'\n )\n\n def compose(self, idx: int, cmd: List) -> str:\n \"\"\"\n Compose CmdStan command for method-specific non-default arguments.\n \"\"\"\n cmd.append('method=sample')\n if self.iter_sampling is not None:\n cmd.append('num_samples={}'.format(self.iter_sampling))\n if self.iter_warmup is not None:\n cmd.append('num_warmup={}'.format(self.iter_warmup))\n if self.save_warmup:\n cmd.append('save_warmup=1')\n if self.thin is not None:\n cmd.append('thin={}'.format(self.thin))\n if self.fixed_param:\n cmd.append('algorithm=fixed_param')\n return cmd\n else:\n cmd.append('algorithm=hmc')\n if self.max_treedepth is not None:\n cmd.append('engine=nuts')\n cmd.append('max_depth={}'.format(self.max_treedepth))\n if self.step_size is not None:\n if not isinstance(self.step_size, list):\n cmd.append('stepsize={}'.format(self.step_size))\n else:\n cmd.append('stepsize={}'.format(self.step_size[idx]))\n if self.metric is not None:\n cmd.append('metric={}'.format(self.metric))\n if self.metric_file is not None:\n if not isinstance(self.metric_file, list):\n cmd.append('metric_file={}'.format(self.metric_file))\n else:\n cmd.append('metric_file={}'.format(self.metric_file[idx]))\n cmd.append('adapt')\n if self.adapt_engaged:\n cmd.append('engaged=1')\n else:\n cmd.append('engaged=0')\n if self.adapt_delta is not None:\n cmd.append('delta={}'.format(self.adapt_delta))\n if self.adapt_init_phase is not None:\n cmd.append('init_buffer={}'.format(self.adapt_init_phase))\n if self.adapt_metric_window is not None:\n cmd.append('window={}'.format(self.adapt_metric_window))\n if self.adapt_step_size is not None:\n cmd.append('term_buffer={}'.format(self.adapt_step_size))\n\n return cmd\n\n\nclass OptimizeArgs:\n \"\"\"Container for arguments for the optimizer.\"\"\"\n\n OPTIMIZE_ALGOS = {'BFGS', 'bfgs', 'LBFGS', 'lbfgs', 'Newton', 'newton'}\n\n def __init__(\n self,\n algorithm: str = None,\n init_alpha: Real = None,\n iter: int = None,\n tol_obj: Real = None,\n tol_rel_obj: Real = None,\n tol_grad: Real = None,\n tol_rel_grad: Real = None,\n tol_param: Real = None,\n history_size: int = None,\n ) -> None:\n\n self.algorithm = algorithm\n self.init_alpha = init_alpha\n self.iter = iter\n self.tol_obj = tol_obj\n self.tol_rel_obj = tol_rel_obj\n self.tol_grad = tol_grad\n self.tol_rel_grad = tol_rel_grad\n self.tol_param = tol_param\n self.history_size = history_size\n\n def validate(self, chains=None) -> None: # pylint: disable=unused-argument\n \"\"\"\n Check arguments correctness and consistency.\n \"\"\"\n if (\n self.algorithm is not None\n and self.algorithm not in self.OPTIMIZE_ALGOS\n ):\n raise ValueError(\n 'Please specify optimizer algorithms as one of [{}]'.format(\n ', '.join(self.OPTIMIZE_ALGOS)\n )\n )\n\n if self.init_alpha is not None:\n if self.algorithm == 'Newton':\n raise ValueError(\n 'init_alpha must not be set when algorithm is Newton'\n )\n if isinstance(self.init_alpha, Real):\n if self.init_alpha <= 0:\n raise ValueError('init_alpha must be greater than 0')\n else:\n raise ValueError('init_alpha must be type of float')\n\n if self.iter is not None:\n if isinstance(self.iter, Integral):\n if self.iter < 0:\n raise ValueError('iter must be greater than 0')\n else:\n raise ValueError('iter must be type of int')\n\n if self.tol_obj is not None:\n if self.algorithm == 'Newton':\n raise ValueError(\n 'tol_obj must not be set when algorithm is Newton'\n 
)\n if isinstance(self.tol_obj, Real):\n if self.tol_obj <= 0:\n raise ValueError('tol_obj must be greater than 0')\n else:\n raise ValueError('tol_obj must be type of float')\n\n if self.tol_rel_obj is not None:\n if self.algorithm == 'Newton':\n raise ValueError(\n 'tol_rel_obj must not be set when algorithm is Newton'\n )\n if isinstance(self.tol_rel_obj, Real):\n if self.tol_rel_obj <= 0:\n raise ValueError('tol_rel_obj must be greater than 0')\n else:\n raise ValueError('tol_rel_obj must be type of float')\n\n if self.tol_grad is not None:\n if self.algorithm == 'Newton':\n raise ValueError(\n 'tol_grad must not be set when algorithm is Newton'\n )\n if isinstance(self.tol_grad, Real):\n if self.tol_grad <= 0:\n raise ValueError('tol_grad must be greater than 0')\n else:\n raise ValueError('tol_grad must be type of float')\n\n if self.tol_rel_grad is not None:\n if self.algorithm == 'Newton':\n raise ValueError(\n 'tol_rel_grad must not be set when algorithm is Newton'\n )\n if isinstance(self.tol_rel_grad, Real):\n if self.tol_rel_grad <= 0:\n raise ValueError('tol_rel_grad must be greater than 0')\n else:\n raise ValueError('tol_rel_grad must be type of float')\n\n if self.tol_param is not None:\n if self.algorithm == 'Newton':\n raise ValueError(\n 'tol_param must not be set when algorithm is Newton'\n )\n if isinstance(self.tol_param, Real):\n if self.tol_param <= 0:\n raise ValueError('tol_param must be greater than 0')\n else:\n raise ValueError('tol_param must be type of float')\n\n if self.history_size is not None:\n if self.algorithm == 'Newton' or self.algorithm == 'BFGS':\n raise ValueError(\n 'history_size must not be set when algorithm is '\n 'Newton or BFGS'\n )\n if isinstance(self.history_size, Integral):\n if self.history_size < 0:\n raise ValueError('history_size must be greater than 0')\n else:\n raise ValueError('history_size must be type of int')\n\n # pylint: disable=unused-argument\n def compose(self, idx: int, cmd: List) -> str:\n \"\"\"compose command string for CmdStan for non-default arg values.\"\"\"\n cmd.append('method=optimize')\n if self.algorithm:\n cmd.append('algorithm={}'.format(self.algorithm.lower()))\n if self.init_alpha is not None:\n cmd.append('init_alpha={}'.format(self.init_alpha))\n if self.tol_obj is not None:\n cmd.append('tol_obj={}'.format(self.tol_obj))\n if self.tol_rel_obj is not None:\n cmd.append('tol_rel_obj={}'.format(self.tol_rel_obj))\n if self.tol_grad is not None:\n cmd.append('tol_grad={}'.format(self.tol_grad))\n if self.tol_rel_grad is not None:\n cmd.append('tol_rel_grad={}'.format(self.tol_rel_grad))\n if self.tol_param is not None:\n cmd.append('tol_param={}'.format(self.tol_param))\n if self.history_size is not None:\n cmd.append('history_size={}'.format(self.history_size))\n if self.iter is not None:\n cmd.append('iter={}'.format(self.iter))\n\n return cmd\n\n\nclass GenerateQuantitiesArgs:\n \"\"\"Arguments needed for generate_quantities method.\"\"\"\n\n def __init__(self, csv_files: List[str]) -> None:\n \"\"\"Initialize object.\"\"\"\n self.sample_csv_files = csv_files\n\n def validate(self, chains: int) -> None: # pylint: disable=unused-argument\n \"\"\"\n Check arguments correctness and consistency.\n\n * check that sample csv files exist\n \"\"\"\n for csv in self.sample_csv_files:\n if not os.path.exists(csv):\n raise ValueError(\n 'Invalid path for sample csv file: {}'.format(csv)\n )\n\n def compose(self, idx: int, cmd: List) -> str:\n \"\"\"\n Compose CmdStan command for method-specific non-default 
arguments.\n \"\"\"\n cmd.append('method=generate_quantities')\n cmd.append('fitted_params={}'.format(self.sample_csv_files[idx]))\n return cmd\n\n\nclass VariationalArgs:\n \"\"\"Arguments needed for variational method.\"\"\"\n\n VARIATIONAL_ALGOS = {'meanfield', 'fullrank'}\n\n def __init__(\n self,\n algorithm: str = None,\n iter: int = None,\n grad_samples: int = None,\n elbo_samples: int = None,\n eta: Real = None,\n adapt_iter: int = None,\n adapt_engaged: bool = True,\n tol_rel_obj: Real = None,\n eval_elbo: int = None,\n output_samples: int = None,\n ) -> None:\n self.algorithm = algorithm\n self.iter = iter\n self.grad_samples = grad_samples\n self.elbo_samples = elbo_samples\n self.eta = eta\n self.adapt_iter = adapt_iter\n self.adapt_engaged = adapt_engaged\n self.tol_rel_obj = tol_rel_obj\n self.eval_elbo = eval_elbo\n self.output_samples = output_samples\n\n def validate(self, chains=None) -> None: # pylint: disable=unused-argument\n \"\"\"\n Check arguments correctness and consistency.\n \"\"\"\n if (\n self.algorithm is not None\n and self.algorithm not in self.VARIATIONAL_ALGOS\n ):\n raise ValueError(\n 'Please specify variational algorithms as one of [{}]'.format(\n ', '.join(self.VARIATIONAL_ALGOS)\n )\n )\n if self.iter is not None:\n if self.iter < 1 or not isinstance(self.iter, Integral):\n raise ValueError(\n 'iter must be a positive integer,'\n ' found {}'.format(self.iter)\n )\n if self.grad_samples is not None:\n if self.grad_samples < 1 or not isinstance(\n self.grad_samples, Integral\n ):\n raise ValueError(\n 'grad_samples must be a positive integer,'\n ' found {}'.format(self.grad_samples)\n )\n if self.elbo_samples is not None:\n if self.elbo_samples < 1 or not isinstance(\n self.elbo_samples, Integral\n ):\n raise ValueError(\n 'elbo_samples must be a positive integer,'\n ' found {}'.format(self.elbo_samples)\n )\n if self.eta is not None:\n if self.eta < 0 or not isinstance(self.eta, (Integral, Real)):\n raise ValueError(\n 'eta must be a non-negative number,'\n ' found {}'.format(self.eta)\n )\n if self.adapt_iter is not None:\n if self.adapt_iter < 1 or not isinstance(self.adapt_iter, Integral):\n raise ValueError(\n 'adapt_iter must be a positive integer,'\n ' found {}'.format(self.adapt_iter)\n )\n if self.tol_rel_obj is not None:\n if self.tol_rel_obj <= 0 or not isinstance(\n self.tol_rel_obj, (Integral, Real)\n ):\n raise ValueError(\n 'tol_rel_obj must be a positive number,'\n ' found {}'.format(self.tol_rel_obj)\n )\n if self.eval_elbo is not None:\n if self.eval_elbo < 1 or not isinstance(self.eval_elbo, Integral):\n raise ValueError(\n 'eval_elbo must be a positive integer,'\n ' found {}'.format(self.eval_elbo)\n )\n if self.output_samples is not None:\n if self.output_samples < 1 or not isinstance(\n self.output_samples, Integral\n ):\n raise ValueError(\n 'output_samples must be a positive integer,'\n ' found {}'.format(self.output_samples)\n )\n\n # pylint: disable=unused-argument\n def compose(self, idx: int, cmd: List) -> str:\n \"\"\"\n Compose CmdStan command for method-specific non-default arguments.\n \"\"\"\n cmd.append('method=variational')\n if self.algorithm is not None:\n cmd.append('algorithm={}'.format(self.algorithm))\n if self.iter is not None:\n cmd.append('iter={}'.format(self.iter))\n if self.grad_samples is not None:\n cmd.append('grad_samples={}'.format(self.grad_samples))\n if self.elbo_samples is not None:\n cmd.append('elbo_samples={}'.format(self.elbo_samples))\n if self.eta is not None:\n 
cmd.append('eta={}'.format(self.eta))\n cmd.append('adapt')\n if self.adapt_engaged:\n cmd.append('engaged=1')\n if self.adapt_iter is not None:\n cmd.append('iter={}'.format(self.adapt_iter))\n else:\n cmd.append('engaged=0')\n if self.tol_rel_obj is not None:\n cmd.append('tol_rel_obj={}'.format(self.tol_rel_obj))\n if self.eval_elbo is not None:\n cmd.append('eval_elbo={}'.format(self.eval_elbo))\n if self.output_samples is not None:\n cmd.append('output_samples={}'.format(self.output_samples))\n return cmd\n\n\nclass CmdStanArgs:\n \"\"\"\n Container for CmdStan command line arguments.\n Consists of arguments common to all methods and\n and an object which contains the method-specific arguments.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n model_exe: str,\n chain_ids: Union[List[int], None],\n method_args: Union[\n SamplerArgs, OptimizeArgs, GenerateQuantitiesArgs, VariationalArgs\n ],\n data: Union[str, dict] = None,\n seed: Union[int, List[int]] = None,\n inits: Union[int, float, str, List[str]] = None,\n output_dir: str = None,\n sig_figs: str = None,\n save_diagnostics: bool = False,\n save_profile: bool = False,\n refresh: int = None,\n logger: logging.Logger = None,\n ) -> None:\n \"\"\"Initialize object.\"\"\"\n self.model_name = model_name\n self.model_exe = model_exe\n self.chain_ids = chain_ids\n self.data = data\n self.seed = seed\n self.inits = inits\n self.output_dir = output_dir\n self.sig_figs = sig_figs\n self.save_diagnostics = save_diagnostics\n self.save_profile = save_profile\n self.refresh = refresh\n self.method_args = method_args\n if isinstance(method_args, SamplerArgs):\n self.method = Method.SAMPLE\n elif isinstance(method_args, OptimizeArgs):\n self.method = Method.OPTIMIZE\n elif isinstance(method_args, GenerateQuantitiesArgs):\n self.method = Method.GENERATE_QUANTITIES\n elif isinstance(method_args, VariationalArgs):\n self.method = Method.VARIATIONAL\n self.method_args.validate(len(chain_ids) if chain_ids else None)\n self._logger = logger or get_logger()\n self.validate()\n\n def validate(self) -> None:\n \"\"\"\n Check arguments correctness and consistency.\n\n * input files must exist\n * output files must be in a writeable directory\n * if no seed specified, set random seed.\n * length of per-chain lists equals specified # of chains\n \"\"\"\n if self.model_name is None:\n raise ValueError('no stan model specified')\n if self.model_exe is None:\n raise ValueError('model not compiled')\n\n if self.chain_ids is not None:\n for chain_id in self.chain_ids:\n if chain_id < 1:\n raise ValueError('invalid chain_id {}'.format(chain_id))\n if self.output_dir is not None:\n self.output_dir = os.path.realpath(\n os.path.expanduser(self.output_dir)\n )\n if not os.path.exists(self.output_dir):\n try:\n os.makedirs(self.output_dir)\n self._logger.info(\n 'created output directory: %s', self.output_dir\n )\n except (RuntimeError, PermissionError) as exc:\n raise ValueError(\n 'invalid path for output files, no such dir: {}'.format(\n self.output_dir\n )\n ) from exc\n if not os.path.isdir(self.output_dir):\n raise ValueError(\n 'specified output_dir not a directory: {}'.format(\n self.output_dir\n )\n )\n try:\n testpath = os.path.join(self.output_dir, str(time()))\n with open(testpath, 'w+'):\n pass\n os.remove(testpath) # cleanup\n except Exception as exc:\n raise ValueError(\n 'invalid path for output files,'\n ' cannot write to dir: {}'.format(self.output_dir)\n ) from exc\n if self.refresh is not None:\n if not isinstance(self.refresh, int) or 
self.refresh < 1:\n raise ValueError(\n 'Argument refresh must be a positive integer value, '\n 'found {}.'.format(self.refresh)\n )\n\n if self.sig_figs is not None:\n if (\n not isinstance(self.sig_figs, int)\n or self.sig_figs < 1\n or self.sig_figs > 18\n ):\n raise ValueError(\n 'sig_figs must be an integer between 1 and 18,'\n ' found {}'.format(self.sig_figs)\n )\n if not cmdstan_version_at(2, 25):\n self.sig_figs = None\n self._logger.warning(\n 'arg sig_figs not valid, CmdStan version must be 2.25 '\n 'or higher, using verson %s in directory %s',\n os.path.basename(cmdstan_path()),\n os.path.dirname(cmdstan_path()),\n )\n\n if self.seed is None:\n rng = RandomState()\n self.seed = rng.randint(1, 99999 + 1)\n else:\n if not isinstance(self.seed, (int, list)):\n raise ValueError(\n 'seed must be an integer between 0 and 2**32-1,'\n ' found {}'.format(self.seed)\n )\n if isinstance(self.seed, int):\n if self.seed < 0 or self.seed > 2 ** 32 - 1:\n raise ValueError(\n 'seed must be an integer between 0 and 2**32-1,'\n ' found {}'.format(self.seed)\n )\n else:\n if self.chain_ids is None:\n raise ValueError(\n 'seed must not be a list when no chains used'\n )\n\n if len(self.seed) != len(self.chain_ids):\n raise ValueError(\n 'number of seeds must match number of chains,'\n ' found {} seed for {} chains '.format(\n len(self.seed), len(self.chain_ids)\n )\n )\n for seed in self.seed:\n if seed < 0 or seed > 2 ** 32 - 1:\n raise ValueError(\n 'seed must be an integer value'\n ' between 0 and 2**32-1,'\n ' found {}'.format(seed)\n )\n\n if isinstance(self.data, str):\n if not os.path.exists(self.data):\n raise ValueError('no such file {}'.format(self.data))\n elif self.data is not None and not isinstance(self.data, (str, dict)):\n raise ValueError('data must be string or dict')\n\n if self.inits is not None:\n if isinstance(self.inits, (Integral, Real)):\n if self.inits < 0:\n raise ValueError(\n 'inits must be > 0, found {}'.format(self.inits)\n )\n elif isinstance(self.inits, str):\n if not os.path.exists(self.inits):\n raise ValueError('no such file {}'.format(self.inits))\n elif isinstance(self.inits, list):\n if self.chain_ids is None:\n raise ValueError(\n 'inits must not be a list when no chains are used'\n )\n\n if len(self.inits) != len(self.chain_ids):\n raise ValueError(\n 'number of inits files must match number of chains,'\n ' found {} inits files for {} chains '.format(\n len(self.inits), len(self.chain_ids)\n )\n )\n names_set = set(self.inits)\n if len(names_set) != len(self.inits):\n raise ValueError(\n 'each chain must have its own init file,'\n ' found duplicates in inits files list.'\n )\n for inits in self.inits:\n if not os.path.exists(inits):\n raise ValueError('no such file {}'.format(inits))\n\n def compose_command(\n self,\n idx: int,\n csv_file: str,\n *,\n diagnostic_file: str = None,\n profile_file: str = None\n ) -> str:\n \"\"\"\n Compose CmdStan command for non-default arguments.\n \"\"\"\n cmd = []\n if idx is not None and self.chain_ids is not None:\n if idx < 0 or idx > len(self.chain_ids) - 1:\n raise ValueError(\n 'index ({}) exceeds number of chains ({})'.format(\n idx, len(self.chain_ids)\n )\n )\n cmd.append(self.model_exe)\n cmd.append('id={}'.format(self.chain_ids[idx]))\n else:\n cmd.append(self.model_exe)\n\n if self.seed is not None:\n if not isinstance(self.seed, list):\n cmd.append('random')\n cmd.append('seed={}'.format(self.seed))\n else:\n cmd.append('random')\n cmd.append('seed={}'.format(self.seed[idx]))\n if self.data is not None:\n 
cmd.append('data')\n cmd.append('file={}'.format(self.data))\n if self.inits is not None:\n if not isinstance(self.inits, list):\n cmd.append('init={}'.format(self.inits))\n else:\n cmd.append('init={}'.format(self.inits[idx]))\n cmd.append('output')\n cmd.append('file={}'.format(csv_file))\n if diagnostic_file is not None:\n cmd.append('diagnostic_file={}'.format(diagnostic_file))\n if profile_file is not None:\n cmd.append('profile_file={}'.format(profile_file))\n if self.refresh is not None:\n cmd.append('refresh={}'.format(self.refresh))\n if self.sig_figs is not None:\n cmd.append('sig_figs={}'.format(self.sig_figs))\n cmd = self.method_args.compose(idx, cmd)\n return cmd\n" ]
[ [ "numpy.random.RandomState" ] ]
yhyeh/LG-FedAvg
[ "f64a2943c7f1fed214412033e0fa0a63f3c03fb8" ]
[ "main_local.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport copy\nimport os\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\n\nfrom utils.options import args_parser\nfrom utils.train_utils import get_data, get_model\nfrom models.Update import DatasetSplit\nfrom models.test import test_img_local, test_img_local_all, test_img_avg_all, test_img_ensemble_all\n\nimport pdb\n\nif __name__ == '__main__':\n # parse args\n args = args_parser()\n args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')\n\n base_dir = './save/{}/{}_iid{}_num{}_C{}_le{}/shard{}/{}/'.format(\n args.dataset, args.model, args.iid, args.num_users, args.frac, args.local_ep, args.shard_per_user, args.results_save)\n if not os.path.exists(os.path.join(base_dir, 'local')):\n os.makedirs(os.path.join(base_dir, 'local'), exist_ok=True)\n\n dataset_train, dataset_test, dict_users_train, dict_users_test = get_data(args)\n dict_save_path = os.path.join(base_dir, 'dict_users.pkl')\n with open(dict_save_path, 'rb') as handle:\n dict_users_train, dict_users_test = pickle.load(handle)\n\n # build model\n net_glob = get_model(args)\n net_glob.train()\n\n net_local_list = []\n for user_ix in range(args.num_users):\n net_local_list.append(copy.deepcopy(net_glob))\n\n # training\n results_save_path = os.path.join(base_dir, 'local/results.csv')\n\n loss_train = []\n net_best = None\n best_loss = None\n best_acc = None\n best_epoch = None\n\n lr = args.lr\n results = []\n\n criterion = nn.CrossEntropyLoss()\n\n for user, net_local in enumerate(net_local_list):\n model_save_path = os.path.join(base_dir, 'local/model_user{}.pt'.format(user))\n net_best = None\n best_acc = None\n\n ldr_train = DataLoader(DatasetSplit(dataset_train, dict_users_train[user]), batch_size=args.local_bs, shuffle=True)\n optimizer = torch.optim.SGD(net_local.parameters(), lr=lr, momentum=0.5)\n for iter in range(args.epochs):\n for batch_idx, (images, labels) in enumerate(ldr_train):\n images, labels = images.to(args.device), labels.to(args.device)\n net_local.zero_grad()\n log_probs = net_local(images)\n\n loss = criterion(log_probs, labels)\n loss.backward()\n optimizer.step()\n\n acc_test, loss_test = test_img_local(net_local, dataset_test, args, user_idx=user, idxs=dict_users_test[user])\n if best_acc is None or acc_test > best_acc:\n best_acc = acc_test\n net_best = copy.deepcopy(net_local)\n # torch.save(net_local_list[user].state_dict(), model_save_path)\n\n print('User {}, Epoch {}, Acc {:.2f}'.format(user, iter, acc_test))\n\n if iter > 50 and acc_test >= 99:\n break\n\n net_local_list[user] = net_best\n\n acc_test_local, loss_test_local = test_img_local_all(net_local_list, args, dataset_test, dict_users_test)\n acc_test_avg, loss_test_avg = test_img_avg_all(net_glob, net_local_list, args, dataset_test)\n acc_test_ens_avg, loss_test, acc_test_ens_maj = test_img_ensemble_all(net_local_list, args, dataset_test)\n\n print('Final: acc: {:.2f}, acc (avg): {:.2f}, acc (ens,avg): {:.2f}, acc (ens,maj): {:.2f}'.format(acc_test_local, acc_test_avg, acc_test_ens_avg, acc_test_ens_maj))\n\n final_results = np.array([[acc_test_local, acc_test_avg, acc_test_ens_avg, acc_test_ens_maj]])\n final_results = pd.DataFrame(final_results, columns=['acc_test_local', 'acc_test_avg', 'acc_test_ens_avg', 'acc_test_ens_maj'])\n final_results.to_csv(results_save_path, index=False)\n" ]
[ [ "pandas.DataFrame", "numpy.array", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss" ] ]
psridhar-asapp/espnet
[ "7825783ef60cfe6b3a218d58008cafbe71559a11" ]
[ "utils/convert_fbank_to_wav.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport logging\nimport os\n\nfrom distutils.version import LooseVersion\n\nimport librosa\nimport numpy as np\nfrom scipy.io.wavfile import write\n\nfrom espnet.utils.cli_readers import file_reader_helper\nfrom espnet.utils.cli_utils import get_commandline_args\n\n\nEPS = 1e-10\n\n\ndef logmelspc_to_linearspc(lmspc, fs, n_mels, n_fft, fmin=None, fmax=None):\n \"\"\"Convert log Mel filterbank to linear spectrogram.\n\n Args:\n lmspc (ndarray): Log Mel filterbank (T, n_mels).\n fs (int): Sampling frequency.\n n_mels (int): Number of mel basis.\n n_fft (int): Number of FFT points.\n f_min (int, optional): Minimum frequency to analyze.\n f_max (int, optional): Maximum frequency to analyze.\n\n Returns:\n ndarray: Linear spectrogram (T, n_fft // 2 + 1).\n\n \"\"\"\n assert lmspc.shape[1] == n_mels\n fmin = 0 if fmin is None else fmin\n fmax = fs / 2 if fmax is None else fmax\n mspc = np.power(10.0, lmspc)\n mel_basis = librosa.filters.mel(fs, n_fft, n_mels, fmin, fmax)\n inv_mel_basis = np.linalg.pinv(mel_basis)\n spc = np.maximum(EPS, np.dot(inv_mel_basis, mspc.T).T)\n\n return spc\n\n\ndef griffin_lim(spc, n_fft, n_shift, win_length, window='hann', n_iters=100):\n \"\"\"Convert linear spectrogram into waveform using Griffin-Lim.\n\n Args:\n spc (ndarray): Linear spectrogram (T, n_fft // 2 + 1).\n n_fft (int): Number of FFT points.\n n_shift (int): Shift size in points.\n win_length (int): Window length in points.\n window (str, optional): Window function type.\n n_iters (int, optionl): Number of iterations of Griffin-Lim Algorithm.\n\n Returns:\n ndarray: Reconstructed waveform (N,).\n\n \"\"\"\n # assert the size of input linear spectrogram\n assert spc.shape[1] == n_fft // 2 + 1\n\n if LooseVersion(librosa.__version__) >= LooseVersion('0.7.0'):\n # use librosa's fast Grriffin-Lim algorithm\n spc = np.abs(spc.T)\n y = librosa.griffinlim(\n S=spc,\n n_iter=n_iters,\n hop_length=n_shift,\n win_length=win_length,\n window=window,\n center=True if spc.shape[1] > 1 else False\n )\n else:\n # use slower version of Grriffin-Lim algorithm\n logging.warning(\"librosa version is old. 
use slow version of Grriffin-Lim algorithm.\"\n \"if you want to use fast Griffin-Lim, please update librosa via \"\n \"`source ./path.sh && pip install librosa==0.7.0`.\")\n cspc = np.abs(spc).astype(np.complex).T\n angles = np.exp(2j * np.pi * np.random.rand(*cspc.shape))\n y = librosa.istft(cspc * angles, n_shift, win_length, window=window)\n for i in range(n_iters):\n angles = np.exp(1j * np.angle(librosa.stft(y, n_fft, n_shift, win_length, window=window)))\n y = librosa.istft(cspc * angles, n_shift, win_length, window=window)\n\n return y\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='convert FBANK to WAV using Griffin-Lim algorithm',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--fs', type=int, default=22050,\n help='Sampling frequency')\n parser.add_argument('--fmax', type=int, default=None, nargs='?',\n help='Maximum frequency')\n parser.add_argument('--fmin', type=int, default=None, nargs='?',\n help='Minimum frequency')\n parser.add_argument('--n_fft', type=int, default=1024,\n help='FFT length in point')\n parser.add_argument('--n_shift', type=int, default=512,\n help='Shift length in point')\n parser.add_argument('--win_length', type=int, default=None, nargs='?',\n help='Analisys window length in point')\n parser.add_argument('--n_mels', type=int, default=None, nargs='?',\n help='Number of mel basis')\n parser.add_argument('--window', type=str, default='hann',\n choices=['hann', 'hamming'],\n help='Type of window')\n parser.add_argument('--iters', type=int, default=100,\n help='Number of iterations in Grriffin Lim')\n parser.add_argument('--filetype', type=str, default='mat',\n choices=['mat', 'hdf5'],\n help='Specify the file format for the rspecifier. '\n '\"mat\" is the matrix format in kaldi')\n parser.add_argument('rspecifier', type=str, help='Input feature')\n parser.add_argument('outdir', type=str,\n help='Output directory')\n return parser\n\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n\n # logging info\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\")\n logging.info(get_commandline_args())\n\n # check directory\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\n for idx, (utt_id, lmspc) in enumerate(\n file_reader_helper(args.rspecifier, args.filetype), 1):\n if args.n_mels is not None:\n spc = logmelspc_to_linearspc(\n lmspc,\n fs=args.fs,\n n_mels=args.n_mels,\n n_fft=args.n_fft,\n fmin=args.fmin,\n fmax=args.fmax)\n else:\n spc = lmspc\n y = griffin_lim(\n spc,\n n_fft=args.n_fft,\n n_shift=args.n_shift,\n win_length=args.win_length,\n window=args.window,\n n_iters=args.iters)\n logging.info(\"(%d) %s\" % (idx, utt_id))\n write(args.outdir + \"/%s.wav\" % utt_id,\n args.fs,\n (y * np.iinfo(np.int16).max).astype(np.int16))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.dot", "numpy.random.rand", "numpy.linalg.pinv", "numpy.power", "numpy.abs", "numpy.iinfo" ] ]
simpeg/geosci-labs
[ "0963c5766477e59af6625954036f580481cfaf82" ]
[ "geoscilabs/em/DipoleWidget1D.py" ]
[ "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport numpy as np\nfrom SimPEG import electromagnetics as EM\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib\nimport matplotlib.gridspec as gridspec\n\nfrom ipywidgets import ToggleButtons, FloatSlider\n\nfrom ..base import widgetify\nfrom .view import DataView\n\nmatplotlib.rcParams[\"font.size\"] = 12\n\n\ndef linefun(x1, x2, y1, y2, nx, tol=1e-3):\n dx = x2 - x1\n dy = y2 - y1\n\n if np.abs(dx) < tol:\n y = np.linspace(y1, y2, nx)\n x = np.ones_like(y) * x1\n elif np.abs(dy) < tol:\n x = np.linspace(x1, x2, nx)\n y = np.ones_like(x) * y1\n else:\n x = np.linspace(x1, x2, nx)\n slope = (y2 - y1) / (x2 - x1)\n y = slope * (x - x1) + y1\n return x, y\n\n\nclass DipoleWidget1D(object):\n \"\"\"DipoleWidget\"\"\"\n\n x = None\n y = None\n z = None\n func = None\n sig = None\n freq = None\n obsLoc = None\n\n # Fixed spatial range in 3D\n xmin, xmax = -50.0, 50.0\n ymin, ymax = -50.0, 50.0\n zmin, zmax = -50.0, 50.0\n sigmin, sigmax = -4.0, 4\n fmin, fmax = -4.0, 8.0\n ns = 81\n nf = 121\n sigvec = np.linspace(sigmin, sigmax, ns)\n fvec = np.linspace(fmin, fmax, nf)\n\n def __init__(self):\n self.dataview = DataView()\n\n def SetDataview_1D(self, srcLoc, obsLoc, sigvec, fvec, orientation, normal, func):\n self.dataview.eval_loc(srcLoc, obsLoc, sigvec, fvec, orientation, normal, func)\n\n def InteractiveDipole1D(self):\n sigvec = self.sigvec\n fvec = self.fvec\n\n def Dipole1Dviz(\n orientation, component, view, normal, sigsl, freqsl, absloc, coordloc, mode\n ):\n\n x = np.linspace(-50.0, 50.0, 100)\n y = np.arange(-50.0, 50.0, 100)\n\n srcLoc = np.r_[0.0, 0.0, 0.0] # source location\n # sig, f = (\n # 10.0 ** sigsl,\n # np.r_[10.0 ** freqsl],\n # ) # conductivity (S/m), frequency (Hz)\n\n if normal.upper() == \"Z\":\n obsLoc = np.c_[absloc, coordloc, np.r_[0.0]]\n self.dataview.set_xyz(\n x, y, np.r_[0.0], normal=normal\n ) # set plane and locations ...\n\n elif normal.upper() == \"Y\":\n obsLoc = np.c_[absloc, np.r_[0.0], coordloc]\n self.dataview.set_xyz(\n x, np.r_[0.0], y, normal=normal\n ) # set plane and locations ...\n\n elif normal.upper() == \"X\":\n obsLoc = np.c_[np.r_[0.0], absloc, coordloc]\n self.dataview.set_xyz(\n np.r_[0.0], x, y, normal=normal\n ) # set plane and locations ...\n\n self.dataview.eval_loc(\n srcLoc,\n obsLoc,\n sigvec,\n fvec,\n orientation,\n normal,\n EM.analytics.E_from_ElectricDipoleWholeSpace,\n ) # evaluate\n\n plt.figure(figsize=(6.5 * 3, 5))\n ax0 = plt.subplot(121)\n ax2 = plt.subplot(122)\n\n ax1 = ax0.twinx()\n ax3 = ax2.twinx()\n\n if mode == \"RI\":\n ax0 = self.dataview.plot1D_FD(\n component=\"real\",\n view=view,\n abscisse=\"Conductivity\",\n slic=freqsl,\n logamp=True,\n ax=ax0,\n color=\"blue\",\n )\n ax1 = self.dataview.plot1D_FD(\n component=\"imag\",\n view=view,\n abscisse=\"Conductivity\",\n slic=freqsl,\n logamp=True,\n ax=ax1,\n legend=False,\n color=\"red\",\n )\n\n ax2 = self.dataview.plot1D_FD(\n component=\"real\",\n view=view,\n abscisse=\"Frequency\",\n slic=sigsl,\n logamp=True,\n ax=ax2,\n color=\"blue\",\n )\n ax3 = self.dataview.plot1D_FD(\n component=\"imag\",\n view=view,\n abscisse=\"Frequency\",\n slic=sigsl,\n logamp=True,\n ax=ax3,\n legend=False,\n color=\"red\",\n )\n\n elif mode == \"AP\":\n ax0 = self.dataview.plot1D_FD(\n component=\"Amplitude\",\n view=view,\n abscisse=\"Conductivity\",\n slic=freqsl,\n logamp=True,\n ax=ax0,\n color=\"blue\",\n )\n 
ax1 = self.dataview.plot1D_FD(\n component=\"Phase\",\n view=view,\n abscisse=\"Conductivity\",\n slic=freqsl,\n logamp=True,\n ax=ax1,\n legend=False,\n color=\"red\",\n )\n\n ax2 = self.dataview.plot1D_FD(\n component=\"Amplitude\",\n view=view,\n abscisse=\"Frequency\",\n slic=sigsl,\n logamp=True,\n ax=ax3,\n color=\"blue\",\n )\n ax3 = self.dataview.plot1D_FD(\n component=\"Phase\",\n view=view,\n abscisse=\"Frequency\",\n slic=sigsl,\n logamp=True,\n ax=ax3,\n legend=False,\n color=\"red\",\n )\n\n elif mode == \"Phasor\":\n ax0 = self.dataview.plot1D_FD(\n component=\"PHASOR\",\n view=view,\n abscisse=\"Conductivity\",\n slic=freqsl,\n logamp=True,\n ax=ax0,\n color=\"black\",\n )\n ax2 = self.dataview.plot1D_FD(\n component=\"PHASOR\",\n view=view,\n abscisse=\"Frequency\",\n slic=sigsl,\n logamp=True,\n ax=ax2,\n color=\"black\",\n )\n\n plt.tight_layout()\n\n out = widgetify(\n Dipole1Dviz,\n mode=ToggleButtons(options=[\"RI\", \"AP\", \"Phasor\"], value=\"RI\"),\n view=ToggleButtons(options=[\"x\", \"y\", \"z\"], value=\"x\"),\n sigsl=FloatSlider(min=-4, max=4, step=0.1, value=0),\n freqsl=FloatSlider(min=-4, max=8, step=0.1, value=-4),\n absloc=FloatSlider(min=-50, max=50, step=1, value=25),\n coordloc=FloatSlider(min=-50, max=50, step=1, value=0),\n orientation=ToggleButtons(options=[\"x\", \"y\", \"z\"], value=\"x\"),\n component=ToggleButtons(\n options=[\"real\", \"imag\", \"amplitude\", \"phase\"], value=\"real\"\n ),\n normal=ToggleButtons(options=[\"x\", \"y\", \"z\"], value=\"z\"),\n )\n return out\n" ]
[ [ "numpy.ones_like", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.tight_layout", "numpy.abs", "numpy.linspace", "matplotlib.pyplot.subplot" ] ]
mihirp1998/sbnet_3d_tensorflow
[ "2a990c6e16d33b5b89815c9543819a3e42ebab1d" ]
[ "sbnet_tensorflow/benchmark/sparse_conv_lib.py" ]
[ "\"\"\"\n\n Sparse Blocks Network\n Copyright (c) 2017, Uber Technologies, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\n\n#\n# Sparse convolution operators.\n#\n# Usage:\n# ```\n# import numpy as np\n# import tensorflow as tf\n#\n# from sparse_conv_lib import convert_mask_to_block_indices, sparse_conv2d\n#\n# # Binary mask to define sparsity.\n# mask = tf.constant(\n# np.array(\n# [[\n# [0, 0, 0, 0, 0], # YAPF_NO_FORMAT\n# [0, 0, 1, 0, 0],\n# [1, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0]\n# ]],\n# dtype=np.float32))\n# # Convert binary mask to block representation.\n# ind_blk = convert_mask_to_block_indices(mask, [1, 3, 3, 1], [1, 1, 1, 1], [3, 3, 1, 1],\n# [1, 1, 1, 1], 'SAME', .1)\n#\n# # Sparse convolution.\n# x = tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32))\n# w = tf.constant(np.ones([3, 3, 1, 1], dtype=np.float32))\n# y = sparse_conv2d(x, w, ind_blk, [1, 1, 1, 1], 'SAME')\n#\n# with tf.Session():\n# print(np.squeeze(y.eval()))\n#\n# >> Output\n# >> [[ 0. 6. 6. 6. 0.]\n# [ 6. 9. 9. 9. 0.]\n# [ 6. 9. 9. 9. 0.]\n# [ 6. 9. 0. 0. 0.]\n# [ 0. 0. 0. 0. 0.]]\n# ```\nfrom __future__ import division, print_function\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import ops\nfrom collections import namedtuple\n\nimport logger\nfrom tf_conv_dims import calc_padding_4d, calc_out_size_4d, calc_out_size_4d_np\n\nlog = logger.get()\n\nsbnet_module = tf.load_op_library('../sbnet_ops/libsbnet.so')\n\nBlockParams = namedtuple('BlockParams', ['bsize', 'bsize_out', 'boffset', 'bcount', 'bstrides'])\n\n\n# Gradients registration.\n@ops.RegisterGradient(\"SparseGather\")\ndef _sparse_gather_grad(op, grad):\n # x is shaped like full tensor [NHWC]\n # grad is shaped as gathered blocks [Nblocks*BH*BW*C]\n x = op.inputs[0]\n binCounts = op.inputs[1]\n activeBlockIndices = op.inputs[2]\n bsize = op.inputs[3]\n bstride = op.inputs[4]\n boffset = op.inputs[5]\n transpose = op.get_attr(\"transpose\")\n\n # if scatter is overlapping then gradient should still work\n # because we are overwriting the same values\n # compute dOutput/dx\n result = sbnet_module.sparse_scatter(\n grad,\n binCounts,\n activeBlockIndices,\n tf.zeros_like(x), # output base tensor to add on top of\n dynamic_bsize=bsize,\n dynamic_bstride=bstride,\n dynamic_boffset=boffset,\n add=True,\n transpose=transpose,\n atomic=True)\n\n return [result, None, None, None, None, None] # no gradients wrt indices or block params\n\n\n@ops.RegisterGradient(\"SparseScatter\")\ndef _sparse_scatter_grad(op, grad):\n # x is shaped like blocked tensor of gathered blocks [Nblocks*BH*BW*C]\n # grad is shaped as output tensor [NHWC]\n blocksX = op.inputs[0]\n binCounts = op.inputs[1]\n activeBlockIndices = op.inputs[2]\n ybase = op.inputs[3]\n bsize = op.inputs[4]\n bstride = op.inputs[5]\n boffset = op.inputs[6]\n doAdd = op.get_attr(\"add\")\n\n dout_dx = sbnet_module.sparse_gather(\n grad,\n binCounts,\n activeBlockIndices,\n dynamic_bsize=bsize,\n dynamic_bstride=bstride,\n 
dynamic_boffset=boffset)\n\n # return a list of gradients of output with respect to each input\n if not doAdd:\n # scatter blocks of zeroes over a base tensor of ones to compute a stamp-out gradient mask for dy_dybase\n stamp_out_blocks = sbnet_module.sparse_scatter(\n tf.zeros_like(blocksX),\n binCounts,\n activeBlockIndices,\n tf.ones_like(grad),\n dynamic_bsize=bsize,\n dynamic_bstride=bstride,\n dynamic_boffset=boffset,\n add=False)\n dy_dybase = grad * stamp_out_blocks\n return [dout_dx, None, None, dy_dybase, None, None, None]\n else:\n # d(x+ybase)/dybase = 1, so just pass back grad as dout_dybase\n return [dout_dx, None, None, grad, None, None, None]\n\n\ndef _pad_input(x, ksize, strides, padding, bsize=None, bstrides=None):\n \"\"\"Pads the input tensor.\n Optional to pass in block strides. The right hand side padding will be increased if the last\n block does not fit in (no effect on the convolution results.\n\n :param x: [Tensor] [N, H, W, C]. input tensor, dtype float32.\n :param ksize: [list] List of 4 int. Sparse convolution kernel size.\n :param strides: [list] List of 4 int. Sparse convolution stride size.\n :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.\n :param bsize [list] List of 4 int. Block size. Optional.\n :param bstrides: [list] List of 4 int. Block strides. Optional.\n\n :return [Tensor] [N, H+Ph, W+Pw, C]. Padded input tensor.\n \"\"\"\n x_shape = tf.shape(x)\n if padding == 'SAME':\n pad_h0, pad_h1, pad_w0, pad_w1 = calc_padding_4d(x_shape, ksize, strides, padding)\n\n if bstrides is not None:\n # Here we do not use the standard padding on the right hand side.\n # If the convolution results is larger than expected, the scatter function will not use\n # out-of-boundary points.\n assert bsize is not None, 'Must pass in bsize and bstrides together.'\n h = x_shape[1] + pad_h0 + pad_h1\n w = x_shape[2] + pad_w0 + pad_w1\n pad_h1 += tf.mod(-h + bsize[1], bstrides[1])\n pad_w1 += tf.mod(-w + bsize[2], bstrides[2])\n return tf.pad(x, [[0, 0], [pad_h0, pad_h1], [pad_w0, pad_w1], [0, 0]])\n else:\n if bstrides is not None:\n assert bsize is not None, 'Must pass in bsize and bstrides together.'\n h = x_shape[1]\n w = x_shape[2]\n pad_h1 = tf.mod(-h + bsize[1], bstrides[1])\n pad_w1 = tf.mod(-w + bsize[2], bstrides[2])\n return tf.cond(\n tf.logical_or(tf.greater(pad_h1, 0), tf.greater(pad_w1, 0)),\n lambda: tf.pad(x, [[0, 0], [0, pad_h1], [0, pad_w1], [0, 0]]), lambda: x)\n else:\n return x\n\n\ndef _get_offset_array_tf(shape):\n \"\"\"\n Computes the offset array used to upsample indices with TensorFlow.\n\n :param shape: [list] Window shape.\n \"\"\"\n center = [(ss - 1) // 2 for ss in shape]\n axes = [tf.range(-cc, ss - cc, dtype=tf.int32) for cc, ss in zip(center, shape)]\n # Broadcast and match dimension.\n if len(shape) > 1:\n for jj in range(len(shape)):\n for ii in range(len(shape) + 1):\n if ii != jj:\n axes[jj] = tf.expand_dims(axes[jj], ii)\n for jj in range(len(shape)):\n shape_ = [ss for ss in shape] + [1]\n shape_[jj] = 1\n axes[jj] = tf.tile(axes[jj], shape_)\n offset = tf.concat(axes, len(shape))\n return offset\n\n\ndef _get_offset_array(shape):\n \"\"\"\n Computes the offset array used to upsample indices with NumPy (static).\n\n :param shape: [list] Window shape.\n \"\"\"\n center = [int(ss - 1) // 2 for ss in shape]\n axes = [np.arange(-cc, int(ss) - cc).astype(np.int32) for cc, ss in zip(center, shape)]\n if len(shape) > 1:\n for jj in range(len(shape)):\n for ii in range(len(shape) + 1):\n if ii != jj:\n axes[jj] = 
np.expand_dims(axes[jj], ii)\n for jj in range(len(shape)):\n shape_ = [int(ss) for ss in shape] + [1]\n shape_[jj] = 1\n axes[jj] = np.tile(axes[jj], shape_)\n offset = np.concatenate(axes, len(shape))\n return tf.constant(offset)\n else:\n return tf.constant(axes[0])\n\n\ndef _calc_block_strides(bsize, ksize, strides):\n \"\"\"Calculates strides for blocks.\n\n :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio.\n :param ksize: [list] List of 4 int. Sparse convolution kernel size.\n :param strides: [list] List of 4 int. Sparse convolution strides.\n\n :return [list] List of 4 int. Block strides.\n \"\"\"\n return [1, bsize[1] - ksize[0] + strides[1], bsize[2] - ksize[1] + strides[2], 1]\n\n\ndef upsample_indices(indices, ksize, strides):\n \"\"\"\n Upsamples the indices to have all indices in a rectangle.\n\n :param indices: [Tensor] [M, 3]. Center locations (N, H, W) of the M rectangles.\n Dtype int32.\n :param ksize: [list] Size of the rectangle, or downsample ratio.\n :param strides: [list] Strides of the pooling operation.\n\n :return [Tensor] [M, h, w, 3]. Locations of all pixels in the rectangles.\n Dtype int32.\n \"\"\"\n assert len(indices.get_shape()) == 2, 'Expect indices rank = 2'\n assert ksize[0] == ksize[3] == 1, 'Expect first and last dimensions of ksize = 1'\n assert strides[0] == strides[3] == 1, 'Expect first and last dimensions of strides = 1, {}'.format(\n strides)\n h_scale = strides[1]\n w_scale = strides[2]\n scale = tf.stack([1, h_scale, w_scale])\n indices *= scale\n # Since we always use VALID to perform pooling, shift is needed here.\n shift = tf.stack([0, (ksize[1] - 1) // 2, (ksize[2] - 1) // 2])\n indices += shift\n indices_ = tf.expand_dims(tf.expand_dims(indices, 1), 2)\n # indices_ = tf.tile(indices_, [1, ksize[1], ksize[2], 1])\n offset = _get_offset_array(ksize[0:3])\n indices_ += offset\n return indices_\n\n\ndef convert_mask_to_indices(mask, bsize, ksize, strides, padding, tol):\n \"\"\"\n Converts a binary mask to sparse indices.\n\n :param mask: [Tensor] [N, H, W]. 1 indicates non-sparse locations. Dtype float32.\n :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio.\n :param ksize: [list] List of 4 int. Sparse convolution kernel size.\n :param strides: [list] List of 4 int. Sparse convolution stride size.\n Currently only supports when,\n 1) (bsize[1] - ksize[0]) % strides[1] == 0 and,\n 2) (bsize[2] - ksize[1]) % strides[2] == 0\n :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.\n :param tol: [float] Lower bound of occupancy for creating a rectangle.\n\n :return [Tensor] [M, 3]. Center locations (N, H, W) of M rectangles. Dtype int32.\n \"\"\"\n ERR_MSG_RANK = 'Expect mask rank = 3'\n ERR_MSG_DIV = 'Expect `stride` divides `bsize` - `ksize`. stride {}, bsize {}, ksize {}.'\n ERR_MSG_DIM = 'Expect first and last dimensions of strides = 1. 
Dim {}.'\n\n assert len(mask.get_shape()) == 3, ERR_MSG_RANK\n assert type(bsize) in [list, tuple], '`bsize` needs to be a list or tuple.'\n assert type(ksize) in [list, tuple], '`ksize` needs to be a list or tuple.'\n assert type(strides) in [list, tuple], '`strides` needs to be a list or tuple.'\n assert (bsize[1] - ksize[0]) % strides[1] == 0, ERR_MSG_DIV.format(\n strides[1], bsize[1], ksize[0])\n assert (bsize[2] - ksize[1]) % strides[2] == 0, ERR_MSG_DIV.format(\n strides[2], bsize[2], ksize[1])\n assert strides[0] == strides[3] == 1, ERR_MSG_DIM.format(strides)\n\n bstrides = _calc_block_strides(bsize, ksize, strides)\n\n # Pad mask.\n mask_ = tf.expand_dims(mask, 3)\n mask_ = _pad_input(mask_, ksize, strides, padding, bsize=bsize, bstrides=bstrides)\n mask_ = tf.nn.max_pool(mask_, bsize, bstrides, 'VALID') # Blocks are always valid conv.\n mask_ = tf.squeeze(mask_, [3])\n indices = tf.where(tf.greater(mask_, tol))\n indices = tf.cast(indices, tf.int32)\n return indices\n\n\ndef convert_mask_to_block_indices(mask, bsize, ksize, strides, padding, tol):\n \"\"\"\n Converts a binary mask to block sparse indices.\n\n :param mask: [Tensor] [N, H, W]. 1 indicates non-sparse locations. Dtype float32.\n :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio.\n :param ksize: [list] List of 4 int. Sparse convolution kernel size.\n :param strides: [list] List of 4 int. Sparse convolution stride size.\n Currently only supports when,\n 1) (bsize[1] - ksize[0]) % strides[1] == 0 and,\n 2) (bsize[2] - ksize[1]) % strides[2] == 0\n :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.\n :param tol: [float] Lower bound of occupancy for creating a rectangle.\n\n :return [Tensor] [M, h, w, 3]. Pixel locations of M rectangles. Dtype int32.\n \"\"\"\n indices = convert_mask_to_indices(mask, bsize, ksize, strides, padding, tol)\n bstrides = _calc_block_strides(bsize, ksize, strides)\n blk_indices = upsample_indices(indices, bsize, bstrides)\n return blk_indices\n\n\ndef calc_block_params(in_size, bsize, ksize, strides, padding):\n \"\"\"\n Calculates block parameters for a single convolution layer.\n\n :param in_size: [list] List of 4 int, or a Tensor of size 4. Size of the convolution input.\n :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio.\n :param ksize: [list] List of 4 int. Sparse convolution kernel size.\n :param strides: [list] List of 4 int. 
Sparse convolution stride size.\n Currently only supports when,\n 1) (bsize[1] - ksize[0]) % strides[1] == 0 and,\n 2) (bsize[2] - ksize[1]) % strides[2] == 0\n :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.\n\n :return [tuple]\n bsize:\n bsize_out:\n boffset:\n bcount:\n bstrides:\n \"\"\"\n static = not (type(in_size) == tf.Tensor)\n\n assert ((bsize[1] - ksize[0]) % strides[1] == 0)\n assert ((bsize[2] - ksize[1]) % strides[2] == 0)\n\n bstrides = _calc_block_strides(bsize, ksize, strides)\n pad_h0, pad_h1, pad_w0, pad_w1 = calc_padding_4d(in_size, ksize, strides, padding)\n h = in_size[1]\n w = in_size[2]\n # Make padding divides blocks.\n pad_h1 += (-h + bsize[1]) % bstrides[1]\n pad_w1 += (-w + bsize[2]) % bstrides[2]\n boffset = [-pad_h0, -pad_w0]\n x_pad_shape = [\n in_size[0], in_size[1] + pad_h0 + pad_h1, in_size[2] + pad_w0 + pad_w1, in_size[3]\n ]\n if static:\n out_shape = calc_out_size_4d_np(x_pad_shape, [bsize[1], bsize[2], 1, 1], bstrides, 'VALID')\n else:\n out_shape = calc_out_size_4d(x_pad_shape, [bsize[1], bsize[2], 1, 1], bstrides, 'VALID')\n bcount = [out_shape[1], out_shape[2]]\n bsize_out = calc_out_size_4d_np(bsize, ksize, strides, 'VALID')\n bsize = bsize[1:3]\n bstrides = bstrides[1:3]\n bsize_out = bsize_out[1:3]\n if static:\n assert (pad_h0 == -boffset[0])\n assert (pad_w0 == -boffset[1])\n for i, siz in zip([0, 1], [h, w]):\n # make sure last block is inside\n err_msg = 'Making sure last block is inside boffset {} bstrides {} bcount {} size {}'.format(\n boffset[i], bstrides[i], bcount[i], siz)\n assert (boffset[i] + bstrides[i] * (bcount[i] - 1) < siz), err_msg\n return BlockParams(\n bsize=bsize, bsize_out=bsize_out, boffset=boffset, bcount=bcount, bstrides=bstrides)\n\n\ndef calc_block_params_res_block(in_size, bsize, ksize_list, strides, padding):\n \"\"\"\n Calculates block parameters for a residual block.\n\n :param in_size: [list] List of 4 int. Size of the residual block input.\n :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio, for each\n convolution layer in the residual block.\n :param ksize: [list] List of list of 4 int. Sparse convolution kernel size.\n :param strides: [list] List of 4 int. Sparse convolution stride size, for the first\n convolution in the residual block.\n Currently only supports when,\n 1) (bsize[1] - ksize[0]) % strides[1] == 0 and,\n 2) (bsize[2] - ksize[1]) % strides[2] == 0\n :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.\n\n :return\n \"\"\"\n # Use the receptive field as the kernel size.\n ksize_h = 1 + sum([kk[0] - 1 for kk in ksize_list])\n ksize_w = 1 + sum([kk[1] - 1 for kk in ksize_list])\n ksize_real = [ksize_h, ksize_w, 1, 1]\n return calc_block_params(in_size, bsize, ksize_real, strides, padding)\n\n\ndef convert_mask_to_indices_custom(mask, block_params, tol, avgpool=False):\n \"\"\"\n Converts a binary mask to sparse index format for custom CUDA kernel and TF ops.\n\n :param mask: [Tensor] [N, H, W]. 1 indicates non-sparse locations. Dtype float32.\n :param block_params [tuple] Contains bsize, boffset, bcount, bstrides.\n :param tol: [float] Lower bound of occupancy for creating a rectangle.\n\n :return [tuple]\n bin_counts: [Tensor]. Number of active locations for each bin.\n active_block_indices: [Tensor]. [M]. Center locations of M rectangles. 
Dtype int64.\n \"\"\"\n\n def to_tensor(a, dtype):\n if type(a) == tf.Tensor:\n if a.dtype != dtype:\n return tf.cast(a, dtype)\n else:\n return a\n elif type(a) == list:\n if type(a[0]) == tf.Tensor:\n return tf.stack(a, 0)\n else:\n return tf.constant(a, dtype)\n else:\n print(type(a))\n return tf.constant(a, dtype)\n\n return sbnet_module.reduce_mask(\n mask,\n block_params.bcount,\n dynamic_bsize=to_tensor(block_params.bsize, tf.int32),\n dynamic_bstride=to_tensor(block_params.bstrides, tf.int32),\n dynamic_boffset=to_tensor(block_params.boffset, tf.int32),\n avgpool=avgpool,\n tol=tol)\n\n\ndef sparse_conv2d(x, w, blk_indices, strides, padding):\n \"\"\"\n Performs 2D convolution on a sparse feature map, given indices.\n Naive python implementation of sparse convolution using gather and scatter.\n\n :param x: [Tensor] [N, H, W, C]. Input activation tensor, dtype float32.\n :param w: [Tensor] [I, J, C, K]. Convolution kernel, dtype float32.\n :param blk_indices: [Tensor] [M, h, w, 3]. Block indices of rectangles.\n :param strides: [list] List of 4 int, convolution strides.\n :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.\n\n :return [Tensor] [N, H', W', C]. Convolution results.\n \"\"\"\n blk_shape = tf.shape(blk_indices)\n blk_indices_ = tf.reshape(blk_indices, [-1, 3])\n ksize = tf.shape(w)\n\n # Calculate the block strides.\n bstrides = _calc_block_strides(blk_shape, ksize, strides)\n\n # Calculate the output size.\n x_shape = tf.shape(x)\n out_shape = calc_out_size_4d(x_shape, ksize, strides, padding)\n\n # Pad input.\n x_ = _pad_input(\n x, ksize, strides, padding, bsize=[1, blk_shape[1], blk_shape[2], 1], bstrides=bstrides)\n\n # Convolution when number of indices is larger than zero.\n def _conv_nonzero():\n # Gather patches.\n p = tf.gather_nd(x_, blk_indices_)\n\n # Reshape patches.\n p = tf.reshape(p, [blk_shape[0], blk_shape[1], blk_shape[2], -1])\n\n # Convolution on patches.\n q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True)\n\n # Paste convolution results.\n q_shape = tf.shape(q)\n\n def _strides_gt_one():\n # Calculate output indices when strides > 1.\n blk_indices_crop = tf.strided_slice(blk_indices, [0, 0, 0, 0], [\n blk_shape[0], q_shape[1] * strides[1], q_shape[2] * strides[2], 3\n ], strides)\n blk_indices_crop = blk_indices_crop // tf.stack([1, strides[1], strides[2]])\n return blk_indices_crop\n\n def _strides_one():\n # Calculate otuput indices when strides = 1.\n return blk_indices[:, :q_shape[1], :q_shape[2], :]\n\n strides_gt_one = tf.logical_or(tf.greater(strides[1], 1), tf.greater(strides[2], 1))\n blk_indices_crop = tf.cond(strides_gt_one, _strides_gt_one, _strides_one)\n y = tf.scatter_nd(blk_indices_crop, q, out_shape)\n return y\n\n return tf.cond(\n tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype),\n _conv_nonzero)\n\n\n# returns an int64 start timer handle that should be passed to cuda_timer_end_op\ndef cuda_timer_start_op():\n return sbnet_module.cuda_timer_start()\n\n\n# returns a float\ndef cuda_timer_end_op(start_timer):\n return sbnet_module.cuda_timer_end(start_timer)\n\n\ndef sparse_conv2d_custom(x,\n w,\n indices,\n block_params,\n strides,\n use_var=False,\n transpose=False,\n atomic=False):\n assert strides[1] == strides[2] == 1, 'Only accept strides=1'\n # TODO: make the gather op also accepting a Tensor for bsize, ksize, etc.\n ksize = [int(ss) for ss in w.get_shape()]\n p = sbnet_module.sparse_gather(\n x,\n indices.bin_counts,\n 
indices.active_block_indices,\n dynamic_bsize=block_params.bsize,\n dynamic_bstride=block_params.bstrides,\n dynamic_boffset=block_params.boffset,\n transpose=transpose)\n\n # Convolution on patches.\n if transpose:\n q = tf.nn.conv2d(p, w, strides, 'VALID', data_format='NCHW', use_cudnn_on_gpu=True)\n else:\n q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True)\n\n # Allocate output tensor.\n if use_var:\n y = sbnet_module.sparse_scatter_var(\n q,\n indices.bin_counts,\n indices.active_block_indices,\n x,\n dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),\n dynamic_bstride=tf.constant(block_params.bstrides, dtype=tf.int32),\n dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),\n add=False,\n transpose=transpose,\n atomic=atomic)\n else:\n y = sbnet_module.sparse_scatter(\n q,\n indices.bin_counts,\n indices.active_block_indices,\n x,\n dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),\n dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32),\n dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),\n add=False,\n transpose=transpose,\n atomic=atomic)\n return y\n\n\ndef _batch_norm(name, x, is_training, data_format='NHWC'):\n \"\"\"\n Applies batch normalization.\n\n :param name: [string] Name of the variable scope.\n :param x: [Tensor] Tensor to apply BN on.\n :param is_training [bool] Whether in training mode.\n\n :return: [Tensor] Normalized activation.\n \"\"\"\n bn = tf.contrib.layers.batch_norm(\n x, fused=True, scale=True, data_format=data_format, is_training=is_training, scope=name)\n return bn\n # log.warning('Not using BN to test performance at inference time')\n # return x\n\n\ndef _relu(name, x):\n \"\"\"\n Applies ReLU function.\n\n :param name: [string] Name of the op.\n :param x: [Tensor] Input to the function.\n\n :return: [Tensor] Output of the function.\n \"\"\"\n return tf.nn.relu(x, name=name)\n # log.warning('Not using ReLU to test performance at inference time')\n # return x\n\n\ndef _stride_arr(n, data_format='NHWC'):\n \"\"\"Makes strides array for downsampling convolution.\"\"\"\n if data_format == 'NHWC':\n return [1, n, n, 1]\n elif data_format == 'NCHW':\n return [1, 1, n, n]\n else:\n raise ValueError('Unknown data format: {}'.format(data_format))\n\n\ndef _conv(name,\n x,\n ksize,\n strides,\n padding,\n data_format='NHWC',\n weight_decay=None,\n dtype=tf.float32,\n weights_on_cpu=False):\n \"\"\"\n Convolution layer.\n\n :param name [string] Name of the op.\n :param x: [Tensor] Input to the downsample.\n :param ksize [list] 4-D kernel shape.\n :param strides: [list] 4-D strides array.\n :param padding: [string] Convolution padding strategy.\n :param data_format: [string] 'NHWC' or 'NCHW'.\n\n :return: [Tensor] Convolution output.\n \"\"\"\n with tf.variable_scope(name):\n in_filters = ksize[2]\n out_filters = ksize[3]\n n = ksize[0] * ksize[1] * out_filters\n init = tf.truncated_normal_initializer(\n mean=0.0, stddev=np.sqrt(2.0 / n), seed=0, dtype=dtype)\n\n def _reg(x):\n if weight_decay is not None:\n return tf.multiply(tf.nn.l2_loss(x), weight_decay)\n else:\n return None\n\n if weight_decay is not None:\n reg = _reg\n else:\n reg = None\n\n kernel = tf.get_variable(\n 'w', ksize, initializer=init, regularizer=reg, dtype=dtype, trainable=True)\n\n return tf.nn.conv2d(\n x, kernel, strides, padding, data_format=data_format, use_cudnn_on_gpu=True)\n\n\ndef _bottleneck_residual(x,\n ksize_list,\n strides,\n padding,\n is_training,\n data_format='NHWC',\n no_activation=False):\n with 
tf.variable_scope('sub1'):\n if not no_activation:\n x = _batch_norm('bn1', x, is_training, data_format)\n x = _relu('relu1', x)\n\n STRIDES_ERR_MSG = 'Strides height and width are not the same.'\n if data_format == 'NHWC':\n assert strides[1] == strides[2], STRIDES_ERR_MSG\n elif data_format == 'NCHW':\n assert strides[2] == strides[3], STRIDES_ERR_MSG\n x = _conv(\n 'conv1',\n x,\n ksize_list[0],\n _stride_arr(strides[2], data_format),\n padding,\n data_format=data_format)\n\n with tf.variable_scope('sub2'):\n x = _batch_norm('bn2', x, is_training, data_format)\n x = _relu('relu2', x)\n x = _conv(\n 'conv2',\n x,\n ksize_list[1],\n _stride_arr(1, data_format),\n padding,\n data_format=data_format)\n\n with tf.variable_scope('sub3'):\n x = _batch_norm('bn3', x, is_training, data_format)\n x = _relu('relu3', x)\n x = _conv(\n 'conv3',\n x,\n ksize_list[2],\n _stride_arr(1, data_format),\n padding,\n data_format=data_format)\n return x\n\n\ndef res_block_bottleneck(x,\n ksize_list,\n strides,\n is_training,\n data_format='NHWC',\n w_project=None,\n no_activation=False):\n \"\"\"\n Computes y = x + F(x), where F(x) is the residual block function. At downsample layers, applies\n a downsample function on x as well.\n \"\"\"\n if w_project is not None:\n x_ = tf.conv2d(x, w_project, strides, padding='SAME', data_format=data_format)\n else:\n x_ = x\n return x_ + _bottleneck_residual(\n x,\n ksize_list,\n strides,\n 'SAME',\n is_training,\n data_format=data_format,\n no_activation=no_activation)\n\n\ndef sparse_res_block_bottleneck(x,\n ksize_list,\n indices,\n block_params,\n strides,\n is_training,\n data_format='NHWC',\n w_project=None,\n no_activation=False,\n use_var=False):\n \"\"\"\n Computes y = x + F(x), where F(x) is the residual block function. At downsample layers, applies\n a downsample function on x as well.\n\n :param x: [Tensor] [N, H, W, C]. Input activation tensor, dtype float32.\n :param ksize_list: [list] List of list of 4 int. 
Kernel size for each convolution\n layer in the residual block.\n :param indices: [tuple] Non-sparse locations returned by reduce_mask.\n :param block_params: [tuple] BlockParam namedtuple.\n :param\n\n :return\n \"\"\"\n transpose = True if data_format == 'NCHW' else False\n p = sbnet_module.sparse_gather(\n x,\n indices.bin_counts,\n indices.active_block_indices,\n dynamic_bsize=block_params.bsize,\n dynamic_bstride=block_params.bstrides,\n dynamic_boffset=block_params.boffset,\n transpose=transpose)\n\n if w_project is not None:\n x = tf.conv2d(x, w_project, strides, padding='SAME')\n\n # Set shape for BN in the residual function.\n if transpose:\n p.set_shape([None, x.get_shape()[3], block_params.bsize[0], block_params.bsize[1]])\n else:\n p.set_shape([None, block_params.bsize[0], block_params.bsize[1], x.get_shape()[3]])\n\n q = _bottleneck_residual(\n p,\n ksize_list,\n strides,\n 'VALID',\n is_training,\n data_format=data_format,\n no_activation=no_activation)\n\n if use_var:\n y = sbnet_module.sparse_scatter_var(\n q,\n indices.bin_counts,\n indices.active_block_indices,\n x,\n dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),\n dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32),\n dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),\n add=True,\n transpose=transpose)\n else:\n y = sbnet_module.sparse_scatter(\n q,\n indices.bin_counts,\n indices.active_block_indices,\n x,\n dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),\n dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32),\n dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),\n add=True,\n transpose=transpose)\n return y\n\n\ndef sparse_conv2d_matmul(x, w, blk_indices, strides, padding):\n \"\"\"\n Performs 2D convolution using matrix multiplication on a sparse feature map.\n Naive python implementation of sparse convolution using gather and scatter.\n\n :param x: [Tensor] [N, H, W, C]. Input activation tensor, dtype float32.\n :param w: [Tensor] [I, J, C, K]. Convolution kernel, dtype float32.\n :param blk_indices: [Tensor] [M, h, w, 3]. Block indices of rectangles.\n :param strides: [list] List of 4 int, convolution strides.\n :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution.\n\n :return [Tensor] [N, H', W', C]. Convolution results.\n \"\"\"\n blk_indices_ = tf.reshape(blk_indices, [-1, 3])\n blk_shape = tf.shape(blk_indices)\n ksize = tf.shape(w)\n\n # Calculate the block strides.\n bstrides = _calc_block_strides(blk_shape, ksize, strides)\n\n # Calculate the output size.\n x_shape = tf.shape(x)\n out_shape = calc_out_size_4d(x_shape, ksize, strides, padding)\n\n # Pad input.\n x_ = _pad_input(\n x, ksize, strides, padding, bsize=[1, blk_shape[1], blk_shape[2], 1], bstrides=bstrides)\n\n # In matrix multiplication mode, the block patch should be the same as the kernel size.\n assert_shape = tf.assert_equal(\n tf.stack([blk_shape[1], blk_shape[2]]),\n tf.stack([ksize[0], ksize[1]]),\n message='Expect blk_indices.shape[1] == w.shape[0] and blk_indices.shape[2] == w.shape[1].')\n\n # Currently we do not support strides > 1 in this matrix multiplication mode. 
Could be supported\n # in the future.\n assert_strides = tf.assert_equal(\n tf.cast(tf.stack([strides[1], strides[2]]), tf.int64),\n tf.constant([1, 1], dtype=tf.int64),\n message='Strides > 1 not supported.')\n\n # Convolution when number of indices is larger than zero.\n def _conv_nonzero():\n # Gather patches.\n p = tf.gather_nd(x_, blk_indices_)\n p_ = tf.reshape(p, [-1, ksize[0] * ksize[1] * ksize[2]])\n\n # Convolution on patches.\n w_ = tf.reshape(w, [ksize[0] * ksize[1] * ksize[2], -1])\n q = tf.matmul(p_, w_)\n\n # Center locations.\n blk_indices_crop = blk_indices[:, 0, 0, :]\n\n # Project back to an image.\n y = tf.scatter_nd(blk_indices_crop, q, out_shape)\n return y\n\n with tf.control_dependencies([assert_shape, assert_strides]):\n return tf.cond(\n tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype),\n _conv_nonzero)\n\n\ndef mask_conv2d(x, w, mask, strides, padding):\n \"\"\"Masked 2D convolution. Used to check 2D sparse convolution.\n\n :param x: [Tensor] Convolution feature map, 4D, dtype float32.\n :param w: [Tensor] Convolution kernel, 4D, dtype float32.\n :param mask: [Tensor] Binary mask, 3D or 4D, [N, H, W] or [N, H, W, 1], dtype float32.\n :param strides: [list] List of 4 int. Convolution strides.\n :param padding: [string] Convolution padding method, `VALID` or `SAME`.\n \"\"\"\n assert len(mask.get_shape()) in [3, 4], 'Mask shape must be 3D or 4D.'\n if len(mask.get_shape()) == 3:\n mask_ = tf.expand_dims(mask, 3)\n elif len(mask.get_shape()) == 4:\n mask_ = mask\n assert mask.get_shape()[-1] == 1, '4D mask last dimension must be 1.'\n ksize = [int(ss) for ss in w.get_shape()]\n psize = [1, ksize[0], ksize[1], 1]\n mask_ = tf.nn.max_pool(mask_, psize, strides, padding)\n return tf.nn.conv2d(x, w, strides, padding) * mask_\n" ]
[ [ "tensorflow.contrib.layers.batch_norm", "tensorflow.nn.conv2d", "tensorflow.load_op_library", "tensorflow.matmul", "tensorflow.ones_like", "numpy.tile", "tensorflow.reshape", "tensorflow.scatter_nd", "tensorflow.zeros_like", "tensorflow.stack", "tensorflow.greater", "tensorflow.control_dependencies", "tensorflow.tile", "tensorflow.cast", "tensorflow.shape", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.squeeze", "numpy.sqrt", "tensorflow.pad", "tensorflow.nn.max_pool", "numpy.expand_dims", "tensorflow.range", "tensorflow.zeros", "tensorflow.nn.relu", "tensorflow.expand_dims", "tensorflow.gather_nd", "tensorflow.mod", "tensorflow.nn.l2_loss", "tensorflow.get_variable", "tensorflow.conv2d", "tensorflow.size", "tensorflow.cond", "tensorflow.strided_slice", "tensorflow.python.framework.ops.RegisterGradient" ] ]
wollbo/threshold
[ "378a32260fe4f4c5fa481138f778398427fb82e3" ]
[ "main.py" ]
[ "import numpy as np\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom lightgbm import LGBMClassifier\nfrom sklearn import metrics, model_selection\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\nfrom data import core\n\n\ndef arg_parse():\n \"\"\"Parsing input arguments\"\"\"\n parser = argparse.ArgumentParser(description='KL-D optimal threshold selection')\n\n # Main experiment parameters\n parser.add_argument('-d', nargs=1, type=str, default=\"breastcancer\", help=\"Dataset\")\n parser.add_argument('-p', dest='plot', default=True, action='store_true', help=\"Saving plots\")\n parser.add_argument('-l', nargs=1, type=list, default=[1, 10, 0.1], help=\"Evaluated FP/FN ratios\")\n parser.add_argument('-e', nargs=1, type=int, default=[0], help=\"Exponential prediction\")\n parser.add_argument('-r', dest='roc', default=False, action='store_true', help=\"Plot ROC curve\")\n parser.add_argument('-c', dest='cost', default=False, action='store_true', help=\"Plot cost function\")\n parser.add_argument('-s', nargs=1, type=int, default=None, help=\"Random seed for splitting train/test data\")\n\n # ML-model parameters\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n args = arg_parse()\n\n dataset = args.d[0] # dataset\n lambdas = args.l # list of evaluated fp/fn ratios\n exp_pred = args.e[0] # exponential test prediction parameter\n roc = args.roc # plot roc curve of prediction\n pgf = args.plot\n cost = args.cost\n seed = args.s[0] if args.s else None\n\n if pgf:\n matplotlib.use(\"pgf\")\n matplotlib.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n })\n\n prop_cycle = plt.rcParams['axes.prop_cycle']\n colors = prop_cycle.by_key()['color']\n\n x, y = core.load_data(dataset)\n x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, random_state=seed)\n\n p = sum(y_train) / len(y_train) # note: varying data structure between datasets\n print(f'p = {p}')\n N = len(y_test)\n print(f'test samples N: {N}')\n\n model = make_pipeline(StandardScaler(), LGBMClassifier())\n model.fit(x_train, y_train)\n pred = model.predict_proba(x_test)[:, 1] # Model outputs both class probabilities\n span = np.linspace(start=0.001, stop=1, endpoint=False) # cost function not defined at endpoints (0, 1)\n\n if roc:\n tpr, fpr, thres = metrics.roc_curve(y_test, pred)\n plt.plot(tpr, fpr)\n plt.plot(fpr, fpr, c='black', dashes=[4, 2])\n plt.plot()\n plt.ylabel('True positive rate')\n plt.xlabel('False positive rate')\n plt.show()\n\n if exp_pred:\n thresholds = []\n span = np.arange(0, 1, step=0.001)\n mix, f0, f1 = core.exponential_mixture(span, p=p, l1=exp_pred, l0=exp_pred)\n plt.xlim(0, 1)\n plt.ylim(0, 3.5)\n plt.xlabel(\"$x$\")\n plt.ylabel(\"$f(x)$\")\n # plt.plot(span, f0) # plotting additional densities clutters the image\n # plt.plot(span, f1)\n for idx, a in enumerate(lambdas):\n threshold = core.exponential_threshold(p, alpha=a, l=exp_pred)\n plt.axvline(x=threshold, c=colors[idx+2], lw=2)\n thresholds.append(threshold)\n plt.legend(labels=[f'$\\lambda = {a}$, $\\\\theta = {np.around(t, 2)}$' for (a,t) in zip(lambdas, thresholds)])\n plt.plot(span, mix, c=colors[1]) # set y-span 0 - 3.5\n\n plt.savefig(f'report/figures/thresholds_{dataset}_exponential_{exp_pred}.pgf') if pgf else plt.show()\n plt.close(fig='all')\n\n plt.hist(pred[y_test == 1], density=True, alpha=0.65, bins=50)\n plt.hist(pred[y_test == 0], density=True, 
alpha=0.65, bins=50)\n thresholds = []\n for idx, a in enumerate(lambdas): # Basic plotting structure\n q = core.kl_threshold(p, alpha=a)\n print(f'q is {q}')\n threshold = core.find_threshold(pred, q) # find for which threshold where the number of positives = q\n print(f'theta is {threshold}')\n plt.axvline(x=threshold, c=colors[idx+2], lw=2)\n thresholds.append(threshold)\n plt.legend(labels=[f'$\\lambda = {a}$, $\\\\theta = {np.around(t, 2)}$' for (a,t) in zip(lambdas, thresholds)])\n plt.savefig(f'report/figures/thresholds_{dataset}.pgf') if pgf else plt.show()\n\n if cost:\n for idx, a in enumerate(lambdas): # Basic plotting structure\n plt.plot(span, core.kl_cost(span, p, alpha=a), c=colors[idx+2])\n plt.legend(labels=[f'λ = {a}' for a in lambdas])\n plt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.xlim", "matplotlib.pyplot.savefig", "numpy.arange", "numpy.around", "matplotlib.use", "matplotlib.pyplot.close", "matplotlib.pyplot.hist", "matplotlib.rcParams.update", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.axvline", "matplotlib.pyplot.show", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "numpy.linspace", "sklearn.metrics.roc_curve" ] ]
EnsembleGovServices/Kamodo-ccmc-readers
[ "75841f7ad832997159046d4b2523e0a244316e9d" ]
[ "kamodo_ccmc/flythrough/SF_utilities.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 23 17:09:14 2021\r\n\r\n@author: rringuet\r\n\r\nMost needed functions to support the SatelliteFlythrough and SingleSatelliteFlythrough\r\nsoftwares. The corresponding height function to be inverted by CalcIlev\r\nwill need to be labeled H_ilev for ilev, H_ilev1 for the ilev1 variation, etc.\r\n\r\nWhen Zach's new method of height extrapolation is ready, it is to be called in \r\nCalcIlev (around line 217) where the note is.\r\n\"\"\"\r\n\r\nfrom numpy import vectorize, array, linspace, diff, where, isnan, float64\r\nfrom numpy import concatenate, argmin, argsort, unique, ndarray, NaN\r\nfrom numpy import abs as npabs\r\nfrom time import perf_counter\r\nfrom os.path import basename, isfile\r\nfrom datetime import datetime, timedelta, timezone\r\nimport kamodo_ccmc.flythrough.model_wrapper as MW\r\nfrom kamodo_ccmc.flythrough.utils import ConvertCoord\r\nfrom astropy.constants import R_earth #to convert from radius to height in CalcIlev\r\n\r\n\r\n@vectorize\r\ndef ts_to_hrs(time_val, filedate):\r\n '''Convert array of timestamps to hours since midnight of filedate string'''\r\n \r\n file_datetime = datetime.strptime(filedate+' 00:00:00', '%Y-%m-%d %H:%M:%S')\r\n return (datetime.utcfromtimestamp(time_val)-file_datetime).total_seconds()/3600.\r\n\r\n@vectorize\r\ndef hrs_to_ts(time_val, filedate):\r\n '''Convert array of hours since midnight of filedate string to timestamps'''\r\n \r\n file_datetime = datetime.strptime(filedate+' 00:00:00', '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc) \r\n return datetime.timestamp(file_datetime+timedelta(hours=time_val))\r\n\r\ndef ts_to_ISOstring(utc_ts):\r\n '''Convert timestamp to string of format 2017-05-28T00:00:00'''\r\n \r\n return datetime.utcfromtimestamp(utc_ts).isoformat()\r\n\r\ndef check_plot_dir(plot_dir):\r\n '''If plot_dir does not exist, create it'''\r\n from os import path, mkdir\r\n \r\n if not path.isdir(plot_dir): mkdir(plot_dir)\r\n return\r\n\r\ndef write_timescsv(csv_filename, times):\r\n '''writes times dict from day_files to csv for faster execution time next time.''' \r\n \r\n data_out = open(csv_filename, 'w')\r\n data_out.write('# '+csv_filename)\r\n data_out.write('\\n#file_date, filename, datetimes[0], datetimes[1], filetimes[0], filetimes[1], dt')\r\n for key in times.keys():\r\n data_out.write('\\n'+key+','+''.join([f'{value},' for value in times[key]]).strip(','))\r\n data_out.close() \r\n return\r\n\r\ndef read_timescsv(csv_filename):\r\n '''reads times dict from csv_filename for faster execution time.'''\r\n \r\n times = {}\r\n data_in = open(csv_filename, 'r')\r\n lines = data_in.readlines()\r\n data_in.close()\r\n for line in lines[2:]:\r\n data_line = line.strip('\\n').split(',')\r\n times[data_line[0]] = data_line[1:4]\r\n times[data_line[0]].extend([float64(data_line[4]), float64(data_line[5]), \r\n float(data_line[6])])\r\n return times\r\n\r\ndef day_files(file_pattern, model, call_type):\r\n '''Retrieve file times. 
Convert files if necessary.'''\r\n\r\n #file_pattern could be a list if more than one pattern in file_dir exists \r\n if not isinstance(file_pattern, str): #if a list/array of strings (GITM/similar)\r\n files, times = file_pattern, {} #run reader with file_prefixes given\r\n else: \r\n from glob import glob\r\n files, times = glob(file_pattern), {}\r\n \r\n #collect only time information from files for full time range\r\n reader = MW.Model_Reader(model)\r\n for f in files:\r\n k = reader(f, variables_requested=[], filetime=True, fulltime=True, printfiles=False)\r\n if hasattr(k, 'conversion_test'): \r\n if not k.conversion_test:\r\n continue #if file conversion errors, skip file_pattern\r\n if call_type=='normal':\r\n file_date = k.datetimes[0][0:10] #'YYYY-MM-DD'\r\n elif call_type=='single':\r\n file_date = k.datetimes[0][0:13].replace(' ','_') #'YYYY-MM-DD_HH'\r\n if file_date not in times.keys(): #prevent overwriting\r\n times[file_date] = [f,k.datetimes[0],k.datetimes[1],\r\n k.filetimes[0], k.filetimes[1], k.dt]\r\n else: \r\n file_date+='_'+k.datetimes[0][11:13] #'YYYY-MM-DD_HH'\r\n times[file_date] = [f,k.datetimes[0],k.datetimes[1],\r\n k.filetimes[0], k.filetimes[1], k.dt]\r\n return times\r\n\r\ndef check_timescsv(file_pattern, model, call_type='normal'):\r\n '''check for times csv file, write if not found in file_dir'''\r\n \r\n #file_pattern could be a list if more than one pattern in file_dir exists \r\n if not isinstance(file_pattern, str): #if a list/array of strings (GITM/similar)\r\n sample_pattern = file_pattern[0] #\r\n else: \r\n sample_pattern = file_pattern\r\n \r\n #determine csv filename\r\n sample_prefix = basename(sample_pattern)\r\n file_dir = sample_pattern.split(sample_prefix)[0]\r\n if call_type=='normal':\r\n csv_filename = file_dir+model+'_times.csv'\r\n elif call_type=='single':\r\n csv_filename = file_dir+model+'_singletimes.csv'\r\n #print('csv_filename', csv_filename)\r\n \r\n #if file DNE, write and return, else read and return\r\n if not isfile(csv_filename):\r\n times = day_files(file_pattern, model, call_type)\r\n write_timescsv(csv_filename, times)\r\n else:\r\n times = read_timescsv(csv_filename)\r\n return times\r\n\r\ndef save_times(file_patterns, sat_time, model, verbose=False):\r\n '''Adjust times between files to filetime within half of dt in seconds (7.5min by default).\r\n Works for models with one day of data per file.'''\r\n\r\n times = check_timescsv(file_patterns, model) \r\n #print(times)\r\n #look for sat_times not in files\r\n l_idx, file_dates = 0, list(times.keys())\r\n for i in range(len(file_dates)): #filter out times not in files\r\n idx = where((sat_time>=times[file_dates[i]][3]) & (sat_time<=times[file_dates[i]][4]\\\r\n +times[file_dates[i]][5]))[0] #end_time+dt\r\n times[file_dates[i]].append(idx) \r\n if i>0: #remove indices from previous idx list if it occurs in this one\r\n #prefer time to be after beg time and not in dt section after end time\r\n tmp_idx = [ival for ival in times[file_dates[i-1]][6] if ival not in idx]\r\n times[file_dates[i-1]][6] = tmp_idx \r\n\r\n #collect indices into one array for plotting\r\n net_idx = array(concatenate(tuple([times[file_date][6] for file_date in times.keys()])), dtype=int)\r\n l_idx = len(net_idx)\r\n test_idx = unique(net_idx)\r\n if len(test_idx)!=l_idx: \r\n print(l_idx, len(test_idx))\r\n raise AttributeError(\"net_idx has repeating values. 
Idx filtering didn't work right.\")\r\n \r\n #print errors for any remaining 'bad' times\r\n nbad_times = len(sat_time)-l_idx\r\n #print('save_times function', len(sat_time), l_idx)\r\n if nbad_times>0:\r\n print(f'{nbad_times} data points are not in model output files and are excluded from the flythrough.')\r\n \r\n return sat_time, times, net_idx\r\n\r\n\r\n''' Using CTIPe object for these tests:\r\n#speed test for CalcIlev section, for two step process, one satellite position at a time\r\n#outputs n_stepd, bounds of height as delta_h, bounds of ilev for delta_ilev, ilev, \r\n# and processing time\r\nt, height, lat, lon = sat_track[0]\r\nsample_ilev = linspace(1,15,15,dtype=float) #global variable\r\nfor i in [10,50,100,250,500]:\r\n start = ti.time()\r\n rough_height = array([ctipe.H([t, ilev, lat, lon])[0] for ilev in sample_ilev])\r\n ilev_range = sort(sample_ilev[argsort(abs(height-rough_height))[0:2]])\r\n test_ilev = linspace(ilev_range[0],ilev_range[1],i,dtype=float)\r\n finer_height = array([ctipe.H([t, ilev, lat, lon])[0] for ilev in test_ilev])\r\n ilev_idx = argmin(abs(height-finer_height))\r\n print(i, finer_height[ilev_idx+1]-finer_height[ilev_idx-1], \r\n test_ilev[ilev_idx+1]-test_ilev[ilev_idx-1], test_ilev[ilev_idx],ti.time()-start)\r\n\r\nOUTPUT: (i, delta_h, delta_ilev, ilev, calc_time)\r\n10 10061.973765432136 0.2222222222222232 12.777777777777779 0.01804208755493164\r\n50 1848.1176303854445 0.040816326530611846 12.775510204081632 0.04254770278930664\r\n100 914.7248877665261 0.020202020202019 12.787878787878787 0.07263469696044922\r\n250 363.68579875055 0.008032128514056325 12.783132530120483 0.20418334007263184\r\n500 181.47848474729108 0.004008016032063466 12.783567134268537 0.3631880283355713\r\n-> Too slow to execute repeatedly\r\n \r\n\r\n#speed test with one step process, same outputs\r\nt, height, lat, lon = sat_track[0]\r\nfor i in [10,50,100,500,1000,5000,10000]:\r\n test_ilev = linspace(1,15,n,dtype=float) #global variable each time\r\n start = ti.time()\r\n test_track = array([[t, ilev, lat, lon] for ilev in test_ilev])\r\n finer_height = ctipe.H(test_track)\r\n ilev_idx = argmin(abs(height-finer_height))\r\n print(i, finer_height[ilev_idx+1]-finer_height[ilev_idx-1], \r\n test_ilev[ilev_idx+1]-test_ilev[ilev_idx-1], test_ilev[ilev_idx],ti.time()-start)\r\n \r\nOUTPUT: (i, delta_h, delta_ilev, ilev, calc_time)\r\n10 152055.1091820988 3.1111111111111107 13.444444444444445 0.0\r\n50 25873.64682539692 0.571428571428573 12.714285714285714 0.0025177001953125\r\n100 12806.148428731773 0.28282828282828376 12.737373737373737 0.0010514259338378906\r\n500 2540.6987864619005 0.056112224448899184 12.783567134268537 0.0\r\n1000 1269.0777722166968 0.02802802802802873 12.785785785785786 0.006573677062988281\r\n5000 253.6124613811844 0.005601120224044465 12.784756951390278 0.002038717269897461\r\n10000 126.79354879941093 0.0028002800280031437 12.783578357835783 0.010225057601928711 *chosen method\r\n-> Not accurate to within 200 km until i=10000, but better speed than 1st option\r\n\r\n\r\n#faster 2-step process?\r\nt, height, lat, lon = sat_track[0]\r\nsample_ilev = linspace(1,15,15,dtype=float) #global variable\r\nfor i in [10,50,100,250,500,1000,5000,10000]:\r\n start = ti.time()\r\n rough_track = array([[t, ilev, lat, lon] for ilev in sample_ilev])\r\n rough_height = ctipe.H(rough_track)\r\n ilev_range = sort(sample_ilev[argsort(abs(height-rough_height))[0:2]])\r\n test_ilev = linspace(ilev_range[0],ilev_range[1],i,dtype=float)\r\n finer_track = array([[t, ilev, lat, 
lon] for ilev in test_ilev])\r\n finer_height = ctipe.H(finer_track)\r\n ilev_idx = argmin(abs(height-finer_height))\r\n print(i, finer_height[ilev_idx+1]-finer_height[ilev_idx-1], \r\n test_ilev[ilev_idx+1]-test_ilev[ilev_idx-1], test_ilev[ilev_idx],ti.time()-start)\r\n \r\nOUTPUT: (i, delta_h, delta_ilev, ilev, calc_time)\r\n10 10061.973765432136 0.2222222222222232 12.777777777777779 0.0\r\n50 1848.1176303854445 0.040816326530611846 12.775510204081632 0.0\r\n100 914.7248877665261 0.020202020202019 12.787878787878787 0.0\r\n250 363.68579875055 0.008032128514056325 12.783132530120483 0.015614032745361328\r\n500 181.47848474729108 0.004008016032063466 12.783567134268537 0.0 \r\n1000 90.64841230114689 0.0020020020020012907 12.783783783783784 0.0\r\n5000 18.115175812970847 0.0004000800160035567 12.783956791358271 0.0\r\n10000 9.056682057096623 0.00020002000199959014 12.783978397839784 0.015622377395629883\r\n\r\n-> Accurate to within 200 km for i=500, and too fast to time it. Best option out of first 3.\r\nFreezes when including i values above 500. Not sure why. Gives answer if I hit enter a few times.\r\n\r\n-> Comparing method 2 (i=10000) and 3 (i=500) in execution showed a large time difference.\r\nMethod 2 took about 150 seconds while method 3 took about 16 seconds. Choosing method 3.\r\n\r\n#Method 2\r\nsample_ilev = linspace(1,15,10000, dtype=float)\r\ndef CalcIlev2(H, t, height, lat, lon):\r\n \r\n finer_height = H(array([[t, ilev, lat, lon] for ilev in sample_ilev]))\r\n ilev_idx = argmin(abs(height-finer_height))\r\n return sample_ilev[ilev_idx] \r\n''' \r\n \r\ndef CalcIlev(H, Hunit, t, c1_val, c2_val, height, ilev_grid, z_unit, high_res, verbose=True):\r\n '''Approximate ilev by inverting the chosen height function for one sat point.\r\n high_res default is 20 meters. (high_res), input height is in meters.'''\r\n \r\n #if input height not an altitude, then convert. Else convert to meters.\r\n if z_unit=='R_E': #given height is a radius. convert\r\n radius = height\r\n height = (radius - 1.0)*R_earth.value #in meters\r\n elif z_unit=='km':\r\n height*=1000.\r\n elif z_unit=='cm':\r\n height/=100.\r\n #else assume in meters\r\n \r\n #input height is in meters, time is in hours since midnight.\r\n #convert for common function output units. Add as more are discovered.\r\n if Hunit=='m':\r\n Hconv=1\r\n elif Hunit=='cm':\r\n Hconv=1./100. #convert from cm to m\r\n elif Hunit=='km':\r\n Hconv=1000. #convert from km to m\r\n \r\n #get height function output for the ilev range allowed in model\r\n #sample_ilev = linspace(min_ilev,max_ilev,75,dtype=float) #typical range is 15\r\n rough_height = H(array([[t, c1_val, c2_val, ilev] for ilev in ilev_grid]))*Hconv\r\n max_height = rough_height.max() #save for output\r\n if isnan(max_height): #this happens if one or more of the coordinates is out of range (typically time)\r\n if verbose: print('Coordinate(s) are out of range:', t, c1_val, c2_val)\r\n return NaN, NaN\r\n \r\n #allow extrapolation for heights ABOVE height function range for this time/location\r\n #when/if Zach writes a more physical way to extrapolate, can call from here.\r\n if height>max_height:\r\n if verbose: print('Given height is above pressure level. 
Returning max possible pressure level instead')\r\n return ilev_grid.max(), abs(height-max_height)\r\n\r\n #continue with numerical inversion\r\n ilev_idx = argsort(npabs(height-rough_height))[0] #first value may not be in center of curve\r\n if ilev_idx==len(ilev_grid)-1: ilev_idx-=1 #use end instead to avoid errors\r\n test_ilev = linspace(ilev_grid[ilev_idx-1],ilev_grid[ilev_idx+1],100,dtype=float) \r\n finer_height = H(array([[t, c1_val, c2_val, ilev] for ilev in test_ilev]))*Hconv\r\n \r\n #loop through process until requested resolution is acheived\r\n loop_count, final_res = 0, npabs(height-finer_height).min() \r\n while final_res>high_res: \r\n ilev_idx = argsort(npabs(height-finer_height))[0]\r\n if ilev_idx==len(test_ilev)-1: ilev_idx-=1 #use end instead to avoid errors\r\n test_ilev = linspace(test_ilev[ilev_idx-1],test_ilev[ilev_idx+1],100,dtype=float) \r\n finer_height = H(array([[t, c1_val, c2_val, ilev] for ilev in test_ilev]))*Hconv\r\n #initial_res = final_res\r\n final_res = npabs(height-finer_height).min()\r\n #limit iterations to prevent infinite loops\r\n loop_count+=1\r\n if loop_count>10:# and (final_res-initial_res)/final_res<0.01: \r\n break\r\n \r\n #output info for inspection\r\n if verbose or loop_count>10 or isnan(npabs(height-finer_height).min()): \r\n print(f'\\nRequested height: {height:.3f} m')\r\n print(f'Maximum allowed height: {max_height:.3f} m')\r\n print(f'Number of loops required to acheive resolution: {loop_count}')\r\n print('Time, c1, c2, height:', t, c1_val, c2_val, height)\r\n print(f'Calculated equivalent pressure level: {test_ilev[argmin(npabs(height-finer_height))]}')\r\n print(f'Height resolution achieved: {npabs(height-finer_height).min():.5f} m\\n')\r\n return [test_ilev[argmin(npabs(height-finer_height))], npabs(height-finer_height).min()]\r\n\r\ndef call_CalcIlev(ilev_string, kamodo_object, sat_track0, z_unit, high_res):\r\n '''ilev agnostic method to gather parameters and call CalcIlev with less code.'''\r\n \r\n #retrieve parameters for CalcIlev call\r\n ilev_grid = getattr(kamodo_object, '_'+ilev_string)\r\n Hfunc = getattr(kamodo_object, 'H_'+ilev_string) \r\n Hunit = kamodo_object.variables['H_'+ilev_string]['units']\r\n \r\n #for each call version, call CalcIlev and build satellite track with pressure level\r\n sat_ilev, height_res = array([CalcIlev(Hfunc, Hunit, *sat_position, ilev_grid, \r\n z_unit, high_res, verbose=False) for sat_position in sat_track0]).T\r\n if len(height_res)>1:\r\n #Give user feedback about range of height resolution for height to ilev conversion\r\n clean_height = array([val for val in height_res if not isnan(val)]) #remove NaN values\r\n max_res = clean_height.max()\r\n print(f'\\nBest height resolution achieved: {clean_height.min():.5f} m')\r\n print(f'Worst height resolution achieved: {max_res:.5f} m\\n')\r\n else:\r\n print(f'\\nHeight resolution achieved: {height_res[0]:.5f} m\\n')\r\n max_res = height_res\r\n if max_res>high_res: print('Files:', kamodo_object.filename)\r\n \r\n return sat_ilev\r\n\r\n\r\ndef sat_tracks(variable_list, kamodo_object, sat_time, c1, c2, c3, z_unit, \r\n z_dependencies, high_res, verbose=False):\r\n '''Calculate satellite tracks for interpolation'''\r\n \r\n #Create satellite tracks with appropriate inputs\r\n model = kamodo_object.modelname\r\n ilev_list = MW.Var_ilev(model)\r\n sat_track = {} #initialize list of satellite tracks\r\n if '3D' in z_dependencies.keys():\r\n if verbose: print('Building height-independent satellite track.')\r\n sat_track['3D']=[[t, c1_val, 
c2_val] for t, c1_val, c2_val in zip(\r\n sat_time,c1,c2)]\r\n if '4D' in z_dependencies.keys(): #if function requires height (in km)\r\n if verbose: print('Building height-dependent satellite track (km).')\r\n sat_track['4D']=[[t, c1_val, c2_val, c3_val] for t, c1_val, c2_val, c3_val in zip(\r\n sat_time,c1,c2,c3)]\r\n if sum([True if key in ilev_list else False for key in z_dependencies.keys()])>0: \r\n #if ilev, ilev1, or milev is required for at least one variable\r\n if verbose: print('Converting height to pressure level and building satellite track.')\r\n start = perf_counter() #Input h in meters\r\n sat_track0 = [[t, c1_val, c2_val, c3_val] for t, c1_val, c2_val, c3_val in zip(\r\n sat_time,c1,c2,c3)]\r\n for ilev_string in ilev_list:\r\n if ilev_string in z_dependencies.keys():\r\n if len(z_dependencies[ilev_string])>0: #ignore empty lists\r\n #add new track type to dictionary\r\n sat_ilev = call_CalcIlev(ilev_string, kamodo_object, sat_track0, \r\n z_unit, high_res) \r\n sat_track[ilev_string]=[[t, c1_val, c2_val, ilev_val] for \\\r\n t, c1_val, c2_val, ilev_val in zip(\r\n sat_time,c1,c2,sat_ilev)]\r\n if verbose: print(f'{ilev_string} track added') \r\n if verbose: print(f'Conversion took {perf_counter()-start} s for {len(sat_time)} positions.')\r\n \r\n return sat_track\r\n\r\ndef Model_FlyAway(reader, filename, variable_list, sat_time, c1, c2, c3, \r\n z_unit, z_dependencies, high_res, verbose=False):\r\n '''Perform flythrough for one day of data and one coordinate system.'''\r\n \r\n #create kamodo object, initialize some variables\r\n var_list = variable_list.copy() #save copy before it gets altered by the reader\r\n kamodo_object = reader(filename, variables_requested=variable_list, gridded_int=False)\r\n \r\n #remove requested variables not found in data from variable list and z_dependencies\r\n newvar_list = [var for var in var_list if var in kamodo_object.variables.keys()]\r\n z_dependencies = {key:[var for var in value if var in newvar_list] \\\r\n for key, value in z_dependencies.items()}\r\n \r\n #create satellite tracks of types needed based on vertical dependencies\r\n sat_track = sat_tracks(newvar_list, kamodo_object, sat_time, c1, c2, c3,\r\n z_unit, z_dependencies, high_res, verbose=verbose)\r\n\r\n #retrieve interpolator and interpolate data for each variable, using track \r\n #type appropriate for each variable. 
\r\n #print('Model_FlyAway2',sat_track.keys())\r\n #for var in var_list:\r\n # print(var,[key for key, value in z_dependencies.items() if var in value][0])\r\n \r\n results = {var: kamodo_object[var](sat_track[[key for key, value in \\\r\n z_dependencies.items() if var in value][0]])\\\r\n for var in newvar_list}\r\n \r\n return results\r\n\r\ndef coordinate_systems(model, sat_time, c1, c2, c3, variable_list, coord_type, coord_grid):\r\n '''Detect what coordinate system is needed per variable, convert and return per type.'''\r\n \r\n #determine coordinate types needed, convert to alternative coordinates if necessary\r\n var_dict = MW.Model_Variables(model, return_dict=True) #{varname:[desc, int, coord_name, grid_type, coord_list, unit]}\r\n var_coord_strs = unique([value[2].split('_')[0]+','+value[3] for key, value in var_dict.items() \\\r\n if key in variable_list]) #'SPH,sph','MAG,sph','GDZ,sph', etc\r\n if len(var_coord_strs)!=1 or var_coord_strs[0]!=(coord_type+','+coord_grid): #then coordinate conversion needed\r\n #print('CoordConv check',coord_type+','+coord_grid, var_coord_strs, model, variable_list)\r\n new_coords={coord_name:[[key for key, value in var_dict.items() \\\r\n if (value[2].split('_')[0]+','+value[3]==coord_name)\\\r\n and key in variable_list]] \\\r\n for coord_name in var_coord_strs} #key is coord type 'name,type'\r\n #first value is a list of the variable names needed those coordinates\r\n #print('Performing needed coordinate conversions using SpacePy.')\r\n for key in new_coords.keys(): #e.g. key= 'GDZ,sph'\r\n #convert to needed coordinates, in/out order NO LONGER depends on coord_grid\r\n #can't use net_idx because indices won't line up anymore with split by files\r\n alt_c1, alt_c2, alt_c3, units_out = \\\r\n ConvertCoord(sat_time,c1,c2,c3,coord_type,coord_grid,*key.split(','))\r\n '''#used with utils_old ConvertCoord function\r\n if key.split(',')[1]=='sph' and coord_grid=='sph': \r\n alt_c3, alt_c2, alt_c1, units_out = \\\r\n ConvertCoord(sat_time,c3,c2,c1,coord_type,coord_grid,*key.split(',')) #height, lat, lon for both\r\n elif key.split(',')[1]=='sph' and coord_grid=='car':\r\n alt_c3, alt_c2, alt_c1, units_out = \\\r\n ConvertCoord(sat_time,c1,c2,c3,coord_type,coord_grid,*key.split(',')) #x, y, z input\r\n elif key.split(',')[1]=='car' and coord_grid=='sph':\r\n alt_c1, alt_c2, alt_c3, units_out = \\\r\n ConvertCoord(sat_time,c3,c2,c1,coord_type,coord_grid,*key.split(',')) #height, lat, lon input\r\n elif key.split(',')[1]=='car' and coord_grid=='car':\r\n alt_c1, alt_c2, alt_c3, units_out = \\\r\n ConvertCoord(sat_time,c1,c2,c3,coord_type,coord_grid,*key.split(',')) #x, y, z for both\r\n '''\r\n new_coords[key].extend([alt_c1,alt_c2,alt_c3]) #elements 1, 2, 3\r\n #print(key, alt_c1.min(), alt_c1.max(), alt_c2.min(), alt_c2.max(), \r\n # alt_c3.min(), alt_c3.max())\r\n \r\n #determine unit of z coordinate. needed for conversion to ilev\r\n if key.split(',')[0]=='GDZ': z_unit='km'\r\n else: z_unit='R_E'\r\n new_coords[key].append(z_unit) #element 4\r\n #print('z_unit:', z_unit)\r\n else:\r\n new_coords = {coord_type+','+coord_grid:[variable_list,c1,c2,c3]} \r\n \r\n #determine unit of z coordinate. 
needed for conversion to ilev\r\n if coord_type=='GDZ': z_unit='km'\r\n else: z_unit='R_E'\r\n new_coords[coord_type+','+coord_grid].append(z_unit) #element 4\r\n #print('z_unit:', z_unit)\r\n \r\n #determine z_dependency of relevant variables for each coordinate system \r\n for key in new_coords.keys():\r\n z_dependencies = {}\r\n if 3 in [len(value[4]) for keyv, value in var_dict.items() if keyv in new_coords[key][0]]:\r\n z_dependencies['3D'] = [keyv for keyv, value in var_dict.items() if len(value[4])==3\\\r\n and keyv in new_coords[key][0]]\r\n end_4coords = unique([value[4][-1] for keyv, value in var_dict.items() \\\r\n if keyv in new_coords[key][0] and len(value[4])==4]) #e.g. ['radius','ilev','height']\r\n if len(end_4coords)>0: #if some variables in lista are 4D\r\n ilev_list = MW.Var_ilev(model)\r\n ilev_coords = list(unique([ilev for ilev in end_4coords if ilev in ilev_list]))\r\n if len(ilev_coords)>0: \r\n for ilev_type in ilev_coords:\r\n z_dependencies[ilev_type] = [keyv for keyv, value in var_dict.items()\\\r\n if value[4][-1]==ilev_type and \\\r\n keyv in new_coords[key][0]] #add ilev dependencies\r\n if len(ilev_coords)<len(end_4coords): #if there are other dependencies\r\n z_dependencies['4D'] = [keyv for keyv, value in var_dict.items()\\\r\n if value[4][-1] not in ilev_list \\\r\n and value[4][-1] in end_4coords\\\r\n and keyv in new_coords[key][0]]\r\n new_coords[key].append(z_dependencies) #element 5 \r\n #print('z_den:', new_coords[key][0], z_dependencies) \r\n return new_coords\r\n\r\ndef Model_SatelliteFlythrough(model, file_dir, variable_list, sat_time, c1, c2, \r\n c3, coord_type, coord_grid, high_res, verbose=False):\r\n '''\r\n Execute flythrough for model data. Returns results_dict.\r\n results_dict is a dictionary of the interpolated data for the entire data set\r\n sorted by variable name.\r\n file_dir is a string indicating where the data files are located.\r\n variable_list is a list of strings of the desired variable names. \r\n sat_time is an array of timestamp values.\r\n c1, c2, c3 = x, y, z or lon, lat, height (or radius)\r\n if x, y, z, then must be in R_E units\r\n if radius -> R_E. if height -> km\r\n '''\r\n\r\n #Check that sat data is all the same length, will error if not\r\n if max(diff(array([len(sat_time),len(c3),len(c2),len(c1)])))>0:\r\n raise AttributeError (f'Satellite arrays or lists must all be the same length.\\\r\n Current array lengths are {len(sat_time)}, {len(c1)},\\\r\n {len(c2)}, and {len(c3)}') \r\n \r\n #reader prefers converted filename, even if it does not exist. 
will create if no wrapped data found.\r\n file_patterns = MW.FileSearch(model, file_dir)\r\n reader = MW.Model_Reader(model) #Kamodo gets imported here\r\n #print(file_patterns)\r\n \r\n #match trajectory times to model data output files\r\n sat_time, times, net_idx = save_times(file_patterns, sat_time, model, verbose=verbose)\r\n \r\n #initialize results dictionary with given trajectory\r\n results_dict = {'utc_time': sat_time[net_idx], 'c1': c1[net_idx],\r\n 'c2': c2[net_idx], 'c3': c3[net_idx],\r\n 'net_idx': net_idx} #index for comparison with other data from real satellite\r\n \r\n #perform coordinate conversions and sort variables by coordinate systems\r\n coord_dict = coordinate_systems(model, sat_time, c1, c2, c3, variable_list, coord_type, coord_grid)\r\n \r\n #perform flythroughs\r\n print('Interpolating through model data...',end=\"\")\r\n #print(coord_dict['GDZ,sph'][0], coord_dict['GDZ,sph'][5])\r\n interp_time = perf_counter()\r\n for key in coord_dict.keys():\r\n #interpolate requested data for each day. FlyAway is specific to each wrapper\r\n #reader, file_name, variable_list, sat_time in hrs, c1, c2, c3, z_unit, z_dependencies, high_res\r\n list_results = [Model_FlyAway(reader, times[file_date][0], coord_dict[key][0], \r\n ts_to_hrs(sat_time[times[file_date][6]], file_date.split('_')[0]),\r\n coord_dict[key][1][times[file_date][6]], \r\n coord_dict[key][2][times[file_date][6]], \r\n coord_dict[key][3][times[file_date][6]], \r\n coord_dict[key][4], coord_dict[key][5],\r\n high_res, verbose=verbose) \\\r\n for file_date in times.keys() if len(sat_time[times[file_date][6]])>0]\r\n \r\n #get new variable list from results dictionaries\r\n newvar_list = []\r\n [newvar_list.extend(list(results.keys())) for results in list_results]\r\n #print(newvar_list)\r\n \r\n #collect interpolated data into the same dictionary\r\n for var in newvar_list: #sort and combine arrays for the same variable\r\n results_dict[var] = concatenate(tuple([results[var] for results in list_results]))\r\n print(f'done in {perf_counter()-interp_time:.5f} s.')\r\n\r\n return results_dict\r\n\r\ndef Prepare_Files(model, file_dir, call_type='normal'):\r\n '''Return a list of the required height input for each variable. Create wrapped files if needed.'''\r\n \r\n #Determine possible file patterns. Create wrapped files if needed.\r\n file_patterns = MW.FileSearch(model, file_dir, call_type=call_type)\r\n times = check_timescsv(file_patterns, model, call_type=call_type) #reader creates converted files if DNE\r\n #print('Files prepared for run.')\r\n return \r\n\r\n#----------Code below is for Single Satellite Flythrough software--------------------------------\r\n\r\ndef find_singlefiletime(file_patterns, sat_time, model):\r\n '''Find file containing given single time. Adjust if within dt seconds.'''\r\n \r\n #t_time = perf_counter()\r\n times = check_timescsv(file_patterns, model, call_type='single') #Retrieve file times\r\n #print('day_files timing:', perf_counter()-t_time)\r\n \r\n #Check if time is not in a file. 
Break if not.\r\n filename=''\r\n for file_date in times.keys(): \r\n if ((sat_time>=times[file_date][3]) & (sat_time<=times[file_date][4]\\\r\n +times[file_date][5])): #end_time+dt\r\n filename = times[file_date][0] #overwrites on purpose if time is found in later file\r\n #better for time to be in file than after end but in dt section\r\n \r\n #if time not found in file, print stuff and break, else return filename\r\n if filename=='': \r\n print('\\nCheck that time fits in file time ranges:')\r\n print(f'Data time (UTC): {sat_time}')\r\n print('Filename, Min DateTime, Max DateTime, Min Time (UTC), Max Time (UTC)')\r\n for file_date in times.keys(): \r\n print (times[file_date][0].split('/')[-1].split('\\\\')[-1], times[file_date][3:5])\r\n raise AttributeError('No file found with the time given.')\r\n\r\n return filename \r\n\r\ndef Single_DzCalc(results, sat_positions, kamodo_object, variable_list, z_dependence, dz):\r\n '''fly satellite through model data, per position\r\n sat_time, c1, c2, and c3 are all floats, not arrays\r\n z_dependence = dict '''\r\n \r\n if sum(dz)==0: return results \r\n for i in range(len(variable_list)):\r\n sat_pos = sat_positions[[key for key, value in z_dependence.items() \\\r\n if variable_list[i] in value][0]][0]\r\n if dz[i]==1 and len(sat_pos)==3:\r\n print(f'{variable_list[i]} has no vertical dependency. Vertical partial derivative not calculated.')\r\n continue\r\n elif dz[i]==1 and len(sat_pos)==4: #calculate dz for +/- 1% of vertical value\r\n #check for undefined value at original position\r\n if isnan(kamodo_object[variable_list[i]](sat_pos)):\r\n print('Interpolated value at given position is undefined. Cannot calculate Dz for',\r\n variable_list[i])\r\n continue\r\n \r\n #get +/-1% positions and interpolate\r\n sat_pos_up = [*sat_pos[0:3],sat_pos[3]*1.01]\r\n sat_pos_dn = [*sat_pos[0:3],sat_pos[3]*0.99]\r\n value_up = kamodo_object[variable_list[i]](sat_pos_up)[0]\r\n value_dn = kamodo_object[variable_list[i]](sat_pos_dn)[0]\r\n \r\n #if one is NaN (out of range), then replace with original value\r\n if isnan(value_up): value_up = kamodo_object[variable_list[i]](sat_pos)[0]\r\n elif isnan(value_dn): value_dn = kamodo_object[variable_list[i]](sat_pos)[0]\r\n \r\n #calculate diff and store\r\n results[variable_list[i]+'_dz'] = value_up-value_dn\r\n \r\n return results\r\n\r\ndef Single_FlyAway(model, file_dir, variable_list, dz, sat_time, c1, c2, c3, \r\n coord_type, coord_grid, high_res):\r\n '''Fly given satellite position through model code.'''\r\n #t0 = perf_counter()\r\n #time_array=[]\r\n\r\n #find file with sat_time \r\n file_patterns = MW.FileSearch(model, file_dir, call_type='single')\r\n filename = find_singlefiletime(file_patterns, sat_time, model)\r\n #time_array.append(['test0a',perf_counter()-t0])\r\n #t1 = perf_counter()\r\n \r\n #get ctipe object for requested variable (+H too), remove variables not found\r\n var_list = variable_list #save a copy because it gets altered\r\n reader = MW.Model_Reader(model) #imports Kamodo here\r\n kamodo_object = reader(filename, variables_requested=variable_list, \r\n gridded_int=False)\r\n newvar_list = [var for var in var_list if var in kamodo_object.variables.keys()]\r\n #time_array.append(['test1',perf_counter()-t1])\r\n #t2 = perf_counter() \r\n \r\n #convert to arrays if needed\r\n if not isinstance(sat_time, ndarray): sat_time = array([sat_time])\r\n if not isinstance(c1, ndarray): c1 = array([c1])\r\n if not isinstance(c2, ndarray): c2 = array([c2])\r\n if not isinstance(c3, ndarray): c3 = 
array([c3])\r\n \r\n #convert time to hrs since midnight\r\n str_date = datetime.strftime(kamodo_object.filedate, format='%Y-%m-%d')\r\n model_sat_time = ts_to_hrs(sat_time, str_date)\r\n #time_array.append(['test2',perf_counter()-t2])\r\n #t3 = perf_counter() \r\n \r\n #deal with diff coordinate systems\r\n coord_dict = coordinate_systems(model, sat_time, c1, c2, c3, newvar_list, \r\n coord_type, coord_grid)\r\n #time_array.append(['test3',perf_counter()-t3])\r\n #t4 = perf_counter() \r\n #print(coord_dict) \r\n \r\n #get tracks and interpolate for coordinate systems separately\r\n results_dict={}\r\n for key in coord_dict.keys():\r\n \r\n #get all types of tracks needed: variable list, ko, time_hrs, c1, c2, c3, z_unit, z_depend, high_res\r\n sat_positions = sat_tracks(coord_dict[key][0], kamodo_object, model_sat_time, \r\n *coord_dict[key][1:], high_res) \r\n \r\n #perform interpolation with correct version of satellite position, store in dict\r\n results = {var: kamodo_object[var](sat_positions[[key for key, value in \\\r\n coord_dict[key][5].items() if var in value][0]])[0]\\\r\n for var in coord_dict[key][0]} \r\n \r\n #collect interpolated data into the same dictionary\r\n for var in coord_dict[key][0]: #will be different variables in each loop\r\n results_dict[var] = results[var]\r\n \r\n #if requested, approximate dz for variables with +/-1% diff in vertical coordinate\r\n if sum(dz)>0: #results, positions, ko, var_list, z_depend, dz\r\n results_dict = Single_DzCalc(results_dict, sat_positions, kamodo_object, \r\n coord_dict[key][0], coord_dict[key][5], dz)\r\n #time_array.append(['test4',perf_counter()-t4])\r\n #print('test4', perf_counter()-t4)\r\n #print()\r\n #for i in range(len(time_array)):\r\n # print(time_array[i])\r\n #print()\r\n return results_dict\r\n\r\n\r\n\r\n#------ Code below here is for possible link directly into fortran------------\r\ndef test_validobject(kamodo_object, sat_time): \r\n ''' Determine if a new ctipe object is needed bsed on the time given'''\r\n \r\n if isinstance(kamodo_object, list): \r\n return True #if a list, then ctipe DNE, get a new one\r\n elif (sat_time>=kamodo_object.filetimes[0]) and (sat_time<=kamodo_object.filetimes[1]):\r\n return False #sat_time is within known time range of file, use current ctipe\r\n else: #sat_time is not within known time range of file, get a new ctipe object\r\n return True\r\n" ]
[ [ "numpy.array", "numpy.isnan", "numpy.float64", "numpy.where", "numpy.abs", "numpy.linspace", "numpy.unique" ] ]
711e/mmdetection
[ "89da8dbe4dbcfd7c92a184d54c7c87675e49c70c" ]
[ "mmdet/models/anchor_heads/ssd_head.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\n\nfrom mmdet.core import (AnchorGenerator, anchor_target, weighted_smoothl1,\n multi_apply)\nfrom .anchor_head import AnchorHead\nfrom ..registry import HEADS\n\n\n@HEADS.register_module\nclass SSDHead(AnchorHead):\n\n def __init__(self,\n input_size=300,\n num_classes=81,\n in_channels=(512, 1024, 512, 256, 256, 256),\n anchor_strides=(8, 16, 32, 64, 100, 300),\n basesize_ratio_range=(0.1, 0.9),\n anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n target_means=(.0, .0, .0, .0),\n target_stds=(1.0, 1.0, 1.0, 1.0)):\n super(AnchorHead, self).__init__()\n self.input_size = input_size\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.cls_out_channels = num_classes\n num_anchors = [len(ratios) * 2 + 2 for ratios in anchor_ratios]\n reg_convs = []\n cls_convs = []\n for i in range(len(in_channels)):\n reg_convs.append(\n nn.Conv2d(\n in_channels[i],\n num_anchors[i] * 4,\n kernel_size=3,\n padding=1))\n cls_convs.append(\n nn.Conv2d(\n in_channels[i],\n num_anchors[i] * num_classes,\n kernel_size=3,\n padding=1))\n self.reg_convs = nn.ModuleList(reg_convs)\n self.cls_convs = nn.ModuleList(cls_convs)\n\n min_ratio, max_ratio = basesize_ratio_range\n min_ratio = int(min_ratio * 100)\n max_ratio = int(max_ratio * 100)\n step = int(np.floor(max_ratio - min_ratio) / (len(in_channels) - 2))\n min_sizes = []\n max_sizes = []\n for r in range(int(min_ratio), int(max_ratio) + 1, step):\n min_sizes.append(int(input_size * r / 100))\n max_sizes.append(int(input_size * (r + step) / 100))\n if input_size == 300:\n if basesize_ratio_range[0] == 0.15: # SSD300 COCO\n min_sizes.insert(0, int(input_size * 7 / 100))\n max_sizes.insert(0, int(input_size * 15 / 100))\n elif basesize_ratio_range[0] == 0.2: # SSD300 VOC\n min_sizes.insert(0, int(input_size * 10 / 100))\n max_sizes.insert(0, int(input_size * 20 / 100))\n elif input_size == 512:\n if basesize_ratio_range[0] == 0.1: # SSD512 COCO\n min_sizes.insert(0, int(input_size * 4 / 100))\n max_sizes.insert(0, int(input_size * 10 / 100))\n elif basesize_ratio_range[0] == 0.15: # SSD512 VOC\n min_sizes.insert(0, int(input_size * 7 / 100))\n max_sizes.insert(0, int(input_size * 15 / 100))\n self.anchor_generators = []\n self.anchor_strides = anchor_strides\n for k in range(len(anchor_strides)):\n base_size = min_sizes[k]\n stride = anchor_strides[k]\n ctr = ((stride - 1) / 2., (stride - 1) / 2.)\n scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]\n ratios = [1.]\n for r in anchor_ratios[k]:\n ratios += [1 / r, r] # 4 or 6 ratio\n anchor_generator = AnchorGenerator(\n base_size, scales, ratios, scale_major=False, ctr=ctr)\n indices = list(range(len(ratios)))\n indices.insert(1, len(indices))\n anchor_generator.base_anchors = torch.index_select(\n anchor_generator.base_anchors, 0, torch.LongTensor(indices))\n self.anchor_generators.append(anchor_generator)\n\n self.target_means = target_means\n self.target_stds = target_stds\n self.use_sigmoid_cls = False\n self.use_focal_loss = False\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform', bias=0)\n\n def forward(self, feats):\n cls_scores = []\n bbox_preds = []\n for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,\n self.cls_convs):\n cls_scores.append(cls_conv(feat))\n bbox_preds.append(reg_conv(feat))\n return cls_scores, bbox_preds\n\n def loss_single(self, cls_score, bbox_pred, 
labels, label_weights,\n bbox_targets, bbox_weights, mix_inds, num_total_samples, cfg):\n loss_cls_all = F.cross_entropy(\n cls_score, labels, reduction='none') * label_weights\n loss_cls_all = loss_cls_all * mix_inds\n\n pos_inds = (labels > 0).nonzero().view(-1)\n neg_inds = (labels == 0).nonzero().view(-1)\n\n num_pos_samples = pos_inds.size(0)\n num_neg_samples = cfg.neg_pos_ratio * num_pos_samples\n if num_neg_samples > neg_inds.size(0):\n num_neg_samples = neg_inds.size(0)\n topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)\n loss_cls_pos = loss_cls_all[pos_inds].sum()\n loss_cls_neg = topk_loss_cls_neg.sum()\n loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples\n\n loss_reg = weighted_smoothl1(\n bbox_pred,\n bbox_targets,\n bbox_weights,\n mix_inds=mix_inds,\n beta=cfg.smoothl1_beta,\n avg_factor=num_total_samples)\n return loss_cls[None], loss_reg\n\n def loss(self,\n cls_scores,\n bbox_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n cfg,\n gt_bboxes_ignore=None):\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == len(self.anchor_generators)\n\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, img_metas)\n cls_reg_targets = anchor_target(\n anchor_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n self.target_means,\n self.target_stds,\n cfg,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n label_channels=1,\n sampling=False,\n unmap_outputs=False)\n if cls_reg_targets is None:\n return None\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n num_total_pos, num_total_neg, mix_inds_list) = cls_reg_targets\n\n num_images = len(img_metas)\n all_cls_scores = torch.cat([\n s.permute(0, 2, 3, 1).reshape(\n num_images, -1, self.cls_out_channels) for s in cls_scores\n ], 1)\n all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n all_label_weights = torch.cat(label_weights_list, -1).view(\n num_images, -1)\n all_bbox_preds = torch.cat([\n b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n for b in bbox_preds\n ], -2)\n all_bbox_targets = torch.cat(bbox_targets_list, -2).view(\n num_images, -1, 4)\n all_bbox_weights = torch.cat(bbox_weights_list, -2).view(\n num_images, -1, 4)\n mix_inds_list = torch.cat(mix_inds_list, -1).view(num_images, -1)\n losses_cls, losses_reg = multi_apply(\n self.loss_single,\n all_cls_scores,\n all_bbox_preds,\n all_labels,\n all_label_weights,\n all_bbox_targets,\n all_bbox_weights,\n mix_inds_list,\n num_total_samples=num_total_pos,\n cfg=cfg)\n return dict(loss_cls=losses_cls, loss_reg=losses_reg)\n" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.functional.cross_entropy", "torch.nn.Conv2d", "torch.LongTensor", "numpy.sqrt", "numpy.floor" ] ]